author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-11 08:27:49 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-11 08:27:49 +0000
commit    ace9429bb58fd418f0c81d4c2835699bddf6bde6
tree      b2d64bc10158fdd5497876388cd68142ca374ed3
parent    Initial commit.
Adding upstream version 6.6.15.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r-- arch/arm/include/asm/Kbuild | 8
-rw-r--r-- arch/arm/include/asm/arch_gicv3.h | 261
-rw-r--r-- arch/arm/include/asm/arch_timer.h | 146
-rw-r--r-- arch/arm/include/asm/archrandom.h | 12
-rw-r--r-- arch/arm/include/asm/arm-cci.h | 31
-rw-r--r-- arch/arm/include/asm/arm_pmuv3.h | 252
-rw-r--r-- arch/arm/include/asm/asm-offsets.h | 1
-rw-r--r-- arch/arm/include/asm/assembler.h | 794
-rw-r--r-- arch/arm/include/asm/atomic.h | 514
-rw-r--r-- arch/arm/include/asm/auxvec.h | 1
-rw-r--r-- arch/arm/include/asm/bL_switcher.h | 74
-rw-r--r-- arch/arm/include/asm/barrier.h | 103
-rw-r--r-- arch/arm/include/asm/bitops.h | 278
-rw-r--r-- arch/arm/include/asm/bitrev.h | 21
-rw-r--r-- arch/arm/include/asm/bug.h | 93
-rw-r--r-- arch/arm/include/asm/bugs.h | 16
-rw-r--r-- arch/arm/include/asm/cache.h | 29
-rw-r--r-- arch/arm/include/asm/cacheflush.h | 474
-rw-r--r-- arch/arm/include/asm/cachetype.h | 99
-rw-r--r-- arch/arm/include/asm/checksum.h | 166
-rw-r--r-- arch/arm/include/asm/clocksource.h | 7
-rw-r--r-- arch/arm/include/asm/cmpxchg.h | 280
-rw-r--r-- arch/arm/include/asm/compiler.h | 29
-rw-r--r-- arch/arm/include/asm/cp15.h | 122
-rw-r--r-- arch/arm/include/asm/cpu.h | 23
-rw-r--r-- arch/arm/include/asm/cpufeature.h | 35
-rw-r--r-- arch/arm/include/asm/cpuidle.h | 58
-rw-r--r-- arch/arm/include/asm/cputype.h | 348
-rw-r--r-- arch/arm/include/asm/cti.h | 160
-rw-r--r-- arch/arm/include/asm/current.h | 71
-rw-r--r-- arch/arm/include/asm/dcc.h | 33
-rw-r--r-- arch/arm/include/asm/delay.h | 100
-rw-r--r-- arch/arm/include/asm/device.h | 29
-rw-r--r-- arch/arm/include/asm/div64.h | 111
-rw-r--r-- arch/arm/include/asm/dma-iommu.h | 36
-rw-r--r-- arch/arm/include/asm/dma.h | 149
-rw-r--r-- arch/arm/include/asm/dmi.h | 15
-rw-r--r-- arch/arm/include/asm/domain.h | 141
-rw-r--r-- arch/arm/include/asm/ecard.h | 219
-rw-r--r-- arch/arm/include/asm/edac.h | 38
-rw-r--r-- arch/arm/include/asm/efi.h | 81
-rw-r--r-- arch/arm/include/asm/elf.h | 156
-rw-r--r-- arch/arm/include/asm/exception.h | 15
-rw-r--r-- arch/arm/include/asm/fb.h | 6
-rw-r--r-- arch/arm/include/asm/fiq.h | 57
-rw-r--r-- arch/arm/include/asm/firmware.h | 79
-rw-r--r-- arch/arm/include/asm/fixmap.h | 66
-rw-r--r-- arch/arm/include/asm/floppy.h | 83
-rw-r--r-- arch/arm/include/asm/fncpy.h | 82
-rw-r--r-- arch/arm/include/asm/fpstate.h | 79
-rw-r--r-- arch/arm/include/asm/ftrace.h | 84
-rw-r--r-- arch/arm/include/asm/futex.h | 180
-rw-r--r-- arch/arm/include/asm/glue-cache.h | 161
-rw-r--r-- arch/arm/include/asm/glue-df.h | 99
-rw-r--r-- arch/arm/include/asm/glue-pf.h | 54
-rw-r--r-- arch/arm/include/asm/glue-proc.h | 261
-rw-r--r-- arch/arm/include/asm/glue.h | 22
-rw-r--r-- arch/arm/include/asm/hardirq.h | 12
-rw-r--r-- arch/arm/include/asm/hardware/cache-aurora-l2.h | 100
-rw-r--r-- arch/arm/include/asm/hardware/cache-b15-rac.h | 10
-rw-r--r-- arch/arm/include/asm/hardware/cache-feroceon-l2.h | 9
-rw-r--r-- arch/arm/include/asm/hardware/cache-l2x0.h | 192
-rw-r--r-- arch/arm/include/asm/hardware/cache-tauros2.h | 11
-rw-r--r-- arch/arm/include/asm/hardware/cache-uniphier.h | 21
-rw-r--r-- arch/arm/include/asm/hardware/cp14.h | 534
-rw-r--r-- arch/arm/include/asm/hardware/dec21285.h | 138
-rw-r--r-- arch/arm/include/asm/hardware/ioc.h | 69
-rw-r--r-- arch/arm/include/asm/hardware/iomd.h | 182
-rw-r--r-- arch/arm/include/asm/hardware/locomo.h | 219
-rw-r--r-- arch/arm/include/asm/hardware/memc.h | 23
-rw-r--r-- arch/arm/include/asm/hardware/sa1111.h | 442
-rw-r--r-- arch/arm/include/asm/hardware/scoop.h | 67
-rw-r--r-- arch/arm/include/asm/hardware/ssp.h | 25
-rw-r--r-- arch/arm/include/asm/highmem.h | 78
-rw-r--r-- arch/arm/include/asm/hugetlb-3level.h | 29
-rw-r--r-- arch/arm/include/asm/hugetlb.h | 24
-rw-r--r-- arch/arm/include/asm/hw_breakpoint.h | 145
-rw-r--r-- arch/arm/include/asm/hw_irq.h | 17
-rw-r--r-- arch/arm/include/asm/hwcap.h | 16
-rw-r--r-- arch/arm/include/asm/hypervisor.h | 10
-rw-r--r-- arch/arm/include/asm/idmap.h | 15
-rw-r--r-- arch/arm/include/asm/insn.h | 47
-rw-r--r-- arch/arm/include/asm/io.h | 435
-rw-r--r-- arch/arm/include/asm/irq.h | 47
-rw-r--r-- arch/arm/include/asm/irq_work.h | 14
-rw-r--r-- arch/arm/include/asm/irqflags.h | 187
-rw-r--r-- arch/arm/include/asm/jump_label.h | 49
-rw-r--r-- arch/arm/include/asm/kasan.h | 33
-rw-r--r-- arch/arm/include/asm/kasan_def.h | 81
-rw-r--r-- arch/arm/include/asm/kexec-internal.h | 12
-rw-r--r-- arch/arm/include/asm/kexec.h | 83
-rw-r--r-- arch/arm/include/asm/kfence.h | 53
-rw-r--r-- arch/arm/include/asm/kgdb.h | 107
-rw-r--r-- arch/arm/include/asm/kprobes.h | 80
-rw-r--r-- arch/arm/include/asm/krait-l2-accessors.h | 9
-rw-r--r-- arch/arm/include/asm/linkage.h | 12
-rw-r--r-- arch/arm/include/asm/mach/arch.h | 95
-rw-r--r-- arch/arm/include/asm/mach/dma.h | 46
-rw-r--r-- arch/arm/include/asm/mach/flash.h | 36
-rw-r--r-- arch/arm/include/asm/mach/irq.h | 30
-rw-r--r-- arch/arm/include/asm/mach/map.h | 65
-rw-r--r-- arch/arm/include/asm/mach/pci.h | 86
-rw-r--r-- arch/arm/include/asm/mach/sharpsl_param.h | 33
-rw-r--r-- arch/arm/include/asm/mach/time.h | 13
-rw-r--r-- arch/arm/include/asm/mc146818rtc.h | 31
-rw-r--r-- arch/arm/include/asm/mcpm.h | 338
-rw-r--r-- arch/arm/include/asm/mcs_spinlock.h | 24
-rw-r--r-- arch/arm/include/asm/memblock.h | 10
-rw-r--r-- arch/arm/include/asm/memory.h | 396
-rw-r--r-- arch/arm/include/asm/mmu.h | 49
-rw-r--r-- arch/arm/include/asm/mmu_context.h | 152
-rw-r--r-- arch/arm/include/asm/module.h | 57
-rw-r--r-- arch/arm/include/asm/module.lds.h | 7
-rw-r--r-- arch/arm/include/asm/mpu.h | 133
-rw-r--r-- arch/arm/include/asm/mtd-xip.h | 20
-rw-r--r-- arch/arm/include/asm/neon.h | 33
-rw-r--r-- arch/arm/include/asm/nwflash.h | 9
-rw-r--r-- arch/arm/include/asm/opcodes-sec.h | 17
-rw-r--r-- arch/arm/include/asm/opcodes-virt.h | 26
-rw-r--r-- arch/arm/include/asm/opcodes.h | 233
-rw-r--r-- arch/arm/include/asm/outercache.h | 120
-rw-r--r-- arch/arm/include/asm/page-nommu.h | 34
-rw-r--r-- arch/arm/include/asm/page.h | 195
-rw-r--r-- arch/arm/include/asm/paravirt.h | 22
-rw-r--r-- arch/arm/include/asm/paravirt_api_clock.h | 1
-rw-r--r-- arch/arm/include/asm/patch.h | 18
-rw-r--r-- arch/arm/include/asm/pci.h | 28
-rw-r--r-- arch/arm/include/asm/percpu.h | 70
-rw-r--r-- arch/arm/include/asm/perf_event.h | 25
-rw-r--r-- arch/arm/include/asm/pgalloc.h | 149
-rw-r--r-- arch/arm/include/asm/pgtable-2level-hwdef.h | 94
-rw-r--r-- arch/arm/include/asm/pgtable-2level-types.h | 55
-rw-r--r-- arch/arm/include/asm/pgtable-2level.h | 249
-rw-r--r-- arch/arm/include/asm/pgtable-3level-hwdef.h | 97
-rw-r--r-- arch/arm/include/asm/pgtable-3level-types.h | 58
-rw-r--r-- arch/arm/include/asm/pgtable-3level.h | 253
-rw-r--r-- arch/arm/include/asm/pgtable-hwdef.h | 16
-rw-r--r-- arch/arm/include/asm/pgtable-nommu.h | 89
-rw-r--r-- arch/arm/include/asm/pgtable.h | 334
-rw-r--r-- arch/arm/include/asm/probes.h | 49
-rw-r--r-- arch/arm/include/asm/proc-fns.h | 189
-rw-r--r-- arch/arm/include/asm/processor.h | 134
-rw-r--r-- arch/arm/include/asm/procinfo.h | 46
-rw-r--r-- arch/arm/include/asm/prom.h | 25
-rw-r--r-- arch/arm/include/asm/psci.h | 18
-rw-r--r-- arch/arm/include/asm/ptdump.h | 41
-rw-r--r-- arch/arm/include/asm/ptrace.h | 200
-rw-r--r-- arch/arm/include/asm/seccomp.h | 11
-rw-r--r-- arch/arm/include/asm/sections.h | 26
-rw-r--r-- arch/arm/include/asm/secure_cntvoff.h | 8
-rw-r--r-- arch/arm/include/asm/semihost.h | 30
-rw-r--r-- arch/arm/include/asm/set_memory.h | 22
-rw-r--r-- arch/arm/include/asm/setup.h | 38
-rw-r--r-- arch/arm/include/asm/shmparam.h | 17
-rw-r--r-- arch/arm/include/asm/signal.h | 30
-rw-r--r-- arch/arm/include/asm/simd.h | 8
-rw-r--r-- arch/arm/include/asm/smp.h | 118
-rw-r--r-- arch/arm/include/asm/smp_plat.h | 120
-rw-r--r-- arch/arm/include/asm/smp_scu.h | 62
-rw-r--r-- arch/arm/include/asm/smp_twd.h | 22
-rw-r--r-- arch/arm/include/asm/sparsemem.h | 26
-rw-r--r-- arch/arm/include/asm/spectre.h | 42
-rw-r--r-- arch/arm/include/asm/spinlock.h | 273
-rw-r--r-- arch/arm/include/asm/spinlock_types.h | 34
-rw-r--r-- arch/arm/include/asm/stackprotector.h | 38
-rw-r--r-- arch/arm/include/asm/stacktrace.h | 53
-rw-r--r-- arch/arm/include/asm/string.h | 68
-rw-r--r-- arch/arm/include/asm/suspend.h | 18
-rw-r--r-- arch/arm/include/asm/swab.h | 39
-rw-r--r-- arch/arm/include/asm/switch_to.h | 35
-rw-r--r-- arch/arm/include/asm/sync_bitops.h | 48
-rw-r--r-- arch/arm/include/asm/syscall.h | 89
-rw-r--r-- arch/arm/include/asm/syscalls.h | 51
-rw-r--r-- arch/arm/include/asm/system_info.h | 30
-rw-r--r-- arch/arm/include/asm/system_misc.h | 42
-rw-r--r-- arch/arm/include/asm/tcm.h | 37
-rw-r--r-- arch/arm/include/asm/therm.h | 29
-rw-r--r-- arch/arm/include/asm/thread_info.h | 180
-rw-r--r-- arch/arm/include/asm/thread_notify.h | 46
-rw-r--r-- arch/arm/include/asm/timex.h | 16
-rw-r--r-- arch/arm/include/asm/tlb.h | 70
-rw-r--r-- arch/arm/include/asm/tlbflush.h | 681
-rw-r--r-- arch/arm/include/asm/tls.h | 143
-rw-r--r-- arch/arm/include/asm/topology.h | 37
-rw-r--r-- arch/arm/include/asm/traps.h | 47
-rw-r--r-- arch/arm/include/asm/uaccess-asm.h | 111
-rw-r--r-- arch/arm/include/asm/uaccess.h | 586
-rw-r--r-- arch/arm/include/asm/ucontext.h | 96
-rw-r--r-- arch/arm/include/asm/unified.h | 53
-rw-r--r-- arch/arm/include/asm/unistd.h | 66
-rw-r--r-- arch/arm/include/asm/unwind.h | 55
-rw-r--r-- arch/arm/include/asm/uprobes.h | 42
-rw-r--r-- arch/arm/include/asm/user.h | 100
-rw-r--r-- arch/arm/include/asm/v7m.h | 98
-rw-r--r-- arch/arm/include/asm/vdso.h | 36
-rw-r--r-- arch/arm/include/asm/vdso/clocksource.h | 8
-rw-r--r-- arch/arm/include/asm/vdso/cp15.h | 38
-rw-r--r-- arch/arm/include/asm/vdso/gettimeofday.h | 147
-rw-r--r-- arch/arm/include/asm/vdso/processor.h | 22
-rw-r--r-- arch/arm/include/asm/vdso/vsyscall.h | 36
-rw-r--r-- arch/arm/include/asm/vdso_datapage.h | 26
-rw-r--r-- arch/arm/include/asm/vermagic.h | 31
-rw-r--r-- arch/arm/include/asm/vfp.h | 107
-rw-r--r-- arch/arm/include/asm/vfpmacros.h | 79
-rw-r--r-- arch/arm/include/asm/vga.h | 14
-rw-r--r-- arch/arm/include/asm/virt.h | 81
-rw-r--r-- arch/arm/include/asm/vmalloc.h | 4
-rw-r--r-- arch/arm/include/asm/vmlinux.lds.h | 171
-rw-r--r-- arch/arm/include/asm/word-at-a-time.h | 99
-rw-r--r-- arch/arm/include/asm/xen/events.h | 30
-rw-r--r-- arch/arm/include/asm/xen/hypercall.h | 1
-rw-r--r-- arch/arm/include/asm/xen/hypervisor.h | 1
-rw-r--r-- arch/arm/include/asm/xen/interface.h | 1
-rw-r--r-- arch/arm/include/asm/xen/page.h | 6
-rw-r--r-- arch/arm/include/asm/xen/swiotlb-xen.h | 1
-rw-r--r-- arch/arm/include/asm/xen/xen-ops.h | 2
-rw-r--r-- arch/arm/include/asm/xor.h | 225
217 files changed, 20184 insertions, 0 deletions
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
new file mode 100644
index 0000000000..03657ff8fb
--- /dev/null
+++ b/arch/arm/include/asm/Kbuild
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+generic-y += early_ioremap.h
+generic-y += extable.h
+generic-y += flat.h
+generic-y += parport.h
+
+generated-y += mach-types.h
+generated-y += unistd-nr.h
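
A note on the generic-y lines above: for each entry, Kbuild emits a one-line wrapper under arch/arm/include/generated/asm/ so that an #include <asm/...> still resolves even though no hand-written header exists. The generated wrapper looks roughly like this (illustrative sketch, not part of this commit):

	/* e.g. arch/arm/include/generated/asm/extable.h, emitted by Kbuild */
	#include <asm-generic/extable.h>

The generated-y entries (mach-types.h, unistd-nr.h) are instead produced by arch-specific build rules rather than wrapped from asm-generic.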
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
new file mode 100644
index 0000000000..311e83038b
--- /dev/null
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -0,0 +1,261 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/arch_gicv3.h
+ *
+ * Copyright (C) 2015 ARM Ltd.
+ */
+#ifndef __ASM_ARCH_GICV3_H
+#define __ASM_ARCH_GICV3_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <asm/barrier.h>
+#include <asm/cacheflush.h>
+#include <asm/cp15.h>
+
+#define ICC_EOIR1 __ACCESS_CP15(c12, 0, c12, 1)
+#define ICC_DIR __ACCESS_CP15(c12, 0, c11, 1)
+#define ICC_IAR1 __ACCESS_CP15(c12, 0, c12, 0)
+#define ICC_SGI1R __ACCESS_CP15_64(0, c12)
+#define ICC_PMR __ACCESS_CP15(c4, 0, c6, 0)
+#define ICC_CTLR __ACCESS_CP15(c12, 0, c12, 4)
+#define ICC_SRE __ACCESS_CP15(c12, 0, c12, 5)
+#define ICC_IGRPEN1 __ACCESS_CP15(c12, 0, c12, 7)
+#define ICC_BPR1 __ACCESS_CP15(c12, 0, c12, 3)
+#define ICC_RPR __ACCESS_CP15(c12, 0, c11, 3)
+
+#define __ICC_AP0Rx(x) __ACCESS_CP15(c12, 0, c8, 4 | x)
+#define ICC_AP0R0 __ICC_AP0Rx(0)
+#define ICC_AP0R1 __ICC_AP0Rx(1)
+#define ICC_AP0R2 __ICC_AP0Rx(2)
+#define ICC_AP0R3 __ICC_AP0Rx(3)
+
+#define __ICC_AP1Rx(x) __ACCESS_CP15(c12, 0, c9, x)
+#define ICC_AP1R0 __ICC_AP1Rx(0)
+#define ICC_AP1R1 __ICC_AP1Rx(1)
+#define ICC_AP1R2 __ICC_AP1Rx(2)
+#define ICC_AP1R3 __ICC_AP1Rx(3)
+
+#define CPUIF_MAP(a32, a64) \
+static inline void write_ ## a64(u32 val) \
+{ \
+ write_sysreg(val, a32); \
+} \
+static inline u32 read_ ## a64(void) \
+{ \
+ return read_sysreg(a32); \
+} \
+
+CPUIF_MAP(ICC_EOIR1, ICC_EOIR1_EL1)
+CPUIF_MAP(ICC_PMR, ICC_PMR_EL1)
+CPUIF_MAP(ICC_AP0R0, ICC_AP0R0_EL1)
+CPUIF_MAP(ICC_AP0R1, ICC_AP0R1_EL1)
+CPUIF_MAP(ICC_AP0R2, ICC_AP0R2_EL1)
+CPUIF_MAP(ICC_AP0R3, ICC_AP0R3_EL1)
+CPUIF_MAP(ICC_AP1R0, ICC_AP1R0_EL1)
+CPUIF_MAP(ICC_AP1R1, ICC_AP1R1_EL1)
+CPUIF_MAP(ICC_AP1R2, ICC_AP1R2_EL1)
+CPUIF_MAP(ICC_AP1R3, ICC_AP1R3_EL1)
+
+#define read_gicreg(r) read_##r()
+#define write_gicreg(v, r) write_##r(v)
+
+/* Low-level accessors */
+
+static inline void gic_write_dir(u32 val)
+{
+ write_sysreg(val, ICC_DIR);
+ isb();
+}
+
+static inline u32 gic_read_iar(void)
+{
+ u32 irqstat = read_sysreg(ICC_IAR1);
+
+ dsb(sy);
+
+ return irqstat;
+}
+
+static inline void gic_write_ctlr(u32 val)
+{
+ write_sysreg(val, ICC_CTLR);
+ isb();
+}
+
+static inline u32 gic_read_ctlr(void)
+{
+ return read_sysreg(ICC_CTLR);
+}
+
+static inline void gic_write_grpen1(u32 val)
+{
+ write_sysreg(val, ICC_IGRPEN1);
+ isb();
+}
+
+static inline void gic_write_sgi1r(u64 val)
+{
+ write_sysreg(val, ICC_SGI1R);
+}
+
+static inline u32 gic_read_sre(void)
+{
+ return read_sysreg(ICC_SRE);
+}
+
+static inline void gic_write_sre(u32 val)
+{
+ write_sysreg(val, ICC_SRE);
+ isb();
+}
+
+static inline void gic_write_bpr1(u32 val)
+{
+ write_sysreg(val, ICC_BPR1);
+}
+
+static inline u32 gic_read_pmr(void)
+{
+ return read_sysreg(ICC_PMR);
+}
+
+static inline void gic_write_pmr(u32 val)
+{
+ write_sysreg(val, ICC_PMR);
+}
+
+static inline u32 gic_read_rpr(void)
+{
+ return read_sysreg(ICC_RPR);
+}
+
+/*
+ * Even in 32bit systems that use LPAE, there is no guarantee that the I/O
+ * interface provides true 64bit atomic accesses, so using strd/ldrd doesn't
+ * make much sense.
+ * Moreover, 64bit I/O emulation is extremely difficult to implement on
+ * AArch32, since the syndrome register doesn't provide any information for
+ * them.
+ * Consequently, the following IO helpers use 32bit accesses.
+ */
+static inline void __gic_writeq_nonatomic(u64 val, volatile void __iomem *addr)
+{
+ writel_relaxed((u32)val, addr);
+ writel_relaxed((u32)(val >> 32), addr + 4);
+}
+
+static inline u64 __gic_readq_nonatomic(const volatile void __iomem *addr)
+{
+ u64 val;
+
+ val = readl_relaxed(addr);
+ val |= (u64)readl_relaxed(addr + 4) << 32;
+ return val;
+}
+
+#define gic_flush_dcache_to_poc(a,l) __cpuc_flush_dcache_area((a), (l))
+
+/*
+ * GICD_IROUTERn contains the affinity value associated with each interrupt.
+ * The upper word (aff3) will always be 0, so there is no need for a lock.
+ */
+#define gic_write_irouter(v, c) __gic_writeq_nonatomic(v, c)
+
+/*
+ * GICR_TYPER is an ID register and doesn't need atomicity.
+ */
+#define gic_read_typer(c) __gic_readq_nonatomic(c)
+
+/*
+ * GITS_BASER - hi and lo bits may be accessed independently.
+ */
+#define gits_read_baser(c) __gic_readq_nonatomic(c)
+#define gits_write_baser(v, c) __gic_writeq_nonatomic(v, c)
+
+/*
+ * GICR_PENDBASER and GICR_PROPBASER are changed with LPIs disabled, so they
+ * won't be in use during any updates and can be changed non-atomically.
+ */
+#define gicr_read_propbaser(c) __gic_readq_nonatomic(c)
+#define gicr_write_propbaser(v, c) __gic_writeq_nonatomic(v, c)
+#define gicr_read_pendbaser(c) __gic_readq_nonatomic(c)
+#define gicr_write_pendbaser(v, c) __gic_writeq_nonatomic(v, c)
+
+/*
+ * GICR_xLPIR - only the lower bits are significant
+ */
+#define gic_read_lpir(c) readl_relaxed(c)
+#define gic_write_lpir(v, c) writel_relaxed(lower_32_bits(v), c)
+
+/*
+ * GITS_TYPER is an ID register and doesn't need atomicity.
+ */
+#define gits_read_typer(c) __gic_readq_nonatomic(c)
+
+/*
+ * GITS_CBASER - hi and lo bits may be accessed independently.
+ */
+#define gits_read_cbaser(c) __gic_readq_nonatomic(c)
+#define gits_write_cbaser(v, c) __gic_writeq_nonatomic(v, c)
+
+/*
+ * GITS_CWRITER - hi and lo bits may be accessed independently.
+ */
+#define gits_write_cwriter(v, c) __gic_writeq_nonatomic(v, c)
+
+/*
+ * GICR_VPROPBASER - hi and lo bits may be accessed independently.
+ */
+#define gicr_read_vpropbaser(c) __gic_readq_nonatomic(c)
+#define gicr_write_vpropbaser(v, c) __gic_writeq_nonatomic(v, c)
+
+/*
+ * GICR_VPENDBASER - the Valid bit must be cleared before changing
+ * anything else.
+ */
+static inline void gicr_write_vpendbaser(u64 val, void __iomem *addr)
+{
+ u32 tmp;
+
+ tmp = readl_relaxed(addr + 4);
+ if (tmp & (GICR_VPENDBASER_Valid >> 32)) {
+ tmp &= ~(GICR_VPENDBASER_Valid >> 32);
+ writel_relaxed(tmp, addr + 4);
+ }
+
+ /*
+ * Use the fact that __gic_writeq_nonatomic writes the second
+ * half of the 64bit quantity after the first.
+ */
+ __gic_writeq_nonatomic(val, addr);
+}
+
+#define gicr_read_vpendbaser(c) __gic_readq_nonatomic(c)
+
+static inline bool gic_prio_masking_enabled(void)
+{
+ return false;
+}
+
+static inline void gic_pmr_mask_irqs(void)
+{
+ /* Should not get called. */
+ WARN_ON_ONCE(true);
+}
+
+static inline void gic_arch_enable_irqs(void)
+{
+ /* Should not get called. */
+ WARN_ON_ONCE(true);
+}
+
+static inline bool gic_has_relaxed_pmr_sync(void)
+{
+ return false;
+}
+
+#endif /* !__ASSEMBLY__ */
+#endif /* !__ASM_ARCH_GICV3_H */
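
As a side note on the helpers above: the comment before __gic_writeq_nonatomic() explains why 64-bit GIC registers are accessed as two 32-bit halves, and gicr_write_vpendbaser() additionally relies on the low word being written first. A minimal user-space sketch of that ordering, with a plain two-word array standing in for the MMIO register (illustrative only, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	/* models __gic_writeq_nonatomic(): low half first, then high half */
	static void writeq_nonatomic(uint64_t val, volatile uint32_t *reg)
	{
		reg[0] = (uint32_t)val;
		reg[1] = (uint32_t)(val >> 32);
	}

	int main(void)
	{
		volatile uint32_t reg[2] = { 0, 0 };

		writeq_nonatomic(0x1122334455667788ULL, reg);
		printf("lo=%08x hi=%08x\n", reg[0], reg[1]);
		return 0;
	}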
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
new file mode 100644
index 0000000000..bb129b6d23
--- /dev/null
+++ b/arch/arm/include/asm/arch_timer.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASMARM_ARCH_TIMER_H
+#define __ASMARM_ARCH_TIMER_H
+
+#include <asm/barrier.h>
+#include <asm/errno.h>
+#include <asm/hwcap.h>
+#include <linux/clocksource.h>
+#include <linux/init.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/types.h>
+
+#include <clocksource/arm_arch_timer.h>
+
+#ifdef CONFIG_ARM_ARCH_TIMER
+/* 32bit ARM doesn't know anything about timer errata... */
+#define has_erratum_handler(h) (false)
+#define erratum_handler(h) (arch_timer_##h)
+
+int arch_timer_arch_init(void);
+
+/*
+ * These register accessors are marked inline so the compiler can
+ * nicely work out which register we want, and chuck away the rest of
+ * the code. At least it does so with a recent GCC (4.6.3).
+ */
+static __always_inline
+void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u64 val)
+{
+ if (access == ARCH_TIMER_PHYS_ACCESS) {
+ switch (reg) {
+ case ARCH_TIMER_REG_CTRL:
+ asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" ((u32)val));
+ isb();
+ break;
+ case ARCH_TIMER_REG_CVAL:
+ asm volatile("mcrr p15, 2, %Q0, %R0, c14" : : "r" (val));
+ break;
+ default:
+ BUILD_BUG();
+ }
+ } else if (access == ARCH_TIMER_VIRT_ACCESS) {
+ switch (reg) {
+ case ARCH_TIMER_REG_CTRL:
+ asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" ((u32)val));
+ isb();
+ break;
+ case ARCH_TIMER_REG_CVAL:
+ asm volatile("mcrr p15, 3, %Q0, %R0, c14" : : "r" (val));
+ break;
+ default:
+ BUILD_BUG();
+ }
+ } else {
+ BUILD_BUG();
+ }
+}
+
+static __always_inline
+u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
+{
+ u32 val = 0;
+
+ if (access == ARCH_TIMER_PHYS_ACCESS) {
+ switch (reg) {
+ case ARCH_TIMER_REG_CTRL:
+ asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
+ break;
+ default:
+ BUILD_BUG();
+ }
+ } else if (access == ARCH_TIMER_VIRT_ACCESS) {
+ switch (reg) {
+ case ARCH_TIMER_REG_CTRL:
+ asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
+ break;
+ default:
+ BUILD_BUG();
+ }
+ } else {
+ BUILD_BUG();
+ }
+
+ return val;
+}
+
+static inline u32 arch_timer_get_cntfrq(void)
+{
+ u32 val;
+ asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val));
+ return val;
+}
+
+static inline u64 __arch_counter_get_cntpct(void)
+{
+ u64 cval;
+
+ isb();
+ asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval));
+ return cval;
+}
+
+static inline u64 __arch_counter_get_cntpct_stable(void)
+{
+ return __arch_counter_get_cntpct();
+}
+
+static inline u64 __arch_counter_get_cntvct(void)
+{
+ u64 cval;
+
+ isb();
+ asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval));
+ return cval;
+}
+
+static inline u64 __arch_counter_get_cntvct_stable(void)
+{
+ return __arch_counter_get_cntvct();
+}
+
+static inline u32 arch_timer_get_cntkctl(void)
+{
+ u32 cntkctl;
+ asm volatile("mrc p15, 0, %0, c14, c1, 0" : "=r" (cntkctl));
+ return cntkctl;
+}
+
+static inline void arch_timer_set_cntkctl(u32 cntkctl)
+{
+ asm volatile("mcr p15, 0, %0, c14, c1, 0" : : "r" (cntkctl));
+ isb();
+}
+
+static inline void arch_timer_set_evtstrm_feature(void)
+{
+ elf_hwcap |= HWCAP_EVTSTRM;
+}
+
+static inline bool arch_timer_have_evtstrm_feature(void)
+{
+ return elf_hwcap & HWCAP_EVTSTRM;
+}
+#endif
+
+#endif
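
For readers following the counter accessors above: a common consumer pattern is to read CNTFRQ once via arch_timer_get_cntfrq() and scale counter deltas to nanoseconds. A sketch of that arithmetic in user space (the kernel's clocksource layer actually scales with a mult/shift pair rather than a division; ticks_to_ns is a made-up helper name):

	#include <stdint.h>

	#define NSEC_PER_SEC 1000000000ULL

	/* naive scaling; assumes ticks * NSEC_PER_SEC does not overflow u64 */
	static uint64_t ticks_to_ns(uint64_t ticks, uint32_t cntfrq)
	{
		return ticks * NSEC_PER_SEC / cntfrq;
	}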
diff --git a/arch/arm/include/asm/archrandom.h b/arch/arm/include/asm/archrandom.h
new file mode 100644
index 0000000000..cc4714eb1a
--- /dev/null
+++ b/arch/arm/include/asm/archrandom.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARCHRANDOM_H
+#define _ASM_ARCHRANDOM_H
+
+static inline bool __init smccc_probe_trng(void)
+{
+ return false;
+}
+
+#include <asm-generic/archrandom.h>
+
+#endif /* _ASM_ARCHRANDOM_H */
diff --git a/arch/arm/include/asm/arm-cci.h b/arch/arm/include/asm/arm-cci.h
new file mode 100644
index 0000000000..7537bd7906
--- /dev/null
+++ b/arch/arm/include/asm/arm-cci.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/arm-cci.h
+ *
+ * Copyright (C) 2015 ARM Ltd.
+ */
+
+#ifndef __ASM_ARM_CCI_H
+#define __ASM_ARM_CCI_H
+
+#ifdef CONFIG_MCPM
+#include <asm/mcpm.h>
+
+/*
+ * We don't have a reliable way of detecting whether we have
+ * access to secure-only registers, unless MCPM is registered.
+ */
+static inline bool platform_has_secure_cci_access(void)
+{
+ return mcpm_is_available();
+}
+
+#else
+static inline bool platform_has_secure_cci_access(void)
+{
+ return false;
+}
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/arm_pmuv3.h b/arch/arm/include/asm/arm_pmuv3.h
new file mode 100644
index 0000000000..a41b503b7d
--- /dev/null
+++ b/arch/arm/include/asm/arm_pmuv3.h
@@ -0,0 +1,252 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ */
+
+#ifndef __ASM_PMUV3_H
+#define __ASM_PMUV3_H
+
+#include <asm/cp15.h>
+#include <asm/cputype.h>
+
+#define PMCCNTR __ACCESS_CP15_64(0, c9)
+
+#define PMCR __ACCESS_CP15(c9, 0, c12, 0)
+#define PMCNTENSET __ACCESS_CP15(c9, 0, c12, 1)
+#define PMCNTENCLR __ACCESS_CP15(c9, 0, c12, 2)
+#define PMOVSR __ACCESS_CP15(c9, 0, c12, 3)
+#define PMSELR __ACCESS_CP15(c9, 0, c12, 5)
+#define PMCEID0 __ACCESS_CP15(c9, 0, c12, 6)
+#define PMCEID1 __ACCESS_CP15(c9, 0, c12, 7)
+#define PMXEVTYPER __ACCESS_CP15(c9, 0, c13, 1)
+#define PMXEVCNTR __ACCESS_CP15(c9, 0, c13, 2)
+#define PMUSERENR __ACCESS_CP15(c9, 0, c14, 0)
+#define PMINTENSET __ACCESS_CP15(c9, 0, c14, 1)
+#define PMINTENCLR __ACCESS_CP15(c9, 0, c14, 2)
+#define PMCEID2 __ACCESS_CP15(c9, 0, c14, 4)
+#define PMCEID3 __ACCESS_CP15(c9, 0, c14, 5)
+#define PMMIR __ACCESS_CP15(c9, 0, c14, 6)
+#define PMCCFILTR __ACCESS_CP15(c14, 0, c15, 7)
+
+#define PMEVCNTR0 __ACCESS_CP15(c14, 0, c8, 0)
+#define PMEVCNTR1 __ACCESS_CP15(c14, 0, c8, 1)
+#define PMEVCNTR2 __ACCESS_CP15(c14, 0, c8, 2)
+#define PMEVCNTR3 __ACCESS_CP15(c14, 0, c8, 3)
+#define PMEVCNTR4 __ACCESS_CP15(c14, 0, c8, 4)
+#define PMEVCNTR5 __ACCESS_CP15(c14, 0, c8, 5)
+#define PMEVCNTR6 __ACCESS_CP15(c14, 0, c8, 6)
+#define PMEVCNTR7 __ACCESS_CP15(c14, 0, c8, 7)
+#define PMEVCNTR8 __ACCESS_CP15(c14, 0, c9, 0)
+#define PMEVCNTR9 __ACCESS_CP15(c14, 0, c9, 1)
+#define PMEVCNTR10 __ACCESS_CP15(c14, 0, c9, 2)
+#define PMEVCNTR11 __ACCESS_CP15(c14, 0, c9, 3)
+#define PMEVCNTR12 __ACCESS_CP15(c14, 0, c9, 4)
+#define PMEVCNTR13 __ACCESS_CP15(c14, 0, c9, 5)
+#define PMEVCNTR14 __ACCESS_CP15(c14, 0, c9, 6)
+#define PMEVCNTR15 __ACCESS_CP15(c14, 0, c9, 7)
+#define PMEVCNTR16 __ACCESS_CP15(c14, 0, c10, 0)
+#define PMEVCNTR17 __ACCESS_CP15(c14, 0, c10, 1)
+#define PMEVCNTR18 __ACCESS_CP15(c14, 0, c10, 2)
+#define PMEVCNTR19 __ACCESS_CP15(c14, 0, c10, 3)
+#define PMEVCNTR20 __ACCESS_CP15(c14, 0, c10, 4)
+#define PMEVCNTR21 __ACCESS_CP15(c14, 0, c10, 5)
+#define PMEVCNTR22 __ACCESS_CP15(c14, 0, c10, 6)
+#define PMEVCNTR23 __ACCESS_CP15(c14, 0, c10, 7)
+#define PMEVCNTR24 __ACCESS_CP15(c14, 0, c11, 0)
+#define PMEVCNTR25 __ACCESS_CP15(c14, 0, c11, 1)
+#define PMEVCNTR26 __ACCESS_CP15(c14, 0, c11, 2)
+#define PMEVCNTR27 __ACCESS_CP15(c14, 0, c11, 3)
+#define PMEVCNTR28 __ACCESS_CP15(c14, 0, c11, 4)
+#define PMEVCNTR29 __ACCESS_CP15(c14, 0, c11, 5)
+#define PMEVCNTR30 __ACCESS_CP15(c14, 0, c11, 6)
+
+#define PMEVTYPER0 __ACCESS_CP15(c14, 0, c12, 0)
+#define PMEVTYPER1 __ACCESS_CP15(c14, 0, c12, 1)
+#define PMEVTYPER2 __ACCESS_CP15(c14, 0, c12, 2)
+#define PMEVTYPER3 __ACCESS_CP15(c14, 0, c12, 3)
+#define PMEVTYPER4 __ACCESS_CP15(c14, 0, c12, 4)
+#define PMEVTYPER5 __ACCESS_CP15(c14, 0, c12, 5)
+#define PMEVTYPER6 __ACCESS_CP15(c14, 0, c12, 6)
+#define PMEVTYPER7 __ACCESS_CP15(c14, 0, c12, 7)
+#define PMEVTYPER8 __ACCESS_CP15(c14, 0, c13, 0)
+#define PMEVTYPER9 __ACCESS_CP15(c14, 0, c13, 1)
+#define PMEVTYPER10 __ACCESS_CP15(c14, 0, c13, 2)
+#define PMEVTYPER11 __ACCESS_CP15(c14, 0, c13, 3)
+#define PMEVTYPER12 __ACCESS_CP15(c14, 0, c13, 4)
+#define PMEVTYPER13 __ACCESS_CP15(c14, 0, c13, 5)
+#define PMEVTYPER14 __ACCESS_CP15(c14, 0, c13, 6)
+#define PMEVTYPER15 __ACCESS_CP15(c14, 0, c13, 7)
+#define PMEVTYPER16 __ACCESS_CP15(c14, 0, c14, 0)
+#define PMEVTYPER17 __ACCESS_CP15(c14, 0, c14, 1)
+#define PMEVTYPER18 __ACCESS_CP15(c14, 0, c14, 2)
+#define PMEVTYPER19 __ACCESS_CP15(c14, 0, c14, 3)
+#define PMEVTYPER20 __ACCESS_CP15(c14, 0, c14, 4)
+#define PMEVTYPER21 __ACCESS_CP15(c14, 0, c14, 5)
+#define PMEVTYPER22 __ACCESS_CP15(c14, 0, c14, 6)
+#define PMEVTYPER23 __ACCESS_CP15(c14, 0, c14, 7)
+#define PMEVTYPER24 __ACCESS_CP15(c14, 0, c15, 0)
+#define PMEVTYPER25 __ACCESS_CP15(c14, 0, c15, 1)
+#define PMEVTYPER26 __ACCESS_CP15(c14, 0, c15, 2)
+#define PMEVTYPER27 __ACCESS_CP15(c14, 0, c15, 3)
+#define PMEVTYPER28 __ACCESS_CP15(c14, 0, c15, 4)
+#define PMEVTYPER29 __ACCESS_CP15(c14, 0, c15, 5)
+#define PMEVTYPER30 __ACCESS_CP15(c14, 0, c15, 6)
+
+#define RETURN_READ_PMEVCNTRN(n) \
+ return read_sysreg(PMEVCNTR##n)
+static inline unsigned long read_pmevcntrn(int n)
+{
+ PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
+ return 0;
+}
+
+#define WRITE_PMEVCNTRN(n) \
+ write_sysreg(val, PMEVCNTR##n)
+static inline void write_pmevcntrn(int n, unsigned long val)
+{
+ PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
+}
+
+#define WRITE_PMEVTYPERN(n) \
+ write_sysreg(val, PMEVTYPER##n)
+static inline void write_pmevtypern(int n, unsigned long val)
+{
+ PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
+}
+
+static inline unsigned long read_pmmir(void)
+{
+ return read_sysreg(PMMIR);
+}
+
+static inline u32 read_pmuver(void)
+{
+ /* PMUVers is not a signed field */
+ u32 dfr0 = read_cpuid_ext(CPUID_EXT_DFR0);
+
+ return (dfr0 >> 24) & 0xf;
+}
+
+static inline void write_pmcr(u32 val)
+{
+ write_sysreg(val, PMCR);
+}
+
+static inline u32 read_pmcr(void)
+{
+ return read_sysreg(PMCR);
+}
+
+static inline void write_pmselr(u32 val)
+{
+ write_sysreg(val, PMSELR);
+}
+
+static inline void write_pmccntr(u64 val)
+{
+ write_sysreg(val, PMCCNTR);
+}
+
+static inline u64 read_pmccntr(void)
+{
+ return read_sysreg(PMCCNTR);
+}
+
+static inline void write_pmcntenset(u32 val)
+{
+ write_sysreg(val, PMCNTENSET);
+}
+
+static inline void write_pmcntenclr(u32 val)
+{
+ write_sysreg(val, PMCNTENCLR);
+}
+
+static inline void write_pmintenset(u32 val)
+{
+ write_sysreg(val, PMINTENSET);
+}
+
+static inline void write_pmintenclr(u32 val)
+{
+ write_sysreg(val, PMINTENCLR);
+}
+
+static inline void write_pmccfiltr(u32 val)
+{
+ write_sysreg(val, PMCCFILTR);
+}
+
+static inline void write_pmovsclr(u32 val)
+{
+ write_sysreg(val, PMOVSR);
+}
+
+static inline u32 read_pmovsclr(void)
+{
+ return read_sysreg(PMOVSR);
+}
+
+static inline void write_pmuserenr(u32 val)
+{
+ write_sysreg(val, PMUSERENR);
+}
+
+static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
+static inline void kvm_clr_pmu_events(u32 clr) {}
+static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
+{
+ return false;
+}
+
+static inline bool kvm_set_pmuserenr(u64 val)
+{
+ return false;
+}
+
+static inline void kvm_vcpu_pmu_resync_el0(void) {}
+
+/* PMU Version in DFR Register */
+#define ARMV8_PMU_DFR_VER_NI 0
+#define ARMV8_PMU_DFR_VER_V3P1 0x4
+#define ARMV8_PMU_DFR_VER_V3P4 0x5
+#define ARMV8_PMU_DFR_VER_V3P5 0x6
+#define ARMV8_PMU_DFR_VER_IMP_DEF 0xF
+
+static inline bool pmuv3_implemented(int pmuver)
+{
+ return !(pmuver == ARMV8_PMU_DFR_VER_IMP_DEF ||
+ pmuver == ARMV8_PMU_DFR_VER_NI);
+}
+
+static inline bool is_pmuv3p4(int pmuver)
+{
+ return pmuver >= ARMV8_PMU_DFR_VER_V3P4;
+}
+
+static inline bool is_pmuv3p5(int pmuver)
+{
+ return pmuver >= ARMV8_PMU_DFR_VER_V3P5;
+}
+
+static inline u64 read_pmceid0(void)
+{
+ u64 val = read_sysreg(PMCEID0);
+
+ if (read_pmuver() >= ARMV8_PMU_DFR_VER_V3P1)
+ val |= (u64)read_sysreg(PMCEID2) << 32;
+
+ return val;
+}
+
+static inline u64 read_pmceid1(void)
+{
+ u64 val = read_sysreg(PMCEID1);
+
+ if (read_pmuver() >= ARMV8_PMU_DFR_VER_V3P1)
+ val |= (u64)read_sysreg(PMCEID3) << 32;
+
+ return val;
+}
+
+#endif
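
To make the version plumbing above concrete: read_pmuver() extracts the PerfMon field from ID_DFR0 (bits [27:24]), and pmuv3_implemented() treats 0x0 (not implemented) and 0xF (IMPLEMENTATION DEFINED) as "no PMUv3". A standalone sketch of the same decode, with the relevant constants duplicated for illustration (not kernel code):

	#include <stdbool.h>
	#include <stdint.h>

	#define PMU_VER_NI      0x0	/* ARMV8_PMU_DFR_VER_NI */
	#define PMU_VER_IMP_DEF 0xF	/* ARMV8_PMU_DFR_VER_IMP_DEF */

	static bool pmuv3_from_dfr0(uint32_t dfr0)
	{
		uint32_t pmuver = (dfr0 >> 24) & 0xf;	/* same field as read_pmuver() */

		return pmuver != PMU_VER_NI && pmuver != PMU_VER_IMP_DEF;
	}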
diff --git a/arch/arm/include/asm/asm-offsets.h b/arch/arm/include/asm/asm-offsets.h
new file mode 100644
index 0000000000..d370ee36a1
--- /dev/null
+++ b/arch/arm/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
new file mode 100644
index 0000000000..aebe2c8f6a
--- /dev/null
+++ b/arch/arm/include/asm/assembler.h
@@ -0,0 +1,794 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/assembler.h
+ *
+ * Copyright (C) 1996-2000 Russell King
+ *
+ * This file contains arm architecture specific defines
+ * for the different processors.
+ *
+ * Do not include any C declarations in this file - it is included by
+ * assembler source.
+ */
+#ifndef __ASM_ASSEMBLER_H__
+#define __ASM_ASSEMBLER_H__
+
+#ifndef __ASSEMBLY__
+#error "Only include this from assembly code"
+#endif
+
+#include <asm/ptrace.h>
+#include <asm/opcodes-virt.h>
+#include <asm/asm-offsets.h>
+#include <asm/page.h>
+#include <asm/thread_info.h>
+#include <asm/uaccess-asm.h>
+
+#define IOMEM(x) (x)
+
+/*
+ * Endian independent macros for shifting bytes within registers.
+ */
+#ifndef __ARMEB__
+#define lspull lsr
+#define lspush lsl
+#define get_byte_0 lsl #0
+#define get_byte_1 lsr #8
+#define get_byte_2 lsr #16
+#define get_byte_3 lsr #24
+#define put_byte_0 lsl #0
+#define put_byte_1 lsl #8
+#define put_byte_2 lsl #16
+#define put_byte_3 lsl #24
+#else
+#define lspull lsl
+#define lspush lsr
+#define get_byte_0 lsr #24
+#define get_byte_1 lsr #16
+#define get_byte_2 lsr #8
+#define get_byte_3 lsl #0
+#define put_byte_0 lsl #24
+#define put_byte_1 lsl #16
+#define put_byte_2 lsl #8
+#define put_byte_3 lsl #0
+#endif
+
+/* Select code for any configuration running in BE8 mode */
+#ifdef CONFIG_CPU_ENDIAN_BE8
+#define ARM_BE8(code...) code
+#else
+#define ARM_BE8(code...)
+#endif
+
+/*
+ * Data preload for architectures that support it
+ */
+#if __LINUX_ARM_ARCH__ >= 5
+#define PLD(code...) code
+#else
+#define PLD(code...)
+#endif
+
+/*
+ * This can be used to enable code to cacheline align the destination
+ * pointer when bulk writing to memory. Experiments on StrongARM and
+ * XScale didn't show this to be a worthwhile thing to do when the cache is not
+ * set to write-allocate (this would need further testing on XScale when WA
+ * is used).
+ *
+ * On Feroceon there is much to gain however, regardless of cache mode.
+ */
+#ifdef CONFIG_CPU_FEROCEON
+#define CALGN(code...) code
+#else
+#define CALGN(code...)
+#endif
+
+#define IMM12_MASK 0xfff
+
+/* the frame pointer used for stack unwinding */
+ARM( fpreg .req r11 )
+THUMB( fpreg .req r7 )
+
+/*
+ * Enable and disable interrupts
+ */
+#if __LINUX_ARM_ARCH__ >= 6
+ .macro disable_irq_notrace
+ cpsid i
+ .endm
+
+ .macro enable_irq_notrace
+ cpsie i
+ .endm
+#else
+ .macro disable_irq_notrace
+ msr cpsr_c, #PSR_I_BIT | SVC_MODE
+ .endm
+
+ .macro enable_irq_notrace
+ msr cpsr_c, #SVC_MODE
+ .endm
+#endif
+
+#if __LINUX_ARM_ARCH__ < 7
+ .macro dsb, args
+ mcr p15, 0, r0, c7, c10, 4
+ .endm
+
+ .macro isb, args
+ mcr p15, 0, r0, c7, c5, 4
+ .endm
+#endif
+
+ .macro asm_trace_hardirqs_off, save=1
+#if defined(CONFIG_TRACE_IRQFLAGS)
+ .if \save
+ stmdb sp!, {r0-r3, ip, lr}
+ .endif
+ bl trace_hardirqs_off
+ .if \save
+ ldmia sp!, {r0-r3, ip, lr}
+ .endif
+#endif
+ .endm
+
+ .macro asm_trace_hardirqs_on, cond=al, save=1
+#if defined(CONFIG_TRACE_IRQFLAGS)
+ /*
+ * Strictly, the registers should be pushed and popped conditionally, but
+ * after bl the flags are certainly clobbered
+ */
+ .if \save
+ stmdb sp!, {r0-r3, ip, lr}
+ .endif
+ bl\cond trace_hardirqs_on
+ .if \save
+ ldmia sp!, {r0-r3, ip, lr}
+ .endif
+#endif
+ .endm
+
+ .macro disable_irq, save=1
+ disable_irq_notrace
+ asm_trace_hardirqs_off \save
+ .endm
+
+ .macro enable_irq
+ asm_trace_hardirqs_on
+ enable_irq_notrace
+ .endm
+/*
+ * Save the current IRQ state and disable IRQs. Note that this macro
+ * assumes FIQs are enabled, and that the processor is in SVC mode.
+ */
+ .macro save_and_disable_irqs, oldcpsr
+#ifdef CONFIG_CPU_V7M
+ mrs \oldcpsr, primask
+#else
+ mrs \oldcpsr, cpsr
+#endif
+ disable_irq
+ .endm
+
+ .macro save_and_disable_irqs_notrace, oldcpsr
+#ifdef CONFIG_CPU_V7M
+ mrs \oldcpsr, primask
+#else
+ mrs \oldcpsr, cpsr
+#endif
+ disable_irq_notrace
+ .endm
+
+/*
+ * Restore interrupt state previously stored in a register. We don't
+ * guarantee that this will preserve the flags.
+ */
+ .macro restore_irqs_notrace, oldcpsr
+#ifdef CONFIG_CPU_V7M
+ msr primask, \oldcpsr
+#else
+ msr cpsr_c, \oldcpsr
+#endif
+ .endm
+
+ .macro restore_irqs, oldcpsr
+ tst \oldcpsr, #PSR_I_BIT
+ asm_trace_hardirqs_on cond=eq
+ restore_irqs_notrace \oldcpsr
+ .endm
+
+/*
+ * Assembly version of "adr rd, BSYM(sym)". This should only be used to
+ * reference local symbols in the same assembly file which are to be
+ * resolved by the assembler. Other usage is undefined.
+ */
+ .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
+ .macro badr\c, rd, sym
+#ifdef CONFIG_THUMB2_KERNEL
+ adr\c \rd, \sym + 1
+#else
+ adr\c \rd, \sym
+#endif
+ .endm
+ .endr
+
+/*
+ * Get current thread_info.
+ */
+ .macro get_thread_info, rd
+ /* thread_info is the first member of struct task_struct */
+ get_current \rd
+ .endm
+
+/*
+ * Increment/decrement the preempt count.
+ */
+#ifdef CONFIG_PREEMPT_COUNT
+ .macro inc_preempt_count, ti, tmp
+ ldr \tmp, [\ti, #TI_PREEMPT] @ get preempt count
+ add \tmp, \tmp, #1 @ increment it
+ str \tmp, [\ti, #TI_PREEMPT]
+ .endm
+
+ .macro dec_preempt_count, ti, tmp
+ ldr \tmp, [\ti, #TI_PREEMPT] @ get preempt count
+ sub \tmp, \tmp, #1 @ decrement it
+ str \tmp, [\ti, #TI_PREEMPT]
+ .endm
+#else
+ .macro inc_preempt_count, ti, tmp
+ .endm
+
+ .macro dec_preempt_count, ti, tmp
+ .endm
+#endif
+
+#define USERL(l, x...) \
+9999: x; \
+ .pushsection __ex_table,"a"; \
+ .align 3; \
+ .long 9999b,l; \
+ .popsection
+
+#define USER(x...) USERL(9001f, x)
+
+#ifdef CONFIG_SMP
+#define ALT_SMP(instr...) \
+9998: instr
+/*
+ * Note: if you get assembler errors from ALT_UP() when building with
+ * CONFIG_THUMB2_KERNEL, you almost certainly need to use
+ * ALT_SMP( W(instr) ... )
+ */
+#define ALT_UP(instr...) \
+ .pushsection ".alt.smp.init", "a" ;\
+ .align 2 ;\
+ .long 9998b - . ;\
+9997: instr ;\
+ .if . - 9997b == 2 ;\
+ nop ;\
+ .endif ;\
+ .if . - 9997b != 4 ;\
+ .error "ALT_UP() content must assemble to exactly 4 bytes";\
+ .endif ;\
+ .popsection
+#define ALT_UP_B(label) \
+ .pushsection ".alt.smp.init", "a" ;\
+ .align 2 ;\
+ .long 9998b - . ;\
+ W(b) . + (label - 9998b) ;\
+ .popsection
+#else
+#define ALT_SMP(instr...)
+#define ALT_UP(instr...) instr
+#define ALT_UP_B(label) b label
+#endif
+
+ /*
+ * this_cpu_offset - load the per-CPU offset of this CPU into
+ * register 'rd'
+ */
+ .macro this_cpu_offset, rd:req
+#ifdef CONFIG_SMP
+ALT_SMP(mrc p15, 0, \rd, c13, c0, 4)
+#ifdef CONFIG_CPU_V6
+ALT_UP_B(.L1_\@)
+.L0_\@:
+ .subsection 1
+.L1_\@: ldr_va \rd, __per_cpu_offset
+ b .L0_\@
+ .previous
+#endif
+#else
+ mov \rd, #0
+#endif
+ .endm
+
+ /*
+ * set_current - store the task pointer of this CPU's current task
+ */
+ .macro set_current, rn:req, tmp:req
+#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
+9998: mcr p15, 0, \rn, c13, c0, 3 @ set TPIDRURO register
+#ifdef CONFIG_CPU_V6
+ALT_UP_B(.L0_\@)
+ .subsection 1
+.L0_\@: str_va \rn, __current, \tmp
+ b .L1_\@
+ .previous
+.L1_\@:
+#endif
+#else
+ str_va \rn, __current, \tmp
+#endif
+ .endm
+
+ /*
+ * get_current - load the task pointer of this CPU's current task
+ */
+ .macro get_current, rd:req
+#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
+9998: mrc p15, 0, \rd, c13, c0, 3 @ get TPIDRURO register
+#ifdef CONFIG_CPU_V6
+ALT_UP_B(.L0_\@)
+ .subsection 1
+.L0_\@: ldr_va \rd, __current
+ b .L1_\@
+ .previous
+.L1_\@:
+#endif
+#else
+ ldr_va \rd, __current
+#endif
+ .endm
+
+ /*
+ * reload_current - reload the task pointer of this CPU's current task
+ * into the TLS register
+ */
+ .macro reload_current, t1:req, t2:req
+#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
+#ifdef CONFIG_CPU_V6
+ALT_SMP(nop)
+ALT_UP_B(.L0_\@)
+#endif
+ ldr_this_cpu \t1, __entry_task, \t1, \t2
+ mcr p15, 0, \t1, c13, c0, 3 @ store in TPIDRURO
+.L0_\@:
+#endif
+ .endm
+
+/*
+ * Instruction barrier
+ */
+ .macro instr_sync
+#if __LINUX_ARM_ARCH__ >= 7
+ isb
+#elif __LINUX_ARM_ARCH__ == 6
+ mcr p15, 0, r0, c7, c5, 4
+#endif
+ .endm
+
+/*
+ * SMP data memory barrier
+ */
+ .macro smp_dmb mode
+#ifdef CONFIG_SMP
+#if __LINUX_ARM_ARCH__ >= 7
+ .ifeqs "\mode","arm"
+ ALT_SMP(dmb ish)
+ .else
+ ALT_SMP(W(dmb) ish)
+ .endif
+#elif __LINUX_ARM_ARCH__ == 6
+ ALT_SMP(mcr p15, 0, r0, c7, c10, 5) @ dmb
+#else
+#error Incompatible SMP platform
+#endif
+ .ifeqs "\mode","arm"
+ ALT_UP(nop)
+ .else
+ ALT_UP(W(nop))
+ .endif
+#endif
+ .endm
+
+/*
+ * Raw SMP data memory barrier
+ */
+ .macro __smp_dmb mode
+#if __LINUX_ARM_ARCH__ >= 7
+ .ifeqs "\mode","arm"
+ dmb ish
+ .else
+ W(dmb) ish
+ .endif
+#elif __LINUX_ARM_ARCH__ == 6
+ mcr p15, 0, r0, c7, c10, 5 @ dmb
+#else
+ .error "Incompatible SMP platform"
+#endif
+ .endm
+
+#if defined(CONFIG_CPU_V7M)
+ /*
+ * setmode is used to assert that the CPU is in SVC mode during boot.
+ * For v7-M this is done in __v7m_setup, so setmode can be empty here.
+ */
+ .macro setmode, mode, reg
+ .endm
+#elif defined(CONFIG_THUMB2_KERNEL)
+ .macro setmode, mode, reg
+ mov \reg, #\mode
+ msr cpsr_c, \reg
+ .endm
+#else
+ .macro setmode, mode, reg
+ msr cpsr_c, #\mode
+ .endm
+#endif
+
+/*
+ * Helper macro to enter SVC mode cleanly and mask interrupts. reg is
+ * a scratch register for the macro to overwrite.
+ *
+ * This macro is intended for forcing the CPU into SVC mode at boot time.
+ * You cannot return to the original mode.
+ */
+.macro safe_svcmode_maskall reg:req
+#if __LINUX_ARM_ARCH__ >= 6 && !defined(CONFIG_CPU_V7M)
+ mrs \reg , cpsr
+ eor \reg, \reg, #HYP_MODE
+ tst \reg, #MODE_MASK
+ bic \reg , \reg , #MODE_MASK
+ orr \reg , \reg , #PSR_I_BIT | PSR_F_BIT | SVC_MODE
+THUMB( orr \reg , \reg , #PSR_T_BIT )
+ bne 1f
+ orr \reg, \reg, #PSR_A_BIT
+ badr lr, 2f
+ msr spsr_cxsf, \reg
+ __MSR_ELR_HYP(14)
+ __ERET
+1: msr cpsr_c, \reg
+2:
+#else
+/*
+ * workaround for possibly broken pre-v6 hardware
+ * (akita, Sharp Zaurus C-1000, PXA270-based)
+ */
+ setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, \reg
+#endif
+.endm
+
+/*
+ * STRT/LDRT access macros with ARM and Thumb-2 variants
+ */
+#ifdef CONFIG_THUMB2_KERNEL
+
+ .macro usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
+9999:
+ .if \inc == 1
+ \instr\()b\t\cond\().w \reg, [\ptr, #\off]
+ .elseif \inc == 4
+ \instr\t\cond\().w \reg, [\ptr, #\off]
+ .else
+ .error "Unsupported inc macro argument"
+ .endif
+
+ .pushsection __ex_table,"a"
+ .align 3
+ .long 9999b, \abort
+ .popsection
+ .endm
+
+ .macro usracc, instr, reg, ptr, inc, cond, rept, abort
+ @ explicit IT instruction needed because of the label
+ @ introduced by the USER macro
+ .ifnc \cond,al
+ .if \rept == 1
+ itt \cond
+ .elseif \rept == 2
+ ittt \cond
+ .else
+ .error "Unsupported rept macro argument"
+ .endif
+ .endif
+
+ @ Slightly optimised to avoid incrementing the pointer twice
+ usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
+ .if \rept == 2
+ usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
+ .endif
+
+ add\cond \ptr, #\rept * \inc
+ .endm
+
+#else /* !CONFIG_THUMB2_KERNEL */
+
+ .macro usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER()
+ .rept \rept
+9999:
+ .if \inc == 1
+ \instr\()b\t\cond \reg, [\ptr], #\inc
+ .elseif \inc == 4
+ \instr\t\cond \reg, [\ptr], #\inc
+ .else
+ .error "Unsupported inc macro argument"
+ .endif
+
+ .pushsection __ex_table,"a"
+ .align 3
+ .long 9999b, \abort
+ .popsection
+ .endr
+ .endm
+
+#endif /* CONFIG_THUMB2_KERNEL */
+
+ .macro strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
+ usracc str, \reg, \ptr, \inc, \cond, \rept, \abort
+ .endm
+
+ .macro ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
+ usracc ldr, \reg, \ptr, \inc, \cond, \rept, \abort
+ .endm
+
+/* Utility macro for declaring string literals */
+ .macro string name:req, string
+ .type \name , #object
+\name:
+ .asciz "\string"
+ .size \name , . - \name
+ .endm
+
+ .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
+ .macro ret\c, reg
+#if __LINUX_ARM_ARCH__ < 6
+ mov\c pc, \reg
+#else
+ .ifeqs "\reg", "lr"
+ bx\c \reg
+ .else
+ mov\c pc, \reg
+ .endif
+#endif
+ .endm
+ .endr
+
+ .macro ret.w, reg
+ ret \reg
+#ifdef CONFIG_THUMB2_KERNEL
+ nop
+#endif
+ .endm
+
+ .macro bug, msg, line
+#ifdef CONFIG_THUMB2_KERNEL
+1: .inst 0xde02
+#else
+1: .inst 0xe7f001f2
+#endif
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+ .pushsection .rodata.str, "aMS", %progbits, 1
+2: .asciz "\msg"
+ .popsection
+ .pushsection __bug_table, "aw"
+ .align 2
+ .word 1b, 2b
+ .hword \line
+ .popsection
+#endif
+ .endm
+
+#ifdef CONFIG_KPROBES
+#define _ASM_NOKPROBE(entry) \
+ .pushsection "_kprobe_blacklist", "aw" ; \
+ .balign 4 ; \
+ .long entry; \
+ .popsection
+#else
+#define _ASM_NOKPROBE(entry)
+#endif
+
+ .macro __adldst_l, op, reg, sym, tmp, c
+ .if __LINUX_ARM_ARCH__ < 7
+ ldr\c \tmp, .La\@
+ .subsection 1
+ .align 2
+.La\@: .long \sym - .Lpc\@
+ .previous
+ .else
+ .ifnb \c
+ THUMB( ittt \c )
+ .endif
+ movw\c \tmp, #:lower16:\sym - .Lpc\@
+ movt\c \tmp, #:upper16:\sym - .Lpc\@
+ .endif
+
+#ifndef CONFIG_THUMB2_KERNEL
+ .set .Lpc\@, . + 8 // PC bias
+ .ifc \op, add
+ add\c \reg, \tmp, pc
+ .else
+ \op\c \reg, [pc, \tmp]
+ .endif
+#else
+.Lb\@: add\c \tmp, \tmp, pc
+ /*
+ * In Thumb-2 builds, the PC bias depends on whether we are currently
+ * emitting into a .arm or a .thumb section. The size of the add opcode
+ * above will be 2 bytes when emitting in Thumb mode and 4 bytes when
+ * emitting in ARM mode, so let's use this to account for the bias.
+ */
+ .set .Lpc\@, . + (. - .Lb\@)
+
+ .ifnc \op, add
+ \op\c \reg, [\tmp]
+ .endif
+#endif
+ .endm
+
+ /*
+ * mov_l - move a constant value or [relocated] address into a register
+ */
+ .macro mov_l, dst:req, imm:req, cond
+ .if __LINUX_ARM_ARCH__ < 7
+ ldr\cond \dst, =\imm
+ .else
+ movw\cond \dst, #:lower16:\imm
+ movt\cond \dst, #:upper16:\imm
+ .endif
+ .endm
+
+ /*
+ * adr_l - adr pseudo-op with unlimited range
+ *
+ * @dst: destination register
+ * @sym: name of the symbol
+ * @cond: conditional opcode suffix
+ */
+ .macro adr_l, dst:req, sym:req, cond
+ __adldst_l add, \dst, \sym, \dst, \cond
+ .endm
+
+ /*
+ * ldr_l - ldr <literal> pseudo-op with unlimited range
+ *
+ * @dst: destination register
+ * @sym: name of the symbol
+ * @cond: conditional opcode suffix
+ */
+ .macro ldr_l, dst:req, sym:req, cond
+ __adldst_l ldr, \dst, \sym, \dst, \cond
+ .endm
+
+ /*
+ * str_l - str <literal> pseudo-op with unlimited range
+ *
+ * @src: source register
+ * @sym: name of the symbol
+ * @tmp: mandatory scratch register
+ * @cond: conditional opcode suffix
+ */
+ .macro str_l, src:req, sym:req, tmp:req, cond
+ __adldst_l str, \src, \sym, \tmp, \cond
+ .endm
+
+ .macro __ldst_va, op, reg, tmp, sym, cond, offset
+#if __LINUX_ARM_ARCH__ >= 7 || \
+ !defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \
+ (defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
+ mov_l \tmp, \sym, \cond
+#else
+ /*
+ * Avoid a literal load, by emitting a sequence of ADD/LDR instructions
+ * with the appropriate relocations. The combined sequence has a range
+ * of -/+ 256 MiB, which should be sufficient for the core kernel and
+ * for modules loaded into the module region.
+ */
+ .globl \sym
+ .reloc .L0_\@, R_ARM_ALU_PC_G0_NC, \sym
+ .reloc .L1_\@, R_ARM_ALU_PC_G1_NC, \sym
+ .reloc .L2_\@, R_ARM_LDR_PC_G2, \sym
+.L0_\@: sub\cond \tmp, pc, #8 - \offset
+.L1_\@: sub\cond \tmp, \tmp, #4 - \offset
+.L2_\@:
+#endif
+ \op\cond \reg, [\tmp, #\offset]
+ .endm
+
+ /*
+ * ldr_va - load a 32-bit word from the virtual address of \sym
+ */
+ .macro ldr_va, rd:req, sym:req, cond, tmp, offset=0
+ .ifnb \tmp
+ __ldst_va ldr, \rd, \tmp, \sym, \cond, \offset
+ .else
+ __ldst_va ldr, \rd, \rd, \sym, \cond, \offset
+ .endif
+ .endm
+
+ /*
+ * str_va - store a 32-bit word to the virtual address of \sym
+ */
+ .macro str_va, rn:req, sym:req, tmp:req, cond
+ __ldst_va str, \rn, \tmp, \sym, \cond, 0
+ .endm
+
+ /*
+ * ldr_this_cpu_armv6 - Load a 32-bit word from the per-CPU variable 'sym',
+ * without using a temp register. Supported in ARM mode
+ * only.
+ */
+ .macro ldr_this_cpu_armv6, rd:req, sym:req
+ this_cpu_offset \rd
+ .globl \sym
+ .reloc .L0_\@, R_ARM_ALU_PC_G0_NC, \sym
+ .reloc .L1_\@, R_ARM_ALU_PC_G1_NC, \sym
+ .reloc .L2_\@, R_ARM_LDR_PC_G2, \sym
+ add \rd, \rd, pc
+.L0_\@: sub \rd, \rd, #4
+.L1_\@: sub \rd, \rd, #0
+.L2_\@: ldr \rd, [\rd, #4]
+ .endm
+
+ /*
+ * ldr_this_cpu - Load a 32-bit word from the per-CPU variable 'sym'
+ * into register 'rd', which may be the stack pointer,
+ * using 't1' and 't2' as general temp registers. These
+ * are permitted to overlap with 'rd' if != sp
+ */
+ .macro ldr_this_cpu, rd:req, sym:req, t1:req, t2:req
+#ifndef CONFIG_SMP
+ ldr_va \rd, \sym, tmp=\t1
+#elif __LINUX_ARM_ARCH__ >= 7 || \
+ !defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \
+ (defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
+ this_cpu_offset \t1
+ mov_l \t2, \sym
+ ldr \rd, [\t1, \t2]
+#else
+ ldr_this_cpu_armv6 \rd, \sym
+#endif
+ .endm
+
+ /*
+ * rev_l - byte-swap a 32-bit value
+ *
+ * @val: source/destination register
+ * @tmp: scratch register
+ */
+ .macro rev_l, val:req, tmp:req
+ .if __LINUX_ARM_ARCH__ < 6
+ eor \tmp, \val, \val, ror #16
+ bic \tmp, \tmp, #0x00ff0000
+ mov \val, \val, ror #8
+ eor \val, \val, \tmp, lsr #8
+ .else
+ rev \val, \val
+ .endif
+ .endm
+
+ .if __LINUX_ARM_ARCH__ < 6
+ .set .Lrev_l_uses_tmp, 1
+ .else
+ .set .Lrev_l_uses_tmp, 0
+ .endif
+
+ /*
+ * bl_r - branch and link to register
+ *
+ * @dst: target to branch to
+ * @c: conditional opcode suffix
+ */
+ .macro bl_r, dst:req, c
+ .if __LINUX_ARM_ARCH__ < 6
+ mov\c lr, pc
+ mov\c pc, \dst
+ .else
+ blx\c \dst
+ .endif
+ .endm
+
+#endif /* __ASM_ASSEMBLER_H__ */
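
The rev_l macro above is worth unpacking: on ARMv6+ it is a single rev instruction, while the pre-v6 fallback byte-swaps with a four-instruction eor/bic/ror sequence. A user-space model of that trick, which can be compiled and asserted against the expected result (illustrative only, not kernel code):

	#include <assert.h>
	#include <stdint.h>

	static uint32_t ror32(uint32_t x, unsigned int n)
	{
		return (x >> n) | (x << (32 - n));
	}

	static uint32_t rev_l_model(uint32_t val)
	{
		uint32_t tmp = val ^ ror32(val, 16);	/* eor tmp, val, val, ror #16 */
		tmp &= ~0x00ff0000u;			/* bic tmp, tmp, #0x00ff0000 */
		val = ror32(val, 8);			/* mov val, val, ror #8 */
		return val ^ (tmp >> 8);		/* eor val, val, tmp, lsr #8 */
	}

	int main(void)
	{
		assert(rev_l_model(0xAABBCCDDu) == 0xDDCCBBAAu);
		return 0;
	}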
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
new file mode 100644
index 0000000000..f0e3b01afa
--- /dev/null
+++ b/arch/arm/include/asm/atomic.h
@@ -0,0 +1,514 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/atomic.h
+ *
+ * Copyright (C) 1996 Russell King.
+ * Copyright (C) 2002 Deep Blue Solutions Ltd.
+ */
+#ifndef __ASM_ARM_ATOMIC_H
+#define __ASM_ARM_ATOMIC_H
+
+#include <linux/compiler.h>
+#include <linux/prefetch.h>
+#include <linux/types.h>
+#include <linux/irqflags.h>
+#include <asm/barrier.h>
+#include <asm/cmpxchg.h>
+
+#ifdef __KERNEL__
+
+/*
+ * On ARM, ordinary assignment (str instruction) doesn't clear the local
+ * strex/ldrex monitor on some implementations. The reason we can use it for
+ * atomic_set() is the clrex or dummy strex done on every exception return.
+ */
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))
+
+#if __LINUX_ARM_ARCH__ >= 6
+
+/*
+ * ARMv6 UP and SMP safe atomic ops. We use load exclusive and
+ * store exclusive to ensure that these are atomic. We may loop
+ * to ensure that the update happens.
+ */
+
+#define ATOMIC_OP(op, c_op, asm_op) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
+{ \
+ unsigned long tmp; \
+ int result; \
+ \
+ prefetchw(&v->counter); \
+ __asm__ __volatile__("@ atomic_" #op "\n" \
+"1: ldrex %0, [%3]\n" \
+" " #asm_op " %0, %0, %4\n" \
+" strex %1, %0, [%3]\n" \
+" teq %1, #0\n" \
+" bne 1b" \
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
+ : "r" (&v->counter), "Ir" (i) \
+ : "cc"); \
+} \
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
+static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
+{ \
+ unsigned long tmp; \
+ int result; \
+ \
+ prefetchw(&v->counter); \
+ \
+ __asm__ __volatile__("@ atomic_" #op "_return\n" \
+"1: ldrex %0, [%3]\n" \
+" " #asm_op " %0, %0, %4\n" \
+" strex %1, %0, [%3]\n" \
+" teq %1, #0\n" \
+" bne 1b" \
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
+ : "r" (&v->counter), "Ir" (i) \
+ : "cc"); \
+ \
+ return result; \
+}
+
+#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
+static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
+{ \
+ unsigned long tmp; \
+ int result, val; \
+ \
+ prefetchw(&v->counter); \
+ \
+ __asm__ __volatile__("@ atomic_fetch_" #op "\n" \
+"1: ldrex %0, [%4]\n" \
+" " #asm_op " %1, %0, %5\n" \
+" strex %2, %1, [%4]\n" \
+" teq %2, #0\n" \
+" bne 1b" \
+ : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter) \
+ : "r" (&v->counter), "Ir" (i) \
+ : "cc"); \
+ \
+ return result; \
+}
+
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
+
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
+
+static inline int arch_atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
+{
+ int oldval;
+ unsigned long res;
+
+ prefetchw(&ptr->counter);
+
+ do {
+ __asm__ __volatile__("@ atomic_cmpxchg\n"
+ "ldrex %1, [%3]\n"
+ "mov %0, #0\n"
+ "teq %1, %4\n"
+ "strexeq %0, %5, [%3]\n"
+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
+ : "cc");
+ } while (res);
+
+ return oldval;
+}
+#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg_relaxed
+
+static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+ int oldval, newval;
+ unsigned long tmp;
+
+ smp_mb();
+ prefetchw(&v->counter);
+
+ __asm__ __volatile__ ("@ atomic_add_unless\n"
+"1: ldrex %0, [%4]\n"
+" teq %0, %5\n"
+" beq 2f\n"
+" add %1, %0, %6\n"
+" strex %2, %1, [%4]\n"
+" teq %2, #0\n"
+" bne 1b\n"
+"2:"
+ : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "r" (u), "r" (a)
+ : "cc");
+
+ if (oldval != u)
+ smp_mb();
+
+ return oldval;
+}
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
+
+#else /* ARM_ARCH_6 */
+
+#ifdef CONFIG_SMP
+#error SMP not supported on pre-ARMv6 CPUs
+#endif
+
+#define ATOMIC_OP(op, c_op, asm_op) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ v->counter c_op i; \
+ raw_local_irq_restore(flags); \
+} \
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ int val; \
+ \
+ raw_local_irq_save(flags); \
+ v->counter c_op i; \
+ val = v->counter; \
+ raw_local_irq_restore(flags); \
+ \
+ return val; \
+}
+
+#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ int val; \
+ \
+ raw_local_irq_save(flags); \
+ val = v->counter; \
+ v->counter c_op i; \
+ raw_local_irq_restore(flags); \
+ \
+ return val; \
+}
+
+#define arch_atomic_add_return arch_atomic_add_return
+#define arch_atomic_sub_return arch_atomic_sub_return
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
+
+#define arch_atomic_fetch_and arch_atomic_fetch_and
+#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
+
+static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+ int ret;
+ unsigned long flags;
+
+ raw_local_irq_save(flags);
+ ret = v->counter;
+ if (likely(ret == old))
+ v->counter = new;
+ raw_local_irq_restore(flags);
+
+ return ret;
+}
+#define arch_atomic_cmpxchg arch_atomic_cmpxchg
+
+#endif /* __LINUX_ARM_ARCH__ */
+
+#define ATOMIC_OPS(op, c_op, asm_op) \
+ ATOMIC_OP(op, c_op, asm_op) \
+ ATOMIC_OP_RETURN(op, c_op, asm_op) \
+ ATOMIC_FETCH_OP(op, c_op, asm_op)
+
+ATOMIC_OPS(add, +=, add)
+ATOMIC_OPS(sub, -=, sub)
+
+#define arch_atomic_andnot arch_atomic_andnot
+
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op, asm_op) \
+ ATOMIC_OP(op, c_op, asm_op) \
+ ATOMIC_FETCH_OP(op, c_op, asm_op)
+
+ATOMIC_OPS(and, &=, and)
+ATOMIC_OPS(andnot, &= ~, bic)
+ATOMIC_OPS(or, |=, orr)
+ATOMIC_OPS(xor, ^=, eor)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+#ifndef CONFIG_GENERIC_ATOMIC64
+typedef struct {
+ s64 counter;
+} atomic64_t;
+
+#define ATOMIC64_INIT(i) { (i) }
+
+#ifdef CONFIG_ARM_LPAE
+static inline s64 arch_atomic64_read(const atomic64_t *v)
+{
+ s64 result;
+
+ __asm__ __volatile__("@ atomic64_read\n"
+" ldrd %0, %H0, [%1]"
+ : "=&r" (result)
+ : "r" (&v->counter), "Qo" (v->counter)
+ );
+
+ return result;
+}
+
+static inline void arch_atomic64_set(atomic64_t *v, s64 i)
+{
+ __asm__ __volatile__("@ atomic64_set\n"
+" strd %2, %H2, [%1]"
+ : "=Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ );
+}
+#else
+static inline s64 arch_atomic64_read(const atomic64_t *v)
+{
+ s64 result;
+
+ __asm__ __volatile__("@ atomic64_read\n"
+" ldrexd %0, %H0, [%1]"
+ : "=&r" (result)
+ : "r" (&v->counter), "Qo" (v->counter)
+ );
+
+ return result;
+}
+
+static inline void arch_atomic64_set(atomic64_t *v, s64 i)
+{
+ s64 tmp;
+
+ prefetchw(&v->counter);
+ __asm__ __volatile__("@ atomic64_set\n"
+"1: ldrexd %0, %H0, [%2]\n"
+" strexd %0, %3, %H3, [%2]\n"
+" teq %0, #0\n"
+" bne 1b"
+ : "=&r" (tmp), "=Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+}
+#endif
+
+#define ATOMIC64_OP(op, op1, op2) \
+static inline void arch_atomic64_##op(s64 i, atomic64_t *v) \
+{ \
+ s64 result; \
+ unsigned long tmp; \
+ \
+ prefetchw(&v->counter); \
+ __asm__ __volatile__("@ atomic64_" #op "\n" \
+"1: ldrexd %0, %H0, [%3]\n" \
+" " #op1 " %Q0, %Q0, %Q4\n" \
+" " #op2 " %R0, %R0, %R4\n" \
+" strexd %1, %0, %H0, [%3]\n" \
+" teq %1, #0\n" \
+" bne 1b" \
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
+ : "r" (&v->counter), "r" (i) \
+ : "cc"); \
+} \
+
+#define ATOMIC64_OP_RETURN(op, op1, op2) \
+static inline s64 \
+arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t *v) \
+{ \
+ s64 result; \
+ unsigned long tmp; \
+ \
+ prefetchw(&v->counter); \
+ \
+ __asm__ __volatile__("@ atomic64_" #op "_return\n" \
+"1: ldrexd %0, %H0, [%3]\n" \
+" " #op1 " %Q0, %Q0, %Q4\n" \
+" " #op2 " %R0, %R0, %R4\n" \
+" strexd %1, %0, %H0, [%3]\n" \
+" teq %1, #0\n" \
+" bne 1b" \
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
+ : "r" (&v->counter), "r" (i) \
+ : "cc"); \
+ \
+ return result; \
+}
+
+#define ATOMIC64_FETCH_OP(op, op1, op2) \
+static inline s64 \
+arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v) \
+{ \
+ s64 result, val; \
+ unsigned long tmp; \
+ \
+ prefetchw(&v->counter); \
+ \
+ __asm__ __volatile__("@ atomic64_fetch_" #op "\n" \
+"1: ldrexd %0, %H0, [%4]\n" \
+" " #op1 " %Q1, %Q0, %Q5\n" \
+" " #op2 " %R1, %R0, %R5\n" \
+" strexd %2, %1, %H1, [%4]\n" \
+" teq %2, #0\n" \
+" bne 1b" \
+ : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter) \
+ : "r" (&v->counter), "r" (i) \
+ : "cc"); \
+ \
+ return result; \
+}
+
+#define ATOMIC64_OPS(op, op1, op2) \
+ ATOMIC64_OP(op, op1, op2) \
+ ATOMIC64_OP_RETURN(op, op1, op2) \
+ ATOMIC64_FETCH_OP(op, op1, op2)
+
+ATOMIC64_OPS(add, adds, adc)
+ATOMIC64_OPS(sub, subs, sbc)
+
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
+
+#undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op, op1, op2) \
+ ATOMIC64_OP(op, op1, op2) \
+ ATOMIC64_FETCH_OP(op, op1, op2)
+
+#define arch_atomic64_andnot arch_atomic64_andnot
+
+ATOMIC64_OPS(and, and, and)
+ATOMIC64_OPS(andnot, bic, bic)
+ATOMIC64_OPS(or, orr, orr)
+ATOMIC64_OPS(xor, eor, eor)
+
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
+
+static inline s64 arch_atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
+{
+ s64 oldval;
+ unsigned long res;
+
+ prefetchw(&ptr->counter);
+
+ do {
+ __asm__ __volatile__("@ atomic64_cmpxchg\n"
+ "ldrexd %1, %H1, [%3]\n"
+ "mov %0, #0\n"
+ "teq %1, %4\n"
+ "teqeq %H1, %H4\n"
+ "strexdeq %0, %5, %H5, [%3]"
+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
+ : "r" (&ptr->counter), "r" (old), "r" (new)
+ : "cc");
+ } while (res);
+
+ return oldval;
+}
+#define arch_atomic64_cmpxchg_relaxed arch_atomic64_cmpxchg_relaxed
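The relaxed 64-bit compare-and-swap is the building block for derived operations. A sketch of a saturating decrement layered on top of it (hypothetical helper, not part of this header):

	/* decrement *v but never below zero; returns the old value */
	static inline s64 atomic64_dec_floor0(atomic64_t *v)
	{
		s64 old = arch_atomic64_read(v);

		while (old > 0) {
			s64 prev = arch_atomic64_cmpxchg_relaxed(v, old, old - 1);
			if (prev == old)
				break;
			old = prev;	/* raced with another updater; retry */
		}
		return old;
	}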
+
+static inline s64 arch_atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
+{
+ s64 result;
+ unsigned long tmp;
+
+ prefetchw(&ptr->counter);
+
+ __asm__ __volatile__("@ atomic64_xchg\n"
+"1: ldrexd %0, %H0, [%3]\n"
+" strexd %1, %4, %H4, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+ : "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
+ : "r" (&ptr->counter), "r" (new)
+ : "cc");
+
+ return result;
+}
+#define arch_atomic64_xchg_relaxed arch_atomic64_xchg_relaxed
+
+static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
+{
+ s64 result;
+ unsigned long tmp;
+
+ smp_mb();
+ prefetchw(&v->counter);
+
+ __asm__ __volatile__("@ atomic64_dec_if_positive\n"
+"1: ldrexd %0, %H0, [%3]\n"
+" subs %Q0, %Q0, #1\n"
+" sbc %R0, %R0, #0\n"
+" teq %R0, #0\n"
+" bmi 2f\n"
+" strexd %1, %0, %H0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b\n"
+"2:"
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter)
+ : "cc");
+
+ smp_mb();
+
+ return result;
+}
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
+
+static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+ s64 oldval, newval;
+ unsigned long tmp;
+
+ smp_mb();
+ prefetchw(&v->counter);
+
+ __asm__ __volatile__("@ atomic64_add_unless\n"
+"1: ldrexd %0, %H0, [%4]\n"
+" teq %0, %5\n"
+" teqeq %H0, %H5\n"
+" beq 2f\n"
+" adds %Q1, %Q0, %Q6\n"
+" adc %R1, %R0, %R6\n"
+" strexd %2, %1, %H1, [%4]\n"
+" teq %2, #0\n"
+" bne 1b\n"
+"2:"
+ : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "r" (u), "r" (a)
+ : "cc");
+
+ if (oldval != u)
+ smp_mb();
+
+ return oldval;
+}
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
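fetch_add_unless is tailor-made for refcount-style "increment unless zero" logic; for instance, an inc_not_zero wrapper reduces to the following (a sketch mirroring the generic kernel wrappers, name hypothetical):

	/* increment *v unless it is zero; true if the increment happened */
	static inline bool my_atomic64_inc_not_zero(atomic64_t *v)
	{
		return arch_atomic64_fetch_add_unless(v, 1, 0) != 0;
	}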
+
+#endif /* !CONFIG_GENERIC_ATOMIC64 */
+#endif
+#endif
diff --git a/arch/arm/include/asm/auxvec.h b/arch/arm/include/asm/auxvec.h
new file mode 100644
index 0000000000..fbd388c462
--- /dev/null
+++ b/arch/arm/include/asm/auxvec.h
@@ -0,0 +1 @@
+#include <uapi/asm/auxvec.h>
diff --git a/arch/arm/include/asm/bL_switcher.h b/arch/arm/include/asm/bL_switcher.h
new file mode 100644
index 0000000000..45a75d9381
--- /dev/null
+++ b/arch/arm/include/asm/bL_switcher.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/bL_switcher.h
+ *
+ * Created by: Nicolas Pitre, April 2012
+ * Copyright: (C) 2012-2013 Linaro Limited
+ */
+
+#ifndef ASM_BL_SWITCHER_H
+#define ASM_BL_SWITCHER_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+typedef void (*bL_switch_completion_handler)(void *cookie);
+
+int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id,
+ bL_switch_completion_handler completer,
+ void *completer_cookie);
+static inline int bL_switch_request(unsigned int cpu, unsigned int new_cluster_id)
+{
+ return bL_switch_request_cb(cpu, new_cluster_id, NULL, NULL);
+}
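A caller that must know when the cluster switch has actually finished supplies a completion handler. A sketch using a completion (assumes <linux/completion.h>; names hypothetical):

	static void switch_done(void *cookie)
	{
		complete(cookie);
	}

	static int switch_and_wait(unsigned int cpu, unsigned int cluster)
	{
		DECLARE_COMPLETION_ONSTACK(done);
		int ret = bL_switch_request_cb(cpu, cluster, switch_done, &done);

		if (ret == 0)
			wait_for_completion(&done);
		return ret;
	}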
+
+/*
+ * Register here to be notified about runtime enabling/disabling of
+ * the switcher.
+ *
+ * The notifier chain is called with the switcher activation lock held:
+ * the switcher will not be enabled or disabled during callbacks.
+ * Callbacks must not call bL_switcher_{get,put}_enabled().
+ */
+#define BL_NOTIFY_PRE_ENABLE 0
+#define BL_NOTIFY_POST_ENABLE 1
+#define BL_NOTIFY_PRE_DISABLE 2
+#define BL_NOTIFY_POST_DISABLE 3
+
+#ifdef CONFIG_BL_SWITCHER
+
+int bL_switcher_register_notifier(struct notifier_block *nb);
+int bL_switcher_unregister_notifier(struct notifier_block *nb);
+
+/*
+ * Use these functions to temporarily prevent enabling/disabling of
+ * the switcher.
+ * bL_switcher_get_enabled() returns true if the switcher is currently
+ * enabled. Each call to bL_switcher_get_enabled() must be followed
+ * by a call to bL_switcher_put_enabled(). These functions are not
+ * recursive.
+ */
+bool bL_switcher_get_enabled(void);
+void bL_switcher_put_enabled(void);
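Note the pairing rule: put must follow get unconditionally, whatever get returned. A typical (sketched) use:

	bool active = bL_switcher_get_enabled();

	/* between get and put the switcher cannot change state */
	if (active)
		adjust_for_switcher();		/* hypothetical */

	bL_switcher_put_enabled();		/* required even if !active */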
+
+int bL_switcher_trace_trigger(void);
+int bL_switcher_get_logical_index(u32 mpidr);
+
+#else
+static inline int bL_switcher_register_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int bL_switcher_unregister_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline bool bL_switcher_get_enabled(void) { return false; }
+static inline void bL_switcher_put_enabled(void) { }
+static inline int bL_switcher_trace_trigger(void) { return 0; }
+static inline int bL_switcher_get_logical_index(u32 mpidr) { return -EUNATCH; }
+#endif /* CONFIG_BL_SWITCHER */
+
+#endif
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
new file mode 100644
index 0000000000..83ae97c049
--- /dev/null
+++ b/arch/arm/include/asm/barrier.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+#ifndef __ASSEMBLY__
+
+#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
+
+#if __LINUX_ARM_ARCH__ >= 7 || \
+ (__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K))
+#define sev() __asm__ __volatile__ ("sev" : : : "memory")
+#define wfe() __asm__ __volatile__ ("wfe" : : : "memory")
+#define wfi() __asm__ __volatile__ ("wfi" : : : "memory")
+#else
+#define wfe() do { } while (0)
+#endif
+
+#if __LINUX_ARM_ARCH__ >= 7
+#define isb(option) __asm__ __volatile__ ("isb " #option : : : "memory")
+#define dsb(option) __asm__ __volatile__ ("dsb " #option : : : "memory")
+#define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory")
+#ifdef CONFIG_THUMB2_KERNEL
+#define CSDB ".inst.w 0xf3af8014"
+#else
+#define CSDB ".inst 0xe320f014"
+#endif
+#define csdb() __asm__ __volatile__(CSDB : : : "memory")
+#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
+#define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
+ : : "r" (0) : "memory")
+#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
+ : : "r" (0) : "memory")
+#define dmb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
+ : : "r" (0) : "memory")
+#elif defined(CONFIG_CPU_FA526)
+#define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
+ : : "r" (0) : "memory")
+#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
+ : : "r" (0) : "memory")
+#define dmb(x) __asm__ __volatile__ ("" : : : "memory")
+#else
+#define isb(x) __asm__ __volatile__ ("" : : : "memory")
+#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
+ : : "r" (0) : "memory")
+#define dmb(x) __asm__ __volatile__ ("" : : : "memory")
+#endif
+
+#ifndef CSDB
+#define CSDB
+#endif
+#ifndef csdb
+#define csdb()
+#endif
+
+#ifdef CONFIG_ARM_HEAVY_MB
+extern void (*soc_mb)(void);
+extern void arm_heavy_mb(void);
+#define __arm_heavy_mb(x...) do { dsb(x); arm_heavy_mb(); } while (0)
+#else
+#define __arm_heavy_mb(x...) dsb(x)
+#endif
+
+#if defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
+#define mb() __arm_heavy_mb()
+#define rmb() dsb()
+#define wmb() __arm_heavy_mb(st)
+#define dma_rmb() dmb(osh)
+#define dma_wmb() dmb(oshst)
+#else
+#define mb() barrier()
+#define rmb() barrier()
+#define wmb() barrier()
+#define dma_rmb() barrier()
+#define dma_wmb() barrier()
+#endif
+
+#define __smp_mb() dmb(ish)
+#define __smp_rmb() __smp_mb()
+#define __smp_wmb() dmb(ishst)
+
+#ifdef CONFIG_CPU_SPECTRE
+static inline unsigned long array_index_mask_nospec(unsigned long idx,
+ unsigned long sz)
+{
+ unsigned long mask;
+
+ asm volatile(
+ "cmp %1, %2\n"
+ " sbc %0, %1, %1\n"
+ CSDB
+ : "=r" (mask)
+ : "r" (idx), "Ir" (sz)
+ : "cc");
+
+ return mask;
+}
+#define array_index_mask_nospec array_index_mask_nospec
+#endif
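The mask is all-ones when idx < sz and zero otherwise (cmp leaves the carry clear exactly when idx is in range, and sbc then yields -1), so a speculated out-of-bounds index is clamped to 0 without a branch. The canonical pattern, which <linux/nospec.h> wraps as array_index_nospec():

	if (idx < ARRAY_SIZE(table)) {
		idx &= array_index_mask_nospec(idx, ARRAY_SIZE(table));
		val = table[idx];	/* safe even under misspeculation */
	}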
+
+#include <asm-generic/barrier.h>
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __ASM_BARRIER_H */
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
new file mode 100644
index 0000000000..714440fa2f
--- /dev/null
+++ b/arch/arm/include/asm/bitops.h
@@ -0,0 +1,278 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 1995, Russell King.
+ * Various bits and pieces copyrights include:
+ * Linus Torvalds (test_bit).
+ * Big endian support: Copyright 2001, Nicolas Pitre
+ * reworked by rmk.
+ *
+ * bit 0 is the LSB of an "unsigned long" quantity.
+ *
+ * Please note that the code in this file should never be included
+ * from user space. Many of these are not implemented in assembler
+ * since they would be too costly. Also, they require privileged
+ * instructions (which are not available from user mode) to ensure
+ * that they are atomic.
+ */
+
+#ifndef __ASM_ARM_BITOPS_H
+#define __ASM_ARM_BITOPS_H
+
+#ifdef __KERNEL__
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <linux/compiler.h>
+#include <linux/irqflags.h>
+#include <asm/barrier.h>
+
+/*
+ * These functions are the basis of our bit ops.
+ *
+ * First, the atomic bitops. These use native endian.
+ */
+static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *p)
+{
+ unsigned long flags;
+ unsigned long mask = BIT_MASK(bit);
+
+ p += BIT_WORD(bit);
+
+ raw_local_irq_save(flags);
+ *p |= mask;
+ raw_local_irq_restore(flags);
+}
+
+static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p)
+{
+ unsigned long flags;
+ unsigned long mask = BIT_MASK(bit);
+
+ p += BIT_WORD(bit);
+
+ raw_local_irq_save(flags);
+ *p &= ~mask;
+ raw_local_irq_restore(flags);
+}
+
+static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p)
+{
+ unsigned long flags;
+ unsigned long mask = BIT_MASK(bit);
+
+ p += BIT_WORD(bit);
+
+ raw_local_irq_save(flags);
+ *p ^= mask;
+ raw_local_irq_restore(flags);
+}
+
+static inline int
+____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p)
+{
+ unsigned long flags;
+ unsigned int res;
+ unsigned long mask = BIT_MASK(bit);
+
+ p += BIT_WORD(bit);
+
+ raw_local_irq_save(flags);
+ res = *p;
+ *p = res | mask;
+ raw_local_irq_restore(flags);
+
+ return (res & mask) != 0;
+}
+
+static inline int
+____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
+{
+ unsigned long flags;
+ unsigned int res;
+ unsigned long mask = BIT_MASK(bit);
+
+ p += BIT_WORD(bit);
+
+ raw_local_irq_save(flags);
+ res = *p;
+ *p = res & ~mask;
+ raw_local_irq_restore(flags);
+
+ return (res & mask) != 0;
+}
+
+static inline int
+____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
+{
+ unsigned long flags;
+ unsigned int res;
+ unsigned long mask = BIT_MASK(bit);
+
+ p += BIT_WORD(bit);
+
+ raw_local_irq_save(flags);
+ res = *p;
+ *p = res ^ mask;
+ raw_local_irq_restore(flags);
+
+ return (res & mask) != 0;
+}
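These helpers back the atomic bitops on UP builds, where masking interrupts is enough to make the read-modify-write atomic. The word/mask arithmetic deserves a worked example (assuming 32-bit longs):

	unsigned long bits[2] = { 0, 0 };

	/* nr = 35: BIT_WORD(35) == 1, BIT_MASK(35) == 1UL << 3 */
	____atomic_set_bit(35, bits);
	/* bits[1] now reads 0x8; bits[0] is untouched */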
+
+#include <asm-generic/bitops/non-atomic.h>
+
+/*
+ * A note about Endian-ness.
+ * -------------------------
+ *
+ * When the ARM is put into big endian mode via CP15, the processor
 * When the ARM is put into big endian mode via CP15, the processor
+ * merely swaps the order of bytes within words, thus:
+ *
+ * ------------ physical data bus bits -----------
+ * D31 ... D24 D23 ... D16 D15 ... D8 D7 ... D0
+ * little byte 3 byte 2 byte 1 byte 0
+ * big byte 0 byte 1 byte 2 byte 3
+ *
+ * This means that reading a 32-bit word at address 0 returns the same
+ * value irrespective of the endian mode bit.
+ *
+ * Peripheral devices should be connected with the data bus reversed in
+ * "Big Endian" mode. ARM Application Note 61 is applicable, and is
+ * available from http://www.arm.com/.
+ *
+ * The following assumes that the data bus connectivity for big endian
+ * mode has been followed.
+ *
+ * Note that bit 0 is defined to be 32-bit word bit 0, not byte 0 bit 0.
+ */
+
+/*
+ * Native endian assembly bitops. nr = 0 -> word 0 bit 0.
+ */
+extern void _set_bit(int nr, volatile unsigned long * p);
+extern void _clear_bit(int nr, volatile unsigned long * p);
+extern void _change_bit(int nr, volatile unsigned long * p);
+extern int _test_and_set_bit(int nr, volatile unsigned long * p);
+extern int _test_and_clear_bit(int nr, volatile unsigned long * p);
+extern int _test_and_change_bit(int nr, volatile unsigned long * p);
+
+/*
+ * Little endian assembly bitops. nr = 0 -> byte 0 bit 0.
+ */
+unsigned long _find_first_zero_bit_le(const unsigned long *p, unsigned long size);
+unsigned long _find_next_zero_bit_le(const unsigned long *p,
+ unsigned long size, unsigned long offset);
+unsigned long _find_first_bit_le(const unsigned long *p, unsigned long size);
+unsigned long _find_next_bit_le(const unsigned long *p, unsigned long size, unsigned long offset);
+
+/*
+ * Big endian assembly bitops. nr = 0 -> byte 3 bit 0.
+ */
+unsigned long _find_first_zero_bit_be(const unsigned long *p, unsigned long size);
+unsigned long _find_next_zero_bit_be(const unsigned long *p,
+ unsigned long size, unsigned long offset);
+unsigned long _find_first_bit_be(const unsigned long *p, unsigned long size);
+unsigned long _find_next_bit_be(const unsigned long *p, unsigned long size, unsigned long offset);
+
+#ifndef CONFIG_SMP
+/*
+ * The __* form of bitops are non-atomic and may be reordered.
+ */
+#define ATOMIC_BITOP(name,nr,p) \
+ (__builtin_constant_p(nr) ? ____atomic_##name(nr, p) : _##name(nr,p))
+#else
+#define ATOMIC_BITOP(name,nr,p) _##name(nr,p)
+#endif
+
+/*
+ * Native endian atomic definitions.
+ */
+#define set_bit(nr,p) ATOMIC_BITOP(set_bit,nr,p)
+#define clear_bit(nr,p) ATOMIC_BITOP(clear_bit,nr,p)
+#define change_bit(nr,p) ATOMIC_BITOP(change_bit,nr,p)
+#define test_and_set_bit(nr,p) ATOMIC_BITOP(test_and_set_bit,nr,p)
+#define test_and_clear_bit(nr,p) ATOMIC_BITOP(test_and_clear_bit,nr,p)
+#define test_and_change_bit(nr,p) ATOMIC_BITOP(test_and_change_bit,nr,p)
+
+#ifndef __ARMEB__
+/*
+ * These are the little endian, atomic definitions.
+ */
+#define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz)
+#define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off)
+#define find_first_bit(p,sz) _find_first_bit_le(p,sz)
+#define find_next_bit(p,sz,off) _find_next_bit_le(p,sz,off)
+
+#else
+/*
+ * These are the big endian, atomic definitions.
+ */
+#define find_first_zero_bit(p,sz) _find_first_zero_bit_be(p,sz)
+#define find_next_zero_bit(p,sz,off) _find_next_zero_bit_be(p,sz,off)
+#define find_first_bit(p,sz) _find_first_bit_be(p,sz)
+#define find_next_bit(p,sz,off) _find_next_bit_be(p,sz,off)
+
+#endif
+
+#if __LINUX_ARM_ARCH__ < 5
+
+#include <asm-generic/bitops/__fls.h>
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/ffs.h>
+
+#else
+
+/*
+ * On ARMv5 and above, the gcc built-ins may rely on the clz instruction
+ * and produce optimal inlined code in all cases. On ARMv7 the result is
+ * even better, since the rbit instruction can be used as well.
+ */
+#include <asm-generic/bitops/builtin-__fls.h>
+#include <asm-generic/bitops/builtin-__ffs.h>
+#include <asm-generic/bitops/builtin-fls.h>
+#include <asm-generic/bitops/builtin-ffs.h>
+
+#endif
+
+#include <asm-generic/bitops/ffz.h>
+
+#include <asm-generic/bitops/fls64.h>
+
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
+
+#ifdef __ARMEB__
+
+static inline int find_first_zero_bit_le(const void *p, unsigned size)
+{
+ return _find_first_zero_bit_le(p, size);
+}
+#define find_first_zero_bit_le find_first_zero_bit_le
+
+static inline int find_next_zero_bit_le(const void *p, int size, int offset)
+{
+ return _find_next_zero_bit_le(p, size, offset);
+}
+#define find_next_zero_bit_le find_next_zero_bit_le
+
+static inline int find_next_bit_le(const void *p, int size, int offset)
+{
+ return _find_next_bit_le(p, size, offset);
+}
+#define find_next_bit_le find_next_bit_le
+
+#endif
+
+#include <asm-generic/bitops/le.h>
+
+/*
+ * Ext2 is defined to use little-endian byte ordering.
+ */
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_ARM_BITOPS_H */
diff --git a/arch/arm/include/asm/bitrev.h b/arch/arm/include/asm/bitrev.h
new file mode 100644
index 0000000000..84ad8dde62
--- /dev/null
+++ b/arch/arm/include/asm/bitrev.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_BITREV_H
+#define __ASM_BITREV_H
+
+static __always_inline __attribute_const__ u32 __arch_bitrev32(u32 x)
+{
+ __asm__ ("rbit %0, %1" : "=r" (x) : "r" (x));
+ return x;
+}
+
+static __always_inline __attribute_const__ u16 __arch_bitrev16(u16 x)
+{
+ return __arch_bitrev32((u32)x) >> 16;
+}
+
+static __always_inline __attribute_const__ u8 __arch_bitrev8(u8 x)
+{
+ return __arch_bitrev32((u32)x) >> 24;
+}
+
+#endif
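Since rbit reverses all 32 bits, the 16- and 8-bit variants simply shift the result back down. Worked examples, following directly from the definitions above:

	__arch_bitrev32(0x00000001) == 0x80000000
	__arch_bitrev16(0x0001)     == 0x8000
	__arch_bitrev8(0xb0)        == 0x0d	/* 1011 0000 -> 0000 1101 */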
diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h
new file mode 100644
index 0000000000..ba8d9d7d24
--- /dev/null
+++ b/arch/arm/include/asm/bug.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASMARM_BUG_H
+#define _ASMARM_BUG_H
+
+#include <linux/linkage.h>
+#include <linux/types.h>
+#include <asm/opcodes.h>
+
+/*
+ * Pick a suitable undefined instruction to use for ARM/Thumb2 bug handling.
+ * We need to be careful not to conflict with those used by other modules and
+ * the register_undef_hook() system.
+ */
+#ifdef CONFIG_THUMB2_KERNEL
+#define BUG_INSTR_VALUE 0xde02
+#define BUG_INSTR(__value) __inst_thumb16(__value)
+#else
+#define BUG_INSTR_VALUE 0xe7f001f2
+#define BUG_INSTR(__value) __inst_arm(__value)
+#endif
+
+
+#define BUG() _BUG(__FILE__, __LINE__, BUG_INSTR_VALUE)
+#define _BUG(file, line, value) __BUG(file, line, value)
+
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+
+/*
+ * The extra indirection is to ensure that the __FILE__ string comes through
+ * OK. Many versions of gcc do not support the asm %c parameter which would be
+ * preferable to this unpleasantness. We use mergeable string sections to
+ * avoid multiple copies of the string appearing in the kernel image.
+ */
+
+#define __BUG(__file, __line, __value) \
+do { \
+ asm volatile("1:\t" BUG_INSTR(__value) "\n" \
+ ".pushsection .rodata.str, \"aMS\", %progbits, 1\n" \
+ "2:\t.asciz " #__file "\n" \
+ ".popsection\n" \
+ ".pushsection __bug_table,\"aw\"\n" \
+ ".align 2\n" \
+ "3:\t.word 1b, 2b\n" \
+ "\t.hword " #__line ", 0\n" \
+ ".popsection"); \
+ unreachable(); \
+} while (0)
+
+#else
+
+#define __BUG(__file, __line, __value) \
+do { \
+ asm volatile(BUG_INSTR(__value) "\n"); \
+ unreachable(); \
+} while (0)
+#endif /* CONFIG_DEBUG_BUGVERBOSE */
+
+#define HAVE_ARCH_BUG
+
+#include <asm-generic/bug.h>
+
+struct pt_regs;
+void die(const char *msg, struct pt_regs *regs, int err);
+
+void arm_notify_die(const char *str, struct pt_regs *regs,
+ int signo, int si_code, void __user *addr,
+ unsigned long err, unsigned long trap);
+
+#ifdef CONFIG_ARM_LPAE
+#define FAULT_CODE_ALIGNMENT 33
+#define FAULT_CODE_DEBUG 34
+#else
+#define FAULT_CODE_ALIGNMENT 1
+#define FAULT_CODE_DEBUG 2
+#endif
+
+void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
+ struct pt_regs *),
+ int sig, int code, const char *name);
+
+void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int,
+ struct pt_regs *),
+ int sig, int code, const char *name);
+
+extern asmlinkage void c_backtrace(unsigned long fp, int pmode,
+ const char *loglvl);
+
+struct mm_struct;
+void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr);
+extern void __show_regs(struct pt_regs *);
+extern void __show_regs_alloc_free(struct pt_regs *regs);
+
+#endif
diff --git a/arch/arm/include/asm/bugs.h b/arch/arm/include/asm/bugs.h
new file mode 100644
index 0000000000..fe385551ed
--- /dev/null
+++ b/arch/arm/include/asm/bugs.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 1995-2003 Russell King
+ */
+#ifndef __ASM_BUGS_H
+#define __ASM_BUGS_H
+
+extern void check_writebuffer_bugs(void);
+
+#ifdef CONFIG_MMU
+extern void check_other_bugs(void);
+#else
+#define check_other_bugs() do { } while (0)
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
new file mode 100644
index 0000000000..e3ea34558a
--- /dev/null
+++ b/arch/arm/include/asm/cache.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * arch/arm/include/asm/cache.h
+ */
+#ifndef __ASMARM_CACHE_H
+#define __ASMARM_CACHE_H
+
+#define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+/*
+ * Memory returned by kmalloc() may be used for DMA, so we must make
+ * sure that all such allocations are cache aligned. Otherwise,
+ * unrelated code may cause parts of the buffer to be read into the
+ * cache before the transfer is done, causing old data to be seen by
+ * the CPU.
+ */
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+
+/*
+ * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers.
+ */
+#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
+#define ARCH_SLAB_MINALIGN 8
+#endif
+
+#define __read_mostly __section(".data..read_mostly")
+
+#endif
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
new file mode 100644
index 0000000000..f6181f6957
--- /dev/null
+++ b/arch/arm/include/asm/cacheflush.h
@@ -0,0 +1,474 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/cacheflush.h
+ *
+ * Copyright (C) 1999-2002 Russell King
+ */
+#ifndef _ASMARM_CACHEFLUSH_H
+#define _ASMARM_CACHEFLUSH_H
+
+#include <linux/mm.h>
+
+#include <asm/glue-cache.h>
+#include <asm/shmparam.h>
+#include <asm/cachetype.h>
+#include <asm/outercache.h>
+
+#define CACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
+
+/*
+ * This flag is used to indicate that the page pointed to by a pte is clean
+ * and does not require cleaning before returning it to the user.
+ */
+#define PG_dcache_clean PG_arch_1
+
+/*
+ * MM Cache Management
+ * ===================
+ *
+ * The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
+ * implement these methods.
+ *
+ * Start addresses are inclusive and end addresses are exclusive;
+ * start addresses should be rounded down, end addresses up.
+ *
+ * See Documentation/core-api/cachetlb.rst for more information.
+ * Please note that the implementation of these, and the required
+ * effects are cache-type (VIVT/VIPT/PIPT) specific.
+ *
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ * Currently only needed for cache-v6.S and cache-v7.S, see
+ * __flush_icache_all for the generic implementation.
+ *
+ * flush_kern_all()
+ *
+ * Unconditionally clean and invalidate the entire cache.
+ *
+ * flush_kern_louis()
+ *
+ * Flush data cache levels up to the level of unification
+ * inner shareable and invalidate the I-cache.
+ * Only needed from v7 onwards, falls back to flush_cache_all()
+ * for all other processor versions.
+ *
+ * flush_user_all()
+ *
+ * Clean and invalidate all user space cache entries
+ * before a change of page tables.
+ *
+ * flush_user_range(start, end, flags)
+ *
+ * Clean and invalidate a range of cache entries in the
+ * specified address space before a change of page tables.
+ * - start - user start address (inclusive, page aligned)
+ * - end - user end address (exclusive, page aligned)
+ * - flags - vma->vm_flags field
+ *
+ * coherent_kern_range(start, end)
+ *
+ * Ensure coherency between the Icache and the Dcache in the
+ * region described by start, end. If you have non-snooping
+ * Harvard caches, you need to implement this function.
+ * - start - virtual start address
+ * - end - virtual end address
+ *
+ * coherent_user_range(start, end)
+ *
+ * Ensure coherency between the Icache and the Dcache in the
+ * region described by start, end. If you have non-snooping
+ * Harvard caches, you need to implement this function.
+ * - start - virtual start address
+ * - end - virtual end address
+ *
+ * flush_kern_dcache_area(kaddr, size)
+ *
+ * Ensure that the data held in page is written back.
+ * - kaddr - page address
+ * - size - region size
+ *
+ * DMA Cache Coherency
+ * ===================
+ *
+ * dma_flush_range(start, end)
+ *
+ * Clean and invalidate the specified virtual address range.
+ * - start - virtual start address
+ * - end - virtual end address
+ */
+
+struct cpu_cache_fns {
+ void (*flush_icache_all)(void);
+ void (*flush_kern_all)(void);
+ void (*flush_kern_louis)(void);
+ void (*flush_user_all)(void);
+ void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
+
+ void (*coherent_kern_range)(unsigned long, unsigned long);
+ int (*coherent_user_range)(unsigned long, unsigned long);
+ void (*flush_kern_dcache_area)(void *, size_t);
+
+ void (*dma_map_area)(const void *, size_t, int);
+ void (*dma_unmap_area)(const void *, size_t, int);
+
+ void (*dma_flush_range)(const void *, const void *);
+} __no_randomize_layout;
+
+/*
+ * Select the calling method
+ */
+#ifdef MULTI_CACHE
+
+extern struct cpu_cache_fns cpu_cache;
+
+#define __cpuc_flush_icache_all cpu_cache.flush_icache_all
+#define __cpuc_flush_kern_all cpu_cache.flush_kern_all
+#define __cpuc_flush_kern_louis cpu_cache.flush_kern_louis
+#define __cpuc_flush_user_all cpu_cache.flush_user_all
+#define __cpuc_flush_user_range cpu_cache.flush_user_range
+#define __cpuc_coherent_kern_range cpu_cache.coherent_kern_range
+#define __cpuc_coherent_user_range cpu_cache.coherent_user_range
+#define __cpuc_flush_dcache_area cpu_cache.flush_kern_dcache_area
+
+/*
+ * These are private to the dma-mapping API. Do not use directly.
+ * Their sole purpose is to ensure that data held in the cache
+ * is visible to DMA, or data written by DMA to system memory is
+ * visible to the CPU.
+ */
+#define dmac_flush_range cpu_cache.dma_flush_range
+
+#else
+
+extern void __cpuc_flush_icache_all(void);
+extern void __cpuc_flush_kern_all(void);
+extern void __cpuc_flush_kern_louis(void);
+extern void __cpuc_flush_user_all(void);
+extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
+extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
+extern int __cpuc_coherent_user_range(unsigned long, unsigned long);
+extern void __cpuc_flush_dcache_area(void *, size_t);
+
+/*
+ * These are private to the dma-mapping API. Do not use directly.
+ * Their sole purpose is to ensure that data held in the cache
+ * is visible to DMA, or data written by DMA to system memory is
+ * visible to the CPU.
+ */
+extern void dmac_flush_range(const void *, const void *);
+
+#endif
+
+/*
+ * Copy user data from/to a page which is mapped into a different
+ * processes address space. Really, we want to allow our "user
+ * space" model to handle this.
+ */
+extern void copy_to_user_page(struct vm_area_struct *, struct page *,
+ unsigned long, void *, const void *, unsigned long);
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ do { \
+ memcpy(dst, src, len); \
+ } while (0)
+
+/*
+ * Convert calls to our calling convention.
+ */
+
+/* Invalidate I-cache */
+#define __flush_icache_all_generic() \
+ asm("mcr p15, 0, %0, c7, c5, 0" \
+ : : "r" (0));
+
+/* Invalidate I-cache inner shareable */
+#define __flush_icache_all_v7_smp() \
+ asm("mcr p15, 0, %0, c7, c1, 0" \
+ : : "r" (0));
+
+/*
+ * Optimized __flush_icache_all for the common cases. Note that UP ARMv7
+ * will fall through to use __flush_icache_all_generic.
+ */
+#if (defined(CONFIG_CPU_V7) && \
+ (defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) || \
+ defined(CONFIG_SMP_ON_UP)
+#define __flush_icache_preferred __cpuc_flush_icache_all
+#elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
+#define __flush_icache_preferred __flush_icache_all_v7_smp
+#elif __LINUX_ARM_ARCH__ == 6 && defined(CONFIG_ARM_ERRATA_411920)
+#define __flush_icache_preferred __cpuc_flush_icache_all
+#else
+#define __flush_icache_preferred __flush_icache_all_generic
+#endif
+
+static inline void __flush_icache_all(void)
+{
+ __flush_icache_preferred();
+ dsb(ishst);
+}
+
+/*
+ * Flush caches up to Level of Unification Inner Shareable
+ */
+#define flush_cache_louis() __cpuc_flush_kern_louis()
+
+#define flush_cache_all() __cpuc_flush_kern_all()
+
+static inline void vivt_flush_cache_mm(struct mm_struct *mm)
+{
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
+ __cpuc_flush_user_all();
+}
+
+static inline void
+vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+{
+ struct mm_struct *mm = vma->vm_mm;
+
+ if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
+ __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
+ vma->vm_flags);
+}
+
+static inline void vivt_flush_cache_pages(struct vm_area_struct *vma,
+ unsigned long user_addr, unsigned long pfn, unsigned int nr)
+{
+ struct mm_struct *mm = vma->vm_mm;
+
+ if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
+ unsigned long addr = user_addr & PAGE_MASK;
+ __cpuc_flush_user_range(addr, addr + nr * PAGE_SIZE,
+ vma->vm_flags);
+ }
+}
+
+#ifndef CONFIG_CPU_CACHE_VIPT
+#define flush_cache_mm(mm) \
+ vivt_flush_cache_mm(mm)
+#define flush_cache_range(vma,start,end) \
+ vivt_flush_cache_range(vma,start,end)
+#define flush_cache_pages(vma, addr, pfn, nr) \
+ vivt_flush_cache_pages(vma, addr, pfn, nr)
+#else
+void flush_cache_mm(struct mm_struct *mm);
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+void flush_cache_pages(struct vm_area_struct *vma, unsigned long user_addr,
+ unsigned long pfn, unsigned int nr);
+#endif
+
+#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+#define flush_cache_page(vma, addr, pfn) flush_cache_pages(vma, addr, pfn, 1)
+
+/*
+ * flush_icache_user_range is used when we want to ensure that the
+ * Harvard caches are synchronised for the user space address range.
+ * This is used for the ARM private sys_cacheflush system call.
+ */
+#define flush_icache_user_range(s,e) __cpuc_coherent_user_range(s,e)
+
+/*
+ * Perform necessary cache operations to ensure that data previously
+ * stored within this range of addresses can be executed by the CPU.
+ */
+#define flush_icache_range(s,e) __cpuc_coherent_kern_range(s,e)
+
+/*
+ * Perform necessary cache operations to ensure that the TLB will
+ * see data written in the specified area.
+ */
+#define clean_dcache_area(start,size) cpu_dcache_clean_area(start, size)
+
+/*
+ * flush_dcache_page is used when the kernel has written to the page
+ * cache page at virtual address page->virtual.
+ *
+ * If this page isn't mapped (ie, page_mapping == NULL), or it might
+ * have userspace mappings, then we _must_ always clean + invalidate
+ * the dcache entries associated with the kernel mapping.
+ *
+ * Otherwise we can defer the operation, and clean the cache when we are
+ * about to change to user space. This is the same method as used on SPARC64.
+ * See update_mmu_cache for the user space part.
+ */
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+void flush_dcache_page(struct page *);
+void flush_dcache_folio(struct folio *folio);
+#define flush_dcache_folio flush_dcache_folio
+
+#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
+static inline void flush_kernel_vmap_range(void *addr, int size)
+{
+ if ((cache_is_vivt() || cache_is_vipt_aliasing()))
+ __cpuc_flush_dcache_area(addr, (size_t)size);
+}
+static inline void invalidate_kernel_vmap_range(void *addr, int size)
+{
+ if ((cache_is_vivt() || cache_is_vipt_aliasing()))
+ __cpuc_flush_dcache_area(addr, (size_t)size);
+}
+
+#define ARCH_HAS_FLUSH_ANON_PAGE
+static inline void flush_anon_page(struct vm_area_struct *vma,
+ struct page *page, unsigned long vmaddr)
+{
+ extern void __flush_anon_page(struct vm_area_struct *vma,
+ struct page *, unsigned long);
+ if (PageAnon(page))
+ __flush_anon_page(vma, page, vmaddr);
+}
+
+#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
+#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
+
+/*
+ * flush_cache_vmap() is used when creating mappings (eg, via vmap,
+ * vmalloc, ioremap etc) in kernel space for pages. On non-VIPT
+ * caches, since the direct-mappings of these pages may contain cached
+ * data, we need to do a full cache flush to ensure that writebacks
+ * don't corrupt data placed into these pages via the new mappings.
+ */
+static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+ if (!cache_is_vipt_nonaliasing())
+ flush_cache_all();
+ else
+ /*
+ * set_pte_at() called from vmap_pte_range() does not
+ * have a DSB after cleaning the cache line.
+ */
+ dsb(ishst);
+}
+
+static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
+{
+ if (!cache_is_vipt_nonaliasing())
+ flush_cache_all();
+}
+
+/*
+ * Memory synchronization helpers for mixed cached vs non cached accesses.
+ *
+ * Some synchronization algorithms have to set states in memory with the
+ * cache enabled or disabled depending on the code path. It is crucial
+ * to always ensure proper cache maintenance to update main memory right
+ * away in that case.
+ *
+ * Any cached write must be followed by a cache clean operation.
+ * Any cached read must be preceded by a cache invalidate operation.
+ * Yet, in the read case, a cache flush i.e. atomic clean+invalidate
+ * operation is needed to avoid discarding possible concurrent writes to the
+ * accessed memory.
+ *
+ * Also, in order to prevent a cached writer from interfering with an
+ * adjacent non-cached writer, each state variable must be located in
+ * a separate cache line.
+ */
+
+/*
+ * This needs to be >= the max cache writeback size of all
+ * supported platforms included in the current kernel configuration.
+ * This is used to align state variables to their own cache lines.
+ */
+#define __CACHE_WRITEBACK_ORDER 6 /* guessed from existing platforms */
+#define __CACHE_WRITEBACK_GRANULE (1 << __CACHE_WRITEBACK_ORDER)
+
+/*
+ * There is no __cpuc_clean_dcache_area but we use it anyway for
+ * code intent clarity, and alias it to __cpuc_flush_dcache_area.
+ */
+#define __cpuc_clean_dcache_area __cpuc_flush_dcache_area
+
+/*
+ * Ensure preceding writes to *p by this CPU are visible to
+ * subsequent reads by other CPUs:
+ */
+static inline void __sync_cache_range_w(volatile void *p, size_t size)
+{
+ char *_p = (char *)p;
+
+ __cpuc_clean_dcache_area(_p, size);
+ outer_clean_range(__pa(_p), __pa(_p + size));
+}
+
+/*
+ * Ensure preceding writes to *p by other CPUs are visible to
+ * subsequent reads by this CPU. We must be careful not to
+ * discard data simultaneously written by another CPU, hence the
+ * usage of flush rather than invalidate operations.
+ */
+static inline void __sync_cache_range_r(volatile void *p, size_t size)
+{
+ char *_p = (char *)p;
+
+#ifdef CONFIG_OUTER_CACHE
+ if (outer_cache.flush_range) {
+ /*
+ * Ensure dirty data migrated from other CPUs into our cache
+ * are cleaned out safely before the outer cache is cleaned:
+ */
+ __cpuc_clean_dcache_area(_p, size);
+
+ /* Clean and invalidate stale data for *p from outer ... */
+ outer_flush_range(__pa(_p), __pa(_p + size));
+ }
+#endif
+
+ /* ... and inner cache: */
+ __cpuc_flush_dcache_area(_p, size);
+}
+
+#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
+#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
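sync_cache_w() is intended for publishing a datum to an observer that may read RAM with its caches off, such as a secondary CPU spinning in a boot-time holding pen. A sketch (variable name hypothetical):

	static unsigned long release_flag;

	release_flag = 1;
	sync_cache_w(&release_flag);	/* clean inner + outer so RAM is current */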
+
+/*
+ * Disabling cache access for one CPU in an ARMv7 SMP system is tricky.
+ * To do so we must:
+ *
+ * - Clear the SCTLR.C bit to prevent further cache allocations
+ * - Flush the desired level of cache
+ * - Clear the ACTLR "SMP" bit to disable local coherency
+ *
+ * ... and to do so without any intervening memory access between those steps,
+ * not even to the stack.
+ *
+ * WARNING -- After this has been called:
+ *
+ * - No ldrex/strex (and similar) instructions must be used.
+ * - The CPU is obviously no longer coherent with the other CPUs.
+ * - This is unlikely to work as expected if Linux is running non-secure.
+ *
+ * Note:
+ *
+ * - This is known to apply to several ARMv7 processor implementations,
+ * however some exceptions may exist. Caveat emptor.
+ *
+ * - The clobber list is dictated by the call to v7_flush_dcache_*.
+ */
+#define v7_exit_coherency_flush(level) \
+ asm volatile( \
+ ".arch armv7-a \n\t" \
+ "mrc p15, 0, r0, c1, c0, 0 @ get SCTLR \n\t" \
+ "bic r0, r0, #"__stringify(CR_C)" \n\t" \
+ "mcr p15, 0, r0, c1, c0, 0 @ set SCTLR \n\t" \
+ "isb \n\t" \
+ "bl v7_flush_dcache_"__stringify(level)" \n\t" \
+ "mrc p15, 0, r0, c1, c0, 1 @ get ACTLR \n\t" \
+ "bic r0, r0, #(1 << 6) @ disable local coherency \n\t" \
+ "mcr p15, 0, r0, c1, c0, 1 @ set ACTLR \n\t" \
+ "isb \n\t" \
+ "dsb" \
+ : : : "r0","r1","r2","r3","r4","r5","r6", \
+ "r9","r10","ip","lr","memory" )
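The level argument is a bare token that gets pasted into the v7_flush_dcache_* symbol name, so callers in CPU-hotplug or MCPM power-down paths invoke it as, for instance:

	v7_exit_coherency_flush(louis);	/* flush to LoUIS, then leave SMP coherency */
	/* or, for a full flush: v7_exit_coherency_flush(all); */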
+
+void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
+ void *kaddr, unsigned long len);
+
+
+#ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND
+void check_cpu_icache_size(int cpuid);
+#else
+static inline void check_cpu_icache_size(int cpuid) { }
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/cachetype.h b/arch/arm/include/asm/cachetype.h
new file mode 100644
index 0000000000..e8c30430be
--- /dev/null
+++ b/arch/arm/include/asm/cachetype.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_ARM_CACHETYPE_H
+#define __ASM_ARM_CACHETYPE_H
+
+#define CACHEID_VIVT (1 << 0)
+#define CACHEID_VIPT_NONALIASING (1 << 1)
+#define CACHEID_VIPT_ALIASING (1 << 2)
+#define CACHEID_VIPT (CACHEID_VIPT_ALIASING|CACHEID_VIPT_NONALIASING)
+#define CACHEID_ASID_TAGGED (1 << 3)
+#define CACHEID_VIPT_I_ALIASING (1 << 4)
+#define CACHEID_PIPT (1 << 5)
+
+extern unsigned int cacheid;
+
+#define cache_is_vivt() cacheid_is(CACHEID_VIVT)
+#define cache_is_vipt() cacheid_is(CACHEID_VIPT)
+#define cache_is_vipt_nonaliasing() cacheid_is(CACHEID_VIPT_NONALIASING)
+#define cache_is_vipt_aliasing() cacheid_is(CACHEID_VIPT_ALIASING)
+#define icache_is_vivt_asid_tagged() cacheid_is(CACHEID_ASID_TAGGED)
+#define icache_is_vipt_aliasing() cacheid_is(CACHEID_VIPT_I_ALIASING)
+#define icache_is_pipt() cacheid_is(CACHEID_PIPT)
+
+/*
+ * __LINUX_ARM_ARCH__ is the minimum supported CPU architecture
+ * Mask out support which will never be present on newer CPUs.
+ * - v6+ is never VIVT
+ * - v7+ VIPT never aliases on D-side
+ */
+#if __LINUX_ARM_ARCH__ >= 7
+#define __CACHEID_ARCH_MIN (CACHEID_VIPT_NONALIASING |\
+ CACHEID_ASID_TAGGED |\
+ CACHEID_VIPT_I_ALIASING |\
+ CACHEID_PIPT)
+#elif __LINUX_ARM_ARCH__ >= 6
+#define __CACHEID_ARCH_MIN (~CACHEID_VIVT)
+#else
+#define __CACHEID_ARCH_MIN (~0)
+#endif
+
+/*
+ * Mask out support which isn't configured
+ */
+#if defined(CONFIG_CPU_CACHE_VIVT) && !defined(CONFIG_CPU_CACHE_VIPT)
+#define __CACHEID_ALWAYS (CACHEID_VIVT)
+#define __CACHEID_NEVER (~CACHEID_VIVT)
+#elif !defined(CONFIG_CPU_CACHE_VIVT) && defined(CONFIG_CPU_CACHE_VIPT)
+#define __CACHEID_ALWAYS (0)
+#define __CACHEID_NEVER (CACHEID_VIVT)
+#else
+#define __CACHEID_ALWAYS (0)
+#define __CACHEID_NEVER (0)
+#endif
+
+static inline unsigned int __attribute__((pure)) cacheid_is(unsigned int mask)
+{
+ return (__CACHEID_ALWAYS & mask) |
+ (~__CACHEID_NEVER & __CACHEID_ARCH_MIN & mask & cacheid);
+}
+
+#define CSSELR_ICACHE 1
+#define CSSELR_DCACHE 0
+
+#define CSSELR_L1 (0 << 1)
+#define CSSELR_L2 (1 << 1)
+#define CSSELR_L3 (2 << 1)
+#define CSSELR_L4 (3 << 1)
+#define CSSELR_L5 (4 << 1)
+#define CSSELR_L6 (5 << 1)
+#define CSSELR_L7 (6 << 1)
+
+#ifndef CONFIG_CPU_V7M
+static inline void set_csselr(unsigned int cache_selector)
+{
+ asm volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (cache_selector));
+}
+
+static inline unsigned int read_ccsidr(void)
+{
+ unsigned int val;
+
+ asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (val));
+ return val;
+}
+#else /* CONFIG_CPU_V7M */
+#include <linux/io.h>
+#include "asm/v7m.h"
+
+static inline void set_csselr(unsigned int cache_selector)
+{
+ writel(cache_selector, BASEADDR_V7M_SCB + V7M_SCB_CTR);
+}
+
+static inline unsigned int read_ccsidr(void)
+{
+ return readl(BASEADDR_V7M_SCB + V7M_SCB_CCSIDR);
+}
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
new file mode 100644
index 0000000000..d8a13959bf
--- /dev/null
+++ b/arch/arm/include/asm/checksum.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * arch/arm/include/asm/checksum.h
+ *
+ * IP checksum routines
+ *
+ * Copyright (C) Original authors of ../asm-i386/checksum.h
+ * Copyright (C) 1996-1999 Russell King
+ */
+#ifndef __ASM_ARM_CHECKSUM_H
+#define __ASM_ARM_CHECKSUM_H
+
+#include <linux/in6.h>
+#include <linux/uaccess.h>
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+__wsum csum_partial(const void *buff, int len, __wsum sum);
+
+/*
+ * the same as csum_partial, but copies from src while it
+ * checksums, and handles user-space pointer exceptions correctly, when needed.
+ *
+ * Here it is even more important to align src and dst on a 32-bit (or,
+ * better, a 64-bit) boundary.
+ */
+
+__wsum
+csum_partial_copy_nocheck(const void *src, void *dst, int len);
+
+__wsum
+csum_partial_copy_from_user(const void __user *src, void *dst, int len);
+
+#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
+#define _HAVE_ARCH_CSUM_AND_COPY
+static inline
+__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len)
+{
+ if (!access_ok(src, len))
+ return 0;
+
+ return csum_partial_copy_from_user(src, dst, len);
+}
+
+/*
+ * Fold a partial checksum without adding pseudo headers
+ */
+static inline __sum16 csum_fold(__wsum sum)
+{
+ __asm__(
+ "add %0, %1, %1, ror #16 @ csum_fold"
+ : "=r" (sum)
+ : "r" (sum)
+ : "cc");
+ return (__force __sum16)(~(__force u32)sum >> 16);
+}
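The rotate-and-add folds the upper halfword into the lower one (and vice versa), and the complement of the upper half of the result is returned. A worked example:

	/* sum = 0x12345678
	 * sum + ror(sum, 16) = 0x12345678 + 0x56781234 = 0x68ac68ac
	 * upper halfword     = 0x1234 + 0x5678        = 0x68ac
	 * ~0x68ac68ac >> 16  = 0x9753  <- the returned __sum16
	 */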
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ */
+static inline __sum16
+ip_fast_csum(const void *iph, unsigned int ihl)
+{
+ unsigned int tmp1;
+ __wsum sum;
+
+ __asm__ __volatile__(
+ "ldr %0, [%1], #4 @ ip_fast_csum \n\
+ ldr %3, [%1], #4 \n\
+ sub %2, %2, #5 \n\
+ adds %0, %0, %3 \n\
+ ldr %3, [%1], #4 \n\
+ adcs %0, %0, %3 \n\
+ ldr %3, [%1], #4 \n\
+1: adcs %0, %0, %3 \n\
+ ldr %3, [%1], #4 \n\
+ tst %2, #15 @ do this carefully \n\
+ subne %2, %2, #1 @ without destroying \n\
+ bne 1b @ the carry flag \n\
+ adcs %0, %0, %3 \n\
+ adc %0, %0, #0"
+ : "=r" (sum), "=r" (iph), "=r" (ihl), "=r" (tmp1)
+ : "1" (iph), "2" (ihl)
+ : "cc", "memory");
+ return csum_fold(sum);
+}
+
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
+ __u8 proto, __wsum sum)
+{
+ u32 lenprot = len + proto;
+ if (__builtin_constant_p(sum) && sum == 0) {
+ __asm__(
+ "adds %0, %1, %2 @ csum_tcpudp_nofold0 \n\t"
+#ifdef __ARMEB__
+ "adcs %0, %0, %3 \n\t"
+#else
+ "adcs %0, %0, %3, ror #8 \n\t"
+#endif
+ "adc %0, %0, #0"
+ : "=&r" (sum)
+ : "r" (daddr), "r" (saddr), "r" (lenprot)
+ : "cc");
+ } else {
+ __asm__(
+ "adds %0, %1, %2 @ csum_tcpudp_nofold \n\t"
+ "adcs %0, %0, %3 \n\t"
+#ifdef __ARMEB__
+ "adcs %0, %0, %4 \n\t"
+#else
+ "adcs %0, %0, %4, ror #8 \n\t"
+#endif
+ "adc %0, %0, #0"
+ : "=&r"(sum)
+ : "r" (sum), "r" (daddr), "r" (saddr), "r" (lenprot)
+ : "cc");
+ }
+ return sum;
+}
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+static inline __sum16
+csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
+ __u8 proto, __wsum sum)
+{
+ return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
+}
+
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+static inline __sum16
+ip_compute_csum(const void *buff, int len)
+{
+ return csum_fold(csum_partial(buff, len, 0));
+}
+
+#define _HAVE_ARCH_IPV6_CSUM
+extern __wsum
+__csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, __be32 len,
+ __be32 proto, __wsum sum);
+
+static inline __sum16
+csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
+ __u32 len, __u8 proto, __wsum sum)
+{
+ return csum_fold(__csum_ipv6_magic(saddr, daddr, htonl(len),
+ htonl(proto), sum));
+}
+#endif
diff --git a/arch/arm/include/asm/clocksource.h b/arch/arm/include/asm/clocksource.h
new file mode 100644
index 0000000000..13651c731a
--- /dev/null
+++ b/arch/arm/include/asm/clocksource.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_CLOCKSOURCE_H
+#define _ASM_CLOCKSOURCE_H
+
+#include <asm/vdso/clocksource.h>
+
+#endif /* _ASM_CLOCKSOURCE_H */
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
new file mode 100644
index 0000000000..44667bdb47
--- /dev/null
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -0,0 +1,280 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_ARM_CMPXCHG_H
+#define __ASM_ARM_CMPXCHG_H
+
+#include <linux/irqflags.h>
+#include <linux/prefetch.h>
+#include <asm/barrier.h>
+
+#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
+/*
+ * On the StrongARM, "swp" is terminally broken since it bypasses the
+ * cache totally. This means that the cache becomes inconsistent, and,
+ * since we use normal loads/stores as well, this is really bad.
+ * Typically, this causes oopsen in filp_close, but could have other,
+ * more disastrous effects. There are two work-arounds:
+ * 1. Disable interrupts and emulate the atomic swap
+ * 2. Clean the cache, perform atomic swap, flush the cache
+ *
+ * We choose (1) since it's the "easiest" to achieve here and is not
+ * dependent on the processor type.
+ *
+ * NOTE that this solution won't work on an SMP system, so explicitly
+ * forbid it here.
+ */
+#define swp_is_buggy
+#endif
+
+static inline unsigned long
+__arch_xchg(unsigned long x, volatile void *ptr, int size)
+{
+ extern void __bad_xchg(volatile void *, int);
+ unsigned long ret;
+#ifdef swp_is_buggy
+ unsigned long flags;
+#endif
+#if __LINUX_ARM_ARCH__ >= 6
+ unsigned int tmp;
+#endif
+
+ prefetchw((const void *)ptr);
+
+ switch (size) {
+#if __LINUX_ARM_ARCH__ >= 6
+#ifndef CONFIG_CPU_V6 /* MIN ARCH >= V6K */
+ case 1:
+ asm volatile("@ __xchg1\n"
+ "1: ldrexb %0, [%3]\n"
+ " strexb %1, %2, [%3]\n"
+ " teq %1, #0\n"
+ " bne 1b"
+ : "=&r" (ret), "=&r" (tmp)
+ : "r" (x), "r" (ptr)
+ : "memory", "cc");
+ break;
+ case 2:
+ asm volatile("@ __xchg2\n"
+ "1: ldrexh %0, [%3]\n"
+ " strexh %1, %2, [%3]\n"
+ " teq %1, #0\n"
+ " bne 1b"
+ : "=&r" (ret), "=&r" (tmp)
+ : "r" (x), "r" (ptr)
+ : "memory", "cc");
+ break;
+#endif
+ case 4:
+ asm volatile("@ __xchg4\n"
+ "1: ldrex %0, [%3]\n"
+ " strex %1, %2, [%3]\n"
+ " teq %1, #0\n"
+ " bne 1b"
+ : "=&r" (ret), "=&r" (tmp)
+ : "r" (x), "r" (ptr)
+ : "memory", "cc");
+ break;
+#elif defined(swp_is_buggy)
+#ifdef CONFIG_SMP
+#error SMP is not supported on this platform
+#endif
+ case 1:
+ raw_local_irq_save(flags);
+ ret = *(volatile unsigned char *)ptr;
+ *(volatile unsigned char *)ptr = x;
+ raw_local_irq_restore(flags);
+ break;
+
+ case 4:
+ raw_local_irq_save(flags);
+ ret = *(volatile unsigned long *)ptr;
+ *(volatile unsigned long *)ptr = x;
+ raw_local_irq_restore(flags);
+ break;
+#else
+ case 1:
+ asm volatile("@ __xchg1\n"
+ " swpb %0, %1, [%2]"
+ : "=&r" (ret)
+ : "r" (x), "r" (ptr)
+ : "memory", "cc");
+ break;
+ case 4:
+ asm volatile("@ __xchg4\n"
+ " swp %0, %1, [%2]"
+ : "=&r" (ret)
+ : "r" (x), "r" (ptr)
+ : "memory", "cc");
+ break;
+#endif
+ default:
+ /* Cause a link-time error, the xchg() size is not supported */
+ __bad_xchg(ptr, size), ret = 0;
+ break;
+ }
+
+ return ret;
+}
+
+#define arch_xchg_relaxed(ptr, x) ({ \
+ (__typeof__(*(ptr)))__arch_xchg((unsigned long)(x), (ptr), \
+ sizeof(*(ptr))); \
+})
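arch_xchg_relaxed() is the classic test-and-set primitive. A sketch of a crude try-lock built on it (hypothetical, with an explicit barrier since the relaxed form orders nothing):

	static unsigned long my_lock;	/* 0 = free, 1 = held */

	static bool my_trylock(void)
	{
		if (arch_xchg_relaxed(&my_lock, 1UL) == 0) {
			smp_mb();	/* acquire semantics for the critical section */
			return true;
		}
		return false;
	}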
+
+#include <asm-generic/cmpxchg-local.h>
+
+#if __LINUX_ARM_ARCH__ < 6
+/* min ARCH < ARMv6 */
+
+#ifdef CONFIG_SMP
+#error "SMP is not supported on this platform"
+#endif
+
+#define arch_xchg arch_xchg_relaxed
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define arch_cmpxchg_local(ptr, o, n) ({ \
+ (__typeof(*ptr))__generic_cmpxchg_local((ptr), \
+ (unsigned long)(o), \
+ (unsigned long)(n), \
+ sizeof(*(ptr))); \
+})
+
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
+
+#include <asm-generic/cmpxchg.h>
+
+#else /* min ARCH >= ARMv6 */
+
+extern void __bad_cmpxchg(volatile void *ptr, int size);
+
+/*
+ * cmpxchg only supports 32-bit operands on ARMv6.
+ */
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+ unsigned long new, int size)
+{
+ unsigned long oldval, res;
+
+ prefetchw((const void *)ptr);
+
+ switch (size) {
+#ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */
+ case 1:
+ do {
+ asm volatile("@ __cmpxchg1\n"
+ " ldrexb %1, [%2]\n"
+ " mov %0, #0\n"
+ " teq %1, %3\n"
+ " strexbeq %0, %4, [%2]\n"
+ : "=&r" (res), "=&r" (oldval)
+ : "r" (ptr), "Ir" (old), "r" (new)
+ : "memory", "cc");
+ } while (res);
+ break;
+ case 2:
+ do {
+ asm volatile("@ __cmpxchg2\n"
+ " ldrexh %1, [%2]\n"
+ " mov %0, #0\n"
+ " teq %1, %3\n"
+ " strexheq %0, %4, [%2]\n"
+ : "=&r" (res), "=&r" (oldval)
+ : "r" (ptr), "Ir" (old), "r" (new)
+ : "memory", "cc");
+ } while (res);
+ break;
+#endif
+ case 4:
+ do {
+ asm volatile("@ __cmpxchg4\n"
+ " ldrex %1, [%2]\n"
+ " mov %0, #0\n"
+ " teq %1, %3\n"
+ " strexeq %0, %4, [%2]\n"
+ : "=&r" (res), "=&r" (oldval)
+ : "r" (ptr), "Ir" (old), "r" (new)
+ : "memory", "cc");
+ } while (res);
+ break;
+ default:
+ __bad_cmpxchg(ptr, size);
+ oldval = 0;
+ }
+
+ return oldval;
+}
+
+#define arch_cmpxchg_relaxed(ptr,o,n) ({ \
+ (__typeof__(*(ptr)))__cmpxchg((ptr), \
+ (unsigned long)(o), \
+ (unsigned long)(n), \
+ sizeof(*(ptr))); \
+})
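On top of the relaxed cmpxchg sits the standard retry loop; for example, recording a running maximum (hypothetical helper; READ_ONCE comes from <linux/compiler.h>):

	static inline void track_max(unsigned int *stat, unsigned int sample)
	{
		unsigned int old = READ_ONCE(*stat);

		while (old < sample) {
			unsigned int prev = arch_cmpxchg_relaxed(stat, old, sample);
			if (prev == old)
				break;		/* our value stuck */
			old = prev;		/* re-check against the newer value */
		}
	}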
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+ unsigned long old,
+ unsigned long new, int size)
+{
+ unsigned long ret;
+
+ switch (size) {
+#ifdef CONFIG_CPU_V6 /* min ARCH == ARMv6 */
+ case 1:
+ case 2:
+ ret = __generic_cmpxchg_local(ptr, old, new, size);
+ break;
+#endif
+ default:
+ ret = __cmpxchg(ptr, old, new, size);
+ }
+
+ return ret;
+}
+
+#define arch_cmpxchg_local(ptr, o, n) ({ \
+ (__typeof(*ptr))__cmpxchg_local((ptr), \
+ (unsigned long)(o), \
+ (unsigned long)(n), \
+ sizeof(*(ptr))); \
+})
+
+static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
+ unsigned long long old,
+ unsigned long long new)
+{
+ unsigned long long oldval;
+ unsigned long res;
+
+ prefetchw(ptr);
+
+ __asm__ __volatile__(
+"1: ldrexd %1, %H1, [%3]\n"
+" teq %1, %4\n"
+" teqeq %H1, %H4\n"
+" bne 2f\n"
+" strexd %0, %5, %H5, [%3]\n"
+" teq %0, #0\n"
+" bne 1b\n"
+"2:"
+ : "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
+ : "r" (ptr), "r" (old), "r" (new)
+ : "cc");
+
+ return oldval;
+}
+
+#define arch_cmpxchg64_relaxed(ptr, o, n) ({ \
+ (__typeof__(*(ptr)))__cmpxchg64((ptr), \
+ (unsigned long long)(o), \
+ (unsigned long long)(n)); \
+})
+
+#define arch_cmpxchg64_local(ptr, o, n) arch_cmpxchg64_relaxed((ptr), (o), (n))
+
+#endif /* __LINUX_ARM_ARCH__ >= 6 */
+
+#endif /* __ASM_ARM_CMPXCHG_H */
diff --git a/arch/arm/include/asm/compiler.h b/arch/arm/include/asm/compiler.h
new file mode 100644
index 0000000000..5e94e67d10
--- /dev/null
+++ b/arch/arm/include/asm/compiler.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_ARM_COMPILER_H
+#define __ASM_ARM_COMPILER_H
+
+/*
+ * This is used to ensure the compiler did actually allocate the register we
+ * asked it for some inline assembly sequences. Apparently we can't trust
+ * the compiler from one version to another so a bit of paranoia won't hurt.
+ * This string is meant to be concatenated with the inline asm string and
+ * will cause compilation to stop on mismatch.
+ * (for details, see gcc PR 15089)
+ * For compatibility with clang, we have to specifically take the equivalence
+ * of 'r11' <-> 'fp' and 'r12' <-> 'ip' into account as well.
+ */
+#define __asmeq(x, y) \
+ ".ifnc " x "," y "; " \
+ ".ifnc " x y ",fpr11; " \
+ ".ifnc " x y ",r11fp; " \
+ ".ifnc " x y ",ipr12; " \
+ ".ifnc " x y ",r12ip; " \
+ ".err; " \
+ ".endif; " \
+ ".endif; " \
+ ".endif; " \
+ ".endif; " \
+ ".endif\n\t"
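__asmeq() is concatenated in front of inline asm that hard-codes register names, turning a register-allocation mismatch into an assembly-time .err. A sketch in the style of the ARM SVC/SMC call wrappers (call semantics hypothetical):

	register unsigned long r0 asm("r0") = fn_id;

	asm volatile(
		__asmeq("%0", "r0")
		"svc	#0"
		: "+r" (r0)
		:
		: "memory");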
+
+
+#endif /* __ASM_ARM_COMPILER_H */
diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
new file mode 100644
index 0000000000..a54230e656
--- /dev/null
+++ b/arch/arm/include/asm/cp15.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_ARM_CP15_H
+#define __ASM_ARM_CP15_H
+
+#include <asm/barrier.h>
+
+/*
+ * CR1 bits (CP#15 CR1)
+ */
+#define CR_M (1 << 0) /* MMU enable */
+#define CR_A (1 << 1) /* Alignment abort enable */
+#define CR_C (1 << 2) /* Dcache enable */
+#define CR_W (1 << 3) /* Write buffer enable */
+#define CR_P (1 << 4) /* 32-bit exception handler */
+#define CR_D (1 << 5) /* 32-bit data address range */
+#define CR_L (1 << 6) /* Implementation defined */
+#define CR_B (1 << 7) /* Big endian */
+#define CR_S (1 << 8) /* System MMU protection */
+#define CR_R (1 << 9) /* ROM MMU protection */
+#define CR_F (1 << 10) /* Implementation defined */
+#define CR_Z (1 << 11) /* Implementation defined */
+#define CR_I (1 << 12) /* Icache enable */
+#define CR_V (1 << 13) /* Vectors relocated to 0xffff0000 */
+#define CR_RR (1 << 14) /* Round Robin cache replacement */
+#define CR_L4 (1 << 15) /* LDR pc can set T bit */
+#define CR_DT (1 << 16)
+#ifdef CONFIG_MMU
+#define CR_HA (1 << 17) /* Hardware management of Access Flag */
+#else
+#define CR_BR (1 << 17) /* MPU Background region enable (PMSA) */
+#endif
+#define CR_IT (1 << 18)
+#define CR_ST (1 << 19)
+#define CR_FI (1 << 21) /* Fast interrupt (lower latency mode) */
+#define CR_U (1 << 22) /* Unaligned access operation */
+#define CR_XP (1 << 23) /* Extended page tables */
+#define CR_VE (1 << 24) /* Vectored interrupts */
+#define CR_EE (1 << 25) /* Exception (Big) Endian */
+#define CR_TRE (1 << 28) /* TEX remap enable */
+#define CR_AFE (1 << 29) /* Access flag enable */
+#define CR_TE (1 << 30) /* Thumb exception enable */
+
+#ifndef __ASSEMBLY__
+
+#if __LINUX_ARM_ARCH__ >= 4
+#define vectors_high() (get_cr() & CR_V)
+#else
+#define vectors_high() (0)
+#endif
+
+#ifdef CONFIG_CPU_CP15
+
+#include <asm/vdso/cp15.h>
+
+extern unsigned long cr_alignment; /* defined in entry-armv.S */
+
+static inline unsigned long get_cr(void)
+{
+ unsigned long val;
+ asm("mrc p15, 0, %0, c1, c0, 0 @ get CR" : "=r" (val) : : "cc");
+ return val;
+}
+
+static inline void set_cr(unsigned long val)
+{
+ asm volatile("mcr p15, 0, %0, c1, c0, 0 @ set CR"
+ : : "r" (val) : "cc");
+ isb();
+}
+
+static inline unsigned int get_auxcr(void)
+{
+ unsigned int val;
+ asm("mrc p15, 0, %0, c1, c0, 1 @ get AUXCR" : "=r" (val));
+ return val;
+}
+
+static inline void set_auxcr(unsigned int val)
+{
+ asm volatile("mcr p15, 0, %0, c1, c0, 1 @ set AUXCR"
+ : : "r" (val));
+ isb();
+}
+
+#define CPACC_FULL(n) (3 << (n * 2))
+#define CPACC_SVC(n) (1 << (n * 2))
+#define CPACC_DISABLE(n) (0 << (n * 2))
+
+static inline unsigned int get_copro_access(void)
+{
+ unsigned int val;
+ asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access"
+ : "=r" (val) : : "cc");
+ return val;
+}
+
+static inline void set_copro_access(unsigned int val)
+{
+ asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
+ : : "r" (val) : "cc");
+ isb();
+}
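The CPACC_* values program the two-bit per-coprocessor fields of the Coprocessor Access Control Register. The classic use is granting full access to the VFP coprocessors, cp10 and cp11, much as the VFP init code does:

	set_copro_access(get_copro_access() | CPACC_FULL(10) | CPACC_FULL(11));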
+
+#else /* ifdef CONFIG_CPU_CP15 */
+
+/*
+ * cr_alignment is tightly coupled to cp15 (at least in the minds of the
+ * developers). Yielding 0 for machines without a cp15 (and making it
+ * read-only) is fine for most cases and saves quite some #ifdeffery.
+ */
+#define cr_alignment UL(0)
+
+static inline unsigned long get_cr(void)
+{
+ return 0;
+}
+
+#endif /* ifdef CONFIG_CPU_CP15 / else */
+
+#endif /* ifndef __ASSEMBLY__ */
+
+#endif
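The control-register accessors above pair naturally with the CR_* bit definitions. A minimal sketch of the usual read-modify-write pattern, assuming a CPU with CP15 (the helper name is hypothetical):

/* Hypothetical helper: turn on alignment-fault checking (CR_A).
 * set_cr() already issues the isb() required after the write.
 */
static void enable_alignment_aborts(void)
{
	unsigned long cr = get_cr();

	if (!(cr & CR_A))
		set_cr(cr | CR_A);
}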
diff --git a/arch/arm/include/asm/cpu.h b/arch/arm/include/asm/cpu.h
new file mode 100644
index 0000000000..bd6fdb4b92
--- /dev/null
+++ b/arch/arm/include/asm/cpu.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/cpu.h
+ *
+ * Copyright (C) 2004-2005 ARM Ltd.
+ */
+#ifndef __ASM_ARM_CPU_H
+#define __ASM_ARM_CPU_H
+
+#include <linux/percpu.h>
+#include <linux/cpu.h>
+
+struct cpuinfo_arm {
+ struct cpu cpu;
+ u32 cpuid;
+#ifdef CONFIG_SMP
+ unsigned int loops_per_jiffy;
+#endif
+};
+
+DECLARE_PER_CPU(struct cpuinfo_arm, cpu_data);
+
+#endif
diff --git a/arch/arm/include/asm/cpufeature.h b/arch/arm/include/asm/cpufeature.h
new file mode 100644
index 0000000000..16c161b3ff
--- /dev/null
+++ b/arch/arm/include/asm/cpufeature.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ */
+
+#ifndef __ASM_CPUFEATURE_H
+#define __ASM_CPUFEATURE_H
+
+#include <linux/log2.h>
+#include <asm/hwcap.h>
+
+/*
+ * Due to the fact that ELF_HWCAP is a 32-bit type on ARM, and given the number
+ * of optional CPU features it defines, ARM's CPU hardware capability bits have
+ * been distributed over separate elf_hwcap and elf_hwcap2 variables, each of
+ * which covers a subset of the available CPU features.
+ *
+ * Currently, only a few of those are suitable for automatic module loading
+ * (which is the primary use case of this facility) and those happen to be all
+ * covered by HWCAP2. So let's only cover those via the cpu_feature()
+ * convenience macro for now (which is used by module_cpu_feature_match()).
+ * However, all capabilities are exposed via the modalias, and can be matched
+ * using an explicit MODULE_DEVICE_TABLE() that uses __hwcap_feature() directly.
+ */
+#define MAX_CPU_FEATURES 64
+#define __hwcap_feature(x) ilog2(HWCAP_ ## x)
+#define __hwcap2_feature(x) (32 + ilog2(HWCAP2_ ## x))
+#define cpu_feature(x) __hwcap2_feature(x)
+
+static inline bool cpu_have_feature(unsigned int num)
+{
+ return num < 32 ? elf_hwcap & BIT(num) : elf_hwcap2 & BIT(num - 32);
+}
+
+#endif
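As the comment notes, cpu_feature() exists mainly to feed module_cpu_feature_match(). A sketch of how a driver might use it, assuming <linux/cpufeature.h> and the HWCAP2 AES bit (the init function is hypothetical):

#include <linux/cpufeature.h>
#include <linux/module.h>

static int __init aes_ce_mod_init(void)
{
	/* register the accelerated implementation here */
	return 0;
}

/* Auto-loads the module on CPUs advertising HWCAP2_AES */
module_cpu_feature_match(AES, aes_ce_mod_init);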
diff --git a/arch/arm/include/asm/cpuidle.h b/arch/arm/include/asm/cpuidle.h
new file mode 100644
index 0000000000..397be5ed30
--- /dev/null
+++ b/arch/arm/include/asm/cpuidle.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_ARM_CPUIDLE_H
+#define __ASM_ARM_CPUIDLE_H
+
+#include <asm/proc-fns.h>
+
+#ifdef CONFIG_CPU_IDLE
+extern int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index);
+#define __cpuidle_method_section __used __section("__cpuidle_method_of_table")
+#else
+static inline int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index) { return -ENODEV; }
+#define __cpuidle_method_section __maybe_unused /* drop silently */
+#endif
+
+/* Common ARM WFI state */
+#define ARM_CPUIDLE_WFI_STATE_PWR(p) {\
+ .enter = arm_cpuidle_simple_enter,\
+ .exit_latency = 1,\
+ .target_residency = 1,\
+ .power_usage = p,\
+ .name = "WFI",\
+ .desc = "ARM WFI",\
+}
+
+/*
+ * in case power_specified == 1, give a default WFI power value needed
+ * by some governors
+ */
+#define ARM_CPUIDLE_WFI_STATE ARM_CPUIDLE_WFI_STATE_PWR(UINT_MAX)
+
+struct device_node;
+
+struct cpuidle_ops {
+ int (*suspend)(unsigned long arg);
+ int (*init)(struct device_node *, int cpu);
+};
+
+struct of_cpuidle_method {
+ const char *method;
+ const struct cpuidle_ops *ops;
+};
+
+#define CPUIDLE_METHOD_OF_DECLARE(name, _method, _ops) \
+ static const struct of_cpuidle_method __cpuidle_method_of_table_##name \
+ __cpuidle_method_section = { .method = _method, .ops = _ops }
+
+extern int arm_cpuidle_suspend(int index);
+
+extern int arm_cpuidle_init(int cpu);
+
+struct arm_cpuidle_irq_context { };
+
+#define arm_cpuidle_save_irq_context(c) (void)c
+#define arm_cpuidle_restore_irq_context(c) (void)c
+
+#endif
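CPUIDLE_METHOD_OF_DECLARE() ties a cpuidle_ops implementation to the "enable-method" string in the device tree; the table section is scanned when arm_cpuidle_init() runs. A hypothetical sketch (platform name, DT string and firmware call are assumptions):

static int myplat_suspend(unsigned long index)
{
	/* would trap to firmware to enter idle state 'index' */
	return 0;
}

static int myplat_init(struct device_node *node, int cpu)
{
	/* parse per-CPU properties, map mailbox registers, ... */
	return 0;
}

static const struct cpuidle_ops myplat_cpuidle_ops = {
	.suspend = myplat_suspend,
	.init    = myplat_init,
};

CPUIDLE_METHOD_OF_DECLARE(myplat, "vendor,myplat", &myplat_cpuidle_ops);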
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
new file mode 100644
index 0000000000..0163c3e78a
--- /dev/null
+++ b/arch/arm/include/asm/cputype.h
@@ -0,0 +1,348 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_ARM_CPUTYPE_H
+#define __ASM_ARM_CPUTYPE_H
+
+#define CPUID_ID 0
+#define CPUID_CACHETYPE 1
+#define CPUID_TCM 2
+#define CPUID_TLBTYPE 3
+#define CPUID_MPUIR 4
+#define CPUID_MPIDR 5
+#define CPUID_REVIDR 6
+
+#ifdef CONFIG_CPU_V7M
+#define CPUID_EXT_PFR0 0x40
+#define CPUID_EXT_PFR1 0x44
+#define CPUID_EXT_DFR0 0x48
+#define CPUID_EXT_AFR0 0x4c
+#define CPUID_EXT_MMFR0 0x50
+#define CPUID_EXT_MMFR1 0x54
+#define CPUID_EXT_MMFR2 0x58
+#define CPUID_EXT_MMFR3 0x5c
+#define CPUID_EXT_ISAR0 0x60
+#define CPUID_EXT_ISAR1 0x64
+#define CPUID_EXT_ISAR2 0x68
+#define CPUID_EXT_ISAR3 0x6c
+#define CPUID_EXT_ISAR4 0x70
+#define CPUID_EXT_ISAR5 0x74
+#define CPUID_EXT_ISAR6 0x7c
+#define CPUID_EXT_PFR2 0x90
+#else
+#define CPUID_EXT_PFR0 "c1, 0"
+#define CPUID_EXT_PFR1 "c1, 1"
+#define CPUID_EXT_DFR0 "c1, 2"
+#define CPUID_EXT_AFR0 "c1, 3"
+#define CPUID_EXT_MMFR0 "c1, 4"
+#define CPUID_EXT_MMFR1 "c1, 5"
+#define CPUID_EXT_MMFR2 "c1, 6"
+#define CPUID_EXT_MMFR3 "c1, 7"
+#define CPUID_EXT_ISAR0 "c2, 0"
+#define CPUID_EXT_ISAR1 "c2, 1"
+#define CPUID_EXT_ISAR2 "c2, 2"
+#define CPUID_EXT_ISAR3 "c2, 3"
+#define CPUID_EXT_ISAR4 "c2, 4"
+#define CPUID_EXT_ISAR5 "c2, 5"
+#define CPUID_EXT_ISAR6 "c2, 7"
+#define CPUID_EXT_PFR2 "c3, 4"
+#endif
+
+#define MPIDR_SMP_BITMASK (0x3 << 30)
+#define MPIDR_SMP_VALUE (0x2 << 30)
+
+#define MPIDR_MT_BITMASK (0x1 << 24)
+
+#define MPIDR_HWID_BITMASK 0xFFFFFF
+
+#define MPIDR_INVALID (~MPIDR_HWID_BITMASK)
+
+#define MPIDR_LEVEL_BITS 8
+#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
+#define MPIDR_LEVEL_SHIFT(level) (MPIDR_LEVEL_BITS * level)
+
+#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
+ ((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK)
+
+#define ARM_CPU_IMP_ARM 0x41
+#define ARM_CPU_IMP_BRCM 0x42
+#define ARM_CPU_IMP_DEC 0x44
+#define ARM_CPU_IMP_INTEL 0x69
+
+/* ARM implemented processors */
+#define ARM_CPU_PART_ARM1136 0x4100b360
+#define ARM_CPU_PART_ARM1156 0x4100b560
+#define ARM_CPU_PART_ARM1176 0x4100b760
+#define ARM_CPU_PART_ARM11MPCORE 0x4100b020
+#define ARM_CPU_PART_CORTEX_A8 0x4100c080
+#define ARM_CPU_PART_CORTEX_A9 0x4100c090
+#define ARM_CPU_PART_CORTEX_A5 0x4100c050
+#define ARM_CPU_PART_CORTEX_A7 0x4100c070
+#define ARM_CPU_PART_CORTEX_A12 0x4100c0d0
+#define ARM_CPU_PART_CORTEX_A17 0x4100c0e0
+#define ARM_CPU_PART_CORTEX_A15 0x4100c0f0
+#define ARM_CPU_PART_CORTEX_A53 0x4100d030
+#define ARM_CPU_PART_CORTEX_A57 0x4100d070
+#define ARM_CPU_PART_CORTEX_A72 0x4100d080
+#define ARM_CPU_PART_CORTEX_A73 0x4100d090
+#define ARM_CPU_PART_CORTEX_A75 0x4100d0a0
+#define ARM_CPU_PART_MASK 0xff00fff0
+
+/* Broadcom implemented processors */
+#define ARM_CPU_PART_BRAHMA_B15 0x420000f0
+#define ARM_CPU_PART_BRAHMA_B53 0x42001000
+
+/* DEC implemented cores */
+#define ARM_CPU_PART_SA1100 0x4400a110
+
+/* Intel implemented cores */
+#define ARM_CPU_PART_SA1110 0x6900b110
+#define ARM_CPU_REV_SA1110_A0 0
+#define ARM_CPU_REV_SA1110_B0 4
+#define ARM_CPU_REV_SA1110_B1 5
+#define ARM_CPU_REV_SA1110_B2 6
+#define ARM_CPU_REV_SA1110_B4 8
+
+#define ARM_CPU_XSCALE_ARCH_MASK 0xe000
+#define ARM_CPU_XSCALE_ARCH_V1 0x2000
+#define ARM_CPU_XSCALE_ARCH_V2 0x4000
+#define ARM_CPU_XSCALE_ARCH_V3 0x6000
+
+/* Qualcomm implemented cores */
+#define ARM_CPU_PART_SCORPION 0x510002d0
+
+#ifndef __ASSEMBLY__
+
+#include <linux/stringify.h>
+#include <linux/kernel.h>
+
+extern unsigned int processor_id;
+struct proc_info_list *lookup_processor(u32 midr);
+
+#ifdef CONFIG_CPU_CP15
+#define read_cpuid(reg) \
+ ({ \
+ unsigned int __val; \
+ asm("mrc p15, 0, %0, c0, c0, " __stringify(reg) \
+ : "=r" (__val) \
+ : \
+ : "cc"); \
+ __val; \
+ })
+
+/*
+ * The memory clobber prevents gcc 4.5 from reordering the mrc before
+ * any is_smp() tests, which can cause undefined instruction aborts on
+ * ARM1136 r0 due to the missing extended CP15 registers.
+ */
+#define read_cpuid_ext(ext_reg) \
+ ({ \
+ unsigned int __val; \
+ asm("mrc p15, 0, %0, c0, " ext_reg \
+ : "=r" (__val) \
+ : \
+ : "memory"); \
+ __val; \
+ })
+
+#elif defined(CONFIG_CPU_V7M)
+
+#include <asm/io.h>
+#include <asm/v7m.h>
+
+#define read_cpuid(reg) \
+ ({ \
+ WARN_ON_ONCE(1); \
+ 0; \
+ })
+
+static inline unsigned int __attribute_const__ read_cpuid_ext(unsigned offset)
+{
+ return readl(BASEADDR_V7M_SCB + offset);
+}
+
+#else /* ifdef CONFIG_CPU_CP15 / elif defined (CONFIG_CPU_V7M) */
+
+/*
+ * read_cpuid and read_cpuid_ext should only ever be called on machines that
+ * have cp15, so warn on other usages.
+ */
+#define read_cpuid(reg) \
+ ({ \
+ WARN_ON_ONCE(1); \
+ 0; \
+ })
+
+#define read_cpuid_ext(reg) read_cpuid(reg)
+
+#endif /* ifdef CONFIG_CPU_CP15 / else */
+
+#ifdef CONFIG_CPU_CP15
+/*
+ * The CPU ID never changes at run time, so we might as well tell the
+ * compiler that it's constant. Use this function to read the CPU ID
+ * rather than reading processor_id or calling read_cpuid() directly.
+ */
+static inline unsigned int __attribute_const__ read_cpuid_id(void)
+{
+ return read_cpuid(CPUID_ID);
+}
+
+static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
+{
+ return read_cpuid(CPUID_CACHETYPE);
+}
+
+static inline unsigned int __attribute_const__ read_cpuid_mputype(void)
+{
+ return read_cpuid(CPUID_MPUIR);
+}
+
+#elif defined(CONFIG_CPU_V7M)
+
+static inline unsigned int __attribute_const__ read_cpuid_id(void)
+{
+ return readl(BASEADDR_V7M_SCB + V7M_SCB_CPUID);
+}
+
+static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
+{
+ return readl(BASEADDR_V7M_SCB + V7M_SCB_CTR);
+}
+
+static inline unsigned int __attribute_const__ read_cpuid_mputype(void)
+{
+ return readl(BASEADDR_V7M_SCB + MPU_TYPE);
+}
+
+#else /* ifdef CONFIG_CPU_CP15 / elif defined(CONFIG_CPU_V7M) */
+
+static inline unsigned int __attribute_const__ read_cpuid_id(void)
+{
+ return processor_id;
+}
+
+#endif /* ifdef CONFIG_CPU_CP15 / else */
+
+static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
+{
+ return (read_cpuid_id() & 0xFF000000) >> 24;
+}
+
+static inline unsigned int __attribute_const__ read_cpuid_revision(void)
+{
+ return read_cpuid_id() & 0x0000000f;
+}
+
+/*
+ * The CPU part number is meaningless without referring to the CPU
+ * implementer: implementers are free to define their own part numbers
+ * which are permitted to clash with other implementer part numbers.
+ */
+static inline unsigned int __attribute_const__ read_cpuid_part(void)
+{
+ return read_cpuid_id() & ARM_CPU_PART_MASK;
+}
+
+static inline unsigned int __attribute_const__ __deprecated read_cpuid_part_number(void)
+{
+ return read_cpuid_id() & 0xFFF0;
+}
+
+static inline unsigned int __attribute_const__ xscale_cpu_arch_version(void)
+{
+ return read_cpuid_id() & ARM_CPU_XSCALE_ARCH_MASK;
+}
+
+static inline unsigned int __attribute_const__ read_cpuid_tcmstatus(void)
+{
+ return read_cpuid(CPUID_TCM);
+}
+
+static inline unsigned int __attribute_const__ read_cpuid_mpidr(void)
+{
+ return read_cpuid(CPUID_MPIDR);
+}
+
+/* StrongARM-11x0 CPUs */
+#define cpu_is_sa1100() (read_cpuid_part() == ARM_CPU_PART_SA1100)
+#define cpu_is_sa1110() (read_cpuid_part() == ARM_CPU_PART_SA1110)
+
+/*
+ * Intel's XScale3 core supports some v6 features (supersections, L2)
+ * but advertises itself as v5 as it does not support the v6 ISA. For
+ * this reason, we need a way to explicitly test for this type of CPU.
+ */
+#ifndef CONFIG_CPU_XSC3
+#define cpu_is_xsc3() 0
+#else
+static inline int cpu_is_xsc3(void)
+{
+ unsigned int id;
+ id = read_cpuid_id() & 0xffffe000;
+ /* It covers both Intel ID and Marvell ID */
+ if ((id == 0x69056000) || (id == 0x56056000))
+ return 1;
+
+ return 0;
+}
+#endif
+
+#if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3) && \
+ !defined(CONFIG_CPU_MOHAWK)
+#define cpu_is_xscale_family() 0
+#else
+static inline int cpu_is_xscale_family(void)
+{
+ unsigned int id;
+ id = read_cpuid_id() & 0xffffe000;
+
+ switch (id) {
+ case 0x69052000: /* Intel XScale 1 */
+ case 0x69054000: /* Intel XScale 2 */
+ case 0x69056000: /* Intel XScale 3 */
+ case 0x56056000: /* Marvell XScale 3 */
+ case 0x56158000: /* Marvell Mohawk */
+ return 1;
+ }
+
+ return 0;
+}
+#endif
+
+/*
+ * Marvell's PJ4 and PJ4B cores are based on the ARMv7 architecture,
+ * but require a special sequence for enabling coprocessors.
+ * For this reason, we need a way to distinguish them.
+ */
+#if defined(CONFIG_CPU_PJ4) || defined(CONFIG_CPU_PJ4B)
+static inline int cpu_is_pj4(void)
+{
+ unsigned int id;
+
+ id = read_cpuid_id();
+ if ((id & 0xff0fff00) == 0x560f5800)
+ return 1;
+
+ return 0;
+}
+#else
+#define cpu_is_pj4() 0
+#endif
+
+static inline int __attribute_const__ cpuid_feature_extract_field(u32 features,
+ int field)
+{
+ int feature = (features >> field) & 15;
+
+ /* feature registers are signed values */
+ if (feature > 7)
+ feature -= 16;
+
+ return feature;
+}
+
+#define cpuid_feature_extract(reg, field) \
+ cpuid_feature_extract_field(read_cpuid_ext(reg), field)
+
+#endif /* __ASSEMBLY__ */
+
+#endif
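cpuid_feature_extract() returns the 4-bit field as a signed value, so "not implemented" (0xf) compares as negative. A sketch of the common probe pattern: ID_ISAR0 bits [27:24] encode divide support, which is how the kernel's own setup code detects SDIV/UDIV:

/* >= 1: SDIV/UDIV in Thumb state; >= 2: also in ARM state */
static bool have_hw_divide(void)
{
	return cpuid_feature_extract(CPUID_EXT_ISAR0, 24) >= 1;
}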
diff --git a/arch/arm/include/asm/cti.h b/arch/arm/include/asm/cti.h
new file mode 100644
index 0000000000..f8500e5d6e
--- /dev/null
+++ b/arch/arm/include/asm/cti.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASMARM_CTI_H
+#define __ASMARM_CTI_H
+
+#include <asm/io.h>
+#include <asm/hardware/coresight.h>
+
+/* The register definitions are from section 3.2 of
+ * Embedded Cross Trigger Revision: r0p0
+ */
+#define CTICONTROL 0x000
+#define CTISTATUS 0x004
+#define CTILOCK 0x008
+#define CTIPROTECTION 0x00C
+#define CTIINTACK 0x010
+#define CTIAPPSET 0x014
+#define CTIAPPCLEAR 0x018
+#define CTIAPPPULSE 0x01c
+#define CTIINEN 0x020
+#define CTIOUTEN 0x0A0
+#define CTITRIGINSTATUS 0x130
+#define CTITRIGOUTSTATUS 0x134
+#define CTICHINSTATUS 0x138
+#define CTICHOUTSTATUS 0x13c
+#define CTIPERIPHID0 0xFE0
+#define CTIPERIPHID1 0xFE4
+#define CTIPERIPHID2 0xFE8
+#define CTIPERIPHID3 0xFEC
+#define CTIPCELLID0 0xFF0
+#define CTIPCELLID1 0xFF4
+#define CTIPCELLID2 0xFF8
+#define CTIPCELLID3 0xFFC
+
+/* The following are from section 3.6.4 of
+ * CoreSight v1.0 Architecture Specification
+ */
+#define LOCKACCESS 0xFB0
+#define LOCKSTATUS 0xFB4
+
+/**
+ * struct cti - cross trigger interface struct
+ * @base: mapped virtual address for the cti base
+ * @irq: irq number for the cti
+ * @trig_out_for_irq: trigger out number which will cause
+ * the @irq to happen
+ *
+ * The cti struct is used to operate on the CTI registers.
+ */
+struct cti {
+ void __iomem *base;
+ int irq;
+ int trig_out_for_irq;
+};
+
+/**
+ * cti_init - initialize the cti instance
+ * @cti: cti instance
+ * @base: mapped virtual address for the cti base
+ * @irq: irq number for the cti
+ * @trig_out: trigger out number which will cause
+ * the @irq to happen
+ *
+ * called by machine-specific code to pass the board-dependent
+ * @base, @irq and @trig_out to cti.
+ */
+static inline void cti_init(struct cti *cti,
+ void __iomem *base, int irq, int trig_out)
+{
+ cti->base = base;
+ cti->irq = irq;
+ cti->trig_out_for_irq = trig_out;
+}
+
+/**
+ * cti_map_trigger - use channel @chan to map @trig_in to @trig_out
+ * @cti: cti instance
+ * @trig_in: trigger in number
+ * @trig_out: trigger out number
+ * @chan: channel number
+ *
+ * This function maps trigger input @trig_in to trigger output
+ * @trig_out using channel @chan.
+ */
+static inline void cti_map_trigger(struct cti *cti,
+ int trig_in, int trig_out, int chan)
+{
+ void __iomem *base = cti->base;
+ unsigned long val;
+
+ val = __raw_readl(base + CTIINEN + trig_in * 4);
+ val |= BIT(chan);
+ __raw_writel(val, base + CTIINEN + trig_in * 4);
+
+ val = __raw_readl(base + CTIOUTEN + trig_out * 4);
+ val |= BIT(chan);
+ __raw_writel(val, base + CTIOUTEN + trig_out * 4);
+}
+
+/**
+ * cti_enable - enable the cti module
+ * @cti: cti instance
+ *
+ * enable the cti module
+ */
+static inline void cti_enable(struct cti *cti)
+{
+ __raw_writel(0x1, cti->base + CTICONTROL);
+}
+
+/**
+ * cti_disable - disable the cti module
+ * @cti: cti instance
+ *
+ * disable the cti module
+ */
+static inline void cti_disable(struct cti *cti)
+{
+ __raw_writel(0, cti->base + CTICONTROL);
+}
+
+/**
+ * cti_irq_ack - clear the cti irq
+ * @cti: cti instance
+ *
+ * clear the cti irq
+ */
+static inline void cti_irq_ack(struct cti *cti)
+{
+ void __iomem *base = cti->base;
+ unsigned long val;
+
+ val = __raw_readl(base + CTIINTACK);
+ val |= BIT(cti->trig_out_for_irq);
+ __raw_writel(val, base + CTIINTACK);
+}
+
+/**
+ * cti_unlock - unlock cti module
+ * @cti: cti instance
+ *
+ * unlock the cti module; otherwise any writes to the cti
+ * module are not allowed.
+ */
+static inline void cti_unlock(struct cti *cti)
+{
+ __raw_writel(CS_LAR_KEY, cti->base + LOCKACCESS);
+}
+
+/**
+ * cti_lock - lock cti module
+ * @cti: cti instance
+ *
+ * lock the cti module, so that any writes to the cti
+ * module will not be allowed.
+ */
+static inline void cti_lock(struct cti *cti)
+{
+ __raw_writel(~CS_LAR_KEY, cti->base + LOCKACCESS);
+}
+#endif
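Putting the helpers above together, the intended call sequence is init, unlock, map, enable. A sketch with placeholder trigger/channel numbers (a real board would ioremap() its CTI base first):

static struct cti board_cti;

static void board_cti_setup(void __iomem *base, int irq)
{
	cti_init(&board_cti, base, irq, 1);	/* trigger out 1 raises irq */
	cti_unlock(&board_cti);			/* writes are ignored while locked */
	cti_map_trigger(&board_cti, 0, 1, 0);	/* in 0 -> out 1 via channel 0 */
	cti_enable(&board_cti);
}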
diff --git a/arch/arm/include/asm/current.h b/arch/arm/include/asm/current.h
new file mode 100644
index 0000000000..1e1178bf17
--- /dev/null
+++ b/arch/arm/include/asm/current.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 Keith Packard <keithp@keithp.com>
+ * Copyright (c) 2021 Google, LLC <ardb@kernel.org>
+ */
+
+#ifndef _ASM_ARM_CURRENT_H
+#define _ASM_ARM_CURRENT_H
+
+#ifndef __ASSEMBLY__
+#include <asm/insn.h>
+
+struct task_struct;
+
+extern struct task_struct *__current;
+
+static __always_inline __attribute_const__ struct task_struct *get_current(void)
+{
+ struct task_struct *cur;
+
+#if __has_builtin(__builtin_thread_pointer) && \
+ defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) && \
+ !(defined(CONFIG_THUMB2_KERNEL) && \
+ defined(CONFIG_CC_IS_CLANG) && CONFIG_CLANG_VERSION < 130001)
+ /*
+ * Use the __builtin helper when available - this results in better
+ * code, especially when using GCC in combination with the per-task
+ * stack protector, as the compiler will recognize that it needs to
+ * load the TLS register only once in every function.
+ *
+ * Clang < 13.0.1 gets this wrong for Thumb2 builds:
+ * https://github.com/ClangBuiltLinux/linux/issues/1485
+ */
+ cur = __builtin_thread_pointer();
+#elif defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
+ asm("0: mrc p15, 0, %0, c13, c0, 3 \n\t"
+#ifdef CONFIG_CPU_V6
+ "1: \n\t"
+ " .subsection 1 \n\t"
+#if defined(CONFIG_ARM_HAS_GROUP_RELOCS) && \
+ !(defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
+ "2: " LOAD_SYM_ARMV6(%0, __current) " \n\t"
+ " b 1b \n\t"
+#else
+ "2: ldr %0, 3f \n\t"
+ " ldr %0, [%0] \n\t"
+ " b 1b \n\t"
+ "3: .long __current \n\t"
+#endif
+ " .previous \n\t"
+ " .pushsection \".alt.smp.init\", \"a\" \n\t"
+ " .long 0b - . \n\t"
+ " b . + (2b - 0b) \n\t"
+ " .popsection \n\t"
+#endif
+ : "=r"(cur));
+#elif __LINUX_ARM_ARCH__ >= 7 || \
+ !defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \
+ (defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
+ cur = __current;
+#else
+ asm(LOAD_SYM_ARMV6(%0, __current) : "=r"(cur));
+#endif
+ return cur;
+}
+
+#define current get_current()
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_ARM_CURRENT_H */
diff --git a/arch/arm/include/asm/dcc.h b/arch/arm/include/asm/dcc.h
new file mode 100644
index 0000000000..d24c4be724
--- /dev/null
+++ b/arch/arm/include/asm/dcc.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2010, 2014 The Linux Foundation. All rights reserved.
+ */
+
+#include <asm/barrier.h>
+
+static inline u32 __dcc_getstatus(void)
+{
+ u32 __ret;
+ asm volatile("mrc p14, 0, %0, c0, c1, 0 @ read comms ctrl reg"
+ : "=r" (__ret) : : "cc");
+
+ return __ret;
+}
+
+static inline char __dcc_getchar(void)
+{
+ char __c;
+
+ asm volatile("mrc p14, 0, %0, c0, c5, 0 @ read comms data reg"
+ : "=r" (__c));
+ isb();
+
+ return __c;
+}
+
+static inline void __dcc_putchar(char c)
+{
+ asm volatile("mcr p14, 0, %0, c0, c5, 0 @ write a char"
+ : /* no output register */
+ : "r" (c));
+ isb();
+}
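The status register gates both data-register helpers. A sketch of a polled transmit, assuming the RX/TX-full bit positions used by the hvc_dcc driver (bits 30 and 29; treat them as assumptions here):

#define DCC_STATUS_RX	(1 << 30)	/* read buffer full */
#define DCC_STATUS_TX	(1 << 29)	/* write buffer full */

static void dcc_putchar_polled(char c)
{
	/* wait for the debugger to drain the previous character */
	while (__dcc_getstatus() & DCC_STATUS_TX)
		cpu_relax();
	__dcc_putchar(c);
}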
diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h
new file mode 100644
index 0000000000..1d069e558d
--- /dev/null
+++ b/arch/arm/include/asm/delay.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 1995-2004 Russell King
+ *
+ * Delay routines, using a pre-computed "loops_per_second" value.
+ */
+#ifndef __ASM_ARM_DELAY_H
+#define __ASM_ARM_DELAY_H
+
+#include <asm/page.h>
+#include <asm/param.h> /* HZ */
+
+/*
+ * Loop (or tick) based delay:
+ *
+ * loops = loops_per_jiffy * jiffies_per_sec * delay_us / us_per_sec
+ *
+ * where:
+ *
+ * jiffies_per_sec = HZ
+ * us_per_sec = 1000000
+ *
+ * Therefore the constant part is HZ / 1000000 which is a small
+ * fractional number. To make this usable with integer math, we
+ * scale up this constant by 2^31, perform the actual multiplication,
+ * and scale the result back down by 2^31 with a simple shift:
+ *
+ * loops = (loops_per_jiffy * delay_us * UDELAY_MULT) >> 31
+ *
+ * where:
+ *
+ * UDELAY_MULT = 2^31 * HZ / 1000000
+ * = (2^31 / 1000000) * HZ
+ * = 2147.483648 * HZ
+ * = 2147 * HZ + 483648 * HZ / 1000000
+ *
+ * 31 is the biggest scale shift value that won't overflow 32 bits for
+ * delay_us * UDELAY_MULT assuming HZ <= 1000 and delay_us <= 2000.
+ */
+#define MAX_UDELAY_MS 2
+#define UDELAY_MULT UL(2147 * HZ + 483648 * HZ / 1000000)
+#define UDELAY_SHIFT 31
+
+#ifndef __ASSEMBLY__
+
+struct delay_timer {
+ unsigned long (*read_current_timer)(void);
+ unsigned long freq;
+};
+
+extern struct arm_delay_ops {
+ void (*delay)(unsigned long);
+ void (*const_udelay)(unsigned long);
+ void (*udelay)(unsigned long);
+ unsigned long ticks_per_jiffy;
+} arm_delay_ops;
+
+#define __delay(n) arm_delay_ops.delay(n)
+
+/*
+ * This function intentionally does not exist; if you see references to
+ * it, it means that you're calling udelay() with an out of range value.
+ *
+ * With currently imposed limits, this means that we support a max delay
+ * of 2000us. Further limits: HZ<=1000
+ */
+extern void __bad_udelay(void);
+
+/*
+ * division by multiplication: you don't have to worry about
+ * loss of precision.
+ *
+ * Use only for very small delays ( < 2 msec). Should probably use a
+ * lookup table, really, as the multiplications take much too long with
+ * short delays. This is a "reasonable" implementation, though (and the
+ * first constant multiplication gets optimized away if the delay is
+ * a constant)
+ */
+#define __udelay(n) arm_delay_ops.udelay(n)
+#define __const_udelay(n) arm_delay_ops.const_udelay(n)
+
+#define udelay(n) \
+ (__builtin_constant_p(n) ? \
+ ((n) > (MAX_UDELAY_MS * 1000) ? __bad_udelay() : \
+ __const_udelay((n) * UDELAY_MULT)) : \
+ __udelay(n))
+
+/* Loop-based definitions for assembly code. */
+extern void __loop_delay(unsigned long loops);
+extern void __loop_udelay(unsigned long usecs);
+extern void __loop_const_udelay(unsigned long);
+
+/* Delay-loop timer registration. */
+#define ARCH_HAS_READ_CURRENT_TIMER
+extern void register_current_timer_delay(const struct delay_timer *timer);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_ARM_DELAY_H */
+
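A quick sanity check of the scaling above, assuming HZ = 100 and loops_per_jiffy = 1000000 (i.e. 10^8 delay loops per second):

    UDELAY_MULT = 2147 * 100 + 483648 * 100 / 1000000
                = 214700 + 48 = 214748        (exact value: 214748.3648)

    loops for udelay(10) = (1000000 * 10 * 214748) >> 31 = 999

which is within one loop of the ideal 1000 loops for a 10 us delay.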
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
new file mode 100644
index 0000000000..c6beb1708c
--- /dev/null
+++ b/arch/arm/include/asm/device.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Arch specific extensions to struct device
+ */
+#ifndef ASMARM_DEVICE_H
+#define ASMARM_DEVICE_H
+
+struct dev_archdata {
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+ struct dma_iommu_mapping *mapping;
+#endif
+ unsigned int dma_ops_setup:1;
+};
+
+struct omap_device;
+
+struct pdev_archdata {
+#ifdef CONFIG_ARCH_OMAP
+ struct omap_device *od;
+#endif
+};
+
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+#define to_dma_iommu_mapping(dev) ((dev)->archdata.mapping)
+#else
+#define to_dma_iommu_mapping(dev) NULL
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/div64.h b/arch/arm/include/asm/div64.h
new file mode 100644
index 0000000000..4b69cf8504
--- /dev/null
+++ b/arch/arm/include/asm/div64.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_ARM_DIV64
+#define __ASM_ARM_DIV64
+
+#include <linux/types.h>
+#include <asm/compiler.h>
+
+/*
+ * The semantics of __div64_32() are:
+ *
+ * uint32_t __div64_32(uint64_t *n, uint32_t base)
+ * {
+ * uint32_t remainder = *n % base;
+ * *n = *n / base;
+ * return remainder;
+ * }
+ *
+ * In other words, a 64-bit dividend with a 32-bit divisor producing
+ * a 64-bit result and a 32-bit remainder. To accomplish this optimally
+ * we override the generic version in lib/div64.c to call our __do_div64
+ * assembly implementation, which has a completely non-standard calling
+ * convention for arguments and results (beware).
+ */
+static inline uint32_t __div64_32(uint64_t *n, uint32_t base)
+{
+ register unsigned int __base asm("r4") = base;
+ register unsigned long long __n asm("r0") = *n;
+ register unsigned long long __res asm("r2");
+ unsigned int __rem;
+ asm( __asmeq("%0", "r0")
+ __asmeq("%1", "r2")
+ __asmeq("%2", "r4")
+ "bl __do_div64"
+ : "+r" (__n), "=r" (__res)
+ : "r" (__base)
+ : "ip", "lr", "cc");
+ __rem = __n >> 32;
+ *n = __res;
+ return __rem;
+}
+#define __div64_32 __div64_32
+
+#if !defined(CONFIG_AEABI)
+
+/*
+ * In OABI configurations, some uses of the do_div function
+ * cause gcc to run out of registers. To work around that,
+ * we can force the use of the out-of-line version for
+ * configurations that build an OABI kernel.
+ */
+#define do_div(n, base) __div64_32(&(n), base)
+
+#else
+
+static inline uint64_t __arch_xprod_64(uint64_t m, uint64_t n, bool bias)
+{
+ unsigned long long res;
+ register unsigned int tmp asm("ip") = 0;
+
+ if (!bias) {
+ asm ( "umull %Q0, %R0, %Q1, %Q2\n\t"
+ "mov %Q0, #0"
+ : "=&r" (res)
+ : "r" (m), "r" (n)
+ : "cc");
+ } else if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
+ res = m;
+ asm ( "umlal %Q0, %R0, %Q1, %Q2\n\t"
+ "mov %Q0, #0"
+ : "+&r" (res)
+ : "r" (m), "r" (n)
+ : "cc");
+ } else {
+ asm ( "umull %Q0, %R0, %Q2, %Q3\n\t"
+ "cmn %Q0, %Q2\n\t"
+ "adcs %R0, %R0, %R2\n\t"
+ "adc %Q0, %1, #0"
+ : "=&r" (res), "+&r" (tmp)
+ : "r" (m), "r" (n)
+ : "cc");
+ }
+
+ if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
+ asm ( "umlal %R0, %Q0, %R1, %Q2\n\t"
+ "umlal %R0, %Q0, %Q1, %R2\n\t"
+ "mov %R0, #0\n\t"
+ "umlal %Q0, %R0, %R1, %R2"
+ : "+&r" (res)
+ : "r" (m), "r" (n)
+ : "cc");
+ } else {
+ asm ( "umlal %R0, %Q0, %R2, %Q3\n\t"
+ "umlal %R0, %1, %Q2, %R3\n\t"
+ "mov %R0, #0\n\t"
+ "adds %Q0, %1, %Q0\n\t"
+ "adc %R0, %R0, #0\n\t"
+ "umlal %Q0, %R0, %R2, %R3"
+ : "+&r" (res), "+&r" (tmp)
+ : "r" (m), "r" (n)
+ : "cc");
+ }
+
+ return res;
+}
+#define __arch_xprod_64 __arch_xprod_64
+
+#include <asm-generic/div64.h>
+
+#endif
+
+#endif
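The semantics described at the top of the file are exactly what the generic do_div() macro exposes. A small sketch (the helper name is hypothetical):

/* Split a nanosecond count into whole seconds plus remainder.
 * do_div() divides the u64 in place and returns the 32-bit remainder.
 */
static u32 ns_to_secs_rem(u64 *ns)
{
	return do_div(*ns, 1000000000U);	/* *ns now holds seconds */
}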
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
new file mode 100644
index 0000000000..82ec1ccf1f
--- /dev/null
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ASMARM_DMA_IOMMU_H
+#define ASMARM_DMA_IOMMU_H
+
+#ifdef __KERNEL__
+
+#include <linux/mm_types.h>
+#include <linux/scatterlist.h>
+#include <linux/kref.h>
+
+struct dma_iommu_mapping {
+ /* iommu specific data */
+ struct iommu_domain *domain;
+
+ unsigned long **bitmaps; /* array of bitmaps */
+ unsigned int nr_bitmaps; /* nr of elements in array */
+ unsigned int extensions;
+ size_t bitmap_size; /* size of a single bitmap */
+ size_t bits; /* per bitmap */
+ dma_addr_t base;
+
+ spinlock_t lock;
+ struct kref kref;
+};
+
+struct dma_iommu_mapping *
+arm_iommu_create_mapping(const struct bus_type *bus, dma_addr_t base, u64 size);
+
+void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
+
+int arm_iommu_attach_device(struct device *dev,
+ struct dma_iommu_mapping *mapping);
+void arm_iommu_detach_device(struct device *dev);
+
+#endif /* __KERNEL__ */
+#endif
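Typical usage creates a mapping window once and attaches devices to it. A sketch with an assumed IOVA base and size (error handling abbreviated):

static int attach_dev_to_iommu(struct device *dev)
{
	struct dma_iommu_mapping *mapping;

	/* 128 MiB of IOVA space starting at an assumed base address */
	mapping = arm_iommu_create_mapping(&platform_bus_type,
					   0x80000000, SZ_128M);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	return arm_iommu_attach_device(dev, mapping);
}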
diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h
new file mode 100644
index 0000000000..e2a1916013
--- /dev/null
+++ b/arch/arm/include/asm/dma.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_ARM_DMA_H
+#define __ASM_ARM_DMA_H
+
+/*
+ * This is the maximum virtual address which can be DMA'd from.
+ */
+#ifndef CONFIG_ZONE_DMA
+#define MAX_DMA_ADDRESS 0xffffffffUL
+#else
+#define MAX_DMA_ADDRESS ({ \
+ extern phys_addr_t arm_dma_zone_size; \
+ arm_dma_zone_size && arm_dma_zone_size < (0x100000000ULL - PAGE_OFFSET) ? \
+ (PAGE_OFFSET + arm_dma_zone_size) : 0xffffffffUL; })
+
+extern phys_addr_t arm_dma_limit;
+#define ARCH_LOW_ADDRESS_LIMIT arm_dma_limit
+#endif
+
+#ifdef CONFIG_ISA_DMA_API
+/*
+ * This is used to support drivers written for the x86 ISA DMA API.
+ * It should not be re-used except for that purpose.
+ */
+#include <linux/spinlock.h>
+#include <linux/scatterlist.h>
+
+#include <mach/isa-dma.h>
+
+/*
+ * The DMA modes reflect the settings for the ISA DMA controller
+ */
+#define DMA_MODE_MASK 0xcc
+
+#define DMA_MODE_READ 0x44
+#define DMA_MODE_WRITE 0x48
+#define DMA_MODE_CASCADE 0xc0
+#define DMA_AUTOINIT 0x10
+
+extern raw_spinlock_t dma_spin_lock;
+
+static inline unsigned long claim_dma_lock(void)
+{
+ unsigned long flags;
+ raw_spin_lock_irqsave(&dma_spin_lock, flags);
+ return flags;
+}
+
+static inline void release_dma_lock(unsigned long flags)
+{
+ raw_spin_unlock_irqrestore(&dma_spin_lock, flags);
+}
+
+/* Clear the 'DMA Pointer Flip Flop'.
+ * Write 0 for LSB/MSB, 1 for MSB/LSB access.
+ */
+#define clear_dma_ff(chan)
+
+/* Set only the page register bits of the transfer address.
+ *
+ * NOTE: This is an architecture specific function, and should
+ * be hidden from the drivers
+ */
+extern void set_dma_page(unsigned int chan, char pagenr);
+
+/* Request a DMA channel
+ *
+ * Some architectures may need to allocate an interrupt
+ */
+extern int request_dma(unsigned int chan, const char * device_id);
+
+/* Free a DMA channel
+ *
+ * Some architectures may need to free an interrupt
+ */
+extern void free_dma(unsigned int chan);
+
+/* Enable DMA for this channel
+ *
+ * On some architectures, this may have other side effects like
+ * enabling an interrupt and setting the DMA registers.
+ */
+extern void enable_dma(unsigned int chan);
+
+/* Disable DMA for this channel
+ *
+ * On some architectures, this may have other side effects like
+ * disabling an interrupt or whatever.
+ */
+extern void disable_dma(unsigned int chan);
+
+/* Test whether the specified channel has an active DMA transfer
+ */
+extern int dma_channel_active(unsigned int chan);
+
+/* Set the DMA scatter gather list for this channel
+ *
+ * This should not be called if a DMA channel is enabled,
+ * especially since some DMA architectures don't update the
+ * DMA address immediately, but defer it until enable_dma().
+ */
+extern void set_dma_sg(unsigned int chan, struct scatterlist *sg, int nr_sg);
+
+/* Set the DMA address for this channel
+ *
+ * This should not be called if a DMA channel is enabled,
+ * especially since some DMA architectures don't update the
+ * DMA address immediately, but defer it until enable_dma().
+ */
+extern void __set_dma_addr(unsigned int chan, void *addr);
+#define set_dma_addr(chan, addr) \
+ __set_dma_addr(chan, (void *)isa_bus_to_virt(addr))
+
+/* Set the DMA byte count for this channel
+ *
+ * This should not be called if a DMA channel is enabled,
+ * especially since some DMA architectures don't update the
+ * DMA count immediately, but defer it until enable_dma().
+ */
+extern void set_dma_count(unsigned int chan, unsigned long count);
+
+/* Set the transfer direction for this channel
+ *
+ * This should not be called if a DMA channel is enabled,
+ * especially since some DMA architectures don't update the
+ * DMA transfer direction immediately, but defer it until
+ * enable_dma().
+ */
+extern void set_dma_mode(unsigned int chan, unsigned int mode);
+
+/* Set the transfer speed for this channel
+ */
+extern void set_dma_speed(unsigned int chan, int cycle_ns);
+
+/* Get DMA residue count. After a DMA transfer, this
+ * should return zero. Reading this while a DMA transfer is
+ * still in progress will return unpredictable results.
+ * If called before the channel has been used, it may return 1.
+ * Otherwise, it returns the number of _bytes_ left to transfer.
+ */
+extern int get_dma_residue(unsigned int chan);
+
+#ifndef NO_DMA
+#define NO_DMA 255
+#endif
+
+#endif /* CONFIG_ISA_DMA_API */
+
+#endif /* __ASM_ARM_DMA_H */
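The ISA API is meant to be driven under the DMA lock. A sketch of the canonical program-and-go sequence (channel, buffer and length are caller-supplied):

static void isa_dma_start_read(unsigned int chan, void *buf, unsigned long len)
{
	unsigned long flags = claim_dma_lock();

	disable_dma(chan);
	clear_dma_ff(chan);
	set_dma_mode(chan, DMA_MODE_READ);
	__set_dma_addr(chan, buf);
	set_dma_count(chan, len);
	enable_dma(chan);

	release_dma_lock(flags);
}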
diff --git a/arch/arm/include/asm/dmi.h b/arch/arm/include/asm/dmi.h
new file mode 100644
index 0000000000..32c95dad4c
--- /dev/null
+++ b/arch/arm/include/asm/dmi.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_DMI_H
+#define __ASM_DMI_H
+
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#define dmi_early_remap(x, l) memremap(x, l, MEMREMAP_WB)
+#define dmi_early_unmap(x, l) memunmap(x)
+#define dmi_remap(x, l) memremap(x, l, MEMREMAP_WB)
+#define dmi_unmap(x) memunmap(x)
+#define dmi_alloc(l) kzalloc(l, GFP_KERNEL)
+
+#endif
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
new file mode 100644
index 0000000000..41536feb43
--- /dev/null
+++ b/arch/arm/include/asm/domain.h
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/domain.h
+ *
+ * Copyright (C) 1999 Russell King.
+ */
+#ifndef __ASM_PROC_DOMAIN_H
+#define __ASM_PROC_DOMAIN_H
+
+#ifndef __ASSEMBLY__
+#include <asm/barrier.h>
+#include <asm/thread_info.h>
+#endif
+
+/*
+ * Domain numbers
+ *
+ * DOMAIN_IO - domain 2 includes all IO only
+ * DOMAIN_USER - domain 1 includes all user memory only
+ * DOMAIN_KERNEL - domain 0 includes all kernel memory only
+ *
+ * The domain numbering depends on whether we support 36-bit physical
+ * addresses for I/O or not. Addresses above the 32-bit boundary can
+ * only be mapped using supersections and supersections can only
+ * be set for domain 0. We could just default to DOMAIN_IO as zero,
+ * but there may be systems with supersection support and no 36-bit
+ * addressing. In such cases, we want to map system memory with
+ * supersections to reduce TLB misses and footprint.
+ *
+ * 36-bit addressing and supersections are only available on
+ * CPUs based on ARMv6+ or the Intel XSC3 core.
+ */
+#ifndef CONFIG_IO_36
+#define DOMAIN_KERNEL 0
+#define DOMAIN_USER 1
+#define DOMAIN_IO 2
+#else
+#define DOMAIN_KERNEL 2
+#define DOMAIN_USER 1
+#define DOMAIN_IO 0
+#endif
+#define DOMAIN_VECTORS 3
+
+/*
+ * Domain types
+ */
+#define DOMAIN_NOACCESS 0
+#define DOMAIN_CLIENT 1
+#ifdef CONFIG_CPU_USE_DOMAINS
+#define DOMAIN_MANAGER 3
+#else
+#define DOMAIN_MANAGER 1
+#endif
+
+#define domain_mask(dom) ((3) << (2 * (dom)))
+#define domain_val(dom,type) ((type) << (2 * (dom)))
+
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+#define DACR_INIT \
+ (domain_val(DOMAIN_USER, DOMAIN_NOACCESS) | \
+ domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+ domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+ domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
+#else
+#define DACR_INIT \
+ (domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \
+ domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+ domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+ domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
+#endif
+
+#define __DACR_DEFAULT \
+ domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT) | \
+ domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+ domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT)
+
+#define DACR_UACCESS_DISABLE \
+ (__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
+#define DACR_UACCESS_ENABLE \
+ (__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_CLIENT))
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_CPU_CP15_MMU
+static __always_inline unsigned int get_domain(void)
+{
+ unsigned int domain;
+
+ asm(
+ "mrc p15, 0, %0, c3, c0 @ get domain"
+ : "=r" (domain)
+ : "m" (current_thread_info()->cpu_domain));
+
+ return domain;
+}
+
+static __always_inline void set_domain(unsigned int val)
+{
+ asm volatile(
+ "mcr p15, 0, %0, c3, c0 @ set domain"
+ : : "r" (val) : "memory");
+ isb();
+}
+#else
+static __always_inline unsigned int get_domain(void)
+{
+ return 0;
+}
+
+static __always_inline void set_domain(unsigned int val)
+{
+}
+#endif
+
+/*
+ * Generate the T (user) versions of the LDR/STR and related
+ * instructions (inline assembly)
+ */
+#ifdef CONFIG_CPU_USE_DOMAINS
+#define TUSER(instr) TUSERCOND(instr, )
+#define TUSERCOND(instr, cond) #instr "t" #cond
+#else
+#define TUSER(instr) TUSERCOND(instr, )
+#define TUSERCOND(instr, cond) #instr #cond
+#endif
+
+#else /* __ASSEMBLY__ */
+
+/*
+ * Generate the T (user) versions of the LDR/STR and related
+ * instructions
+ */
+#ifdef CONFIG_CPU_USE_DOMAINS
+#define TUSER(instr) instr ## t
+#else
+#define TUSER(instr) instr
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* !__ASM_PROC_DOMAIN_H */
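domain_mask()/domain_val() are the building blocks for every DACR word above. A sketch of rewriting one domain field at run time, mirroring what DACR_UACCESS_ENABLE encodes statically (the kernel's real uaccess helpers build on that constant; this helper name is hypothetical):

static inline void user_domain_client(void)
{
	unsigned int dacr = get_domain();

	/* clear the USER field, then grant client access */
	dacr &= ~domain_mask(DOMAIN_USER);
	dacr |= domain_val(DOMAIN_USER, DOMAIN_CLIENT);
	set_domain(dacr);
}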
diff --git a/arch/arm/include/asm/ecard.h b/arch/arm/include/asm/ecard.h
new file mode 100644
index 0000000000..4befe8d2ae
--- /dev/null
+++ b/arch/arm/include/asm/ecard.h
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * arch/arm/include/asm/ecard.h
+ *
+ * definitions for expansion cards
+ *
+ * This is a new system as of Linux 1.2.3
+ *
+ * Changelog:
+ * 11-12-1996 RMK Further minor improvements
+ * 12-09-1997 RMK Added interrupt enable/disable for card level
+ *
+ * Reference: Acorn's RISC OS 3 Programmer's Reference Manuals.
+ */
+
+#ifndef __ASM_ECARD_H
+#define __ASM_ECARD_H
+
+/*
+ * Currently understood cards (but not necessarily
+ * supported):
+ * Manufacturer Product ID
+ */
+#define MANU_ACORN 0x0000
+#define PROD_ACORN_SCSI 0x0002
+#define PROD_ACORN_ETHER1 0x0003
+#define PROD_ACORN_MFM 0x000b
+
+#define MANU_ANT2 0x0011
+#define PROD_ANT_ETHER3 0x00a4
+
+#define MANU_ATOMWIDE 0x0017
+#define PROD_ATOMWIDE_3PSERIAL 0x0090
+
+#define MANU_IRLAM_INSTRUMENTS 0x001f
+#define MANU_IRLAM_INSTRUMENTS_ETHERN 0x5678
+
+#define MANU_OAK 0x0021
+#define PROD_OAK_SCSI 0x0058
+
+#define MANU_MORLEY 0x002b
+#define PROD_MORLEY_SCSI_UNCACHED 0x0067
+
+#define MANU_CUMANA 0x003a
+#define PROD_CUMANA_SCSI_2 0x003a
+#define PROD_CUMANA_SCSI_1 0x00a0
+
+#define MANU_ICS 0x003c
+#define PROD_ICS_IDE 0x00ae
+
+#define MANU_ICS2 0x003d
+#define PROD_ICS2_IDE 0x00ae
+
+#define MANU_SERPORT 0x003f
+#define PROD_SERPORT_DSPORT 0x00b9
+
+#define MANU_ARXE 0x0041
+#define PROD_ARXE_SCSI 0x00be
+
+#define MANU_I3 0x0046
+#define PROD_I3_ETHERLAN500 0x00d4
+#define PROD_I3_ETHERLAN600 0x00ec
+#define PROD_I3_ETHERLAN600A 0x011e
+
+#define MANU_ANT 0x0053
+#define PROD_ANT_ETHERM 0x00d8
+#define PROD_ANT_ETHERB 0x00e4
+
+#define MANU_ALSYSTEMS 0x005b
+#define PROD_ALSYS_SCSIATAPI 0x0107
+
+#define MANU_MCS 0x0063
+#define PROD_MCS_CONNECT32 0x0125
+
+#define MANU_EESOX 0x0064
+#define PROD_EESOX_SCSI2 0x008c
+
+#define MANU_YELLOWSTONE 0x0096
+#define PROD_YELLOWSTONE_RAPIDE32 0x0120
+
+#ifdef ECARD_C
+#define CONST
+#else
+#define CONST const
+#endif
+
+#define MAX_ECARDS 9
+
+struct ecard_id { /* Card ID structure */
+ unsigned short manufacturer;
+ unsigned short product;
+ void *data;
+};
+
+struct in_ecid { /* Packed card ID information */
+ unsigned short product; /* Product code */
+ unsigned short manufacturer; /* Manufacturer code */
+ unsigned char id:4; /* Simple ID */
+ unsigned char cd:1; /* Chunk dir present */
+ unsigned char is:1; /* Interrupt status pointers */
+ unsigned char w:2; /* Width */
+ unsigned char country; /* Country */
+ unsigned char irqmask; /* IRQ mask */
+ unsigned char fiqmask; /* FIQ mask */
+ unsigned long irqoff; /* IRQ offset */
+ unsigned long fiqoff; /* FIQ offset */
+};
+
+typedef struct expansion_card ecard_t;
+typedef unsigned long *loader_t;
+
+typedef struct expansion_card_ops { /* Card handler routines */
+ void (*irqenable)(ecard_t *ec, int irqnr);
+ void (*irqdisable)(ecard_t *ec, int irqnr);
+ int (*irqpending)(ecard_t *ec);
+ void (*fiqenable)(ecard_t *ec, int fiqnr);
+ void (*fiqdisable)(ecard_t *ec, int fiqnr);
+ int (*fiqpending)(ecard_t *ec);
+} expansioncard_ops_t;
+
+#define ECARD_NUM_RESOURCES (6)
+
+#define ECARD_RES_IOCSLOW (0)
+#define ECARD_RES_IOCMEDIUM (1)
+#define ECARD_RES_IOCFAST (2)
+#define ECARD_RES_IOCSYNC (3)
+#define ECARD_RES_MEMC (4)
+#define ECARD_RES_EASI (5)
+
+#define ecard_resource_start(ec,nr) ((ec)->resource[nr].start)
+#define ecard_resource_end(ec,nr) ((ec)->resource[nr].end)
+#define ecard_resource_len(ec,nr) ((ec)->resource[nr].end - \
+ (ec)->resource[nr].start + 1)
+#define ecard_resource_flags(ec,nr) ((ec)->resource[nr].flags)
+
+/*
+ * This contains all the info needed on an expansion card
+ */
+struct expansion_card {
+ struct expansion_card *next;
+
+ struct device dev;
+ struct resource resource[ECARD_NUM_RESOURCES];
+
+ /* Public data */
+ void __iomem *irqaddr; /* address of IRQ register */
+ void __iomem *fiqaddr; /* address of FIQ register */
+ unsigned char irqmask; /* IRQ mask */
+ unsigned char fiqmask; /* FIQ mask */
+ unsigned char claimed; /* Card claimed? */
+ unsigned char easi; /* EASI card */
+
+ void *irq_data; /* Data for use for IRQ by card */
+ void *fiq_data; /* Data for use for FIQ by card */
+ const expansioncard_ops_t *ops; /* Enable/Disable Ops for card */
+
+ CONST unsigned int slot_no; /* Slot number */
+ CONST unsigned int dma; /* DMA number (for request_dma) */
+ CONST unsigned int irq; /* IRQ number (for request_irq) */
+ CONST unsigned int fiq; /* FIQ number (for request_irq) */
+ CONST struct in_ecid cid; /* Card Identification */
+
+ /* Private internal data */
+ const char *card_desc; /* Card description */
+ CONST loader_t loader; /* loader program */
+ u64 dma_mask;
+};
+
+void ecard_setirq(struct expansion_card *ec, const struct expansion_card_ops *ops, void *irq_data);
+
+struct in_chunk_dir {
+ unsigned int start_offset;
+ union {
+ unsigned char string[256];
+ unsigned char data[1];
+ } d;
+};
+
+/*
+ * Read a chunk from an expansion card
+ * cd : where to put read data
+ * ec : expansion card info struct
+ * id : id number to find
+ * num: (n+1)'th id to find.
+ */
+extern int ecard_readchunk (struct in_chunk_dir *cd, struct expansion_card *ec, int id, int num);
+
+/*
+ * Request and release ecard resources
+ */
+extern int ecard_request_resources(struct expansion_card *ec);
+extern void ecard_release_resources(struct expansion_card *ec);
+
+void __iomem *ecardm_iomap(struct expansion_card *ec, unsigned int res,
+ unsigned long offset, unsigned long maxsize);
+#define ecardm_iounmap(__ec, __addr) devm_iounmap(&(__ec)->dev, __addr)
+
+extern struct bus_type ecard_bus_type;
+
+#define ECARD_DEV(_d) container_of((_d), struct expansion_card, dev)
+
+struct ecard_driver {
+ int (*probe)(struct expansion_card *, const struct ecard_id *id);
+ void (*remove)(struct expansion_card *);
+ void (*shutdown)(struct expansion_card *);
+ const struct ecard_id *id_table;
+ unsigned int id;
+ struct device_driver drv;
+};
+
+#define ECARD_DRV(_d) container_of((_d), struct ecard_driver, drv)
+
+#define ecard_set_drvdata(ec,data) dev_set_drvdata(&(ec)->dev, (data))
+#define ecard_get_drvdata(ec) dev_get_drvdata(&(ec)->dev)
+
+int ecard_register_driver(struct ecard_driver *);
+void ecard_remove_driver(struct ecard_driver *);
+
+#endif
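An expansion-card driver hangs its ecard_id table and callbacks off ecard_driver and registers with the bus. A hypothetical skeleton (the 0xffff terminator follows the convention of existing podule drivers, taken here as an assumption):

static const struct ecard_id my_ids[] = {
	{ MANU_ACORN, PROD_ACORN_SCSI },
	{ 0xffff, 0xffff }			/* table terminator */
};

static int my_probe(struct expansion_card *ec, const struct ecard_id *id)
{
	void __iomem *base;

	base = ecardm_iomap(ec, ECARD_RES_IOCSLOW, 0, 0);
	if (!base)
		return -ENOMEM;
	ecard_set_drvdata(ec, base);
	return 0;
}

static void my_remove(struct expansion_card *ec) { }

static struct ecard_driver my_driver = {
	.probe		= my_probe,
	.remove		= my_remove,
	.id_table	= my_ids,
	.drv		= { .name = "my_podule" },
};

/* module init would call ecard_register_driver(&my_driver) */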
diff --git a/arch/arm/include/asm/edac.h b/arch/arm/include/asm/edac.h
new file mode 100644
index 0000000000..16f95dd37b
--- /dev/null
+++ b/arch/arm/include/asm/edac.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2011 Calxeda, Inc.
+ * Based on PPC version Copyright 2007 MontaVista Software, Inc.
+ */
+#ifndef ASM_EDAC_H
+#define ASM_EDAC_H
+/*
+ * ECC atomic, DMA, SMP and interrupt safe scrub function.
+ * Implements the per-arch edac_atomic_scrub() that EDAC uses for software
+ * ECC scrubbing. It reads memory and then writes back the original
+ * value, allowing the hardware to detect and correct memory errors.
+ */
+
+static inline void edac_atomic_scrub(void *va, u32 size)
+{
+#if __LINUX_ARM_ARCH__ >= 6
+ unsigned int *virt_addr = va;
+ unsigned int temp, temp2;
+ unsigned int i;
+
+ for (i = 0; i < size / sizeof(*virt_addr); i++, virt_addr++) {
+ /* Very carefully read and write to memory atomically
+ * so we are interrupt, DMA and SMP safe.
+ */
+ __asm__ __volatile__("\n"
+ "1: ldrex %0, [%2]\n"
+ " strex %1, %0, [%2]\n"
+ " teq %1, #0\n"
+ " bne 1b\n"
+ : "=&r"(temp), "=&r"(temp2)
+ : "r"(virt_addr)
+ : "cc");
+ }
+#endif
+}
+
+#endif
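Usage is a single call over the affected region, e.g. an EDAC driver re-writing a page that reported a correctable error (pg is a hypothetical struct page *):

/* Read and write the page back atomically so the hardware can fix it up */
edac_atomic_scrub(page_address(pg), PAGE_SIZE);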
diff --git a/arch/arm/include/asm/efi.h b/arch/arm/include/asm/efi.h
new file mode 100644
index 0000000000..78282ced50
--- /dev/null
+++ b/arch/arm/include/asm/efi.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
+ */
+
+#ifndef __ASM_ARM_EFI_H
+#define __ASM_ARM_EFI_H
+
+#include <asm/cacheflush.h>
+#include <asm/cachetype.h>
+#include <asm/early_ioremap.h>
+#include <asm/fixmap.h>
+#include <asm/highmem.h>
+#include <asm/mach/map.h>
+#include <asm/mmu_context.h>
+#include <asm/ptrace.h>
+
+#ifdef CONFIG_EFI
+void efi_init(void);
+void arm_efi_init(void);
+
+int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
+int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md, bool);
+
+#define arch_efi_call_virt_setup() efi_virtmap_load()
+#define arch_efi_call_virt_teardown() efi_virtmap_unload()
+
+#define ARCH_EFI_IRQ_FLAGS_MASK \
+ (PSR_J_BIT | PSR_E_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT | \
+ PSR_T_BIT | MODE_MASK)
+
+static inline void efi_set_pgd(struct mm_struct *mm)
+{
+ check_and_switch_context(mm, NULL);
+}
+
+void efi_virtmap_load(void);
+void efi_virtmap_unload(void);
+
+#else
+#define arm_efi_init()
+#endif /* CONFIG_EFI */
+
+/* arch specific definitions used by the stub code */
+
+/*
+ * A reasonable upper bound for the uncompressed kernel size is 32 MBytes,
+ * so we will reserve that amount of memory. We have no easy way to tell
+ * the actual size of code + data the uncompressed kernel will use.
+ * If this is insufficient, the decompressor will relocate itself out of the
+ * way before performing the decompression.
+ */
+#define MAX_UNCOMP_KERNEL_SIZE SZ_32M
+
+/*
+ * phys-to-virt patching requires that the physical to virtual offset is a
+ * multiple of 2 MiB. However, using an alignment smaller than TEXT_OFFSET
+ * here throws off the memory allocation logic, so let's use the lowest power
+ * of two greater than 2 MiB and greater than TEXT_OFFSET.
+ */
+#define EFI_PHYS_ALIGN max(UL(SZ_2M), roundup_pow_of_two(TEXT_OFFSET))
+
+/* on ARM, the initrd should be loaded in a lowmem region */
+static inline unsigned long efi_get_max_initrd_addr(unsigned long image_addr)
+{
+ return round_down(image_addr, SZ_4M) + SZ_512M;
+}
+
+struct efi_arm_entry_state {
+ u32 cpsr_before_ebs;
+ u32 sctlr_before_ebs;
+ u32 cpsr_after_ebs;
+ u32 sctlr_after_ebs;
+};
+
+static inline void efi_capsule_flush_cache_range(void *addr, int size)
+{
+ __cpuc_flush_dcache_area(addr, size);
+}
+
+#endif /* __ASM_ARM_EFI_H */
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
new file mode 100644
index 0000000000..d68101655b
--- /dev/null
+++ b/arch/arm/include/asm/elf.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASMARM_ELF_H
+#define __ASMARM_ELF_H
+
+#include <asm/auxvec.h>
+#include <asm/hwcap.h>
+#include <asm/vdso_datapage.h>
+
+/*
+ * ELF register definitions..
+ */
+#include <asm/ptrace.h>
+#include <asm/user.h>
+
+struct task_struct;
+
+typedef unsigned long elf_greg_t;
+typedef unsigned long elf_freg_t[3];
+
+#define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t))
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+typedef struct user_fp elf_fpregset_t;
+
+#define EF_ARM_EABI_MASK 0xff000000
+#define EF_ARM_EABI_UNKNOWN 0x00000000
+#define EF_ARM_EABI_VER1 0x01000000
+#define EF_ARM_EABI_VER2 0x02000000
+#define EF_ARM_EABI_VER3 0x03000000
+#define EF_ARM_EABI_VER4 0x04000000
+#define EF_ARM_EABI_VER5 0x05000000
+
+#define EF_ARM_BE8 0x00800000 /* ABI 4,5 */
+#define EF_ARM_LE8 0x00400000 /* ABI 4,5 */
+#define EF_ARM_MAVERICK_FLOAT 0x00000800 /* ABI 0 */
+#define EF_ARM_VFP_FLOAT 0x00000400 /* ABI 0 */
+#define EF_ARM_SOFT_FLOAT 0x00000200 /* ABI 0 */
+#define EF_ARM_OLD_ABI 0x00000100 /* ABI 0 */
+#define EF_ARM_NEW_ABI 0x00000080 /* ABI 0 */
+#define EF_ARM_ALIGN8 0x00000040 /* ABI 0 */
+#define EF_ARM_PIC 0x00000020 /* ABI 0 */
+#define EF_ARM_MAPSYMSFIRST 0x00000010 /* ABI 2 */
+#define EF_ARM_APCS_FLOAT 0x00000010 /* ABI 0, floats in fp regs */
+#define EF_ARM_DYNSYMSUSESEGIDX 0x00000008 /* ABI 2 */
+#define EF_ARM_APCS_26 0x00000008 /* ABI 0 */
+#define EF_ARM_SYMSARESORTED 0x00000004 /* ABI 1,2 */
+#define EF_ARM_INTERWORK 0x00000004 /* ABI 0 */
+#define EF_ARM_HASENTRY 0x00000002 /* All */
+#define EF_ARM_RELEXEC 0x00000001 /* All */
+
+#define R_ARM_NONE 0
+#define R_ARM_PC24 1
+#define R_ARM_ABS32 2
+#define R_ARM_REL32 3
+#define R_ARM_CALL 28
+#define R_ARM_JUMP24 29
+#define R_ARM_TARGET1 38
+#define R_ARM_V4BX 40
+#define R_ARM_PREL31 42
+#define R_ARM_MOVW_ABS_NC 43
+#define R_ARM_MOVT_ABS 44
+#define R_ARM_MOVW_PREL_NC 45
+#define R_ARM_MOVT_PREL 46
+#define R_ARM_ALU_PC_G0_NC 57
+#define R_ARM_ALU_PC_G1_NC 59
+#define R_ARM_LDR_PC_G2 63
+
+#define R_ARM_THM_CALL 10
+#define R_ARM_THM_JUMP24 30
+#define R_ARM_THM_MOVW_ABS_NC 47
+#define R_ARM_THM_MOVT_ABS 48
+#define R_ARM_THM_MOVW_PREL_NC 49
+#define R_ARM_THM_MOVT_PREL 50
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS ELFCLASS32
+#ifdef __ARMEB__
+#define ELF_DATA ELFDATA2MSB
+#else
+#define ELF_DATA ELFDATA2LSB
+#endif
+#define ELF_ARCH EM_ARM
+
+/*
+ * This yields a string that ld.so will use to load implementation
+ * specific libraries for optimization. This is more specific in
+ * intent than poking at uname or /proc/cpuinfo.
+ *
+ * For now we just provide a fairly general string that describes the
+ * processor family. This could be made more specific later if someone
+ * implemented optimisations that require it. 26-bit CPUs give you
+ * "v1l" for ARM2 (no SWP) and "v2l" for anything else (ARM1 isn't
+ * supported). 32-bit CPUs give you "v3[lb]" for anything based on an
+ * ARM6 or ARM7 core and "armv4[lb]" for anything based on a StrongARM-1
+ * core.
+ */
+#define ELF_PLATFORM_SIZE 8
+#define ELF_PLATFORM (elf_platform)
+
+extern char elf_platform[];
+
+struct elf32_hdr;
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+extern int elf_check_arch(const struct elf32_hdr *);
+#define elf_check_arch elf_check_arch
+
+#define ELFOSABI_ARM_FDPIC 65 /* ARM FDPIC platform */
+#define elf_check_fdpic(x) ((x)->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC)
+#define elf_check_const_displacement(x) ((x)->e_flags & EF_ARM_PIC)
+#define ELF_FDPIC_CORE_EFLAGS 0
+
+#define vmcore_elf64_check_arch(x) (0)
+
+extern int arm_elf_read_implies_exec(int);
+#define elf_read_implies_exec(ex,stk) arm_elf_read_implies_exec(stk)
+
+#define CORE_DUMP_USE_REGSET
+#define ELF_EXEC_PAGESIZE 4096
+
+/* This is the base location for PIE (ET_DYN with INTERP) loads. */
+#define ELF_ET_DYN_BASE 0x400000UL
+
+/* When the program starts, a1 contains a pointer to a function to be
+ registered with atexit, as per the SVR4 ABI. A value of 0 means we
+ have no such handler. */
+#define ELF_PLAT_INIT(_r, load_addr) (_r)->ARM_r0 = 0
+
+#define ELF_FDPIC_PLAT_INIT(_r, _exec_map_addr, _interp_map_addr, dynamic_addr) \
+ do { \
+ (_r)->ARM_r7 = _exec_map_addr; \
+ (_r)->ARM_r8 = _interp_map_addr; \
+ (_r)->ARM_r9 = dynamic_addr; \
+ } while(0)
+
+extern void elf_set_personality(const struct elf32_hdr *);
+#define SET_PERSONALITY(ex) elf_set_personality(&(ex))
+
+#ifdef CONFIG_MMU
+#ifdef CONFIG_VDSO
+#define ARCH_DLINFO \
+do { \
+ NEW_AUX_ENT(AT_SYSINFO_EHDR, \
+ (elf_addr_t)current->mm->context.vdso); \
+} while (0)
+#endif
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+struct linux_binprm;
+int arch_setup_additional_pages(struct linux_binprm *, int);
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/exception.h b/arch/arm/include/asm/exception.h
new file mode 100644
index 0000000000..3c82975d46
--- /dev/null
+++ b/arch/arm/include/asm/exception.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Annotations for marking C functions as exception handlers.
+ *
+ * These should only be used for C functions that are called from the low
+ * level exception entry code and not any intervening C code.
+ */
+#ifndef __ASM_ARM_EXCEPTION_H
+#define __ASM_ARM_EXCEPTION_H
+
+#include <linux/interrupt.h>
+
+#define __exception_irq_entry __irq_entry
+
+#endif /* __ASM_ARM_EXCEPTION_H */
diff --git a/arch/arm/include/asm/fb.h b/arch/arm/include/asm/fb.h
new file mode 100644
index 0000000000..ce20a43c30
--- /dev/null
+++ b/arch/arm/include/asm/fb.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <asm-generic/fb.h>
+
+#endif /* _ASM_FB_H_ */
diff --git a/arch/arm/include/asm/fiq.h b/arch/arm/include/asm/fiq.h
new file mode 100644
index 0000000000..6bdfb4a473
--- /dev/null
+++ b/arch/arm/include/asm/fiq.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * arch/arm/include/asm/fiq.h
+ *
+ * Support for FIQ on ARM architectures.
+ * Written by Philip Blundell <philb@gnu.org>, 1998
+ * Re-written by Russell King
+ *
+ * NOTE: The FIQ mode registers are not magically preserved across
+ * suspend/resume.
+ *
+ * Drivers which require these registers to be preserved across power
+ * management operations must implement appropriate suspend/resume handlers to
+ * save and restore them.
+ */
+
+#ifndef __ASM_FIQ_H
+#define __ASM_FIQ_H
+
+#include <asm/ptrace.h>
+
+struct fiq_handler {
+ struct fiq_handler *next;
+	/* Name */
+	const char *name;
+	/*
+	 * Called to ask driver to relinquish/reacquire FIQ;
+	 * return zero to accept, or -<errno>
+	 */
+	int (*fiq_op)(void *, int relinquish);
+	/* data for the relinquish/reacquire functions */
+	void *dev_id;
+};
+
+extern int claim_fiq(struct fiq_handler *f);
+extern void release_fiq(struct fiq_handler *f);
+extern void set_fiq_handler(void *start, unsigned int length);
+extern void enable_fiq(int fiq);
+extern void disable_fiq(int fiq);
+
+/* helpers defined in fiqasm.S: */
+extern void __set_fiq_regs(unsigned long const *regs);
+extern void __get_fiq_regs(unsigned long *regs);
+
+static inline void set_fiq_regs(struct pt_regs const *regs)
+{
+ __set_fiq_regs(&regs->ARM_r8);
+}
+
+static inline void get_fiq_regs(struct pt_regs *regs)
+{
+ __get_fiq_regs(&regs->ARM_r8);
+}
+
+#endif
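Claiming the FIQ follows claim, install handler, enable. A hedged sketch — the handler symbols and FIQ number are hypothetical placeholders:

extern unsigned char my_fiq_start[], my_fiq_end[];	/* asm handler bounds */

static int my_fiq_op(void *dev_id, int relinquish)
{
	return relinquish ? -EBUSY : 0;	/* refuse to give the FIQ back */
}

static struct fiq_handler my_fh = {
	.name	= "my-fiq",
	.fiq_op	= my_fiq_op,
};

static int my_fiq_setup(int fiq_nr)
{
	int ret = claim_fiq(&my_fh);

	if (ret)
		return ret;
	set_fiq_handler(my_fiq_start, my_fiq_end - my_fiq_start);
	enable_fiq(fiq_nr);
	return 0;
}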
diff --git a/arch/arm/include/asm/firmware.h b/arch/arm/include/asm/firmware.h
new file mode 100644
index 0000000000..23fe0bd405
--- /dev/null
+++ b/arch/arm/include/asm/firmware.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Samsung Electronics.
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ * Tomasz Figa <t.figa@samsung.com>
+ */
+
+#ifndef __ASM_ARM_FIRMWARE_H
+#define __ASM_ARM_FIRMWARE_H
+
+#include <linux/bug.h>
+
+/*
+ * struct firmware_ops
+ *
+ * A structure to specify available firmware operations.
+ *
+ * A filled up structure can be registered with register_firmware_ops().
+ */
+struct firmware_ops {
+ /*
+ * Inform the firmware we intend to enter CPU idle mode
+ */
+ int (*prepare_idle)(unsigned long mode);
+ /*
+ * Enters CPU idle mode
+ */
+ int (*do_idle)(unsigned long mode);
+ /*
+ * Sets boot address of specified physical CPU
+ */
+ int (*set_cpu_boot_addr)(int cpu, unsigned long boot_addr);
+ /*
+ * Gets boot address of specified physical CPU
+ */
+ int (*get_cpu_boot_addr)(int cpu, unsigned long *boot_addr);
+ /*
+ * Boots specified physical CPU
+ */
+ int (*cpu_boot)(int cpu);
+ /*
+ * Initializes L2 cache
+ */
+ int (*l2x0_init)(void);
+ /*
+ * Enter system-wide suspend.
+ */
+ int (*suspend)(void);
+ /*
+ * Restore state of privileged hardware after system-wide suspend.
+ */
+ int (*resume)(void);
+};
+
+/* Global pointer for current firmware_ops structure, can't be NULL. */
+extern const struct firmware_ops *firmware_ops;
+
+/*
+ * call_firmware_op(op, ...)
+ *
+ * Checks if the requested firmware operation is present and calls it;
+ * otherwise returns -ENOSYS
+ */
+#define call_firmware_op(op, ...) \
+ ((firmware_ops->op) ? firmware_ops->op(__VA_ARGS__) : (-ENOSYS))
+
+/*
+ * register_firmware_ops(ops)
+ *
+ * A function to register platform firmware_ops struct.
+ */
+static inline void register_firmware_ops(const struct firmware_ops *ops)
+{
+ BUG_ON(!ops);
+
+ firmware_ops = ops;
+}
+
+#endif
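A platform fills in only the hooks it implements; anything left NULL makes call_firmware_op() return -ENOSYS. A hypothetical sketch:

static int myplat_set_cpu_boot_addr(int cpu, unsigned long boot_addr)
{
	/* would write boot_addr into a vendor firmware mailbox */
	return 0;
}

static const struct firmware_ops myplat_firmware_ops = {
	.set_cpu_boot_addr = myplat_set_cpu_boot_addr,
};

static void __init myplat_early_init(void)
{
	register_firmware_ops(&myplat_firmware_ops);
}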
diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h
new file mode 100644
index 0000000000..707068f852
--- /dev/null
+++ b/arch/arm/include/asm/fixmap.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_FIXMAP_H
+#define _ASM_FIXMAP_H
+
+#define FIXADDR_START 0xffc80000UL
+#define FIXADDR_END 0xfff00000UL
+#define FIXADDR_TOP (FIXADDR_END - PAGE_SIZE)
+
+#include <linux/pgtable.h>
+#include <asm/kmap_size.h>
+
+enum fixed_addresses {
+ FIX_EARLYCON_MEM_BASE,
+ __end_of_permanent_fixed_addresses,
+
+ FIX_KMAP_BEGIN = __end_of_permanent_fixed_addresses,
+ FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_MAX_IDX * NR_CPUS) - 1,
+
+ /* Support writing RO kernel text via kprobes, jump labels, etc. */
+ FIX_TEXT_POKE0,
+ FIX_TEXT_POKE1,
+
+ __end_of_fixmap_region,
+
+ /*
+ * Share the kmap() region with early_ioremap(): this is guaranteed
+ * not to clash since early_ioremap() is only available before
+ * paging_init(), and kmap() only after.
+ */
+#define NR_FIX_BTMAPS 32
+#define FIX_BTMAPS_SLOTS 7
+#define TOTAL_FIX_BTMAPS (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)
+
+ FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
+ FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
+ __end_of_early_ioremap_region
+};
+
+static const enum fixed_addresses __end_of_fixed_addresses =
+ __end_of_fixmap_region > __end_of_early_ioremap_region ?
+ __end_of_fixmap_region : __end_of_early_ioremap_region;
+
+#define FIXMAP_PAGE_COMMON (L_PTE_YOUNG | L_PTE_PRESENT | L_PTE_XN | L_PTE_DIRTY)
+
+#define FIXMAP_PAGE_NORMAL (pgprot_kernel | L_PTE_XN)
+#define FIXMAP_PAGE_RO (FIXMAP_PAGE_NORMAL | L_PTE_RDONLY)
+
+/* Used by set_fixmap_(io|nocache), both meant for mapping a device */
+#define FIXMAP_PAGE_IO (FIXMAP_PAGE_COMMON | L_PTE_MT_DEV_SHARED | L_PTE_SHARED)
+#define FIXMAP_PAGE_NOCACHE FIXMAP_PAGE_IO
+
+#define __early_set_fixmap __set_fixmap
+
+#ifdef CONFIG_MMU
+
+void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot);
+void __init early_fixmap_init(void);
+
+#include <asm-generic/fixmap.h>
+
+#else
+
+static inline void early_fixmap_init(void) { }
+
+#endif
+#endif
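
The asm-generic/fixmap.h pulled in above supplies fix_to_virt(), set_fixmap() (which applies FIXMAP_PAGE_NORMAL) and clear_fixmap(). A sketch of the text-poke pattern, where page is a hypothetical struct page containing the kernel text to patch:

    unsigned long vaddr = fix_to_virt(FIX_TEXT_POKE0);

    set_fixmap(FIX_TEXT_POKE0, page_to_phys(page));  /* temporary RW alias */
    /* ... write the new instruction through vaddr ... */
    clear_fixmap(FIX_TEXT_POKE0);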
diff --git a/arch/arm/include/asm/floppy.h b/arch/arm/include/asm/floppy.h
new file mode 100644
index 0000000000..e1cb04ed50
--- /dev/null
+++ b/arch/arm/include/asm/floppy.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/floppy.h
+ *
+ * Copyright (C) 1996-2000 Russell King
+ *
+ * Note that we don't touch FLOPPY_DMA nor FLOPPY_IRQ here
+ */
+#ifndef __ASM_ARM_FLOPPY_H
+#define __ASM_ARM_FLOPPY_H
+
+#define fd_outb(val, base, reg) \
+ do { \
+ int new_val = (val); \
+ if ((reg) == FD_DOR) { \
+ if (new_val & 0xf0) \
+ new_val = (new_val & 0x0c) | \
+ floppy_selects[new_val & 3]; \
+ else \
+ new_val &= 0x0c; \
+ } \
+ outb(new_val, (base) + (reg)); \
+	} while (0)
+
+#define fd_inb(base, reg) inb((base) + (reg))
+#define fd_request_irq() request_irq(IRQ_FLOPPYDISK, floppy_interrupt, \
+					0, "floppy", NULL)
+#define fd_free_irq() free_irq(IRQ_FLOPPYDISK, NULL)
+#define fd_disable_irq() disable_irq(IRQ_FLOPPYDISK)
+#define fd_enable_irq() enable_irq(IRQ_FLOPPYDISK)
+
+static inline int fd_dma_setup(void *data, unsigned int length,
+ unsigned int mode, unsigned long addr)
+{
+ set_dma_mode(DMA_FLOPPY, mode);
+ __set_dma_addr(DMA_FLOPPY, data);
+ set_dma_count(DMA_FLOPPY, length);
+ virtual_dma_port = addr;
+ enable_dma(DMA_FLOPPY);
+ return 0;
+}
+#define fd_dma_setup fd_dma_setup
+
+#define fd_request_dma() request_dma(DMA_FLOPPY, "floppy")
+#define fd_free_dma() free_dma(DMA_FLOPPY)
+#define fd_disable_dma() disable_dma(DMA_FLOPPY)
+
+/* need to clean up dma.h */
+#define DMA_FLOPPYDISK DMA_FLOPPY
+
+/* floppy_selects is the list of DOR values used to select drive fd
+ *
+ * On initialisation, the floppy list is scanned, and the drives are allocated
+ * in the order that they are found. This is done by seeking the drive
+ * to a non-zero track, and then restoring it to track 0. If an error occurs,
+ * then there is no floppy drive present. [to be put back in again]
+ */
+static unsigned char floppy_selects[4] = { 0x10, 0x21, 0x23, 0x33 };
+
+#define FDC1 (0x3f0)
+
+#define FLOPPY0_TYPE 4
+#define FLOPPY1_TYPE 4
+
+#define N_FDC 1
+#define N_DRIVE 4
+
+#define CROSS_64KB(a,s) (0)
+
+/*
+ * This allows people to reverse the order of
+ * fd0 and fd1, in case their hardware is
+ * strangely connected (as some RiscPCs
+ * and A5000s seem to be).
+ */
+static void driveswap(int *ints, int dummy, int dummy2)
+{
+ swap(floppy_selects[0], floppy_selects[1]);
+}
+
+#define EXTRA_FLOPPY_PARAMS ,{ "driveswap", &driveswap, NULL, 0, 0 }
+
+#endif
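
A worked expansion of fd_outb() with an arbitrary example value, writing 0x1d (motor bits plus drive select 1) to FD_DOR; since 0x1d & 0xf0 is non-zero, the drive-select bits are translated through floppy_selects[]:

    fd_outb(0x1d, base, FD_DOR);
    /* -> new_val = (0x1d & 0x0c) | floppy_selects[0x1d & 3]
     *            =  0x0c         | 0x21
     * -> outb(0x2d, base + FD_DOR)
     */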
diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
new file mode 100644
index 0000000000..78217b0e76
--- /dev/null
+++ b/arch/arm/include/asm/fncpy.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/fncpy.h - helper macros for function body copying
+ *
+ * Copyright (C) 2011 Linaro Limited
+ */
+
+/*
+ * These macros are intended for use when there is a need to copy a low-level
+ * function body into special memory.
+ *
+ * For example, when reconfiguring the SDRAM controller, the code doing the
+ * reconfiguration may need to run from SRAM.
+ *
+ * NOTE: the copied function body must be entirely self-contained and
+ * position-independent in order for this to work properly.
+ *
+ * NOTE: in order for embedded literals and data to get referenced correctly,
+ * the alignment of functions must be preserved when copying. To ensure this,
+ * the source and destination addresses for fncpy() must be aligned to a
+ * multiple of 8 bytes: you will get a BUG() if this condition is not met.
+ * You will typically need a ".align 3" directive in the assembler where the
+ * function to be copied is defined, and ensure that your allocator for the
+ * destination buffer returns 8-byte-aligned pointers.
+ *
+ * Typical usage example:
+ *
+ * extern int f(args);
+ * extern uint32_t size_of_f;
+ * int (*copied_f)(args);
+ * void *sram_buffer;
+ *
+ * copied_f = fncpy(sram_buffer, &f, size_of_f);
+ *
+ * ... later, call the function: ...
+ *
+ * copied_f(args);
+ *
+ * The size of the function to be copied can't be determined from C:
+ * this must be determined by other means, such as adding assembler directives
+ * in the file where f is defined.
+ */
+
+#ifndef __ASM_FNCPY_H
+#define __ASM_FNCPY_H
+
+#include <linux/types.h>
+#include <linux/string.h>
+
+#include <asm/bug.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Minimum alignment requirement for the source and destination addresses
+ * for function copying.
+ */
+#define FNCPY_ALIGN 8
+
+#define fncpy(dest_buf, funcp, size) ({ \
+ uintptr_t __funcp_address; \
+ typeof(funcp) __result; \
+ \
+ asm("" : "=r" (__funcp_address) : "0" (funcp)); \
+ \
+ /* \
+ * Ensure alignment of source and destination addresses, \
+ * disregarding the function's Thumb bit: \
+ */ \
+ BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
+ (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
+ \
+ memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
+ flush_icache_range((unsigned long)(dest_buf), \
+ (unsigned long)(dest_buf) + (size)); \
+ \
+ asm("" : "=r" (__result) \
+ : "0" ((uintptr_t)(dest_buf) | (__funcp_address & 1))); \
+ \
+ __result; \
+})
+
+#endif /* !__ASM_FNCPY_H */
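
The assembler side that the usage example in the header comment presumes might look like the following sketch (symbol names follow that example; the trailing .word is a common way to export the size that C sees as size_of_f):

    /* sram_fn.S */
            .align  3                       @ satisfy FNCPY_ALIGN
    ENTRY(f)
            @ ... entirely self-contained, position-independent body ...
            ret     lr
    ENDPROC(f)

    ENTRY(size_of_f)
            .word   . - f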
diff --git a/arch/arm/include/asm/fpstate.h b/arch/arm/include/asm/fpstate.h
new file mode 100644
index 0000000000..e29d9c7a52
--- /dev/null
+++ b/arch/arm/include/asm/fpstate.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/fpstate.h
+ *
+ * Copyright (C) 1995 Russell King
+ */
+
+#ifndef __ASM_ARM_FPSTATE_H
+#define __ASM_ARM_FPSTATE_H
+
+
+#ifndef __ASSEMBLY__
+
+/*
+ * VFP storage area has:
+ * - FPEXC, FPSCR, FPINST and FPINST2.
+ * - 16 or 32 double precision data registers
+ * - an implementation-dependent word of state for FLDMX/FSTMX (pre-ARMv6)
+ *
+ * FPEXC will always be non-zero once the VFP has been used in this process.
+ */
+
+struct vfp_hard_struct {
+#ifdef CONFIG_VFPv3
+ __u64 fpregs[32];
+#else
+ __u64 fpregs[16];
+#endif
+#if __LINUX_ARM_ARCH__ < 6
+ __u32 fpmx_state;
+#endif
+ __u32 fpexc;
+ __u32 fpscr;
+ /*
+ * VFP implementation specific state
+ */
+ __u32 fpinst;
+ __u32 fpinst2;
+
+#ifdef CONFIG_SMP
+ __u32 cpu;
+#endif
+};
+
+union vfp_state {
+ struct vfp_hard_struct hard;
+};
+
+#define FP_HARD_SIZE 35
+
+struct fp_hard_struct {
+ unsigned int save[FP_HARD_SIZE]; /* as yet undefined */
+};
+
+#define FP_SOFT_SIZE 35
+
+struct fp_soft_struct {
+ unsigned int save[FP_SOFT_SIZE]; /* undefined information */
+};
+
+#define IWMMXT_SIZE 0x98
+
+struct iwmmxt_struct {
+ unsigned int save[IWMMXT_SIZE / sizeof(unsigned int)];
+};
+
+union fp_state {
+ struct fp_hard_struct hard;
+ struct fp_soft_struct soft;
+#ifdef CONFIG_IWMMXT
+ struct iwmmxt_struct iwmmxt;
+#endif
+};
+
+#define FP_SIZE (sizeof(union fp_state) / sizeof(int))
+
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
new file mode 100644
index 0000000000..5be3ddc96a
--- /dev/null
+++ b/arch/arm/include/asm/ftrace.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARM_FTRACE
+#define _ASM_ARM_FTRACE
+
+#define HAVE_FUNCTION_GRAPH_FP_TEST
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+#endif
+
+#ifdef CONFIG_FUNCTION_TRACER
+#define MCOUNT_ADDR ((unsigned long)(__gnu_mcount_nc))
+#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
+
+#ifndef __ASSEMBLY__
+extern void __gnu_mcount_nc(void);
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+struct dyn_arch_ftrace {
+#ifdef CONFIG_ARM_MODULE_PLTS
+ struct module *mod;
+#endif
+};
+
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+ /* With Thumb-2, the recorded addresses have the lsb set */
+ return addr & ~1;
+}
+#endif
+
+#endif
+
+#endif
+
+#ifndef __ASSEMBLY__
+
+#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND)
+/*
+ * return_address uses walk_stackframe to do its work.  If both
+ * CONFIG_FRAME_POINTER=y and CONFIG_ARM_UNWIND=y, walk_stackframe uses unwind
+ * information. For this to work in the function tracer many functions would
+ * have to be marked with __notrace. So for now just depend on
+ * !CONFIG_ARM_UNWIND.
+ */
+
+void *return_address(unsigned int);
+
+#else
+
+static inline void *return_address(unsigned int level)
+{
+ return NULL;
+}
+
+#endif
+
+#define ftrace_return_address(n) return_address(n)
+
+#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
+
+static inline bool arch_syscall_match_sym_name(const char *sym,
+ const char *name)
+{
+ if (!strcmp(sym, "sys_mmap2"))
+ sym = "sys_mmap_pgoff";
+ else if (!strcmp(sym, "sys_statfs64_wrapper"))
+ sym = "sys_statfs64";
+ else if (!strcmp(sym, "sys_fstatfs64_wrapper"))
+ sym = "sys_fstatfs64";
+ else if (!strcmp(sym, "sys_arm_fadvise64_64"))
+ sym = "sys_fadvise64_64";
+
+ /* Ignore case since sym may start with "SyS" instead of "sys" */
+ return !strcasecmp(sym, name);
+}
+
+void prepare_ftrace_return(unsigned long *parent, unsigned long self,
+ unsigned long frame_pointer,
+ unsigned long stack_pointer);
+
+#endif /* ifndef __ASSEMBLY__ */
+
+#endif /* _ASM_ARM_FTRACE */
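
Two concrete data points for the helpers above (addresses and names are examples only): ftrace_call_adjust() strips the Thumb bit that mcount records carry on Thumb-2 kernels, and the wrapper renaming plus the final strcasecmp() lets syscall symbols match their generic names:

    ftrace_call_adjust(0x80123457UL);  /* -> 0x80123456 */

    arch_syscall_match_sym_name("sys_mmap2", "SyS_mmap_pgoff");  /* true  */
    arch_syscall_match_sym_name("sys_mmap2", "sys_old_mmap");    /* false */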
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
new file mode 100644
index 0000000000..a9151884bc
--- /dev/null
+++ b/arch/arm/include/asm/futex.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARM_FUTEX_H
+#define _ASM_ARM_FUTEX_H
+
+#ifdef __KERNEL__
+
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <asm/errno.h>
+
+#define __futex_atomic_ex_table(err_reg) \
+ "3:\n" \
+ " .pushsection __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .long 1b, 4f, 2b, 4f\n" \
+ " .popsection\n" \
+ " .pushsection .text.fixup,\"ax\"\n" \
+ " .align 2\n" \
+ "4: mov %0, " err_reg "\n" \
+ " b 3b\n" \
+ " .popsection"
+
+#ifdef CONFIG_SMP
+
+#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
+({ \
+ unsigned int __ua_flags; \
+ smp_mb(); \
+ prefetchw(uaddr); \
+ __ua_flags = uaccess_save_and_enable(); \
+ __asm__ __volatile__( \
+ "1: ldrex %1, [%3]\n" \
+ " " insn "\n" \
+ "2: strex %2, %0, [%3]\n" \
+ " teq %2, #0\n" \
+ " bne 1b\n" \
+ " mov %0, #0\n" \
+ __futex_atomic_ex_table("%5") \
+ : "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \
+ : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
+ : "cc", "memory"); \
+ uaccess_restore(__ua_flags); \
+})
+
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval)
+{
+ unsigned int __ua_flags;
+ int ret;
+ u32 val;
+
+ if (!access_ok(uaddr, sizeof(u32)))
+ return -EFAULT;
+
+ smp_mb();
+ /* Prefetching cannot fault */
+ prefetchw(uaddr);
+ __ua_flags = uaccess_save_and_enable();
+ __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
+ "1: ldrex %1, [%4]\n"
+ " teq %1, %2\n"
+ " ite eq @ explicit IT needed for the 2b label\n"
+ "2: strexeq %0, %3, [%4]\n"
+ " movne %0, #0\n"
+ " teq %0, #0\n"
+ " bne 1b\n"
+ __futex_atomic_ex_table("%5")
+ : "=&r" (ret), "=&r" (val)
+ : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
+ : "cc", "memory");
+ uaccess_restore(__ua_flags);
+ smp_mb();
+
+ *uval = val;
+ return ret;
+}
+
+#else /* !SMP, we can work around lack of atomic ops by disabling preemption */
+
+#include <linux/preempt.h>
+#include <asm/domain.h>
+
+#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
+({ \
+ unsigned int __ua_flags = uaccess_save_and_enable(); \
+ __asm__ __volatile__( \
+ "1: " TUSER(ldr) " %1, [%3]\n" \
+ " " insn "\n" \
+ "2: " TUSER(str) " %0, [%3]\n" \
+ " mov %0, #0\n" \
+ __futex_atomic_ex_table("%5") \
+ : "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \
+ : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
+ : "cc", "memory"); \
+ uaccess_restore(__ua_flags); \
+})
+
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval)
+{
+ unsigned int __ua_flags;
+ int ret = 0;
+ u32 val;
+
+ if (!access_ok(uaddr, sizeof(u32)))
+ return -EFAULT;
+
+ preempt_disable();
+ __ua_flags = uaccess_save_and_enable();
+ __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
+ " .syntax unified\n"
+ "1: " TUSER(ldr) " %1, [%4]\n"
+ " teq %1, %2\n"
+ " it eq @ explicit IT needed for the 2b label\n"
+ "2: " TUSERCOND(str, eq) " %3, [%4]\n"
+ __futex_atomic_ex_table("%5")
+ : "+r" (ret), "=&r" (val)
+ : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
+ : "cc", "memory");
+ uaccess_restore(__ua_flags);
+
+ *uval = val;
+ preempt_enable();
+
+ return ret;
+}
+
+#endif /* !SMP */
+
+static inline int
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
+{
+ int oldval = 0, ret, tmp;
+
+ if (!access_ok(uaddr, sizeof(u32)))
+ return -EFAULT;
+
+#ifndef CONFIG_SMP
+ preempt_disable();
+#endif
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_atomic_op("mov %0, %4", ret, oldval, tmp, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+ __futex_atomic_op("add %0, %1, %4", ret, oldval, tmp, uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op("orr %0, %1, %4", ret, oldval, tmp, uaddr, oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_atomic_op("and %0, %1, %4", ret, oldval, tmp, uaddr, ~oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_atomic_op("eor %0, %1, %4", ret, oldval, tmp, uaddr, oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+#ifndef CONFIG_SMP
+ preempt_enable();
+#endif
+
+ /*
+ * Store unconditionally. If ret != 0 the extra store is the least
+ * of the worries but GCC cannot figure out that __futex_atomic_op()
+ * is either setting ret to -EFAULT or storing the old value in
+	 * oldval, which results in an uninitialized warning at the call site.
+ */
+ *oval = oldval;
+
+ return ret;
+}
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_ARM_FUTEX_H */
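
Schematically, the SMP FUTEX_OP_ADD case assembles to the classic exclusive-monitor retry loop (register names substituted for the %-operands):

    1:  ldrex   oldval, [uaddr]          @ load-exclusive
        add     newval, oldval, oparg    @ the "insn" slot
    2:  strex   tmp, newval, [uaddr]     @ fails if another CPU intervened
        teq     tmp, #0
        bne     1b                       @ retry until the store wins
        mov     ret, #0

If either marked user access faults, the exception-table entries emitted by __futex_atomic_ex_table() redirect execution to the fixup at label 4, which loads -EFAULT into the result and branches back to the end of the sequence.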
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
new file mode 100644
index 0000000000..724f8dac1e
--- /dev/null
+++ b/arch/arm/include/asm/glue-cache.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/glue-cache.h
+ *
+ * Copyright (C) 1999-2002 Russell King
+ */
+#ifndef ASM_GLUE_CACHE_H
+#define ASM_GLUE_CACHE_H
+
+#include <asm/glue.h>
+
+/*
+ * Cache Model
+ * ===========
+ */
+#undef _CACHE
+#undef MULTI_CACHE
+
+#if defined(CONFIG_CPU_CACHE_V4)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE v4
+# endif
+#endif
+
+#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
+ defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \
+ defined(CONFIG_CPU_ARM1026)
+# define MULTI_CACHE 1
+#endif
+
+#if defined(CONFIG_CPU_FA526)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE fa
+# endif
+#endif
+
+#if defined(CONFIG_CPU_ARM926T)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE arm926
+# endif
+#endif
+
+#if defined(CONFIG_CPU_ARM940T)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE arm940
+# endif
+#endif
+
+#if defined(CONFIG_CPU_ARM946E)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE arm946
+# endif
+#endif
+
+#if defined(CONFIG_CPU_CACHE_V4WB)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE v4wb
+# endif
+#endif
+
+#if defined(CONFIG_CPU_XSCALE)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE xscale
+# endif
+#endif
+
+#if defined(CONFIG_CPU_XSC3)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE xsc3
+# endif
+#endif
+
+#if defined(CONFIG_CPU_MOHAWK)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE mohawk
+# endif
+#endif
+
+#if defined(CONFIG_CPU_FEROCEON)
+# define MULTI_CACHE 1
+#endif
+
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE v6
+# endif
+#endif
+
+#if defined(CONFIG_CPU_V7)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE v7
+# endif
+#endif
+
+#if defined(CONFIG_CACHE_B15_RAC)
+# define MULTI_CACHE 1
+#endif
+
+#if defined(CONFIG_CPU_V7M)
+# define MULTI_CACHE 1
+#endif
+
+#if !defined(_CACHE) && !defined(MULTI_CACHE)
+#error Unknown cache maintenance model
+#endif
+
+#ifndef __ASSEMBLER__
+static inline void nop_flush_icache_all(void) { }
+static inline void nop_flush_kern_cache_all(void) { }
+static inline void nop_flush_kern_cache_louis(void) { }
+static inline void nop_flush_user_cache_all(void) { }
+static inline void nop_flush_user_cache_range(unsigned long a,
+ unsigned long b, unsigned int c) { }
+
+static inline void nop_coherent_kern_range(unsigned long a, unsigned long b) { }
+static inline int nop_coherent_user_range(unsigned long a,
+ unsigned long b) { return 0; }
+static inline void nop_flush_kern_dcache_area(void *a, size_t s) { }
+
+static inline void nop_dma_flush_range(const void *a, const void *b) { }
+
+static inline void nop_dma_map_area(const void *s, size_t l, int f) { }
+static inline void nop_dma_unmap_area(const void *s, size_t l, int f) { }
+#endif
+
+#ifndef MULTI_CACHE
+#define __cpuc_flush_icache_all __glue(_CACHE,_flush_icache_all)
+#define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all)
+#define __cpuc_flush_kern_louis __glue(_CACHE,_flush_kern_cache_louis)
+#define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all)
+#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range)
+#define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range)
+#define __cpuc_coherent_user_range __glue(_CACHE,_coherent_user_range)
+#define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area)
+
+#define dmac_flush_range __glue(_CACHE,_dma_flush_range)
+#endif
+
+#endif
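
For a single-model build the glue resolves to direct calls at compile time; for example, with only CONFIG_CPU_V7 selected:

    __cpuc_flush_kern_all
        => __glue(v7, _flush_kern_cache_all)
        => v7_flush_kern_cache_all()

With MULTI_CACHE defined instead, cacheflush.h routes the same names through the cpu_cache function table, trading the direct branch for an indirect call.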
diff --git a/arch/arm/include/asm/glue-df.h b/arch/arm/include/asm/glue-df.h
new file mode 100644
index 0000000000..209e46c02d
--- /dev/null
+++ b/arch/arm/include/asm/glue-df.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/glue-df.h
+ *
+ * Copyright (C) 1997-1999 Russell King
+ * Copyright (C) 2000-2002 Deep Blue Solutions Ltd.
+ */
+#ifndef ASM_GLUE_DF_H
+#define ASM_GLUE_DF_H
+
+#include <asm/glue.h>
+
+/*
+ * Data Abort Model
+ * ================
+ *
+ * We have the following to choose from:
+ * arm7 - ARM7 style
+ * v4_early - ARMv4 without Thumb early abort handler
+ * v4t_late - ARMv4 with Thumb late abort handler
+ * v4t_early - ARMv4 with Thumb early abort handler
+ * v5t_early - ARMv5 with Thumb early abort handler
+ * v5tj_early - ARMv5 with Thumb and Java early abort handler
+ * xscale - ARMv5 with Thumb with Xscale extensions
+ * v6_early - ARMv6 generic early abort handler
+ * v7_early - ARMv7 generic early abort handler
+ */
+#undef CPU_DABORT_HANDLER
+#undef MULTI_DABORT
+
+#ifdef CONFIG_CPU_ABRT_EV4
+# ifdef CPU_DABORT_HANDLER
+# define MULTI_DABORT 1
+# else
+# define CPU_DABORT_HANDLER v4_early_abort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ABRT_LV4T
+# ifdef CPU_DABORT_HANDLER
+# define MULTI_DABORT 1
+# else
+# define CPU_DABORT_HANDLER v4t_late_abort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ABRT_EV4T
+# ifdef CPU_DABORT_HANDLER
+# define MULTI_DABORT 1
+# else
+# define CPU_DABORT_HANDLER v4t_early_abort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ABRT_EV5T
+# ifdef CPU_DABORT_HANDLER
+# define MULTI_DABORT 1
+# else
+# define CPU_DABORT_HANDLER v5t_early_abort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ABRT_EV5TJ
+# ifdef CPU_DABORT_HANDLER
+# define MULTI_DABORT 1
+# else
+# define CPU_DABORT_HANDLER v5tj_early_abort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ABRT_EV6
+# ifdef CPU_DABORT_HANDLER
+# define MULTI_DABORT 1
+# else
+# define CPU_DABORT_HANDLER v6_early_abort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ABRT_EV7
+# ifdef CPU_DABORT_HANDLER
+# define MULTI_DABORT 1
+# else
+# define CPU_DABORT_HANDLER v7_early_abort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ABRT_NOMMU
+# ifdef CPU_DABORT_HANDLER
+# define MULTI_DABORT 1
+# else
+# define CPU_DABORT_HANDLER nommu_early_abort
+# endif
+#endif
+
+#ifndef CPU_DABORT_HANDLER
+#error Unknown data abort handler type
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/glue-pf.h b/arch/arm/include/asm/glue-pf.h
new file mode 100644
index 0000000000..a033929fad
--- /dev/null
+++ b/arch/arm/include/asm/glue-pf.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/glue-pf.h
+ *
+ * Copyright (C) 1997-1999 Russell King
+ * Copyright (C) 2000-2002 Deep Blue Solutions Ltd.
+ */
+#ifndef ASM_GLUE_PF_H
+#define ASM_GLUE_PF_H
+
+#include <asm/glue.h>
+
+/*
+ * Prefetch Abort Model
+ * ================
+ *
+ * We have the following to choose from:
+ * legacy - no IFSR, no IFAR
+ * v6 - ARMv6: IFSR, no IFAR
+ * v7 - ARMv7: IFSR and IFAR
+ */
+
+#undef CPU_PABORT_HANDLER
+#undef MULTI_PABORT
+
+#ifdef CONFIG_CPU_PABRT_LEGACY
+# ifdef CPU_PABORT_HANDLER
+# define MULTI_PABORT 1
+# else
+# define CPU_PABORT_HANDLER legacy_pabort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_PABRT_V6
+# ifdef CPU_PABORT_HANDLER
+# define MULTI_PABORT 1
+# else
+# define CPU_PABORT_HANDLER v6_pabort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_PABRT_V7
+# ifdef CPU_PABORT_HANDLER
+# define MULTI_PABORT 1
+# else
+# define CPU_PABORT_HANDLER v7_pabort
+# endif
+#endif
+
+#ifndef CPU_PABORT_HANDLER
+#error Unknown prefetch abort handler type
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/glue-proc.h b/arch/arm/include/asm/glue-proc.h
new file mode 100644
index 0000000000..52df74aa3c
--- /dev/null
+++ b/arch/arm/include/asm/glue-proc.h
@@ -0,0 +1,261 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/glue-proc.h
+ *
+ * Copyright (C) 1997-1999 Russell King
+ * Copyright (C) 2000 Deep Blue Solutions Ltd
+ */
+#ifndef ASM_GLUE_PROC_H
+#define ASM_GLUE_PROC_H
+
+#include <asm/glue.h>
+
+/*
+ * Work out if we need multiple CPU support
+ */
+#undef MULTI_CPU
+#undef CPU_NAME
+
+/*
+ * CPU_NAME - the prefix for CPU related functions
+ */
+
+#ifdef CONFIG_CPU_ARM7TDMI
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm7tdmi
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM720T
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm720
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM740T
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm740
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM9TDMI
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm9tdmi
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM920T
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm920
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM922T
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm922
+# endif
+#endif
+
+#ifdef CONFIG_CPU_FA526
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_fa526
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM925T
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm925
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM926T
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm926
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM940T
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm940
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM946E
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm946
+# endif
+#endif
+
+#ifdef CONFIG_CPU_SA110
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_sa110
+# endif
+#endif
+
+#ifdef CONFIG_CPU_SA1100
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_sa1100
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM1020
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm1020
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM1020E
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm1020e
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM1022
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm1022
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM1026
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_arm1026
+# endif
+#endif
+
+#ifdef CONFIG_CPU_XSCALE
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_xscale
+# endif
+#endif
+
+#ifdef CONFIG_CPU_XSC3
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_xsc3
+# endif
+#endif
+
+#ifdef CONFIG_CPU_MOHAWK
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_mohawk
+# endif
+#endif
+
+#ifdef CONFIG_CPU_FEROCEON
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_feroceon
+# endif
+#endif
+
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_v6
+# endif
+#endif
+
+#ifdef CONFIG_CPU_V7M
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_v7m
+# endif
+#endif
+
+#ifdef CONFIG_CPU_PJ4B
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_pj4b
+# endif
+#endif
+
+#ifdef CONFIG_CPU_V7
+/*
+ * Cortex-A9 needs a different suspend/resume function, so we need
+ * multiple CPU support for ARMv7 anyway.
+ */
+# undef MULTI_CPU
+# define MULTI_CPU
+#endif
+
+#ifndef MULTI_CPU
+#define cpu_proc_init __glue(CPU_NAME,_proc_init)
+#define cpu_proc_fin __glue(CPU_NAME,_proc_fin)
+#define cpu_reset __glue(CPU_NAME,_reset)
+#define cpu_do_idle __glue(CPU_NAME,_do_idle)
+#define cpu_dcache_clean_area __glue(CPU_NAME,_dcache_clean_area)
+#define cpu_do_switch_mm __glue(CPU_NAME,_switch_mm)
+#define cpu_set_pte_ext __glue(CPU_NAME,_set_pte_ext)
+#define cpu_suspend_size __glue(CPU_NAME,_suspend_size)
+#define cpu_do_suspend __glue(CPU_NAME,_do_suspend)
+#define cpu_do_resume __glue(CPU_NAME,_do_resume)
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/glue.h b/arch/arm/include/asm/glue.h
new file mode 100644
index 0000000000..377fd4cfab
--- /dev/null
+++ b/arch/arm/include/asm/glue.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/glue.h
+ *
+ * Copyright (C) 1997-1999 Russell King
+ * Copyright (C) 2000-2002 Deep Blue Solutions Ltd.
+ *
+ * This file provides the glue to stick the processor-specific bits
+ * into the kernel in an efficient manner. The idea is to use branches
+ * when we're only targeting one class of TLB, or indirect calls
+ * when we're targeting multiple classes of TLBs.
+ */
+#ifdef __KERNEL__
+
+#ifdef __STDC__
+#define ____glue(name,fn) name##fn
+#else
+#define ____glue(name,fn) name/**/fn
+#endif
+#define __glue(name,fn) ____glue(name,fn)
+
+#endif
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
new file mode 100644
index 0000000000..706efafbf9
--- /dev/null
+++ b/arch/arm/include/asm/hardirq.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_HARDIRQ_H
+#define __ASM_HARDIRQ_H
+
+#include <asm/irq.h>
+
+#define __ARCH_IRQ_EXIT_IRQS_DISABLED 1
+#define ack_bad_irq ack_bad_irq
+
+#include <asm-generic/hardirq.h>
+
+#endif /* __ASM_HARDIRQ_H */
diff --git a/arch/arm/include/asm/hardware/cache-aurora-l2.h b/arch/arm/include/asm/hardware/cache-aurora-l2.h
new file mode 100644
index 0000000000..9694808ee9
--- /dev/null
+++ b/arch/arm/include/asm/hardware/cache-aurora-l2.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AURORA shared L2 cache controller support
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Yehuda Yitschak <yehuday@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ */
+
+#ifndef __ASM_ARM_HARDWARE_AURORA_L2_H
+#define __ASM_ARM_HARDWARE_AURORA_L2_H
+
+#define AURORA_SYNC_REG 0x700
+#define AURORA_RANGE_BASE_ADDR_REG 0x720
+#define AURORA_FLUSH_PHY_ADDR_REG 0x7f0
+#define AURORA_INVAL_RANGE_REG 0x774
+#define AURORA_CLEAN_RANGE_REG 0x7b4
+#define AURORA_FLUSH_RANGE_REG 0x7f4
+
+#define AURORA_ACR_REPLACEMENT_OFFSET 27
+#define AURORA_ACR_REPLACEMENT_MASK \
+ (0x3 << AURORA_ACR_REPLACEMENT_OFFSET)
+#define AURORA_ACR_REPLACEMENT_TYPE_WAYRR \
+ (0 << AURORA_ACR_REPLACEMENT_OFFSET)
+#define AURORA_ACR_REPLACEMENT_TYPE_LFSR \
+ (1 << AURORA_ACR_REPLACEMENT_OFFSET)
+#define AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU \
+ (3 << AURORA_ACR_REPLACEMENT_OFFSET)
+
+#define AURORA_ACR_PARITY_EN (1 << 21)
+#define AURORA_ACR_ECC_EN (1 << 20)
+
+#define AURORA_ACR_FORCE_WRITE_POLICY_OFFSET 0
+#define AURORA_ACR_FORCE_WRITE_POLICY_MASK \
+ (0x3 << AURORA_ACR_FORCE_WRITE_POLICY_OFFSET)
+#define AURORA_ACR_FORCE_WRITE_POLICY_DIS \
+ (0 << AURORA_ACR_FORCE_WRITE_POLICY_OFFSET)
+#define AURORA_ACR_FORCE_WRITE_BACK_POLICY \
+ (1 << AURORA_ACR_FORCE_WRITE_POLICY_OFFSET)
+#define AURORA_ACR_FORCE_WRITE_THRO_POLICY \
+ (2 << AURORA_ACR_FORCE_WRITE_POLICY_OFFSET)
+
+#define AURORA_ERR_CNT_REG 0x600
+#define AURORA_ERR_ATTR_CAP_REG 0x608
+#define AURORA_ERR_ADDR_CAP_REG 0x60c
+#define AURORA_ERR_WAY_CAP_REG 0x610
+#define AURORA_ERR_INJECT_CTL_REG 0x614
+#define AURORA_ERR_INJECT_MASK_REG 0x618
+
+#define AURORA_ERR_CNT_CLR_OFFSET 31
+#define AURORA_ERR_CNT_CLR \
+ (0x1 << AURORA_ERR_CNT_CLR_OFFSET)
+#define AURORA_ERR_CNT_UE_OFFSET 16
+#define AURORA_ERR_CNT_UE_MASK \
+ (0x7fff << AURORA_ERR_CNT_UE_OFFSET)
+#define AURORA_ERR_CNT_CE_OFFSET 0
+#define AURORA_ERR_CNT_CE_MASK \
+ (0xffff << AURORA_ERR_CNT_CE_OFFSET)
+
+#define AURORA_ERR_ATTR_SRC_OFF 16
+#define AURORA_ERR_ATTR_SRC_MSK \
+ (0x7 << AURORA_ERR_ATTR_SRC_OFF)
+#define AURORA_ERR_ATTR_TXN_OFF 12
+#define AURORA_ERR_ATTR_TXN_MSK \
+ (0xf << AURORA_ERR_ATTR_TXN_OFF)
+#define AURORA_ERR_ATTR_ERR_OFF 8
+#define AURORA_ERR_ATTR_ERR_MSK \
+ (0x3 << AURORA_ERR_ATTR_ERR_OFF)
+#define AURORA_ERR_ATTR_CAP_VALID_OFF 0
+#define AURORA_ERR_ATTR_CAP_VALID \
+ (0x1 << AURORA_ERR_ATTR_CAP_VALID_OFF)
+
+#define AURORA_ERR_ADDR_CAP_ADDR_MASK 0xffffffe0
+
+#define AURORA_ERR_WAY_IDX_OFF 8
+#define AURORA_ERR_WAY_IDX_MSK \
+ (0xfff << AURORA_ERR_WAY_IDX_OFF)
+#define AURORA_ERR_WAY_CAP_WAY_OFFSET 1
+#define AURORA_ERR_WAY_CAP_WAY_MASK \
+ (0xf << AURORA_ERR_WAY_CAP_WAY_OFFSET)
+
+#define AURORA_ERR_INJECT_CTL_ADDR_MASK 0xfffffff0
+#define AURORA_ERR_INJECT_CTL_EN_MASK 0x3
+#define AURORA_ERR_INJECT_CTL_EN_PARITY 0x2
+#define AURORA_ERR_INJECT_CTL_EN_ECC 0x1
+
+#define AURORA_MAX_RANGE_SIZE 1024
+
+#define AURORA_WAY_SIZE_SHIFT 2
+
+#define AURORA_CTRL_FW 0x100
+
+/*
+ * Choose a number outside L2X0_CACHE_ID_PART_MASK so that a cache ID
+ * coming from the device tree cannot be mistaken for one read from
+ * hardware.
+ */
+#define AURORA_CACHE_ID 0x100
+
+#endif /* __ASM_ARM_HARDWARE_AURORA_L2_H */
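
A short decode sketch for the error counter register; base stands in for a hypothetical ioremap'ed controller base:

    u32 cnt = readl(base + AURORA_ERR_CNT_REG);
    u32 ue  = (cnt & AURORA_ERR_CNT_UE_MASK) >> AURORA_ERR_CNT_UE_OFFSET;
    u32 ce  = (cnt & AURORA_ERR_CNT_CE_MASK) >> AURORA_ERR_CNT_CE_OFFSET;

    writel(AURORA_ERR_CNT_CLR, base + AURORA_ERR_CNT_REG);  /* request clear */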
diff --git a/arch/arm/include/asm/hardware/cache-b15-rac.h b/arch/arm/include/asm/hardware/cache-b15-rac.h
new file mode 100644
index 0000000000..3d43ec06fd
--- /dev/null
+++ b/arch/arm/include/asm/hardware/cache-b15-rac.h
@@ -0,0 +1,10 @@
+#ifndef __ASM_ARM_HARDWARE_CACHE_B15_RAC_H
+#define __ASM_ARM_HARDWARE_CACHE_B15_RAC_H
+
+#ifndef __ASSEMBLY__
+
+void b15_flush_kern_cache_all(void);
+
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/hardware/cache-feroceon-l2.h b/arch/arm/include/asm/hardware/cache-feroceon-l2.h
new file mode 100644
index 0000000000..eb2e7b7f70
--- /dev/null
+++ b/arch/arm/include/asm/hardware/cache-feroceon-l2.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/hardware/cache-feroceon-l2.h
+ *
+ * Copyright (C) 2008 Marvell Semiconductor
+ */
+
+extern void __init feroceon_l2_init(int l2_wt_override);
+extern int __init feroceon_of_init(void);
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
new file mode 100644
index 0000000000..5a7ee70f56
--- /dev/null
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -0,0 +1,192 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/hardware/cache-l2x0.h
+ *
+ * Copyright (C) 2007 ARM Limited
+ */
+
+#ifndef __ASM_ARM_HARDWARE_L2X0_H
+#define __ASM_ARM_HARDWARE_L2X0_H
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/types.h>
+
+#define L2X0_CACHE_ID 0x000
+#define L2X0_CACHE_TYPE 0x004
+#define L2X0_CTRL 0x100
+#define L2X0_AUX_CTRL 0x104
+#define L310_TAG_LATENCY_CTRL 0x108
+#define L310_DATA_LATENCY_CTRL 0x10C
+#define L2X0_EVENT_CNT_CTRL 0x200
+#define L2X0_EVENT_CNT1_CFG 0x204
+#define L2X0_EVENT_CNT0_CFG 0x208
+#define L2X0_EVENT_CNT1_VAL 0x20C
+#define L2X0_EVENT_CNT0_VAL 0x210
+#define L2X0_INTR_MASK 0x214
+#define L2X0_MASKED_INTR_STAT 0x218
+#define L2X0_RAW_INTR_STAT 0x21C
+#define L2X0_INTR_CLEAR 0x220
+#define L2X0_CACHE_SYNC 0x730
+#define L2X0_DUMMY_REG 0x740
+#define L2X0_INV_LINE_PA 0x770
+#define L2X0_INV_WAY 0x77C
+#define L2X0_CLEAN_LINE_PA 0x7B0
+#define L2X0_CLEAN_LINE_IDX 0x7B8
+#define L2X0_CLEAN_WAY 0x7BC
+#define L2X0_CLEAN_INV_LINE_PA 0x7F0
+#define L2X0_CLEAN_INV_LINE_IDX 0x7F8
+#define L2X0_CLEAN_INV_WAY 0x7FC
+/*
+ * The lockdown registers repeat 8 times for L310; the L210 has only one
+ * D and one I lockdown register, at 0x0900 and 0x0904.
+ */
+#define L2X0_LOCKDOWN_WAY_D_BASE 0x900
+#define L2X0_LOCKDOWN_WAY_I_BASE 0x904
+#define L2X0_LOCKDOWN_STRIDE 0x08
+#define L310_ADDR_FILTER_START 0xC00
+#define L310_ADDR_FILTER_END 0xC04
+#define L2X0_TEST_OPERATION 0xF00
+#define L2X0_LINE_DATA 0xF10
+#define L2X0_LINE_TAG 0xF30
+#define L2X0_DEBUG_CTRL 0xF40
+#define L310_PREFETCH_CTRL 0xF60
+#define L310_POWER_CTRL 0xF80
+#define L310_DYNAMIC_CLK_GATING_EN (1 << 1)
+#define L310_STNDBY_MODE_EN (1 << 0)
+
+/* Registers shifts and masks */
+#define L2X0_CACHE_ID_PART_MASK (0xf << 6)
+#define L2X0_CACHE_ID_PART_L210 (1 << 6)
+#define L2X0_CACHE_ID_PART_L220 (2 << 6)
+#define L2X0_CACHE_ID_PART_L310 (3 << 6)
+#define L2X0_CACHE_ID_RTL_MASK 0x3f
+#define L210_CACHE_ID_RTL_R0P2_02 0x00
+#define L210_CACHE_ID_RTL_R0P1 0x01
+#define L210_CACHE_ID_RTL_R0P2_01 0x02
+#define L210_CACHE_ID_RTL_R0P3 0x03
+#define L210_CACHE_ID_RTL_R0P4 0x0b
+#define L210_CACHE_ID_RTL_R0P5 0x0f
+#define L220_CACHE_ID_RTL_R1P7_01REL0 0x06
+#define L310_CACHE_ID_RTL_R0P0 0x00
+#define L310_CACHE_ID_RTL_R1P0 0x02
+#define L310_CACHE_ID_RTL_R2P0 0x04
+#define L310_CACHE_ID_RTL_R3P0 0x05
+#define L310_CACHE_ID_RTL_R3P1 0x06
+#define L310_CACHE_ID_RTL_R3P1_50REL0 0x07
+#define L310_CACHE_ID_RTL_R3P2 0x08
+#define L310_CACHE_ID_RTL_R3P3 0x09
+
+#define L2X0_EVENT_CNT_CTRL_ENABLE BIT(0)
+
+#define L2X0_EVENT_CNT_CFG_SRC_SHIFT 2
+#define L2X0_EVENT_CNT_CFG_SRC_MASK 0xf
+#define L2X0_EVENT_CNT_CFG_SRC_DISABLED 0
+#define L2X0_EVENT_CNT_CFG_INT_DISABLED 0
+#define L2X0_EVENT_CNT_CFG_INT_INCR 1
+#define L2X0_EVENT_CNT_CFG_INT_OVERFLOW 2
+
+/* L2C auxiliary control register - bits common to L2C-210/220/310 */
+#define L2C_AUX_CTRL_WAY_SIZE_SHIFT 17
+#define L2C_AUX_CTRL_WAY_SIZE_MASK (7 << 17)
+#define L2C_AUX_CTRL_WAY_SIZE(n) ((n) << 17)
+#define L2C_AUX_CTRL_EVTMON_ENABLE BIT(20)
+#define L2C_AUX_CTRL_PARITY_ENABLE BIT(21)
+#define L2C_AUX_CTRL_SHARED_OVERRIDE BIT(22)
+/* L2C-210/220 common bits */
+#define L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT 0
+#define L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK (7 << 0)
+#define L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT 3
+#define L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK (7 << 3)
+#define L2X0_AUX_CTRL_TAG_LATENCY_SHIFT 6
+#define L2X0_AUX_CTRL_TAG_LATENCY_MASK (7 << 6)
+#define L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT 9
+#define L2X0_AUX_CTRL_DIRTY_LATENCY_MASK (7 << 9)
+#define L2X0_AUX_CTRL_ASSOC_SHIFT 13
+#define L2X0_AUX_CTRL_ASSOC_MASK (15 << 13)
+/* L2C-210 specific bits */
+#define L210_AUX_CTRL_WRAP_DISABLE BIT(12)
+#define L210_AUX_CTRL_WA_OVERRIDE BIT(23)
+#define L210_AUX_CTRL_EXCLUSIVE_ABORT BIT(24)
+/* L2C-220 specific bits */
+#define L220_AUX_CTRL_EXCLUSIVE_CACHE BIT(12)
+#define L220_AUX_CTRL_FWA_SHIFT 23
+#define L220_AUX_CTRL_FWA_MASK (3 << 23)
+#define L220_AUX_CTRL_NS_LOCKDOWN BIT(26)
+#define L220_AUX_CTRL_NS_INT_CTRL BIT(27)
+/* L2C-310 specific bits */
+#define L310_AUX_CTRL_FULL_LINE_ZERO BIT(0) /* R2P0+ */
+#define L310_AUX_CTRL_HIGHPRIO_SO_DEV BIT(10) /* R2P0+ */
+#define L310_AUX_CTRL_STORE_LIMITATION BIT(11) /* R2P0+ */
+#define L310_AUX_CTRL_EXCLUSIVE_CACHE BIT(12)
+#define L310_AUX_CTRL_ASSOCIATIVITY_16 BIT(16)
+#define L310_AUX_CTRL_FWA_SHIFT 23
+#define L310_AUX_CTRL_FWA_MASK (3 << 23)
+#define L310_AUX_CTRL_CACHE_REPLACE_RR BIT(25) /* R2P0+ */
+#define L310_AUX_CTRL_NS_LOCKDOWN BIT(26)
+#define L310_AUX_CTRL_NS_INT_CTRL BIT(27)
+#define L310_AUX_CTRL_DATA_PREFETCH BIT(28)
+#define L310_AUX_CTRL_INSTR_PREFETCH BIT(29)
+#define L310_AUX_CTRL_EARLY_BRESP BIT(30) /* R2P0+ */
+
+#define L310_LATENCY_CTRL_SETUP(n) ((n) << 0)
+#define L310_LATENCY_CTRL_RD(n) ((n) << 4)
+#define L310_LATENCY_CTRL_WR(n) ((n) << 8)
+
+#define L310_ADDR_FILTER_EN 1
+
+#define L310_PREFETCH_CTRL_OFFSET_MASK 0x1f
+#define L310_PREFETCH_CTRL_DBL_LINEFILL_INCR BIT(23)
+#define L310_PREFETCH_CTRL_PREFETCH_DROP BIT(24)
+#define L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP BIT(27)
+#define L310_PREFETCH_CTRL_DATA_PREFETCH BIT(28)
+#define L310_PREFETCH_CTRL_INSTR_PREFETCH BIT(29)
+#define L310_PREFETCH_CTRL_DBL_LINEFILL BIT(30)
+
+#define L2X0_CTRL_EN 1
+
+#define L2X0_WAY_SIZE_SHIFT 3
+
+#ifndef __ASSEMBLY__
+extern void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask);
+#if defined(CONFIG_CACHE_L2X0) && defined(CONFIG_OF)
+extern int l2x0_of_init(u32 aux_val, u32 aux_mask);
+#else
+static inline int l2x0_of_init(u32 aux_val, u32 aux_mask)
+{
+ return -ENODEV;
+}
+#endif
+
+#ifdef CONFIG_CACHE_L2X0_PMU
+void l2x0_pmu_register(void __iomem *base, u32 part);
+void l2x0_pmu_suspend(void);
+void l2x0_pmu_resume(void);
+#else
+static inline void l2x0_pmu_register(void __iomem *base, u32 part) {}
+static inline void l2x0_pmu_suspend(void) {}
+static inline void l2x0_pmu_resume(void) {}
+#endif
+
+struct l2x0_regs {
+ unsigned long phy_base;
+ unsigned long aux_ctrl;
+ /*
+ * Whether the following registers need to be saved/restored
+ * depends on platform
+ */
+ unsigned long tag_latency;
+ unsigned long data_latency;
+ unsigned long filter_start;
+ unsigned long filter_end;
+ unsigned long prefetch_ctrl;
+ unsigned long pwr_ctrl;
+ unsigned long ctrl;
+ unsigned long aux2_ctrl;
+};
+
+extern struct l2x0_regs l2x0_saved_regs;
+
+#endif /* __ASSEMBLY__ */
+
+#endif
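
Decoding the cache geometry from the auxiliary control register, as a sketch (base is a hypothetical ioremap'ed controller base; way-size field n is conventionally read as 1 << (n + L2X0_WAY_SIZE_SHIFT) KB):

    u32 aux = readl_relaxed(base + L2X0_AUX_CTRL);
    u32 n   = (aux & L2C_AUX_CTRL_WAY_SIZE_MASK) >> L2C_AUX_CTRL_WAY_SIZE_SHIFT;
    u32 way_size_kb = 1 << (n + L2X0_WAY_SIZE_SHIFT);   /* 1 -> 16KB, 2 -> 32KB */
    u32 ways = (aux & L310_AUX_CTRL_ASSOCIATIVITY_16) ? 16 : 8;   /* L2C-310 */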
diff --git a/arch/arm/include/asm/hardware/cache-tauros2.h b/arch/arm/include/asm/hardware/cache-tauros2.h
new file mode 100644
index 0000000000..4e493facaa
--- /dev/null
+++ b/arch/arm/include/asm/hardware/cache-tauros2.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/hardware/cache-tauros2.h
+ *
+ * Copyright (C) 2008 Marvell Semiconductor
+ */
+
+#define CACHE_TAUROS2_PREFETCH_ON (1 << 0)
+#define CACHE_TAUROS2_LINEFILL_BURST8 (1 << 1)
+
+extern void __init tauros2_init(unsigned int features);
diff --git a/arch/arm/include/asm/hardware/cache-uniphier.h b/arch/arm/include/asm/hardware/cache-uniphier.h
new file mode 100644
index 0000000000..b1fefca65d
--- /dev/null
+++ b/arch/arm/include/asm/hardware/cache-uniphier.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2015-2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
+ */
+
+#ifndef __CACHE_UNIPHIER_H
+#define __CACHE_UNIPHIER_H
+
+#include <linux/errno.h>
+
+#ifdef CONFIG_CACHE_UNIPHIER
+int uniphier_cache_init(void);
+#else
+static inline int uniphier_cache_init(void)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /* __CACHE_UNIPHIER_H */
diff --git a/arch/arm/include/asm/hardware/cp14.h b/arch/arm/include/asm/hardware/cp14.h
new file mode 100644
index 0000000000..44f2bde379
--- /dev/null
+++ b/arch/arm/include/asm/hardware/cp14.h
@@ -0,0 +1,534 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. */
+
+#ifndef __ASM_HARDWARE_CP14_H
+#define __ASM_HARDWARE_CP14_H
+
+#include <linux/types.h>
+
+/* Accessors for CP14 registers */
+#define dbg_read(reg) RCP14_##reg()
+#define dbg_write(val, reg) WCP14_##reg(val)
+#define etm_read(reg) RCP14_##reg()
+#define etm_write(val, reg) WCP14_##reg(val)
+
+/* MRC14 and MCR14 */
+#define MRC14(op1, crn, crm, op2) \
+({ \
+u32 val; \
+asm volatile("mrc p14, "#op1", %0, "#crn", "#crm", "#op2 : "=r" (val)); \
+val; \
+})
+
+#define MCR14(val, op1, crn, crm, op2) \
+({ \
+asm volatile("mcr p14, "#op1", %0, "#crn", "#crm", "#op2 : : "r" (val));\
+})
+
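+/*
+ * Usage sketch (illustrative only): each accessor expands to a single
+ * coprocessor instruction, e.g.
+ *
+ *	u32 didr = dbg_read(DBGDIDR);	@ mrc p14, 0, <Rt>, c0, c0, 0
+ *	dbg_write(val, DBGDRCR);	@ mcr p14, 0, <Rt>, c0, c4, 2
+ */
+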
+/*
+ * Debug Registers
+ *
+ * Available only in DBGv7
+ * DBGECR, DBGDSCCR, DBGDSMCR, DBGDRCR
+ *
+ * Available only in DBGv7.1
+ * DBGBXVRm, DBGOSDLR, DBGDEVID2, DBGDEVID1
+ *
+ * Read only
+ * DBGDIDR, DBGDSCRint, DBGDTRRXint, DBGDRAR, DBGOSLSR, DBGOSSRR, DBGPRSR,
+ * DBGPRSR, DBGDSAR, DBGAUTHSTATUS, DBGDEVID2, DBGDEVID1, DBGDEVID
+ *
+ * Write only
+ * DBGDTRTXint, DBGOSLAR
+ */
+#define RCP14_DBGDIDR() MRC14(0, c0, c0, 0)
+#define RCP14_DBGDSCRint() MRC14(0, c0, c1, 0)
+#define RCP14_DBGDTRRXint() MRC14(0, c0, c5, 0)
+#define RCP14_DBGWFAR() MRC14(0, c0, c6, 0)
+#define RCP14_DBGVCR() MRC14(0, c0, c7, 0)
+#define RCP14_DBGECR() MRC14(0, c0, c9, 0)
+#define RCP14_DBGDSCCR() MRC14(0, c0, c10, 0)
+#define RCP14_DBGDSMCR() MRC14(0, c0, c11, 0)
+#define RCP14_DBGDTRRXext() MRC14(0, c0, c0, 2)
+#define RCP14_DBGDSCRext() MRC14(0, c0, c2, 2)
+#define RCP14_DBGDTRTXext() MRC14(0, c0, c3, 2)
+#define RCP14_DBGDRCR() MRC14(0, c0, c4, 2)
+#define RCP14_DBGBVR0() MRC14(0, c0, c0, 4)
+#define RCP14_DBGBVR1() MRC14(0, c0, c1, 4)
+#define RCP14_DBGBVR2() MRC14(0, c0, c2, 4)
+#define RCP14_DBGBVR3() MRC14(0, c0, c3, 4)
+#define RCP14_DBGBVR4() MRC14(0, c0, c4, 4)
+#define RCP14_DBGBVR5() MRC14(0, c0, c5, 4)
+#define RCP14_DBGBVR6() MRC14(0, c0, c6, 4)
+#define RCP14_DBGBVR7() MRC14(0, c0, c7, 4)
+#define RCP14_DBGBVR8() MRC14(0, c0, c8, 4)
+#define RCP14_DBGBVR9() MRC14(0, c0, c9, 4)
+#define RCP14_DBGBVR10() MRC14(0, c0, c10, 4)
+#define RCP14_DBGBVR11() MRC14(0, c0, c11, 4)
+#define RCP14_DBGBVR12() MRC14(0, c0, c12, 4)
+#define RCP14_DBGBVR13() MRC14(0, c0, c13, 4)
+#define RCP14_DBGBVR14() MRC14(0, c0, c14, 4)
+#define RCP14_DBGBVR15() MRC14(0, c0, c15, 4)
+#define RCP14_DBGBCR0() MRC14(0, c0, c0, 5)
+#define RCP14_DBGBCR1() MRC14(0, c0, c1, 5)
+#define RCP14_DBGBCR2() MRC14(0, c0, c2, 5)
+#define RCP14_DBGBCR3() MRC14(0, c0, c3, 5)
+#define RCP14_DBGBCR4() MRC14(0, c0, c4, 5)
+#define RCP14_DBGBCR5() MRC14(0, c0, c5, 5)
+#define RCP14_DBGBCR6() MRC14(0, c0, c6, 5)
+#define RCP14_DBGBCR7() MRC14(0, c0, c7, 5)
+#define RCP14_DBGBCR8() MRC14(0, c0, c8, 5)
+#define RCP14_DBGBCR9() MRC14(0, c0, c9, 5)
+#define RCP14_DBGBCR10() MRC14(0, c0, c10, 5)
+#define RCP14_DBGBCR11() MRC14(0, c0, c11, 5)
+#define RCP14_DBGBCR12() MRC14(0, c0, c12, 5)
+#define RCP14_DBGBCR13() MRC14(0, c0, c13, 5)
+#define RCP14_DBGBCR14() MRC14(0, c0, c14, 5)
+#define RCP14_DBGBCR15() MRC14(0, c0, c15, 5)
+#define RCP14_DBGWVR0() MRC14(0, c0, c0, 6)
+#define RCP14_DBGWVR1() MRC14(0, c0, c1, 6)
+#define RCP14_DBGWVR2() MRC14(0, c0, c2, 6)
+#define RCP14_DBGWVR3() MRC14(0, c0, c3, 6)
+#define RCP14_DBGWVR4() MRC14(0, c0, c4, 6)
+#define RCP14_DBGWVR5() MRC14(0, c0, c5, 6)
+#define RCP14_DBGWVR6() MRC14(0, c0, c6, 6)
+#define RCP14_DBGWVR7() MRC14(0, c0, c7, 6)
+#define RCP14_DBGWVR8() MRC14(0, c0, c8, 6)
+#define RCP14_DBGWVR9() MRC14(0, c0, c9, 6)
+#define RCP14_DBGWVR10() MRC14(0, c0, c10, 6)
+#define RCP14_DBGWVR11() MRC14(0, c0, c11, 6)
+#define RCP14_DBGWVR12() MRC14(0, c0, c12, 6)
+#define RCP14_DBGWVR13() MRC14(0, c0, c13, 6)
+#define RCP14_DBGWVR14() MRC14(0, c0, c14, 6)
+#define RCP14_DBGWVR15() MRC14(0, c0, c15, 6)
+#define RCP14_DBGWCR0() MRC14(0, c0, c0, 7)
+#define RCP14_DBGWCR1() MRC14(0, c0, c1, 7)
+#define RCP14_DBGWCR2() MRC14(0, c0, c2, 7)
+#define RCP14_DBGWCR3() MRC14(0, c0, c3, 7)
+#define RCP14_DBGWCR4() MRC14(0, c0, c4, 7)
+#define RCP14_DBGWCR5() MRC14(0, c0, c5, 7)
+#define RCP14_DBGWCR6() MRC14(0, c0, c6, 7)
+#define RCP14_DBGWCR7() MRC14(0, c0, c7, 7)
+#define RCP14_DBGWCR8() MRC14(0, c0, c8, 7)
+#define RCP14_DBGWCR9() MRC14(0, c0, c9, 7)
+#define RCP14_DBGWCR10() MRC14(0, c0, c10, 7)
+#define RCP14_DBGWCR11() MRC14(0, c0, c11, 7)
+#define RCP14_DBGWCR12() MRC14(0, c0, c12, 7)
+#define RCP14_DBGWCR13() MRC14(0, c0, c13, 7)
+#define RCP14_DBGWCR14() MRC14(0, c0, c14, 7)
+#define RCP14_DBGWCR15() MRC14(0, c0, c15, 7)
+#define RCP14_DBGDRAR() MRC14(0, c1, c0, 0)
+#define RCP14_DBGBXVR0() MRC14(0, c1, c0, 1)
+#define RCP14_DBGBXVR1() MRC14(0, c1, c1, 1)
+#define RCP14_DBGBXVR2() MRC14(0, c1, c2, 1)
+#define RCP14_DBGBXVR3() MRC14(0, c1, c3, 1)
+#define RCP14_DBGBXVR4() MRC14(0, c1, c4, 1)
+#define RCP14_DBGBXVR5() MRC14(0, c1, c5, 1)
+#define RCP14_DBGBXVR6() MRC14(0, c1, c6, 1)
+#define RCP14_DBGBXVR7() MRC14(0, c1, c7, 1)
+#define RCP14_DBGBXVR8() MRC14(0, c1, c8, 1)
+#define RCP14_DBGBXVR9() MRC14(0, c1, c9, 1)
+#define RCP14_DBGBXVR10() MRC14(0, c1, c10, 1)
+#define RCP14_DBGBXVR11() MRC14(0, c1, c11, 1)
+#define RCP14_DBGBXVR12() MRC14(0, c1, c12, 1)
+#define RCP14_DBGBXVR13() MRC14(0, c1, c13, 1)
+#define RCP14_DBGBXVR14() MRC14(0, c1, c14, 1)
+#define RCP14_DBGBXVR15() MRC14(0, c1, c15, 1)
+#define RCP14_DBGOSLSR() MRC14(0, c1, c1, 4)
+#define RCP14_DBGOSSRR() MRC14(0, c1, c2, 4)
+#define RCP14_DBGOSDLR() MRC14(0, c1, c3, 4)
+#define RCP14_DBGPRCR() MRC14(0, c1, c4, 4)
+#define RCP14_DBGPRSR() MRC14(0, c1, c5, 4)
+#define RCP14_DBGDSAR() MRC14(0, c2, c0, 0)
+#define RCP14_DBGITCTRL() MRC14(0, c7, c0, 4)
+#define RCP14_DBGCLAIMSET() MRC14(0, c7, c8, 6)
+#define RCP14_DBGCLAIMCLR() MRC14(0, c7, c9, 6)
+#define RCP14_DBGAUTHSTATUS() MRC14(0, c7, c14, 6)
+#define RCP14_DBGDEVID2() MRC14(0, c7, c0, 7)
+#define RCP14_DBGDEVID1() MRC14(0, c7, c1, 7)
+#define RCP14_DBGDEVID() MRC14(0, c7, c2, 7)
+
+#define WCP14_DBGDTRTXint(val) MCR14(val, 0, c0, c5, 0)
+#define WCP14_DBGWFAR(val) MCR14(val, 0, c0, c6, 0)
+#define WCP14_DBGVCR(val) MCR14(val, 0, c0, c7, 0)
+#define WCP14_DBGECR(val) MCR14(val, 0, c0, c9, 0)
+#define WCP14_DBGDSCCR(val) MCR14(val, 0, c0, c10, 0)
+#define WCP14_DBGDSMCR(val) MCR14(val, 0, c0, c11, 0)
+#define WCP14_DBGDTRRXext(val) MCR14(val, 0, c0, c0, 2)
+#define WCP14_DBGDSCRext(val) MCR14(val, 0, c0, c2, 2)
+#define WCP14_DBGDTRTXext(val) MCR14(val, 0, c0, c3, 2)
+#define WCP14_DBGDRCR(val) MCR14(val, 0, c0, c4, 2)
+#define WCP14_DBGBVR0(val) MCR14(val, 0, c0, c0, 4)
+#define WCP14_DBGBVR1(val) MCR14(val, 0, c0, c1, 4)
+#define WCP14_DBGBVR2(val) MCR14(val, 0, c0, c2, 4)
+#define WCP14_DBGBVR3(val) MCR14(val, 0, c0, c3, 4)
+#define WCP14_DBGBVR4(val) MCR14(val, 0, c0, c4, 4)
+#define WCP14_DBGBVR5(val) MCR14(val, 0, c0, c5, 4)
+#define WCP14_DBGBVR6(val) MCR14(val, 0, c0, c6, 4)
+#define WCP14_DBGBVR7(val) MCR14(val, 0, c0, c7, 4)
+#define WCP14_DBGBVR8(val) MCR14(val, 0, c0, c8, 4)
+#define WCP14_DBGBVR9(val) MCR14(val, 0, c0, c9, 4)
+#define WCP14_DBGBVR10(val) MCR14(val, 0, c0, c10, 4)
+#define WCP14_DBGBVR11(val) MCR14(val, 0, c0, c11, 4)
+#define WCP14_DBGBVR12(val) MCR14(val, 0, c0, c12, 4)
+#define WCP14_DBGBVR13(val) MCR14(val, 0, c0, c13, 4)
+#define WCP14_DBGBVR14(val) MCR14(val, 0, c0, c14, 4)
+#define WCP14_DBGBVR15(val) MCR14(val, 0, c0, c15, 4)
+#define WCP14_DBGBCR0(val) MCR14(val, 0, c0, c0, 5)
+#define WCP14_DBGBCR1(val) MCR14(val, 0, c0, c1, 5)
+#define WCP14_DBGBCR2(val) MCR14(val, 0, c0, c2, 5)
+#define WCP14_DBGBCR3(val) MCR14(val, 0, c0, c3, 5)
+#define WCP14_DBGBCR4(val) MCR14(val, 0, c0, c4, 5)
+#define WCP14_DBGBCR5(val) MCR14(val, 0, c0, c5, 5)
+#define WCP14_DBGBCR6(val) MCR14(val, 0, c0, c6, 5)
+#define WCP14_DBGBCR7(val) MCR14(val, 0, c0, c7, 5)
+#define WCP14_DBGBCR8(val) MCR14(val, 0, c0, c8, 5)
+#define WCP14_DBGBCR9(val) MCR14(val, 0, c0, c9, 5)
+#define WCP14_DBGBCR10(val) MCR14(val, 0, c0, c10, 5)
+#define WCP14_DBGBCR11(val) MCR14(val, 0, c0, c11, 5)
+#define WCP14_DBGBCR12(val) MCR14(val, 0, c0, c12, 5)
+#define WCP14_DBGBCR13(val) MCR14(val, 0, c0, c13, 5)
+#define WCP14_DBGBCR14(val) MCR14(val, 0, c0, c14, 5)
+#define WCP14_DBGBCR15(val) MCR14(val, 0, c0, c15, 5)
+#define WCP14_DBGWVR0(val) MCR14(val, 0, c0, c0, 6)
+#define WCP14_DBGWVR1(val) MCR14(val, 0, c0, c1, 6)
+#define WCP14_DBGWVR2(val) MCR14(val, 0, c0, c2, 6)
+#define WCP14_DBGWVR3(val) MCR14(val, 0, c0, c3, 6)
+#define WCP14_DBGWVR4(val) MCR14(val, 0, c0, c4, 6)
+#define WCP14_DBGWVR5(val) MCR14(val, 0, c0, c5, 6)
+#define WCP14_DBGWVR6(val) MCR14(val, 0, c0, c6, 6)
+#define WCP14_DBGWVR7(val) MCR14(val, 0, c0, c7, 6)
+#define WCP14_DBGWVR8(val) MCR14(val, 0, c0, c8, 6)
+#define WCP14_DBGWVR9(val) MCR14(val, 0, c0, c9, 6)
+#define WCP14_DBGWVR10(val) MCR14(val, 0, c0, c10, 6)
+#define WCP14_DBGWVR11(val) MCR14(val, 0, c0, c11, 6)
+#define WCP14_DBGWVR12(val) MCR14(val, 0, c0, c12, 6)
+#define WCP14_DBGWVR13(val) MCR14(val, 0, c0, c13, 6)
+#define WCP14_DBGWVR14(val) MCR14(val, 0, c0, c14, 6)
+#define WCP14_DBGWVR15(val) MCR14(val, 0, c0, c15, 6)
+#define WCP14_DBGWCR0(val) MCR14(val, 0, c0, c0, 7)
+#define WCP14_DBGWCR1(val) MCR14(val, 0, c0, c1, 7)
+#define WCP14_DBGWCR2(val) MCR14(val, 0, c0, c2, 7)
+#define WCP14_DBGWCR3(val) MCR14(val, 0, c0, c3, 7)
+#define WCP14_DBGWCR4(val) MCR14(val, 0, c0, c4, 7)
+#define WCP14_DBGWCR5(val) MCR14(val, 0, c0, c5, 7)
+#define WCP14_DBGWCR6(val) MCR14(val, 0, c0, c6, 7)
+#define WCP14_DBGWCR7(val) MCR14(val, 0, c0, c7, 7)
+#define WCP14_DBGWCR8(val) MCR14(val, 0, c0, c8, 7)
+#define WCP14_DBGWCR9(val) MCR14(val, 0, c0, c9, 7)
+#define WCP14_DBGWCR10(val) MCR14(val, 0, c0, c10, 7)
+#define WCP14_DBGWCR11(val) MCR14(val, 0, c0, c11, 7)
+#define WCP14_DBGWCR12(val) MCR14(val, 0, c0, c12, 7)
+#define WCP14_DBGWCR13(val) MCR14(val, 0, c0, c13, 7)
+#define WCP14_DBGWCR14(val) MCR14(val, 0, c0, c14, 7)
+#define WCP14_DBGWCR15(val) MCR14(val, 0, c0, c15, 7)
+#define WCP14_DBGBXVR0(val) MCR14(val, 0, c1, c0, 1)
+#define WCP14_DBGBXVR1(val) MCR14(val, 0, c1, c1, 1)
+#define WCP14_DBGBXVR2(val) MCR14(val, 0, c1, c2, 1)
+#define WCP14_DBGBXVR3(val) MCR14(val, 0, c1, c3, 1)
+#define WCP14_DBGBXVR4(val) MCR14(val, 0, c1, c4, 1)
+#define WCP14_DBGBXVR5(val) MCR14(val, 0, c1, c5, 1)
+#define WCP14_DBGBXVR6(val) MCR14(val, 0, c1, c6, 1)
+#define WCP14_DBGBXVR7(val) MCR14(val, 0, c1, c7, 1)
+#define WCP14_DBGBXVR8(val) MCR14(val, 0, c1, c8, 1)
+#define WCP14_DBGBXVR9(val) MCR14(val, 0, c1, c9, 1)
+#define WCP14_DBGBXVR10(val) MCR14(val, 0, c1, c10, 1)
+#define WCP14_DBGBXVR11(val) MCR14(val, 0, c1, c11, 1)
+#define WCP14_DBGBXVR12(val) MCR14(val, 0, c1, c12, 1)
+#define WCP14_DBGBXVR13(val) MCR14(val, 0, c1, c13, 1)
+#define WCP14_DBGBXVR14(val) MCR14(val, 0, c1, c14, 1)
+#define WCP14_DBGBXVR15(val) MCR14(val, 0, c1, c15, 1)
+#define WCP14_DBGOSLAR(val) MCR14(val, 0, c1, c0, 4)
+#define WCP14_DBGOSSRR(val) MCR14(val, 0, c1, c2, 4)
+#define WCP14_DBGOSDLR(val) MCR14(val, 0, c1, c3, 4)
+#define WCP14_DBGPRCR(val) MCR14(val, 0, c1, c4, 4)
+#define WCP14_DBGITCTRL(val) MCR14(val, 0, c7, c0, 4)
+#define WCP14_DBGCLAIMSET(val) MCR14(val, 0, c7, c8, 6)
+#define WCP14_DBGCLAIMCLR(val) MCR14(val, 0, c7, c9, 6)
+
+/*
+ * ETM Registers
+ *
+ * Available only in ETMv3.3, 3.4, 3.5
+ * ETMASICCR, ETMTECR2, ETMFFRR, ETMVDEVR, ETMVDCR1, ETMVDCR2, ETMVDCR3,
+ * ETMDCVRn, ETMDCMRn
+ *
+ * Available only in ETMv3.5 as read only
+ * ETMIDR2
+ *
+ * Available only in ETMv3.5, PFTv1.0, 1.1
+ * ETMTSEVR, ETMVMIDCVR, ETMPDCR
+ *
+ * Read only
+ * ETMCCR, ETMSCR, ETMIDR, ETMCCER, ETMOSLSR,
+ * ETMLSR, ETMAUTHSTATUS, ETMDEVID, ETMDEVTYPE, ETMPIDR4, ETMPIDR5, ETMPIDR6,
+ * ETMPIDR7, ETMPIDR0, ETMPIDR1, ETMPIDR2, ETMPIDR3, ETMCIDR0,
+ * ETMCIDR1, ETMCIDR2, ETMCIDR3
+ *
+ * Write only
+ * ETMOSLAR, ETMLAR
+ * Note: ETMCCER[11] controls the WO nature of certain regs; refer to the ETM architecture spec.
+ */
+#define RCP14_ETMCR() MRC14(1, c0, c0, 0)
+#define RCP14_ETMCCR() MRC14(1, c0, c1, 0)
+#define RCP14_ETMTRIGGER() MRC14(1, c0, c2, 0)
+#define RCP14_ETMASICCR() MRC14(1, c0, c3, 0)
+#define RCP14_ETMSR() MRC14(1, c0, c4, 0)
+#define RCP14_ETMSCR() MRC14(1, c0, c5, 0)
+#define RCP14_ETMTSSCR() MRC14(1, c0, c6, 0)
+#define RCP14_ETMTECR2() MRC14(1, c0, c7, 0)
+#define RCP14_ETMTEEVR() MRC14(1, c0, c8, 0)
+#define RCP14_ETMTECR1() MRC14(1, c0, c9, 0)
+#define RCP14_ETMFFRR() MRC14(1, c0, c10, 0)
+#define RCP14_ETMFFLR() MRC14(1, c0, c11, 0)
+#define RCP14_ETMVDEVR() MRC14(1, c0, c12, 0)
+#define RCP14_ETMVDCR1() MRC14(1, c0, c13, 0)
+#define RCP14_ETMVDCR2() MRC14(1, c0, c14, 0)
+#define RCP14_ETMVDCR3() MRC14(1, c0, c15, 0)
+#define RCP14_ETMACVR0() MRC14(1, c0, c0, 1)
+#define RCP14_ETMACVR1() MRC14(1, c0, c1, 1)
+#define RCP14_ETMACVR2() MRC14(1, c0, c2, 1)
+#define RCP14_ETMACVR3() MRC14(1, c0, c3, 1)
+#define RCP14_ETMACVR4() MRC14(1, c0, c4, 1)
+#define RCP14_ETMACVR5() MRC14(1, c0, c5, 1)
+#define RCP14_ETMACVR6() MRC14(1, c0, c6, 1)
+#define RCP14_ETMACVR7() MRC14(1, c0, c7, 1)
+#define RCP14_ETMACVR8() MRC14(1, c0, c8, 1)
+#define RCP14_ETMACVR9() MRC14(1, c0, c9, 1)
+#define RCP14_ETMACVR10() MRC14(1, c0, c10, 1)
+#define RCP14_ETMACVR11() MRC14(1, c0, c11, 1)
+#define RCP14_ETMACVR12() MRC14(1, c0, c12, 1)
+#define RCP14_ETMACVR13() MRC14(1, c0, c13, 1)
+#define RCP14_ETMACVR14() MRC14(1, c0, c14, 1)
+#define RCP14_ETMACVR15() MRC14(1, c0, c15, 1)
+#define RCP14_ETMACTR0() MRC14(1, c0, c0, 2)
+#define RCP14_ETMACTR1() MRC14(1, c0, c1, 2)
+#define RCP14_ETMACTR2() MRC14(1, c0, c2, 2)
+#define RCP14_ETMACTR3() MRC14(1, c0, c3, 2)
+#define RCP14_ETMACTR4() MRC14(1, c0, c4, 2)
+#define RCP14_ETMACTR5() MRC14(1, c0, c5, 2)
+#define RCP14_ETMACTR6() MRC14(1, c0, c6, 2)
+#define RCP14_ETMACTR7() MRC14(1, c0, c7, 2)
+#define RCP14_ETMACTR8() MRC14(1, c0, c8, 2)
+#define RCP14_ETMACTR9() MRC14(1, c0, c9, 2)
+#define RCP14_ETMACTR10() MRC14(1, c0, c10, 2)
+#define RCP14_ETMACTR11() MRC14(1, c0, c11, 2)
+#define RCP14_ETMACTR12() MRC14(1, c0, c12, 2)
+#define RCP14_ETMACTR13() MRC14(1, c0, c13, 2)
+#define RCP14_ETMACTR14() MRC14(1, c0, c14, 2)
+#define RCP14_ETMACTR15() MRC14(1, c0, c15, 2)
+#define RCP14_ETMDCVR0() MRC14(1, c0, c0, 3)
+#define RCP14_ETMDCVR2() MRC14(1, c0, c2, 3)
+#define RCP14_ETMDCVR4() MRC14(1, c0, c4, 3)
+#define RCP14_ETMDCVR6() MRC14(1, c0, c6, 3)
+#define RCP14_ETMDCVR8() MRC14(1, c0, c8, 3)
+#define RCP14_ETMDCVR10() MRC14(1, c0, c10, 3)
+#define RCP14_ETMDCVR12() MRC14(1, c0, c12, 3)
+#define RCP14_ETMDCVR14() MRC14(1, c0, c14, 3)
+#define RCP14_ETMDCMR0() MRC14(1, c0, c0, 4)
+#define RCP14_ETMDCMR2() MRC14(1, c0, c2, 4)
+#define RCP14_ETMDCMR4() MRC14(1, c0, c4, 4)
+#define RCP14_ETMDCMR6() MRC14(1, c0, c6, 4)
+#define RCP14_ETMDCMR8() MRC14(1, c0, c8, 4)
+#define RCP14_ETMDCMR10() MRC14(1, c0, c10, 4)
+#define RCP14_ETMDCMR12() MRC14(1, c0, c12, 4)
+#define RCP14_ETMDCMR14() MRC14(1, c0, c14, 4)
+#define RCP14_ETMCNTRLDVR0() MRC14(1, c0, c0, 5)
+#define RCP14_ETMCNTRLDVR1() MRC14(1, c0, c1, 5)
+#define RCP14_ETMCNTRLDVR2() MRC14(1, c0, c2, 5)
+#define RCP14_ETMCNTRLDVR3() MRC14(1, c0, c3, 5)
+#define RCP14_ETMCNTENR0() MRC14(1, c0, c4, 5)
+#define RCP14_ETMCNTENR1() MRC14(1, c0, c5, 5)
+#define RCP14_ETMCNTENR2() MRC14(1, c0, c6, 5)
+#define RCP14_ETMCNTENR3() MRC14(1, c0, c7, 5)
+#define RCP14_ETMCNTRLDEVR0() MRC14(1, c0, c8, 5)
+#define RCP14_ETMCNTRLDEVR1() MRC14(1, c0, c9, 5)
+#define RCP14_ETMCNTRLDEVR2() MRC14(1, c0, c10, 5)
+#define RCP14_ETMCNTRLDEVR3() MRC14(1, c0, c11, 5)
+#define RCP14_ETMCNTVR0() MRC14(1, c0, c12, 5)
+#define RCP14_ETMCNTVR1() MRC14(1, c0, c13, 5)
+#define RCP14_ETMCNTVR2() MRC14(1, c0, c14, 5)
+#define RCP14_ETMCNTVR3() MRC14(1, c0, c15, 5)
+#define RCP14_ETMSQ12EVR() MRC14(1, c0, c0, 6)
+#define RCP14_ETMSQ21EVR() MRC14(1, c0, c1, 6)
+#define RCP14_ETMSQ23EVR() MRC14(1, c0, c2, 6)
+#define RCP14_ETMSQ31EVR() MRC14(1, c0, c3, 6)
+#define RCP14_ETMSQ32EVR() MRC14(1, c0, c4, 6)
+#define RCP14_ETMSQ13EVR() MRC14(1, c0, c5, 6)
+#define RCP14_ETMSQR() MRC14(1, c0, c7, 6)
+#define RCP14_ETMEXTOUTEVR0() MRC14(1, c0, c8, 6)
+#define RCP14_ETMEXTOUTEVR1() MRC14(1, c0, c9, 6)
+#define RCP14_ETMEXTOUTEVR2() MRC14(1, c0, c10, 6)
+#define RCP14_ETMEXTOUTEVR3() MRC14(1, c0, c11, 6)
+#define RCP14_ETMCIDCVR0() MRC14(1, c0, c12, 6)
+#define RCP14_ETMCIDCVR1() MRC14(1, c0, c13, 6)
+#define RCP14_ETMCIDCVR2() MRC14(1, c0, c14, 6)
+#define RCP14_ETMCIDCMR() MRC14(1, c0, c15, 6)
+#define RCP14_ETMIMPSPEC0() MRC14(1, c0, c0, 7)
+#define RCP14_ETMIMPSPEC1() MRC14(1, c0, c1, 7)
+#define RCP14_ETMIMPSPEC2() MRC14(1, c0, c2, 7)
+#define RCP14_ETMIMPSPEC3() MRC14(1, c0, c3, 7)
+#define RCP14_ETMIMPSPEC4() MRC14(1, c0, c4, 7)
+#define RCP14_ETMIMPSPEC5() MRC14(1, c0, c5, 7)
+#define RCP14_ETMIMPSPEC6() MRC14(1, c0, c6, 7)
+#define RCP14_ETMIMPSPEC7() MRC14(1, c0, c7, 7)
+#define RCP14_ETMSYNCFR() MRC14(1, c0, c8, 7)
+#define RCP14_ETMIDR() MRC14(1, c0, c9, 7)
+#define RCP14_ETMCCER() MRC14(1, c0, c10, 7)
+#define RCP14_ETMEXTINSELR() MRC14(1, c0, c11, 7)
+#define RCP14_ETMTESSEICR() MRC14(1, c0, c12, 7)
+#define RCP14_ETMEIBCR() MRC14(1, c0, c13, 7)
+#define RCP14_ETMTSEVR() MRC14(1, c0, c14, 7)
+#define RCP14_ETMAUXCR() MRC14(1, c0, c15, 7)
+#define RCP14_ETMTRACEIDR() MRC14(1, c1, c0, 0)
+#define RCP14_ETMIDR2() MRC14(1, c1, c2, 0)
+#define RCP14_ETMVMIDCVR() MRC14(1, c1, c0, 1)
+#define RCP14_ETMOSLSR() MRC14(1, c1, c1, 4)
+/* Not available in PFTv1.1 */
+#define RCP14_ETMOSSRR() MRC14(1, c1, c2, 4)
+#define RCP14_ETMPDCR() MRC14(1, c1, c4, 4)
+#define RCP14_ETMPDSR() MRC14(1, c1, c5, 4)
+#define RCP14_ETMITCTRL() MRC14(1, c7, c0, 4)
+#define RCP14_ETMCLAIMSET() MRC14(1, c7, c8, 6)
+#define RCP14_ETMCLAIMCLR() MRC14(1, c7, c9, 6)
+#define RCP14_ETMLSR() MRC14(1, c7, c13, 6)
+#define RCP14_ETMAUTHSTATUS() MRC14(1, c7, c14, 6)
+#define RCP14_ETMDEVID() MRC14(1, c7, c2, 7)
+#define RCP14_ETMDEVTYPE() MRC14(1, c7, c3, 7)
+#define RCP14_ETMPIDR4() MRC14(1, c7, c4, 7)
+#define RCP14_ETMPIDR5() MRC14(1, c7, c5, 7)
+#define RCP14_ETMPIDR6() MRC14(1, c7, c6, 7)
+#define RCP14_ETMPIDR7() MRC14(1, c7, c7, 7)
+#define RCP14_ETMPIDR0() MRC14(1, c7, c8, 7)
+#define RCP14_ETMPIDR1() MRC14(1, c7, c9, 7)
+#define RCP14_ETMPIDR2() MRC14(1, c7, c10, 7)
+#define RCP14_ETMPIDR3() MRC14(1, c7, c11, 7)
+#define RCP14_ETMCIDR0() MRC14(1, c7, c12, 7)
+#define RCP14_ETMCIDR1() MRC14(1, c7, c13, 7)
+#define RCP14_ETMCIDR2() MRC14(1, c7, c14, 7)
+#define RCP14_ETMCIDR3() MRC14(1, c7, c15, 7)
+
+#define WCP14_ETMCR(val) MCR14(val, 1, c0, c0, 0)
+#define WCP14_ETMTRIGGER(val) MCR14(val, 1, c0, c2, 0)
+#define WCP14_ETMASICCR(val) MCR14(val, 1, c0, c3, 0)
+#define WCP14_ETMSR(val) MCR14(val, 1, c0, c4, 0)
+#define WCP14_ETMTSSCR(val) MCR14(val, 1, c0, c6, 0)
+#define WCP14_ETMTECR2(val) MCR14(val, 1, c0, c7, 0)
+#define WCP14_ETMTEEVR(val) MCR14(val, 1, c0, c8, 0)
+#define WCP14_ETMTECR1(val) MCR14(val, 1, c0, c9, 0)
+#define WCP14_ETMFFRR(val) MCR14(val, 1, c0, c10, 0)
+#define WCP14_ETMFFLR(val) MCR14(val, 1, c0, c11, 0)
+#define WCP14_ETMVDEVR(val) MCR14(val, 1, c0, c12, 0)
+#define WCP14_ETMVDCR1(val) MCR14(val, 1, c0, c13, 0)
+#define WCP14_ETMVDCR2(val) MCR14(val, 1, c0, c14, 0)
+#define WCP14_ETMVDCR3(val) MCR14(val, 1, c0, c15, 0)
+#define WCP14_ETMACVR0(val) MCR14(val, 1, c0, c0, 1)
+#define WCP14_ETMACVR1(val) MCR14(val, 1, c0, c1, 1)
+#define WCP14_ETMACVR2(val) MCR14(val, 1, c0, c2, 1)
+#define WCP14_ETMACVR3(val) MCR14(val, 1, c0, c3, 1)
+#define WCP14_ETMACVR4(val) MCR14(val, 1, c0, c4, 1)
+#define WCP14_ETMACVR5(val) MCR14(val, 1, c0, c5, 1)
+#define WCP14_ETMACVR6(val) MCR14(val, 1, c0, c6, 1)
+#define WCP14_ETMACVR7(val) MCR14(val, 1, c0, c7, 1)
+#define WCP14_ETMACVR8(val) MCR14(val, 1, c0, c8, 1)
+#define WCP14_ETMACVR9(val) MCR14(val, 1, c0, c9, 1)
+#define WCP14_ETMACVR10(val) MCR14(val, 1, c0, c10, 1)
+#define WCP14_ETMACVR11(val) MCR14(val, 1, c0, c11, 1)
+#define WCP14_ETMACVR12(val) MCR14(val, 1, c0, c12, 1)
+#define WCP14_ETMACVR13(val) MCR14(val, 1, c0, c13, 1)
+#define WCP14_ETMACVR14(val) MCR14(val, 1, c0, c14, 1)
+#define WCP14_ETMACVR15(val) MCR14(val, 1, c0, c15, 1)
+#define WCP14_ETMACTR0(val) MCR14(val, 1, c0, c0, 2)
+#define WCP14_ETMACTR1(val) MCR14(val, 1, c0, c1, 2)
+#define WCP14_ETMACTR2(val) MCR14(val, 1, c0, c2, 2)
+#define WCP14_ETMACTR3(val) MCR14(val, 1, c0, c3, 2)
+#define WCP14_ETMACTR4(val) MCR14(val, 1, c0, c4, 2)
+#define WCP14_ETMACTR5(val) MCR14(val, 1, c0, c5, 2)
+#define WCP14_ETMACTR6(val) MCR14(val, 1, c0, c6, 2)
+#define WCP14_ETMACTR7(val) MCR14(val, 1, c0, c7, 2)
+#define WCP14_ETMACTR8(val) MCR14(val, 1, c0, c8, 2)
+#define WCP14_ETMACTR9(val) MCR14(val, 1, c0, c9, 2)
+#define WCP14_ETMACTR10(val) MCR14(val, 1, c0, c10, 2)
+#define WCP14_ETMACTR11(val) MCR14(val, 1, c0, c11, 2)
+#define WCP14_ETMACTR12(val) MCR14(val, 1, c0, c12, 2)
+#define WCP14_ETMACTR13(val) MCR14(val, 1, c0, c13, 2)
+#define WCP14_ETMACTR14(val) MCR14(val, 1, c0, c14, 2)
+#define WCP14_ETMACTR15(val) MCR14(val, 1, c0, c15, 2)
+#define WCP14_ETMDCVR0(val) MCR14(val, 1, c0, c0, 3)
+#define WCP14_ETMDCVR2(val) MCR14(val, 1, c0, c2, 3)
+#define WCP14_ETMDCVR4(val) MCR14(val, 1, c0, c4, 3)
+#define WCP14_ETMDCVR6(val) MCR14(val, 1, c0, c6, 3)
+#define WCP14_ETMDCVR8(val) MCR14(val, 1, c0, c8, 3)
+#define WCP14_ETMDCVR10(val) MCR14(val, 1, c0, c10, 3)
+#define WCP14_ETMDCVR12(val) MCR14(val, 1, c0, c12, 3)
+#define WCP14_ETMDCVR14(val) MCR14(val, 1, c0, c14, 3)
+#define WCP14_ETMDCMR0(val) MCR14(val, 1, c0, c0, 4)
+#define WCP14_ETMDCMR2(val) MCR14(val, 1, c0, c2, 4)
+#define WCP14_ETMDCMR4(val) MCR14(val, 1, c0, c4, 4)
+#define WCP14_ETMDCMR6(val) MCR14(val, 1, c0, c6, 4)
+#define WCP14_ETMDCMR8(val) MCR14(val, 1, c0, c8, 4)
+#define WCP14_ETMDCMR10(val) MCR14(val, 1, c0, c10, 4)
+#define WCP14_ETMDCMR12(val) MCR14(val, 1, c0, c12, 4)
+#define WCP14_ETMDCMR14(val) MCR14(val, 1, c0, c14, 4)
+#define WCP14_ETMCNTRLDVR0(val) MCR14(val, 1, c0, c0, 5)
+#define WCP14_ETMCNTRLDVR1(val) MCR14(val, 1, c0, c1, 5)
+#define WCP14_ETMCNTRLDVR2(val) MCR14(val, 1, c0, c2, 5)
+#define WCP14_ETMCNTRLDVR3(val) MCR14(val, 1, c0, c3, 5)
+#define WCP14_ETMCNTENR0(val) MCR14(val, 1, c0, c4, 5)
+#define WCP14_ETMCNTENR1(val) MCR14(val, 1, c0, c5, 5)
+#define WCP14_ETMCNTENR2(val) MCR14(val, 1, c0, c6, 5)
+#define WCP14_ETMCNTENR3(val) MCR14(val, 1, c0, c7, 5)
+#define WCP14_ETMCNTRLDEVR0(val) MCR14(val, 1, c0, c8, 5)
+#define WCP14_ETMCNTRLDEVR1(val) MCR14(val, 1, c0, c9, 5)
+#define WCP14_ETMCNTRLDEVR2(val) MCR14(val, 1, c0, c10, 5)
+#define WCP14_ETMCNTRLDEVR3(val) MCR14(val, 1, c0, c11, 5)
+#define WCP14_ETMCNTVR0(val) MCR14(val, 1, c0, c12, 5)
+#define WCP14_ETMCNTVR1(val) MCR14(val, 1, c0, c13, 5)
+#define WCP14_ETMCNTVR2(val) MCR14(val, 1, c0, c14, 5)
+#define WCP14_ETMCNTVR3(val) MCR14(val, 1, c0, c15, 5)
+#define WCP14_ETMSQ12EVR(val) MCR14(val, 1, c0, c0, 6)
+#define WCP14_ETMSQ21EVR(val) MCR14(val, 1, c0, c1, 6)
+#define WCP14_ETMSQ23EVR(val) MCR14(val, 1, c0, c2, 6)
+#define WCP14_ETMSQ31EVR(val) MCR14(val, 1, c0, c3, 6)
+#define WCP14_ETMSQ32EVR(val) MCR14(val, 1, c0, c4, 6)
+#define WCP14_ETMSQ13EVR(val) MCR14(val, 1, c0, c5, 6)
+#define WCP14_ETMSQR(val) MCR14(val, 1, c0, c7, 6)
+#define WCP14_ETMEXTOUTEVR0(val) MCR14(val, 1, c0, c8, 6)
+#define WCP14_ETMEXTOUTEVR1(val) MCR14(val, 1, c0, c9, 6)
+#define WCP14_ETMEXTOUTEVR2(val) MCR14(val, 1, c0, c10, 6)
+#define WCP14_ETMEXTOUTEVR3(val) MCR14(val, 1, c0, c11, 6)
+#define WCP14_ETMCIDCVR0(val) MCR14(val, 1, c0, c12, 6)
+#define WCP14_ETMCIDCVR1(val) MCR14(val, 1, c0, c13, 6)
+#define WCP14_ETMCIDCVR2(val) MCR14(val, 1, c0, c14, 6)
+#define WCP14_ETMCIDCMR(val) MCR14(val, 1, c0, c15, 6)
+#define WCP14_ETMIMPSPEC0(val) MCR14(val, 1, c0, c0, 7)
+#define WCP14_ETMIMPSPEC1(val) MCR14(val, 1, c0, c1, 7)
+#define WCP14_ETMIMPSPEC2(val) MCR14(val, 1, c0, c2, 7)
+#define WCP14_ETMIMPSPEC3(val) MCR14(val, 1, c0, c3, 7)
+#define WCP14_ETMIMPSPEC4(val) MCR14(val, 1, c0, c4, 7)
+#define WCP14_ETMIMPSPEC5(val) MCR14(val, 1, c0, c5, 7)
+#define WCP14_ETMIMPSPEC6(val) MCR14(val, 1, c0, c6, 7)
+#define WCP14_ETMIMPSPEC7(val) MCR14(val, 1, c0, c7, 7)
+/* Can be read only in ETMv3.4, ETMv3.5 */
+#define WCP14_ETMSYNCFR(val) MCR14(val, 1, c0, c8, 7)
+#define WCP14_ETMEXTINSELR(val) MCR14(val, 1, c0, c11, 7)
+#define WCP14_ETMTESSEICR(val) MCR14(val, 1, c0, c12, 7)
+#define WCP14_ETMEIBCR(val) MCR14(val, 1, c0, c13, 7)
+#define WCP14_ETMTSEVR(val) MCR14(val, 1, c0, c14, 7)
+#define WCP14_ETMAUXCR(val) MCR14(val, 1, c0, c15, 7)
+#define WCP14_ETMTRACEIDR(val) MCR14(val, 1, c1, c0, 0)
+#define WCP14_ETMIDR2(val) MCR14(val, 1, c1, c2, 0)
+#define WCP14_ETMVMIDCVR(val) MCR14(val, 1, c1, c0, 1)
+#define WCP14_ETMOSLAR(val) MCR14(val, 1, c1, c0, 4)
+/* Not available in PFTv1.1 */
+#define WCP14_ETMOSSRR(val) MCR14(val, 1, c1, c2, 4)
+#define WCP14_ETMPDCR(val) MCR14(val, 1, c1, c4, 4)
+#define WCP14_ETMPDSR(val) MCR14(val, 1, c1, c5, 4)
+#define WCP14_ETMITCTRL(val) MCR14(val, 1, c7, c0, 4)
+#define WCP14_ETMCLAIMSET(val) MCR14(val, 1, c7, c8, 6)
+#define WCP14_ETMCLAIMCLR(val) MCR14(val, 1, c7, c9, 6)
+/* Writes to this from CP14 interface are ignored */
+#define WCP14_ETMLAR(val) MCR14(val, 1, c7, c12, 6)
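+
+/*
+ * Illustrative usage (not part of the original header): each accessor
+ * expands to a single MRC/MCR p14 instruction, so a driver can read the
+ * ETM ID register and program a (hypothetical) trace ID of 1 like this:
+ *
+ *	u32 etmidr = RCP14_ETMIDR();
+ *	WCP14_ETMTRACEIDR(1);
+ */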
+
+#endif
diff --git a/arch/arm/include/asm/hardware/dec21285.h b/arch/arm/include/asm/hardware/dec21285.h
new file mode 100644
index 0000000000..894f2a635c
--- /dev/null
+++ b/arch/arm/include/asm/hardware/dec21285.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/hardware/dec21285.h
+ *
+ * Copyright (C) 1998 Russell King
+ *
+ * DC21285 registers
+ */
+#define DC21285_PCI_IACK 0x79000000
+#define DC21285_ARMCSR_BASE 0x42000000
+#define DC21285_PCI_TYPE_0_CONFIG 0x7b000000
+#define DC21285_PCI_TYPE_1_CONFIG 0x7a000000
+#define DC21285_OUTBOUND_WRITE_FLUSH 0x78000000
+#define DC21285_FLASH 0x41000000
+#define DC21285_PCI_IO 0x7c000000
+#define DC21285_PCI_MEM 0x80000000
+
+#ifndef __ASSEMBLY__
+#include <mach/hardware.h>
+#define DC21285_IO(x) ((volatile unsigned long *)(ARMCSR_BASE+(x)))
+#else
+#define DC21285_IO(x) (x)
+#endif
+
+/*
+ * The footbridge is programmed to expose the system RAM at 0xe0000000.
+ * The requirement is that the RAM isn't placed at bus address 0, which
+ * would clash with VGA cards.
+ */
+#define BUS_OFFSET 0xe0000000
+
+#define CSR_PCICMD DC21285_IO(0x0004)
+#define CSR_CLASSREV DC21285_IO(0x0008)
+#define CSR_PCICACHELINESIZE DC21285_IO(0x000c)
+#define CSR_PCICSRBASE DC21285_IO(0x0010)
+#define CSR_PCICSRIOBASE DC21285_IO(0x0014)
+#define CSR_PCISDRAMBASE DC21285_IO(0x0018)
+#define CSR_PCIROMBASE DC21285_IO(0x0030)
+#define CSR_MBOX0 DC21285_IO(0x0050)
+#define CSR_MBOX1 DC21285_IO(0x0054)
+#define CSR_MBOX2 DC21285_IO(0x0058)
+#define CSR_MBOX3 DC21285_IO(0x005c)
+#define CSR_DOORBELL DC21285_IO(0x0060)
+#define CSR_DOORBELL_SETUP DC21285_IO(0x0064)
+#define CSR_ROMWRITEREG DC21285_IO(0x0068)
+#define CSR_CSRBASEMASK DC21285_IO(0x00f8)
+#define CSR_CSRBASEOFFSET DC21285_IO(0x00fc)
+#define CSR_SDRAMBASEMASK DC21285_IO(0x0100)
+#define CSR_SDRAMBASEOFFSET DC21285_IO(0x0104)
+#define CSR_ROMBASEMASK DC21285_IO(0x0108)
+#define CSR_SDRAMTIMING DC21285_IO(0x010c)
+#define CSR_SDRAMADDRSIZE0 DC21285_IO(0x0110)
+#define CSR_SDRAMADDRSIZE1 DC21285_IO(0x0114)
+#define CSR_SDRAMADDRSIZE2 DC21285_IO(0x0118)
+#define CSR_SDRAMADDRSIZE3 DC21285_IO(0x011c)
+#define CSR_I2O_INFREEHEAD DC21285_IO(0x0120)
+#define CSR_I2O_INPOSTTAIL DC21285_IO(0x0124)
+#define CSR_I2O_OUTPOSTHEAD DC21285_IO(0x0128)
+#define CSR_I2O_OUTFREETAIL DC21285_IO(0x012c)
+#define CSR_I2O_INFREECOUNT DC21285_IO(0x0130)
+#define CSR_I2O_OUTPOSTCOUNT DC21285_IO(0x0134)
+#define CSR_I2O_INPOSTCOUNT DC21285_IO(0x0138)
+#define CSR_SA110_CNTL DC21285_IO(0x013c)
+#define SA110_CNTL_INITCMPLETE (1 << 0)
+#define SA110_CNTL_ASSERTSERR (1 << 1)
+#define SA110_CNTL_RXSERR (1 << 3)
+#define SA110_CNTL_SA110DRAMPARITY (1 << 4)
+#define SA110_CNTL_PCISDRAMPARITY (1 << 5)
+#define SA110_CNTL_DMASDRAMPARITY (1 << 6)
+#define SA110_CNTL_DISCARDTIMER (1 << 8)
+#define SA110_CNTL_PCINRESET (1 << 9)
+#define SA110_CNTL_I2O_256 (0 << 10)
+#define SA110_CNTL_I2O_512 (1 << 10)
+#define SA110_CNTL_I2O_1024 (2 << 10)
+#define SA110_CNTL_I2O_2048 (3 << 10)
+#define SA110_CNTL_I2O_4096 (4 << 10)
+#define SA110_CNTL_I2O_8192 (5 << 10)
+#define SA110_CNTL_I2O_16384 (6 << 10)
+#define SA110_CNTL_I2O_32768 (7 << 10)
+#define SA110_CNTL_WATCHDOG (1 << 13)
+#define SA110_CNTL_ROMWIDTH_UNDEF (0 << 14)
+#define SA110_CNTL_ROMWIDTH_16 (1 << 14)
+#define SA110_CNTL_ROMWIDTH_32 (2 << 14)
+#define SA110_CNTL_ROMWIDTH_8 (3 << 14)
+#define SA110_CNTL_ROMACCESSTIME(x) ((x)<<16)
+#define SA110_CNTL_ROMBURSTTIME(x) ((x)<<20)
+#define SA110_CNTL_ROMTRISTATETIME(x) ((x)<<24)
+#define SA110_CNTL_XCSDIR(x) ((x)<<28)
+#define SA110_CNTL_PCICFN (1 << 31)
+
+#define CSR_PCIADDR_EXTN DC21285_IO(0x0140)
+#define CSR_PREFETCHMEMRANGE DC21285_IO(0x0144)
+#define CSR_XBUS_CYCLE DC21285_IO(0x0148)
+#define CSR_XBUS_IOSTROBE DC21285_IO(0x014c)
+#define CSR_DOORBELL_PCI DC21285_IO(0x0150)
+#define CSR_DOORBELL_SA110 DC21285_IO(0x0154)
+#define CSR_UARTDR DC21285_IO(0x0160)
+#define CSR_RXSTAT DC21285_IO(0x0164)
+#define CSR_H_UBRLCR DC21285_IO(0x0168)
+#define CSR_M_UBRLCR DC21285_IO(0x016c)
+#define CSR_L_UBRLCR DC21285_IO(0x0170)
+#define CSR_UARTCON DC21285_IO(0x0174)
+#define CSR_UARTFLG DC21285_IO(0x0178)
+#define CSR_IRQ_STATUS DC21285_IO(0x0180)
+#define CSR_IRQ_RAWSTATUS DC21285_IO(0x0184)
+#define CSR_IRQ_ENABLE DC21285_IO(0x0188)
+#define CSR_IRQ_DISABLE DC21285_IO(0x018c)
+#define CSR_IRQ_SOFT DC21285_IO(0x0190)
+#define CSR_FIQ_STATUS DC21285_IO(0x0280)
+#define CSR_FIQ_RAWSTATUS DC21285_IO(0x0284)
+#define CSR_FIQ_ENABLE DC21285_IO(0x0288)
+#define CSR_FIQ_DISABLE DC21285_IO(0x028c)
+#define CSR_FIQ_SOFT DC21285_IO(0x0290)
+#define CSR_TIMER1_LOAD DC21285_IO(0x0300)
+#define CSR_TIMER1_VALUE DC21285_IO(0x0304)
+#define CSR_TIMER1_CNTL DC21285_IO(0x0308)
+#define CSR_TIMER1_CLR DC21285_IO(0x030c)
+#define CSR_TIMER2_LOAD DC21285_IO(0x0320)
+#define CSR_TIMER2_VALUE DC21285_IO(0x0324)
+#define CSR_TIMER2_CNTL DC21285_IO(0x0328)
+#define CSR_TIMER2_CLR DC21285_IO(0x032c)
+#define CSR_TIMER3_LOAD DC21285_IO(0x0340)
+#define CSR_TIMER3_VALUE DC21285_IO(0x0344)
+#define CSR_TIMER3_CNTL DC21285_IO(0x0348)
+#define CSR_TIMER3_CLR DC21285_IO(0x034c)
+#define CSR_TIMER4_LOAD DC21285_IO(0x0360)
+#define CSR_TIMER4_VALUE DC21285_IO(0x0364)
+#define CSR_TIMER4_CNTL DC21285_IO(0x0368)
+#define CSR_TIMER4_CLR DC21285_IO(0x036c)
+
+#define TIMER_CNTL_ENABLE (1 << 7)
+#define TIMER_CNTL_AUTORELOAD (1 << 6)
+#define TIMER_CNTL_DIV1 (0)
+#define TIMER_CNTL_DIV16 (1 << 2)
+#define TIMER_CNTL_DIV256 (2 << 2)
+#define TIMER_CNTL_CNTEXT (3 << 2)
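+
+/*
+ * Illustrative sketch (not part of the original header): programming
+ * TIMER1 as a periodic timer clocked at fclk/16.  "interval" is a
+ * hypothetical reload value chosen by the caller.
+ *
+ *	*CSR_TIMER1_CLR  = 0;
+ *	*CSR_TIMER1_LOAD = interval;
+ *	*CSR_TIMER1_CNTL = TIMER_CNTL_ENABLE | TIMER_CNTL_AUTORELOAD |
+ *			   TIMER_CNTL_DIV16;
+ */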
+
+
diff --git a/arch/arm/include/asm/hardware/ioc.h b/arch/arm/include/asm/hardware/ioc.h
new file mode 100644
index 0000000000..6edd27fcd0
--- /dev/null
+++ b/arch/arm/include/asm/hardware/ioc.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/hardware/ioc.h
+ *
+ * Copyright (C) Russell King
+ *
+ * Use these macros to read/write the IOC. All they do is perform the actual
+ * read/write.
+ */
+#ifndef __ASMARM_HARDWARE_IOC_H
+#define __ASMARM_HARDWARE_IOC_H
+
+#ifndef __ASSEMBLY__
+
+/*
+ * We use __raw_base variants here so that we give the compiler the
+ * chance to keep IOC_BASE in a register.
+ */
+#define ioc_readb(off) __raw_readb(IOC_BASE + (off))
+#define ioc_writeb(val,off) __raw_writeb(val, IOC_BASE + (off))
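+
+/*
+ * Illustrative usage (not part of the original header), masking IRQ A
+ * source 0 using the register offsets defined below:
+ *
+ *	unsigned int mask = ioc_readb(IOC_IRQMASKA);
+ *	ioc_writeb(mask & ~1, IOC_IRQMASKA);
+ */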
+
+#endif
+
+#define IOC_CONTROL (0x00)
+#define IOC_KARTTX (0x04)
+#define IOC_KARTRX (0x04)
+
+#define IOC_IRQSTATA (0x10)
+#define IOC_IRQREQA (0x14)
+#define IOC_IRQCLRA (0x14)
+#define IOC_IRQMASKA (0x18)
+
+#define IOC_IRQSTATB (0x20)
+#define IOC_IRQREQB (0x24)
+#define IOC_IRQMASKB (0x28)
+
+#define IOC_FIQSTAT (0x30)
+#define IOC_FIQREQ (0x34)
+#define IOC_FIQMASK (0x38)
+
+#define IOC_T0CNTL (0x40)
+#define IOC_T0LTCHL (0x40)
+#define IOC_T0CNTH (0x44)
+#define IOC_T0LTCHH (0x44)
+#define IOC_T0GO (0x48)
+#define IOC_T0LATCH (0x4c)
+
+#define IOC_T1CNTL (0x50)
+#define IOC_T1LTCHL (0x50)
+#define IOC_T1CNTH (0x54)
+#define IOC_T1LTCHH (0x54)
+#define IOC_T1GO (0x58)
+#define IOC_T1LATCH (0x5c)
+
+#define IOC_T2CNTL (0x60)
+#define IOC_T2LTCHL (0x60)
+#define IOC_T2CNTH (0x64)
+#define IOC_T2LTCHH (0x64)
+#define IOC_T2GO (0x68)
+#define IOC_T2LATCH (0x6c)
+
+#define IOC_T3CNTL (0x70)
+#define IOC_T3LTCHL (0x70)
+#define IOC_T3CNTH (0x74)
+#define IOC_T3LTCHH (0x74)
+#define IOC_T3GO (0x78)
+#define IOC_T3LATCH (0x7c)
+
+#endif
diff --git a/arch/arm/include/asm/hardware/iomd.h b/arch/arm/include/asm/hardware/iomd.h
new file mode 100644
index 0000000000..53006ba535
--- /dev/null
+++ b/arch/arm/include/asm/hardware/iomd.h
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/hardware/iomd.h
+ *
+ * Copyright (C) 1999 Russell King
+ *
+ * This file contains information about the IOMD ASIC used in the
+ * Acorn RiscPC and subsequently integrated into the CLPS7500 chips.
+ */
+#ifndef __ASMARM_HARDWARE_IOMD_H
+#define __ASMARM_HARDWARE_IOMD_H
+
+
+#ifndef __ASSEMBLY__
+
+/*
+ * We use __raw_base variants here so that we give the compiler the
+ * chance to keep IOMD_BASE in a register.
+ */
+#define iomd_readb(off) __raw_readb(IOMD_BASE + (off))
+#define iomd_readl(off) __raw_readl(IOMD_BASE + (off))
+#define iomd_writeb(val,off) __raw_writeb(val, IOMD_BASE + (off))
+#define iomd_writel(val,off) __raw_writel(val, IOMD_BASE + (off))
+
+#endif
+
+#define IOMD_CONTROL (0x000)
+#define IOMD_KARTTX (0x004)
+#define IOMD_KARTRX (0x004)
+#define IOMD_KCTRL (0x008)
+
+#define IOMD_IRQSTATA (0x010)
+#define IOMD_IRQREQA (0x014)
+#define IOMD_IRQCLRA (0x014)
+#define IOMD_IRQMASKA (0x018)
+
+#define IOMD_IRQSTATB (0x020)
+#define IOMD_IRQREQB (0x024)
+#define IOMD_IRQMASKB (0x028)
+
+#define IOMD_FIQSTAT (0x030)
+#define IOMD_FIQREQ (0x034)
+#define IOMD_FIQMASK (0x038)
+
+#define IOMD_T0CNTL (0x040)
+#define IOMD_T0LTCHL (0x040)
+#define IOMD_T0CNTH (0x044)
+#define IOMD_T0LTCHH (0x044)
+#define IOMD_T0GO (0x048)
+#define IOMD_T0LATCH (0x04c)
+
+#define IOMD_T1CNTL (0x050)
+#define IOMD_T1LTCHL (0x050)
+#define IOMD_T1CNTH (0x054)
+#define IOMD_T1LTCHH (0x054)
+#define IOMD_T1GO (0x058)
+#define IOMD_T1LATCH (0x05c)
+
+#define IOMD_ROMCR0 (0x080)
+#define IOMD_ROMCR1 (0x084)
+#ifdef CONFIG_ARCH_RPC
+#define IOMD_DRAMCR (0x088)
+#endif
+#define IOMD_REFCR (0x08C)
+
+#define IOMD_FSIZE (0x090)
+#define IOMD_ID0 (0x094)
+#define IOMD_ID1 (0x098)
+#define IOMD_VERSION (0x09C)
+
+#ifdef CONFIG_ARCH_RPC
+#define IOMD_MOUSEX (0x0A0)
+#define IOMD_MOUSEY (0x0A4)
+#endif
+
+#ifdef CONFIG_ARCH_RPC
+#define IOMD_DMATCR (0x0C0)
+#endif
+#define IOMD_IOTCR (0x0C4)
+#define IOMD_ECTCR (0x0C8)
+#ifdef CONFIG_ARCH_RPC
+#define IOMD_DMAEXT (0x0CC)
+#endif
+
+#ifdef CONFIG_ARCH_RPC
+#define DMA_EXT_IO0 1
+#define DMA_EXT_IO1 2
+#define DMA_EXT_IO2 4
+#define DMA_EXT_IO3 8
+
+#define IOMD_IO0CURA (0x100)
+#define IOMD_IO0ENDA (0x104)
+#define IOMD_IO0CURB (0x108)
+#define IOMD_IO0ENDB (0x10C)
+#define IOMD_IO0CR (0x110)
+#define IOMD_IO0ST (0x114)
+
+#define IOMD_IO1CURA (0x120)
+#define IOMD_IO1ENDA (0x124)
+#define IOMD_IO1CURB (0x128)
+#define IOMD_IO1ENDB (0x12C)
+#define IOMD_IO1CR (0x130)
+#define IOMD_IO1ST (0x134)
+
+#define IOMD_IO2CURA (0x140)
+#define IOMD_IO2ENDA (0x144)
+#define IOMD_IO2CURB (0x148)
+#define IOMD_IO2ENDB (0x14C)
+#define IOMD_IO2CR (0x150)
+#define IOMD_IO2ST (0x154)
+
+#define IOMD_IO3CURA (0x160)
+#define IOMD_IO3ENDA (0x164)
+#define IOMD_IO3CURB (0x168)
+#define IOMD_IO3ENDB (0x16C)
+#define IOMD_IO3CR (0x170)
+#define IOMD_IO3ST (0x174)
+#endif
+
+#define IOMD_SD0CURA (0x180)
+#define IOMD_SD0ENDA (0x184)
+#define IOMD_SD0CURB (0x188)
+#define IOMD_SD0ENDB (0x18C)
+#define IOMD_SD0CR (0x190)
+#define IOMD_SD0ST (0x194)
+
+#ifdef CONFIG_ARCH_RPC
+#define IOMD_SD1CURA (0x1A0)
+#define IOMD_SD1ENDA (0x1A4)
+#define IOMD_SD1CURB (0x1A8)
+#define IOMD_SD1ENDB (0x1AC)
+#define IOMD_SD1CR (0x1B0)
+#define IOMD_SD1ST (0x1B4)
+#endif
+
+#define IOMD_CURSCUR (0x1C0)
+#define IOMD_CURSINIT (0x1C4)
+
+#define IOMD_VIDCUR (0x1D0)
+#define IOMD_VIDEND (0x1D4)
+#define IOMD_VIDSTART (0x1D8)
+#define IOMD_VIDINIT (0x1DC)
+#define IOMD_VIDCR (0x1E0)
+
+#define IOMD_DMASTAT (0x1F0)
+#define IOMD_DMAREQ (0x1F4)
+#define IOMD_DMAMASK (0x1F8)
+
+#define DMA_END_S (1 << 31)
+#define DMA_END_L (1 << 30)
+
+#define DMA_CR_C 0x80
+#define DMA_CR_D 0x40
+#define DMA_CR_E 0x20
+
+#define DMA_ST_OFL 4
+#define DMA_ST_INT 2
+#define DMA_ST_AB 1
+
+/*
+ * DMA (MEMC) compatibility
+ */
+#define HALF_SAM vram_half_sam
+#define VDMA_ALIGNMENT (HALF_SAM * 2)
+#define VDMA_XFERSIZE (HALF_SAM)
+#define VDMA_INIT IOMD_VIDINIT
+#define VDMA_START IOMD_VIDSTART
+#define VDMA_END IOMD_VIDEND
+
+#ifndef __ASSEMBLY__
+extern unsigned int vram_half_sam;
+#define video_set_dma(start,end,offset) \
+do { \
+ outl (SCREEN_START + start, VDMA_START); \
+ outl (SCREEN_START + end - VDMA_XFERSIZE, VDMA_END); \
+ if (offset >= end - VDMA_XFERSIZE) \
+ offset |= 0x40000000; \
+ outl (SCREEN_START + offset, VDMA_INIT); \
+} while (0)
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/hardware/locomo.h b/arch/arm/include/asm/hardware/locomo.h
new file mode 100644
index 0000000000..aaaedafef7
--- /dev/null
+++ b/arch/arm/include/asm/hardware/locomo.h
@@ -0,0 +1,219 @@
+/*
+ * arch/arm/include/asm/hardware/locomo.h
+ *
+ * This file contains the definitions for the LoCoMo G/A Chip
+ *
+ * (C) Copyright 2004 John Lenz
+ *
+ * May be copied or modified under the terms of the GNU General Public
+ * License. See linux/COPYING for more information.
+ *
+ * Based on sa1111.h
+ */
+#ifndef _ASM_ARCH_LOCOMO
+#define _ASM_ARCH_LOCOMO
+
+#define locomo_writel(val,addr) ({ *(volatile u16 *)(addr) = (val); })
+#define locomo_readl(addr) (*(volatile u16 *)(addr))
+
+/* LOCOMO version */
+#define LOCOMO_VER 0x00
+
+/* Pin status */
+#define LOCOMO_ST 0x04
+
+/* 32 kHz clock control */
+#define LOCOMO_C32K 0x08
+
+/* Interrupt controller */
+#define LOCOMO_ICR 0x0C
+
+/* MCS decoder for boot selecting */
+#define LOCOMO_MCSX0 0x10
+#define LOCOMO_MCSX1 0x14
+#define LOCOMO_MCSX2 0x18
+#define LOCOMO_MCSX3 0x1c
+
+/* Touch panel controller */
+#define LOCOMO_ASD 0x20 /* AD start delay */
+#define LOCOMO_HSD 0x28 /* HSYS delay */
+#define LOCOMO_HSC 0x2c /* HSYS period */
+#define LOCOMO_TADC 0x30 /* tablet ADC clock */
+
+
+/* Long time timer */
+#define LOCOMO_LTC 0xd8 /* LTC interrupt setting */
+#define LOCOMO_LTINT 0xdc /* LTC interrupt */
+
+/* DAC control signal for LCD (COMADJ) */
+#define LOCOMO_DAC 0xe0
+/* DAC control */
+#define LOCOMO_DAC_SCLOEB 0x08 /* SCL pin output data */
+#define LOCOMO_DAC_TEST 0x04 /* Test bit */
+#define LOCOMO_DAC_SDA 0x02 /* SDA pin level (read-only) */
+#define LOCOMO_DAC_SDAOEB 0x01 /* SDA pin output data */
+
+/* SPI interface */
+#define LOCOMO_SPI 0x60
+#define LOCOMO_SPIMD 0x00 /* SPI mode setting */
+#define LOCOMO_SPICT 0x04 /* SPI mode control */
+#define LOCOMO_SPIST 0x08 /* SPI status */
+#define LOCOMO_SPI_TEND (1 << 3) /* Transfer end bit */
+#define LOCOMO_SPI_REND (1 << 2) /* Receive end bit */
+#define LOCOMO_SPI_RFW (1 << 1) /* write buffer bit */
+#define LOCOMO_SPI_RFR (1) /* read buffer bit */
+
+#define LOCOMO_SPIIS 0x10 /* SPI interrupt status */
+#define LOCOMO_SPIWE 0x14 /* SPI interrupt status write enable */
+#define LOCOMO_SPIIE 0x18 /* SPI interrupt enable */
+#define LOCOMO_SPIIR 0x1c /* SPI interrupt request */
+#define LOCOMO_SPITD 0x20 /* SPI transfer data write */
+#define LOCOMO_SPIRD 0x24 /* SPI receive data read */
+#define LOCOMO_SPITS 0x28 /* SPI transfer data shift */
+#define LOCOMO_SPIRS 0x2C /* SPI receive data shift */
+
+/* GPIO */
+#define LOCOMO_GPD 0x90 /* GPIO direction */
+#define LOCOMO_GPE 0x94 /* GPIO input enable */
+#define LOCOMO_GPL 0x98 /* GPIO level */
+#define LOCOMO_GPO 0x9c /* GPIO out data setting */
+#define LOCOMO_GRIE 0xa0 /* GPIO rise detection */
+#define LOCOMO_GFIE 0xa4 /* GPIO fall detection */
+#define LOCOMO_GIS 0xa8 /* GPIO edge detection status */
+#define LOCOMO_GWE 0xac /* GPIO status write enable */
+#define LOCOMO_GIE 0xb0 /* GPIO interrupt enable */
+#define LOCOMO_GIR 0xb4 /* GPIO interrupt request */
+#define LOCOMO_GPIO(Nb) (0x01 << (Nb))
+#define LOCOMO_GPIO_RTS LOCOMO_GPIO(0)
+#define LOCOMO_GPIO_CTS LOCOMO_GPIO(1)
+#define LOCOMO_GPIO_DSR LOCOMO_GPIO(2)
+#define LOCOMO_GPIO_DTR LOCOMO_GPIO(3)
+#define LOCOMO_GPIO_LCD_VSHA_ON LOCOMO_GPIO(4)
+#define LOCOMO_GPIO_LCD_VSHD_ON LOCOMO_GPIO(5)
+#define LOCOMO_GPIO_LCD_VEE_ON LOCOMO_GPIO(6)
+#define LOCOMO_GPIO_LCD_MOD LOCOMO_GPIO(7)
+#define LOCOMO_GPIO_DAC_ON LOCOMO_GPIO(8)
+#define LOCOMO_GPIO_FL_VR LOCOMO_GPIO(9)
+#define LOCOMO_GPIO_DAC_SDATA LOCOMO_GPIO(10)
+#define LOCOMO_GPIO_DAC_SCK LOCOMO_GPIO(11)
+#define LOCOMO_GPIO_DAC_SLOAD LOCOMO_GPIO(12)
+#define LOCOMO_GPIO_CARD_DETECT LOCOMO_GPIO(13)
+#define LOCOMO_GPIO_WRITE_PROT LOCOMO_GPIO(14)
+#define LOCOMO_GPIO_CARD_POWER LOCOMO_GPIO(15)
+
+/* Start the definitions of the devices. Each device has an initial
+ * base address and a series of offsets from that base address. */
+
+/* Keyboard controller */
+#define LOCOMO_KEYBOARD 0x40
+#define LOCOMO_KIB 0x00 /* KIB level */
+#define LOCOMO_KSC 0x04 /* KSTRB control */
+#define LOCOMO_KCMD 0x08 /* KSTRB command */
+#define LOCOMO_KIC 0x0c /* Key interrupt */
+
+/* Front light adjustment controller */
+#define LOCOMO_FRONTLIGHT 0xc8
+#define LOCOMO_ALS 0x00 /* Adjust light cycle */
+#define LOCOMO_ALD 0x04 /* Adjust light duty */
+
+#define LOCOMO_ALC_EN 0x8000
+
+/* Backlight controller: TFT signal */
+#define LOCOMO_BACKLIGHT 0x38
+#define LOCOMO_TC 0x00 /* TFT control signal */
+#define LOCOMO_CPSD 0x04 /* CPS delay */
+
+/* Audio controller */
+#define LOCOMO_AUDIO 0x54
+#define LOCOMO_ACC 0x00 /* Audio clock */
+#define LOCOMO_PAIF 0xD0 /* PCM audio interface */
+/* Audio clock */
+#define LOCOMO_ACC_XON 0x80
+#define LOCOMO_ACC_XEN 0x40
+#define LOCOMO_ACC_XSEL0 0x00
+#define LOCOMO_ACC_XSEL1 0x20
+#define LOCOMO_ACC_MCLKEN 0x10
+#define LOCOMO_ACC_64FSEN 0x08
+#define LOCOMO_ACC_CLKSEL000 0x00 /* mclk 2 */
+#define LOCOMO_ACC_CLKSEL001 0x01 /* mclk 3 */
+#define LOCOMO_ACC_CLKSEL010 0x02 /* mclk 4 */
+#define LOCOMO_ACC_CLKSEL011 0x03 /* mclk 6 */
+#define LOCOMO_ACC_CLKSEL100 0x04 /* mclk 8 */
+#define LOCOMO_ACC_CLKSEL101 0x05 /* mclk 12 */
+/* PCM audio interface */
+#define LOCOMO_PAIF_SCINV 0x20
+#define LOCOMO_PAIF_SCEN 0x10
+#define LOCOMO_PAIF_LRCRST 0x08
+#define LOCOMO_PAIF_LRCEVE 0x04
+#define LOCOMO_PAIF_LRCINV 0x02
+#define LOCOMO_PAIF_LRCEN 0x01
+
+/* LED controller */
+#define LOCOMO_LED 0xe8
+#define LOCOMO_LPT0 0x00
+#define LOCOMO_LPT1 0x04
+/* LED control */
+#define LOCOMO_LPT_TOFH 0x80
+#define LOCOMO_LPT_TOFL 0x08
+#define LOCOMO_LPT_TOH(TOH) ((TOH & 0x7) << 4)
+#define LOCOMO_LPT_TOL(TOL) ((TOL & 0x7))
+
+extern struct bus_type locomo_bus_type;
+
+#define LOCOMO_DEVID_KEYBOARD 0
+#define LOCOMO_DEVID_FRONTLIGHT 1
+#define LOCOMO_DEVID_BACKLIGHT 2
+#define LOCOMO_DEVID_AUDIO 3
+#define LOCOMO_DEVID_LED 4
+#define LOCOMO_DEVID_UART 5
+#define LOCOMO_DEVID_SPI 6
+
+struct locomo_dev {
+ struct device dev;
+ unsigned int devid;
+ unsigned int irq[1];
+
+ void *mapbase;
+ unsigned long length;
+
+ u64 dma_mask;
+};
+
+#define LOCOMO_DEV(_d) container_of((_d), struct locomo_dev, dev)
+
+#define locomo_get_drvdata(d) dev_get_drvdata(&(d)->dev)
+#define locomo_set_drvdata(d,p) dev_set_drvdata(&(d)->dev, p)
+
+struct locomo_driver {
+ struct device_driver drv;
+ unsigned int devid;
+ int (*probe)(struct locomo_dev *);
+ void (*remove)(struct locomo_dev *);
+};
+
+#define LOCOMO_DRV(_d) container_of((_d), struct locomo_driver, drv)
+
+#define LOCOMO_DRIVER_NAME(_ldev) ((_ldev)->dev.driver->name)
+
+extern void locomolcd_power(int on);
+
+int locomo_driver_register(struct locomo_driver *);
+void locomo_driver_unregister(struct locomo_driver *);
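+
+/*
+ * Illustrative sketch (not part of the original header): a minimal driver
+ * for one of the LoCoMo child devices above.  my_probe/my_remove are
+ * hypothetical.
+ *
+ *	static struct locomo_driver my_driver = {
+ *		.drv	= { .name = "my-locomo-led" },
+ *		.devid	= LOCOMO_DEVID_LED,
+ *		.probe	= my_probe,
+ *		.remove	= my_remove,
+ *	};
+ *
+ *	locomo_driver_register(&my_driver);
+ */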
+
+/* GPIO control functions */
+void locomo_gpio_set_dir(struct device *dev, unsigned int bits, unsigned int dir);
+int locomo_gpio_read_level(struct device *dev, unsigned int bits);
+int locomo_gpio_read_output(struct device *dev, unsigned int bits);
+void locomo_gpio_write(struct device *dev, unsigned int bits, unsigned int set);
+
+/* M62332 control function */
+void locomo_m62332_senddata(struct locomo_dev *ldev, unsigned int dac_data, int channel);
+
+/* Frontlight control */
+void locomo_frontlight_set(struct locomo_dev *dev, int duty, int vr, int bpwf);
+
+struct locomo_platform_data {
+ int irq_base; /* IRQ base for cascaded on-chip IRQs */
+};
+
+#endif
diff --git a/arch/arm/include/asm/hardware/memc.h b/arch/arm/include/asm/hardware/memc.h
new file mode 100644
index 0000000000..1d4ebe0a96
--- /dev/null
+++ b/arch/arm/include/asm/hardware/memc.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/hardware/memc.h
+ *
+ * Copyright (C) Russell King.
+ */
+#define VDMA_ALIGNMENT PAGE_SIZE
+#define VDMA_XFERSIZE 16
+#define VDMA_INIT 0
+#define VDMA_START 1
+#define VDMA_END 2
+
+#ifndef __ASSEMBLY__
+extern void memc_write(unsigned int reg, unsigned long val);
+
+#define video_set_dma(start,end,offset) \
+do { \
+ memc_write (VDMA_START, (start >> 2)); \
+ memc_write (VDMA_END, (end - VDMA_XFERSIZE) >> 2); \
+ memc_write (VDMA_INIT, (offset >> 2)); \
+} while (0)
+
+#endif
diff --git a/arch/arm/include/asm/hardware/sa1111.h b/arch/arm/include/asm/hardware/sa1111.h
new file mode 100644
index 0000000000..d8c6f8a99d
--- /dev/null
+++ b/arch/arm/include/asm/hardware/sa1111.h
@@ -0,0 +1,442 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * arch/arm/include/asm/hardware/sa1111.h
+ *
+ * Copyright (C) 2000 John G Dorsey <john+@cs.cmu.edu>
+ *
+ * This file contains definitions for the SA-1111 Companion Chip.
+ * (Structure and naming borrowed from SA-1101.h, by Peter Danielsson.)
+ *
+ * Macros below calculate the real addresses of registers in the SA-1111.
+ */
+
+#ifndef _ASM_ARCH_SA1111
+#define _ASM_ARCH_SA1111
+
+/*
+ * Don't ask the (SAC) DMA engines to move less than this amount.
+ */
+
+#define SA1111_SAC_DMA_MIN_XFER (0x800)
+
+/*
+ * System Bus Interface (SBI)
+ *
+ * Registers
+ * SKCR Control Register
+ * SMCR Shared Memory Controller Register
+ * SKID ID Register
+ */
+#define SA1111_SKCR 0x0000
+#define SA1111_SMCR 0x0004
+#define SA1111_SKID 0x0008
+
+#define SKCR_PLL_BYPASS (1<<0)
+#define SKCR_RCLKEN (1<<1)
+#define SKCR_SLEEP (1<<2)
+#define SKCR_DOZE (1<<3)
+#define SKCR_VCO_OFF (1<<4)
+#define SKCR_SCANTSTEN (1<<5)
+#define SKCR_CLKTSTEN (1<<6)
+#define SKCR_RDYEN (1<<7)
+#define SKCR_SELAC (1<<8)
+#define SKCR_OPPC (1<<9)
+#define SKCR_PLLTSTEN (1<<10)
+#define SKCR_USBIOTSTEN (1<<11)
+/*
+ * Don't believe the specs! Take them, throw them outside. Leave them
+ * there for a week. Spit on them. Walk on them. Stamp on them.
+ * Pour gasoline over them and finally burn them. Now think about coding.
+ * - The October 1999 errata (278260-007) says it's bit 13, 1 to enable.
+ * - The Feb 2001 errata (278260-010) says that the previous errata
+ *   (278260-009) is wrong, and it's actually bit 12, fixed in spec
+ *   278242-003.
+ * - The SA1111 manual (278242) says bit 12, but 0 to enable.
+ * - Reality is bit 13, 1 to enable.
+ * -- rmk
+ */
+#define SKCR_OE_EN (1<<13)
+
+#define SMCR_DTIM (1<<0)
+#define SMCR_MBGE (1<<1)
+#define SMCR_DRAC_0 (1<<2)
+#define SMCR_DRAC_1 (1<<3)
+#define SMCR_DRAC_2 (1<<4)
+#define SMCR_DRAC Fld(3, 2)
+#define SMCR_CLAT (1<<5)
+
+#define SKID_SIREV_MASK (0x000000f0)
+#define SKID_MTREV_MASK (0x0000000f)
+#define SKID_ID_MASK (0xffffff00)
+#define SKID_SA1111_ID (0x690cc200)
+
+/*
+ * System Controller
+ *
+ * Registers
+ * SKPCR Power Control Register
+ * SKCDR Clock Divider Register
+ * SKAUD Audio Clock Divider Register
+ * SKPMC PS/2 Mouse Clock Divider Register
+ * SKPTC PS/2 Track Pad Clock Divider Register
+ * SKPEN0 PWM0 Enable Register
+ * SKPWM0 PWM0 Clock Register
+ * SKPEN1 PWM1 Enable Register
+ * SKPWM1 PWM1 Clock Register
+ */
+#define SA1111_SKPCR 0x0200
+#define SA1111_SKCDR 0x0204
+#define SA1111_SKAUD 0x0208
+#define SA1111_SKPMC 0x020c
+#define SA1111_SKPTC 0x0210
+#define SA1111_SKPEN0 0x0214
+#define SA1111_SKPWM0 0x0218
+#define SA1111_SKPEN1 0x021c
+#define SA1111_SKPWM1 0x0220
+
+#define SKPCR_UCLKEN (1<<0)
+#define SKPCR_ACCLKEN (1<<1)
+#define SKPCR_I2SCLKEN (1<<2)
+#define SKPCR_L3CLKEN (1<<3)
+#define SKPCR_SCLKEN (1<<4)
+#define SKPCR_PMCLKEN (1<<5)
+#define SKPCR_PTCLKEN (1<<6)
+#define SKPCR_DCLKEN (1<<7)
+#define SKPCR_PWMCLKEN (1<<8)
+
+/* USB Host controller */
+#define SA1111_USB 0x0400
+
+/*
+ * Serial Audio Controller
+ *
+ * Registers
+ * SACR0 Serial Audio Common Control Register
+ * SACR1 Serial Audio Alternate Mode (I2C/MSB) Control Register
+ * SACR2 Serial Audio AC-link Control Register
+ * SASR0 Serial Audio I2S/MSB Interface & FIFO Status Register
+ * SASR1 Serial Audio AC-link Interface & FIFO Status Register
+ * SASCR Serial Audio Status Clear Register
+ * L3_CAR L3 Control Bus Address Register
+ * L3_CDR L3 Control Bus Data Register
+ * ACCAR AC-link Command Address Register
+ * ACCDR AC-link Command Data Register
+ * ACSAR AC-link Status Address Register
+ * ACSDR AC-link Status Data Register
+ * SADTCS Serial Audio DMA Transmit Control/Status Register
+ * SADTSA Serial Audio DMA Transmit Buffer Start Address A
+ * SADTCA Serial Audio DMA Transmit Buffer Count Register A
+ * SADTSB Serial Audio DMA Transmit Buffer Start Address B
+ * SADTCB Serial Audio DMA Transmit Buffer Count Register B
+ * SADRCS Serial Audio DMA Receive Control/Status Register
+ * SADRSA Serial Audio DMA Receive Buffer Start Address A
+ * SADRCA Serial Audio DMA Receive Buffer Count Register A
+ * SADRSB Serial Audio DMA Receive Buffer Start Address B
+ * SADRCB Serial Audio DMA Receive Buffer Count Register B
+ * SAITR Serial Audio Interrupt Test Register
+ * SADR Serial Audio Data Register (16 x 32-bit)
+ */
+
+#define SA1111_SERAUDIO 0x0600
+
+/*
+ * These are offsets from the above base.
+ */
+#define SA1111_SACR0 0x00
+#define SA1111_SACR1 0x04
+#define SA1111_SACR2 0x08
+#define SA1111_SASR0 0x0c
+#define SA1111_SASR1 0x10
+#define SA1111_SASCR 0x18
+#define SA1111_L3_CAR 0x1c
+#define SA1111_L3_CDR 0x20
+#define SA1111_ACCAR 0x24
+#define SA1111_ACCDR 0x28
+#define SA1111_ACSAR 0x2c
+#define SA1111_ACSDR 0x30
+#define SA1111_SADTCS 0x34
+#define SA1111_SADTSA 0x38
+#define SA1111_SADTCA 0x3c
+#define SA1111_SADTSB 0x40
+#define SA1111_SADTCB 0x44
+#define SA1111_SADRCS 0x48
+#define SA1111_SADRSA 0x4c
+#define SA1111_SADRCA 0x50
+#define SA1111_SADRSB 0x54
+#define SA1111_SADRCB 0x58
+#define SA1111_SAITR 0x5c
+#define SA1111_SADR 0x80
+
+#ifndef CONFIG_ARCH_PXA
+
+#define SACR0_ENB (1<<0)
+#define SACR0_BCKD (1<<2)
+#define SACR0_RST (1<<3)
+
+#define SACR1_AMSL (1<<0)
+#define SACR1_L3EN (1<<1)
+#define SACR1_L3MB (1<<2)
+#define SACR1_DREC (1<<3)
+#define SACR1_DRPL (1<<4)
+#define SACR1_ENLBF (1<<5)
+
+#define SACR2_TS3V (1<<0)
+#define SACR2_TS4V (1<<1)
+#define SACR2_WKUP (1<<2)
+#define SACR2_DREC (1<<3)
+#define SACR2_DRPL (1<<4)
+#define SACR2_ENLBF (1<<5)
+#define SACR2_RESET (1<<6)
+
+#define SASR0_TNF (1<<0)
+#define SASR0_RNE (1<<1)
+#define SASR0_BSY (1<<2)
+#define SASR0_TFS (1<<3)
+#define SASR0_RFS (1<<4)
+#define SASR0_TUR (1<<5)
+#define SASR0_ROR (1<<6)
+#define SASR0_L3WD (1<<16)
+#define SASR0_L3RD (1<<17)
+
+#define SASR1_TNF (1<<0)
+#define SASR1_RNE (1<<1)
+#define SASR1_BSY (1<<2)
+#define SASR1_TFS (1<<3)
+#define SASR1_RFS (1<<4)
+#define SASR1_TUR (1<<5)
+#define SASR1_ROR (1<<6)
+#define SASR1_CADT (1<<16)
+#define SASR1_SADR (1<<17)
+#define SASR1_RSTO (1<<18)
+#define SASR1_CLPM (1<<19)
+#define SASR1_CRDY (1<<20)
+#define SASR1_RS3V (1<<21)
+#define SASR1_RS4V (1<<22)
+
+#define SASCR_TUR (1<<5)
+#define SASCR_ROR (1<<6)
+#define SASCR_DTS (1<<16)
+#define SASCR_RDD (1<<17)
+#define SASCR_STO (1<<18)
+
+#define SADTCS_TDEN (1<<0)
+#define SADTCS_TDIE (1<<1)
+#define SADTCS_TDBDA (1<<3)
+#define SADTCS_TDSTA (1<<4)
+#define SADTCS_TDBDB (1<<5)
+#define SADTCS_TDSTB (1<<6)
+#define SADTCS_TBIU (1<<7)
+
+#define SADRCS_RDEN (1<<0)
+#define SADRCS_RDIE (1<<1)
+#define SADRCS_RDBDA (1<<3)
+#define SADRCS_RDSTA (1<<4)
+#define SADRCS_RDBDB (1<<5)
+#define SADRCS_RDSTB (1<<6)
+#define SADRCS_RBIU (1<<7)
+
+#define SAD_CS_DEN (1<<0)
+#define SAD_CS_DIE (1<<1) /* Not functional on metal 1 */
+#define SAD_CS_DBDA (1<<3) /* Not functional on metal 1 */
+#define SAD_CS_DSTA (1<<4)
+#define SAD_CS_DBDB (1<<5) /* Not functional on metal 1 */
+#define SAD_CS_DSTB (1<<6)
+#define SAD_CS_BIU (1<<7) /* Not functional on metal 1 */
+
+#define SAITR_TFS (1<<0)
+#define SAITR_RFS (1<<1)
+#define SAITR_TUR (1<<2)
+#define SAITR_ROR (1<<3)
+#define SAITR_CADT (1<<4)
+#define SAITR_SADR (1<<5)
+#define SAITR_RSTO (1<<6)
+#define SAITR_TDBDA (1<<8)
+#define SAITR_TDBDB (1<<9)
+#define SAITR_RDBDA (1<<10)
+#define SAITR_RDBDB (1<<11)
+
+#endif /* !CONFIG_ARCH_PXA */
+
+/*
+ * General-Purpose I/O Interface
+ *
+ * Registers
+ * PA_DDR GPIO Block A Data Direction
+ * PA_DRR/PA_DWR GPIO Block A Data Value Register (read/write)
+ * PA_SDR GPIO Block A Sleep Direction
+ * PA_SSR GPIO Block A Sleep State
+ * PB_DDR GPIO Block B Data Direction
+ * PB_DRR/PB_DWR GPIO Block B Data Value Register (read/write)
+ * PB_SDR GPIO Block B Sleep Direction
+ * PB_SSR GPIO Block B Sleep State
+ * PC_DDR GPIO Block C Data Direction
+ * PC_DRR/PC_DWR GPIO Block C Data Value Register (read/write)
+ * PC_SDR GPIO Block C Sleep Direction
+ * PC_SSR GPIO Block C Sleep State
+ */
+
+#define SA1111_GPIO 0x1000
+
+#define SA1111_GPIO_PADDR (0x000)
+#define SA1111_GPIO_PADRR (0x004)
+#define SA1111_GPIO_PADWR (0x004)
+#define SA1111_GPIO_PASDR (0x008)
+#define SA1111_GPIO_PASSR (0x00c)
+#define SA1111_GPIO_PBDDR (0x010)
+#define SA1111_GPIO_PBDRR (0x014)
+#define SA1111_GPIO_PBDWR (0x014)
+#define SA1111_GPIO_PBSDR (0x018)
+#define SA1111_GPIO_PBSSR (0x01c)
+#define SA1111_GPIO_PCDDR (0x020)
+#define SA1111_GPIO_PCDRR (0x024)
+#define SA1111_GPIO_PCDWR (0x024)
+#define SA1111_GPIO_PCSDR (0x028)
+#define SA1111_GPIO_PCSSR (0x02c)
+
+#define GPIO_A0 (1 << 0)
+#define GPIO_A1 (1 << 1)
+#define GPIO_A2 (1 << 2)
+#define GPIO_A3 (1 << 3)
+
+#define GPIO_B0 (1 << 8)
+#define GPIO_B1 (1 << 9)
+#define GPIO_B2 (1 << 10)
+#define GPIO_B3 (1 << 11)
+#define GPIO_B4 (1 << 12)
+#define GPIO_B5 (1 << 13)
+#define GPIO_B6 (1 << 14)
+#define GPIO_B7 (1 << 15)
+
+#define GPIO_C0 (1 << 16)
+#define GPIO_C1 (1 << 17)
+#define GPIO_C2 (1 << 18)
+#define GPIO_C3 (1 << 19)
+#define GPIO_C4 (1 << 20)
+#define GPIO_C5 (1 << 21)
+#define GPIO_C6 (1 << 22)
+#define GPIO_C7 (1 << 23)
+
+/*
+ * Interrupt Controller
+ *
+ * Registers
+ * INTTEST0 Test register 0
+ * INTTEST1 Test register 1
+ * INTEN0 Interrupt Enable register 0
+ * INTEN1 Interrupt Enable register 1
+ * INTPOL0 Interrupt Polarity selection 0
+ * INTPOL1 Interrupt Polarity selection 1
+ * INTTSTSEL Interrupt source selection
+ * INTSTATCLR0 Interrupt Status/Clear 0
+ * INTSTATCLR1 Interrupt Status/Clear 1
+ * INTSET0 Interrupt source set 0
+ * INTSET1 Interrupt source set 1
+ * WAKE_EN0 Wake-up source enable 0
+ * WAKE_EN1 Wake-up source enable 1
+ * WAKE_POL0 Wake-up polarity selection 0
+ * WAKE_POL1 Wake-up polarity selection 1
+ */
+#define SA1111_INTC 0x1600
+
+/*
+ * These are offsets from the above base.
+ */
+#define SA1111_INTTEST0 0x0000
+#define SA1111_INTTEST1 0x0004
+#define SA1111_INTEN0 0x0008
+#define SA1111_INTEN1 0x000c
+#define SA1111_INTPOL0 0x0010
+#define SA1111_INTPOL1 0x0014
+#define SA1111_INTTSTSEL 0x0018
+#define SA1111_INTSTATCLR0 0x001c
+#define SA1111_INTSTATCLR1 0x0020
+#define SA1111_INTSET0 0x0024
+#define SA1111_INTSET1 0x0028
+#define SA1111_WAKEEN0 0x002c
+#define SA1111_WAKEEN1 0x0030
+#define SA1111_WAKEPOL0 0x0034
+#define SA1111_WAKEPOL1 0x0038
+
+/* PS/2 Trackpad and Mouse Interfaces */
+#define SA1111_KBD 0x0a00
+#define SA1111_MSE 0x0c00
+
+/* PCMCIA Interface */
+#define SA1111_PCMCIA 0x1600
+
+
+
+
+
+extern struct bus_type sa1111_bus_type;
+
+#define SA1111_DEVID_SBI (1 << 0)
+#define SA1111_DEVID_SK (1 << 1)
+#define SA1111_DEVID_USB (1 << 2)
+#define SA1111_DEVID_SAC (1 << 3)
+#define SA1111_DEVID_SSP (1 << 4)
+#define SA1111_DEVID_PS2 (3 << 5)
+#define SA1111_DEVID_PS2_KBD (1 << 5)
+#define SA1111_DEVID_PS2_MSE (1 << 6)
+#define SA1111_DEVID_GPIO (1 << 7)
+#define SA1111_DEVID_INT (1 << 8)
+#define SA1111_DEVID_PCMCIA (1 << 9)
+
+struct sa1111_dev {
+ struct device dev;
+ unsigned int devid;
+ struct resource res;
+ void __iomem *mapbase;
+ unsigned int skpcr_mask;
+ unsigned int hwirq[6];
+ u64 dma_mask;
+};
+
+#define to_sa1111_device(x) container_of(x, struct sa1111_dev, dev)
+
+#define sa1111_get_drvdata(d) dev_get_drvdata(&(d)->dev)
+#define sa1111_set_drvdata(d,p) dev_set_drvdata(&(d)->dev, p)
+
+struct sa1111_driver {
+ struct device_driver drv;
+ unsigned int devid;
+ int (*probe)(struct sa1111_dev *);
+ void (*remove)(struct sa1111_dev *);
+};
+
+#define SA1111_DRV(_d) container_of((_d), struct sa1111_driver, drv)
+
+#define SA1111_DRIVER_NAME(_sadev) ((_sadev)->dev.driver->name)
+
+/*
+ * These frob the SKPCR register, and call platform specific
+ * enable/disable functions.
+ */
+int sa1111_enable_device(struct sa1111_dev *);
+void sa1111_disable_device(struct sa1111_dev *);
+
+int sa1111_get_irq(struct sa1111_dev *, unsigned num);
+
+unsigned int sa1111_pll_clock(struct sa1111_dev *);
+
+#define SA1111_AUDIO_ACLINK 0
+#define SA1111_AUDIO_I2S 1
+
+void sa1111_select_audio_mode(struct sa1111_dev *sadev, int mode);
+int sa1111_set_audio_rate(struct sa1111_dev *sadev, int rate);
+int sa1111_get_audio_rate(struct sa1111_dev *sadev);
+
+int sa1111_check_dma_bug(dma_addr_t addr);
+
+int sa1111_driver_register(struct sa1111_driver *);
+void sa1111_driver_unregister(struct sa1111_driver *);
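+
+/*
+ * Illustrative sketch (not part of the original header): drivers bind to a
+ * SA-1111 function block by devid, analogous to the LoCoMo bus.
+ * my_probe/my_remove are hypothetical.
+ *
+ *	static struct sa1111_driver my_ps2_driver = {
+ *		.drv	= { .name = "my-sa1111-ps2" },
+ *		.devid	= SA1111_DEVID_PS2,
+ *		.probe	= my_probe,
+ *		.remove	= my_remove,
+ *	};
+ *
+ *	sa1111_driver_register(&my_ps2_driver);
+ */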
+
+struct sa1111_platform_data {
+ int irq_base; /* base for cascaded on-chip IRQs */
+ unsigned disable_devs;
+ void *data;
+ int (*enable)(void *, unsigned);
+ void (*disable)(void *, unsigned);
+};
+
+#endif /* _ASM_ARCH_SA1111 */
diff --git a/arch/arm/include/asm/hardware/scoop.h b/arch/arm/include/asm/hardware/scoop.h
new file mode 100644
index 0000000000..5054533152
--- /dev/null
+++ b/arch/arm/include/asm/hardware/scoop.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Definitions for the SCOOP interface found on various Sharp PDAs
+ *
+ * Copyright (c) 2004 Richard Purdie
+ */
+
+#define SCOOP_MCR 0x00
+#define SCOOP_CDR 0x04
+#define SCOOP_CSR 0x08
+#define SCOOP_CPR 0x0C
+#define SCOOP_CCR 0x10
+#define SCOOP_IRR 0x14
+#define SCOOP_IRM 0x14
+#define SCOOP_IMR 0x18
+#define SCOOP_ISR 0x1C
+#define SCOOP_GPCR 0x20
+#define SCOOP_GPWR 0x24
+#define SCOOP_GPRR 0x28
+
+#define SCOOP_CPR_OUT (1 << 7)
+#define SCOOP_CPR_SD_3V (1 << 2)
+#define SCOOP_CPR_CF_XV (1 << 1)
+#define SCOOP_CPR_CF_3V (1 << 0)
+
+#define SCOOP_GPCR_PA22 (1 << 12)
+#define SCOOP_GPCR_PA21 (1 << 11)
+#define SCOOP_GPCR_PA20 (1 << 10)
+#define SCOOP_GPCR_PA19 (1 << 9)
+#define SCOOP_GPCR_PA18 (1 << 8)
+#define SCOOP_GPCR_PA17 (1 << 7)
+#define SCOOP_GPCR_PA16 (1 << 6)
+#define SCOOP_GPCR_PA15 (1 << 5)
+#define SCOOP_GPCR_PA14 (1 << 4)
+#define SCOOP_GPCR_PA13 (1 << 3)
+#define SCOOP_GPCR_PA12 (1 << 2)
+#define SCOOP_GPCR_PA11 (1 << 1)
+
+struct scoop_config {
+ unsigned short io_out;
+ unsigned short io_dir;
+ unsigned short suspend_clr;
+ unsigned short suspend_set;
+ int gpio_base;
+};
+
+/* Structure for linking scoop devices to PCMCIA sockets */
+struct scoop_pcmcia_dev {
+ struct device *dev; /* Pointer to this socket's scoop device */
+ int irq; /* irq for socket */
+ int cd_irq;
+ const char *cd_irq_str;
+ unsigned char keep_vs;
+ unsigned char keep_rd;
+};
+
+struct scoop_pcmcia_config {
+ struct scoop_pcmcia_dev *devs;
+ int num_devs;
+ void (*power_ctrl)(struct device *scoop, unsigned short cpr, int nr);
+};
+
+extern struct scoop_pcmcia_config *platform_scoop_config;
+
+void reset_scoop(struct device *dev);
+unsigned short read_scoop_reg(struct device *dev, unsigned short reg);
+void write_scoop_reg(struct device *dev, unsigned short reg, unsigned short data);
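+
+/*
+ * Illustrative usage (not part of the original header): drive SCOOP GPIO
+ * pin PA19 high via the GPIO write register.  "dev" is the scoop device
+ * from the matching scoop_pcmcia_dev.
+ *
+ *	unsigned short gpwr = read_scoop_reg(dev, SCOOP_GPWR);
+ *	write_scoop_reg(dev, SCOOP_GPWR, gpwr | SCOOP_GPCR_PA19);
+ */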
diff --git a/arch/arm/include/asm/hardware/ssp.h b/arch/arm/include/asm/hardware/ssp.h
new file mode 100644
index 0000000000..72d1767903
--- /dev/null
+++ b/arch/arm/include/asm/hardware/ssp.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * ssp.h
+ *
+ * Copyright (C) 2003 Russell King, All Rights Reserved.
+ */
+#ifndef SSP_H
+#define SSP_H
+
+struct ssp_state {
+ unsigned int cr0;
+ unsigned int cr1;
+};
+
+int ssp_write_word(u16 data);
+int ssp_read_word(u16 *data);
+int ssp_flush(void);
+void ssp_enable(void);
+void ssp_disable(void);
+void ssp_save_state(struct ssp_state *ssp);
+void ssp_restore_state(struct ssp_state *ssp);
+int ssp_init(void);
+void ssp_exit(void);
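+
+/*
+ * Illustrative usage (not part of the original header): preserving the SSP
+ * configuration across a suspend/resume cycle.
+ *
+ *	struct ssp_state state;
+ *
+ *	ssp_save_state(&state);
+ *	... system sleeps ...
+ *	ssp_restore_state(&state);
+ */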
+
+#endif
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
new file mode 100644
index 0000000000..b4b6622095
--- /dev/null
+++ b/arch/arm/include/asm/highmem.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_HIGHMEM_H
+#define _ASM_HIGHMEM_H
+
+#include <asm/cachetype.h>
+#include <asm/fixmap.h>
+
+#define PKMAP_BASE (PAGE_OFFSET - PMD_SIZE)
+#define LAST_PKMAP PTRS_PER_PTE
+#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
+#define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
+#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
+
+#define flush_cache_kmaps() \
+ do { \
+ if (cache_is_vivt()) \
+ flush_cache_all(); \
+ } while (0)
+
+extern pte_t *pkmap_page_table;
+
+/*
+ * The reason for kmap_high_get() is to ensure that the currently kmap'd
+ * page usage count does not decrease to zero while we're using its
+ * existing virtual mapping in an atomic context. With a VIVT cache this
+ * is essential to do, but with a VIPT cache this is only an optimization
+ * so as not to pay the price of establishing a second mapping if an existing
+ * one can be used. However, on platforms without hardware TLB maintenance
+ * broadcast, we simply cannot use ARCH_NEEDS_KMAP_HIGH_GET at all since
+ * the locking involved must also disable IRQs which is incompatible with
+ * the IPI mechanism used by global TLB operations.
+ */
+#define ARCH_NEEDS_KMAP_HIGH_GET
+#if defined(CONFIG_SMP) && defined(CONFIG_CPU_TLB_V6)
+#undef ARCH_NEEDS_KMAP_HIGH_GET
+#if defined(CONFIG_HIGHMEM) && defined(CONFIG_CPU_CACHE_VIVT)
+#error "The sum of features in your kernel config cannot be supported together"
+#endif
+#endif
+
+/*
+ * Needed to be able to broadcast the TLB invalidation for kmap.
+ */
+#ifdef CONFIG_ARM_ERRATA_798181
+#undef ARCH_NEEDS_KMAP_HIGH_GET
+#endif
+
+#ifdef ARCH_NEEDS_KMAP_HIGH_GET
+extern void *kmap_high_get(struct page *page);
+
+static inline void *arch_kmap_local_high_get(struct page *page)
+{
+ if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !cache_is_vivt())
+ return NULL;
+ return kmap_high_get(page);
+}
+#define arch_kmap_local_high_get arch_kmap_local_high_get
+
+#else /* ARCH_NEEDS_KMAP_HIGH_GET */
+static inline void *kmap_high_get(struct page *page)
+{
+ return NULL;
+}
+#endif /* !ARCH_NEEDS_KMAP_HIGH_GET */
+
+#define arch_kmap_local_post_map(vaddr, pteval) \
+ local_flush_tlb_kernel_page(vaddr)
+
+#define arch_kmap_local_pre_unmap(vaddr) \
+do { \
+ if (cache_is_vivt()) \
+ __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); \
+} while (0)
+
+#define arch_kmap_local_post_unmap(vaddr) \
+ local_flush_tlb_kernel_page(vaddr)
+
+#endif
diff --git a/arch/arm/include/asm/hugetlb-3level.h b/arch/arm/include/asm/hugetlb-3level.h
new file mode 100644
index 0000000000..a30be55057
--- /dev/null
+++ b/arch/arm/include/asm/hugetlb-3level.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/hugetlb-3level.h
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * Based on arch/x86/include/asm/hugetlb.h.
+ */
+
+#ifndef _ASM_ARM_HUGETLB_3LEVEL_H
+#define _ASM_ARM_HUGETLB_3LEVEL_H
+
+
+/*
+ * If our huge pte is non-zero then mark the valid bit.
+ * This allows pte_present(huge_ptep_get(ptep)) to return true for non-zero
+ * ptes.
+ * (The valid bit is automatically cleared by set_pte_at for PROT_NONE ptes).
+ */
+#define __HAVE_ARCH_HUGE_PTEP_GET
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+ pte_t retval = *ptep;
+ if (pte_val(retval))
+ pte_val(retval) |= L_PTE_VALID;
+ return retval;
+}
+
+#endif /* _ASM_ARM_HUGETLB_3LEVEL_H */
diff --git a/arch/arm/include/asm/hugetlb.h b/arch/arm/include/asm/hugetlb.h
new file mode 100644
index 0000000000..a3a82b7158
--- /dev/null
+++ b/arch/arm/include/asm/hugetlb.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/hugetlb.h
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * Based on arch/x86/include/asm/hugetlb.h
+ */
+
+#ifndef _ASM_ARM_HUGETLB_H
+#define _ASM_ARM_HUGETLB_H
+
+#include <asm/cacheflush.h>
+#include <asm/page.h>
+#include <asm/hugetlb-3level.h>
+#include <asm-generic/hugetlb.h>
+
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+ clear_bit(PG_dcache_clean, &page->flags);
+}
+#define arch_clear_hugepage_flags arch_clear_hugepage_flags
+
+#endif /* _ASM_ARM_HUGETLB_H */
diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h
new file mode 100644
index 0000000000..62358d3ca0
--- /dev/null
+++ b/arch/arm/include/asm/hw_breakpoint.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ARM_HW_BREAKPOINT_H
+#define _ARM_HW_BREAKPOINT_H
+
+#ifdef __KERNEL__
+
+struct task_struct;
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+
+struct arch_hw_breakpoint_ctrl {
+ u32 __reserved : 9,
+ mismatch : 1,
+ : 9,
+ len : 8,
+ type : 2,
+ privilege : 2,
+ enabled : 1;
+};
+
+struct arch_hw_breakpoint {
+ u32 address;
+ u32 trigger;
+ struct arch_hw_breakpoint_ctrl step_ctrl;
+ struct arch_hw_breakpoint_ctrl ctrl;
+};
+
+static inline u32 encode_ctrl_reg(struct arch_hw_breakpoint_ctrl ctrl)
+{
+ return (ctrl.mismatch << 22) | (ctrl.len << 5) | (ctrl.type << 3) |
+ (ctrl.privilege << 1) | ctrl.enabled;
+}
+
+static inline void decode_ctrl_reg(u32 reg,
+ struct arch_hw_breakpoint_ctrl *ctrl)
+{
+ ctrl->enabled = reg & 0x1;
+ reg >>= 1;
+ ctrl->privilege = reg & 0x3;
+ reg >>= 2;
+ ctrl->type = reg & 0x3;
+ reg >>= 2;
+ ctrl->len = reg & 0xff;
+ reg >>= 17;
+ ctrl->mismatch = reg & 0x1;
+}
+
+/* Debug architecture numbers. */
+#define ARM_DEBUG_ARCH_RESERVED 0 /* In case of ptrace ABI updates. */
+#define ARM_DEBUG_ARCH_V6 1
+#define ARM_DEBUG_ARCH_V6_1 2
+#define ARM_DEBUG_ARCH_V7_ECP14 3
+#define ARM_DEBUG_ARCH_V7_MM 4
+#define ARM_DEBUG_ARCH_V7_1 5
+#define ARM_DEBUG_ARCH_V8 6
+#define ARM_DEBUG_ARCH_V8_1 7
+#define ARM_DEBUG_ARCH_V8_2 8
+#define ARM_DEBUG_ARCH_V8_4 9
+
+/* Breakpoint */
+#define ARM_BREAKPOINT_EXECUTE 0
+
+/* Watchpoints */
+#define ARM_BREAKPOINT_LOAD 1
+#define ARM_BREAKPOINT_STORE 2
+#define ARM_FSR_ACCESS_MASK (1 << 11)
+
+/* Privilege Levels */
+#define ARM_BREAKPOINT_PRIV 1
+#define ARM_BREAKPOINT_USER 2
+
+/* Lengths */
+#define ARM_BREAKPOINT_LEN_1 0x1
+#define ARM_BREAKPOINT_LEN_2 0x3
+#define ARM_BREAKPOINT_LEN_4 0xf
+#define ARM_BREAKPOINT_LEN_8 0xff
+
+/* Limits */
+#define ARM_MAX_BRP 16
+#define ARM_MAX_WRP 16
+#define ARM_MAX_HBP_SLOTS (ARM_MAX_BRP + ARM_MAX_WRP)
+
+/* DSCR method of entry bits. */
+#define ARM_DSCR_MOE(x) ((x >> 2) & 0xf)
+#define ARM_ENTRY_BREAKPOINT 0x1
+#define ARM_ENTRY_ASYNC_WATCHPOINT 0x2
+#define ARM_ENTRY_SYNC_WATCHPOINT 0xa
+
+/* DSCR monitor/halting bits. */
+#define ARM_DSCR_HDBGEN (1 << 14)
+#define ARM_DSCR_MDBGEN (1 << 15)
+
+/* OSLSR os lock model bits */
+#define ARM_OSLSR_OSLM0 (1 << 0)
+
+/* opcode2 numbers for the co-processor instructions. */
+#define ARM_OP2_BVR 4
+#define ARM_OP2_BCR 5
+#define ARM_OP2_WVR 6
+#define ARM_OP2_WCR 7
+
+/* Base register numbers for the debug registers. */
+#define ARM_BASE_BVR 64
+#define ARM_BASE_BCR 80
+#define ARM_BASE_WVR 96
+#define ARM_BASE_WCR 112
+
+/* Accessor macros for the debug registers. */
+#define ARM_DBG_READ(N, M, OP2, VAL) do {\
+ asm volatile("mrc p14, 0, %0, " #N "," #M ", " #OP2 : "=r" (VAL));\
+} while (0)
+
+#define ARM_DBG_WRITE(N, M, OP2, VAL) do {\
+ asm volatile("mcr p14, 0, %0, " #N "," #M ", " #OP2 : : "r" (VAL));\
+} while (0)
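+
+/*
+ * Illustrative usage (not part of the original header): reading DBGDSCRint
+ * (CRn = c0, CRm = c1, op2 = 0) and checking whether monitor-mode debug is
+ * enabled.
+ *
+ *	u32 dscr;
+ *
+ *	ARM_DBG_READ(c0, c1, 0, dscr);
+ *	if (dscr & ARM_DSCR_MDBGEN)
+ *		...
+ */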
+
+struct perf_event_attr;
+struct notifier_block;
+struct perf_event;
+struct pmu;
+
+extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
+ int *gen_len, int *gen_type);
+extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+extern int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw);
+extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
+ unsigned long val, void *data);
+
+extern u8 arch_get_debug_arch(void);
+extern u8 arch_get_max_wp_len(void);
+extern void clear_ptrace_hw_breakpoint(struct task_struct *tsk);
+
+int arch_install_hw_breakpoint(struct perf_event *bp);
+void arch_uninstall_hw_breakpoint(struct perf_event *bp);
+void hw_breakpoint_pmu_read(struct perf_event *bp);
+int hw_breakpoint_slots(int type);
+
+#else
+static inline void clear_ptrace_hw_breakpoint(struct task_struct *tsk) {}
+
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+#endif /* __KERNEL__ */
+#endif /* _ARM_HW_BREAKPOINT_H */
diff --git a/arch/arm/include/asm/hw_irq.h b/arch/arm/include/asm/hw_irq.h
new file mode 100644
index 0000000000..cecc13214e
--- /dev/null
+++ b/arch/arm/include/asm/hw_irq.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Nothing to see here yet
+ */
+#ifndef _ARCH_ARM_HW_IRQ_H
+#define _ARCH_ARM_HW_IRQ_H
+
+static inline void ack_bad_irq(int irq)
+{
+ extern unsigned long irq_err_count;
+ irq_err_count++;
+ pr_crit("unexpected IRQ trap at vector %02x\n", irq);
+}
+
+#define ARCH_IRQ_INIT_FLAGS (IRQ_NOREQUEST | IRQ_NOPROBE)
+
+#endif
diff --git a/arch/arm/include/asm/hwcap.h b/arch/arm/include/asm/hwcap.h
new file mode 100644
index 0000000000..e31d9f1b85
--- /dev/null
+++ b/arch/arm/include/asm/hwcap.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASMARM_HWCAP_H
+#define __ASMARM_HWCAP_H
+
+#include <uapi/asm/hwcap.h>
+
+#if !defined(__ASSEMBLY__)
+/*
+ * This yields a mask that user programs can use to figure out what
+ * instruction set this cpu supports.
+ */
+#define ELF_HWCAP (elf_hwcap)
+#define ELF_HWCAP2 (elf_hwcap2)
+extern unsigned int elf_hwcap, elf_hwcap2;
+#endif
+#endif
diff --git a/arch/arm/include/asm/hypervisor.h b/arch/arm/include/asm/hypervisor.h
new file mode 100644
index 0000000000..bd61502b97
--- /dev/null
+++ b/arch/arm/include/asm/hypervisor.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARM_HYPERVISOR_H
+#define _ASM_ARM_HYPERVISOR_H
+
+#include <asm/xen/hypervisor.h>
+
+void kvm_init_hyp_services(void);
+bool kvm_arm_hyp_service_available(u32 func_id);
+
+#endif
diff --git a/arch/arm/include/asm/idmap.h b/arch/arm/include/asm/idmap.h
new file mode 100644
index 0000000000..baebb67b35
--- /dev/null
+++ b/arch/arm/include/asm/idmap.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_IDMAP_H
+#define __ASM_IDMAP_H
+
+#include <linux/compiler.h>
+#include <linux/pgtable.h>
+
+/* Tag a function that must be executed via an identity mapping. */
+#define __idmap __section(".idmap.text") noinline notrace
+
+extern pgd_t *idmap_pgd;
+
+void setup_mm_for_reboot(void);
+
+#endif /* __ASM_IDMAP_H */
diff --git a/arch/arm/include/asm/insn.h b/arch/arm/include/asm/insn.h
new file mode 100644
index 0000000000..faf3d1c283
--- /dev/null
+++ b/arch/arm/include/asm/insn.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_ARM_INSN_H
+#define __ASM_ARM_INSN_H
+
+#include <linux/types.h>
+
+/*
+ * Avoid a literal load by emitting a sequence of ADD/LDR instructions with the
+ * appropriate relocations. The combined sequence has a range of -/+ 256 MiB,
+ * which should be sufficient for the core kernel as well as modules loaded
+ * into the module region. (Not supported by LLD before release 14)
+ */
+#define LOAD_SYM_ARMV6(reg, sym) \
+ " .globl " #sym " \n\t" \
+ " .reloc 10f, R_ARM_ALU_PC_G0_NC, " #sym " \n\t" \
+ " .reloc 11f, R_ARM_ALU_PC_G1_NC, " #sym " \n\t" \
+ " .reloc 12f, R_ARM_LDR_PC_G2, " #sym " \n\t" \
+ "10: sub " #reg ", pc, #8 \n\t" \
+ "11: sub " #reg ", " #reg ", #4 \n\t" \
+ "12: ldr " #reg ", [" #reg ", #0] \n\t"
+
+static inline unsigned long
+arm_gen_nop(void)
+{
+#ifdef CONFIG_THUMB2_KERNEL
+ return 0xf3af8000; /* nop.w */
+#else
+ return 0xe1a00000; /* mov r0, r0 */
+#endif
+}
+
+unsigned long
+__arm_gen_branch(unsigned long pc, unsigned long addr, bool link, bool warn);
+
+static inline unsigned long
+arm_gen_branch(unsigned long pc, unsigned long addr)
+{
+ return __arm_gen_branch(pc, addr, false, true);
+}
+
+static inline unsigned long
+arm_gen_branch_link(unsigned long pc, unsigned long addr, bool warn)
+{
+ return __arm_gen_branch(pc, addr, true, warn);
+}
+
+#endif
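
As a usage sketch, code-patching callers (ftrace-style) use these helpers to generate a BL encoding at patch time; a zero result indicates the displacement did not fit in branch range. The function name below is illustrative:

/* Sketch: build a BL instruction from 'pc' to 'func'. A result of 0
 * means the target was out of branch range (warn=true also WARNs).
 */
static unsigned long example_make_call(unsigned long pc, unsigned long func)
{
        return arm_gen_branch_link(pc, func, true);
}
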
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
new file mode 100644
index 0000000000..56b08ed6cc
--- /dev/null
+++ b/arch/arm/include/asm/io.h
@@ -0,0 +1,435 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/io.h
+ *
+ * Copyright (C) 1996-2000 Russell King
+ *
+ * Modifications:
+ * 16-Sep-1996 RMK Inlined the inx/outx functions & optimised for both
+ * constant addresses and variable addresses.
+ * 04-Dec-1997 RMK Moved a lot of this stuff to the new architecture
+ * specific IO header files.
+ * 27-Mar-1999 PJB Second parameter of memcpy_toio is const..
+ * 04-Apr-1999 PJB Added check_signature.
+ * 12-Dec-1999 RMK More cleanups
+ * 18-Jun-2000 RMK Removed virt_to_* and friends definitions
+ * 05-Oct-2004 BJD Moved memory string functions to use void __iomem
+ */
+#ifndef __ASM_ARM_IO_H
+#define __ASM_ARM_IO_H
+
+#ifdef __KERNEL__
+
+#include <linux/string.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/page.h>
+#include <asm-generic/pci_iomap.h>
+
+/*
+ * ISA I/O bus memory addresses are 1:1 with the physical address.
+ */
+#define isa_virt_to_bus virt_to_phys
+#define isa_bus_to_virt phys_to_virt
+
+/*
+ * Atomic MMIO-wide IO modify
+ */
+extern void atomic_io_modify(void __iomem *reg, u32 mask, u32 set);
+extern void atomic_io_modify_relaxed(void __iomem *reg, u32 mask, u32 set);
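
For illustration, a sketch of the read-modify-write these helpers perform; the real implementations additionally serialise callers so concurrent updates of the same register cannot race (this is not the kernel's code):

/* Sketch: the unserialised equivalent of atomic_io_modify(): clear the
 * bits in @mask, then OR in the masked @set value.
 */
static void example_io_modify(void __iomem *reg, u32 mask, u32 set)
{
        u32 value = readl_relaxed(reg);

        value &= ~mask;
        value |= set & mask;
        writel_relaxed(value, reg);
}
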
+
+/*
+ * Generic IO read/write. These perform native-endian accesses. Note
+ * that some architectures will want to re-define __raw_{read,write}w.
+ */
+void __raw_writesb(volatile void __iomem *addr, const void *data, int bytelen);
+void __raw_writesw(volatile void __iomem *addr, const void *data, int wordlen);
+void __raw_writesl(volatile void __iomem *addr, const void *data, int longlen);
+
+void __raw_readsb(const volatile void __iomem *addr, void *data, int bytelen);
+void __raw_readsw(const volatile void __iomem *addr, void *data, int wordlen);
+void __raw_readsl(const volatile void __iomem *addr, void *data, int longlen);
+
+#if __LINUX_ARM_ARCH__ < 6
+/*
+ * Half-word accesses are problematic with RiscPC due to limitations of
+ * the bus. Rather than special-case the machine, just let the compiler
+ * generate the access for CPUs prior to ARMv6.
+ */
+#define __raw_readw(a) (__chk_io_ptr(a), *(volatile unsigned short __force *)(a))
+#define __raw_writew(v,a) ((void)(__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v)))
+#else
+/*
+ * When running under a hypervisor, we want to avoid I/O accesses with
+ * writeback addressing modes as these incur a significant performance
+ * overhead (the address generation must be emulated in software).
+ */
+#define __raw_writew __raw_writew
+static inline void __raw_writew(u16 val, volatile void __iomem *addr)
+{
+ asm volatile("strh %1, %0"
+ : : "Q" (*(volatile u16 __force *)addr), "r" (val));
+}
+
+#define __raw_readw __raw_readw
+static inline u16 __raw_readw(const volatile void __iomem *addr)
+{
+ u16 val;
+ asm volatile("ldrh %0, %1"
+ : "=r" (val)
+ : "Q" (*(volatile u16 __force *)addr));
+ return val;
+}
+#endif
+
+#define __raw_writeb __raw_writeb
+static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
+{
+ asm volatile("strb %1, %0"
+ : : "Qo" (*(volatile u8 __force *)addr), "r" (val));
+}
+
+#define __raw_writel __raw_writel
+static inline void __raw_writel(u32 val, volatile void __iomem *addr)
+{
+ asm volatile("str %1, %0"
+ : : "Qo" (*(volatile u32 __force *)addr), "r" (val));
+}
+
+#define __raw_readb __raw_readb
+static inline u8 __raw_readb(const volatile void __iomem *addr)
+{
+ u8 val;
+ asm volatile("ldrb %0, %1"
+ : "=r" (val)
+ : "Qo" (*(volatile u8 __force *)addr));
+ return val;
+}
+
+#define __raw_readl __raw_readl
+static inline u32 __raw_readl(const volatile void __iomem *addr)
+{
+ u32 val;
+ asm volatile("ldr %0, %1"
+ : "=r" (val)
+ : "Qo" (*(volatile u32 __force *)addr));
+ return val;
+}
+
+/*
+ * Architecture ioremap implementation.
+ */
+#define MT_DEVICE 0
+#define MT_DEVICE_NONSHARED 1
+#define MT_DEVICE_CACHED 2
+#define MT_DEVICE_WC 3
+/*
+ * types 4 onwards can be found in asm/mach/map.h and are undefined
+ * for ioremap
+ */
+
+/*
+ * __arm_ioremap takes a CPU physical address.
+ * __arm_ioremap_pfn takes a Page Frame Number and an offset into that page.
+ * The _caller variety takes a __builtin_return_address(0) value for
+ * /proc/vmallocinfo to use - and should only be used in non-inline functions.
+ */
+extern void __iomem *__arm_ioremap_caller(phys_addr_t, size_t, unsigned int,
+ void *);
+extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
+extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached);
+void __arm_iomem_set_ro(void __iomem *ptr, size_t size);
+
+extern void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
+ unsigned int, void *);
+
+/*
+ * Bad read/write accesses...
+ */
+extern void __readwrite_bug(const char *fn);
+
+/*
+ * A typesafe __io() helper
+ */
+static inline void __iomem *__typesafe_io(unsigned long addr)
+{
+ return (void __iomem *)addr;
+}
+
+#define IOMEM(x) ((void __force __iomem *)(x))
+
+/* IO barriers */
+#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
+#include <asm/barrier.h>
+#define __iormb() rmb()
+#define __iowmb() wmb()
+#else
+#define __iormb() do { } while (0)
+#define __iowmb() do { } while (0)
+#endif
+
+/* PCI fixed i/o mapping */
+#define PCI_IO_VIRT_BASE 0xfee00000
+#define PCI_IOBASE ((void __iomem *)PCI_IO_VIRT_BASE)
+
+#if defined(CONFIG_PCI) || IS_ENABLED(CONFIG_PCMCIA)
+void pci_ioremap_set_mem_type(int mem_type);
+#else
+static inline void pci_ioremap_set_mem_type(int mem_type) {}
+#endif
+
+struct resource;
+
+#define pci_remap_iospace pci_remap_iospace
+int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
+
+/*
+ * PCI configuration space mapping function.
+ *
+ * The PCI specification does not allow configuration write
+ * transactions to be posted. Add an arch specific
+ * pci_remap_cfgspace() definition that is implemented
+ * through strongly ordered memory mappings.
+ */
+#define pci_remap_cfgspace pci_remap_cfgspace
+void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size);
+/*
+ * Now, pick up the machine-defined IO definitions
+ */
+#ifdef CONFIG_NEED_MACH_IO_H
+#include <mach/io.h>
+#else
+#if IS_ENABLED(CONFIG_PCMCIA) || defined(CONFIG_PCI)
+#define IO_SPACE_LIMIT ((resource_size_t)0xfffff)
+#else
+#define IO_SPACE_LIMIT ((resource_size_t)0)
+#endif
+#define __io(a) __typesafe_io(PCI_IO_VIRT_BASE + ((a) & IO_SPACE_LIMIT))
+#endif
+
+/*
+ * IO port access primitives
+ * -------------------------
+ *
+ * The ARM doesn't have special IO access instructions; all IO is memory
+ * mapped. Note that these are defined to perform little endian accesses
+ * only. Their primary purpose is to access PCI and ISA peripherals.
+ *
+ * Note that for a big endian machine, this implies that the following
+ * big endian mode connectivity is in place, as described by numerous
+ * ARM documents:
+ *
+ * PCI: D0-D7 D8-D15 D16-D23 D24-D31
+ * ARM: D24-D31 D16-D23 D8-D15 D0-D7
+ *
+ * The machine specific io.h include defines __io to translate an "IO"
+ * address to a memory address.
+ *
+ * Note that we prevent GCC re-ordering or caching values in expressions
+ * by introducing sequence points into the in*() definitions. Note that
+ * __raw_* do not guarantee this behaviour.
+ *
+ * The {in,out}[bwl] macros are for emulating x86-style PCI/ISA IO space.
+ */
+#ifdef __io
+#define outb(v,p) ({ __iowmb(); __raw_writeb(v,__io(p)); })
+#define outw(v,p) ({ __iowmb(); __raw_writew((__force __u16) \
+ cpu_to_le16(v),__io(p)); })
+#define outl(v,p) ({ __iowmb(); __raw_writel((__force __u32) \
+ cpu_to_le32(v),__io(p)); })
+
+#define inb(p) ({ __u8 __v = __raw_readb(__io(p)); __iormb(); __v; })
+#define inw(p) ({ __u16 __v = le16_to_cpu((__force __le16) \
+ __raw_readw(__io(p))); __iormb(); __v; })
+#define inl(p) ({ __u32 __v = le32_to_cpu((__force __le32) \
+ __raw_readl(__io(p))); __iormb(); __v; })
+
+#define outsb(p,d,l) __raw_writesb(__io(p),d,l)
+#define outsw(p,d,l) __raw_writesw(__io(p),d,l)
+#define outsl(p,d,l) __raw_writesl(__io(p),d,l)
+
+#define insb(p,d,l) __raw_readsb(__io(p),d,l)
+#define insw(p,d,l) __raw_readsw(__io(p),d,l)
+#define insl(p,d,l) __raw_readsl(__io(p),d,l)
+#endif
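
A hedged example of these port accessors in use, polling a hypothetical ISA-style UART; the port numbers and status bit are illustrative only:

/* Sketch: transmit one byte through an ISA-style UART using the
 * accessors above. 0x3f8/0x3fd and bit 5 are made-up values.
 */
static void example_uart_putc(char c)
{
        while (!(inb(0x3fd) & 0x20))    /* TX holding register empty? */
                cpu_relax();
        outb(c, 0x3f8);
}
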
+
+/*
+ * String version of IO memory access ops:
+ */
+extern void _memcpy_fromio(void *, const volatile void __iomem *, size_t);
+extern void _memcpy_toio(volatile void __iomem *, const void *, size_t);
+extern void _memset_io(volatile void __iomem *, int, size_t);
+
+/*
+ * Memory access primitives
+ * ------------------------
+ *
+ * These perform PCI memory accesses via an ioremap region. They don't
+ * take an address as such, but a cookie.
+ *
+ * Again, these are defined to perform little endian accesses. See the
+ * IO port primitives for more information.
+ */
+#ifndef readl
+#define readb_relaxed(c) ({ u8 __r = __raw_readb(c); __r; })
+#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \
+ __raw_readw(c)); __r; })
+#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
+ __raw_readl(c)); __r; })
+
+#define writeb_relaxed(v,c) __raw_writeb(v,c)
+#define writew_relaxed(v,c) __raw_writew((__force u16) cpu_to_le16(v),c)
+#define writel_relaxed(v,c) __raw_writel((__force u32) cpu_to_le32(v),c)
+
+#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; })
+#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
+#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
+
+#define writeb(v,c) ({ __iowmb(); writeb_relaxed(v,c); })
+#define writew(v,c) ({ __iowmb(); writew_relaxed(v,c); })
+#define writel(v,c) ({ __iowmb(); writel_relaxed(v,c); })
+
+#define readsb(p,d,l) __raw_readsb(p,d,l)
+#define readsw(p,d,l) __raw_readsw(p,d,l)
+#define readsl(p,d,l) __raw_readsl(p,d,l)
+
+#define writesb(p,d,l) __raw_writesb(p,d,l)
+#define writesw(p,d,l) __raw_writesw(p,d,l)
+#define writesl(p,d,l) __raw_writesl(p,d,l)
+
+#ifndef __ARMBE__
+static inline void memset_io(volatile void __iomem *dst, unsigned c,
+ size_t count)
+{
+ extern void mmioset(void *, unsigned int, size_t);
+ mmioset((void __force *)dst, c, count);
+}
+#define memset_io(dst,c,count) memset_io(dst,c,count)
+
+static inline void memcpy_fromio(void *to, const volatile void __iomem *from,
+ size_t count)
+{
+ extern void mmiocpy(void *, const void *, size_t);
+ mmiocpy(to, (const void __force *)from, count);
+}
+#define memcpy_fromio(to,from,count) memcpy_fromio(to,from,count)
+
+static inline void memcpy_toio(volatile void __iomem *to, const void *from,
+ size_t count)
+{
+ extern void mmiocpy(void *, const void *, size_t);
+ mmiocpy((void __force *)to, from, count);
+}
+#define memcpy_toio(to,from,count) memcpy_toio(to,from,count)
+
+#else
+#define memset_io(c,v,l) _memset_io(c,(v),(l))
+#define memcpy_fromio(a,c,l) _memcpy_fromio((a),c,(l))
+#define memcpy_toio(c,a,l) _memcpy_toio(c,(a),(l))
+#endif
+
+#endif /* readl */
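
To make the relaxed/ordered distinction above concrete, a sketch of a driver pushing a burst of data with writel_relaxed() and publishing it with one ordered writel(); the register offsets are made up:

/* Sketch: the _relaxed accessors omit __iormb()/__iowmb(), so they are
 * cheap for a burst of accesses to one device; the final ordered
 * writel() keeps the doorbell from being reordered before the data.
 */
static void example_fifo_push(void __iomem *base, const u32 *buf, int n)
{
        int i;

        for (i = 0; i < n; i++)
                writel_relaxed(buf[i], base + 0x0);     /* FIFO */
        writel(1, base + 0x4);                          /* DOORBELL */
}
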
+
+/*
+ * ioremap() and friends.
+ *
+ * ioremap() takes a resource address, and size. Due to the ARM memory
+ * types, it is important to use the correct ioremap() function as each
+ * mapping has specific properties.
+ *
+ * Function Memory type Cacheability Cache hint
+ * ioremap() Device n/a n/a
+ * ioremap_cache() Normal Writeback Read allocate
+ * ioremap_wc() Normal Non-cacheable n/a
+ * ioremap_wt() Normal Non-cacheable n/a
+ *
+ * All device mappings have the following properties:
+ * - no access speculation
+ * - no repetition (eg, on return from an exception)
+ * - number, order and size of accesses are maintained
+ * - unaligned accesses are "unpredictable"
+ * - writes may be delayed before they hit the endpoint device
+ *
+ * All normal memory mappings have the following properties:
+ * - reads can be repeated with no side effects
+ * - repeated reads return the last value written
+ * - reads can fetch additional locations without side effects
+ * - writes can be repeated (in certain cases) with no side effects
+ * - writes can be merged before accessing the target
+ * - unaligned accesses can be supported
+ * - ordering is not guaranteed without explicit dependencies or barrier
+ * instructions
+ * - writes may be delayed before they hit the endpoint memory
+ *
+ * The cache hint is only a performance hint: CPUs may alias these hints.
+ * Eg, a CPU not implementing read allocate but implementing write allocate
+ * will provide a write allocate mapping instead.
+ */
+void __iomem *ioremap(resource_size_t res_cookie, size_t size);
+#define ioremap ioremap
+
+/*
+ * Do not use ioremap_cache for mapping memory. Use memremap instead.
+ */
+void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size);
+#define ioremap_cache ioremap_cache
+
+void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size);
+#define ioremap_wc ioremap_wc
+#define ioremap_wt ioremap_wc
+
+void iounmap(volatile void __iomem *io_addr);
+#define iounmap iounmap
+
+void *arch_memremap_wb(phys_addr_t phys_addr, size_t size);
+#define arch_memremap_wb arch_memremap_wb
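
Following the table above, a usage sketch: device registers get ioremap() and the mapping is torn down with iounmap(). The physical address and register offset are illustrative; SZ_4K comes from <linux/sizes.h>:

/* Sketch: map a 4K device register window, read a hypothetical ID
 * register, and unmap. For RAM, use memremap() rather than
 * ioremap_cache(), as noted above.
 */
static u32 example_read_id(phys_addr_t phys)
{
        void __iomem *regs = ioremap(phys, SZ_4K);
        u32 id = 0;

        if (regs) {
                id = readl(regs + 0x0);
                iounmap(regs);
        }
        return id;
}
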
+
+/*
+ * io{read,write}{16,32}be() macros
+ */
+#define ioread16be(p) ({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
+#define ioread32be(p) ({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
+
+#define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); })
+#define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); })
+
+#ifndef ioport_map
+#define ioport_map ioport_map
+extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
+#endif
+#ifndef ioport_unmap
+#define ioport_unmap ioport_unmap
+extern void ioport_unmap(void __iomem *addr);
+#endif
+
+struct pci_dev;
+
+#define pci_iounmap pci_iounmap
+extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr);
+
+/*
+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
+ * access
+ */
+#define xlate_dev_mem_ptr(p) __va(p)
+
+#include <asm-generic/io.h>
+
+#ifdef CONFIG_MMU
+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
+extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
+extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
+extern bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
+ unsigned long flags);
+#define arch_memremap_can_ram_remap arch_memremap_can_ram_remap
+#endif
+
+/*
+ * Register ISA memory and port locations for glibc iopl/inb/outb
+ * emulation.
+ */
+extern void register_isa_ports(unsigned int mmio, unsigned int io,
+ unsigned int io_shift);
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_ARM_IO_H */
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
new file mode 100644
index 0000000000..26c1d2ced4
--- /dev/null
+++ b/arch/arm/include/asm/irq.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_ARM_IRQ_H
+#define __ASM_ARM_IRQ_H
+
+#define NR_IRQS_LEGACY 16
+
+#ifndef CONFIG_SPARSE_IRQ
+#include <mach/irqs.h>
+#else
+#define NR_IRQS NR_IRQS_LEGACY
+#endif
+
+#ifndef irq_canonicalize
+#define irq_canonicalize(i) (i)
+#endif
+
+/*
+ * Use this value to indicate lack of interrupt
+ * capability
+ */
+#ifndef NO_IRQ
+#define NO_IRQ ((unsigned int)(-1))
+#endif
+
+#ifndef __ASSEMBLY__
+struct irqaction;
+struct pt_regs;
+
+void handle_IRQ(unsigned int, struct pt_regs *);
+
+#ifdef CONFIG_SMP
+#include <linux/cpumask.h>
+
+extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
+ int exclude_cpu);
+#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
+#endif
+
+static inline int nr_legacy_irqs(void)
+{
+ return NR_IRQS_LEGACY;
+}
+
+#endif
+
+#endif
+
diff --git a/arch/arm/include/asm/irq_work.h b/arch/arm/include/asm/irq_work.h
new file mode 100644
index 0000000000..3149e4dc1b
--- /dev/null
+++ b/arch/arm/include/asm/irq_work.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_ARM_IRQ_WORK_H
+#define __ASM_ARM_IRQ_WORK_H
+
+#include <asm/smp_plat.h>
+
+static inline bool arch_irq_work_has_interrupt(void)
+{
+ return is_smp();
+}
+
+extern void arch_irq_work_raise(void);
+
+#endif /* _ASM_ARM_IRQ_WORK_H */
diff --git a/arch/arm/include/asm/irqflags.h b/arch/arm/include/asm/irqflags.h
new file mode 100644
index 0000000000..aeec7f24eb
--- /dev/null
+++ b/arch/arm/include/asm/irqflags.h
@@ -0,0 +1,187 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_ARM_IRQFLAGS_H
+#define __ASM_ARM_IRQFLAGS_H
+
+#ifdef __KERNEL__
+
+#include <asm/ptrace.h>
+
+/*
+ * CPU interrupt mask handling.
+ */
+#ifdef CONFIG_CPU_V7M
+#define IRQMASK_REG_NAME_R "primask"
+#define IRQMASK_REG_NAME_W "primask"
+#define IRQMASK_I_BIT 1
+#else
+#define IRQMASK_REG_NAME_R "cpsr"
+#define IRQMASK_REG_NAME_W "cpsr_c"
+#define IRQMASK_I_BIT PSR_I_BIT
+#endif
+
+#if __LINUX_ARM_ARCH__ >= 6
+
+#define arch_local_irq_save arch_local_irq_save
+static inline unsigned long arch_local_irq_save(void)
+{
+ unsigned long flags;
+
+ asm volatile(
+ " mrs %0, " IRQMASK_REG_NAME_R " @ arch_local_irq_save\n"
+ " cpsid i"
+ : "=r" (flags) : : "memory", "cc");
+ return flags;
+}
+
+#define arch_local_irq_enable arch_local_irq_enable
+static inline void arch_local_irq_enable(void)
+{
+ asm volatile(
+ " cpsie i @ arch_local_irq_enable"
+ :
+ :
+ : "memory", "cc");
+}
+
+#define arch_local_irq_disable arch_local_irq_disable
+static inline void arch_local_irq_disable(void)
+{
+ asm volatile(
+ " cpsid i @ arch_local_irq_disable"
+ :
+ :
+ : "memory", "cc");
+}
+
+#define local_fiq_enable() __asm__("cpsie f @ __stf" : : : "memory", "cc")
+#define local_fiq_disable() __asm__("cpsid f @ __clf" : : : "memory", "cc")
+
+#ifndef CONFIG_CPU_V7M
+#define local_abt_enable() __asm__("cpsie a @ __sta" : : : "memory", "cc")
+#define local_abt_disable() __asm__("cpsid a @ __cla" : : : "memory", "cc")
+#else
+#define local_abt_enable() do { } while (0)
+#define local_abt_disable() do { } while (0)
+#endif
+#else
+
+/*
+ * Save the current interrupt enable state & disable IRQs
+ */
+#define arch_local_irq_save arch_local_irq_save
+static inline unsigned long arch_local_irq_save(void)
+{
+ unsigned long flags, temp;
+
+ asm volatile(
+ " mrs %0, cpsr @ arch_local_irq_save\n"
+ " orr %1, %0, #128\n"
+ " msr cpsr_c, %1"
+ : "=r" (flags), "=r" (temp)
+ :
+ : "memory", "cc");
+ return flags;
+}
+
+/*
+ * Enable IRQs
+ */
+#define arch_local_irq_enable arch_local_irq_enable
+static inline void arch_local_irq_enable(void)
+{
+ unsigned long temp;
+ asm volatile(
+ " mrs %0, cpsr @ arch_local_irq_enable\n"
+ " bic %0, %0, #128\n"
+ " msr cpsr_c, %0"
+ : "=r" (temp)
+ :
+ : "memory", "cc");
+}
+
+/*
+ * Disable IRQs
+ */
+#define arch_local_irq_disable arch_local_irq_disable
+static inline void arch_local_irq_disable(void)
+{
+ unsigned long temp;
+ asm volatile(
+ " mrs %0, cpsr @ arch_local_irq_disable\n"
+ " orr %0, %0, #128\n"
+ " msr cpsr_c, %0"
+ : "=r" (temp)
+ :
+ : "memory", "cc");
+}
+
+/*
+ * Enable FIQs
+ */
+#define local_fiq_enable() \
+ ({ \
+ unsigned long temp; \
+ __asm__ __volatile__( \
+ "mrs %0, cpsr @ stf\n" \
+" bic %0, %0, #64\n" \
+" msr cpsr_c, %0" \
+ : "=r" (temp) \
+ : \
+ : "memory", "cc"); \
+ })
+
+/*
+ * Disable FIQs
+ */
+#define local_fiq_disable() \
+ ({ \
+ unsigned long temp; \
+ __asm__ __volatile__( \
+ "mrs %0, cpsr @ clf\n" \
+" orr %0, %0, #64\n" \
+" msr cpsr_c, %0" \
+ : "=r" (temp) \
+ : \
+ : "memory", "cc"); \
+ })
+
+#define local_abt_enable() do { } while (0)
+#define local_abt_disable() do { } while (0)
+#endif
+
+/*
+ * Save the current interrupt enable state.
+ */
+#define arch_local_save_flags arch_local_save_flags
+static inline unsigned long arch_local_save_flags(void)
+{
+ unsigned long flags;
+ asm volatile(
+ " mrs %0, " IRQMASK_REG_NAME_R " @ local_save_flags"
+ : "=r" (flags) : : "memory", "cc");
+ return flags;
+}
+
+/*
+ * restore saved IRQ & FIQ state
+ */
+#define arch_local_irq_restore arch_local_irq_restore
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+ asm volatile(
+ " msr " IRQMASK_REG_NAME_W ", %0 @ local_irq_restore"
+ :
+ : "r" (flags)
+ : "memory", "cc");
+}
+
+#define arch_irqs_disabled_flags arch_irqs_disabled_flags
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+ return flags & IRQMASK_I_BIT;
+}
+
+#include <asm-generic/irqflags.h>
+
+#endif /* ifdef __KERNEL__ */
+#endif /* ifndef __ASM_ARM_IRQFLAGS_H */
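
Typical users reach these primitives through the generic wrappers in <linux/irqflags.h>; a sketch of the canonical save/restore pairing:

/* Sketch: local_irq_save()/local_irq_restore() from <linux/irqflags.h>
 * expand to the arch_local_irq_save()/arch_local_irq_restore() pair
 * above (mrs + cpsid i, then msr, on ARMv6+).
 */
static void example_update(unsigned long *counter)
{
        unsigned long flags;

        local_irq_save(flags);
        (*counter)++;           /* not interruptible on this CPU */
        local_irq_restore(flags);
}
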
diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h
new file mode 100644
index 0000000000..e12d7d096f
--- /dev/null
+++ b/arch/arm/include/asm/jump_label.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARM_JUMP_LABEL_H
+#define _ASM_ARM_JUMP_LABEL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <asm/unified.h>
+
+#define JUMP_LABEL_NOP_SIZE 4
+
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
+{
+ asm_volatile_goto("1:\n\t"
+ WASM(nop) "\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ ".word 1b, %l[l_yes], %c0\n\t"
+ ".popsection\n\t"
+ : : "i" (&((char *)key)[branch]) : : l_yes);
+
+ return false;
+l_yes:
+ return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+{
+ asm_volatile_goto("1:\n\t"
+ WASM(b) " %l[l_yes]\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ ".word 1b, %l[l_yes], %c0\n\t"
+ ".popsection\n\t"
+ : : "i" (&((char *)key)[branch]) : : l_yes);
+
+ return false;
+l_yes:
+ return true;
+}
+
+typedef u32 jump_label_t;
+
+struct jump_entry {
+ jump_label_t code;
+ jump_label_t target;
+ jump_label_t key;
+};
+
+#endif /* __ASSEMBLY__ */
+#endif
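
These hooks are reached through the static-key API in <linux/jump_label.h>; a hedged sketch of the fast path they enable (the key name is hypothetical):

/* Sketch: static_branch_unlikely() compiles down to the single NOP
 * emitted by arch_static_branch() above; enabling the key patches the
 * NOP into a branch to the slow path.
 */
#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(example_key);    /* hypothetical key */

static inline void example_hot_path(void)
{
        if (static_branch_unlikely(&example_key))
                pr_info("example: slow path taken\n");  /* patched in */
}
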
diff --git a/arch/arm/include/asm/kasan.h b/arch/arm/include/asm/kasan.h
new file mode 100644
index 0000000000..303c35df31
--- /dev/null
+++ b/arch/arm/include/asm/kasan.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * arch/arm/include/asm/kasan.h
+ *
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+ *
+ */
+
+#ifndef __ASM_KASAN_H
+#define __ASM_KASAN_H
+
+#ifdef CONFIG_KASAN
+
+#include <asm/kasan_def.h>
+
+#define KASAN_SHADOW_SCALE_SHIFT 3
+
+/*
+ * The compiler uses a shadow offset assuming that addresses start
+ * from 0. Kernel addresses don't start from 0, so shadow
+ * for kernel really starts from 'compiler's shadow offset' +
+ * ('kernel address space start' >> KASAN_SHADOW_SCALE_SHIFT)
+ */
+
+asmlinkage void kasan_early_init(void);
+extern void kasan_init(void);
+
+#else
+static inline void kasan_init(void) { }
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/kasan_def.h b/arch/arm/include/asm/kasan_def.h
new file mode 100644
index 0000000000..5739605aa7
--- /dev/null
+++ b/arch/arm/include/asm/kasan_def.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * arch/arm/include/asm/kasan_def.h
+ *
+ * Copyright (c) 2018 Huawei Technologies Co., Ltd.
+ *
+ * Author: Abbott Liu <liuwenliang@huawei.com>
+ */
+
+#ifndef __ASM_KASAN_DEF_H
+#define __ASM_KASAN_DEF_H
+
+#ifdef CONFIG_KASAN
+
+/*
+ * Define KASAN_SHADOW_OFFSET, KASAN_SHADOW_START and KASAN_SHADOW_END for
+ * the Arm kernel address sanitizer. We are "stealing" lowmem (the 4GB
+ * addressable by a 32-bit architecture) out of the virtual address
+ * space to use as shadow memory for KASan as follows:
+ *
+ * +----+ 0xffffffff
+ * | | \
+ * | | |-> Static kernel image (vmlinux) BSS and page table
+ * | |/
+ * +----+ PAGE_OFFSET
+ * | | \
+ * | | |-> Loadable kernel modules virtual address space area
+ * | |/
+ * +----+ MODULES_VADDR = KASAN_SHADOW_END
+ * | | \
+ * | | |-> The shadow area of kernel virtual address.
+ * | |/
+ * +----+-> TASK_SIZE (start of kernel space) = KASAN_SHADOW_START the
+ * | |\ shadow address of MODULES_VADDR
+ * | | |
+ * | | |
+ * | | |-> The user space area in lowmem. The kernel address
+ * | | | sanitizer does not use this space, nor does it map it.
+ * | | |
+ * | | |
+ * | | |
+ * | | |
+ * | |/
+ * ------ 0
+ *
+ * 1) KASAN_SHADOW_START
+ * This value is the shadow address of MODULES_VADDR, and marks the
+ * start of kernel virtual space. Since we have modules to load, we need
+ * to cover that area with shadow memory too, so we can find memory
+ * bugs in modules.
+ *
+ * 2) KASAN_SHADOW_END
+ * This value is the shadow address of 0x100000000: the mapping that
+ * would follow the end of kernel memory at 0xffffffff. It is the end of
+ * kernel address sanitizer shadow area. It is also the start of the
+ * module area.
+ *
+ * 3) KASAN_SHADOW_OFFSET:
+ * This value is used to map an address to the corresponding shadow
+ * address by the following formula:
+ *
+ * shadow_addr = (address >> 3) + KASAN_SHADOW_OFFSET;
+ *
+ * As you would expect, >> 3 is equal to dividing by 8, meaning each
+ * byte in the shadow memory covers 8 bytes of kernel memory, so one
+ * bit of shadow memory is used per byte of kernel memory.
+ *
+ * The KASAN_SHADOW_OFFSET is provided in a Kconfig option depending
+ * on the VMSPLIT layout of the system: the kernel and userspace can
+ * split up lowmem in different ways according to needs, so we calculate
+ * the shadow offset depending on this.
+ */
+
+#define KASAN_SHADOW_SCALE_SHIFT 3
+#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
+#define KASAN_SHADOW_END ((UL(1) << (32 - KASAN_SHADOW_SCALE_SHIFT)) \
+ + KASAN_SHADOW_OFFSET)
+#define KASAN_SHADOW_START ((KASAN_SHADOW_END >> 3) + KASAN_SHADOW_OFFSET)
+
+#endif
+#endif
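
A sketch of the translation the formula above describes; it mirrors the generic kasan_mem_to_shadow() helper and is illustrative, not part of this header's API:

/* Sketch: map a kernel address to its shadow byte. Each shadow byte
 * covers 8 bytes of kernel memory (KASAN_SHADOW_SCALE_SHIFT == 3), and
 * KASAN_SHADOW_OFFSET comes from Kconfig per the chosen VMSPLIT.
 */
static inline void *example_mem_to_shadow(unsigned long addr)
{
        return (void *)((addr >> KASAN_SHADOW_SCALE_SHIFT)
                        + KASAN_SHADOW_OFFSET);
}
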
diff --git a/arch/arm/include/asm/kexec-internal.h b/arch/arm/include/asm/kexec-internal.h
new file mode 100644
index 0000000000..ecc2322db7
--- /dev/null
+++ b/arch/arm/include/asm/kexec-internal.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ARM_KEXEC_INTERNAL_H
+#define _ARM_KEXEC_INTERNAL_H
+
+struct kexec_relocate_data {
+ unsigned long kexec_start_address;
+ unsigned long kexec_indirection_page;
+ unsigned long kexec_mach_type;
+ unsigned long kexec_r2;
+};
+
+#endif
diff --git a/arch/arm/include/asm/kexec.h b/arch/arm/include/asm/kexec.h
new file mode 100644
index 0000000000..a8287e7ab9
--- /dev/null
+++ b/arch/arm/include/asm/kexec.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ARM_KEXEC_H
+#define _ARM_KEXEC_H
+
+/* Maximum physical address we can use pages from */
+#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
+/* Maximum address we can reach in physical address mode */
+#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
+/* Maximum address we can use for the control code buffer */
+#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)
+
+#define KEXEC_CONTROL_PAGE_SIZE 4096
+
+#define KEXEC_ARCH KEXEC_ARCH_ARM
+
+#define KEXEC_ARM_ATAGS_OFFSET 0x1000
+#define KEXEC_ARM_ZIMAGE_OFFSET 0x8000
+
+#ifndef __ASSEMBLY__
+
+#define ARCH_HAS_KIMAGE_ARCH
+struct kimage_arch {
+ u32 kernel_r2;
+};
+
+/**
+ * crash_setup_regs() - save registers for the panic kernel
+ * @newregs: registers are saved here
+ * @oldregs: registers to be saved (may be %NULL)
+ *
+ * Function copies machine registers from @oldregs to @newregs. If @oldregs is
+ * %NULL then the current registers are captured into @newregs instead.
+ */
+static inline void crash_setup_regs(struct pt_regs *newregs,
+ struct pt_regs *oldregs)
+{
+ if (oldregs) {
+ memcpy(newregs, oldregs, sizeof(*newregs));
+ } else {
+ __asm__ __volatile__ (
+ "stmia %[regs_base], {r0-r12}\n\t"
+ "mov %[_ARM_sp], sp\n\t"
+ "str lr, %[_ARM_lr]\n\t"
+ "adr %[_ARM_pc], 1f\n\t"
+ "mrs %[_ARM_cpsr], cpsr\n\t"
+ "1:"
+ : [_ARM_pc] "=r" (newregs->ARM_pc),
+ [_ARM_cpsr] "=r" (newregs->ARM_cpsr),
+ [_ARM_sp] "=r" (newregs->ARM_sp),
+ [_ARM_lr] "=o" (newregs->ARM_lr)
+ : [regs_base] "r" (&newregs->ARM_r0)
+ : "memory"
+ );
+ }
+}
+
+static inline unsigned long phys_to_boot_phys(phys_addr_t phys)
+{
+ return phys_to_idmap(phys);
+}
+#define phys_to_boot_phys phys_to_boot_phys
+
+static inline phys_addr_t boot_phys_to_phys(unsigned long entry)
+{
+ return idmap_to_phys(entry);
+}
+#define boot_phys_to_phys boot_phys_to_phys
+
+static inline unsigned long page_to_boot_pfn(struct page *page)
+{
+ return page_to_pfn(page) + (arch_phys_to_idmap_offset >> PAGE_SHIFT);
+}
+#define page_to_boot_pfn page_to_boot_pfn
+
+static inline struct page *boot_pfn_to_page(unsigned long boot_pfn)
+{
+ return pfn_to_page(boot_pfn - (arch_phys_to_idmap_offset >> PAGE_SHIFT));
+}
+#define boot_pfn_to_page boot_pfn_to_page
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ARM_KEXEC_H */
diff --git a/arch/arm/include/asm/kfence.h b/arch/arm/include/asm/kfence.h
new file mode 100644
index 0000000000..7980d0f227
--- /dev/null
+++ b/arch/arm/include/asm/kfence.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_ARM_KFENCE_H
+#define __ASM_ARM_KFENCE_H
+
+#include <linux/kfence.h>
+
+#include <asm/pgalloc.h>
+#include <asm/set_memory.h>
+
+static inline int split_pmd_page(pmd_t *pmd, unsigned long addr)
+{
+ int i;
+ unsigned long pfn = PFN_DOWN(__pa(addr));
+ pte_t *pte = pte_alloc_one_kernel(&init_mm);
+
+ if (!pte)
+ return -ENOMEM;
+
+ for (i = 0; i < PTRS_PER_PTE; i++)
+ set_pte_ext(pte + i, pfn_pte(pfn + i, PAGE_KERNEL), 0);
+ pmd_populate_kernel(&init_mm, pmd, pte);
+
+ flush_tlb_kernel_range(addr, addr + PMD_SIZE);
+ return 0;
+}
+
+static inline bool arch_kfence_init_pool(void)
+{
+ unsigned long addr;
+ pmd_t *pmd;
+
+ for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr);
+ addr += PAGE_SIZE) {
+ pmd = pmd_off_k(addr);
+
+ if (pmd_leaf(*pmd)) {
+ if (split_pmd_page(pmd, addr & PMD_MASK))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static inline bool kfence_protect_page(unsigned long addr, bool protect)
+{
+ set_memory_valid(addr, 1, !protect);
+
+ return true;
+}
+
+#endif /* __ASM_ARM_KFENCE_H */
diff --git a/arch/arm/include/asm/kgdb.h b/arch/arm/include/asm/kgdb.h
new file mode 100644
index 0000000000..8de1100d10
--- /dev/null
+++ b/arch/arm/include/asm/kgdb.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ARM KGDB support
+ *
+ * Author: Deepak Saxena <dsaxena@mvista.com>
+ *
+ * Copyright (C) 2002 MontaVista Software Inc.
+ *
+ */
+
+#ifndef __ARM_KGDB_H__
+#define __ARM_KGDB_H__
+
+#include <linux/ptrace.h>
+#include <asm/opcodes.h>
+
+/*
+ * GDB assumes that we're a user process being debugged, so
+ * it will send us an SWI command to write into memory as the
+ * debug trap. When an SWI occurs, the next instruction addr is
+ * placed into R14_svc before jumping to the vector trap.
+ * This doesn't work for kernel debugging as we are already in SVC
+ * mode; we would lose the kernel's LR, which is a bad thing.
+ *
+ * By doing this as an undefined instruction trap, we force a mode
+ * switch from SVC to UND mode, allowing us to save full kernel state.
+ *
+ * We also define a KGDB_COMPILED_BREAK which can be used to compile
+ * in breakpoints. This is important for things like sysrq-G and for
+ * the initial breakpoint from trap_init().
+ *
+ * Note to ARM HW designers: Add real trap support like SH && PPC to
+ * make our lives much much simpler. :)
+ */
+#define BREAK_INSTR_SIZE 4
+#define GDB_BREAKINST 0xef9f0001
+#define KGDB_BREAKINST 0xe7ffdefe
+#define KGDB_COMPILED_BREAK 0xe7ffdeff
+#define CACHE_FLUSH_IS_SAFE 1
+
+#ifndef __ASSEMBLY__
+
+static inline void arch_kgdb_breakpoint(void)
+{
+ asm(__inst_arm(0xe7ffdeff));
+}
+
+extern void kgdb_handle_bus_error(void);
+extern int kgdb_fault_expected;
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * From Kevin Hilman:
+ *
+ * gdb is expecting the following registers layout.
+ *
+ * r0-r15: 1 long word each
+ * f0-f7: unused, 3 long words each !!
+ * fps: unused, 1 long word
+ * cpsr: 1 long word
+ *
+ * Even though f0-f7 and fps are not used, they need to be
+ * present in the registers sent for correct processing in
+ * the host-side gdb.
+ *
+ * In particular, it is crucial that CPSR is in the right place,
+ * otherwise gdb will not be able to correctly interpret stepping over
+ * conditional branches.
+ */
+#define _GP_REGS 16
+#define _FP_REGS 8
+#define _EXTRA_REGS 2
+#define GDB_MAX_REGS (_GP_REGS + (_FP_REGS * 3) + _EXTRA_REGS)
+#define DBG_MAX_REG_NUM (_GP_REGS + _FP_REGS + _EXTRA_REGS)
+
+#define KGDB_MAX_NO_CPUS 1
+#define BUFMAX 400
+#define NUMREGBYTES (GDB_MAX_REGS << 2)
+#define NUMCRITREGBYTES (32 << 2)
+
+#define _R0 0
+#define _R1 1
+#define _R2 2
+#define _R3 3
+#define _R4 4
+#define _R5 5
+#define _R6 6
+#define _R7 7
+#define _R8 8
+#define _R9 9
+#define _R10 10
+#define _FP 11
+#define _IP 12
+#define _SPT 13
+#define _LR 14
+#define _PC 15
+#define _CPSR (GDB_MAX_REGS - 1)
+
+/*
+ * So that we can denote the end of a frame for tracing,
+ * in the simple case:
+ */
+#define CFI_END_FRAME(func) __CFI_END_FRAME(_PC, _SPT, func)
+
+#endif /* __ASM_KGDB_H__ */
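
A sketch of how the layout above is honoured when handing registers to gdb; the real conversion lives in arch/arm/kernel/kgdb.c, so treat this as a simplified illustration:

/* Sketch: pack pt_regs into the gdb register buffer -- r0-r15 first,
 * the unused f0-f7/fps slots left zeroed, CPSR in the last slot.
 */
static void example_regs_to_gdb(unsigned long *gdb_regs, struct pt_regs *regs)
{
        int i;

        for (i = 0; i < GDB_MAX_REGS; i++)
                gdb_regs[i] = 0;                /* zero f0-f7 and fps */
        for (i = _R0; i <= _PC; i++)
                gdb_regs[i] = regs->uregs[i];   /* r0-r15 */
        gdb_regs[_CPSR] = regs->ARM_cpsr;
}
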
diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h
new file mode 100644
index 0000000000..e26a278d30
--- /dev/null
+++ b/arch/arm/include/asm/kprobes.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/kprobes.h
+ *
+ * Copyright (C) 2006, 2007 Motorola Inc.
+ */
+
+#ifndef _ARM_KPROBES_H
+#define _ARM_KPROBES_H
+
+#include <asm-generic/kprobes.h>
+
+#ifdef CONFIG_KPROBES
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/notifier.h>
+
+#define __ARCH_WANT_KPROBES_INSN_SLOT
+#define MAX_INSN_SIZE 2
+
+#define flush_insn_slot(p) do { } while (0)
+#define kretprobe_blacklist_size 0
+
+typedef u32 kprobe_opcode_t;
+struct kprobe;
+#include <asm/probes.h>
+
+#define arch_specific_insn arch_probes_insn
+
+struct prev_kprobe {
+ struct kprobe *kp;
+ unsigned int status;
+};
+
+/* per-cpu kprobe control block */
+struct kprobe_ctlblk {
+ unsigned int kprobe_status;
+ struct prev_kprobe prev_kprobe;
+};
+
+void arch_remove_kprobe(struct kprobe *);
+int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr);
+int kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data);
+
+/* optinsn template addresses */
+extern __visible kprobe_opcode_t optprobe_template_entry[];
+extern __visible kprobe_opcode_t optprobe_template_val[];
+extern __visible kprobe_opcode_t optprobe_template_call[];
+extern __visible kprobe_opcode_t optprobe_template_end[];
+extern __visible kprobe_opcode_t optprobe_template_sub_sp[];
+extern __visible kprobe_opcode_t optprobe_template_add_sp[];
+extern __visible kprobe_opcode_t optprobe_template_restore_begin[];
+extern __visible kprobe_opcode_t optprobe_template_restore_orig_insn[];
+extern __visible kprobe_opcode_t optprobe_template_restore_end[];
+
+#define MAX_OPTIMIZED_LENGTH 4
+#define MAX_OPTINSN_SIZE \
+ ((unsigned long)optprobe_template_end - \
+ (unsigned long)optprobe_template_entry)
+#define RELATIVEJUMP_SIZE 4
+
+struct arch_optimized_insn {
+ /*
+ * copy of the original instructions.
+ * Different from x86, ARM kprobe_opcode_t is u32.
+ */
+#define MAX_COPIED_INSN DIV_ROUND_UP(RELATIVEJUMP_SIZE, sizeof(kprobe_opcode_t))
+ kprobe_opcode_t copied_insn[MAX_COPIED_INSN];
+ /* detour code buffer */
+ kprobe_opcode_t *insn;
+ /*
+ * We always copy one instruction on ARM,
+ * so size will always be 4, and unlike x86, there is no
+ * need for a size field.
+ */
+};
+
+#endif /* CONFIG_KPROBES */
+#endif /* _ARM_KPROBES_H */
diff --git a/arch/arm/include/asm/krait-l2-accessors.h b/arch/arm/include/asm/krait-l2-accessors.h
new file mode 100644
index 0000000000..a5f2cdd644
--- /dev/null
+++ b/arch/arm/include/asm/krait-l2-accessors.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASMARM_KRAIT_L2_ACCESSORS_H
+#define __ASMARM_KRAIT_L2_ACCESSORS_H
+
+extern void krait_set_l2_indirect_reg(u32 addr, u32 val);
+extern u32 krait_get_l2_indirect_reg(u32 addr);
+
+#endif
diff --git a/arch/arm/include/asm/linkage.h b/arch/arm/include/asm/linkage.h
new file mode 100644
index 0000000000..c4670694ad
--- /dev/null
+++ b/arch/arm/include/asm/linkage.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+#define __ALIGN .align 0
+#define __ALIGN_STR ".align 0"
+
+#define ENDPROC(name) \
+ .type name, %function; \
+ END(name)
+
+#endif
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
new file mode 100644
index 0000000000..2b18a25820
--- /dev/null
+++ b/arch/arm/include/asm/mach/arch.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/mach/arch.h
+ *
+ * Copyright (C) 2000 Russell King
+ */
+
+#include <linux/types.h>
+
+#ifndef __ASSEMBLY__
+#include <linux/reboot.h>
+
+struct tag;
+struct pt_regs;
+struct smp_operations;
+#ifdef CONFIG_SMP
+#define smp_ops(ops) (&(ops))
+#define smp_init_ops(ops) (&(ops))
+#else
+#define smp_ops(ops) (struct smp_operations *)NULL
+#define smp_init_ops(ops) (bool (*)(void))NULL
+#endif
+
+struct machine_desc {
+ unsigned int nr; /* architecture number */
+ const char *name; /* architecture name */
+ unsigned long atag_offset; /* tagged list (relative) */
+ const char *const *dt_compat; /* array of device tree
+ * 'compatible' strings */
+
+ unsigned int nr_irqs; /* number of IRQs */
+
+#ifdef CONFIG_ZONE_DMA
+ phys_addr_t dma_zone_size; /* size of DMA-able area */
+#endif
+
+ unsigned int video_start; /* start of video RAM */
+ unsigned int video_end; /* end of video RAM */
+
+ unsigned char reserve_lp0 :1; /* never has lp0 */
+ unsigned char reserve_lp1 :1; /* never has lp1 */
+ unsigned char reserve_lp2 :1; /* never has lp2 */
+ enum reboot_mode reboot_mode; /* default restart mode */
+ unsigned l2c_aux_val; /* L2 cache aux value */
+ unsigned l2c_aux_mask; /* L2 cache aux mask */
+ void (*l2c_write_sec)(unsigned long, unsigned);
+ const struct smp_operations *smp; /* SMP operations */
+ bool (*smp_init)(void);
+ void (*fixup)(struct tag *, char **);
+ void (*dt_fixup)(void);
+ long long (*pv_fixup)(void);
+ void (*reserve)(void);/* reserve mem blocks */
+ void (*map_io)(void);/* IO mapping function */
+ void (*init_early)(void);
+ void (*init_irq)(void);
+ void (*init_time)(void);
+ void (*init_machine)(void);
+ void (*init_late)(void);
+ void (*restart)(enum reboot_mode, const char *);
+};
+
+/*
+ * Current machine - only accessible during boot.
+ */
+extern const struct machine_desc *machine_desc;
+
+/*
+ * Machine type table - also only accessible during boot
+ */
+extern const struct machine_desc __arch_info_begin[], __arch_info_end[];
+#define for_each_machine_desc(p) \
+ for (p = __arch_info_begin; p < __arch_info_end; p++)
+
+/*
+ * Set of macros to define architecture features. This is built into
+ * a table by the linker.
+ */
+#define MACHINE_START(_type,_name) \
+static const struct machine_desc __mach_desc_##_type \
+ __used \
+ __section(".arch.info.init") = { \
+ .nr = MACH_TYPE_##_type, \
+ .name = _name,
+
+#define MACHINE_END \
+};
+
+#define DT_MACHINE_START(_name, _namestr) \
+static const struct machine_desc __mach_desc_##_name \
+ __used \
+ __section(".arch.info.init") = { \
+ .nr = ~0, \
+ .name = _namestr,
+
+#endif
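
A hedged example of the DT table macros in use, in the style of a board file; the compatible string and init hook are made up:

/* Sketch: registering a DT-probed machine. The entry lands in the
 * .arch.info.init table that boot code matches against the device
 * tree's root 'compatible' property.
 */
static void __init example_init_machine(void)
{
        /* board-specific setup would go here */
}

static const char *const example_dt_compat[] __initconst = {
        "vendor,example-board",         /* hypothetical compatible */
        NULL,
};

DT_MACHINE_START(EXAMPLE_DT, "Example Board (DT)")
        .dt_compat      = example_dt_compat,
        .init_machine   = example_init_machine,
MACHINE_END
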
diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
new file mode 100644
index 0000000000..5ec11d7f0d
--- /dev/null
+++ b/arch/arm/include/asm/mach/dma.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/mach/dma.h
+ *
+ * Copyright (C) 1998-2000 Russell King
+ *
+ * This header file describes the interface between the generic DMA handler
+ * (dma.c) and the architecture-specific DMA backends (dma-*.c)
+ */
+
+struct dma_struct;
+typedef struct dma_struct dma_t;
+
+struct dma_ops {
+ int (*request)(unsigned int, dma_t *); /* optional */
+ void (*free)(unsigned int, dma_t *); /* optional */
+ void (*enable)(unsigned int, dma_t *); /* mandatory */
+ void (*disable)(unsigned int, dma_t *); /* mandatory */
+ int (*residue)(unsigned int, dma_t *); /* optional */
+ int (*setspeed)(unsigned int, dma_t *, int); /* optional */
+ const char *type;
+};
+
+struct dma_struct {
+ void *addr; /* single DMA address */
+ unsigned long count; /* single DMA size */
+ struct scatterlist buf; /* single DMA */
+ int sgcount; /* number of DMA SG */
+ struct scatterlist *sg; /* DMA Scatter-Gather List */
+
+ unsigned int active:1; /* Transfer active */
+ unsigned int invalid:1; /* Address/Count changed */
+
+ unsigned int dma_mode; /* DMA mode */
+ int speed; /* DMA speed */
+
+ unsigned int lock; /* Device is allocated */
+ const char *device_id; /* Device name */
+
+ const struct dma_ops *d_ops;
+};
+
+/*
+ * isa_dma_add - add an ISA-style DMA channel
+ */
+extern int isa_dma_add(unsigned int, dma_t *dma);
diff --git a/arch/arm/include/asm/mach/flash.h b/arch/arm/include/asm/mach/flash.h
new file mode 100644
index 0000000000..c9cbfdefc9
--- /dev/null
+++ b/arch/arm/include/asm/mach/flash.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/mach/flash.h
+ *
+ * Copyright (C) 2003 Russell King, All Rights Reserved.
+ */
+#ifndef ASMARM_MACH_FLASH_H
+#define ASMARM_MACH_FLASH_H
+
+struct mtd_partition;
+struct mtd_info;
+
+/*
+ * map_name: the map probe function name
+ * name: flash device name (eg, as used with mtdparts=)
+ * width: width of mapped device
+ * init: method called at driver/device initialisation
+ * exit: method called at driver/device removal
+ * set_vpp: method called to enable or disable VPP
+ * mmcontrol: method called to enable or disable Sync. Burst Read in OneNAND
+ * parts: optional array of mtd_partitions for static partitioning
+ * nr_parts: number of mtd_partitions for static partitioning
+ */
+struct flash_platform_data {
+ const char *map_name;
+ const char *name;
+ unsigned int width;
+ int (*init)(void);
+ void (*exit)(void);
+ void (*set_vpp)(int on);
+ void (*mmcontrol)(struct mtd_info *mtd, int sync_read);
+ struct mtd_partition *parts;
+ unsigned int nr_parts;
+};
+
+#endif
diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h
new file mode 100644
index 0000000000..dfe832a3bf
--- /dev/null
+++ b/arch/arm/include/asm/mach/irq.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/mach/irq.h
+ *
+ * Copyright (C) 1995-2000 Russell King.
+ */
+#ifndef __ASM_ARM_MACH_IRQ_H
+#define __ASM_ARM_MACH_IRQ_H
+
+#include <linux/irq.h>
+
+struct seq_file;
+
+/*
+ * This is internal. Do not use it.
+ */
+extern void init_FIQ(int);
+extern int show_fiq_list(struct seq_file *, int);
+
+/*
+ * This is for easy migration, but should be changed in the source
+ */
+#define do_bad_IRQ(desc) \
+do { \
+ raw_spin_lock(&desc->lock); \
+ handle_bad_irq(desc); \
+ raw_spin_unlock(&desc->lock); \
+} while(0)
+
+#endif
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
new file mode 100644
index 0000000000..2b8970d8e5
--- /dev/null
+++ b/arch/arm/include/asm/mach/map.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/map.h
+ *
+ * Copyright (C) 1999-2000 Russell King
+ *
+ * Page table mapping constructs and function prototypes
+ */
+#ifndef __ASM_MACH_MAP_H
+#define __ASM_MACH_MAP_H
+
+#include <asm/io.h>
+
+struct map_desc {
+ unsigned long virtual;
+ unsigned long pfn;
+ unsigned long length;
+ unsigned int type;
+};
+
+/* types 0-3 are defined in asm/io.h */
+enum {
+ MT_UNCACHED = 4,
+ MT_CACHECLEAN,
+ MT_MINICLEAN,
+ MT_LOW_VECTORS,
+ MT_HIGH_VECTORS,
+ MT_MEMORY_RWX,
+ MT_MEMORY_RW,
+ MT_MEMORY_RO,
+ MT_ROM,
+ MT_MEMORY_RWX_NONCACHED,
+ MT_MEMORY_RW_DTCM,
+ MT_MEMORY_RWX_ITCM,
+ MT_MEMORY_RW_SO,
+ MT_MEMORY_DMA_READY,
+};
+
+#ifdef CONFIG_MMU
+extern void iotable_init(struct map_desc *, int);
+extern void vm_reserve_area_early(unsigned long addr, unsigned long size,
+ void *caller);
+extern void create_mapping_late(struct mm_struct *mm, struct map_desc *md,
+ bool ng);
+
+#ifdef CONFIG_DEBUG_LL
+extern void debug_ll_addr(unsigned long *paddr, unsigned long *vaddr);
+extern void debug_ll_io_init(void);
+#else
+static inline void debug_ll_io_init(void) {}
+#endif
+
+struct mem_type;
+extern const struct mem_type *get_mem_type(unsigned int type);
+/*
+ * external interface to remap single page with appropriate type
+ */
+extern int ioremap_page(unsigned long virt, unsigned long phys,
+ const struct mem_type *mtype);
+#else
+#define iotable_init(map,num) do { } while (0)
+#define vm_reserve_area_early(a,s,c) do { } while (0)
+#endif
+
+#endif
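
A usage sketch for iotable_init(), as a machine's map_io callback would call it; the addresses are illustrative, and __phys_to_pfn()/SZ_1M come from <asm/memory.h> and <linux/sizes.h>:

/* Sketch: statically map 1MiB of device registers at a fixed virtual
 * address during map_io.
 */
static struct map_desc example_io_desc[] __initdata = {
        {
                .virtual        = 0xf8000000UL,
                .pfn            = __phys_to_pfn(0x10000000),
                .length         = SZ_1M,
                .type           = MT_DEVICE,
        },
};

static void __init example_map_io(void)
{
        iotable_init(example_io_desc, ARRAY_SIZE(example_io_desc));
}
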
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
new file mode 100644
index 0000000000..ea9bd08895
--- /dev/null
+++ b/arch/arm/include/asm/mach/pci.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/mach/pci.h
+ *
+ * Copyright (C) 2000 Russell King
+ */
+
+#ifndef __ASM_MACH_PCI_H
+#define __ASM_MACH_PCI_H
+
+#include <linux/ioport.h>
+
+struct pci_sys_data;
+struct pci_ops;
+struct pci_bus;
+struct pci_host_bridge;
+struct device;
+
+struct hw_pci {
+ struct pci_ops *ops;
+ int nr_controllers;
+ void **private_data;
+ int (*setup)(int nr, struct pci_sys_data *);
+ int (*scan)(int nr, struct pci_host_bridge *);
+ void (*preinit)(void);
+ void (*postinit)(void);
+ u8 (*swizzle)(struct pci_dev *dev, u8 *pin);
+ int (*map_irq)(const struct pci_dev *dev, u8 slot, u8 pin);
+};
+
+/*
+ * Per-controller structure
+ */
+struct pci_sys_data {
+ struct list_head node;
+ int busnr; /* primary bus number */
+ u64 mem_offset; /* bus->cpu memory mapping offset */
+ unsigned long io_offset; /* bus->cpu IO mapping offset */
+ struct pci_bus *bus; /* PCI bus */
+ struct list_head resources; /* root bus resources (apertures) */
+ struct resource io_res;
+ char io_res_name[12];
+ /* Bridge swizzling */
+ u8 (*swizzle)(struct pci_dev *, u8 *);
+ /* IRQ mapping */
+ int (*map_irq)(const struct pci_dev *, u8, u8);
+ void *private_data; /* platform controller private data */
+};
+
+/*
+ * Call this with your hw_pci struct to initialise the PCI system.
+ */
+void pci_common_init_dev(struct device *, struct hw_pci *);
+
+/*
+ * Compatibility wrapper for older platforms that do not care about
+ * passing the parent device.
+ */
+static inline void pci_common_init(struct hw_pci *hw)
+{
+ pci_common_init_dev(NULL, hw);
+}
+
+/*
+ * Setup early fixed I/O mapping.
+ */
+#if defined(CONFIG_PCI)
+extern void pci_map_io_early(unsigned long pfn);
+#else
+static inline void pci_map_io_early(unsigned long pfn) {}
+#endif
+
+/*
+ * PCI controllers
+ */
+extern struct pci_ops iop3xx_ops;
+extern int iop3xx_pci_setup(int nr, struct pci_sys_data *);
+extern void iop3xx_pci_preinit(void);
+extern void iop3xx_pci_preinit_cond(void);
+
+extern struct pci_ops dc21285_ops;
+extern int dc21285_setup(int nr, struct pci_sys_data *);
+extern void dc21285_preinit(void);
+extern void dc21285_postinit(void);
+
+#endif /* __ASM_MACH_PCI_H */
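
A minimal sketch of filling in hw_pci and handing it to pci_common_init(); the ops, setup and IRQ mapping here are hypothetical platform code:

/* Sketch: describe one PCI controller. setup() returns nonzero when
 * the bus should be scanned; map_irq() resolves a slot/pin to an IRQ.
 */
extern struct pci_ops example_pci_ops;          /* hypothetical */

static int __init example_pci_setup(int nr, struct pci_sys_data *sys)
{
        return 1;                               /* scan this bus */
}

static int example_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
        return 42;                              /* illustrative IRQ */
}

static struct hw_pci example_pci __initdata = {
        .nr_controllers = 1,
        .ops            = &example_pci_ops,
        .setup          = example_pci_setup,
        .map_irq        = example_map_irq,
};

static void __init example_pci_init(void)
{
        pci_common_init(&example_pci);
}
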
diff --git a/arch/arm/include/asm/mach/sharpsl_param.h b/arch/arm/include/asm/mach/sharpsl_param.h
new file mode 100644
index 0000000000..700a377c20
--- /dev/null
+++ b/arch/arm/include/asm/mach/sharpsl_param.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Hardware parameter area specific to Sharp SL series devices
+ *
+ * Copyright (c) 2005 Richard Purdie
+ *
+ * Based on Sharp's 2.4 kernel patches
+ */
+
+struct sharpsl_param_info {
+ unsigned int comadj_keyword;
+ unsigned int comadj;
+
+ unsigned int uuid_keyword;
+ unsigned char uuid[16];
+
+ unsigned int touch_keyword;
+ unsigned int touch_xp;
+ unsigned int touch_yp;
+ unsigned int touch_xd;
+ unsigned int touch_yd;
+
+ unsigned int adadj_keyword;
+ unsigned int adadj;
+
+ unsigned int phad_keyword;
+ unsigned int phadadj;
+} __attribute__((packed));
+
+
+extern struct sharpsl_param_info sharpsl_param;
+extern void sharpsl_save_param(void);
+
diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h
new file mode 100644
index 0000000000..5f522916ec
--- /dev/null
+++ b/arch/arm/include/asm/mach/time.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/mach/time.h
+ *
+ * Copyright (C) 2004 MontaVista Software, Inc.
+ */
+#ifndef __ASM_ARM_MACH_TIME_H
+#define __ASM_ARM_MACH_TIME_H
+
+typedef void (*clock_access_fn)(struct timespec64 *);
+extern int register_persistent_clock(clock_access_fn read_persistent);
+
+#endif
diff --git a/arch/arm/include/asm/mc146818rtc.h b/arch/arm/include/asm/mc146818rtc.h
new file mode 100644
index 0000000000..58922879a6
--- /dev/null
+++ b/arch/arm/include/asm/mc146818rtc.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Machine dependent access functions for RTC registers.
+ */
+#ifndef _ASM_MC146818RTC_H
+#define _ASM_MC146818RTC_H
+
+#include <linux/io.h>
+#include <linux/kernel.h>
+
+#define RTC_IRQ BUILD_BUG_ON(1)
+
+#ifndef RTC_PORT
+#define RTC_PORT(x) (0x70 + (x))
+#define RTC_ALWAYS_BCD 1 /* RTC values are always in BCD */
+#endif
+
+/*
+ * The machines supported so far all access the RTC index register via
+ * an ISA port access, but the way they access the data register differs ...
+ */
+#define CMOS_READ(addr) ({ \
+outb_p((addr),RTC_PORT(0)); \
+inb_p(RTC_PORT(1)); \
+})
+#define CMOS_WRITE(val, addr) ({ \
+outb_p((addr),RTC_PORT(0)); \
+outb_p((val),RTC_PORT(1)); \
+})
+
+#endif /* _ASM_MC146818RTC_H */
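
A usage sketch for the accessors, reading the seconds register; RTC_SECONDS and bcd2bin() are assumed to come from <linux/mc146818rtc.h> and <linux/bcd.h>:

/* Sketch: read the RTC seconds register through CMOS_READ(). With
 * RTC_ALWAYS_BCD set, the raw value is BCD and needs converting.
 */
static unsigned int example_read_seconds(void)
{
        unsigned int sec = CMOS_READ(RTC_SECONDS);

        if (RTC_ALWAYS_BCD)
                sec = bcd2bin(sec);
        return sec;
}
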
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
new file mode 100644
index 0000000000..755c97de34
--- /dev/null
+++ b/arch/arm/include/asm/mcpm.h
@@ -0,0 +1,338 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/mcpm.h
+ *
+ * Created by: Nicolas Pitre, April 2012
+ * Copyright: (C) 2012-2013 Linaro Limited
+ */
+
+#ifndef MCPM_H
+#define MCPM_H
+
+/*
+ * Maximum number of possible clusters / CPUs per cluster.
+ *
+ * This should be sufficient for quite a while, while keeping the
+ * (assembly) code simpler. When this starts to grow then we'll have
+ * to consider dynamic allocation.
+ */
+#define MAX_CPUS_PER_CLUSTER 4
+
+#ifdef CONFIG_MCPM_QUAD_CLUSTER
+#define MAX_NR_CLUSTERS 4
+#else
+#define MAX_NR_CLUSTERS 2
+#endif
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Platform specific code should use this symbol to set up secondary
+ * entry location for processors to use when released from reset.
+ */
+extern void mcpm_entry_point(void);
+
+/*
+ * This is used to indicate, via ptr, where the given CPU from the given
+ * cluster should branch once it is ready to re-enter the kernel, or NULL
+ * if it should be gated. A gated CPU is held in a WFE loop until its
+ * vector becomes non-NULL.
+ */
+void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);
+
+/*
+ * This sets an early poke, i.e. a value to be poked into some address
+ * from very early assembly code before the CPU is ungated. The
+ * address must be physical, and if 0 then nothing will happen.
+ */
+void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
+ unsigned long poke_phys_addr, unsigned long poke_val);
+
+/*
+ * CPU/cluster power operations API for higher subsystems to use.
+ */
+
+/**
+ * mcpm_is_available - returns whether MCPM is initialized and available
+ *
+ * This returns true or false accordingly.
+ */
+bool mcpm_is_available(void);
+
+/**
+ * mcpm_cpu_power_up - make the given CPU in the given cluster runnable
+ *
+ * @cpu: CPU number within given cluster
+ * @cluster: cluster number for the CPU
+ *
+ * The identified CPU is brought out of reset. If the cluster was powered
+ * down then it is brought up as well, taking care not to let the other CPUs
+ * in the cluster run, and ensuring appropriate cluster setup.
+ *
+ * Caller must ensure the appropriate entry vector is initialized with
+ * mcpm_set_entry_vector() prior to calling this.
+ *
+ * This must be called in a sleepable context. However, the implementation
+ * is strongly encouraged to return early and let the operation happen
+ * asynchronously, especially when significant delays are expected.
+ *
+ * If the operation cannot be performed then an error code is returned.
+ */
+int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
+
+/**
+ * mcpm_cpu_power_down - power the calling CPU down
+ *
+ * The calling CPU is powered down.
+ *
+ * If this CPU is found to be the "last man standing" in the cluster
+ * then the cluster is prepared for power-down too.
+ *
+ * This must be called with interrupts disabled.
+ *
+ * On success this does not return. Re-entry in the kernel is expected
+ * via mcpm_entry_point.
+ *
+ * This will return if mcpm_platform_register() has not been called
+ * previously in which case the caller should take appropriate action.
+ *
+ * On success, the CPU is not guaranteed to be truly halted until
+ * mcpm_wait_for_cpu_powerdown() subsequently returns non-zero for the
+ * specified cpu. Until then, other CPUs should make sure they do not
+ * trash memory the target CPU might be executing/accessing.
+ */
+void mcpm_cpu_power_down(void);
+
+/**
+ * mcpm_wait_for_cpu_powerdown - wait for a specified CPU to halt, and
+ * make sure it is powered off
+ *
+ * @cpu: CPU number within given cluster
+ * @cluster: cluster number for the CPU
+ *
+ * Call this function to ensure that a pending powerdown has taken
+ * effect and the CPU is safely parked before performing non-mcpm
+ * operations that may affect the CPU (such as kexec trashing the
+ * kernel text).
+ *
+ * It is *not* necessary to call this function if you only need to
+ * serialise a pending powerdown with mcpm_cpu_power_up() or a wakeup
+ * event.
+ *
+ * Do not call this function unless the specified CPU has already
+ * called mcpm_cpu_power_down() or has committed to doing so.
+ *
+ * @return:
+ * - zero if the CPU is in a safely parked state
+ * - nonzero otherwise (e.g., timeout)
+ */
+int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster);
+
+/**
+ * mcpm_cpu_suspend - bring the calling CPU into a suspended state
+ *
+ * The calling CPU is suspended. This is similar to mcpm_cpu_power_down()
+ * except for possible extra platform specific configuration steps to allow
+ * an asynchronous wake-up e.g. with a pending interrupt.
+ *
+ * If this CPU is found to be the "last man standing" in the cluster
+ * then the cluster may be prepared for power-down too.
+ *
+ * This must be called with interrupts disabled.
+ *
+ * On success this does not return. Re-entry in the kernel is expected
+ * via mcpm_entry_point.
+ *
+ * This will return if mcpm_platform_register() has not been called
+ * previously in which case the caller should take appropriate action.
+ */
+void mcpm_cpu_suspend(void);
+
+/**
+ * mcpm_cpu_powered_up - housekeeping work after a CPU has been powered up
+ *
+ * This lets the platform specific backend code perform needed housekeeping
+ * work. This must be called by the newly activated CPU as soon as it is
+ * fully operational in kernel space, before it enables interrupts.
+ *
+ * If the operation cannot be performed then an error code is returned.
+ */
+int mcpm_cpu_powered_up(void);
+
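
Putting the API together, a hedged sketch of a secondary-CPU bring-up sequence; secondary_startup is assumed to be the kernel's usual assembly entry point:

/* Sketch: release one CPU in one cluster following the rules above --
 * set the entry vector first, then power the CPU up.
 */
extern void secondary_startup(void);    /* assumed asm entry point */

static int example_boot_secondary(unsigned int cpu, unsigned int cluster)
{
        if (!mcpm_is_available())
                return -ENODEV;

        mcpm_set_entry_vector(cpu, cluster, secondary_startup);
        return mcpm_cpu_power_up(cpu, cluster);
}
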
+/*
+ * Platform specific callbacks used in the implementation of the above API.
+ *
+ * cpu_powerup:
+ * Make the given CPU runnable. Called with MCPM lock held and IRQs disabled.
+ * The given cluster is assumed to be set up (cluster_powerup would have
+ * been called beforehand). Must return 0 for success or negative error code.
+ *
+ * cluster_powerup:
+ * Set up power for given cluster. Called with MCPM lock held and IRQs
+ * disabled. Called before first cpu_powerup when cluster is down. Must
+ * return 0 for success or negative error code.
+ *
+ * cpu_suspend_prepare:
+ * Special suspend configuration. Called on target CPU with MCPM lock held
+ * and IRQs disabled. This callback is optional. If provided, it is called
+ * before cpu_powerdown_prepare.
+ *
+ * cpu_powerdown_prepare:
+ * Configure given CPU for power down. Called on target CPU with MCPM lock
+ * held and IRQs disabled. Power down must be effective only at the next
+ * WFI instruction.
+ *
+ * cluster_powerdown_prepare:
+ * Configure given cluster for power down. Called on one CPU from target
+ * cluster with MCPM lock held and IRQs disabled. cpu_powerdown_prepare
+ * has already happened for each CPU in the cluster when this occurs.
+ *
+ * cpu_cache_disable:
+ * Clean and disable the CPU-level cache for the calling CPU. Called with
+ * IRQs disabled. The CPU is no longer cache coherent with the rest of the
+ * system when this returns.
+ *
+ * cluster_cache_disable:
+ * Clean and disable the cluster-wide cache as well as the CPU-level cache
+ * for the calling CPU. No call to cpu_cache_disable will happen for this
+ * CPU. Called with IRQs disabled and only when all the other CPUs are done
+ * with their own cpu_cache_disable. The cluster is no longer cache coherent
+ * with the rest of the system when this returns.
+ *
+ * cpu_is_up:
+ * Called on given CPU after it has been powered up or resumed. The MCPM lock
+ * is held and IRQs disabled. This callback is optional.
+ *
+ * cluster_is_up:
+ * Called by the first CPU to be powered up or resumed in given cluster.
+ * The MCPM lock is held and IRQs disabled. This callback is optional. If
+ * provided, it is called before cpu_is_up for that CPU.
+ *
+ * wait_for_powerdown:
+ * Wait until given CPU is powered down. This is called in sleeping context.
+ * Some reasonable timeout must be considered. Must return 0 for success or
+ * negative error code.
+ */
+struct mcpm_platform_ops {
+ int (*cpu_powerup)(unsigned int cpu, unsigned int cluster);
+ int (*cluster_powerup)(unsigned int cluster);
+ void (*cpu_suspend_prepare)(unsigned int cpu, unsigned int cluster);
+ void (*cpu_powerdown_prepare)(unsigned int cpu, unsigned int cluster);
+ void (*cluster_powerdown_prepare)(unsigned int cluster);
+ void (*cpu_cache_disable)(void);
+ void (*cluster_cache_disable)(void);
+ void (*cpu_is_up)(unsigned int cpu, unsigned int cluster);
+ void (*cluster_is_up)(unsigned int cluster);
+ int (*wait_for_powerdown)(unsigned int cpu, unsigned int cluster);
+};
+
+/**
+ * mcpm_platform_register - register platform specific power methods
+ *
+ * @ops: mcpm_platform_ops structure to register
+ *
+ * An error is returned if the registration has been done previously.
+ */
+int __init mcpm_platform_register(const struct mcpm_platform_ops *ops);
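+
+/*
+ * Illustrative sketch (hypothetical platform backend, not part of this
+ * header): a backend fills in the callbacks it implements and registers
+ * them once during early init:
+ *
+ *	static const struct mcpm_platform_ops myplat_pm_ops = {
+ *		.cpu_powerup		= myplat_cpu_powerup,
+ *		.cluster_powerup	= myplat_cluster_powerup,
+ *		.cpu_powerdown_prepare	= myplat_cpu_powerdown_prepare,
+ *		.cluster_powerdown_prepare = myplat_cluster_powerdown_prepare,
+ *		.cpu_cache_disable	= myplat_cpu_cache_disable,
+ *		.cluster_cache_disable	= myplat_cluster_cache_disable,
+ *		.wait_for_powerdown	= myplat_wait_for_powerdown,
+ *	};
+ *
+ *	ret = mcpm_platform_register(&myplat_pm_ops);
+ */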
+
+/**
+ * mcpm_sync_init - Initialize the cluster synchronization support
+ *
+ * @power_up_setup: platform specific function invoked during very
+ * early CPU/cluster bringup stage.
+ *
+ * This prepares memory used by vlocks and the MCPM state machine used
+ * across CPUs that may have their caches active or inactive. Must be
+ * called only after a successful call to mcpm_platform_register().
+ *
+ * The power_up_setup argument is a pointer to assembly code called when
+ * the MMU and caches are still disabled during boot and no stack space is
+ * available. The affinity level passed to that code corresponds to the
+ * resource that needs to be initialized (e.g. 1 for cluster level, 0 for
+ * CPU level). Proper exclusion mechanisms are already activated at that
+ * point.
+ */
+int __init mcpm_sync_init(
+ void (*power_up_setup)(unsigned int affinity_level));
+
+/**
+ * mcpm_loopback - make a run through the MCPM low-level code
+ *
+ * @cache_disable: pointer to function performing cache disabling
+ *
+ * This exercises the MCPM machinery by soft resetting the CPU and branching
+ * to the MCPM low-level entry code before returning to the caller.
+ * The @cache_disable function must do the necessary cache disabling to
+ * let the regular kernel init code turn it back on as if the CPU was
+ * hotplugged in. The MCPM state machine is set as if the cluster was
+ * initialized, meaning the power_up_setup callback passed to mcpm_sync_init()
+ * will be invoked for all affinity levels. This may be useful to initialize
+ * some resources, such as enabling the CCI (which requires the cache to be
+ * off), or simply for testing purposes.
+ */
+int __init mcpm_loopback(void (*cache_disable)(void));
+
+void __init mcpm_smp_set_ops(void);
+
+/*
+ * Synchronisation structures for coordinating safe cluster setup/teardown.
+ * This is private to the MCPM core code and shared between C and assembly.
+ * When modifying this structure, make sure you update the MCPM_SYNC_ defines
+ * to match.
+ */
+struct mcpm_sync_struct {
+ /* individual CPU states */
+ struct {
+ s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE);
+ } cpus[MAX_CPUS_PER_CLUSTER];
+
+ /* cluster state */
+ s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE);
+
+ /* inbound-side state */
+ s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE);
+};
+
+struct sync_struct {
+ struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS];
+};
+
+#else
+
+/*
+ * asm-offsets.h causes trouble when included in .c files, and cacheflush.h
+ * cannot be included in asm files. Let's work around the conflict like this.
+ */
+#include <asm/asm-offsets.h>
+#define __CACHE_WRITEBACK_GRANULE CACHE_WRITEBACK_GRANULE
+
+#endif /* ! __ASSEMBLY__ */
+
+/* Definitions for mcpm_sync_struct */
+#define CPU_DOWN 0x11
+#define CPU_COMING_UP 0x12
+#define CPU_UP 0x13
+#define CPU_GOING_DOWN 0x14
+
+#define CLUSTER_DOWN 0x21
+#define CLUSTER_UP 0x22
+#define CLUSTER_GOING_DOWN 0x23
+
+#define INBOUND_NOT_COMING_UP 0x31
+#define INBOUND_COMING_UP 0x32
+
+/*
+ * Offsets for the mcpm_sync_struct members, for use in asm.
+ * We don't want to make them global to the kernel via asm-offsets.c.
+ */
+#define MCPM_SYNC_CLUSTER_CPUS 0
+#define MCPM_SYNC_CPU_SIZE __CACHE_WRITEBACK_GRANULE
+#define MCPM_SYNC_CLUSTER_CLUSTER \
+ (MCPM_SYNC_CLUSTER_CPUS + MCPM_SYNC_CPU_SIZE * MAX_CPUS_PER_CLUSTER)
+#define MCPM_SYNC_CLUSTER_INBOUND \
+ (MCPM_SYNC_CLUSTER_CLUSTER + __CACHE_WRITEBACK_GRANULE)
+#define MCPM_SYNC_CLUSTER_SIZE \
+ (MCPM_SYNC_CLUSTER_INBOUND + __CACHE_WRITEBACK_GRANULE)
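+
+/*
+ * Illustrative sketch (not part of this header): from C code, the
+ * correspondence between these offsets and struct mcpm_sync_struct
+ * could be checked at compile time, e.g.:
+ *
+ *	BUILD_BUG_ON(offsetof(struct mcpm_sync_struct, cluster) !=
+ *		     MCPM_SYNC_CLUSTER_CLUSTER);
+ *	BUILD_BUG_ON(sizeof(struct mcpm_sync_struct) !=
+ *		     MCPM_SYNC_CLUSTER_SIZE);
+ */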
+
+#endif
diff --git a/arch/arm/include/asm/mcs_spinlock.h b/arch/arm/include/asm/mcs_spinlock.h
new file mode 100644
index 0000000000..529d2cf4d0
--- /dev/null
+++ b/arch/arm/include/asm/mcs_spinlock.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MCS_LOCK_H
+#define __ASM_MCS_LOCK_H
+
+#ifdef CONFIG_SMP
+#include <asm/spinlock.h>
+
+/* MCS spin-locking. */
+#define arch_mcs_spin_lock_contended(lock) \
+do { \
+ /* Ensure prior stores are observed before we enter wfe. */ \
+ smp_mb(); \
+ while (!(smp_load_acquire(lock))) \
+ wfe(); \
+} while (0)
+
+#define arch_mcs_spin_unlock_contended(lock) \
+do { \
+ smp_store_release(lock, 1); \
+ dsb_sev(); \
+} while (0)
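+
+/*
+ * Illustrative sketch (hypothetical caller, not part of this header):
+ * a generic MCS slowpath spins on its own queue node and then hands
+ * the lock to its successor:
+ *
+ *	arch_mcs_spin_lock_contended(&node->locked);
+ *	... critical section ...
+ *	arch_mcs_spin_unlock_contended(&next->locked);
+ */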
+
+#endif /* CONFIG_SMP */
+#endif /* __ASM_MCS_LOCK_H */
diff --git a/arch/arm/include/asm/memblock.h b/arch/arm/include/asm/memblock.h
new file mode 100644
index 0000000000..b10fd358cc
--- /dev/null
+++ b/arch/arm/include/asm/memblock.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARM_MEMBLOCK_H
+#define _ASM_ARM_MEMBLOCK_H
+
+struct machine_desc;
+
+void arm_memblock_init(const struct machine_desc *);
+phys_addr_t arm_memblock_steal(phys_addr_t size, phys_addr_t align);
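+
+/*
+ * Illustrative sketch (hypothetical platform code, not part of this
+ * header): carving out 1 MiB of contiguous memory, aligned to 1 MiB,
+ * during early boot:
+ *
+ *	phys_addr_t base = arm_memblock_steal(SZ_1M, SZ_1M);
+ */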
+
+#endif
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
new file mode 100644
index 0000000000..ef2aa79ece
--- /dev/null
+++ b/arch/arm/include/asm/memory.h
@@ -0,0 +1,396 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/memory.h
+ *
+ * Copyright (C) 2000-2002 Russell King
+ * modification for nommu, Hyok S. Choi, 2004
+ *
+ * Note: this file should not be included explicitly, include <asm/page.h>
+ * to get access to these definitions.
+ */
+#ifndef __ASM_ARM_MEMORY_H
+#define __ASM_ARM_MEMORY_H
+
+#ifndef _ASMARM_PAGE_H
+#error "Do not include <asm/memory.h> directly"
+#endif
+
+#include <linux/compiler.h>
+#include <linux/const.h>
+#include <linux/types.h>
+#include <linux/sizes.h>
+
+#ifdef CONFIG_NEED_MACH_MEMORY_H
+#include <mach/memory.h>
+#endif
+#include <asm/kasan_def.h>
+
+/*
+ * PAGE_OFFSET: the virtual address of the start of lowmem, memory above
+ * the virtual address range for userspace.
+ * KERNEL_OFFSET: the virtual address of the start of the kernel image.
+ * We may further offset this with TEXT_OFFSET in practice.
+ */
+#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET)
+#define KERNEL_OFFSET (PAGE_OFFSET)
+
+#ifdef CONFIG_MMU
+
+/*
+ * TASK_SIZE - the maximum size of a user space task.
+ * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
+ */
+#ifndef CONFIG_KASAN
+#define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M))
+#else
+#define TASK_SIZE (KASAN_SHADOW_START)
+#endif
+#define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M)
+
+/*
+ * The maximum size of a 26-bit user space task.
+ */
+#define TASK_SIZE_26 (UL(1) << 26)
+
+/*
+ * The module space lives between the addresses given by TASK_SIZE
+ * and PAGE_OFFSET - it must be within 32MB of the kernel text.
+ */
+#ifndef CONFIG_THUMB2_KERNEL
+#define MODULES_VADDR (PAGE_OFFSET - SZ_16M)
+#else
+/* smaller range for Thumb-2 symbol relocation (2^24) */
+#define MODULES_VADDR (PAGE_OFFSET - SZ_8M)
+#endif
+
+#if TASK_SIZE > MODULES_VADDR
+#error Top of user space clashes with start of module space
+#endif
+
+/*
+ * The highmem pkmap virtual space shares the end of the module area.
+ */
+#ifdef CONFIG_HIGHMEM
+#define MODULES_END (PAGE_OFFSET - PMD_SIZE)
+#else
+#define MODULES_END (PAGE_OFFSET)
+#endif
+
+/*
+ * The XIP kernel gets mapped at the bottom of the module vm area.
+ * Since we use sections to map it, this macro replaces the physical address
+ * with its virtual address while keeping offset from the base section.
+ */
+#define XIP_VIRT_ADDR(physaddr) (MODULES_VADDR + ((physaddr) & 0x000fffff))
+
+#define FDT_FIXED_BASE UL(0xff800000)
+#define FDT_FIXED_SIZE (2 * SECTION_SIZE)
+#define FDT_VIRT_BASE(physbase) ((void *)(FDT_FIXED_BASE | (physbase) % SECTION_SIZE))
+
+#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
+/*
+ * Allow 16MB-aligned ioremap pages
+ */
+#define IOREMAP_MAX_ORDER 24
+#endif
+
+#define VECTORS_BASE UL(0xffff0000)
+
+#else /* CONFIG_MMU */
+
+#ifndef __ASSEMBLY__
+extern unsigned long setup_vectors_base(void);
+extern unsigned long vectors_base;
+#define VECTORS_BASE vectors_base
+#endif
+
+/*
+ * The user task size limit can grow up to the end of the free RAM region.
+ * It is difficult to define precisely and may never match the original
+ * meaning of this define.
+ * Fortunately, there is no reference to it in noMMU mode, for now.
+ */
+#define TASK_SIZE UL(0xffffffff)
+
+#ifndef TASK_UNMAPPED_BASE
+#define TASK_UNMAPPED_BASE UL(0x00000000)
+#endif
+
+#ifndef END_MEM
+#define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
+#endif
+
+/*
+ * The module can be at any place in ram in nommu mode.
+ */
+#define MODULES_END (END_MEM)
+#define MODULES_VADDR PAGE_OFFSET
+
+#define XIP_VIRT_ADDR(physaddr) (physaddr)
+#define FDT_VIRT_BASE(physbase) ((void *)(physbase))
+
+#endif /* !CONFIG_MMU */
+
+#ifdef CONFIG_XIP_KERNEL
+#define KERNEL_START _sdata
+#else
+#define KERNEL_START _stext
+#endif
+#define KERNEL_END _end
+
+/*
+ * We fix the TCM memories (max 32 KiB ITCM and 32 KiB DTCM, respectively)
+ * at these locations.
+ */
+#ifdef CONFIG_HAVE_TCM
+#define ITCM_OFFSET UL(0xfffe0000)
+#define DTCM_OFFSET UL(0xfffe8000)
+#endif
+
+/*
+ * Convert a page to/from a physical address
+ */
+#define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page)))
+#define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys)))
+
+/*
+ * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical
+ * memory. This is used for XIP and NoMMU kernels, and on platforms that don't
+ * have CONFIG_ARM_PATCH_PHYS_VIRT. Assembly code must always use
+ * PLAT_PHYS_OFFSET and not PHYS_OFFSET.
+ */
+#define PLAT_PHYS_OFFSET UL(CONFIG_PHYS_OFFSET)
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Physical start and end address of the kernel sections. These addresses are
+ * 2MB-aligned to match the section mappings placed over the kernel. We use
+ * u64 so that LPAE mappings beyond the 32-bit limit will work out as well.
+ */
+extern u64 kernel_sec_start;
+extern u64 kernel_sec_end;
+
+/*
+ * Physical vs virtual RAM address space conversion. These are
+ * private definitions which should NOT be used outside memory.h
+ * files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
+ *
+ * PFNs are used to describe any physical page; this means
+ * PFN 0 == physical address 0.
+ */
+
+#if defined(CONFIG_ARM_PATCH_PHYS_VIRT)
+
+/*
+ * Constants used to force the right instruction encodings and shifts
+ * so that all we need to do is modify the 8-bit constant field.
+ */
+#define __PV_BITS_31_24 0x81000000
+#define __PV_BITS_23_16 0x810000
+#define __PV_BITS_7_0 0x81
+
+extern unsigned long __pv_phys_pfn_offset;
+extern u64 __pv_offset;
+extern void fixup_pv_table(const void *, unsigned long);
+extern const void *__pv_table_begin, *__pv_table_end;
+
+#define PHYS_OFFSET ((phys_addr_t)__pv_phys_pfn_offset << PAGE_SHIFT)
+#define PHYS_PFN_OFFSET (__pv_phys_pfn_offset)
+
+#ifndef CONFIG_THUMB2_KERNEL
+#define __pv_stub(from,to,instr) \
+ __asm__("@ __pv_stub\n" \
+ "1: " instr " %0, %1, %2\n" \
+ "2: " instr " %0, %0, %3\n" \
+ " .pushsection .pv_table,\"a\"\n" \
+ " .long 1b - ., 2b - .\n" \
+ " .popsection\n" \
+ : "=r" (to) \
+ : "r" (from), "I" (__PV_BITS_31_24), \
+ "I"(__PV_BITS_23_16))
+
+#define __pv_add_carry_stub(x, y) \
+ __asm__("@ __pv_add_carry_stub\n" \
+ "0: movw %R0, #0\n" \
+ " adds %Q0, %1, %R0, lsl #20\n" \
+ "1: mov %R0, %2\n" \
+ " adc %R0, %R0, #0\n" \
+ " .pushsection .pv_table,\"a\"\n" \
+ " .long 0b - ., 1b - .\n" \
+ " .popsection\n" \
+ : "=&r" (y) \
+ : "r" (x), "I" (__PV_BITS_7_0) \
+ : "cc")
+
+#else
+#define __pv_stub(from,to,instr) \
+ __asm__("@ __pv_stub\n" \
+ "0: movw %0, #0\n" \
+ " lsl %0, #21\n" \
+ " " instr " %0, %1, %0\n" \
+ " .pushsection .pv_table,\"a\"\n" \
+ " .long 0b - .\n" \
+ " .popsection\n" \
+ : "=&r" (to) \
+ : "r" (from))
+
+#define __pv_add_carry_stub(x, y) \
+ __asm__("@ __pv_add_carry_stub\n" \
+ "0: movw %R0, #0\n" \
+ " lsls %R0, #21\n" \
+ " adds %Q0, %1, %R0\n" \
+ "1: mvn %R0, #0\n" \
+ " adc %R0, %R0, #0\n" \
+ " .pushsection .pv_table,\"a\"\n" \
+ " .long 0b - ., 1b - .\n" \
+ " .popsection\n" \
+ : "=&r" (y) \
+ : "r" (x) \
+ : "cc")
+#endif
+
+static inline phys_addr_t __virt_to_phys_nodebug(unsigned long x)
+{
+ phys_addr_t t;
+
+ if (sizeof(phys_addr_t) == 4) {
+ __pv_stub(x, t, "add");
+ } else {
+ __pv_add_carry_stub(x, t);
+ }
+ return t;
+}
+
+static inline unsigned long __phys_to_virt(phys_addr_t x)
+{
+ unsigned long t;
+
+ /*
+	 * The 'unsigned long' cast discards the upper word when
+	 * phys_addr_t is 64 bit, and makes sure that the inline
+	 * assembler expression receives a 32 bit argument
+	 * in place where a 32 bit 'r' operand is expected.
+ */
+ __pv_stub((unsigned long) x, t, "sub");
+ return t;
+}
+
+#else
+
+#define PHYS_OFFSET PLAT_PHYS_OFFSET
+#define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
+
+static inline phys_addr_t __virt_to_phys_nodebug(unsigned long x)
+{
+ return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
+}
+
+static inline unsigned long __phys_to_virt(phys_addr_t x)
+{
+ return x - PHYS_OFFSET + PAGE_OFFSET;
+}
+
+#endif
+
+static inline unsigned long virt_to_pfn(const void *p)
+{
+ unsigned long kaddr = (unsigned long)p;
+ return (((kaddr - PAGE_OFFSET) >> PAGE_SHIFT) +
+ PHYS_PFN_OFFSET);
+}
+#define __pa_symbol_nodebug(x) __virt_to_phys_nodebug((x))
+
+#ifdef CONFIG_DEBUG_VIRTUAL
+extern phys_addr_t __virt_to_phys(unsigned long x);
+extern phys_addr_t __phys_addr_symbol(unsigned long x);
+#else
+#define __virt_to_phys(x) __virt_to_phys_nodebug(x)
+#define __phys_addr_symbol(x) __pa_symbol_nodebug(x)
+#endif
+
+/*
+ * These are *only* valid on the kernel direct mapped RAM memory.
+ * Note: Drivers should NOT use these. They are the wrong
+ * translation to use for DMA addresses. Use the driver
+ * DMA support - see dma-mapping.h.
+ */
+#define virt_to_phys virt_to_phys
+static inline phys_addr_t virt_to_phys(const volatile void *x)
+{
+ return __virt_to_phys((unsigned long)(x));
+}
+
+#define phys_to_virt phys_to_virt
+static inline void *phys_to_virt(phys_addr_t x)
+{
+ return (void *)__phys_to_virt(x);
+}
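+
+/*
+ * Illustrative sketch (not part of this header): for a lowmem kernel
+ * address these conversions round-trip:
+ *
+ *	void *p = kmalloc(64, GFP_KERNEL);
+ *	phys_addr_t pa = virt_to_phys(p);
+ *	BUG_ON(p && phys_to_virt(pa) != p);
+ */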
+
+/*
+ * Drivers should NOT use these either.
+ */
+#define __pa(x) __virt_to_phys((unsigned long)(x))
+#define __pa_symbol(x) __phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0))
+#define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x)))
+#define pfn_to_kaddr(pfn) __va((phys_addr_t)(pfn) << PAGE_SHIFT)
+
+extern long long arch_phys_to_idmap_offset;
+
+/*
+ * These are for systems that have a hardware interconnect supported alias
+ * of physical memory for idmap purposes. Most cases should leave these
+ * untouched. Note: this can only return addresses less than 4GiB.
+ */
+static inline bool arm_has_idmap_alias(void)
+{
+ return IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset != 0;
+}
+
+#define IDMAP_INVALID_ADDR ((u32)~0)
+
+static inline unsigned long phys_to_idmap(phys_addr_t addr)
+{
+ if (IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset) {
+ addr += arch_phys_to_idmap_offset;
+ if (addr > (u32)~0)
+ addr = IDMAP_INVALID_ADDR;
+ }
+ return addr;
+}
+
+static inline phys_addr_t idmap_to_phys(unsigned long idmap)
+{
+ phys_addr_t addr = idmap;
+
+ if (IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset)
+ addr -= arch_phys_to_idmap_offset;
+
+ return addr;
+}
+
+static inline unsigned long __virt_to_idmap(unsigned long x)
+{
+ return phys_to_idmap(__virt_to_phys(x));
+}
+
+#define virt_to_idmap(x) __virt_to_idmap((unsigned long)(x))
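+
+/*
+ * Illustrative sketch (hypothetical caller, not part of this header):
+ * code that must branch to a routine with the MMU off passes its
+ * identity-mapped address, e.g.:
+ *
+ *	unsigned long entry = virt_to_idmap(cpu_resume);
+ */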
+
+/*
+ * Conversion between a struct page and a physical address.
+ *
+ * page_to_pfn(page) convert a struct page * to a PFN number
+ * pfn_to_page(pfn) convert a _valid_ PFN number to struct page *
+ *
+ * virt_to_page(k) convert a _valid_ virtual address to struct page *
+ * virt_addr_valid(k) indicates whether a virtual address is valid
+ */
+#define ARCH_PFN_OFFSET PHYS_PFN_OFFSET
+
+#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
+#define virt_addr_valid(kaddr) (((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \
+ && pfn_valid(virt_to_pfn(kaddr)))
+
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
new file mode 100644
index 0000000000..e049723840
--- /dev/null
+++ b/arch/arm/include/asm/mmu.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ARM_MMU_H
+#define __ARM_MMU_H
+
+#ifdef CONFIG_MMU
+
+typedef struct {
+#ifdef CONFIG_CPU_HAS_ASID
+ atomic64_t id;
+#else
+ int switch_pending;
+#endif
+ atomic_t vmalloc_seq;
+ unsigned long sigpage;
+#ifdef CONFIG_VDSO
+ unsigned long vdso;
+#endif
+#ifdef CONFIG_BINFMT_ELF_FDPIC
+ unsigned long exec_fdpic_loadmap;
+ unsigned long interp_fdpic_loadmap;
+#endif
+} mm_context_t;
+
+#ifdef CONFIG_CPU_HAS_ASID
+#define ASID_BITS 8
+#define ASID_MASK ((~0ULL) << ASID_BITS)
+#define ASID(mm) ((unsigned int)((mm)->context.id.counter & ~ASID_MASK))
+#else
+#define ASID(mm) (0)
+#endif
+
+#else
+
+/*
+ * From nommu.h:
+ * Copyright (C) 2002, David McCullough <davidm@snapgear.com>
+ * modified for 2.6 by Hyok S. Choi <hyok.choi@samsung.com>
+ */
+typedef struct {
+ unsigned long end_brk;
+#ifdef CONFIG_BINFMT_ELF_FDPIC
+ unsigned long exec_fdpic_loadmap;
+ unsigned long interp_fdpic_loadmap;
+#endif
+} mm_context_t;
+
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
new file mode 100644
index 0000000000..db2cb06aa8
--- /dev/null
+++ b/arch/arm/include/asm/mmu_context.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/mmu_context.h
+ *
+ * Copyright (C) 1996 Russell King.
+ *
+ * Changelog:
+ * 27-06-1996 RMK Created
+ */
+#ifndef __ASM_ARM_MMU_CONTEXT_H
+#define __ASM_ARM_MMU_CONTEXT_H
+
+#include <linux/compiler.h>
+#include <linux/sched.h>
+#include <linux/mm_types.h>
+#include <linux/preempt.h>
+
+#include <asm/cacheflush.h>
+#include <asm/cachetype.h>
+#include <asm/proc-fns.h>
+#include <asm/smp_plat.h>
+#include <asm-generic/mm_hooks.h>
+
+void __check_vmalloc_seq(struct mm_struct *mm);
+
+#ifdef CONFIG_MMU
+static inline void check_vmalloc_seq(struct mm_struct *mm)
+{
+ if (!IS_ENABLED(CONFIG_ARM_LPAE) &&
+ unlikely(atomic_read(&mm->context.vmalloc_seq) !=
+ atomic_read(&init_mm.context.vmalloc_seq)))
+ __check_vmalloc_seq(mm);
+}
+#endif
+
+#ifdef CONFIG_CPU_HAS_ASID
+
+void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
+
+#define init_new_context init_new_context
+static inline int
+init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+ atomic64_set(&mm->context.id, 0);
+ return 0;
+}
+
+#ifdef CONFIG_ARM_ERRATA_798181
+void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
+ cpumask_t *mask);
+#else /* !CONFIG_ARM_ERRATA_798181 */
+static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
+ cpumask_t *mask)
+{
+}
+#endif /* CONFIG_ARM_ERRATA_798181 */
+
+#else /* !CONFIG_CPU_HAS_ASID */
+
+#ifdef CONFIG_MMU
+
+static inline void check_and_switch_context(struct mm_struct *mm,
+ struct task_struct *tsk)
+{
+ check_vmalloc_seq(mm);
+
+ if (irqs_disabled())
+ /*
+ * cpu_switch_mm() needs to flush the VIVT caches. To avoid
+ * high interrupt latencies, defer the call and continue
+ * running with the old mm. Since we only support UP systems
+ * on non-ASID CPUs, the old mm will remain valid until the
+ * finish_arch_post_lock_switch() call.
+ */
+ mm->context.switch_pending = 1;
+ else
+ cpu_switch_mm(mm->pgd, mm);
+}
+
+#ifndef MODULE
+#define finish_arch_post_lock_switch \
+ finish_arch_post_lock_switch
+static inline void finish_arch_post_lock_switch(void)
+{
+ struct mm_struct *mm = current->mm;
+
+ if (mm && mm->context.switch_pending) {
+ /*
+ * Preemption must be disabled during cpu_switch_mm() as we
+ * have some stateful cache flush implementations. Check
+ * switch_pending again in case we were preempted and the
+ * switch to this mm was already done.
+ */
+ preempt_disable();
+ if (mm->context.switch_pending) {
+ mm->context.switch_pending = 0;
+ cpu_switch_mm(mm->pgd, mm);
+ }
+ preempt_enable_no_resched();
+ }
+}
+#endif /* !MODULE */
+
+#endif /* CONFIG_MMU */
+
+#endif /* CONFIG_CPU_HAS_ASID */
+
+#define activate_mm(prev,next) switch_mm(prev, next, NULL)
+
+/*
+ * This is the actual mm switch as far as the scheduler
+ * is concerned. No registers are touched. We avoid
+ * calling the CPU specific function when the mm hasn't
+ * actually changed.
+ */
+static inline void
+switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+#ifdef CONFIG_MMU
+ unsigned int cpu = smp_processor_id();
+
+ /*
+ * __sync_icache_dcache doesn't broadcast the I-cache invalidation,
+ * so check for possible thread migration and invalidate the I-cache
+ * if we're new to this CPU.
+ */
+ if (cache_ops_need_broadcast() &&
+ !cpumask_empty(mm_cpumask(next)) &&
+ !cpumask_test_cpu(cpu, mm_cpumask(next)))
+ __flush_icache_all();
+
+ if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
+ check_and_switch_context(next, tsk);
+ if (cache_is_vivt())
+ cpumask_clear_cpu(cpu, mm_cpumask(prev));
+ }
+#endif
+}
+
+#ifdef CONFIG_VMAP_STACK
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+ if (mm != &init_mm)
+ check_vmalloc_seq(mm);
+}
+#define enter_lazy_tlb enter_lazy_tlb
+#endif
+
+#include <asm-generic/mmu_context.h>
+
+#endif
diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h
new file mode 100644
index 0000000000..07c51a34f7
--- /dev/null
+++ b/arch/arm/include/asm/module.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARM_MODULE_H
+#define _ASM_ARM_MODULE_H
+
+#include <asm-generic/module.h>
+#include <asm/unwind.h>
+
+#ifdef CONFIG_ARM_UNWIND
+#define ELF_SECTION_UNWIND 0x70000001
+#endif
+
+#define PLT_ENT_STRIDE L1_CACHE_BYTES
+#define PLT_ENT_COUNT (PLT_ENT_STRIDE / sizeof(u32))
+#define PLT_ENT_SIZE (sizeof(struct plt_entries) / PLT_ENT_COUNT)
+
+struct plt_entries {
+ u32 ldr[PLT_ENT_COUNT];
+ u32 lit[PLT_ENT_COUNT];
+};
+
+struct mod_plt_sec {
+ struct elf32_shdr *plt;
+ struct plt_entries *plt_ent;
+ int plt_count;
+};
+
+struct mod_arch_specific {
+#ifdef CONFIG_ARM_UNWIND
+ struct list_head unwind_list;
+ struct unwind_table *init_table;
+#endif
+#ifdef CONFIG_ARM_MODULE_PLTS
+ struct mod_plt_sec core;
+ struct mod_plt_sec init;
+#endif
+};
+
+struct module;
+u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val);
+#ifdef CONFIG_ARM_MODULE_PLTS
+bool in_module_plt(unsigned long loc);
+#else
+static inline bool in_module_plt(unsigned long loc) { return false; }
+#endif
+
+#ifdef CONFIG_THUMB2_KERNEL
+#define HAVE_ARCH_KALLSYMS_SYMBOL_VALUE
+static inline unsigned long kallsyms_symbol_value(const Elf_Sym *sym)
+{
+ if (ELF_ST_TYPE(sym->st_info) == STT_FUNC)
+ return sym->st_value & ~1;
+
+ return sym->st_value;
+}
+#endif
+
+#endif /* _ASM_ARM_MODULE_H */
diff --git a/arch/arm/include/asm/module.lds.h b/arch/arm/include/asm/module.lds.h
new file mode 100644
index 0000000000..0e7cb4e314
--- /dev/null
+++ b/arch/arm/include/asm/module.lds.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifdef CONFIG_ARM_MODULE_PLTS
+SECTIONS {
+ .plt : { BYTE(0) }
+ .init.plt : { BYTE(0) }
+}
+#endif
diff --git a/arch/arm/include/asm/mpu.h b/arch/arm/include/asm/mpu.h
new file mode 100644
index 0000000000..5e088c83d3
--- /dev/null
+++ b/arch/arm/include/asm/mpu.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ARM_MPU_H
+#define __ARM_MPU_H
+
+/* MPUIR layout */
+#define MPUIR_nU 1
+#define MPUIR_DREGION 8
+#define MPUIR_IREGION 16
+#define MPUIR_DREGION_SZMASK (0xFF << MPUIR_DREGION)
+#define MPUIR_IREGION_SZMASK (0xFF << MPUIR_IREGION)
+
+/* ID_MMFR0 data relevant to MPU */
+#define MMFR0_PMSA (0xF << 4)
+#define MMFR0_PMSAv7 (3 << 4)
+#define MMFR0_PMSAv8 (4 << 4)
+
+/* MPU D/I Size Register fields */
+#define PMSAv7_RSR_SZ 1
+#define PMSAv7_RSR_EN 0
+#define PMSAv7_RSR_SD 8
+
+/* Number of subregions (SD) */
+#define PMSAv7_NR_SUBREGS 8
+#define PMSAv7_MIN_SUBREG_SIZE 256
+
+/* The D/I RSR value for an enabled region spanning the whole of memory */
+#define PMSAv7_RSR_ALL_MEM 63
+
+/* Individual bits in the DR/IR ACR */
+#define PMSAv7_ACR_XN (1 << 12)
+#define PMSAv7_ACR_SHARED (1 << 2)
+
+/* C, B and TEX[2:0] bits only have semantic meanings when grouped */
+#define PMSAv7_RGN_CACHEABLE 0xB
+#define PMSAv7_RGN_SHARED_CACHEABLE (PMSAv7_RGN_CACHEABLE | PMSAv7_ACR_SHARED)
+#define PMSAv7_RGN_STRONGLY_ORDERED 0
+
+/* Main region should only be shared for SMP */
+#ifdef CONFIG_SMP
+#define PMSAv7_RGN_NORMAL (PMSAv7_RGN_CACHEABLE | PMSAv7_ACR_SHARED)
+#else
+#define PMSAv7_RGN_NORMAL PMSAv7_RGN_CACHEABLE
+#endif
+
+/* Access permission bits of ACR (only define those that we use)*/
+#define PMSAv7_AP_PL1RO_PL0NA (0x5 << 8)
+#define PMSAv7_AP_PL1RW_PL0RW (0x3 << 8)
+#define PMSAv7_AP_PL1RW_PL0R0 (0x2 << 8)
+#define PMSAv7_AP_PL1RW_PL0NA (0x1 << 8)
+
+#define PMSAv8_BAR_XN 1
+
+#define PMSAv8_LAR_EN 1
+#define PMSAv8_LAR_IDX(n) (((n) & 0x7) << 1)
+
+
+#define PMSAv8_AP_PL1RW_PL0NA (0 << 1)
+#define PMSAv8_AP_PL1RW_PL0RW (1 << 1)
+#define PMSAv8_AP_PL1RO_PL0RO (3 << 1)
+
+#ifdef CONFIG_SMP
+#define PMSAv8_RGN_SHARED (3 << 3) // inner sharable
+#else
+#define PMSAv8_RGN_SHARED (0 << 3)
+#endif
+
+#define PMSAv8_RGN_DEVICE_nGnRnE 0
+#define PMSAv8_RGN_NORMAL 1
+
+#define PMSAv8_MAIR(attr, mt) ((attr) << ((mt) * 8))
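+
+/*
+ * Illustrative sketch (not part of this header): composing a MAIR
+ * value with Device-nGnRnE at index 0 and Normal write-back memory
+ * (attribute 0xff) at index 1:
+ *
+ *	u32 mair0 = PMSAv8_MAIR(0x00, PMSAv8_RGN_DEVICE_nGnRnE) |
+ *		    PMSAv8_MAIR(0xff, PMSAv8_RGN_NORMAL);
+ */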
+
+#ifdef CONFIG_CPU_V7M
+#define PMSAv8_MINALIGN 32
+#else
+#define PMSAv8_MINALIGN 64
+#endif
+
+/* For minimal static MPU region configurations */
+#define PMSAv7_PROBE_REGION 0
+#define PMSAv7_BG_REGION 1
+#define PMSAv7_RAM_REGION 2
+#define PMSAv7_ROM_REGION 3
+
+/* Fixed for PMSAv8 only */
+#define PMSAv8_XIP_REGION 0
+#define PMSAv8_KERNEL_REGION 1
+
+/* Maximum number of regions Linux is interested in */
+#define MPU_MAX_REGIONS 16
+
+#define PMSAv7_DATA_SIDE 0
+#define PMSAv7_INSTR_SIDE 1
+
+#ifndef __ASSEMBLY__
+
+struct mpu_rgn {
+ /* Assume same attributes for d/i-side */
+ union {
+ u32 drbar; /* PMSAv7 */
+ u32 prbar; /* PMSAv8 */
+ };
+ union {
+ u32 drsr; /* PMSAv7 */
+ u32 prlar; /* PMSAv8 */
+ };
+ union {
+ u32 dracr; /* PMSAv7 */
+ u32 unused; /* not used in PMSAv8 */
+ };
+};
+
+struct mpu_rgn_info {
+ unsigned int used;
+ struct mpu_rgn rgns[MPU_MAX_REGIONS];
+};
+extern struct mpu_rgn_info mpu_rgn_info;
+
+#ifdef CONFIG_ARM_MPU
+extern void __init pmsav7_adjust_lowmem_bounds(void);
+extern void __init pmsav8_adjust_lowmem_bounds(void);
+
+extern void __init pmsav7_setup(void);
+extern void __init pmsav8_setup(void);
+#else
+static inline void pmsav7_adjust_lowmem_bounds(void) {};
+static inline void pmsav8_adjust_lowmem_bounds(void) {};
+static inline void pmsav7_setup(void) {};
+static inline void pmsav8_setup(void) {};
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif
diff --git a/arch/arm/include/asm/mtd-xip.h b/arch/arm/include/asm/mtd-xip.h
new file mode 100644
index 0000000000..dfcef0152e
--- /dev/null
+++ b/arch/arm/include/asm/mtd-xip.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * MTD primitives for XIP support. Architecture specific functions
+ *
+ * Do not include this file directly. It's included from linux/mtd/xip.h
+ *
+ * Author: Nicolas Pitre
+ * Created: Nov 2, 2004
+ * Copyright: (C) 2004 MontaVista Software, Inc.
+ */
+
+#ifndef __ARM_MTD_XIP_H__
+#define __ARM_MTD_XIP_H__
+
+#include <mach/mtd-xip.h>
+
+/* fill instruction prefetch */
+#define xip_iprefetch() do { asm volatile (".rep 8; nop; .endr"); } while (0)
+
+#endif /* __ARM_MTD_XIP_H__ */
diff --git a/arch/arm/include/asm/neon.h b/arch/arm/include/asm/neon.h
new file mode 100644
index 0000000000..aac10ba33e
--- /dev/null
+++ b/arch/arm/include/asm/neon.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * linux/arch/arm/include/asm/neon.h
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ */
+
+#include <asm/hwcap.h>
+
+#define cpu_has_neon() (!!(elf_hwcap & HWCAP_NEON))
+
+#ifdef __ARM_NEON__
+
+/*
+ * If you are affected by the BUILD_BUG below, it probably means that you are
+ * using NEON code /and/ calling the kernel_neon_begin() function from the same
+ * compilation unit. To prevent issues that may arise from GCC reordering or
+ * generating(1) NEON instructions outside of these begin/end functions, the
+ * only supported way of using NEON code in the kernel is by isolating it in a
+ * separate compilation unit, and calling it from another unit from inside a
+ * kernel_neon_begin/kernel_neon_end pair.
+ *
+ * (1) Current GCC (4.7) might generate NEON instructions at O3 level if
+ * -mfpu=neon is set.
+ */
+
+#define kernel_neon_begin() \
+ BUILD_BUG_ON_MSG(1, "kernel_neon_begin() called from NEON code")
+
+#else
+void kernel_neon_begin(void);
+#endif
+void kernel_neon_end(void);
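+
+/*
+ * Illustrative sketch (hypothetical caller in a non-NEON compilation
+ * unit, not part of this header):
+ *
+ *	kernel_neon_begin();
+ *	my_neon_func(dst, src, len);	// defined in a NEON-built unit
+ *	kernel_neon_end();
+ */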
diff --git a/arch/arm/include/asm/nwflash.h b/arch/arm/include/asm/nwflash.h
new file mode 100644
index 0000000000..66b7e68c9b
--- /dev/null
+++ b/arch/arm/include/asm/nwflash.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _FLASH_H
+#define _FLASH_H
+
+#define CMD_WRITE_DISABLE 0
+#define CMD_WRITE_ENABLE 0x28
+#define CMD_WRITE_BASE64K_ENABLE 0x47
+
+#endif /* _FLASH_H */
diff --git a/arch/arm/include/asm/opcodes-sec.h b/arch/arm/include/asm/opcodes-sec.h
new file mode 100644
index 0000000000..b6f4b35024
--- /dev/null
+++ b/arch/arm/include/asm/opcodes-sec.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * Copyright (C) 2012 ARM Limited
+ */
+
+#ifndef __ASM_ARM_OPCODES_SEC_H
+#define __ASM_ARM_OPCODES_SEC_H
+
+#include <asm/opcodes.h>
+
+#define __SMC(imm4) __inst_arm_thumb32( \
+ 0xE1600070 | (((imm4) & 0xF) << 0), \
+ 0xF7F08000 | (((imm4) & 0xF) << 16) \
+)
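+
+/*
+ * Illustrative sketch (hypothetical caller, not part of this header):
+ * issuing a secure monitor call with immediate 0 from inline assembly:
+ *
+ *	asm volatile(__SMC(0) : : : "memory");
+ */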
+
+#endif /* __ASM_ARM_OPCODES_SEC_H */
diff --git a/arch/arm/include/asm/opcodes-virt.h b/arch/arm/include/asm/opcodes-virt.h
new file mode 100644
index 0000000000..0b58da81b7
--- /dev/null
+++ b/arch/arm/include/asm/opcodes-virt.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * opcodes-virt.h: Opcode definitions for the ARM virtualization extensions
+ * Copyright (C) 2012 Linaro Limited
+ */
+#ifndef __ASM_ARM_OPCODES_VIRT_H
+#define __ASM_ARM_OPCODES_VIRT_H
+
+#include <asm/opcodes.h>
+
+#define __HVC(imm16) __inst_arm_thumb32( \
+ 0xE1400070 | (((imm16) & 0xFFF0) << 4) | ((imm16) & 0x000F), \
+ 0xF7E08000 | (((imm16) & 0xF000) << 4) | ((imm16) & 0x0FFF) \
+)
+
+#define __ERET __inst_arm_thumb32( \
+ 0xE160006E, \
+ 0xF3DE8F00 \
+)
+
+#define __MSR_ELR_HYP(regnum) __inst_arm_thumb32( \
+ 0xE12EF300 | regnum, \
+ 0xF3808E30 | (regnum << 16) \
+)
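+
+/*
+ * Illustrative sketch (hypothetical caller, not part of this header):
+ * issuing a hypervisor call with immediate 0 from inline assembly:
+ *
+ *	asm volatile(__HVC(0) : : : "memory");
+ */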
+
+#endif /* ! __ASM_ARM_OPCODES_VIRT_H */
diff --git a/arch/arm/include/asm/opcodes.h b/arch/arm/include/asm/opcodes.h
new file mode 100644
index 0000000000..38e3eabff5
--- /dev/null
+++ b/arch/arm/include/asm/opcodes.h
@@ -0,0 +1,233 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/opcodes.h
+ */
+
+#ifndef __ASM_ARM_OPCODES_H
+#define __ASM_ARM_OPCODES_H
+
+#ifndef __ASSEMBLY__
+#include <linux/linkage.h>
+extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr);
+#endif
+
+#define ARM_OPCODE_CONDTEST_FAIL 0
+#define ARM_OPCODE_CONDTEST_PASS 1
+#define ARM_OPCODE_CONDTEST_UNCOND 2
+
+
+/*
+ * Assembler opcode byteswap helpers.
+ * These are only intended for use by this header: don't use them directly,
+ * because they will be suboptimal in most cases.
+ */
+#define ___asm_opcode_swab32(x) ( \
+ (((x) << 24) & 0xFF000000) \
+ | (((x) << 8) & 0x00FF0000) \
+ | (((x) >> 8) & 0x0000FF00) \
+ | (((x) >> 24) & 0x000000FF) \
+)
+#define ___asm_opcode_swab16(x) ( \
+ (((x) << 8) & 0xFF00) \
+ | (((x) >> 8) & 0x00FF) \
+)
+#define ___asm_opcode_swahb32(x) ( \
+ (((x) << 8) & 0xFF00FF00) \
+ | (((x) >> 8) & 0x00FF00FF) \
+)
+#define ___asm_opcode_swahw32(x) ( \
+ (((x) << 16) & 0xFFFF0000) \
+ | (((x) >> 16) & 0x0000FFFF) \
+)
+#define ___asm_opcode_identity32(x) ((x) & 0xFFFFFFFF)
+#define ___asm_opcode_identity16(x) ((x) & 0xFFFF)
+
+
+/*
+ * Opcode byteswap helpers
+ *
+ * These macros help with converting instructions between a canonical integer
+ * format and in-memory representation, in an endianness-agnostic manner.
+ *
+ * __mem_to_opcode_*() convert from in-memory representation to canonical form.
+ * __opcode_to_mem_*() convert from canonical form to in-memory representation.
+ *
+ *
+ * Canonical instruction representation:
+ *
+ * ARM: 0xKKLLMMNN
+ * Thumb 16-bit: 0x0000KKLL, where KK < 0xE8
+ * Thumb 32-bit: 0xKKLLMMNN, where KK >= 0xE8
+ *
+ * There is no way to distinguish an ARM instruction in canonical representation
+ * from a Thumb instruction (just as these cannot be distinguished in memory).
+ * Where this distinction is important, it needs to be tracked separately.
+ *
+ * Note that values in the range 0x0000E800..0xE7FFFFFF intentionally do not
+ * represent any valid Thumb-2 instruction. For this range,
+ * __opcode_is_thumb32() and __opcode_is_thumb16() will both be false.
+ *
+ * The ___asm variants are intended only for use by this header, in situations
+ * involving inline assembler. For .S files, the normal __opcode_*() macros
+ * should do the right thing.
+ */
+#ifdef __ASSEMBLY__
+
+#define ___opcode_swab32(x) ___asm_opcode_swab32(x)
+#define ___opcode_swab16(x) ___asm_opcode_swab16(x)
+#define ___opcode_swahb32(x) ___asm_opcode_swahb32(x)
+#define ___opcode_swahw32(x) ___asm_opcode_swahw32(x)
+#define ___opcode_identity32(x) ___asm_opcode_identity32(x)
+#define ___opcode_identity16(x) ___asm_opcode_identity16(x)
+
+#else /* ! __ASSEMBLY__ */
+
+#include <linux/types.h>
+#include <linux/swab.h>
+
+#define ___opcode_swab32(x) swab32(x)
+#define ___opcode_swab16(x) swab16(x)
+#define ___opcode_swahb32(x) swahb32(x)
+#define ___opcode_swahw32(x) swahw32(x)
+#define ___opcode_identity32(x) ((u32)(x))
+#define ___opcode_identity16(x) ((u16)(x))
+
+#endif /* ! __ASSEMBLY__ */
+
+
+#ifdef CONFIG_CPU_ENDIAN_BE8
+
+#define __opcode_to_mem_arm(x) ___opcode_swab32(x)
+#define __opcode_to_mem_thumb16(x) ___opcode_swab16(x)
+#define __opcode_to_mem_thumb32(x) ___opcode_swahb32(x)
+#define ___asm_opcode_to_mem_arm(x) ___asm_opcode_swab32(x)
+#define ___asm_opcode_to_mem_thumb16(x) ___asm_opcode_swab16(x)
+#define ___asm_opcode_to_mem_thumb32(x) ___asm_opcode_swahb32(x)
+
+#else /* ! CONFIG_CPU_ENDIAN_BE8 */
+
+#define __opcode_to_mem_arm(x) ___opcode_identity32(x)
+#define __opcode_to_mem_thumb16(x) ___opcode_identity16(x)
+#define ___asm_opcode_to_mem_arm(x) ___asm_opcode_identity32(x)
+#define ___asm_opcode_to_mem_thumb16(x) ___asm_opcode_identity16(x)
+#ifdef CONFIG_CPU_ENDIAN_BE32
+#ifndef __ASSEMBLY__
+/*
+ * On BE32 systems, using 32-bit accesses to store Thumb instructions will not
+ * work in all cases, due to alignment constraints. For now, a correct
+ * version is not provided for BE32, but the prototype needs to be there
+ * to compile patch.c.
+ */
+extern __u32 __opcode_to_mem_thumb32(__u32);
+#endif
+#else
+#define __opcode_to_mem_thumb32(x) ___opcode_swahw32(x)
+#define ___asm_opcode_to_mem_thumb32(x) ___asm_opcode_swahw32(x)
+#endif
+
+#endif /* ! CONFIG_CPU_ENDIAN_BE8 */
+
+#define __mem_to_opcode_arm(x) __opcode_to_mem_arm(x)
+#define __mem_to_opcode_thumb16(x) __opcode_to_mem_thumb16(x)
+#ifndef CONFIG_CPU_ENDIAN_BE32
+#define __mem_to_opcode_thumb32(x) __opcode_to_mem_thumb32(x)
+#endif
+
+/* Operations specific to Thumb opcodes */
+
+/* Instruction size checks: */
+#define __opcode_is_thumb32(x) ( \
+ ((x) & 0xF8000000) == 0xE8000000 \
+ || ((x) & 0xF0000000) == 0xF0000000 \
+)
+#define __opcode_is_thumb16(x) ( \
+ ((x) & 0xFFFF0000) == 0 \
+ && !(((x) & 0xF800) == 0xE800 || ((x) & 0xF000) == 0xF000) \
+)
+
+/* Operations to construct or split 32-bit Thumb instructions: */
+#define __opcode_thumb32_first(x) (___opcode_identity16((x) >> 16))
+#define __opcode_thumb32_second(x) (___opcode_identity16(x))
+#define __opcode_thumb32_compose(first, second) ( \
+ (___opcode_identity32(___opcode_identity16(first)) << 16) \
+ | ___opcode_identity32(___opcode_identity16(second)) \
+)
+#define ___asm_opcode_thumb32_first(x) (___asm_opcode_identity16((x) >> 16))
+#define ___asm_opcode_thumb32_second(x) (___asm_opcode_identity16(x))
+#define ___asm_opcode_thumb32_compose(first, second) ( \
+ (___asm_opcode_identity32(___asm_opcode_identity16(first)) << 16) \
+ | ___asm_opcode_identity32(___asm_opcode_identity16(second)) \
+)
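+
+/*
+ * Illustrative sketch (not part of this header): round-tripping an
+ * example 32-bit Thumb opcode through the split/compose helpers:
+ *
+ *	u32 insn = 0xf000b800;	// an example 32-bit Thumb encoding
+ *	u16 first = __opcode_thumb32_first(insn);	// 0xf000
+ *	u16 second = __opcode_thumb32_second(insn);	// 0xb800
+ *	BUG_ON(__opcode_thumb32_compose(first, second) != insn);
+ */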
+
+/*
+ * Opcode injection helpers
+ *
+ * In rare cases it is necessary to assemble an opcode which the
+ * assembler does not support directly, or which would normally be
+ * rejected because of the CFLAGS or AFLAGS used to build the affected
+ * file.
+ *
+ * Before using these macros, consider carefully whether it is feasible
+ * instead to change the build flags for your file, or whether it really
+ * makes sense to support old assembler versions when building that
+ * particular kernel feature.
+ *
+ * The macros defined here should only be used where there is no viable
+ * alternative.
+ *
+ *
+ * __inst_arm(x): emit the specified ARM opcode
+ * __inst_thumb16(x): emit the specified 16-bit Thumb opcode
+ * __inst_thumb32(x): emit the specified 32-bit Thumb opcode
+ *
+ * __inst_arm_thumb16(arm, thumb): emit either the specified arm or
+ * 16-bit Thumb opcode, depending on whether an ARM or Thumb-2
+ * kernel is being built
+ *
+ * __inst_arm_thumb32(arm, thumb): emit either the specified arm or
+ * 32-bit Thumb opcode, depending on whether an ARM or Thumb-2
+ * kernel is being built
+ *
+ *
+ * Note that using these macros directly is poor practice. Instead, you
+ * should use them to define human-readable wrapper macros to encode the
+ * instructions that you care about. In code which might run on ARMv7 or
+ * above, you can usually use the __inst_arm_thumb{16,32} macros to
+ * specify the ARM and Thumb alternatives at the same time. This ensures
+ * that the correct opcode gets emitted depending on the instruction set
+ * used for the kernel build.
+ *
+ * Look at opcodes-virt.h for an example of how to use these macros.
+ */
+#include <linux/stringify.h>
+
+#define __inst_arm(x) ___inst_arm(___asm_opcode_to_mem_arm(x))
+#define __inst_thumb32(x) ___inst_thumb32( \
+ ___asm_opcode_to_mem_thumb16(___asm_opcode_thumb32_first(x)), \
+ ___asm_opcode_to_mem_thumb16(___asm_opcode_thumb32_second(x)) \
+)
+#define __inst_thumb16(x) ___inst_thumb16(___asm_opcode_to_mem_thumb16(x))
+
+#ifdef CONFIG_THUMB2_KERNEL
+#define __inst_arm_thumb16(arm_opcode, thumb_opcode) \
+ __inst_thumb16(thumb_opcode)
+#define __inst_arm_thumb32(arm_opcode, thumb_opcode) \
+ __inst_thumb32(thumb_opcode)
+#else
+#define __inst_arm_thumb16(arm_opcode, thumb_opcode) __inst_arm(arm_opcode)
+#define __inst_arm_thumb32(arm_opcode, thumb_opcode) __inst_arm(arm_opcode)
+#endif
+
+/* Helpers for the helpers. Don't use these directly. */
+#ifdef __ASSEMBLY__
+#define ___inst_arm(x) .long x
+#define ___inst_thumb16(x) .short x
+#define ___inst_thumb32(first, second) .short first, second
+#else
+#define ___inst_arm(x) ".long " __stringify(x) "\n\t"
+#define ___inst_thumb16(x) ".short " __stringify(x) "\n\t"
+#define ___inst_thumb32(first, second) \
+ ".short " __stringify(first) ", " __stringify(second) "\n\t"
+#endif
+
+#endif /* __ASM_ARM_OPCODES_H */
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
new file mode 100644
index 0000000000..3364637755
--- /dev/null
+++ b/arch/arm/include/asm/outercache.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/outercache.h
+ *
+ * Copyright (C) 2010 ARM Ltd.
+ * Written by Catalin Marinas <catalin.marinas@arm.com>
+ */
+
+#ifndef __ASM_OUTERCACHE_H
+#define __ASM_OUTERCACHE_H
+
+#include <linux/types.h>
+
+struct l2x0_regs;
+
+struct outer_cache_fns {
+ void (*inv_range)(unsigned long, unsigned long);
+ void (*clean_range)(unsigned long, unsigned long);
+ void (*flush_range)(unsigned long, unsigned long);
+ void (*flush_all)(void);
+ void (*disable)(void);
+#ifdef CONFIG_OUTER_CACHE_SYNC
+ void (*sync)(void);
+#endif
+ void (*resume)(void);
+
+ /* This is an ARM L2C thing */
+ void (*write_sec)(unsigned long, unsigned);
+ void (*configure)(const struct l2x0_regs *);
+};
+
+extern struct outer_cache_fns outer_cache;
+
+#ifdef CONFIG_OUTER_CACHE
+/**
+ * outer_inv_range - invalidate range of outer cache lines
+ * @start: starting physical address, inclusive
+ * @end: end physical address, exclusive
+ */
+static inline void outer_inv_range(phys_addr_t start, phys_addr_t end)
+{
+ if (outer_cache.inv_range)
+ outer_cache.inv_range(start, end);
+}
+
+/**
+ * outer_clean_range - clean dirty outer cache lines
+ * @start: starting physical address, inclusive
+ * @end: end physical address, exclusive
+ */
+static inline void outer_clean_range(phys_addr_t start, phys_addr_t end)
+{
+ if (outer_cache.clean_range)
+ outer_cache.clean_range(start, end);
+}
+
+/**
+ * outer_flush_range - clean and invalidate outer cache lines
+ * @start: starting physical address, inclusive
+ * @end: end physical address, exclusive
+ */
+static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)
+{
+ if (outer_cache.flush_range)
+ outer_cache.flush_range(start, end);
+}
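+
+/*
+ * Illustrative sketch (hypothetical caller, not part of this header):
+ * cleaning a buffer out to DRAM before a non-coherent device reads it.
+ * Note that these helpers take physical addresses:
+ *
+ *	phys_addr_t pa = virt_to_phys(buf);
+ *	outer_clean_range(pa, pa + len);
+ */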
+
+/**
+ * outer_flush_all - clean and invalidate all cache lines in the outer cache
+ *
+ * Note: depending on implementation, this may not be atomic - it must
+ * only be called with interrupts disabled and no other active outer
+ * cache masters.
+ *
+ * It is intended that this function is only used by implementations
+ * needing to override the outer_cache.disable() method due to security.
+ * (Some implementations perform this as a clean followed by an invalidate.)
+ */
+static inline void outer_flush_all(void)
+{
+ if (outer_cache.flush_all)
+ outer_cache.flush_all();
+}
+
+/**
+ * outer_disable - clean, invalidate and disable the outer cache
+ *
+ * Disable the outer cache, ensuring that any data contained in the outer
+ * cache is pushed out to lower levels of system memory. The note and
+ * conditions above concerning outer_flush_all() apply here.
+ */
+extern void outer_disable(void);
+
+/**
+ * outer_resume - restore the cache configuration and re-enable outer cache
+ *
+ * Restore any configuration that the cache had when previously enabled,
+ * and re-enable the outer cache.
+ */
+static inline void outer_resume(void)
+{
+ if (outer_cache.resume)
+ outer_cache.resume();
+}
+
+#else
+
+static inline void outer_inv_range(phys_addr_t start, phys_addr_t end)
+{ }
+static inline void outer_clean_range(phys_addr_t start, phys_addr_t end)
+{ }
+static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)
+{ }
+static inline void outer_flush_all(void) { }
+static inline void outer_disable(void) { }
+static inline void outer_resume(void) { }
+
+#endif
+
+#endif /* __ASM_OUTERCACHE_H */
diff --git a/arch/arm/include/asm/page-nommu.h b/arch/arm/include/asm/page-nommu.h
new file mode 100644
index 0000000000..7c2c72323d
--- /dev/null
+++ b/arch/arm/include/asm/page-nommu.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/page-nommu.h
+ *
+ * Copyright (C) 2004 Hyok S. Choi
+ */
+
+#ifndef _ASMARM_PAGE_NOMMU_H
+#define _ASMARM_PAGE_NOMMU_H
+
+#define clear_page(page) memset((page), 0, PAGE_SIZE)
+#define copy_page(to,from) memcpy((to), (from), PAGE_SIZE)
+
+#define clear_user_page(page, vaddr, pg) clear_page(page)
+#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
+
+/*
+ * These are used to make use of C type-checking.
+ */
+typedef unsigned long pte_t;
+typedef unsigned long pmd_t;
+typedef unsigned long pgd_t[2];
+typedef unsigned long pgprot_t;
+
+#define pte_val(x) (x)
+#define pmd_val(x) (x)
+#define pgd_val(x) ((x)[0])
+#define pgprot_val(x) (x)
+
+#define __pte(x) (x)
+#define __pmd(x) (x)
+#define __pgprot(x) (x)
+
+#endif
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
new file mode 100644
index 0000000000..119aa85d1f
--- /dev/null
+++ b/arch/arm/include/asm/page.h
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/page.h
+ *
+ * Copyright (C) 1995-2003 Russell King
+ */
+#ifndef _ASMARM_PAGE_H
+#define _ASMARM_PAGE_H
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
+#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
+
+#ifndef __ASSEMBLY__
+
+#ifndef CONFIG_MMU
+
+#include <asm/page-nommu.h>
+
+#else
+
+#include <asm/glue.h>
+
+/*
+ * User Space Model
+ * ================
+ *
+ * This section selects the correct set of functions for dealing with
+ * page-based copying and clearing for user space for the particular
+ * processor(s) we're building for.
+ *
+ * We have the following to choose from:
+ * v4wt - ARMv4 with writethrough cache, without minicache
+ * v4wb - ARMv4 with writeback cache, without minicache
+ * v4_mc - ARMv4 with minicache
+ * xscale - Xscale
+ * xsc3 - XScalev3
+ */
+#undef _USER
+#undef MULTI_USER
+
+#ifdef CONFIG_CPU_COPY_V4WT
+# ifdef _USER
+# define MULTI_USER 1
+# else
+# define _USER v4wt
+# endif
+#endif
+
+#ifdef CONFIG_CPU_COPY_V4WB
+# ifdef _USER
+# define MULTI_USER 1
+# else
+# define _USER v4wb
+# endif
+#endif
+
+#ifdef CONFIG_CPU_COPY_FEROCEON
+# ifdef _USER
+# define MULTI_USER 1
+# else
+# define _USER feroceon
+# endif
+#endif
+
+#ifdef CONFIG_CPU_COPY_FA
+# ifdef _USER
+# define MULTI_USER 1
+# else
+# define _USER fa
+# endif
+#endif
+
+#ifdef CONFIG_CPU_SA1100
+# ifdef _USER
+# define MULTI_USER 1
+# else
+# define _USER v4_mc
+# endif
+#endif
+
+#ifdef CONFIG_CPU_XSCALE
+# ifdef _USER
+# define MULTI_USER 1
+# else
+# define _USER xscale_mc
+# endif
+#endif
+
+#ifdef CONFIG_CPU_XSC3
+# ifdef _USER
+# define MULTI_USER 1
+# else
+# define _USER xsc3_mc
+# endif
+#endif
+
+#ifdef CONFIG_CPU_COPY_V6
+# define MULTI_USER 1
+#endif
+
+#if !defined(_USER) && !defined(MULTI_USER)
+#error Unknown user operations model
+#endif
+
+struct page;
+struct vm_area_struct;
+
+struct cpu_user_fns {
+ void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
+ void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+};
+
+void fa_copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+void fa_clear_user_highpage(struct page *page, unsigned long vaddr);
+void feroceon_copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr);
+void v4_mc_copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr);
+void v4wb_copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr);
+void v4wt_copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr);
+void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr);
+void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+void xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr);
+
+#ifdef MULTI_USER
+extern struct cpu_user_fns cpu_user;
+
+#define __cpu_clear_user_highpage cpu_user.cpu_clear_user_highpage
+#define __cpu_copy_user_highpage cpu_user.cpu_copy_user_highpage
+
+#else
+
+#define __cpu_clear_user_highpage __glue(_USER,_clear_user_highpage)
+#define __cpu_copy_user_highpage __glue(_USER,_copy_user_highpage)
+
+extern void __cpu_clear_user_highpage(struct page *page, unsigned long vaddr);
+extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+#endif
+
+#define clear_user_highpage(page,vaddr) \
+ __cpu_clear_user_highpage(page, vaddr)
+
+#define __HAVE_ARCH_COPY_USER_HIGHPAGE
+#define copy_user_highpage(to,from,vaddr,vma) \
+ __cpu_copy_user_highpage(to, from, vaddr, vma)
+
+#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
+extern void copy_page(void *to, const void *from);
+
+#ifdef CONFIG_KUSER_HELPERS
+#define __HAVE_ARCH_GATE_AREA 1
+#endif
+
+#ifdef CONFIG_ARM_LPAE
+#include <asm/pgtable-3level-types.h>
+#else
+#include <asm/pgtable-2level-types.h>
+#ifdef CONFIG_VMAP_STACK
+#define ARCH_PAGE_TABLE_SYNC_MASK PGTBL_PMD_MODIFIED
+#endif
+#endif
+
+#endif /* CONFIG_MMU */
+
+typedef struct page *pgtable_t;
+
+#ifdef CONFIG_HAVE_ARCH_PFN_VALID
+extern int pfn_valid(unsigned long);
+#define pfn_valid pfn_valid
+#endif
+
+#endif /* !__ASSEMBLY__ */
+
+#include <asm/memory.h>
+
+#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC
+
+#include <asm-generic/getorder.h>
+#include <asm-generic/memory_model.h>
+
+#endif
diff --git a/arch/arm/include/asm/paravirt.h b/arch/arm/include/asm/paravirt.h
new file mode 100644
index 0000000000..95d5b0d625
--- /dev/null
+++ b/arch/arm/include/asm/paravirt.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARM_PARAVIRT_H
+#define _ASM_ARM_PARAVIRT_H
+
+#ifdef CONFIG_PARAVIRT
+#include <linux/static_call_types.h>
+
+struct static_key;
+extern struct static_key paravirt_steal_enabled;
+extern struct static_key paravirt_steal_rq_enabled;
+
+u64 dummy_steal_clock(int cpu);
+
+DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);
+
+static inline u64 paravirt_steal_clock(int cpu)
+{
+ return static_call(pv_steal_clock)(cpu);
+}
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/paravirt_api_clock.h b/arch/arm/include/asm/paravirt_api_clock.h
new file mode 100644
index 0000000000..65ac7cee0d
--- /dev/null
+++ b/arch/arm/include/asm/paravirt_api_clock.h
@@ -0,0 +1 @@
+#include <asm/paravirt.h>
diff --git a/arch/arm/include/asm/patch.h b/arch/arm/include/asm/patch.h
new file mode 100644
index 0000000000..0b48247c46
--- /dev/null
+++ b/arch/arm/include/asm/patch.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ARM_KERNEL_PATCH_H
+#define _ARM_KERNEL_PATCH_H
+
+void patch_text(void *addr, unsigned int insn);
+void __patch_text_real(void *addr, unsigned int insn, bool remap);
+
+static inline void __patch_text(void *addr, unsigned int insn)
+{
+ __patch_text_real(addr, insn, true);
+}
+
+static inline void __patch_text_early(void *addr, unsigned int insn)
+{
+ __patch_text_real(addr, insn, false);
+}
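+
+/*
+ * Illustrative sketch (hypothetical caller, not part of this header):
+ * patching a kernel text location with the ARM "nop" encoding:
+ *
+ *	patch_text(addr, 0xe320f000);	// ARM NOP
+ */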
+
+#endif
diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h
new file mode 100644
index 0000000000..5916b88d4c
--- /dev/null
+++ b/arch/arm/include/asm/pci.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ASMARM_PCI_H
+#define ASMARM_PCI_H
+
+#ifdef __KERNEL__
+#include <asm/mach/pci.h> /* for pci_sys_data */
+
+extern unsigned long pcibios_min_io;
+#define PCIBIOS_MIN_IO pcibios_min_io
+extern unsigned long pcibios_min_mem;
+#define PCIBIOS_MIN_MEM pcibios_min_mem
+
+#define pcibios_assign_all_busses() pci_has_flag(PCI_REASSIGN_ALL_BUS)
+
+#ifdef CONFIG_PCI_DOMAINS
+static inline int pci_proc_domain(struct pci_bus *bus)
+{
+ return pci_domain_nr(bus);
+}
+#endif /* CONFIG_PCI_DOMAINS */
+
+#define HAVE_PCI_MMAP
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
+
+extern void pcibios_report_status(unsigned int status_mask, int warn);
+
+#endif /* __KERNEL__ */
+#endif
diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h
new file mode 100644
index 0000000000..7545c87c25
--- /dev/null
+++ b/arch/arm/include/asm/percpu.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2012 Calxeda, Inc.
+ */
+#ifndef _ASM_ARM_PERCPU_H_
+#define _ASM_ARM_PERCPU_H_
+
+#include <asm/insn.h>
+
+register unsigned long current_stack_pointer asm ("sp");
+
+/*
+ * Same as asm-generic/percpu.h, except that we store the per cpu offset
+ * in the TPIDRPRW. TPIDRPRW only exists on V6K and V7.
+ */
+#ifdef CONFIG_SMP
+static inline void set_my_cpu_offset(unsigned long off)
+{
+ extern unsigned int smp_on_up;
+
+ if (IS_ENABLED(CONFIG_CPU_V6) && !smp_on_up)
+ return;
+
+ /* Set TPIDRPRW */
+ asm volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (off) : "memory");
+}
+
+static __always_inline unsigned long __my_cpu_offset(void)
+{
+ unsigned long off;
+
+ /*
+ * Read TPIDRPRW.
+ * We want to allow caching the value, so avoid using volatile and
+ * instead use a fake stack read to hazard against barrier().
+ */
+ asm("0: mrc p15, 0, %0, c13, c0, 4 \n\t"
+#ifdef CONFIG_CPU_V6
+ "1: \n\t"
+ " .subsection 1 \n\t"
+#if defined(CONFIG_ARM_HAS_GROUP_RELOCS) && \
+ !(defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
+ "2: " LOAD_SYM_ARMV6(%0, __per_cpu_offset) " \n\t"
+ " b 1b \n\t"
+#else
+ "2: ldr %0, 3f \n\t"
+ " ldr %0, [%0] \n\t"
+ " b 1b \n\t"
+ "3: .long __per_cpu_offset \n\t"
+#endif
+ " .previous \n\t"
+ " .pushsection \".alt.smp.init\", \"a\" \n\t"
+ " .long 0b - . \n\t"
+ " b . + (2b - 0b) \n\t"
+ " .popsection \n\t"
+#endif
+ : "=r" (off)
+ : "Q" (*(const unsigned long *)current_stack_pointer));
+
+ return off;
+}
+#define __my_cpu_offset __my_cpu_offset()
+#else
+#define set_my_cpu_offset(x) do {} while(0)
+
+#endif /* CONFIG_SMP */
+
+#include <asm-generic/percpu.h>
+
+#endif /* _ASM_ARM_PERCPU_H_ */
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
new file mode 100644
index 0000000000..bdbc1e5908
--- /dev/null
+++ b/arch/arm/include/asm/perf_event.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * linux/arch/arm/include/asm/perf_event.h
+ *
+ * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
+ */
+
+#ifndef __ARM_PERF_EVENT_H__
+#define __ARM_PERF_EVENT_H__
+
+#ifdef CONFIG_PERF_EVENTS
+struct pt_regs;
+extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
+extern unsigned long perf_misc_flags(struct pt_regs *regs);
+#define perf_misc_flags(regs) perf_misc_flags(regs)
+#endif
+
+#define perf_arch_fetch_caller_regs(regs, __ip) { \
+ (regs)->ARM_pc = (__ip); \
+ frame_pointer((regs)) = (unsigned long) __builtin_frame_address(0); \
+ (regs)->ARM_sp = current_stack_pointer; \
+ (regs)->ARM_cpsr = SVC_MODE; \
+}
+
+#endif /* __ARM_PERF_EVENT_H__ */
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
new file mode 100644
index 0000000000..a17f01235c
--- /dev/null
+++ b/arch/arm/include/asm/pgalloc.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/pgalloc.h
+ *
+ * Copyright (C) 2000-2001 Russell King
+ */
+#ifndef _ASMARM_PGALLOC_H
+#define _ASMARM_PGALLOC_H
+
+#include <linux/pagemap.h>
+
+#include <asm/domain.h>
+#include <asm/pgtable-hwdef.h>
+#include <asm/processor.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+
+#ifdef CONFIG_MMU
+
+#define _PAGE_USER_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER))
+#define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL))
+
+#ifdef CONFIG_ARM_LPAE
+#define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
+
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+ set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
+}
+
+#else /* !CONFIG_ARM_LPAE */
+#define PGD_SIZE (PAGE_SIZE << 2)
+
+/*
+ * Since we have only two-level page tables, these are trivial
+ */
+#define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
+#define pmd_free(mm, pmd) do { } while (0)
+#ifdef CONFIG_KASAN
+/* The KASan core unconditionally calls pud_populate() on all architectures */
+#define pud_populate(mm,pmd,pte) do { } while (0)
+#else
+#define pud_populate(mm,pmd,pte) BUG()
+#endif
+#endif /* CONFIG_ARM_LPAE */
+
+extern pgd_t *pgd_alloc(struct mm_struct *mm);
+extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
+
+static inline void clean_pte_table(pte_t *pte)
+{
+ clean_dcache_area(pte + PTE_HWTABLE_PTRS, PTE_HWTABLE_SIZE);
+}
+
+/*
+ * Allocate one PTE table.
+ *
+ * This actually allocates two hardware PTE tables, but we wrap this up
+ * into one table thus:
+ *
+ * +------------+
+ * | Linux pt 0 |
+ * +------------+
+ * | Linux pt 1 |
+ * +------------+
+ * | h/w pt 0 |
+ * +------------+
+ * | h/w pt 1 |
+ * +------------+
+ */
+
+#define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
+#define __HAVE_ARCH_PTE_ALLOC_ONE
+#define __HAVE_ARCH_PGD_FREE
+#include <asm-generic/pgalloc.h>
+
+static inline pte_t *
+pte_alloc_one_kernel(struct mm_struct *mm)
+{
+ pte_t *pte = __pte_alloc_one_kernel(mm);
+
+ if (pte)
+ clean_pte_table(pte);
+
+ return pte;
+}
+
+#ifdef CONFIG_HIGHPTE
+#define PGTABLE_HIGHMEM __GFP_HIGHMEM
+#else
+#define PGTABLE_HIGHMEM 0
+#endif
+
+static inline pgtable_t
+pte_alloc_one(struct mm_struct *mm)
+{
+ struct page *pte;
+
+ pte = __pte_alloc_one(mm, GFP_PGTABLE_USER | PGTABLE_HIGHMEM);
+ if (!pte)
+ return NULL;
+ if (!PageHighMem(pte))
+ clean_pte_table(page_address(pte));
+ return pte;
+}
+
+static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
+ pmdval_t prot)
+{
+ pmdval_t pmdval = (pte + PTE_HWTABLE_OFF) | prot;
+ pmdp[0] = __pmd(pmdval);
+#ifndef CONFIG_ARM_LPAE
+ pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
+#endif
+ flush_pmd_entry(pmdp);
+}
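+
+/*
+ * Worked example (classic MMU): the second hardware table starts
+ * 256 * sizeof(pte_t) == 1024 bytes after the first, so pmdp[1] points
+ * 1KB beyond pmdp[0]; each table maps 1MB, and the pair covers the full
+ * 2MB Linux "pmd" described in asm/pgtable-2level.h.
+ */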
+
+/*
+ * Populate the pmdp entry with a pointer to the pte. This pmd is part
+ * of the mm address space.
+ *
+ * Ensure that we always set both PMD entries.
+ */
+static inline void
+pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
+{
+ /*
+ * The pmd must be loaded with the physical address of the PTE table
+ */
+ __pmd_populate(pmdp, __pa(ptep), _PAGE_KERNEL_TABLE);
+}
+
+static inline void
+pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
+{
+ extern pmdval_t user_pmd_table;
+ pmdval_t prot;
+
+ if (__LINUX_ARM_ARCH__ >= 6 && !IS_ENABLED(CONFIG_ARM_LPAE))
+ prot = user_pmd_table;
+ else
+ prot = _PAGE_USER_TABLE;
+
+ __pmd_populate(pmdp, page_to_phys(ptep), prot);
+}
+
+#endif /* CONFIG_MMU */
+
+#endif
diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
new file mode 100644
index 0000000000..556937e179
--- /dev/null
+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/pgtable-2level-hwdef.h
+ *
+ * Copyright (C) 1995-2002 Russell King
+ */
+#ifndef _ASM_PGTABLE_2LEVEL_HWDEF_H
+#define _ASM_PGTABLE_2LEVEL_HWDEF_H
+
+/*
+ * Hardware page table definitions.
+ *
+ * + Level 1 descriptor (PMD)
+ * - common
+ */
+#define PMD_TYPE_MASK (_AT(pmdval_t, 3) << 0)
+#define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
+#define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
+#define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
+#define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
+#define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
+#define PMD_DOMAIN_MASK PMD_DOMAIN(0x0f)
+#define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
+/*
+ * - section
+ */
+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
+#define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
+#define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
+#define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
+#define PMD_SECT_AP_WRITE (_AT(pmdval_t, 1) << 10)
+#define PMD_SECT_AP_READ (_AT(pmdval_t, 1) << 11)
+#define PMD_SECT_TEX(x) (_AT(pmdval_t, (x)) << 12) /* v5 */
+#define PMD_SECT_APX (_AT(pmdval_t, 1) << 15) /* v6 */
+#define PMD_SECT_S (_AT(pmdval_t, 1) << 16) /* v6 */
+#define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
+#define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
+#define PMD_SECT_AF (_AT(pmdval_t, 0))
+
+#define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
+#define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
+#define PMD_SECT_WT (PMD_SECT_CACHEABLE)
+#define PMD_SECT_WB (PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
+#define PMD_SECT_MINICACHE (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE)
+#define PMD_SECT_WBWA (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
+#define PMD_SECT_CACHE_MASK (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
+#define PMD_SECT_NONSHARED_DEV (PMD_SECT_TEX(2))
+
+/*
+ * - coarse table (not used)
+ */
+
+/*
+ * + Level 2 descriptor (PTE)
+ * - common
+ */
+#define PTE_TYPE_MASK (_AT(pteval_t, 3) << 0)
+#define PTE_TYPE_FAULT (_AT(pteval_t, 0) << 0)
+#define PTE_TYPE_LARGE (_AT(pteval_t, 1) << 0)
+#define PTE_TYPE_SMALL (_AT(pteval_t, 2) << 0)
+#define PTE_TYPE_EXT (_AT(pteval_t, 3) << 0) /* v5 */
+#define PTE_BUFFERABLE (_AT(pteval_t, 1) << 2)
+#define PTE_CACHEABLE (_AT(pteval_t, 1) << 3)
+
+/*
+ * - extended small page/tiny page
+ */
+#define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
+#define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
+#define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
+#define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
+#define PTE_EXT_AP_UNO_SRO (_AT(pteval_t, 0) << 4)
+#define PTE_EXT_AP_UNO_SRW (PTE_EXT_AP0)
+#define PTE_EXT_AP_URO_SRW (PTE_EXT_AP1)
+#define PTE_EXT_AP_URW_SRW (PTE_EXT_AP1|PTE_EXT_AP0)
+#define PTE_EXT_TEX(x) (_AT(pteval_t, (x)) << 6) /* v5 */
+#define PTE_EXT_APX (_AT(pteval_t, 1) << 9) /* v6 */
+#define PTE_EXT_COHERENT (_AT(pteval_t, 1) << 9) /* XScale3 */
+#define PTE_EXT_SHARED (_AT(pteval_t, 1) << 10) /* v6 */
+#define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* v6 */
+
+/*
+ * - small page
+ */
+#define PTE_SMALL_AP_MASK (_AT(pteval_t, 0xff) << 4)
+#define PTE_SMALL_AP_UNO_SRO (_AT(pteval_t, 0x00) << 4)
+#define PTE_SMALL_AP_UNO_SRW (_AT(pteval_t, 0x55) << 4)
+#define PTE_SMALL_AP_URO_SRW (_AT(pteval_t, 0xaa) << 4)
+#define PTE_SMALL_AP_URW_SRW (_AT(pteval_t, 0xff) << 4)
+
+#define PHYS_MASK (~0UL)
+
+#endif
diff --git a/arch/arm/include/asm/pgtable-2level-types.h b/arch/arm/include/asm/pgtable-2level-types.h
new file mode 100644
index 0000000000..650e793f41
--- /dev/null
+++ b/arch/arm/include/asm/pgtable-2level-types.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/pgtable-2level-types.h
+ *
+ * Copyright (C) 1995-2003 Russell King
+ */
+#ifndef _ASM_PGTABLE_2LEVEL_TYPES_H
+#define _ASM_PGTABLE_2LEVEL_TYPES_H
+
+#include <asm/types.h>
+
+typedef u32 pteval_t;
+typedef u32 pmdval_t;
+
+#undef STRICT_MM_TYPECHECKS
+
+#ifdef STRICT_MM_TYPECHECKS
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct { pteval_t pte; } pte_t;
+typedef struct { pmdval_t pmd; } pmd_t;
+typedef struct { pmdval_t pgd[2]; } pgd_t;
+typedef struct { pteval_t pgprot; } pgprot_t;
+
+#define pte_val(x) ((x).pte)
+#define pmd_val(x) ((x).pmd)
+#define pgd_val(x) ((x).pgd[0])
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) } )
+#define __pmd(x) ((pmd_t) { (x) } )
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+#else
+/*
+ * .. while these make it easier on the compiler
+ */
+typedef pteval_t pte_t;
+typedef pmdval_t pmd_t;
+typedef pmdval_t pgd_t[2];
+typedef pteval_t pgprot_t;
+
+#define pte_val(x) (x)
+#define pmd_val(x) (x)
+#define pgd_val(x) ((x)[0])
+#define pgprot_val(x) (x)
+
+#define __pte(x) (x)
+#define __pmd(x) (x)
+#define __pgprot(x) (x)
+
+#endif /* STRICT_MM_TYPECHECKS */
+
+#endif /* _ASM_PGTABLE_2LEVEL_TYPES_H */
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
new file mode 100644
index 0000000000..ce543cd938
--- /dev/null
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -0,0 +1,249 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/pgtable-2level.h
+ *
+ * Copyright (C) 1995-2002 Russell King
+ */
+#ifndef _ASM_PGTABLE_2LEVEL_H
+#define _ASM_PGTABLE_2LEVEL_H
+
+#define __PAGETABLE_PMD_FOLDED 1
+
+/*
+ * Hardware-wise, we have a two level page table structure, where the first
+ * level has 4096 entries, and the second level has 256 entries. Each entry
+ * is one 32-bit word. Most of the bits in the second level entry are used
+ * by hardware, and there aren't any "accessed" and "dirty" bits.
+ *
+ * Linux on the other hand has a three level page table structure, which can
+ * be wrapped to fit a two level page table structure easily - using the PGD
+ * and PTE only. However, Linux also expects one "PTE" table per page, and
+ * at least a "dirty" bit.
+ *
+ * Therefore, we tweak the implementation slightly - we tell Linux that we
+ * have 2048 entries in the first level, each of which is 8 bytes (iow, two
+ * hardware pointers to the second level.) The second level contains two
+ * hardware PTE tables arranged contiguously, preceded by Linux versions
+ * which contain the state information Linux needs. We, therefore, end up
+ * with 512 entries in the "PTE" level.
+ *
+ * This leads to the page tables having the following layout:
+ *
+ * pgd pte
+ * | |
+ * +--------+
+ * | | +------------+ +0
+ * +- - - - + | Linux pt 0 |
+ * | | +------------+ +1024
+ * +--------+ +0 | Linux pt 1 |
+ * | |-----> +------------+ +2048
+ * +- - - - + +4 | h/w pt 0 |
+ * | |-----> +------------+ +3072
+ * +--------+ +8 | h/w pt 1 |
+ * | | +------------+ +4096
+ *
+ * See L_PTE_xxx below for definitions of bits in the "Linux pt", and
+ * PTE_xxx for definitions of bits appearing in the "h/w pt".
+ *
+ * PMD_xxx definitions refer to bits in the first level page table.
+ *
+ * The "dirty" bit is emulated by only granting hardware write permission
+ * iff the page is marked "writable" and "dirty" in the Linux PTE. This
+ * means that a write to a clean page will cause a permission fault, and
+ * the Linux MM layer will mark the page dirty via handle_pte_fault().
+ * For the hardware to notice the permission change, the TLB entry must
+ * be flushed, and ptep_set_access_flags() does that for us.
+ *
+ * The "accessed" or "young" bit is emulated by a similar method; we only
+ * allow accesses to the page if the "young" bit is set. Accesses to the
+ * page will cause a fault, and handle_pte_fault() will set the young bit
+ * for us as long as the page is marked present in the corresponding Linux
+ * PTE entry. Again, ptep_set_access_flags() will ensure that the TLB is
+ * up to date.
+ *
+ * However, when the "young" bit is cleared, we deny access to the page
+ * by clearing the hardware PTE. Currently Linux does not flush the TLB
+ * for us in this case, which means the TLB will retain the translation
+ * until either the TLB entry is evicted under pressure, or a context
+ * switch which changes the user space mapping occurs.
+ */
+#define PTRS_PER_PTE 512
+#define PTRS_PER_PMD 1
+#define PTRS_PER_PGD 2048
+
+#define PTE_HWTABLE_PTRS (PTRS_PER_PTE)
+#define PTE_HWTABLE_OFF (PTE_HWTABLE_PTRS * sizeof(pte_t))
+#define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u32))
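+
+/*
+ * Worked example: with PTRS_PER_PTE == 512 and 4-byte entries,
+ * PTE_HWTABLE_OFF == PTE_HWTABLE_SIZE == 2048, i.e. the two hardware
+ * tables occupy the second half of the 4K "PTE" page, exactly as in the
+ * layout diagram above.
+ */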
+
+#define MAX_POSSIBLE_PHYSMEM_BITS 32
+
+/*
+ * PMD_SHIFT determines the size of the area a second-level page table can map.
+ * PGDIR_SHIFT determines the size of the area a top-level page table entry can map.
+ */
+#define PMD_SHIFT 21
+#define PGDIR_SHIFT 21
+
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+/*
+ * section address mask and size definitions.
+ */
+#define SECTION_SHIFT 20
+#define SECTION_SIZE (1UL << SECTION_SHIFT)
+#define SECTION_MASK (~(SECTION_SIZE-1))
+
+/*
+ * ARMv6 supersection address mask and size definitions.
+ */
+#define SUPERSECTION_SHIFT 24
+#define SUPERSECTION_SIZE (1UL << SUPERSECTION_SHIFT)
+#define SUPERSECTION_MASK (~(SUPERSECTION_SIZE-1))
+
+#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
+
+/*
+ * "Linux" PTE definitions.
+ *
+ * We keep two sets of PTEs - the hardware and the linux version.
+ * This allows greater flexibility in the way we map the Linux bits
+ * onto the hardware tables, and allows us to have YOUNG and DIRTY
+ * bits.
+ *
+ * The PTE table pointer refers to the hardware entries; the "Linux"
+ * entries are stored 1024 bytes below.
+ */
+#define L_PTE_VALID (_AT(pteval_t, 1) << 0) /* Valid */
+#define L_PTE_PRESENT (_AT(pteval_t, 1) << 0)
+#define L_PTE_YOUNG (_AT(pteval_t, 1) << 1)
+#define L_PTE_DIRTY (_AT(pteval_t, 1) << 6)
+#define L_PTE_RDONLY (_AT(pteval_t, 1) << 7)
+#define L_PTE_USER (_AT(pteval_t, 1) << 8)
+#define L_PTE_XN (_AT(pteval_t, 1) << 9)
+#define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
+#define L_PTE_NONE (_AT(pteval_t, 1) << 11)
+
+/* We borrow bit 7 to store the exclusive marker in swap PTEs. */
+#define L_PTE_SWP_EXCLUSIVE L_PTE_RDONLY
+
+/*
+ * These are the memory types, defined to be compatible with the
+ * pre-ARMv6 CPU cacheable and bufferable bits: n/a,n/a,C,B.
+ * On ARMv6+ without TEX remapping, they are used as a table index.
+ * On ARMv6+ with TEX remapping, they correspond to n/a,TEX(0),C,B.
+ *
+ * MT type Pre-ARMv6 ARMv6+ type / cacheable status
+ * UNCACHED Uncached Strongly ordered
+ * BUFFERABLE Bufferable Normal memory / non-cacheable
+ * WRITETHROUGH Writethrough Normal memory / write through
+ * WRITEBACK Writeback Normal memory / write back, read alloc
+ * MINICACHE Minicache N/A
+ * WRITEALLOC Writeback Normal memory / write back, write alloc
+ * DEV_SHARED Uncached Device memory (shared)
+ * DEV_NONSHARED Uncached Device memory (non-shared)
+ * DEV_WC Bufferable Normal memory / non-cacheable
+ * DEV_CACHED Writeback Normal memory / write back, read alloc
+ * VECTORS Variable Normal memory / variable
+ *
+ * All normal memory mappings have the following properties:
+ * - reads can be repeated with no side effects
+ * - repeated reads return the last value written
+ * - reads can fetch additional locations without side effects
+ * - writes can be repeated (in certain cases) with no side effects
+ * - writes can be merged before accessing the target
+ * - unaligned accesses can be supported
+ *
+ * All device mappings have the following properties:
+ * - no access speculation
+ * - no repetition (eg, on return from an exception)
+ * - number, order and size of accesses are maintained
+ * - unaligned accesses are "unpredictable"
+ */
+#define L_PTE_MT_UNCACHED (_AT(pteval_t, 0x00) << 2) /* 0000 */
+#define L_PTE_MT_BUFFERABLE (_AT(pteval_t, 0x01) << 2) /* 0001 */
+#define L_PTE_MT_WRITETHROUGH (_AT(pteval_t, 0x02) << 2) /* 0010 */
+#define L_PTE_MT_WRITEBACK (_AT(pteval_t, 0x03) << 2) /* 0011 */
+#define L_PTE_MT_MINICACHE (_AT(pteval_t, 0x06) << 2) /* 0110 (sa1100, xscale) */
+#define L_PTE_MT_WRITEALLOC (_AT(pteval_t, 0x07) << 2) /* 0111 */
+#define L_PTE_MT_DEV_SHARED (_AT(pteval_t, 0x04) << 2) /* 0100 */
+#define L_PTE_MT_DEV_NONSHARED (_AT(pteval_t, 0x0c) << 2) /* 1100 */
+#define L_PTE_MT_DEV_WC (_AT(pteval_t, 0x09) << 2) /* 1001 */
+#define L_PTE_MT_DEV_CACHED (_AT(pteval_t, 0x0b) << 2) /* 1011 */
+#define L_PTE_MT_VECTORS (_AT(pteval_t, 0x0f) << 2) /* 1111 */
+#define L_PTE_MT_MASK (_AT(pteval_t, 0x0f) << 2)
+
+#ifndef __ASSEMBLY__
+
+/*
+ * The "pud_xxx()" functions here are trivial when the pmd is folded into
+ * the pud: the pud entry is never bad, always exists, and can't be set or
+ * cleared.
+ */
+static inline int pud_none(pud_t pud)
+{
+ return 0;
+}
+
+static inline int pud_bad(pud_t pud)
+{
+ return 0;
+}
+
+static inline int pud_present(pud_t pud)
+{
+ return 1;
+}
+
+static inline void pud_clear(pud_t *pudp)
+{
+}
+
+static inline void set_pud(pud_t *pudp, pud_t pud)
+{
+}
+
+static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
+{
+ return (pmd_t *)pud;
+}
+#define pmd_offset pmd_offset
+
+#define pmd_pfn(pmd) (__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
+
+#define pmd_large(pmd) (pmd_val(pmd) & 2)
+#define pmd_leaf(pmd) (pmd_val(pmd) & 2)
+#define pmd_bad(pmd) (pmd_val(pmd) & 2)
+#define pmd_present(pmd) (pmd_val(pmd))
+
+#define copy_pmd(pmdpd,pmdps) \
+ do { \
+ pmdpd[0] = pmdps[0]; \
+ pmdpd[1] = pmdps[1]; \
+ flush_pmd_entry(pmdpd); \
+ } while (0)
+
+#define pmd_clear(pmdp) \
+ do { \
+ pmdp[0] = __pmd(0); \
+ pmdp[1] = __pmd(0); \
+ clean_pmd_entry(pmdp); \
+ } while (0)
+
+/* we don't need complex calculations here as the pmd is folded into the pgd */
+#define pmd_addr_end(addr,end) (end)
+
+#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
+
+/*
+ * We don't have huge page support for short descriptors, for the moment
+ * define empty stubs for use by pin_page_for_write.
+ */
+#define pmd_hugewillfault(pmd) (0)
+#define pmd_thp_or_huge(pmd) (0)
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_PGTABLE_2LEVEL_H */
diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
new file mode 100644
index 0000000000..2f35b4edda
--- /dev/null
+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/pgtable-3level-hwdef.h
+ *
+ * Copyright (C) 2011 ARM Ltd.
+ * Author: Catalin Marinas <catalin.marinas@arm.com>
+ */
+#ifndef _ASM_PGTABLE_3LEVEL_HWDEF_H
+#define _ASM_PGTABLE_3LEVEL_HWDEF_H
+
+/*
+ * Hardware page table definitions.
+ *
+ * + Level 1/2 descriptor
+ * - common
+ */
+#define PMD_TYPE_MASK (_AT(pmdval_t, 3) << 0)
+#define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
+#define PMD_TYPE_TABLE (_AT(pmdval_t, 3) << 0)
+#define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0)
+#define PMD_TABLE_BIT (_AT(pmdval_t, 1) << 1)
+#define PMD_BIT4 (_AT(pmdval_t, 0))
+#define PMD_DOMAIN(x) (_AT(pmdval_t, 0))
+#define PMD_APTABLE_SHIFT (61)
+#define PMD_APTABLE (_AT(pgdval_t, 3) << PMD_APTABLE_SHIFT)
+#define PMD_PXNTABLE (_AT(pgdval_t, 1) << 59)
+
+/*
+ * - section
+ */
+#define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
+#define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
+#define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */
+#define PMD_SECT_AP2 (_AT(pmdval_t, 1) << 7) /* read only */
+#define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
+#define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
+#define PMD_SECT_nG (_AT(pmdval_t, 1) << 11)
+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 53)
+#define PMD_SECT_XN (_AT(pmdval_t, 1) << 54)
+#define PMD_SECT_AP_WRITE (_AT(pmdval_t, 0))
+#define PMD_SECT_AP_READ (_AT(pmdval_t, 0))
+#define PMD_SECT_AP1 (_AT(pmdval_t, 1) << 6)
+#define PMD_SECT_TEX(x) (_AT(pmdval_t, 0))
+
+/*
+ * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
+ */
+#define PMD_SECT_UNCACHED (_AT(pmdval_t, 0) << 2) /* strongly ordered */
+#define PMD_SECT_BUFFERED (_AT(pmdval_t, 1) << 2) /* normal non-cacheable */
+#define PMD_SECT_WT (_AT(pmdval_t, 2) << 2) /* normal inner write-through */
+#define PMD_SECT_WB (_AT(pmdval_t, 3) << 2) /* normal inner write-back */
+#define PMD_SECT_WBWA (_AT(pmdval_t, 7) << 2) /* normal inner write-alloc */
+#define PMD_SECT_CACHE_MASK (_AT(pmdval_t, 7) << 2)
+
+/*
+ * + Level 3 descriptor (PTE)
+ */
+#define PTE_TYPE_MASK (_AT(pteval_t, 3) << 0)
+#define PTE_TYPE_FAULT (_AT(pteval_t, 0) << 0)
+#define PTE_TYPE_PAGE (_AT(pteval_t, 3) << 0)
+#define PTE_TABLE_BIT (_AT(pteval_t, 1) << 1)
+#define PTE_BUFFERABLE (_AT(pteval_t, 1) << 2) /* AttrIndx[0] */
+#define PTE_CACHEABLE (_AT(pteval_t, 1) << 3) /* AttrIndx[1] */
+#define PTE_AP2 (_AT(pteval_t, 1) << 7) /* AP[2] */
+#define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
+#define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
+#define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
+#define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
+
+/*
+ * 40-bit physical address supported.
+ */
+#define PHYS_MASK_SHIFT (40)
+#define PHYS_MASK ((1ULL << PHYS_MASK_SHIFT) - 1)
+
+/*
+ * TTBR0/TTBR1 split (PAGE_OFFSET):
+ * 0x40000000: T0SZ = 2, T1SZ = 0 (not used)
+ * 0x80000000: T0SZ = 0, T1SZ = 1
+ * 0xc0000000: T0SZ = 0, T1SZ = 2
+ *
+ * Only use this feature if PHYS_OFFSET <= PAGE_OFFSET, otherwise
+ * booting secondary CPUs would end up using TTBR1 for the identity
+ * mapping set up in TTBR0.
+ */
+#if defined CONFIG_VMSPLIT_2G
+#define TTBR1_OFFSET 16 /* skip two L1 entries */
+#elif defined CONFIG_VMSPLIT_3G
+#define TTBR1_OFFSET (4096 * (1 + 3)) /* only L2, skip pgd + 3*pmd */
+#else
+#define TTBR1_OFFSET 0
+#endif
+
+#define TTBR1_SIZE (((PAGE_OFFSET >> 30) - 1) << 16)
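+
+/*
+ * Worked example: with PAGE_OFFSET == 0xc0000000 (VMSPLIT_3G),
+ * (PAGE_OFFSET >> 30) - 1 == 2, so TTBR1_SIZE programs T1SZ = 2 into
+ * TTBCR bits [18:16], matching the split table above.
+ */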
+
+#endif
diff --git a/arch/arm/include/asm/pgtable-3level-types.h b/arch/arm/include/asm/pgtable-3level-types.h
new file mode 100644
index 0000000000..d0f587a212
--- /dev/null
+++ b/arch/arm/include/asm/pgtable-3level-types.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/pgtable-3level-types.h
+ *
+ * Copyright (C) 2011 ARM Ltd.
+ * Author: Catalin Marinas <catalin.marinas@arm.com>
+ */
+#ifndef _ASM_PGTABLE_3LEVEL_TYPES_H
+#define _ASM_PGTABLE_3LEVEL_TYPES_H
+
+#include <asm/types.h>
+
+typedef u64 pteval_t;
+typedef u64 pmdval_t;
+typedef u64 pgdval_t;
+
+#undef STRICT_MM_TYPECHECKS
+
+#ifdef STRICT_MM_TYPECHECKS
+
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct { pteval_t pte; } pte_t;
+typedef struct { pmdval_t pmd; } pmd_t;
+typedef struct { pgdval_t pgd; } pgd_t;
+typedef struct { pteval_t pgprot; } pgprot_t;
+
+#define pte_val(x) ((x).pte)
+#define pmd_val(x) ((x).pmd)
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) } )
+#define __pmd(x) ((pmd_t) { (x) } )
+#define __pgd(x) ((pgd_t) { (x) } )
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+#else /* !STRICT_MM_TYPECHECKS */
+
+typedef pteval_t pte_t;
+typedef pmdval_t pmd_t;
+typedef pgdval_t pgd_t;
+typedef pteval_t pgprot_t;
+
+#define pte_val(x) (x)
+#define pmd_val(x) (x)
+#define pgd_val(x) (x)
+#define pgprot_val(x) (x)
+
+#define __pte(x) (x)
+#define __pmd(x) (x)
+#define __pgd(x) (x)
+#define __pgprot(x) (x)
+
+#endif /* STRICT_MM_TYPECHECKS */
+
+#endif /* _ASM_PGTABLE_3LEVEL_TYPES_H */
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
new file mode 100644
index 0000000000..71c3add641
--- /dev/null
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/pgtable-3level.h
+ *
+ * Copyright (C) 2011 ARM Ltd.
+ * Author: Catalin Marinas <catalin.marinas@arm.com>
+ */
+#ifndef _ASM_PGTABLE_3LEVEL_H
+#define _ASM_PGTABLE_3LEVEL_H
+
+/*
+ * With LPAE, there are 3 levels of page tables. Each level has 512 entries of
+ * 8 bytes each, occupying a 4K page. The first level table covers a range of
+ * 512GB, each entry representing 1GB. Since we are limited to 4GB input
+ * address range, only 4 entries in the PGD are used.
+ *
+ * There are enough spare bits in a page table entry for the kernel specific
+ * state.
+ */
+#define PTRS_PER_PTE 512
+#define PTRS_PER_PMD 512
+#define PTRS_PER_PGD 4
+
+#define PTE_HWTABLE_PTRS (0)
+#define PTE_HWTABLE_OFF (0)
+#define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u64))
+
+#define MAX_POSSIBLE_PHYSMEM_BITS 40
+
+/*
+ * PGDIR_SHIFT determines the size a top-level page table entry can map.
+ */
+#define PGDIR_SHIFT 30
+
+/*
+ * PMD_SHIFT determines the size a middle-level page table entry can map.
+ */
+#define PMD_SHIFT 21
+
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~((1 << PMD_SHIFT) - 1))
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~((1 << PGDIR_SHIFT) - 1))
+
+/*
+ * section address mask and size definitions.
+ */
+#define SECTION_SHIFT 21
+#define SECTION_SIZE (1UL << SECTION_SHIFT)
+#define SECTION_MASK (~((1 << SECTION_SHIFT) - 1))
+
+#define USER_PTRS_PER_PGD (PAGE_OFFSET / PGDIR_SIZE)
+
+/*
+ * Hugetlb definitions.
+ */
+#define HPAGE_SHIFT PMD_SHIFT
+#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
+#define HPAGE_MASK (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
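+
+/*
+ * Worked example: with 4K base pages, HPAGE_SHIFT == PMD_SHIFT == 21
+ * gives 2MB huge pages and HUGETLB_PAGE_ORDER == 21 - 12 == 9, i.e.
+ * 512 base pages per huge page.
+ */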
+
+/*
+ * "Linux" PTE definitions for LPAE.
+ *
+ * These bits overlap with the hardware bits but the naming is preserved for
+ * consistency with the classic page table format.
+ */
+#define L_PTE_VALID (_AT(pteval_t, 1) << 0) /* Valid */
+#define L_PTE_PRESENT (_AT(pteval_t, 3) << 0) /* Present */
+#define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
+#define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
+#define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
+#define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
+#define L_PTE_DIRTY (_AT(pteval_t, 1) << 55)
+#define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56)
+#define L_PTE_NONE (_AT(pteval_t, 1) << 57) /* PROT_NONE */
+#define L_PTE_RDONLY (_AT(pteval_t, 1) << 58) /* READ ONLY */
+
+/* We borrow bit 7 to store the exclusive marker in swap PTEs. */
+#define L_PTE_SWP_EXCLUSIVE (_AT(pteval_t, 1) << 7)
+
+#define L_PMD_SECT_VALID (_AT(pmdval_t, 1) << 0)
+#define L_PMD_SECT_DIRTY (_AT(pmdval_t, 1) << 55)
+#define L_PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
+#define L_PMD_SECT_RDONLY (_AT(pteval_t, 1) << 58)
+
+/*
+ * To be used in assembly code with the upper page attributes.
+ */
+#define L_PTE_XN_HIGH (1 << (54 - 32))
+#define L_PTE_DIRTY_HIGH (1 << (55 - 32))
+
+/*
+ * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
+ */
+#define L_PTE_MT_UNCACHED (_AT(pteval_t, 0) << 2) /* strongly ordered */
+#define L_PTE_MT_BUFFERABLE (_AT(pteval_t, 1) << 2) /* normal non-cacheable */
+#define L_PTE_MT_WRITETHROUGH (_AT(pteval_t, 2) << 2) /* normal inner write-through */
+#define L_PTE_MT_WRITEBACK (_AT(pteval_t, 3) << 2) /* normal inner write-back */
+#define L_PTE_MT_WRITEALLOC (_AT(pteval_t, 7) << 2) /* normal inner write-alloc */
+#define L_PTE_MT_DEV_SHARED (_AT(pteval_t, 4) << 2) /* device */
+#define L_PTE_MT_DEV_NONSHARED (_AT(pteval_t, 4) << 2) /* device */
+#define L_PTE_MT_DEV_WC (_AT(pteval_t, 1) << 2) /* normal non-cacheable */
+#define L_PTE_MT_DEV_CACHED (_AT(pteval_t, 3) << 2) /* normal inner write-back */
+#define L_PTE_MT_MASK (_AT(pteval_t, 7) << 2)
+
+/*
+ * Software PGD flags.
+ */
+#define L_PGD_SWAPPER (_AT(pgdval_t, 1) << 55) /* swapper_pg_dir entry */
+
+#ifndef __ASSEMBLY__
+
+#define pud_none(pud) (!pud_val(pud))
+#define pud_bad(pud) (!(pud_val(pud) & 2))
+#define pud_present(pud) (pud_val(pud))
+#define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
+ PMD_TYPE_TABLE)
+#define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
+ PMD_TYPE_SECT)
+#define pmd_large(pmd) pmd_sect(pmd)
+#define pmd_leaf(pmd) pmd_sect(pmd)
+
+#define pud_clear(pudp) \
+ do { \
+ *pudp = __pud(0); \
+ clean_pmd_entry(pudp); \
+ } while (0)
+
+#define set_pud(pudp, pud) \
+ do { \
+ *pudp = pud; \
+ flush_pmd_entry(pudp); \
+ } while (0)
+
+static inline pmd_t *pud_pgtable(pud_t pud)
+{
+ return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
+}
+
+#define pmd_bad(pmd) (!(pmd_val(pmd) & 2))
+
+#define copy_pmd(pmdpd,pmdps) \
+ do { \
+ *pmdpd = *pmdps; \
+ flush_pmd_entry(pmdpd); \
+ } while (0)
+
+#define pmd_clear(pmdp) \
+ do { \
+ *pmdp = __pmd(0); \
+ clean_pmd_entry(pmdp); \
+ } while (0)
+
+/*
+ * For 3 levels of paging the PTE_EXT_NG bit will be set for user address ptes
+ * that are written to a page table but not for ptes created with mk_pte.
+ *
+ * In hugetlb_no_page, a new huge pte (new_pte) is generated and passed to
+ * hugetlb_cow, where it is compared with an entry in a page table.
+ * This comparison erroneously fails, ultimately leading to a memory leak.
+ *
+ * To correct this behaviour, we mask off PTE_EXT_NG for any pte that is
+ * present before running the comparison.
+ */
+#define __HAVE_ARCH_PTE_SAME
+#define pte_same(pte_a,pte_b) ((pte_present(pte_a) ? pte_val(pte_a) & ~PTE_EXT_NG \
+ : pte_val(pte_a)) \
+ == (pte_present(pte_b) ? pte_val(pte_b) & ~PTE_EXT_NG \
+ : pte_val(pte_b)))
+
+#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,__pte(pte_val(pte)|(ext)))
+
+#define pte_huge(pte) (pte_val(pte) && !(pte_val(pte) & PTE_TABLE_BIT))
+#define pte_mkhuge(pte) (__pte(pte_val(pte) & ~PTE_TABLE_BIT))
+
+#define pmd_isset(pmd, val) ((u32)(val) == (val) ? pmd_val(pmd) & (val) \
+ : !!(pmd_val(pmd) & (val)))
+#define pmd_isclear(pmd, val) (!(pmd_val(pmd) & (val)))
+
+#define pmd_present(pmd) (pmd_isset((pmd), L_PMD_SECT_VALID))
+#define pmd_young(pmd) (pmd_isset((pmd), PMD_SECT_AF))
+#define pte_special(pte) (pte_isset((pte), L_PTE_SPECIAL))
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+ pte_val(pte) |= L_PTE_SPECIAL;
+ return pte;
+}
+
+#define pmd_write(pmd) (pmd_isclear((pmd), L_PMD_SECT_RDONLY))
+#define pmd_dirty(pmd) (pmd_isset((pmd), L_PMD_SECT_DIRTY))
+
+#define pmd_hugewillfault(pmd) (!pmd_young(pmd) || !pmd_write(pmd))
+#define pmd_thp_or_huge(pmd) (pmd_huge(pmd) || pmd_trans_huge(pmd))
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define pmd_trans_huge(pmd) (pmd_val(pmd) && !pmd_table(pmd))
+#endif
+
+#define PMD_BIT_FUNC(fn,op) \
+static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }
+
+PMD_BIT_FUNC(wrprotect, |= L_PMD_SECT_RDONLY);
+PMD_BIT_FUNC(mkold, &= ~PMD_SECT_AF);
+PMD_BIT_FUNC(mkwrite_novma, &= ~L_PMD_SECT_RDONLY);
+PMD_BIT_FUNC(mkdirty, |= L_PMD_SECT_DIRTY);
+PMD_BIT_FUNC(mkclean, &= ~L_PMD_SECT_DIRTY);
+PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
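+
+/*
+ * Example expansion (illustrative): PMD_BIT_FUNC(mkold, &= ~PMD_SECT_AF)
+ * above defines
+ *
+ *   static inline pmd_t pmd_mkold(pmd_t pmd)
+ *   { pmd_val(pmd) &= ~PMD_SECT_AF; return pmd; }
+ */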
+
+#define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
+
+#define pmd_pfn(pmd) (((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
+#define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
+
+/* No hardware dirty/accessed bits -- generic_pmdp_establish() fits */
+#define pmdp_establish generic_pmdp_establish
+
+/* Represent a not-present pmd by a faulting entry; this is used by pmdp_invalidate */
+static inline pmd_t pmd_mkinvalid(pmd_t pmd)
+{
+ return __pmd(pmd_val(pmd) & ~L_PMD_SECT_VALID);
+}
+
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+ const pmdval_t mask = PMD_SECT_USER | PMD_SECT_XN | L_PMD_SECT_RDONLY |
+ L_PMD_SECT_VALID | L_PMD_SECT_NONE;
+ pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask);
+ return pmd;
+}
+
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t pmd)
+{
+ BUG_ON(addr >= TASK_SIZE);
+
+ /* create a faulting entry if PROT_NONE protected */
+ if (pmd_val(pmd) & L_PMD_SECT_NONE)
+ pmd_val(pmd) &= ~L_PMD_SECT_VALID;
+
+ if (pmd_write(pmd) && pmd_dirty(pmd))
+ pmd_val(pmd) &= ~PMD_SECT_AP2;
+ else
+ pmd_val(pmd) |= PMD_SECT_AP2;
+
+ *pmdp = __pmd(pmd_val(pmd) | PMD_SECT_nG);
+ flush_pmd_entry(pmdp);
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_PGTABLE_3LEVEL_H */
diff --git a/arch/arm/include/asm/pgtable-hwdef.h b/arch/arm/include/asm/pgtable-hwdef.h
new file mode 100644
index 0000000000..d60548ccd1
--- /dev/null
+++ b/arch/arm/include/asm/pgtable-hwdef.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/pgtable-hwdef.h
+ *
+ * Copyright (C) 1995-2002 Russell King
+ */
+#ifndef _ASMARM_PGTABLE_HWDEF_H
+#define _ASMARM_PGTABLE_HWDEF_H
+
+#ifdef CONFIG_ARM_LPAE
+#include <asm/pgtable-3level-hwdef.h>
+#else
+#include <asm/pgtable-2level-hwdef.h>
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h
new file mode 100644
index 0000000000..61480d0960
--- /dev/null
+++ b/arch/arm/include/asm/pgtable-nommu.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/pgtable-nommu.h
+ *
+ * Copyright (C) 1995-2002 Russell King
+ * Copyright (C) 2004 Hyok S. Choi
+ */
+#ifndef _ASMARM_PGTABLE_NOMMU_H
+#define _ASMARM_PGTABLE_NOMMU_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/slab.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+
+/*
+ * Trivial page table functions.
+ */
+#define pgd_present(pgd) (1)
+#define pgd_none(pgd) (0)
+#define pgd_bad(pgd) (0)
+#define pgd_clear(pgdp)
+/*
+ * PMD_SHIFT determines the size of the area a second-level page table can map.
+ * PGDIR_SHIFT determines the size of the area a top-level page table entry can map.
+ */
+#define PGDIR_SHIFT 21
+
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+/* FIXME */
+
+#define PAGE_NONE __pgprot(0)
+#define PAGE_SHARED __pgprot(0)
+#define PAGE_COPY __pgprot(0)
+#define PAGE_READONLY __pgprot(0)
+#define PAGE_KERNEL __pgprot(0)
+
+#define swapper_pg_dir ((pgd_t *) 0)
+
+
+typedef pte_t *pte_addr_t;
+
+/*
+ * Mark the prot value as uncacheable and unbufferable.
+ */
+#define pgprot_noncached(prot) (prot)
+#define pgprot_writecombine(prot) (prot)
+#define pgprot_device(prot) (prot)
+
+
+/*
+ * These would be in other places but having them here reduces the diffs.
+ */
+extern unsigned int kobjsize(const void *objp);
+
+/*
+ * All 32-bit addresses are effectively valid for vmalloc...
+ * Sort of meaningless for non-VM targets.
+ */
+#define VMALLOC_START 0UL
+#define VMALLOC_END 0xffffffffUL
+
+#define FIRST_USER_ADDRESS 0UL
+
+#else
+
+/*
+ * dummy tlb and user structures.
+ */
+#define v3_tlb_fns (0)
+#define v4_tlb_fns (0)
+#define v4wb_tlb_fns (0)
+#define v4wbi_tlb_fns (0)
+#define v6wbi_tlb_fns (0)
+#define v7wbi_tlb_fns (0)
+
+#define v3_user_fns (0)
+#define v4_user_fns (0)
+#define v4_mc_user_fns (0)
+#define v4wb_user_fns (0)
+#define v4wt_user_fns (0)
+#define v6_user_fns (0)
+#define xscale_mc_user_fns (0)
+
+#endif /*__ASSEMBLY__*/
+
+#endif /* _ASMARM_PGTABLE_NOMMU_H */
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
new file mode 100644
index 0000000000..16b02f44c7
--- /dev/null
+++ b/arch/arm/include/asm/pgtable.h
@@ -0,0 +1,334 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/pgtable.h
+ *
+ * Copyright (C) 1995-2002 Russell King
+ */
+#ifndef _ASMARM_PGTABLE_H
+#define _ASMARM_PGTABLE_H
+
+#include <linux/const.h>
+#include <asm/proc-fns.h>
+
+#ifndef __ASSEMBLY__
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern struct page *empty_zero_page;
+#define ZERO_PAGE(vaddr) (empty_zero_page)
+#endif
+
+#ifndef CONFIG_MMU
+
+#include <asm-generic/pgtable-nopud.h>
+#include <asm/pgtable-nommu.h>
+
+#else
+
+#include <asm-generic/pgtable-nopud.h>
+#include <asm/page.h>
+#include <asm/pgtable-hwdef.h>
+
+
+#include <asm/tlbflush.h>
+
+#ifdef CONFIG_ARM_LPAE
+#include <asm/pgtable-3level.h>
+#else
+#include <asm/pgtable-2level.h>
+#endif
+
+/*
+ * Just any arbitrary offset to the start of the vmalloc VM area: the
+ * current 8MB value just means that there will be an 8MB "hole" after the
+ * physical memory until the kernel virtual memory starts. That means that
+ * any out-of-bounds memory accesses will hopefully be caught.
+ * The vmalloc() routines leave a hole of 4kB between each vmalloced
+ * area for the same reason. ;)
+ */
+#define VMALLOC_OFFSET (8*1024*1024)
+#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
+#define VMALLOC_END 0xff800000UL
+
+#define LIBRARY_TEXT_START 0x0c000000
+
+#ifndef __ASSEMBLY__
+extern void __pte_error(const char *file, int line, pte_t);
+extern void __pmd_error(const char *file, int line, pmd_t);
+extern void __pgd_error(const char *file, int line, pgd_t);
+
+#define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte)
+#define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
+#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
+
+/*
+ * This is the lowest virtual address we can permit any user space
+ * mapping to be mapped at. This is particularly important for
+ * non-high vector CPUs.
+ */
+#define FIRST_USER_ADDRESS (PAGE_SIZE * 2)
+
+/*
+ * Use TASK_SIZE as the ceiling argument for free_pgtables() and
+ * free_pgd_range() to avoid freeing the modules pmd when LPAE is enabled (pmd
+ * page shared between user and kernel).
+ */
+#ifdef CONFIG_ARM_LPAE
+#define USER_PGTABLES_CEILING TASK_SIZE
+#endif
+
+/*
+ * The pgprot_* and protection_map entries will be fixed up in runtime
+ * to include the cachable and bufferable bits based on memory policy,
+ * as well as any architecture dependent bits like global/ASID and SMP
+ * shared mapping bits.
+ */
+#define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
+
+extern pgprot_t pgprot_user;
+extern pgprot_t pgprot_kernel;
+
+#define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b))
+
+#define PAGE_NONE _MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY | L_PTE_NONE)
+#define PAGE_SHARED _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
+#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_user, L_PTE_USER)
+#define PAGE_COPY _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
+#define PAGE_COPY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
+#define PAGE_READONLY _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
+#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
+#define PAGE_KERNEL _MOD_PROT(pgprot_kernel, L_PTE_XN)
+#define PAGE_KERNEL_EXEC pgprot_kernel
+
+#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
+#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
+#define __PAGE_SHARED_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER)
+#define __PAGE_COPY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
+#define __PAGE_COPY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
+#define __PAGE_READONLY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
+#define __PAGE_READONLY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
+
+#define __pgprot_modify(prot,mask,bits) \
+ __pgprot((pgprot_val(prot) & ~(mask)) | (bits))
+
+#define pgprot_noncached(prot) \
+ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
+
+#define pgprot_writecombine(prot) \
+ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)
+
+#define pgprot_stronglyordered(prot) \
+ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
+
+#define pgprot_device(prot) \
+ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_DEV_SHARED | L_PTE_SHARED | L_PTE_DIRTY | L_PTE_XN)
+
+#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
+#define pgprot_dmacoherent(prot) \
+ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t vma_prot);
+#else
+#define pgprot_dmacoherent(prot) \
+ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * The table below defines the page protection levels that we insert into our
+ * Linux page table version. These get translated into the best that the
+ * architecture can perform. Note that on most ARM hardware:
+ * 1) We cannot do execute protection
+ * 2) If we could do execute protection, then read is implied
+ * 3) write implies read permissions
+ */
+
+#ifndef __ASSEMBLY__
+
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+
+#define pud_page(pud) pmd_page(__pmd(pud_val(pud)))
+#define pud_write(pud) pmd_write(__pmd(pud_val(pud)))
+
+#define pmd_none(pmd) (!pmd_val(pmd))
+
+static inline pte_t *pmd_page_vaddr(pmd_t pmd)
+{
+ return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
+}
+
+#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
+
+#define pte_pfn(pte) ((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)
+#define pfn_pte(pfn,prot) __pte(__pfn_to_phys(pfn) | pgprot_val(prot))
+
+#define pte_page(pte) pfn_to_page(pte_pfn(pte))
+#define mk_pte(page,prot) pfn_pte(page_to_pfn(page), prot)
+
+#define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0)
+
+#define pte_isset(pte, val) ((u32)(val) == (val) ? pte_val(pte) & (val) \
+ : !!(pte_val(pte) & (val)))
+#define pte_isclear(pte, val) (!(pte_val(pte) & (val)))
+
+#define pte_none(pte) (!pte_val(pte))
+#define pte_present(pte) (pte_isset((pte), L_PTE_PRESENT))
+#define pte_valid(pte) (pte_isset((pte), L_PTE_VALID))
+#define pte_accessible(mm, pte) (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
+#define pte_write(pte) (pte_isclear((pte), L_PTE_RDONLY))
+#define pte_dirty(pte) (pte_isset((pte), L_PTE_DIRTY))
+#define pte_young(pte) (pte_isset((pte), L_PTE_YOUNG))
+#define pte_exec(pte) (pte_isclear((pte), L_PTE_XN))
+
+#define pte_valid_user(pte) \
+ (pte_valid(pte) && pte_isset((pte), L_PTE_USER) && pte_young(pte))
+
+static inline bool pte_access_permitted(pte_t pte, bool write)
+{
+ pteval_t mask = L_PTE_PRESENT | L_PTE_USER;
+ pteval_t needed = mask;
+
+ if (write)
+ mask |= L_PTE_RDONLY;
+
+ return (pte_val(pte) & mask) == needed;
+}
+#define pte_access_permitted pte_access_permitted
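+
+/*
+ * Illustrative: for a write, the mask above grows to
+ * L_PTE_PRESENT | L_PTE_USER | L_PTE_RDONLY while "needed" stays
+ * L_PTE_PRESENT | L_PTE_USER, so any pte with L_PTE_RDONLY set fails
+ * the check.
+ */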
+
+#if __LINUX_ARM_ARCH__ < 6
+static inline void __sync_icache_dcache(pte_t pteval)
+{
+}
+#else
+extern void __sync_icache_dcache(pte_t pteval);
+#endif
+
+void set_ptes(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pteval, unsigned int nr);
+#define set_ptes set_ptes
+
+static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
+{
+ pte_val(pte) &= ~pgprot_val(prot);
+ return pte;
+}
+
+static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
+{
+ pte_val(pte) |= pgprot_val(prot);
+ return pte;
+}
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ return set_pte_bit(pte, __pgprot(L_PTE_RDONLY));
+}
+
+static inline pte_t pte_mkwrite_novma(pte_t pte)
+{
+ return clear_pte_bit(pte, __pgprot(L_PTE_RDONLY));
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+ return clear_pte_bit(pte, __pgprot(L_PTE_DIRTY));
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ return set_pte_bit(pte, __pgprot(L_PTE_DIRTY));
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+ return clear_pte_bit(pte, __pgprot(L_PTE_YOUNG));
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ return set_pte_bit(pte, __pgprot(L_PTE_YOUNG));
+}
+
+static inline pte_t pte_mkexec(pte_t pte)
+{
+ return clear_pte_bit(pte, __pgprot(L_PTE_XN));
+}
+
+static inline pte_t pte_mknexec(pte_t pte)
+{
+ return set_pte_bit(pte, __pgprot(L_PTE_XN));
+}
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
+ L_PTE_NONE | L_PTE_VALID;
+ pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
+ return pte;
+}
+
+/*
+ * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
+ * are !pte_none() && !pte_present().
+ *
+ * Format of swap PTEs:
+ *
+ * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * <------------------- offset ------------------> E < type -> 0 0
+ *
+ * E is the exclusive marker that is not stored in swap entries.
+ *
+ * This gives us up to 31 swap files and 64GB per swap file. Note that
+ * the offset field is always non-zero.
+ */
+#define __SWP_TYPE_SHIFT 2
+#define __SWP_TYPE_BITS 5
+#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
+#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT + 1)
+
+#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
+#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
+#define __swp_entry(type, offset) ((swp_entry_t) { (((type) & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT) | \
+ ((offset) << __SWP_OFFSET_SHIFT) })
+
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(swp) __pte((swp).val)
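+
+/*
+ * Worked example (illustrative values): __swp_entry(1, 0x100) is
+ * (1 << 2) | (0x100 << 8) == 0x10004 -- the type in bits 6..2, the
+ * exclusive marker E in bit 7 (initially clear), and the offset from
+ * bit 8 upwards, matching the layout diagram above.
+ */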
+
+static inline int pte_swp_exclusive(pte_t pte)
+{
+ return pte_isset(pte, L_PTE_SWP_EXCLUSIVE);
+}
+
+static inline pte_t pte_swp_mkexclusive(pte_t pte)
+{
+ return set_pte_bit(pte, __pgprot(L_PTE_SWP_EXCLUSIVE));
+}
+
+static inline pte_t pte_swp_clear_exclusive(pte_t pte)
+{
+ return clear_pte_bit(pte, __pgprot(L_PTE_SWP_EXCLUSIVE));
+}
+
+/*
+ * It is an error for the kernel to have more swap files than we can
+ * encode in the PTEs. This ensures that we know when MAX_SWAPFILES
+ * is increased beyond what we presently support.
+ */
+#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
+
+/*
+ * We provide our own arch_get_unmapped_area to cope with VIPT caches.
+ */
+#define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* CONFIG_MMU */
+
+#endif /* _ASMARM_PGTABLE_H */
diff --git a/arch/arm/include/asm/probes.h b/arch/arm/include/asm/probes.h
new file mode 100644
index 0000000000..ebbd9ec95d
--- /dev/null
+++ b/arch/arm/include/asm/probes.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/probes.h
+ *
+ * Original contents copied from arch/arm/include/asm/kprobes.h
+ * which contains the following notice...
+ *
+ * Copyright (C) 2006, 2007 Motorola Inc.
+ */
+
+#ifndef _ASM_PROBES_H
+#define _ASM_PROBES_H
+
+#ifndef __ASSEMBLY__
+
+typedef u32 probes_opcode_t;
+
+struct arch_probes_insn;
+typedef void (probes_insn_handler_t)(probes_opcode_t,
+ struct arch_probes_insn *,
+ struct pt_regs *);
+typedef unsigned long (probes_check_cc)(unsigned long);
+typedef void (probes_insn_singlestep_t)(probes_opcode_t,
+ struct arch_probes_insn *,
+ struct pt_regs *);
+typedef void (probes_insn_fn_t)(void);
+
+/* Architecture specific copy of original instruction. */
+struct arch_probes_insn {
+ probes_opcode_t *insn;
+ probes_insn_handler_t *insn_handler;
+ probes_check_cc *insn_check_cc;
+ probes_insn_singlestep_t *insn_singlestep;
+ probes_insn_fn_t *insn_fn;
+ int stack_space;
+ unsigned long register_usage_flags;
+ bool kprobe_direct_exec;
+};
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * We assume one instruction can consume at most 64 bytes of stack, which is
+ * 'push {r0-r15}'. Instructions that consume more, or an unknown amount of,
+ * stack space, such as 'str r0, [sp, #-80]' and 'str r0, [sp, r1]', should
+ * be prohibited from being probed.
+ */
+#define MAX_STACK_SIZE 64
+
+#endif
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
new file mode 100644
index 0000000000..280396483f
--- /dev/null
+++ b/arch/arm/include/asm/proc-fns.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/proc-fns.h
+ *
+ * Copyright (C) 1997-1999 Russell King
+ * Copyright (C) 2000 Deep Blue Solutions Ltd
+ */
+#ifndef __ASM_PROCFNS_H
+#define __ASM_PROCFNS_H
+
+#ifdef __KERNEL__
+
+#include <asm/glue-proc.h>
+#include <asm/page.h>
+
+#ifndef __ASSEMBLY__
+
+struct mm_struct;
+
+/*
+ * Don't change this structure - ASM code relies on it.
+ */
+struct processor {
+	/*
+	 * MISC: get data abort address/flags
+	 */
+ void (*_data_abort)(unsigned long pc);
+ /*
+ * Retrieve prefetch fault address
+ */
+ unsigned long (*_prefetch_abort)(unsigned long lr);
+ /*
+ * Set up any processor specifics
+ */
+ void (*_proc_init)(void);
+ /*
+ * Check for processor bugs
+ */
+ void (*check_bugs)(void);
+ /*
+ * Disable any processor specifics
+ */
+ void (*_proc_fin)(void);
+ /*
+ * Special stuff for a reset
+ */
+ void (*reset)(unsigned long addr, bool hvc) __attribute__((noreturn));
+ /*
+ * Idle the processor
+ */
+ int (*_do_idle)(void);
+ /*
+ * Processor architecture specific
+ */
+ /*
+ * clean a virtual address range from the
+ * D-cache without flushing the cache.
+ */
+ void (*dcache_clean_area)(void *addr, int size);
+
+ /*
+ * Set the page table
+ */
+ void (*switch_mm)(phys_addr_t pgd_phys, struct mm_struct *mm);
+ /*
+ * Set a possibly extended PTE. Non-extended PTEs should
+ * ignore 'ext'.
+ */
+#ifdef CONFIG_ARM_LPAE
+ void (*set_pte_ext)(pte_t *ptep, pte_t pte);
+#else
+ void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext);
+#endif
+
+ /* Suspend/resume */
+ unsigned int suspend_size;
+ void (*do_suspend)(void *);
+ void (*do_resume)(void *);
+};
+
+#ifndef MULTI_CPU
+static inline void init_proc_vtable(const struct processor *p)
+{
+}
+
+extern void cpu_proc_init(void);
+extern void cpu_proc_fin(void);
+extern int cpu_do_idle(void);
+extern void cpu_dcache_clean_area(void *, int);
+extern void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+#ifdef CONFIG_ARM_LPAE
+extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte);
+#else
+extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+#endif
+extern void cpu_reset(unsigned long addr, bool hvc) __attribute__((noreturn));
+
+/* These three are private to arch/arm/kernel/suspend.c */
+extern void cpu_do_suspend(void *);
+extern void cpu_do_resume(void *);
+#else
+
+extern struct processor processor;
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+#include <linux/smp.h>
+/*
+ * This can't be a per-cpu variable because we need to access it before
+ * per-cpu has been initialised. We have a couple of functions that are
+ * called in a pre-emptible context, and so can't use smp_processor_id()
+ * there, hence PROC_TABLE(). We insist in init_proc_vtable() that the
+ * function pointers for these are identical across all CPUs.
+ */
+extern struct processor *cpu_vtable[];
+#define PROC_VTABLE(f) cpu_vtable[smp_processor_id()]->f
+#define PROC_TABLE(f) cpu_vtable[0]->f
+static inline void init_proc_vtable(const struct processor *p)
+{
+ unsigned int cpu = smp_processor_id();
+ *cpu_vtable[cpu] = *p;
+ WARN_ON_ONCE(cpu_vtable[cpu]->dcache_clean_area !=
+ cpu_vtable[0]->dcache_clean_area);
+ WARN_ON_ONCE(cpu_vtable[cpu]->set_pte_ext !=
+ cpu_vtable[0]->set_pte_ext);
+}
+#else
+#define PROC_VTABLE(f) processor.f
+#define PROC_TABLE(f) processor.f
+static inline void init_proc_vtable(const struct processor *p)
+{
+ processor = *p;
+}
+#endif
+
+#define cpu_proc_init PROC_VTABLE(_proc_init)
+#define cpu_check_bugs PROC_VTABLE(check_bugs)
+#define cpu_proc_fin PROC_VTABLE(_proc_fin)
+#define cpu_reset PROC_VTABLE(reset)
+#define cpu_do_idle PROC_VTABLE(_do_idle)
+#define cpu_dcache_clean_area PROC_TABLE(dcache_clean_area)
+#define cpu_set_pte_ext PROC_TABLE(set_pte_ext)
+#define cpu_do_switch_mm PROC_VTABLE(switch_mm)
+
+/* These two are private to arch/arm/kernel/suspend.c */
+#define cpu_do_suspend PROC_VTABLE(do_suspend)
+#define cpu_do_resume PROC_VTABLE(do_resume)
+#endif
+
+extern void cpu_resume(void);
+
+#ifdef CONFIG_MMU
+
+#define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm)
+
+#ifdef CONFIG_ARM_LPAE
+
+#define cpu_get_ttbr(nr) \
+ ({ \
+ u64 ttbr; \
+ __asm__("mrrc p15, " #nr ", %Q0, %R0, c2" \
+ : "=r" (ttbr)); \
+ ttbr; \
+ })
+
+#define cpu_get_pgd() \
+ ({ \
+ u64 pg = cpu_get_ttbr(0); \
+ pg &= ~(PTRS_PER_PGD*sizeof(pgd_t)-1); \
+ (pgd_t *)phys_to_virt(pg); \
+ })
+#else
+#define cpu_get_pgd() \
+ ({ \
+ unsigned long pg; \
+ __asm__("mrc p15, 0, %0, c2, c0, 0" \
+ : "=r" (pg) : : "cc"); \
+ pg &= ~0x3fff; \
+ (pgd_t *)phys_to_virt(pg); \
+ })
+#endif
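+
+/*
+ * Illustrative: the short-descriptor TTBR0 carries table-walk attribute
+ * flags in its low bits, so masking with ~0x3fff recovers the
+ * 16KB-aligned pgd base before phys_to_virt() turns it back into a
+ * pointer.
+ */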
+
+#else /*!CONFIG_MMU */
+
+#define cpu_switch_mm(pgd,mm) { }
+
+#endif
+
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* __ASM_PROCFNS_H */
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
new file mode 100644
index 0000000000..326864f79d
--- /dev/null
+++ b/arch/arm/include/asm/processor.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/processor.h
+ *
+ * Copyright (C) 1995-1999 Russell King
+ */
+
+#ifndef __ASM_ARM_PROCESSOR_H
+#define __ASM_ARM_PROCESSOR_H
+
+#ifdef __KERNEL__
+
+#include <asm/hw_breakpoint.h>
+#include <asm/ptrace.h>
+#include <asm/types.h>
+#include <asm/unified.h>
+#include <asm/vdso/processor.h>
+
+#ifdef __KERNEL__
+#define STACK_TOP ((current->personality & ADDR_LIMIT_32BIT) ? \
+ TASK_SIZE : TASK_SIZE_26)
+#define STACK_TOP_MAX TASK_SIZE
+#endif
+
+struct debug_info {
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ struct perf_event *hbp[ARM_MAX_HBP_SLOTS];
+#endif
+};
+
+struct thread_struct {
+ /* fault info */
+ unsigned long address;
+ unsigned long trap_no;
+ unsigned long error_code;
+ /* debugging */
+ struct debug_info debug;
+};
+
+/*
+ * Everything usercopied to/from thread_struct is statically-sized, so
+ * no hardened usercopy whitelist is needed.
+ */
+static inline void arch_thread_struct_whitelist(unsigned long *offset,
+ unsigned long *size)
+{
+ *offset = *size = 0;
+}
+
+#define INIT_THREAD { }
+
+#define start_thread(regs,pc,sp) \
+({ \
+ unsigned long r7, r8, r9; \
+ \
+ if (IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC)) { \
+ r7 = regs->ARM_r7; \
+ r8 = regs->ARM_r8; \
+ r9 = regs->ARM_r9; \
+ } \
+ memset(regs->uregs, 0, sizeof(regs->uregs)); \
+ if (IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC) && \
+ current->personality & FDPIC_FUNCPTRS) { \
+ regs->ARM_r7 = r7; \
+ regs->ARM_r8 = r8; \
+ regs->ARM_r9 = r9; \
+ regs->ARM_r10 = current->mm->start_data; \
+ } else if (!IS_ENABLED(CONFIG_MMU)) \
+ regs->ARM_r10 = current->mm->start_data; \
+ if (current->personality & ADDR_LIMIT_32BIT) \
+ regs->ARM_cpsr = USR_MODE; \
+ else \
+ regs->ARM_cpsr = USR26_MODE; \
+ if (elf_hwcap & HWCAP_THUMB && pc & 1) \
+ regs->ARM_cpsr |= PSR_T_BIT; \
+ regs->ARM_cpsr |= PSR_ENDSTATE; \
+ regs->ARM_pc = pc & ~1; /* pc */ \
+ regs->ARM_sp = sp; /* sp */ \
+})
+
+/* Forward declaration, a strange C thing */
+struct task_struct;
+
+unsigned long __get_wchan(struct task_struct *p);
+
+#define task_pt_regs(p) \
+ ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
+
+#define KSTK_EIP(tsk) task_pt_regs(tsk)->ARM_pc
+#define KSTK_ESP(tsk) task_pt_regs(tsk)->ARM_sp
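+
+/*
+ * Illustrative: the saved pt_regs sit at the top of the task's kernel
+ * stack (THREAD_START_SP from the stack base), so task_pt_regs() is
+ * "one struct pt_regs below that", and KSTK_EIP()/KSTK_ESP() read the
+ * user pc/sp straight out of that frame.
+ */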
+
+#ifdef CONFIG_SMP
+#define __ALT_SMP_ASM(smp, up) \
+ "9998: " smp "\n" \
+ " .pushsection \".alt.smp.init\", \"a\"\n" \
+ " .align 2\n" \
+ " .long 9998b - .\n" \
+ " " up "\n" \
+ " .popsection\n"
+#else
+#define __ALT_SMP_ASM(smp, up) up
+#endif
+
+/*
+ * Prefetching support - ARMv5 and later.
+ */
+#if __LINUX_ARM_ARCH__ >= 5
+
+#define ARCH_HAS_PREFETCH
+static inline void prefetch(const void *ptr)
+{
+ __asm__ __volatile__(
+ "pld\t%a0"
+ :: "p" (ptr));
+}
+
+#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
+#define ARCH_HAS_PREFETCHW
+static inline void prefetchw(const void *ptr)
+{
+ __asm__ __volatile__(
+ ".arch_extension mp\n"
+ __ALT_SMP_ASM(
+ "pldw\t%a0",
+ "pld\t%a0"
+ )
+ :: "p" (ptr));
+}
+#endif
+#endif
+
+#endif
+
+#endif /* __ASM_ARM_PROCESSOR_H */
diff --git a/arch/arm/include/asm/procinfo.h b/arch/arm/include/asm/procinfo.h
new file mode 100644
index 0000000000..42df316fb8
--- /dev/null
+++ b/arch/arm/include/asm/procinfo.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/procinfo.h
+ *
+ * Copyright (C) 1996-1999 Russell King
+ */
+#ifndef __ASM_PROCINFO_H
+#define __ASM_PROCINFO_H
+
+#ifdef __KERNEL__
+
+struct cpu_tlb_fns;
+struct cpu_user_fns;
+struct cpu_cache_fns;
+struct processor;
+
+/*
+ * Note! struct processor is always defined if we're
+ * using MULTI_CPU, otherwise this entry is unused,
+ * but still exists.
+ *
+ * NOTE! The following structure is defined by assembly
+ * language, NOT C code. For more information, check:
+ * arch/arm/mm/proc-*.S and arch/arm/kernel/head.S
+ */
+struct proc_info_list {
+ unsigned int cpu_val;
+ unsigned int cpu_mask;
+ unsigned long __cpu_mm_mmu_flags; /* used by head.S */
+ unsigned long __cpu_io_mmu_flags; /* used by head.S */
+ unsigned long __cpu_flush; /* used by head.S */
+ const char *arch_name;
+ const char *elf_name;
+ unsigned int elf_hwcap;
+ const char *cpu_name;
+ struct processor *proc;
+ struct cpu_tlb_fns *tlb;
+ struct cpu_user_fns *user;
+ struct cpu_cache_fns *cache;
+};
+
+#else /* __KERNEL__ */
+#include <asm/elf.h>
+#warning "Please include asm/elf.h instead"
+#endif /* __KERNEL__ */
+#endif
diff --git a/arch/arm/include/asm/prom.h b/arch/arm/include/asm/prom.h
new file mode 100644
index 0000000000..402e3f34c7
--- /dev/null
+++ b/arch/arm/include/asm/prom.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/prom.h
+ *
+ * Copyright (C) 2009 Canonical Ltd. <jeremy.kerr@canonical.com>
+ */
+#ifndef __ASMARM_PROM_H
+#define __ASMARM_PROM_H
+
+#ifdef CONFIG_OF
+
+extern const struct machine_desc *setup_machine_fdt(void *dt_virt);
+extern void __init arm_dt_init_cpu_maps(void);
+
+#else /* CONFIG_OF */
+
+static inline const struct machine_desc *setup_machine_fdt(void *dt_virt)
+{
+ return NULL;
+}
+
+static inline void arm_dt_init_cpu_maps(void) { }
+
+#endif /* CONFIG_OF */
+#endif /* __ASMARM_PROM_H */
diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
new file mode 100644
index 0000000000..536e155328
--- /dev/null
+++ b/arch/arm/include/asm/psci.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * Copyright (C) 2012 ARM Limited
+ */
+
+#ifndef __ASM_ARM_PSCI_H
+#define __ASM_ARM_PSCI_H
+
+extern const struct smp_operations psci_smp_ops;
+
+#if defined(CONFIG_SMP) && defined(CONFIG_ARM_PSCI)
+bool psci_smp_available(void);
+#else
+static inline bool psci_smp_available(void) { return false; }
+#endif
+
+#endif /* __ASM_ARM_PSCI_H */
diff --git a/arch/arm/include/asm/ptdump.h b/arch/arm/include/asm/ptdump.h
new file mode 100644
index 0000000000..aad1d03413
--- /dev/null
+++ b/arch/arm/include/asm/ptdump.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2014 ARM Ltd. */
+#ifndef __ASM_PTDUMP_H
+#define __ASM_PTDUMP_H
+
+#ifdef CONFIG_ARM_PTDUMP_CORE
+
+#include <linux/mm_types.h>
+#include <linux/seq_file.h>
+
+struct addr_marker {
+ unsigned long start_address;
+ char *name;
+};
+
+struct ptdump_info {
+ struct mm_struct *mm;
+ const struct addr_marker *markers;
+ unsigned long base_addr;
+};
+
+void ptdump_walk_pgd(struct seq_file *s, struct ptdump_info *info);
+#ifdef CONFIG_ARM_PTDUMP_DEBUGFS
+#define EFI_RUNTIME_MAP_END SZ_1G
+void ptdump_debugfs_register(struct ptdump_info *info, const char *name);
+#else
+static inline void ptdump_debugfs_register(struct ptdump_info *info,
+ const char *name) { }
+#endif /* CONFIG_ARM_PTDUMP_DEBUGFS */
+
+void ptdump_check_wx(void);
+
+#endif /* CONFIG_ARM_PTDUMP_CORE */
+
+#ifdef CONFIG_DEBUG_WX
+#define debug_checkwx() ptdump_check_wx()
+#else
+#define debug_checkwx() do { } while (0)
+#endif
+
+#endif /* __ASM_PTDUMP_H */
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
new file mode 100644
index 0000000000..7f44e88d1f
--- /dev/null
+++ b/arch/arm/include/asm/ptrace.h
@@ -0,0 +1,200 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/ptrace.h
+ *
+ * Copyright (C) 1996-2003 Russell King
+ */
+#ifndef __ASM_ARM_PTRACE_H
+#define __ASM_ARM_PTRACE_H
+
+#include <uapi/asm/ptrace.h>
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+struct pt_regs {
+ unsigned long uregs[18];
+};
+
+struct svc_pt_regs {
+ struct pt_regs regs;
+ u32 dacr;
+};
+
+#define to_svc_pt_regs(r) container_of(r, struct svc_pt_regs, regs)
+
+#define user_mode(regs) \
+ (((regs)->ARM_cpsr & 0xf) == 0)
+
+#ifdef CONFIG_ARM_THUMB
+#define thumb_mode(regs) \
+ (((regs)->ARM_cpsr & PSR_T_BIT))
+#else
+#define thumb_mode(regs) (0)
+#endif
+
+#ifndef CONFIG_CPU_V7M
+#define isa_mode(regs) \
+ ((((regs)->ARM_cpsr & PSR_J_BIT) >> (__ffs(PSR_J_BIT) - 1)) | \
+ (((regs)->ARM_cpsr & PSR_T_BIT) >> (__ffs(PSR_T_BIT))))
+#else
+#define isa_mode(regs) 1 /* Thumb */
+#endif
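+
+/*
+ * Worked example: isa_mode() packs the CPSR J and T bits into the two-bit
+ * value (J << 1) | T, so the possible results are:
+ *	0 - ARM, 1 - Thumb, 2 - Jazelle, 3 - ThumbEE
+ * e.g. a CPSR with only PSR_T_BIT set yields 1 (Thumb).
+ */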
+
+#define processor_mode(regs) \
+ ((regs)->ARM_cpsr & MODE_MASK)
+
+#define interrupts_enabled(regs) \
+ (!((regs)->ARM_cpsr & PSR_I_BIT))
+
+#define fast_interrupts_enabled(regs) \
+ (!((regs)->ARM_cpsr & PSR_F_BIT))
+
+/* Are the current registers suitable for user mode?
+ * (used to maintain security in signal handlers)
+ */
+static inline int valid_user_regs(struct pt_regs *regs)
+{
+#ifndef CONFIG_CPU_V7M
+ unsigned long mode = regs->ARM_cpsr & MODE_MASK;
+
+ /*
+ * Always clear the F (FIQ) and A (delayed abort) bits
+ */
+ regs->ARM_cpsr &= ~(PSR_F_BIT | PSR_A_BIT);
+
+ if ((regs->ARM_cpsr & PSR_I_BIT) == 0) {
+ if (mode == USR_MODE)
+ return 1;
+ if (elf_hwcap & HWCAP_26BIT && mode == USR26_MODE)
+ return 1;
+ }
+
+ /*
+ * Force CPSR to something logical...
+ */
+ regs->ARM_cpsr &= PSR_f | PSR_s | PSR_x | PSR_T_BIT | MODE32_BIT;
+ if (!(elf_hwcap & HWCAP_26BIT))
+ regs->ARM_cpsr |= USR_MODE;
+
+ return 0;
+#else /* ifndef CONFIG_CPU_V7M */
+ return 1;
+#endif
+}
+
+static inline long regs_return_value(struct pt_regs *regs)
+{
+ return regs->ARM_r0;
+}
+
+#define instruction_pointer(regs) (regs)->ARM_pc
+
+#ifdef CONFIG_THUMB2_KERNEL
+#define frame_pointer(regs) (regs)->ARM_r7
+#else
+#define frame_pointer(regs) (regs)->ARM_fp
+#endif
+
+static inline void instruction_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ instruction_pointer(regs) = val;
+}
+
+#ifdef CONFIG_SMP
+extern unsigned long profile_pc(struct pt_regs *regs);
+#else
+#define profile_pc(regs) instruction_pointer(regs)
+#endif
+
+#define predicate(x) ((x) & 0xf0000000)
+#define PREDICATE_ALWAYS 0xe0000000
+
+/*
+ * True if instr is a 32-bit Thumb instruction. This works if instr
+ * is the first or only halfword of a Thumb instruction. It also works
+ * when instr holds all 32 bits of a wide Thumb instruction, if stored
+ * in the form (first_half << 16) | (second_half).
+ */
+#define is_wide_instruction(instr) ((unsigned)(instr) >= 0xe800)
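+
+/*
+ * Worked example: the first halfword of a Thumb-2 "bl" lies in the
+ * 0xf000-0xf7ff range, so is_wide_instruction(0xf000) is true, while a
+ * 16-bit "nop" (0xbf00) is below 0xe800 and evaluates false.
+ */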
+
+/*
+ * kprobe-based event tracer support
+ */
+#include <linux/compiler.h>
+#define MAX_REG_OFFSET (offsetof(struct pt_regs, ARM_ORIG_r0))
+
+extern int regs_query_register_offset(const char *name);
+extern const char *regs_query_register_name(unsigned int offset);
+extern bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr);
+extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
+ unsigned int n);
+
+/**
+ * regs_get_register() - get register value from its offset
+ * @regs: pt_regs from which the register value is read
+ * @offset: offset of the register.
+ *
+ * regs_get_register() returns the value of the register located at @offset
+ * from @regs, where @offset is the register's offset within struct pt_regs.
+ * If @offset is larger than MAX_REG_OFFSET, this returns 0.
+ */
+static inline unsigned long regs_get_register(struct pt_regs *regs,
+ unsigned int offset)
+{
+ if (unlikely(offset > MAX_REG_OFFSET))
+ return 0;
+ return *(unsigned long *)((unsigned long)regs + offset);
+}
+
+/* Valid only for Kernel mode traps. */
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
+ return regs->ARM_sp;
+}
+
+static inline unsigned long user_stack_pointer(struct pt_regs *regs)
+{
+ return regs->ARM_sp;
+}
+
+#define current_pt_regs(void) ({ (struct pt_regs *) \
+ ((current_stack_pointer | (THREAD_SIZE - 1)) - 7) - 1; \
+})
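+
+/*
+ * Illustrative arithmetic: (current_stack_pointer | (THREAD_SIZE - 1))
+ * yields the address of the last byte of the current stack; subtracting 7
+ * lands on THREAD_START_SP (the stack top sits 8 bytes below the end of
+ * the stack area), and the final "- 1" steps back over one struct pt_regs,
+ * matching task_pt_regs() in <asm/processor.h>.
+ */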
+
+static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
+{
+ regs->ARM_r0 = rc;
+}
+
+/*
+ * Update ITSTATE after normal execution of an IT block instruction.
+ *
+ * The 8 IT state bits are split into two parts in CPSR:
+ * ITSTATE<1:0> are in CPSR<26:25>
+ * ITSTATE<7:2> are in CPSR<15:10>
+ */
+static inline unsigned long it_advance(unsigned long cpsr)
+{
+ if ((cpsr & 0x06000400) == 0) {
+ /* ITSTATE<2:0> == 0 means end of IT block, so clear IT state */
+ cpsr &= ~PSR_IT_MASK;
+ } else {
+ /* We need to shift left ITSTATE<4:0> */
+ const unsigned long mask = 0x06001c00; /* Mask ITSTATE<4:0> */
+ unsigned long it = cpsr & mask;
+ it <<= 1;
+ it |= it >> (27 - 10); /* Carry ITSTATE<2> to correct place */
+ it &= mask;
+ cpsr &= ~mask;
+ cpsr |= it;
+ }
+ return cpsr;
+}
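+
+/*
+ * Worked example: on the last instruction of an IT block, ITSTATE has the
+ * form xxx01000, i.e. ITSTATE<2:0> == 0, so (cpsr & 0x06000400) == 0 and
+ * it_advance() simply clears PSR_IT_MASK. Mid-block (ITSTATE<2:0> != 0),
+ * the five low ITSTATE bits are instead shifted left one place across the
+ * two CPSR fields, with the bit crossing CPSR<26> into CPSR<10> carried
+ * over by the "it >> (27 - 10)" step.
+ */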
+
+int syscall_trace_enter(struct pt_regs *regs);
+void syscall_trace_exit(struct pt_regs *regs);
+
+#endif /* __ASSEMBLY__ */
+#endif
diff --git a/arch/arm/include/asm/seccomp.h b/arch/arm/include/asm/seccomp.h
new file mode 100644
index 0000000000..e9ad0f37d2
--- /dev/null
+++ b/arch/arm/include/asm/seccomp.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASM_SECCOMP_H
+#define _ASM_SECCOMP_H
+
+#include <asm-generic/seccomp.h>
+
+#define SECCOMP_ARCH_NATIVE AUDIT_ARCH_ARM
+#define SECCOMP_ARCH_NATIVE_NR NR_syscalls
+#define SECCOMP_ARCH_NATIVE_NAME "arm"
+
+#endif /* _ASM_SECCOMP_H */
diff --git a/arch/arm/include/asm/sections.h b/arch/arm/include/asm/sections.h
new file mode 100644
index 0000000000..700b8bcdf9
--- /dev/null
+++ b/arch/arm/include/asm/sections.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARM_SECTIONS_H
+#define _ASM_ARM_SECTIONS_H
+
+#include <asm-generic/sections.h>
+
+extern char _exiprom[];
+
+extern char __idmap_text_start[];
+extern char __idmap_text_end[];
+extern char __entry_text_start[];
+extern char __entry_text_end[];
+
+static inline bool in_entry_text(unsigned long addr)
+{
+ return memory_contains(__entry_text_start, __entry_text_end,
+ (void *)addr, 1);
+}
+
+static inline bool in_idmap_text(unsigned long addr)
+{
+ void *a = (void *)addr;
+ return memory_contains(__idmap_text_start, __idmap_text_end, a, 1);
+}
+
+#endif /* _ASM_ARM_SECTIONS_H */
diff --git a/arch/arm/include/asm/secure_cntvoff.h b/arch/arm/include/asm/secure_cntvoff.h
new file mode 100644
index 0000000000..1f93aee1f6
--- /dev/null
+++ b/arch/arm/include/asm/secure_cntvoff.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASMARM_ARCH_CNTVOFF_H
+#define __ASMARM_ARCH_CNTVOFF_H
+
+extern void secure_cntvoff_init(void);
+
+#endif
diff --git a/arch/arm/include/asm/semihost.h b/arch/arm/include/asm/semihost.h
new file mode 100644
index 0000000000..f365787e7c
--- /dev/null
+++ b/arch/arm/include/asm/semihost.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * Adapted for ARM and earlycon:
+ * Copyright (C) 2014 Linaro Ltd.
+ * Author: Rob Herring <robh@kernel.org>
+ */
+
+#ifndef _ARM_SEMIHOST_H_
+#define _ARM_SEMIHOST_H_
+
+#ifdef CONFIG_THUMB2_KERNEL
+#define SEMIHOST_SWI "0xab"
+#else
+#define SEMIHOST_SWI "0x123456"
+#endif
+
+struct uart_port;
+
+static inline void smh_putc(struct uart_port *port, unsigned char c)
+{
+ asm volatile("mov r1, %0\n"
+ "mov r0, #3\n"
+ "svc " SEMIHOST_SWI "\n"
+ : : "r" (&c) : "r0", "r1", "memory");
+}
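+
+/*
+ * Descriptive note: r0 = 3 selects the semihosting SYS_WRITEC operation,
+ * which writes the character addressed by r1 to the debugger console; the
+ * SVC number above (0xab for Thumb-2, 0x123456 for ARM) is the standard
+ * semihosting trap.
+ */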
+
+#endif /* _ARM_SEMIHOST_H_ */
diff --git a/arch/arm/include/asm/set_memory.h b/arch/arm/include/asm/set_memory.h
new file mode 100644
index 0000000000..0211b9c5b1
--- /dev/null
+++ b/arch/arm/include/asm/set_memory.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 1999-2002 Russell King
+ */
+
+#ifndef _ASMARM_SET_MEMORY_H
+#define _ASMARM_SET_MEMORY_H
+
+#ifdef CONFIG_MMU
+int set_memory_ro(unsigned long addr, int numpages);
+int set_memory_rw(unsigned long addr, int numpages);
+int set_memory_x(unsigned long addr, int numpages);
+int set_memory_nx(unsigned long addr, int numpages);
+int set_memory_valid(unsigned long addr, int numpages, int enable);
+#else
+static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
+static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
+static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
+static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
new file mode 100644
index 0000000000..546af8b1e3
--- /dev/null
+++ b/arch/arm/include/asm/setup.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * linux/include/asm/setup.h
+ *
+ * Copyright (C) 1997-1999 Russell King
+ *
+ * Structure passed to kernel to tell it about the
+ * hardware it's running on. See Documentation/arch/arm/setup.rst
+ * for more info.
+ */
+#ifndef __ASMARM_SETUP_H
+#define __ASMARM_SETUP_H
+
+#include <uapi/asm/setup.h>
+
+#define __tag __used __section(".taglist.init")
+#define __tagtable(tag, fn) \
+static const struct tagtable __tagtable_##fn __tag = { tag, fn }
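+
+/*
+ * Typical use, mirroring arch/arm/kernel/atags_parse.c: register a parser
+ * for one ATAG, e.g.
+ *
+ *	static int __init parse_tag_mem32(const struct tag *tag)
+ *	{
+ *		return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
+ *	}
+ *	__tagtable(ATAG_MEM, parse_tag_mem32);
+ *
+ * which drops the entry into the .taglist.init section walked at boot.
+ */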
+
+extern int arm_add_memory(u64 start, u64 size);
+extern __printf(1, 2) void early_print(const char *str, ...);
+extern void dump_machine_table(void);
+
+#ifdef CONFIG_ATAGS_PROC
+extern void save_atags(const struct tag *tags);
+#else
+static inline void save_atags(const struct tag *tags) { }
+#endif
+
+struct machine_desc;
+void init_default_cache_policy(unsigned long);
+void paging_init(const struct machine_desc *desc);
+void early_mm_init(const struct machine_desc *);
+void adjust_lowmem_bounds(void);
+void setup_dma_zone(const struct machine_desc *desc);
+
+#endif
diff --git a/arch/arm/include/asm/shmparam.h b/arch/arm/include/asm/shmparam.h
new file mode 100644
index 0000000000..367a9dac61
--- /dev/null
+++ b/arch/arm/include/asm/shmparam.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASMARM_SHMPARAM_H
+#define _ASMARM_SHMPARAM_H
+
+/*
+ * This should be the size of the virtually indexed cache/ways,
+ * or page size, whichever is greater since the cache aliases
+ * every size/ways bytes.
+ */
+#define SHMLBA (4 * PAGE_SIZE) /* attach addr a multiple of this */
+
+/*
+ * Enforce SHMLBA in shmat
+ */
+#define __ARCH_FORCE_SHMLBA
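+
+/*
+ * Worked example: with 4KiB pages, SHMLBA is 16KiB, so shmat() attach
+ * addresses must be (or are rounded to) 16KiB aligned, keeping all
+ * mappings of a segment at the same cache-colour offset on VIVT caches.
+ */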
+
+#endif /* _ASMARM_SHMPARAM_H */
diff --git a/arch/arm/include/asm/signal.h b/arch/arm/include/asm/signal.h
new file mode 100644
index 0000000000..8b84092d15
--- /dev/null
+++ b/arch/arm/include/asm/signal.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASMARM_SIGNAL_H
+#define _ASMARM_SIGNAL_H
+
+#include <uapi/asm/signal.h>
+
+/* Most things should be clean enough to redefine this at will, if care
+ is taken to make libc match. */
+
+#define _NSIG 64
+#define _NSIG_BPW 32
+#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
+
+typedef unsigned long old_sigset_t; /* at least 32 bits */
+
+typedef struct {
+ unsigned long sig[_NSIG_WORDS];
+} sigset_t;
+
+#define __ARCH_UAPI_SA_FLAGS (SA_THIRTYTWO | SA_RESTORER)
+
+#define __ARCH_HAS_SA_RESTORER
+
+#include <asm/sigcontext.h>
+
+void do_rseq_syscall(struct pt_regs *regs);
+int do_work_pending(struct pt_regs *regs, unsigned int thread_flags,
+ int syscall);
+
+#endif
diff --git a/arch/arm/include/asm/simd.h b/arch/arm/include/asm/simd.h
new file mode 100644
index 0000000000..82191dbd7e
--- /dev/null
+++ b/arch/arm/include/asm/simd.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/hardirq.h>
+
+static __must_check inline bool may_use_simd(void)
+{
+ return IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && !in_hardirq();
+}
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
new file mode 100644
index 0000000000..8c05a7f374
--- /dev/null
+++ b/arch/arm/include/asm/smp.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/smp.h
+ *
+ * Copyright (C) 2004-2005 ARM Ltd.
+ */
+#ifndef __ASM_ARM_SMP_H
+#define __ASM_ARM_SMP_H
+
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <linux/thread_info.h>
+
+#ifndef CONFIG_SMP
+# error "<asm/smp.h> included in non-SMP build"
+#endif
+
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+
+struct seq_file;
+
+/*
+ * generate IPI list text
+ */
+extern void show_ipi_list(struct seq_file *, int);
+
+/*
+ * Called from C code, this handles an IPI.
+ */
+void handle_IPI(int ipinr, struct pt_regs *regs);
+
+/*
+ * Setup the set of possible CPUs (via set_cpu_possible)
+ */
+extern void smp_init_cpus(void);
+
+/*
+ * Register IPI interrupts with the arch SMP code
+ */
+extern void set_smp_ipi_range(int ipi_base, int nr_ipi);
+
+/*
+ * Called from platform specific assembly code, this is the
+ * secondary CPU entry point.
+ */
+asmlinkage void secondary_start_kernel(struct task_struct *task);
+
+
+/*
+ * Initial data for bringing up a secondary CPU.
+ */
+struct secondary_data {
+ union {
+ struct mpu_rgn_info *mpu_rgn_info;
+ u64 pgdir;
+ };
+ unsigned long swapper_pg_dir;
+ void *stack;
+ struct task_struct *task;
+};
+extern struct secondary_data secondary_data;
+extern void secondary_startup(void);
+extern void secondary_startup_arm(void);
+
+extern int __cpu_disable(void);
+
+static inline void __cpu_die(unsigned int cpu) { }
+
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);
+
+extern int register_ipi_completion(struct completion *completion, int cpu);
+
+struct smp_operations {
+#ifdef CONFIG_SMP
+ /*
+ * Setup the set of possible CPUs (via set_cpu_possible)
+ */
+ void (*smp_init_cpus)(void);
+ /*
+ * Initialize cpu_possible map, and enable coherency
+ */
+ void (*smp_prepare_cpus)(unsigned int max_cpus);
+
+ /*
+ * Perform platform specific initialisation of the specified CPU.
+ */
+ void (*smp_secondary_init)(unsigned int cpu);
+ /*
+ * Boot a secondary CPU, and assign it the specified idle task.
+ * This also gives us the initial stack to use for this CPU.
+ */
+ int (*smp_boot_secondary)(unsigned int cpu, struct task_struct *idle);
+#ifdef CONFIG_HOTPLUG_CPU
+ int (*cpu_kill)(unsigned int cpu);
+ void (*cpu_die)(unsigned int cpu);
+ bool (*cpu_can_disable)(unsigned int cpu);
+ int (*cpu_disable)(unsigned int cpu);
+#endif
+#endif
+};
+
+struct of_cpu_method {
+ const char *method;
+ const struct smp_operations *ops;
+};
+
+#define CPU_METHOD_OF_DECLARE(name, _method, _ops) \
+ static const struct of_cpu_method __cpu_method_of_table_##name \
+ __used __section("__cpu_method_of_table") \
+ = { .method = _method, .ops = _ops }
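+
+/*
+ * Illustrative use (names hypothetical): a platform advertising
+ * "vendor,foo-smp" as its DT enable-method would register its ops with
+ *
+ *	CPU_METHOD_OF_DECLARE(foo_smp, "vendor,foo-smp", &foo_smp_ops);
+ *
+ * placing the entry in the __cpu_method_of_table scanned at SMP init.
+ */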
+/*
+ * set platform specific SMP operations
+ */
+extern void smp_set_ops(const struct smp_operations *);
+
+#endif /* ifndef __ASM_ARM_SMP_H */
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
new file mode 100644
index 0000000000..f2c36acf98
--- /dev/null
+++ b/arch/arm/include/asm/smp_plat.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ARM-specific SMP header; this contains our implementation
+ * details.
+ */
+#ifndef __ASMARM_SMP_PLAT_H
+#define __ASMARM_SMP_PLAT_H
+
+#include <linux/cpumask.h>
+#include <linux/err.h>
+
+#include <asm/cpu.h>
+#include <asm/cputype.h>
+
+/*
+ * Return true if we are running on a SMP platform
+ */
+static inline bool is_smp(void)
+{
+#ifndef CONFIG_SMP
+ return false;
+#elif defined(CONFIG_SMP_ON_UP)
+ extern unsigned int smp_on_up;
+ return !!smp_on_up;
+#else
+ return true;
+#endif
+}
+
+/**
+ * smp_cpuid_part() - return part id for a given cpu
+ * @cpu: logical cpu id.
+ *
+ * Return: part id of logical cpu passed as argument.
+ */
+static inline unsigned int smp_cpuid_part(int cpu)
+{
+ struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpu);
+
+ return is_smp() ? cpu_info->cpuid & ARM_CPU_PART_MASK :
+ read_cpuid_part();
+}
+
+/* all SMP configurations have the extended CPUID registers */
+#ifndef CONFIG_MMU
+#define tlb_ops_need_broadcast() 0
+#else
+static inline int tlb_ops_need_broadcast(void)
+{
+ if (!is_smp())
+ return 0;
+
+ return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2;
+}
+#endif
+
+#if !defined(CONFIG_SMP) || __LINUX_ARM_ARCH__ >= 7
+#define cache_ops_need_broadcast() 0
+#else
+static inline int cache_ops_need_broadcast(void)
+{
+ if (!is_smp())
+ return 0;
+
+ return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 1;
+}
+#endif
+
+/*
+ * Logical CPU mapping.
+ */
+extern u32 __cpu_logical_map[];
+#define cpu_logical_map(cpu) __cpu_logical_map[cpu]
+/*
+ * Retrieve logical cpu index corresponding to a given MPIDR[23:0]
+ * - mpidr: MPIDR[23:0] to be used for the look-up
+ *
+ * Returns the cpu logical index or -EINVAL on look-up error
+ */
+static inline int get_logical_index(u32 mpidr)
+{
+ int cpu;
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++)
+ if (cpu_logical_map(cpu) == mpidr)
+ return cpu;
+ return -EINVAL;
+}
+
+/*
+ * NOTE! Assembly code relies on the following structure
+ * memory layout in order to carry out a load-multiple
+ * from its base address. For more information, see
+ * arch/arm/kernel/sleep.S.
+ */
+struct mpidr_hash {
+ u32 mask; /* used by sleep.S */
+ u32 shift_aff[3]; /* used by sleep.S */
+ u32 bits;
+};
+
+extern struct mpidr_hash mpidr_hash;
+
+static inline u32 mpidr_hash_size(void)
+{
+ return 1 << mpidr_hash.bits;
+}
+
+extern int platform_can_secondary_boot(void);
+extern int platform_can_cpu_hotplug(void);
+
+#ifdef CONFIG_HOTPLUG_CPU
+extern int platform_can_hotplug_cpu(unsigned int cpu);
+#else
+static inline int platform_can_hotplug_cpu(unsigned int cpu)
+{
+ return 0;
+}
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/smp_scu.h b/arch/arm/include/asm/smp_scu.h
new file mode 100644
index 0000000000..b818e5d0cd
--- /dev/null
+++ b/arch/arm/include/asm/smp_scu.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASMARM_ARCH_SCU_H
+#define __ASMARM_ARCH_SCU_H
+
+#define SCU_PM_NORMAL 0
+#define SCU_PM_DORMANT 2
+#define SCU_PM_POWEROFF 3
+
+#ifndef __ASSEMBLER__
+
+#include <linux/errno.h>
+#include <asm/cputype.h>
+
+static inline bool scu_a9_has_base(void)
+{
+ return read_cpuid_part() == ARM_CPU_PART_CORTEX_A9;
+}
+
+static inline unsigned long scu_a9_get_base(void)
+{
+ unsigned long pa;
+
+ asm("mrc p15, 4, %0, c15, c0, 0" : "=r" (pa));
+
+ return pa;
+}
+
+#ifdef CONFIG_HAVE_ARM_SCU
+unsigned int scu_get_core_count(void __iomem *);
+int scu_power_mode(void __iomem *, unsigned int);
+int scu_cpu_power_enable(void __iomem *, unsigned int);
+int scu_get_cpu_power_mode(void __iomem *scu_base, unsigned int logical_cpu);
+#else
+static inline unsigned int scu_get_core_count(void __iomem *scu_base)
+{
+ return 0;
+}
+static inline int scu_power_mode(void __iomem *scu_base, unsigned int mode)
+{
+ return -EINVAL;
+}
+static inline int scu_cpu_power_enable(void __iomem *scu_base,
+ unsigned int mode)
+{
+ return -EINVAL;
+}
+static inline int scu_get_cpu_power_mode(void __iomem *scu_base,
+ unsigned int logical_cpu)
+{
+ return -EINVAL;
+}
+#endif
+
+#if defined(CONFIG_SMP) && defined(CONFIG_HAVE_ARM_SCU)
+void scu_enable(void __iomem *scu_base);
+#else
+static inline void scu_enable(void __iomem *scu_base) {}
+#endif
+
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/smp_twd.h b/arch/arm/include/asm/smp_twd.h
new file mode 100644
index 0000000000..c729d2113a
--- /dev/null
+++ b/arch/arm/include/asm/smp_twd.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASMARM_SMP_TWD_H
+#define __ASMARM_SMP_TWD_H
+
+#define TWD_TIMER_LOAD 0x00
+#define TWD_TIMER_COUNTER 0x04
+#define TWD_TIMER_CONTROL 0x08
+#define TWD_TIMER_INTSTAT 0x0C
+
+#define TWD_WDOG_LOAD 0x20
+#define TWD_WDOG_COUNTER 0x24
+#define TWD_WDOG_CONTROL 0x28
+#define TWD_WDOG_INTSTAT 0x2C
+#define TWD_WDOG_RESETSTAT 0x30
+#define TWD_WDOG_DISABLE 0x34
+
+#define TWD_TIMER_CONTROL_ENABLE (1 << 0)
+#define TWD_TIMER_CONTROL_ONESHOT (0 << 1)
+#define TWD_TIMER_CONTROL_PERIODIC (1 << 1)
+#define TWD_TIMER_CONTROL_IT_ENABLE (1 << 2)
+
+#endif
diff --git a/arch/arm/include/asm/sparsemem.h b/arch/arm/include/asm/sparsemem.h
new file mode 100644
index 0000000000..421e341533
--- /dev/null
+++ b/arch/arm/include/asm/sparsemem.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ASMARM_SPARSEMEM_H
+#define ASMARM_SPARSEMEM_H
+
+#include <asm/page.h>
+
+/*
+ * Two definitions are required for sparsemem:
+ *
+ * MAX_PHYSMEM_BITS: The number of physical address bits required
+ * to address the last byte of memory.
+ *
+ * SECTION_SIZE_BITS: The number of physical address bits to cover
+ * the maximum amount of memory in a section.
+ *
+ * E.g., if you have 2 banks of up to 64MB at 0x80000000, 0x84000000,
+ * then MAX_PHYSMEM_BITS is 32, SECTION_SIZE_BITS is 26.
+ *
+ * These can be overridden in your mach/memory.h.
+ */
+#if !defined(MAX_PHYSMEM_BITS) || !defined(SECTION_SIZE_BITS)
+#define MAX_PHYSMEM_BITS 36
+#define SECTION_SIZE_BITS 28
+#endif
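+
+/*
+ * With the defaults above, each section is 1 << 28 bytes (256MiB) and up
+ * to 1 << (36 - 28) == 256 sections cover the 64GiB (36-bit, LPAE-sized)
+ * physical address space.
+ */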
+
+#endif
diff --git a/arch/arm/include/asm/spectre.h b/arch/arm/include/asm/spectre.h
new file mode 100644
index 0000000000..d9c28b3b6b
--- /dev/null
+++ b/arch/arm/include/asm/spectre.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_SPECTRE_H
+#define __ASM_SPECTRE_H
+
+enum {
+ SPECTRE_UNAFFECTED,
+ SPECTRE_MITIGATED,
+ SPECTRE_VULNERABLE,
+};
+
+enum {
+ __SPECTRE_V2_METHOD_BPIALL,
+ __SPECTRE_V2_METHOD_ICIALLU,
+ __SPECTRE_V2_METHOD_SMC,
+ __SPECTRE_V2_METHOD_HVC,
+ __SPECTRE_V2_METHOD_LOOP8,
+};
+
+enum {
+ SPECTRE_V2_METHOD_BPIALL = BIT(__SPECTRE_V2_METHOD_BPIALL),
+ SPECTRE_V2_METHOD_ICIALLU = BIT(__SPECTRE_V2_METHOD_ICIALLU),
+ SPECTRE_V2_METHOD_SMC = BIT(__SPECTRE_V2_METHOD_SMC),
+ SPECTRE_V2_METHOD_HVC = BIT(__SPECTRE_V2_METHOD_HVC),
+ SPECTRE_V2_METHOD_LOOP8 = BIT(__SPECTRE_V2_METHOD_LOOP8),
+};
+
+#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES
+void spectre_v2_update_state(unsigned int state, unsigned int methods);
+#else
+static inline void spectre_v2_update_state(unsigned int state,
+ unsigned int methods)
+{}
+#endif
+
+int spectre_bhb_update_vectors(unsigned int method);
+
+void cpu_v7_ca8_ibe(void);
+void cpu_v7_ca15_ibe(void);
+void cpu_v7_bugs_init(void);
+
+#endif
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
new file mode 100644
index 0000000000..f610a773f2
--- /dev/null
+++ b/arch/arm/include/asm/spinlock.h
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_SPINLOCK_H
+#define __ASM_SPINLOCK_H
+
+#if __LINUX_ARM_ARCH__ < 6
+#error SMP not supported on pre-ARMv6 CPUs
+#endif
+
+#include <linux/prefetch.h>
+#include <asm/barrier.h>
+#include <asm/processor.h>
+
+/*
+ * sev and wfe are ARMv6K extensions. Uniprocessor ARMv6 may not have the K
+ * extensions, so when running on UP, we have to patch these instructions away.
+ */
+#ifdef CONFIG_THUMB2_KERNEL
+/*
+ * For Thumb-2, special care is needed to ensure that the conditional WFE
+ * instruction really does assemble to exactly 4 bytes (as required by
+ * the SMP_ON_UP fixup code). By itself "wfene" might cause the
+ * assembler to insert an extra (16-bit) IT instruction, depending on the
+ * presence or absence of neighbouring conditional instructions.
+ *
+ * To avoid this unpredictability, an appropriate IT is inserted explicitly:
+ * the assembler won't change IT instructions which are explicitly present
+ * in the input.
+ */
+#define WFE(cond) __ALT_SMP_ASM( \
+ "it " cond "\n\t" \
+ "wfe" cond ".n", \
+ \
+ "nop.w" \
+)
+#else
+#define WFE(cond) __ALT_SMP_ASM("wfe" cond, "nop")
+#endif
+
+#define SEV __ALT_SMP_ASM(WASM(sev), WASM(nop))
+
+static inline void dsb_sev(void)
+{
+ dsb(ishst);
+ __asm__(SEV);
+}
+
+/*
+ * ARMv6 ticket-based spin-locking.
+ *
+ * A memory barrier is required after we get a lock, and before we
+ * release it, because V6 CPUs are assumed to have weakly ordered
+ * memory.
+ */
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ unsigned long tmp;
+ u32 newval;
+ arch_spinlock_t lockval;
+
+ prefetchw(&lock->slock);
+ __asm__ __volatile__(
+"1: ldrex %0, [%3]\n"
+" add %1, %0, %4\n"
+" strex %2, %1, [%3]\n"
+" teq %2, #0\n"
+" bne 1b"
+ : "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
+ : "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
+ : "cc");
+
+ while (lockval.tickets.next != lockval.tickets.owner) {
+ wfe();
+ lockval.tickets.owner = READ_ONCE(lock->tickets.owner);
+ }
+
+ smp_mb();
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ unsigned long contended, res;
+ u32 slock;
+
+ prefetchw(&lock->slock);
+ do {
+ __asm__ __volatile__(
+ " ldrex %0, [%3]\n"
+ " mov %2, #0\n"
+ " subs %1, %0, %0, ror #16\n"
+ " addeq %0, %0, %4\n"
+ " strexeq %2, %0, [%3]"
+ : "=&r" (slock), "=&r" (contended), "=&r" (res)
+ : "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
+ : "cc");
+ } while (res);
+
+ if (!contended) {
+ smp_mb();
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ smp_mb();
+ lock->tickets.owner++;
+ dsb_sev();
+}
+
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+ return lock.tickets.owner == lock.tickets.next;
+}
+
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+ return !arch_spin_value_unlocked(READ_ONCE(*lock));
+}
+
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
+{
+ struct __raw_tickets tickets = READ_ONCE(lock->tickets);
+ return (tickets.next - tickets.owner) > 1;
+}
+#define arch_spin_is_contended arch_spin_is_contended
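+
+/*
+ * Worked example of the ticket encoding: on little-endian, slock
+ * 0x00030001 means owner == 1 and next == 3; ticket 1 holds the lock,
+ * ticket 2 is spinning, and 3 is the next ticket to be issued, so
+ * arch_spin_is_contended() sees (3 - 1) > 1 and returns true. Each
+ * unlock increments owner until it catches up with next.
+ */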
+
+/*
+ * RWLOCKS
+ *
+ *
+ * Write locks are easy - we just set bit 31. When unlocking, we can
+ * just write zero since the lock is exclusively held.
+ */
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+ unsigned long tmp;
+
+ prefetchw(&rw->lock);
+ __asm__ __volatile__(
+"1: ldrex %0, [%1]\n"
+" teq %0, #0\n"
+ WFE("ne")
+" strexeq %0, %2, [%1]\n"
+" teq %0, #0\n"
+" bne 1b"
+ : "=&r" (tmp)
+ : "r" (&rw->lock), "r" (0x80000000)
+ : "cc");
+
+ smp_mb();
+}
+
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+ unsigned long contended, res;
+
+ prefetchw(&rw->lock);
+ do {
+ __asm__ __volatile__(
+ " ldrex %0, [%2]\n"
+ " mov %1, #0\n"
+ " teq %0, #0\n"
+ " strexeq %1, %3, [%2]"
+ : "=&r" (contended), "=&r" (res)
+ : "r" (&rw->lock), "r" (0x80000000)
+ : "cc");
+ } while (res);
+
+ if (!contended) {
+ smp_mb();
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+ smp_mb();
+
+ __asm__ __volatile__(
+ "str %1, [%0]\n"
+ :
+ : "r" (&rw->lock), "r" (0)
+ : "cc");
+
+ dsb_sev();
+}
+
+/*
+ * Read locks are a bit more hairy:
+ * - Exclusively load the lock value.
+ * - Increment it.
+ * - Store new lock value if positive, and we still own this location.
+ * If the value is negative, we've already failed.
+ * - If we failed to store the value, we want a negative result.
+ * - If we failed, try again.
+ * Unlocking is similarly hairy. We may have multiple read locks
+ * currently active. However, we know we won't have any write
+ * locks.
+ */
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+ unsigned long tmp, tmp2;
+
+ prefetchw(&rw->lock);
+ __asm__ __volatile__(
+" .syntax unified\n"
+"1: ldrex %0, [%2]\n"
+" adds %0, %0, #1\n"
+" strexpl %1, %0, [%2]\n"
+ WFE("mi")
+" rsbspl %0, %1, #0\n"
+" bmi 1b"
+ : "=&r" (tmp), "=&r" (tmp2)
+ : "r" (&rw->lock)
+ : "cc");
+
+ smp_mb();
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+ unsigned long tmp, tmp2;
+
+ smp_mb();
+
+ prefetchw(&rw->lock);
+ __asm__ __volatile__(
+"1: ldrex %0, [%2]\n"
+" sub %0, %0, #1\n"
+" strex %1, %0, [%2]\n"
+" teq %1, #0\n"
+" bne 1b"
+ : "=&r" (tmp), "=&r" (tmp2)
+ : "r" (&rw->lock)
+ : "cc");
+
+ if (tmp == 0)
+ dsb_sev();
+}
+
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+ unsigned long contended, res;
+
+ prefetchw(&rw->lock);
+ do {
+ __asm__ __volatile__(
+ " ldrex %0, [%2]\n"
+ " mov %1, #0\n"
+ " adds %0, %0, #1\n"
+ " strexpl %1, %0, [%2]"
+ : "=&r" (contended), "=&r" (res)
+ : "r" (&rw->lock)
+ : "cc");
+ } while (res);
+
+ /* If the lock is negative, then it is already held for write. */
+ if (contended < 0x80000000) {
+ smp_mb();
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h
new file mode 100644
index 0000000000..0c14b36ef1
--- /dev/null
+++ b/arch/arm/include/asm/spinlock_types.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
+# error "please don't include this file directly"
+#endif
+
+#define TICKET_SHIFT 16
+
+typedef struct {
+ union {
+ u32 slock;
+ struct __raw_tickets {
+#ifdef __ARMEB__
+ u16 next;
+ u16 owner;
+#else
+ u16 owner;
+ u16 next;
+#endif
+ } tickets;
+ };
+} arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED { { 0 } }
+
+typedef struct {
+ u32 lock;
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
+
+#endif
diff --git a/arch/arm/include/asm/stackprotector.h b/arch/arm/include/asm/stackprotector.h
new file mode 100644
index 0000000000..0bd4979759
--- /dev/null
+++ b/arch/arm/include/asm/stackprotector.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * GCC stack protector support.
+ *
+ * Stack protector works by putting a predefined pattern at the start of
+ * the stack frame and verifying that it hasn't been overwritten when
+ * returning from the function. The pattern is called the stack canary,
+ * and GCC expects it to be defined by a global variable called
+ * "__stack_chk_guard" on ARM. This prevents SMP systems from using a
+ * different value for each task unless we enable a GCC plugin that
+ * replaces these symbol references with references to each task's own
+ * value.
+ */
+
+#ifndef _ASM_STACKPROTECTOR_H
+#define _ASM_STACKPROTECTOR_H 1
+
+#include <asm/thread_info.h>
+
+extern unsigned long __stack_chk_guard;
+
+/*
+ * Initialize the stackprotector canary value.
+ *
+ * NOTE: this must only be called from functions that never return,
+ * and it must always be inlined.
+ */
+static __always_inline void boot_init_stack_canary(void)
+{
+ unsigned long canary = get_random_canary();
+
+ current->stack_canary = canary;
+#ifndef CONFIG_STACKPROTECTOR_PER_TASK
+ __stack_chk_guard = current->stack_canary;
+#endif
+}
+
+#endif /* _ASM_STACKPROTECTOR_H */
diff --git a/arch/arm/include/asm/stacktrace.h b/arch/arm/include/asm/stacktrace.h
new file mode 100644
index 0000000000..360f0d2406
--- /dev/null
+++ b/arch/arm/include/asm/stacktrace.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_STACKTRACE_H
+#define __ASM_STACKTRACE_H
+
+#include <asm/ptrace.h>
+#include <linux/llist.h>
+
+struct stackframe {
+ /*
+ * FP member should hold R7 when CONFIG_THUMB2_KERNEL is enabled
+ * and R11 otherwise.
+ */
+ unsigned long fp;
+ unsigned long sp;
+ unsigned long lr;
+ unsigned long pc;
+
+ /* address of the LR value on the stack */
+ unsigned long *lr_addr;
+#ifdef CONFIG_KRETPROBES
+ struct llist_node *kr_cur;
+ struct task_struct *tsk;
+#endif
+#ifdef CONFIG_UNWINDER_FRAME_POINTER
+ bool ex_frame;
+#endif
+};
+
+static __always_inline
+void arm_get_current_stackframe(struct pt_regs *regs, struct stackframe *frame)
+{
+ frame->fp = frame_pointer(regs);
+ frame->sp = regs->ARM_sp;
+ frame->lr = regs->ARM_lr;
+ frame->pc = regs->ARM_pc;
+#ifdef CONFIG_KRETPROBES
+ frame->kr_cur = NULL;
+ frame->tsk = current;
+#endif
+#ifdef CONFIG_UNWINDER_FRAME_POINTER
+ frame->ex_frame = in_entry_text(frame->pc);
+#endif
+}
+
+extern int unwind_frame(struct stackframe *frame);
+extern void walk_stackframe(struct stackframe *frame,
+ bool (*fn)(void *, unsigned long), void *data);
+extern void dump_mem(const char *lvl, const char *str, unsigned long bottom,
+ unsigned long top);
+extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
+ const char *loglvl);
+
+#endif /* __ASM_STACKTRACE_H */
diff --git a/arch/arm/include/asm/string.h b/arch/arm/include/asm/string.h
new file mode 100644
index 0000000000..6c607c68f3
--- /dev/null
+++ b/arch/arm/include/asm/string.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_ARM_STRING_H
+#define __ASM_ARM_STRING_H
+
+/*
+ * We don't do inline string functions, since the
+ * optimised inline asm versions are not small.
+ *
+ * The __underscore versions of some functions are for KASan to be able
+ * to replace them with instrumented versions.
+ */
+
+#define __HAVE_ARCH_STRRCHR
+extern char * strrchr(const char * s, int c);
+
+#define __HAVE_ARCH_STRCHR
+extern char * strchr(const char * s, int c);
+
+#define __HAVE_ARCH_MEMCPY
+extern void * memcpy(void *, const void *, __kernel_size_t);
+extern void *__memcpy(void *dest, const void *src, __kernel_size_t n);
+
+#define __HAVE_ARCH_MEMMOVE
+extern void * memmove(void *, const void *, __kernel_size_t);
+extern void *__memmove(void *dest, const void *src, __kernel_size_t n);
+
+#define __HAVE_ARCH_MEMCHR
+extern void * memchr(const void *, int, __kernel_size_t);
+
+#define __HAVE_ARCH_MEMSET
+extern void * memset(void *, int, __kernel_size_t);
+extern void *__memset(void *s, int c, __kernel_size_t n);
+
+#define __HAVE_ARCH_MEMSET32
+extern void *__memset32(uint32_t *, uint32_t v, __kernel_size_t);
+static inline void *memset32(uint32_t *p, uint32_t v, __kernel_size_t n)
+{
+ return __memset32(p, v, n * 4);
+}
+
+#define __HAVE_ARCH_MEMSET64
+extern void *__memset64(uint64_t *, uint32_t low, __kernel_size_t, uint32_t hi);
+static inline void *memset64(uint64_t *p, uint64_t v, __kernel_size_t n)
+{
+ return __memset64(p, v, n * 8, v >> 32);
+}
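+
+/*
+ * The counts passed to the __memset32/__memset64 helpers are in bytes,
+ * hence the n * 4 and n * 8 above; e.g. memset32(buf, 0xdeadbeef, 16)
+ * fills 16 u32 slots (64 bytes). For __memset64 the 64-bit value is split
+ * into low/high 32-bit halves to fit the ARM calling convention.
+ */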
+
+/*
+ * For files that are not instrumented (e.g. mm/slub.c) we
+ * must use non-instrumented versions of the mem*
+ * functions named __memcpy() etc. All such kernel code has
+ * been tagged with KASAN_SANITIZE_file.o = n, which means
+ * that the address sanitization argument isn't passed to the
+ * compiler, and __SANITIZE_ADDRESS__ is not set. As a result
+ * these defines kick in.
+ */
+#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+#define memcpy(dst, src, len) __memcpy(dst, src, len)
+#define memmove(dst, src, len) __memmove(dst, src, len)
+#define memset(s, c, n) __memset(s, c, n)
+
+#ifndef __NO_FORTIFY
+#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
+#endif
+
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/suspend.h b/arch/arm/include/asm/suspend.h
new file mode 100644
index 0000000000..be81b9ca2e
--- /dev/null
+++ b/arch/arm/include/asm/suspend.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_ARM_SUSPEND_H
+#define __ASM_ARM_SUSPEND_H
+
+#include <linux/types.h>
+
+struct sleep_save_sp {
+ u32 *save_ptr_stash;
+ u32 save_ptr_stash_phys;
+};
+
+extern void cpu_resume(void);
+extern void cpu_resume_no_hyp(void);
+extern void cpu_resume_arm(void);
+extern int cpu_suspend(unsigned long, int (*)(unsigned long));
+extern void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr);
+
+#endif
diff --git a/arch/arm/include/asm/swab.h b/arch/arm/include/asm/swab.h
new file mode 100644
index 0000000000..c605182304
--- /dev/null
+++ b/arch/arm/include/asm/swab.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * arch/arm/include/asm/byteorder.h
+ *
+ * ARM Endian-ness. In little endian mode, the data bus is connected such
+ * that byte accesses appear as:
+ * 0 = d0...d7, 1 = d8...d15, 2 = d16...d23, 3 = d24...d31
+ * and word accesses (data or instruction) appear as:
+ * d0...d31
+ *
+ * When in big endian mode, byte accesses appear as:
+ * 0 = d24...d31, 1 = d16...d23, 2 = d8...d15, 3 = d0...d7
+ * and word accesses (data or instruction) appear as:
+ * d0...d31
+ */
+#ifndef __ASM_ARM_SWAB_H
+#define __ASM_ARM_SWAB_H
+
+#include <uapi/asm/swab.h>
+
+#if __LINUX_ARM_ARCH__ >= 6
+
+static inline __attribute_const__ __u32 __arch_swahb32(__u32 x)
+{
+ __asm__ ("rev16 %0, %1" : "=r" (x) : "r" (x));
+ return x;
+}
+#define __arch_swahb32 __arch_swahb32
+#define __arch_swab16(x) ((__u16)__arch_swahb32(x))
+
+static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
+{
+ __asm__ ("rev %0, %1" : "=r" (x) : "r" (x));
+ return x;
+}
+#define __arch_swab32 __arch_swab32
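+
+/*
+ * Worked example: rev16 swaps the bytes within each halfword, so
+ * __arch_swahb32(0x11223344) == 0x22114433, while rev reverses all four
+ * bytes, giving __arch_swab32(0x11223344) == 0x44332211.
+ */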
+
+#endif
+#endif
diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h
new file mode 100644
index 0000000000..9372348516
--- /dev/null
+++ b/arch/arm/include/asm/switch_to.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_ARM_SWITCH_TO_H
+#define __ASM_ARM_SWITCH_TO_H
+
+#include <linux/thread_info.h>
+#include <asm/smp_plat.h>
+
+/*
+ * For v7 SMP cores running a preemptible kernel we may be pre-empted
+ * during a TLB maintenance operation, so execute an inner-shareable dsb
+ * to ensure that the maintenance completes in case we migrate to another
+ * CPU.
+ */
+#if defined(CONFIG_PREEMPTION) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
+#define __complete_pending_tlbi() dsb(ish)
+#else
+#define __complete_pending_tlbi()
+#endif
+
+/*
+ * switch_to(prev, next) should switch from task `prev' to `next';
+ * `prev' will never be the same as `next'. schedule() itself
+ * contains the memory barrier to tell GCC not to cache `current'.
+ */
+extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);
+
+#define switch_to(prev, next, last) \
+do { \
+ __complete_pending_tlbi(); \
+ if (IS_ENABLED(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || is_smp()) \
+ __this_cpu_write(__entry_task, next); \
+ last = __switch_to(prev, task_thread_info(prev), task_thread_info(next)); \
+} while (0)
+
+#endif /* __ASM_ARM_SWITCH_TO_H */
diff --git a/arch/arm/include/asm/sync_bitops.h b/arch/arm/include/asm/sync_bitops.h
new file mode 100644
index 0000000000..f46b3c570f
--- /dev/null
+++ b/arch/arm/include/asm/sync_bitops.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_SYNC_BITOPS_H__
+#define __ASM_SYNC_BITOPS_H__
+
+#include <asm/bitops.h>
+
+/* sync_bitops functions are equivalent to the SMP implementation of the
+ * original functions, independently of whether CONFIG_SMP is defined.
+ *
+ * We need them because _set_bit etc are not SMP safe if !CONFIG_SMP. But
+ * under Xen you might be communicating with a completely external entity
+ * that might be on another CPU (e.g. two uniprocessor guests communicating
+ * via event channels and grant tables). So we need a variant of the bit
+ * ops which are SMP safe even on a UP kernel.
+ */
+
+/*
+ * Unordered
+ */
+
+#define sync_set_bit(nr, p) _set_bit(nr, p)
+#define sync_clear_bit(nr, p) _clear_bit(nr, p)
+#define sync_change_bit(nr, p) _change_bit(nr, p)
+#define sync_test_bit(nr, addr) test_bit(nr, addr)
+
+/*
+ * Fully ordered
+ */
+
+int _sync_test_and_set_bit(int nr, volatile unsigned long * p);
+#define sync_test_and_set_bit(nr, p) _sync_test_and_set_bit(nr, p)
+
+int _sync_test_and_clear_bit(int nr, volatile unsigned long * p);
+#define sync_test_and_clear_bit(nr, p) _sync_test_and_clear_bit(nr, p)
+
+int _sync_test_and_change_bit(int nr, volatile unsigned long * p);
+#define sync_test_and_change_bit(nr, p) _sync_test_and_change_bit(nr, p)
+
+#define arch_sync_cmpxchg(ptr, old, new) \
+({ \
+ __typeof__(*(ptr)) __ret; \
+ __smp_mb__before_atomic(); \
+ __ret = arch_cmpxchg_relaxed((ptr), (old), (new)); \
+ __smp_mb__after_atomic(); \
+ __ret; \
+})
+
+#endif
diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h
new file mode 100644
index 0000000000..fe4326d938
--- /dev/null
+++ b/arch/arm/include/asm/syscall.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Access to user system call parameters and results
+ *
+ * See asm-generic/syscall.h for descriptions of what we must do here.
+ */
+
+#ifndef _ASM_ARM_SYSCALL_H
+#define _ASM_ARM_SYSCALL_H
+
+#include <uapi/linux/audit.h> /* for AUDIT_ARCH_* */
+#include <linux/elf.h> /* for ELF_EM */
+#include <linux/err.h>
+#include <linux/sched.h>
+
+#include <asm/unistd.h>
+
+#define NR_syscalls (__NR_syscalls)
+
+extern const unsigned long sys_call_table[];
+
+static inline int syscall_get_nr(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ if (IS_ENABLED(CONFIG_AEABI) && !IS_ENABLED(CONFIG_OABI_COMPAT))
+ return task_thread_info(task)->abi_syscall;
+
+ if (task_thread_info(task)->abi_syscall == -1)
+ return -1;
+
+ return task_thread_info(task)->abi_syscall & __NR_SYSCALL_MASK;
+}
+
+static inline bool __in_oabi_syscall(struct task_struct *task)
+{
+ return IS_ENABLED(CONFIG_OABI_COMPAT) &&
+ (task_thread_info(task)->abi_syscall & __NR_OABI_SYSCALL_BASE);
+}
+
+static inline bool in_oabi_syscall(void)
+{
+ return __in_oabi_syscall(current);
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ regs->ARM_r0 = regs->ARM_ORIG_r0;
+}
+
+static inline long syscall_get_error(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ unsigned long error = regs->ARM_r0;
+ return IS_ERR_VALUE(error) ? error : 0;
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->ARM_r0;
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+ struct pt_regs *regs,
+ int error, long val)
+{
+ regs->ARM_r0 = (long) error ? error : val;
+}
+
+#define SYSCALL_MAX_ARGS 7
+
+static inline void syscall_get_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned long *args)
+{
+ args[0] = regs->ARM_ORIG_r0;
+ args++;
+
+ memcpy(args, &regs->ARM_r0 + 1, 5 * sizeof(args[0]));
+}
+
+static inline int syscall_get_arch(struct task_struct *task)
+{
+ /* ARM tasks don't change audit architectures on the fly. */
+ return AUDIT_ARCH_ARM;
+}
+
+#endif /* _ASM_ARM_SYSCALL_H */
diff --git a/arch/arm/include/asm/syscalls.h b/arch/arm/include/asm/syscalls.h
new file mode 100644
index 0000000000..5912e7cffa
--- /dev/null
+++ b/arch/arm/include/asm/syscalls.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_SYSCALLS_H
+#define __ASM_SYSCALLS_H
+
+#include <linux/linkage.h>
+#include <linux/types.h>
+
+struct pt_regs;
+asmlinkage int sys_sigreturn(struct pt_regs *regs);
+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
+asmlinkage long sys_arm_fadvise64_64(int fd, int advice,
+ loff_t offset, loff_t len);
+
+struct oldabi_stat64;
+asmlinkage long sys_oabi_stat64(const char __user * filename,
+ struct oldabi_stat64 __user * statbuf);
+asmlinkage long sys_oabi_lstat64(const char __user * filename,
+ struct oldabi_stat64 __user * statbuf);
+asmlinkage long sys_oabi_fstat64(unsigned long fd,
+ struct oldabi_stat64 __user * statbuf);
+asmlinkage long sys_oabi_fstatat64(int dfd,
+ const char __user *filename,
+ struct oldabi_stat64 __user *statbuf,
+ int flag);
+asmlinkage long sys_oabi_fcntl64(unsigned int fd, unsigned int cmd,
+ unsigned long arg);
+struct oabi_epoll_event;
+asmlinkage long sys_oabi_epoll_ctl(int epfd, int op, int fd,
+ struct oabi_epoll_event __user *event);
+struct oabi_sembuf;
+struct old_timespec32;
+asmlinkage long sys_oabi_semtimedop(int semid,
+ struct oabi_sembuf __user *tsops,
+ unsigned nsops,
+ const struct old_timespec32 __user *timeout);
+asmlinkage long sys_oabi_semop(int semid, struct oabi_sembuf __user *tsops,
+ unsigned nsops);
+asmlinkage int sys_oabi_ipc(uint call, int first, int second, int third,
+ void __user *ptr, long fifth);
+struct sockaddr;
+asmlinkage long sys_oabi_bind(int fd, struct sockaddr __user *addr, int addrlen);
+asmlinkage long sys_oabi_connect(int fd, struct sockaddr __user *addr, int addrlen);
+asmlinkage long sys_oabi_sendto(int fd, void __user *buff,
+ size_t len, unsigned flags,
+ struct sockaddr __user *addr,
+ int addrlen);
+struct user_msghdr;
+asmlinkage long sys_oabi_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
+asmlinkage long sys_oabi_socketcall(int call, unsigned long __user *args);
+
+#endif
diff --git a/arch/arm/include/asm/system_info.h b/arch/arm/include/asm/system_info.h
new file mode 100644
index 0000000000..ef7fdb588b
--- /dev/null
+++ b/arch/arm/include/asm/system_info.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_ARM_SYSTEM_INFO_H
+#define __ASM_ARM_SYSTEM_INFO_H
+
+#define CPU_ARCH_UNKNOWN 0
+#define CPU_ARCH_ARMv3 1
+#define CPU_ARCH_ARMv4 2
+#define CPU_ARCH_ARMv4T 3
+#define CPU_ARCH_ARMv5 4
+#define CPU_ARCH_ARMv5T 5
+#define CPU_ARCH_ARMv5TE 6
+#define CPU_ARCH_ARMv5TEJ 7
+#define CPU_ARCH_ARMv6 8
+#define CPU_ARCH_ARMv7 9
+#define CPU_ARCH_ARMv7M 10
+
+#ifndef __ASSEMBLY__
+
+/* information about the system we're running on */
+extern unsigned int system_rev;
+extern const char *system_serial;
+extern unsigned int system_serial_low;
+extern unsigned int system_serial_high;
+extern unsigned int mem_fclk_21285;
+
+extern int __pure cpu_architecture(void);
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_ARM_SYSTEM_INFO_H */
diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h
new file mode 100644
index 0000000000..98b3734037
--- /dev/null
+++ b/arch/arm/include/asm/system_misc.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_ARM_SYSTEM_MISC_H
+#define __ASM_ARM_SYSTEM_MISC_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+#include <linux/irqflags.h>
+#include <linux/reboot.h>
+#include <linux/percpu.h>
+
+extern void cpu_init(void);
+
+void soft_restart(unsigned long);
+extern void (*arm_pm_idle)(void);
+
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+typedef void (*harden_branch_predictor_fn_t)(void);
+DECLARE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);
+static inline void harden_branch_predictor(void)
+{
+ harden_branch_predictor_fn_t fn = per_cpu(harden_branch_predictor_fn,
+ smp_processor_id());
+ if (fn)
+ fn();
+}
+#else
+#define harden_branch_predictor() do { } while (0)
+#endif
+
+#define UDBG_UNDEFINED (1 << 0)
+#define UDBG_SYSCALL (1 << 1)
+#define UDBG_BADABORT (1 << 2)
+#define UDBG_SEGV (1 << 3)
+#define UDBG_BUS (1 << 4)
+
+extern unsigned int user_debug;
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_ARM_SYSTEM_MISC_H */
diff --git a/arch/arm/include/asm/tcm.h b/arch/arm/include/asm/tcm.h
new file mode 100644
index 0000000000..e1f7dca86a
--- /dev/null
+++ b/arch/arm/include/asm/tcm.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * Copyright (C) 2008-2009 ST-Ericsson AB
+ *
+ * Author: Rickard Andersson <rickard.andersson@stericsson.com>
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ */
+#ifndef __ASMARM_TCM_H
+#define __ASMARM_TCM_H
+
+#ifdef CONFIG_HAVE_TCM
+
+#include <linux/compiler.h>
+
+/* Tag variables with this */
+#define __tcmdata __section(".tcm.data")
+/* Tag constants with this */
+#define __tcmconst __section(".tcm.rodata")
+/* Tag functions inside TCM called from outside TCM with this */
+#define __tcmfunc __attribute__((long_call)) __section(".tcm.text") noinline
+/* Tag function inside TCM called from inside TCM with this */
+#define __tcmlocalfunc __section(".tcm.text")
+
+void *tcm_alloc(size_t len);
+void tcm_free(void *addr, size_t len);
+bool tcm_dtcm_present(void);
+bool tcm_itcm_present(void);
+
+void __init tcm_init(void);
+#else
+/* No TCM support, just blank inlines to be optimized out */
+static inline void tcm_init(void)
+{
+}
+#endif
+#endif
diff --git a/arch/arm/include/asm/therm.h b/arch/arm/include/asm/therm.h
new file mode 100644
index 0000000000..17b0bc9b5e
--- /dev/null
+++ b/arch/arm/include/asm/therm.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * arch/arm/include/asm/therm.h: Definitions for Dallas Semiconductor
+ * DS1620 thermometer driver (as used in the Rebel.com NetWinder)
+ */
+#ifndef __ASM_THERM_H
+#define __ASM_THERM_H
+
+/* ioctl numbers for /dev/therm */
+#define CMD_SET_THERMOSTATE 0x53
+#define CMD_GET_THERMOSTATE 0x54
+#define CMD_GET_STATUS 0x56
+#define CMD_GET_TEMPERATURE 0x57
+#define CMD_SET_THERMOSTATE2 0x58
+#define CMD_GET_THERMOSTATE2 0x59
+#define CMD_GET_TEMPERATURE2 0x5a
+#define CMD_GET_FAN 0x5b
+#define CMD_SET_FAN 0x5c
+
+#define FAN_OFF 0
+#define FAN_ON 1
+#define FAN_ALWAYS_ON 2
+
+struct therm {
+ int hi;
+ int lo;
+};
+
+#endif
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
new file mode 100644
index 0000000000..943ffcf069
--- /dev/null
+++ b/arch/arm/include/asm/thread_info.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/thread_info.h
+ *
+ * Copyright (C) 2002 Russell King.
+ */
+#ifndef __ASM_ARM_THREAD_INFO_H
+#define __ASM_ARM_THREAD_INFO_H
+
+#ifdef __KERNEL__
+
+#include <linux/compiler.h>
+#include <asm/fpstate.h>
+#include <asm/page.h>
+
+#ifdef CONFIG_KASAN
+/*
+ * KASan uses a lot of extra stack space so the thread size order needs to
+ * be increased.
+ */
+#define THREAD_SIZE_ORDER 2
+#else
+#define THREAD_SIZE_ORDER 1
+#endif
+#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
+#define THREAD_START_SP (THREAD_SIZE - 8)
+
+#ifdef CONFIG_VMAP_STACK
+#define THREAD_ALIGN (2 * THREAD_SIZE)
+#else
+#define THREAD_ALIGN THREAD_SIZE
+#endif
+
+#define OVERFLOW_STACK_SIZE SZ_4K
+
+#ifndef __ASSEMBLY__
+
+struct task_struct;
+
+DECLARE_PER_CPU(struct task_struct *, __entry_task);
+
+#include <asm/types.h>
+#include <asm/traps.h>
+
+struct cpu_context_save {
+ __u32 r4;
+ __u32 r5;
+ __u32 r6;
+ __u32 r7;
+ __u32 r8;
+ __u32 r9;
+ __u32 sl;
+ __u32 fp;
+ __u32 sp;
+ __u32 pc;
+ __u32 extra[2]; /* Xscale 'acc' register, etc */
+};
+
+/*
+ * low level task data that entry.S needs immediate access to.
+ * __switch_to() assumes cpu_context follows immediately after cpu_domain.
+ */
+struct thread_info {
+ unsigned long flags; /* low level flags */
+ int preempt_count; /* 0 => preemptable, <0 => bug */
+ __u32 cpu; /* cpu */
+ __u32 cpu_domain; /* cpu domain */
+ struct cpu_context_save cpu_context; /* cpu context */
+ __u32 abi_syscall; /* ABI type and syscall nr */
+ unsigned long tp_value[2]; /* TLS registers */
+ union fp_state fpstate __attribute__((aligned(8)));
+ union vfp_state vfpstate;
+#ifdef CONFIG_ARM_THUMBEE
+ unsigned long thumbee_state; /* ThumbEE Handler Base register */
+#endif
+};
+
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .flags = 0, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+}
+
+static inline struct task_struct *thread_task(struct thread_info* ti)
+{
+ return (struct task_struct *)ti;
+}
+
+#define thread_saved_pc(tsk) \
+ ((unsigned long)(task_thread_info(tsk)->cpu_context.pc))
+#define thread_saved_sp(tsk) \
+ ((unsigned long)(task_thread_info(tsk)->cpu_context.sp))
+
+#ifndef CONFIG_THUMB2_KERNEL
+#define thread_saved_fp(tsk) \
+ ((unsigned long)(task_thread_info(tsk)->cpu_context.fp))
+#else
+#define thread_saved_fp(tsk) \
+ ((unsigned long)(task_thread_info(tsk)->cpu_context.r7))
+#endif
+
+extern void iwmmxt_task_disable(struct thread_info *);
+extern void iwmmxt_task_copy(struct thread_info *, void *);
+extern void iwmmxt_task_restore(struct thread_info *, void *);
+extern void iwmmxt_task_release(struct thread_info *);
+extern void iwmmxt_task_switch(struct thread_info *);
+
+extern int iwmmxt_undef_handler(struct pt_regs *, u32);
+
+static inline void register_iwmmxt_undef_handler(void)
+{
+ static struct undef_hook iwmmxt_undef_hook = {
+ .instr_mask = 0x0c000e00,
+ .instr_val = 0x0c000000,
+ .cpsr_mask = MODE_MASK | PSR_T_BIT,
+ .cpsr_val = USR_MODE,
+ .fn = iwmmxt_undef_handler,
+ };
+
+ register_undef_hook(&iwmmxt_undef_hook);
+}
+
+extern void vfp_sync_hwstate(struct thread_info *);
+extern void vfp_flush_hwstate(struct thread_info *);
+
+struct user_vfp;
+struct user_vfp_exc;
+
+extern int vfp_preserve_user_clear_hwstate(struct user_vfp *,
+ struct user_vfp_exc *);
+extern int vfp_restore_user_hwstate(struct user_vfp *,
+ struct user_vfp_exc *);
+#endif
+
+/*
+ * thread information flags:
+ * TIF_USEDFPU - FPU was used by this task this quantum (SMP)
+ * TIF_POLLING_NRFLAG - true if poll_idle() is polling TIF_NEED_RESCHED
+ *
+ * Any bit in the range of 0..15 will cause do_work_pending() to be invoked.
+ */
+#define TIF_SIGPENDING 0 /* signal pending */
+#define TIF_NEED_RESCHED 1 /* rescheduling necessary */
+#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
+#define TIF_UPROBE 3 /* breakpointed or singlestepping */
+#define TIF_NOTIFY_SIGNAL 4 /* signal notifications exist */
+
+#define TIF_USING_IWMMXT 17
+#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
+#define TIF_RESTORE_SIGMASK 19
+#define TIF_SYSCALL_TRACE 20 /* syscall trace active */
+#define TIF_SYSCALL_AUDIT 21 /* syscall auditing active */
+#define TIF_SYSCALL_TRACEPOINT 22 /* syscall tracepoint instrumentation */
+#define TIF_SECCOMP 23 /* seccomp syscall filtering active */
+
+
+#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+#define _TIF_UPROBE (1 << TIF_UPROBE)
+#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
+#define _TIF_SECCOMP (1 << TIF_SECCOMP)
+#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
+#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
+
+/* Checks for any syscall work in entry-common.S */
+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
+
+/*
+ * Change these and you break ASM code in entry-common.S
+ */
+#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+ _TIF_NOTIFY_SIGNAL)
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_ARM_THREAD_INFO_H */
diff --git a/arch/arm/include/asm/thread_notify.h b/arch/arm/include/asm/thread_notify.h
new file mode 100644
index 0000000000..1c1542e2ed
--- /dev/null
+++ b/arch/arm/include/asm/thread_notify.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/thread_notify.h
+ *
+ * Copyright (C) 2006 Russell King.
+ */
+#ifndef ASMARM_THREAD_NOTIFY_H
+#define ASMARM_THREAD_NOTIFY_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+#include <linux/notifier.h>
+#include <asm/thread_info.h>
+
+static inline int thread_register_notifier(struct notifier_block *n)
+{
+ extern struct atomic_notifier_head thread_notify_head;
+ return atomic_notifier_chain_register(&thread_notify_head, n);
+}
+
+static inline void thread_unregister_notifier(struct notifier_block *n)
+{
+ extern struct atomic_notifier_head thread_notify_head;
+ atomic_notifier_chain_unregister(&thread_notify_head, n);
+}
+
+static inline void thread_notify(unsigned long rc, struct thread_info *thread)
+{
+ extern struct atomic_notifier_head thread_notify_head;
+ atomic_notifier_call_chain(&thread_notify_head, rc, thread);
+}
+
+#endif
+
+/*
+ * These are the reason codes for the thread notifier.
+ */
+#define THREAD_NOTIFY_FLUSH 0
+#define THREAD_NOTIFY_EXIT 1
+#define THREAD_NOTIFY_SWITCH 2
+#define THREAD_NOTIFY_COPY 3
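+
+/*
+ * Illustrative sketch, an assumption rather than part of this header: a
+ * typical consumer (the VFP and IWMMXT code follow this shape) registers
+ * a notifier_block whose callback switches on the reason code:
+ *
+ *     static int my_thread_notify(struct notifier_block *self,
+ *                                 unsigned long cmd, void *t)
+ *     {
+ *         struct thread_info *thread = t;
+ *
+ *         if (cmd == THREAD_NOTIFY_FLUSH)
+ *             ...reinitialise this thread's state...
+ *         else if (cmd == THREAD_NOTIFY_EXIT)
+ *             ...release this thread's state...
+ *         return NOTIFY_DONE;
+ *     }
+ *
+ *     static struct notifier_block my_notifier = {
+ *         .notifier_call = my_thread_notify,
+ *     };
+ *
+ * and calls thread_register_notifier(&my_notifier) at init time.
+ */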
+
+#endif
+#endif
diff --git a/arch/arm/include/asm/timex.h b/arch/arm/include/asm/timex.h
new file mode 100644
index 0000000000..6d1337c169
--- /dev/null
+++ b/arch/arm/include/asm/timex.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/timex.h
+ *
+ * Copyright (C) 1997,1998 Russell King
+ *
+ * Architecture Specific TIME specifications
+ */
+#ifndef _ASMARM_TIMEX_H
+#define _ASMARM_TIMEX_H
+
+typedef unsigned long cycles_t;
+#define get_cycles() ({ cycles_t c; read_current_timer(&c) ? 0 : c; })
+#define random_get_entropy() (((unsigned long)get_cycles()) ?: random_get_entropy_fallback())
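+
+/*
+ * Illustrative use, assuming a platform that has registered a delay
+ * timer (read_current_timer() fails and get_cycles() yields 0
+ * otherwise); do_something() is a hypothetical workload:
+ *
+ *     cycles_t t0 = get_cycles();
+ *     do_something();
+ *     pr_debug("took %lu cycles\n", (unsigned long)(get_cycles() - t0));
+ */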
+
+#endif
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
new file mode 100644
index 0000000000..f40d06ad5d
--- /dev/null
+++ b/arch/arm/include/asm/tlb.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/tlb.h
+ *
+ * Copyright (C) 2002 Russell King
+ *
+ * Experimentation shows that on a StrongARM, it appears to be faster
+ * to use the "invalidate whole tlb" rather than "invalidate single
+ * tlb" for this.
+ *
+ * This appears to be true for both the process fork+exit case and
+ * the munmap-large-area case.
+ */
+#ifndef __ASMARM_TLB_H
+#define __ASMARM_TLB_H
+
+#include <asm/cacheflush.h>
+
+#ifndef CONFIG_MMU
+
+#include <linux/pagemap.h>
+
+#define tlb_flush(tlb) ((void) tlb)
+
+#include <asm-generic/tlb.h>
+
+#else /* !CONFIG_MMU */
+
+#include <linux/swap.h>
+#include <asm/tlbflush.h>
+
+static inline void __tlb_remove_table(void *_table)
+{
+ free_page_and_swap_cache((struct page *)_table);
+}
+
+#include <asm-generic/tlb.h>
+
+static inline void
+__pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr)
+{
+ struct ptdesc *ptdesc = page_ptdesc(pte);
+
+ pagetable_pte_dtor(ptdesc);
+
+#ifndef CONFIG_ARM_LPAE
+ /*
+ * With the classic ARM MMU, a pte page has two corresponding pmd
+ * entries, each covering 1MB.
+ */
+ addr = (addr & PMD_MASK) + SZ_1M;
+ __tlb_adjust_range(tlb, addr - PAGE_SIZE, 2 * PAGE_SIZE);
+#endif
+
+ tlb_remove_ptdesc(tlb, ptdesc);
+}
+
+static inline void
+__pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
+{
+#ifdef CONFIG_ARM_LPAE
+ struct ptdesc *ptdesc = virt_to_ptdesc(pmdp);
+
+ pagetable_pmd_dtor(ptdesc);
+ tlb_remove_ptdesc(tlb, ptdesc);
+#endif
+}
+
+#endif /* CONFIG_MMU */
+#endif
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
new file mode 100644
index 0000000000..38c6e4a2a0
--- /dev/null
+++ b/arch/arm/include/asm/tlbflush.h
@@ -0,0 +1,681 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/tlbflush.h
+ *
+ * Copyright (C) 1999-2003 Russell King
+ */
+#ifndef _ASMARM_TLBFLUSH_H
+#define _ASMARM_TLBFLUSH_H
+
+#ifndef __ASSEMBLY__
+# include <linux/mm_types.h>
+#endif
+
+#ifdef CONFIG_MMU
+
+#include <asm/glue.h>
+
+#define TLB_V4_U_PAGE (1 << 1)
+#define TLB_V4_D_PAGE (1 << 2)
+#define TLB_V4_I_PAGE (1 << 3)
+#define TLB_V6_U_PAGE (1 << 4)
+#define TLB_V6_D_PAGE (1 << 5)
+#define TLB_V6_I_PAGE (1 << 6)
+
+#define TLB_V4_U_FULL (1 << 9)
+#define TLB_V4_D_FULL (1 << 10)
+#define TLB_V4_I_FULL (1 << 11)
+#define TLB_V6_U_FULL (1 << 12)
+#define TLB_V6_D_FULL (1 << 13)
+#define TLB_V6_I_FULL (1 << 14)
+
+#define TLB_V6_U_ASID (1 << 16)
+#define TLB_V6_D_ASID (1 << 17)
+#define TLB_V6_I_ASID (1 << 18)
+
+#define TLB_V6_BP (1 << 19)
+
+/* Unified Inner Shareable TLB operations (ARMv7 MP extensions) */
+#define TLB_V7_UIS_PAGE (1 << 20)
+#define TLB_V7_UIS_FULL (1 << 21)
+#define TLB_V7_UIS_ASID (1 << 22)
+#define TLB_V7_UIS_BP (1 << 23)
+
+#define TLB_BARRIER (1 << 28)
+#define TLB_L2CLEAN_FR (1 << 29) /* Feroceon */
+#define TLB_DCLEAN (1 << 30)
+#define TLB_WB (1 << 31)
+
+/*
+ * MMU TLB Model
+ * =============
+ *
+ * We have the following to choose from:
+ * v4 - ARMv4 without write buffer
+ * v4wb - ARMv4 with write buffer without I TLB flush entry instruction
+ * v4wbi - ARMv4 with write buffer with I TLB flush entry instruction
+ * fr - Feroceon (v4wbi with non-outer-cacheable page table walks)
+ * fa - Faraday (v4 with write buffer with UTLB)
+ * v6wbi - ARMv6 with write buffer with I TLB flush entry instruction
+ * v7wbi - identical to v6wbi
+ */
+#undef _TLB
+#undef MULTI_TLB
+
+#ifdef CONFIG_SMP_ON_UP
+#define MULTI_TLB 1
+#endif
+
+#define v4_tlb_flags (TLB_V4_U_FULL | TLB_V4_U_PAGE)
+
+#ifdef CONFIG_CPU_TLB_V4WT
+# define v4_possible_flags v4_tlb_flags
+# define v4_always_flags v4_tlb_flags
+# ifdef _TLB
+# define MULTI_TLB 1
+# else
+# define _TLB v4
+# endif
+#else
+# define v4_possible_flags 0
+# define v4_always_flags (-1UL)
+#endif
+
+#define fa_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
+ TLB_V4_U_FULL | TLB_V4_U_PAGE)
+
+#ifdef CONFIG_CPU_TLB_FA
+# define fa_possible_flags fa_tlb_flags
+# define fa_always_flags fa_tlb_flags
+# ifdef _TLB
+# define MULTI_TLB 1
+# else
+# define _TLB fa
+# endif
+#else
+# define fa_possible_flags 0
+# define fa_always_flags (-1UL)
+#endif
+
+#define v4wbi_tlb_flags (TLB_WB | TLB_DCLEAN | \
+ TLB_V4_I_FULL | TLB_V4_D_FULL | \
+ TLB_V4_I_PAGE | TLB_V4_D_PAGE)
+
+#ifdef CONFIG_CPU_TLB_V4WBI
+# define v4wbi_possible_flags v4wbi_tlb_flags
+# define v4wbi_always_flags v4wbi_tlb_flags
+# ifdef _TLB
+# define MULTI_TLB 1
+# else
+# define _TLB v4wbi
+# endif
+#else
+# define v4wbi_possible_flags 0
+# define v4wbi_always_flags (-1UL)
+#endif
+
+#define fr_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_L2CLEAN_FR | \
+ TLB_V4_I_FULL | TLB_V4_D_FULL | \
+ TLB_V4_I_PAGE | TLB_V4_D_PAGE)
+
+#ifdef CONFIG_CPU_TLB_FEROCEON
+# define fr_possible_flags fr_tlb_flags
+# define fr_always_flags fr_tlb_flags
+# ifdef _TLB
+# define MULTI_TLB 1
+# else
+# define _TLB v4wbi
+# endif
+#else
+# define fr_possible_flags 0
+# define fr_always_flags (-1UL)
+#endif
+
+#define v4wb_tlb_flags (TLB_WB | TLB_DCLEAN | \
+ TLB_V4_I_FULL | TLB_V4_D_FULL | \
+ TLB_V4_D_PAGE)
+
+#ifdef CONFIG_CPU_TLB_V4WB
+# define v4wb_possible_flags v4wb_tlb_flags
+# define v4wb_always_flags v4wb_tlb_flags
+# ifdef _TLB
+# define MULTI_TLB 1
+# else
+# define _TLB v4wb
+# endif
+#else
+# define v4wb_possible_flags 0
+# define v4wb_always_flags (-1UL)
+#endif
+
+#define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
+ TLB_V6_I_FULL | TLB_V6_D_FULL | \
+ TLB_V6_I_PAGE | TLB_V6_D_PAGE | \
+ TLB_V6_I_ASID | TLB_V6_D_ASID | \
+ TLB_V6_BP)
+
+#ifdef CONFIG_CPU_TLB_V6
+# define v6wbi_possible_flags v6wbi_tlb_flags
+# define v6wbi_always_flags v6wbi_tlb_flags
+# ifdef _TLB
+# define MULTI_TLB 1
+# else
+# define _TLB v6wbi
+# endif
+#else
+# define v6wbi_possible_flags 0
+# define v6wbi_always_flags (-1UL)
+#endif
+
+#define v7wbi_tlb_flags_smp (TLB_WB | TLB_BARRIER | \
+ TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | \
+ TLB_V7_UIS_ASID | TLB_V7_UIS_BP)
+#define v7wbi_tlb_flags_up (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
+ TLB_V6_U_FULL | TLB_V6_U_PAGE | \
+ TLB_V6_U_ASID | TLB_V6_BP)
+
+#ifdef CONFIG_CPU_TLB_V7
+
+# ifdef CONFIG_SMP_ON_UP
+# define v7wbi_possible_flags (v7wbi_tlb_flags_smp | v7wbi_tlb_flags_up)
+# define v7wbi_always_flags (v7wbi_tlb_flags_smp & v7wbi_tlb_flags_up)
+# elif defined(CONFIG_SMP)
+# define v7wbi_possible_flags v7wbi_tlb_flags_smp
+# define v7wbi_always_flags v7wbi_tlb_flags_smp
+# else
+# define v7wbi_possible_flags v7wbi_tlb_flags_up
+# define v7wbi_always_flags v7wbi_tlb_flags_up
+# endif
+# ifdef _TLB
+# define MULTI_TLB 1
+# else
+# define _TLB v7wbi
+# endif
+#else
+# define v7wbi_possible_flags 0
+# define v7wbi_always_flags (-1UL)
+#endif
+
+#ifndef _TLB
+#error Unknown TLB model
+#endif
+
+#ifndef __ASSEMBLY__
+
+#include <linux/sched.h>
+
+struct cpu_tlb_fns {
+ void (*flush_user_range)(unsigned long, unsigned long, struct vm_area_struct *);
+ void (*flush_kern_range)(unsigned long, unsigned long);
+ unsigned long tlb_flags;
+};
+
+/*
+ * Select the calling method
+ */
+#ifdef MULTI_TLB
+
+#define __cpu_flush_user_tlb_range cpu_tlb.flush_user_range
+#define __cpu_flush_kern_tlb_range cpu_tlb.flush_kern_range
+
+#else
+
+#define __cpu_flush_user_tlb_range __glue(_TLB,_flush_user_tlb_range)
+#define __cpu_flush_kern_tlb_range __glue(_TLB,_flush_kern_tlb_range)
+
+extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
+extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);
+
+#endif
+
+extern struct cpu_tlb_fns cpu_tlb;
+
+#define __cpu_tlb_flags cpu_tlb.tlb_flags
+
+/*
+ * TLB Management
+ * ==============
+ *
+ * The arch/arm/mm/tlb-*.S files implement these methods.
+ *
+ * The TLB specific code is expected to perform whatever tests it
+ * needs to determine if it should invalidate the TLB for each
+ * call. Start addresses are inclusive and end addresses are
+ * exclusive; it is safe to round these addresses down.
+ *
+ * flush_tlb_all()
+ *
+ * Invalidate the entire TLB.
+ *
+ * flush_tlb_mm(mm)
+ *
+ * Invalidate all TLB entries in a particular address
+ * space.
+ * - mm - mm_struct describing address space
+ *
+ * flush_tlb_range(vma,start,end)
+ *
+ * Invalidate a range of TLB entries in the specified
+ * address space.
+ * - vma - vm_area_struct describing address space
+ * - start - start address (may not be aligned)
+ * - end - end address (exclusive, may not be aligned)
+ *
+ * flush_tlb_page(vma, uaddr)
+ *
+ * Invalidate the specified page in the specified address range.
+ * - vma - vm_area_struct describing address range
+ * - uaddr - virtual address (may not be aligned)
+ */
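+
+/*
+ * Illustrative call pattern, an assumption mirroring how generic mm code
+ * drives this interface rather than anything defined here:
+ *
+ *     after rewriting a whole address space:
+ *         flush_tlb_mm(mm);
+ *     after changing the pte for a single user page:
+ *         flush_tlb_page(vma, uaddr);
+ *     after (un)mapping a range of kernel virtual addresses:
+ *         flush_tlb_kernel_range(start, end);
+ */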
+
+/*
+ * We optimise the code below by:
+ * - building a set of TLB flags that might be set in __cpu_tlb_flags
+ * - building a set of TLB flags that will always be set in __cpu_tlb_flags
+ * - if we're going to need __cpu_tlb_flags, access it once and only once
+ *
+ * This allows us to build optimal assembly for the single-CPU type case,
+ * and as close to optimal as the compiler constraints allow for the multi-CPU
+ * case. We could do better for the multi-CPU case if the compiler
+ * implemented the "%?" method, but this has been discontinued due to too
+ * many people getting it wrong.
+ */
+#define possible_tlb_flags (v4_possible_flags | \
+ v4wbi_possible_flags | \
+ fr_possible_flags | \
+ v4wb_possible_flags | \
+ fa_possible_flags | \
+ v6wbi_possible_flags | \
+ v7wbi_possible_flags)
+
+#define always_tlb_flags (v4_always_flags & \
+ v4wbi_always_flags & \
+ fr_always_flags & \
+ v4wb_always_flags & \
+ fa_always_flags & \
+ v6wbi_always_flags & \
+ v7wbi_always_flags)
+
+#define tlb_flag(f) ((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f)))
+
+#define __tlb_op(f, insnarg, arg) \
+ do { \
+ if (always_tlb_flags & (f)) \
+ asm("mcr " insnarg \
+ : : "r" (arg) : "cc"); \
+ else if (possible_tlb_flags & (f)) \
+ asm("tst %1, %2\n\t" \
+ "mcrne " insnarg \
+ : : "r" (arg), "r" (__tlb_flag), "Ir" (f) \
+ : "cc"); \
+ } while (0)
+
+#define tlb_op(f, regs, arg) __tlb_op(f, "p15, 0, %0, " regs, arg)
+#define tlb_l2_op(f, regs, arg) __tlb_op(f, "p15, 1, %0, " regs, arg)
+
+static inline void __local_flush_tlb_all(void)
+{
+ const int zero = 0;
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero);
+ tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero);
+ tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero);
+}
+
+static inline void local_flush_tlb_all(void)
+{
+ const int zero = 0;
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ if (tlb_flag(TLB_WB))
+ dsb(nshst);
+
+ __local_flush_tlb_all();
+ tlb_op(TLB_V7_UIS_FULL, "c8, c7, 0", zero);
+
+ if (tlb_flag(TLB_BARRIER)) {
+ dsb(nsh);
+ isb();
+ }
+}
+
+static inline void __flush_tlb_all(void)
+{
+ const int zero = 0;
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ if (tlb_flag(TLB_WB))
+ dsb(ishst);
+
+ __local_flush_tlb_all();
+ tlb_op(TLB_V7_UIS_FULL, "c8, c3, 0", zero);
+
+ if (tlb_flag(TLB_BARRIER)) {
+ dsb(ish);
+ isb();
+ }
+}
+
+static inline void __local_flush_tlb_mm(struct mm_struct *mm)
+{
+ const int zero = 0;
+ const int asid = ASID(mm);
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ if (possible_tlb_flags & (TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
+ tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero);
+ tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero);
+ tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero);
+ }
+ }
+
+ tlb_op(TLB_V6_U_ASID, "c8, c7, 2", asid);
+ tlb_op(TLB_V6_D_ASID, "c8, c6, 2", asid);
+ tlb_op(TLB_V6_I_ASID, "c8, c5, 2", asid);
+}
+
+static inline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+ const int asid = ASID(mm);
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ if (tlb_flag(TLB_WB))
+ dsb(nshst);
+
+ __local_flush_tlb_mm(mm);
+ tlb_op(TLB_V7_UIS_ASID, "c8, c7, 2", asid);
+
+ if (tlb_flag(TLB_BARRIER))
+ dsb(nsh);
+}
+
+static inline void __flush_tlb_mm(struct mm_struct *mm)
+{
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ if (tlb_flag(TLB_WB))
+ dsb(ishst);
+
+ __local_flush_tlb_mm(mm);
+#ifdef CONFIG_ARM_ERRATA_720789
+ tlb_op(TLB_V7_UIS_ASID, "c8, c3, 0", 0);
+#else
+ tlb_op(TLB_V7_UIS_ASID, "c8, c3, 2", ASID(mm));
+#endif
+
+ if (tlb_flag(TLB_BARRIER))
+ dsb(ish);
+}
+
+static inline void
+__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+{
+ const int zero = 0;
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
+
+ if (possible_tlb_flags & (TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
+ cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
+ tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr);
+ tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", uaddr);
+ tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", uaddr);
+ if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
+ asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
+ }
+
+ tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", uaddr);
+ tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", uaddr);
+ tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", uaddr);
+}
+
+static inline void
+local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+{
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
+
+ if (tlb_flag(TLB_WB))
+ dsb(nshst);
+
+ __local_flush_tlb_page(vma, uaddr);
+ tlb_op(TLB_V7_UIS_PAGE, "c8, c7, 1", uaddr);
+
+ if (tlb_flag(TLB_BARRIER))
+ dsb(nsh);
+}
+
+static inline void
+__flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+{
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
+
+ if (tlb_flag(TLB_WB))
+ dsb(ishst);
+
+ __local_flush_tlb_page(vma, uaddr);
+#ifdef CONFIG_ARM_ERRATA_720789
+ tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 3", uaddr & PAGE_MASK);
+#else
+ tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", uaddr);
+#endif
+
+ if (tlb_flag(TLB_BARRIER))
+ dsb(ish);
+}
+
+static inline void __local_flush_tlb_kernel_page(unsigned long kaddr)
+{
+ const int zero = 0;
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr);
+ tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr);
+ tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr);
+ if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
+ asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
+
+ tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", kaddr);
+ tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", kaddr);
+ tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", kaddr);
+}
+
+static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
+{
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ kaddr &= PAGE_MASK;
+
+ if (tlb_flag(TLB_WB))
+ dsb(nshst);
+
+ __local_flush_tlb_kernel_page(kaddr);
+ tlb_op(TLB_V7_UIS_PAGE, "c8, c7, 1", kaddr);
+
+ if (tlb_flag(TLB_BARRIER)) {
+ dsb(nsh);
+ isb();
+ }
+}
+
+static inline void __flush_tlb_kernel_page(unsigned long kaddr)
+{
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ kaddr &= PAGE_MASK;
+
+ if (tlb_flag(TLB_WB))
+ dsb(ishst);
+
+ __local_flush_tlb_kernel_page(kaddr);
+ tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", kaddr);
+
+ if (tlb_flag(TLB_BARRIER)) {
+ dsb(ish);
+ isb();
+ }
+}
+
+/*
+ * Branch predictor maintenance is paired with full TLB invalidation, so
+ * there is no need for any barriers here.
+ */
+static inline void __local_flush_bp_all(void)
+{
+ const int zero = 0;
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ if (tlb_flag(TLB_V6_BP))
+ asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero));
+}
+
+static inline void local_flush_bp_all(void)
+{
+ const int zero = 0;
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ __local_flush_bp_all();
+ if (tlb_flag(TLB_V7_UIS_BP))
+ asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero));
+}
+
+static inline void __flush_bp_all(void)
+{
+ const int zero = 0;
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ __local_flush_bp_all();
+ if (tlb_flag(TLB_V7_UIS_BP))
+ asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero));
+}
+
+/*
+ * flush_pmd_entry
+ *
+ * Flush a PMD entry (word aligned, or double-word aligned) to
+ * RAM if the TLB for the CPU we are running on requires this.
+ * This is typically used when we are creating PMD entries.
+ *
+ * clean_pmd_entry
+ *
+ * Clean (but don't drain the write buffer) if the CPU requires
+ * these operations. This is typically used when we are removing
+ * PMD entries.
+ */
+static inline void flush_pmd_entry(void *pmd)
+{
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ tlb_op(TLB_DCLEAN, "c7, c10, 1 @ flush_pmd", pmd);
+ tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1 @ L2 flush_pmd", pmd);
+
+ if (tlb_flag(TLB_WB))
+ dsb(ishst);
+}
+
+static inline void clean_pmd_entry(void *pmd)
+{
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ tlb_op(TLB_DCLEAN, "c7, c10, 1 @ flush_pmd", pmd);
+ tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1 @ L2 flush_pmd", pmd);
+}
+
+#undef tlb_op
+#undef tlb_flag
+#undef always_tlb_flags
+#undef possible_tlb_flags
+
+/*
+ * Convert calls to our calling convention.
+ */
+#define local_flush_tlb_range(vma,start,end) __cpu_flush_user_tlb_range(start,end,vma)
+#define local_flush_tlb_kernel_range(s,e) __cpu_flush_kern_tlb_range(s,e)
+
+#ifndef CONFIG_SMP
+#define flush_tlb_all local_flush_tlb_all
+#define flush_tlb_mm local_flush_tlb_mm
+#define flush_tlb_page local_flush_tlb_page
+#define flush_tlb_kernel_page local_flush_tlb_kernel_page
+#define flush_tlb_range local_flush_tlb_range
+#define flush_tlb_kernel_range local_flush_tlb_kernel_range
+#define flush_bp_all local_flush_bp_all
+#else
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
+extern void flush_tlb_kernel_page(unsigned long kaddr);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void flush_bp_all(void);
+#endif
+
+/*
+ * If PG_dcache_clean is not set for the page, we need to ensure that any
+ * cache entries for the kernel's virtual memory range are written
+ * back to the page. On ARMv6 and later, the cache coherency is handled via
+ * the set_ptes() function.
+ */
+#if __LINUX_ARM_ARCH__ < 6
+void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep, unsigned int nr);
+#else
+static inline void update_mmu_cache_range(struct vm_fault *vmf,
+ struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
+ unsigned int nr)
+{
+}
+#endif
+
+#define update_mmu_cache(vma, addr, ptep) \
+ update_mmu_cache_range(NULL, vma, addr, ptep, 1)
+
+#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
+
+#endif
+
+#elif defined(CONFIG_SMP) /* !CONFIG_MMU */
+
+#ifndef __ASSEMBLY__
+static inline void local_flush_tlb_all(void) { }
+static inline void local_flush_tlb_mm(struct mm_struct *mm) { }
+static inline void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) { }
+static inline void local_flush_tlb_kernel_page(unsigned long kaddr) { }
+static inline void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { }
+static inline void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) { }
+static inline void local_flush_bp_all(void) { }
+
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
+extern void flush_tlb_kernel_page(unsigned long kaddr);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void flush_bp_all(void);
+#endif /* __ASSEMBLY__ */
+
+#endif
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_ARM_ERRATA_798181
+extern void erratum_a15_798181_init(void);
+#else
+static inline void erratum_a15_798181_init(void) {}
+#endif
+extern bool (*erratum_a15_798181_handler)(void);
+
+static inline bool erratum_a15_798181(void)
+{
+ if (unlikely(IS_ENABLED(CONFIG_ARM_ERRATA_798181) &&
+ erratum_a15_798181_handler))
+ return erratum_a15_798181_handler();
+ return false;
+}
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
new file mode 100644
index 0000000000..3dcd0f71a0
--- /dev/null
+++ b/arch/arm/include/asm/tls.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASMARM_TLS_H
+#define __ASMARM_TLS_H
+
+#include <linux/compiler.h>
+#include <asm/thread_info.h>
+
+#ifdef __ASSEMBLY__
+#include <asm/asm-offsets.h>
+ .macro switch_tls_none, base, tp, tpuser, tmp1, tmp2
+ .endm
+
+ .macro switch_tls_v6k, base, tp, tpuser, tmp1, tmp2
+ mrc p15, 0, \tmp2, c13, c0, 2 @ get the user r/w register
+ @ TLS register update is deferred until return to user space
+ mcr p15, 0, \tpuser, c13, c0, 2 @ set the user r/w register
+ str \tmp2, [\base, #TI_TP_VALUE + 4] @ save it
+ .endm
+
+ .macro switch_tls_v6, base, tp, tpuser, tmp1, tmp2
+#ifdef CONFIG_SMP
+ALT_SMP(nop)
+ALT_UP_B(.L0_\@)
+ .subsection 1
+#endif
+.L0_\@:
+ ldr_va \tmp1, elf_hwcap
+ mov \tmp2, #0xffff0fff
+ tst \tmp1, #HWCAP_TLS @ hardware TLS available?
+ streq \tp, [\tmp2, #-15] @ set TLS value at 0xffff0ff0
+ beq .L2_\@
+ mcr p15, 0, \tp, c13, c0, 3 @ yes, set TLS register
+#ifdef CONFIG_SMP
+ b .L1_\@
+ .previous
+#endif
+.L1_\@: switch_tls_v6k \base, \tp, \tpuser, \tmp1, \tmp2
+.L2_\@:
+ .endm
+
+ .macro switch_tls_software, base, tp, tpuser, tmp1, tmp2
+ mov \tmp1, #0xffff0fff
+ str \tp, [\tmp1, #-15] @ set TLS value at 0xffff0ff0
+ .endm
+#else
+#include <asm/smp_plat.h>
+#endif
+
+#ifdef CONFIG_TLS_REG_EMUL
+#define tls_emu 1
+#define has_tls_reg 1
+#define defer_tls_reg_update 0
+#define switch_tls switch_tls_none
+#elif defined(CONFIG_CPU_V6)
+#define tls_emu 0
+#define has_tls_reg (elf_hwcap & HWCAP_TLS)
+#define defer_tls_reg_update is_smp()
+#define switch_tls switch_tls_v6
+#elif defined(CONFIG_CPU_32v6K)
+#define tls_emu 0
+#define has_tls_reg 1
+#define defer_tls_reg_update 1
+#define switch_tls switch_tls_v6k
+#else
+#define tls_emu 0
+#define has_tls_reg 0
+#define defer_tls_reg_update 0
+#define switch_tls switch_tls_software
+#endif
+
+#ifndef __ASSEMBLY__
+
+static inline void set_tls(unsigned long val)
+{
+ struct thread_info *thread;
+
+ thread = current_thread_info();
+
+ thread->tp_value[0] = val;
+
+ /*
+ * This code runs with preemption enabled and therefore must
+ * be reentrant with respect to switch_tls.
+ *
+ * We need to ensure ordering between the shadow state and the
+ * hardware state, so that we don't corrupt the hardware state
+ * with a stale shadow state during context switch.
+ *
+ * If we're preempted here, switch_tls will load TPIDRURO from
+ * thread_info upon resuming execution and the following mcr
+ * is merely redundant.
+ */
+ barrier();
+
+ if (!tls_emu) {
+ if (has_tls_reg && !defer_tls_reg_update) {
+ asm("mcr p15, 0, %0, c13, c0, 3"
+ : : "r" (val));
+ } else if (!has_tls_reg) {
+#ifdef CONFIG_KUSER_HELPERS
+ /*
+ * User space must never try to access this
+ * directly. Expect your app to break
+ * eventually if you do so. The user helper
+ * at 0xffff0fe0 must be used instead. (see
+ * entry-armv.S for details)
+ */
+ *((unsigned int *)0xffff0ff0) = val;
+#endif
+ }
+
+ }
+}
+
+static inline unsigned long get_tpuser(void)
+{
+ unsigned long reg = 0;
+
+ if (has_tls_reg && !tls_emu)
+ __asm__("mrc p15, 0, %0, c13, c0, 2" : "=r" (reg));
+
+ return reg;
+}
+
+static inline void set_tpuser(unsigned long val)
+{
+ /* Since TPIDRURW is fully context-switched (unlike TPIDRURO),
+ * we need not update thread_info.
+ */
+ if (has_tls_reg && !tls_emu) {
+ asm("mcr p15, 0, %0, c13, c0, 2"
+ : : "r" (val));
+ }
+}
+
+static inline void flush_tls(void)
+{
+ set_tls(0);
+ set_tpuser(0);
+}
+
+#endif
+#endif /* __ASMARM_TLS_H */
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
new file mode 100644
index 0000000000..c7d2510e5a
--- /dev/null
+++ b/arch/arm/include/asm/topology.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARM_TOPOLOGY_H
+#define _ASM_ARM_TOPOLOGY_H
+
+#ifdef CONFIG_ARM_CPU_TOPOLOGY
+
+#include <linux/cpumask.h>
+#include <linux/arch_topology.h>
+
+/* big.LITTLE switcher is incompatible with frequency invariance */
+#ifndef CONFIG_BL_SWITCHER
+/* Replace task scheduler's default frequency-invariant accounting */
+#define arch_set_freq_scale topology_set_freq_scale
+#define arch_scale_freq_capacity topology_get_freq_scale
+#define arch_scale_freq_invariant topology_scale_freq_invariant
+#endif
+
+/* Replace task scheduler's default cpu-invariant accounting */
+#define arch_scale_cpu_capacity topology_get_cpu_scale
+
+/* Enable topology flag updates */
+#define arch_update_cpu_topology topology_update_cpu_topology
+
+/* Replace task scheduler's default thermal pressure API */
+#define arch_scale_thermal_pressure topology_get_thermal_pressure
+#define arch_update_thermal_pressure topology_update_thermal_pressure
+
+#else
+
+static inline void init_cpu_topology(void) { }
+static inline void store_cpu_topology(unsigned int cpuid) { }
+
+#endif
+
+#include <asm-generic/topology.h>
+
+#endif /* _ASM_ARM_TOPOLOGY_H */
diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h
new file mode 100644
index 0000000000..0aaefe3e17
--- /dev/null
+++ b/arch/arm/include/asm/traps.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASMARM_TRAP_H
+#define _ASMARM_TRAP_H
+
+#include <linux/list.h>
+
+struct pt_regs;
+struct task_struct;
+
+struct undef_hook {
+ struct list_head node;
+ u32 instr_mask;
+ u32 instr_val;
+ u32 cpsr_mask;
+ u32 cpsr_val;
+ int (*fn)(struct pt_regs *regs, unsigned int instr);
+};
+
+void register_undef_hook(struct undef_hook *hook);
+void unregister_undef_hook(struct undef_hook *hook);
+
+static inline int __in_irqentry_text(unsigned long ptr)
+{
+ extern char __irqentry_text_start[];
+ extern char __irqentry_text_end[];
+
+ return ptr >= (unsigned long)&__irqentry_text_start &&
+ ptr < (unsigned long)&__irqentry_text_end;
+}
+
+extern void __init early_trap_init(void *);
+extern void dump_backtrace_entry(unsigned long where, unsigned long from,
+ unsigned long frame, const char *loglvl);
+extern void ptrace_break(struct pt_regs *regs);
+
+extern void *vectors_page;
+
+asmlinkage void dump_backtrace_stm(u32 *stack, u32 instruction, const char *loglvl);
+asmlinkage void do_undefinstr(struct pt_regs *regs);
+asmlinkage void handle_fiq_as_nmi(struct pt_regs *regs);
+asmlinkage void bad_mode(struct pt_regs *regs, int reason);
+asmlinkage int arm_syscall(int no, struct pt_regs *regs);
+asmlinkage void baddataabort(int code, unsigned long instr, struct pt_regs *regs);
+asmlinkage void __div0(void);
+asmlinkage void handle_bad_stack(struct pt_regs *regs);
+
+#endif
diff --git a/arch/arm/include/asm/uaccess-asm.h b/arch/arm/include/asm/uaccess-asm.h
new file mode 100644
index 0000000000..65da32e1f1
--- /dev/null
+++ b/arch/arm/include/asm/uaccess-asm.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_UACCESS_ASM_H__
+#define __ASM_UACCESS_ASM_H__
+
+#include <asm/asm-offsets.h>
+#include <asm/domain.h>
+#include <asm/page.h>
+#include <asm/thread_info.h>
+
+ .macro csdb
+#ifdef CONFIG_THUMB2_KERNEL
+ .inst.w 0xf3af8014
+#else
+ .inst 0xe320f014
+#endif
+ .endm
+
+ .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
+#ifndef CONFIG_CPU_USE_DOMAINS
+ adds \tmp, \addr, #\size - 1
+ sbcscc \tmp, \tmp, \limit
+ bcs \bad
+#ifdef CONFIG_CPU_SPECTRE
+ movcs \addr, #0
+ csdb
+#endif
+#endif
+ .endm
+
+ .macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req
+#ifdef CONFIG_CPU_SPECTRE
+ sub \tmp, \limit, #1
+ subs \tmp, \tmp, \addr @ tmp = limit - 1 - addr
+ addhs \tmp, \tmp, #1 @ if (tmp >= 0) {
+ subshs \tmp, \tmp, \size @ tmp = limit - (addr + size) }
+ movlo \addr, #0 @ if (tmp < 0) addr = NULL
+ csdb
+#endif
+ .endm
+
+ .macro uaccess_disable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ /*
+ * Whenever we re-enter userspace, the domains should always be
+ * set appropriately.
+ */
+ mov \tmp, #DACR_UACCESS_DISABLE
+ mcr p15, 0, \tmp, c3, c0, 0 @ Set domain register
+ .if \isb
+ instr_sync
+ .endif
+#endif
+ .endm
+
+ .macro uaccess_enable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ /*
+ * Whenever we re-enter userspace, the domains should always be
+ * set appropriately.
+ */
+ mov \tmp, #DACR_UACCESS_ENABLE
+ mcr p15, 0, \tmp, c3, c0, 0
+ .if \isb
+ instr_sync
+ .endif
+#endif
+ .endm
+
+#if defined(CONFIG_CPU_SW_DOMAIN_PAN) || defined(CONFIG_CPU_USE_DOMAINS)
+#define DACR(x...) x
+#else
+#define DACR(x...)
+#endif
+
+ /*
+ * Save the address limit on entry to a privileged exception.
+ *
+ * If we are using the DACR for kernel access by the user accessors
+ * (CONFIG_CPU_USE_DOMAINS=y), always reset the DACR kernel domain
+ * back to client mode, whether or not \disable is set.
+ *
+ * If we are using SW PAN, set the DACR user domain to no access
+ * if \disable is set.
+ */
+ .macro uaccess_entry, tsk, tmp0, tmp1, tmp2, disable
+ DACR( mrc p15, 0, \tmp0, c3, c0, 0)
+ DACR( str \tmp0, [sp, #SVC_DACR])
+ .if \disable && IS_ENABLED(CONFIG_CPU_SW_DOMAIN_PAN)
+ /* kernel=client, user=no access */
+ mov \tmp2, #DACR_UACCESS_DISABLE
+ mcr p15, 0, \tmp2, c3, c0, 0
+ instr_sync
+ .elseif IS_ENABLED(CONFIG_CPU_USE_DOMAINS)
+ /* kernel=client */
+ bic \tmp2, \tmp0, #domain_mask(DOMAIN_KERNEL)
+ orr \tmp2, \tmp2, #domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT)
+ mcr p15, 0, \tmp2, c3, c0, 0
+ instr_sync
+ .endif
+ .endm
+
+ /* Restore the user access state previously saved by uaccess_entry */
+ .macro uaccess_exit, tsk, tmp0, tmp1
+ DACR( ldr \tmp0, [sp, #SVC_DACR])
+ DACR( mcr p15, 0, \tmp0, c3, c0, 0)
+ .endm
+
+#undef DACR
+
+#endif /* __ASM_UACCESS_ASM_H__ */
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
new file mode 100644
index 0000000000..bb5c818231
--- /dev/null
+++ b/arch/arm/include/asm/uaccess.h
@@ -0,0 +1,586 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/uaccess.h
+ */
+#ifndef _ASMARM_UACCESS_H
+#define _ASMARM_UACCESS_H
+
+/*
+ * User space memory access functions
+ */
+#include <linux/string.h>
+#include <asm/page.h>
+#include <asm/domain.h>
+#include <asm/unaligned.h>
+#include <asm/unified.h>
+#include <asm/compiler.h>
+
+#include <asm/extable.h>
+
+/*
+ * These two functions allow hooking accesses to userspace to increase
+ * system integrity by ensuring that the kernel cannot inadvertently
+ * perform such accesses (e.g. via list poison values) which could then
+ * be exploited for privilege escalation.
+ */
+static __always_inline unsigned int uaccess_save_and_enable(void)
+{
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ unsigned int old_domain = get_domain();
+
+ /* Set the current domain access to permit user accesses */
+ set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
+ domain_val(DOMAIN_USER, DOMAIN_CLIENT));
+
+ return old_domain;
+#else
+ return 0;
+#endif
+}
+
+static __always_inline void uaccess_restore(unsigned int flags)
+{
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ /* Restore the user access mask */
+ set_domain(flags);
+#endif
+}
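+
+/*
+ * Illustrative pairing, a sketch of the pattern used by the accessors
+ * below (see raw_copy_from_user()):
+ *
+ *     unsigned int __ua_flags = uaccess_save_and_enable();
+ *     ...perform the user-space access...
+ *     uaccess_restore(__ua_flags);
+ *
+ * With SW PAN this keeps the user domain open only for the duration of
+ * the access.
+ */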
+
+/*
+ * These two are intentionally not defined anywhere - if the kernel
+ * code generates any references to them, that's a bug.
+ */
+extern int __get_user_bad(void);
+extern int __put_user_bad(void);
+
+#ifdef CONFIG_MMU
+
+/*
+ * This is a type: either unsigned long, if the argument fits into
+ * that type, or otherwise unsigned long long.
+ */
+#define __inttype(x) \
+ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
+
+/*
+ * Sanitise a uaccess pointer such that it becomes NULL if addr+size
+ * is above the current addr_limit.
+ */
+#define uaccess_mask_range_ptr(ptr, size) \
+ ((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size))
+static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
+ size_t size)
+{
+ void __user *safe_ptr = (void __user *)ptr;
+ unsigned long tmp;
+
+ asm volatile(
+ " .syntax unified\n"
+ " sub %1, %3, #1\n"
+ " subs %1, %1, %0\n"
+ " addhs %1, %1, #1\n"
+ " subshs %1, %1, %2\n"
+ " movlo %0, #0\n"
+ : "+r" (safe_ptr), "=&r" (tmp)
+ : "r" (size), "r" (TASK_SIZE)
+ : "cc");
+
+ csdb();
+ return safe_ptr;
+}
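+
+/*
+ * Illustrative use, a sketch rather than a definitive call site: mask
+ * the pointer once, then perform unchecked accesses only through the
+ * sanitised pointer so a mispredicted access_ok() cannot be exploited:
+ *
+ *     ptr = uaccess_mask_range_ptr(ptr, size);
+ *     ...unchecked copy of size bytes via ptr...
+ */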
+
+/*
+ * Single-value transfer routines. They automatically use the right
+ * size if we just have the right pointer type. Note that the functions
+ * which read from user space (*get_*) need to take care not to leak
+ * kernel data even if the calling code is buggy and fails to check
+ * the return value. This means zeroing out the destination variable
+ * or buffer on error. Normally this is done out of line by the
+ * fixup code, but there are a few places where it intrudes on the
+ * main code path. When we only write to user space, there is no
+ * problem.
+ */
+extern int __get_user_1(void *);
+extern int __get_user_2(void *);
+extern int __get_user_4(void *);
+extern int __get_user_32t_8(void *);
+extern int __get_user_8(void *);
+extern int __get_user_64t_1(void *);
+extern int __get_user_64t_2(void *);
+extern int __get_user_64t_4(void *);
+
+#define __GUP_CLOBBER_1 "lr", "cc"
+#ifdef CONFIG_CPU_USE_DOMAINS
+#define __GUP_CLOBBER_2 "ip", "lr", "cc"
+#else
+#define __GUP_CLOBBER_2 "lr", "cc"
+#endif
+#define __GUP_CLOBBER_4 "lr", "cc"
+#define __GUP_CLOBBER_32t_8 "lr", "cc"
+#define __GUP_CLOBBER_8 "lr", "cc"
+
+#define __get_user_x(__r2, __p, __e, __l, __s) \
+ __asm__ __volatile__ ( \
+ __asmeq("%0", "r0") __asmeq("%1", "r2") \
+ __asmeq("%3", "r1") \
+ "bl __get_user_" #__s \
+ : "=&r" (__e), "=r" (__r2) \
+ : "0" (__p), "r" (__l) \
+ : __GUP_CLOBBER_##__s)
+
+/* narrowing a double-word get into a single 32-bit word register: */
+#ifdef __ARMEB__
+#define __get_user_x_32t(__r2, __p, __e, __l, __s) \
+ __get_user_x(__r2, __p, __e, __l, 32t_8)
+#else
+#define __get_user_x_32t __get_user_x
+#endif
+
+/*
+ * store the result into the proper least significant word of the 64-bit
+ * target variable; this differs only in the big-endian case, where the
+ * LSW of the 64-bit __r2 register pair is r3:
+ */
+#ifdef __ARMEB__
+#define __get_user_x_64t(__r2, __p, __e, __l, __s) \
+ __asm__ __volatile__ ( \
+ __asmeq("%0", "r0") __asmeq("%1", "r2") \
+ __asmeq("%3", "r1") \
+ "bl __get_user_64t_" #__s \
+ : "=&r" (__e), "=r" (__r2) \
+ : "0" (__p), "r" (__l) \
+ : __GUP_CLOBBER_##__s)
+#else
+#define __get_user_x_64t __get_user_x
+#endif
+
+
+#define __get_user_check(x, p) \
+ ({ \
+ unsigned long __limit = TASK_SIZE - 1; \
+ register typeof(*(p)) __user *__p asm("r0") = (p); \
+ register __inttype(x) __r2 asm("r2"); \
+ register unsigned long __l asm("r1") = __limit; \
+ register int __e asm("r0"); \
+ unsigned int __ua_flags = uaccess_save_and_enable(); \
+ int __tmp_e; \
+ switch (sizeof(*(__p))) { \
+ case 1: \
+ if (sizeof((x)) >= 8) \
+ __get_user_x_64t(__r2, __p, __e, __l, 1); \
+ else \
+ __get_user_x(__r2, __p, __e, __l, 1); \
+ break; \
+ case 2: \
+ if (sizeof((x)) >= 8) \
+ __get_user_x_64t(__r2, __p, __e, __l, 2); \
+ else \
+ __get_user_x(__r2, __p, __e, __l, 2); \
+ break; \
+ case 4: \
+ if (sizeof((x)) >= 8) \
+ __get_user_x_64t(__r2, __p, __e, __l, 4); \
+ else \
+ __get_user_x(__r2, __p, __e, __l, 4); \
+ break; \
+ case 8: \
+ if (sizeof((x)) < 8) \
+ __get_user_x_32t(__r2, __p, __e, __l, 4); \
+ else \
+ __get_user_x(__r2, __p, __e, __l, 8); \
+ break; \
+ default: __e = __get_user_bad(); break; \
+ } \
+ __tmp_e = __e; \
+ uaccess_restore(__ua_flags); \
+ x = (typeof(*(p))) __r2; \
+ __tmp_e; \
+ })
+
+#define get_user(x, p) \
+ ({ \
+ might_fault(); \
+ __get_user_check(x, p); \
+ })
+
+extern int __put_user_1(void *, unsigned int);
+extern int __put_user_2(void *, unsigned int);
+extern int __put_user_4(void *, unsigned int);
+extern int __put_user_8(void *, unsigned long long);
+
+#define __put_user_check(__pu_val, __ptr, __err, __s) \
+ ({ \
+ unsigned long __limit = TASK_SIZE - 1; \
+ register typeof(__pu_val) __r2 asm("r2") = __pu_val; \
+ register const void __user *__p asm("r0") = __ptr; \
+ register unsigned long __l asm("r1") = __limit; \
+ register int __e asm("r0"); \
+ __asm__ __volatile__ ( \
+ __asmeq("%0", "r0") __asmeq("%2", "r2") \
+ __asmeq("%3", "r1") \
+ "bl __put_user_" #__s \
+ : "=&r" (__e) \
+ : "0" (__p), "r" (__r2), "r" (__l) \
+ : "ip", "lr", "cc"); \
+ __err = __e; \
+ })
+
+#else /* CONFIG_MMU */
+
+#define get_user(x, p) __get_user(x, p)
+#define __put_user_check __put_user_nocheck
+
+#endif /* CONFIG_MMU */
+
+#include <asm-generic/access_ok.h>
+
+#ifdef CONFIG_CPU_SPECTRE
+/*
+ * When mitigating Spectre variant 1, it is not worth fixing the non-
+ * verifying accessors, because we need to add verification of the
+ * address space there. Force these to use the standard get_user()
+ * version instead.
+ */
+#define __get_user(x, ptr) get_user(x, ptr)
+#else
+
+/*
+ * The "__xxx" versions of the user access functions do not verify the
+ * address space - it must have been done previously with a separate
+ * "access_ok()" call.
+ *
+ * The "xxx_error" versions set the third argument to EFAULT if an
+ * error occurs, and leave it unchanged on success. Note that these
+ * versions are void (ie, don't return a value as such).
+ */
+#define __get_user(x, ptr) \
+({ \
+ long __gu_err = 0; \
+ __get_user_err((x), (ptr), __gu_err, TUSER()); \
+ __gu_err; \
+})
+
+#define __get_user_err(x, ptr, err, __t) \
+do { \
+ unsigned long __gu_addr = (unsigned long)(ptr); \
+ unsigned long __gu_val; \
+ unsigned int __ua_flags; \
+ __chk_user_ptr(ptr); \
+ might_fault(); \
+ __ua_flags = uaccess_save_and_enable(); \
+ switch (sizeof(*(ptr))) { \
+ case 1: __get_user_asm_byte(__gu_val, __gu_addr, err, __t); break; \
+ case 2: __get_user_asm_half(__gu_val, __gu_addr, err, __t); break; \
+ case 4: __get_user_asm_word(__gu_val, __gu_addr, err, __t); break; \
+ default: (__gu_val) = __get_user_bad(); \
+ } \
+ uaccess_restore(__ua_flags); \
+ (x) = (__typeof__(*(ptr)))__gu_val; \
+} while (0)
+#endif
+
+#define __get_user_asm(x, addr, err, instr) \
+ __asm__ __volatile__( \
+ "1: " instr " %1, [%2], #0\n" \
+ "2:\n" \
+ " .pushsection .text.fixup,\"ax\"\n" \
+ " .align 2\n" \
+ "3: mov %0, %3\n" \
+ " mov %1, #0\n" \
+ " b 2b\n" \
+ " .popsection\n" \
+ " .pushsection __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .long 1b, 3b\n" \
+ " .popsection" \
+ : "+r" (err), "=&r" (x) \
+ : "r" (addr), "i" (-EFAULT) \
+ : "cc")
+
+#define __get_user_asm_byte(x, addr, err, __t) \
+ __get_user_asm(x, addr, err, "ldrb" __t)
+
+#if __LINUX_ARM_ARCH__ >= 6
+
+#define __get_user_asm_half(x, addr, err, __t) \
+ __get_user_asm(x, addr, err, "ldrh" __t)
+
+#else
+
+#ifndef __ARMEB__
+#define __get_user_asm_half(x, __gu_addr, err, __t) \
+({ \
+ unsigned long __b1, __b2; \
+ __get_user_asm_byte(__b1, __gu_addr, err, __t); \
+ __get_user_asm_byte(__b2, __gu_addr + 1, err, __t); \
+ (x) = __b1 | (__b2 << 8); \
+})
+#else
+#define __get_user_asm_half(x, __gu_addr, err, __t) \
+({ \
+ unsigned long __b1, __b2; \
+ __get_user_asm_byte(__b1, __gu_addr, err, __t); \
+ __get_user_asm_byte(__b2, __gu_addr + 1, err, __t); \
+ (x) = (__b1 << 8) | __b2; \
+})
+#endif
+
+#endif /* __LINUX_ARM_ARCH__ >= 6 */
+
+#define __get_user_asm_word(x, addr, err, __t) \
+ __get_user_asm(x, addr, err, "ldr" __t)
+
+#define __put_user_switch(x, ptr, __err, __fn) \
+ do { \
+ const __typeof__(*(ptr)) __user *__pu_ptr = (ptr); \
+ __typeof__(*(ptr)) __pu_val = (x); \
+ unsigned int __ua_flags; \
+ might_fault(); \
+ __ua_flags = uaccess_save_and_enable(); \
+ switch (sizeof(*(ptr))) { \
+ case 1: __fn(__pu_val, __pu_ptr, __err, 1); break; \
+ case 2: __fn(__pu_val, __pu_ptr, __err, 2); break; \
+ case 4: __fn(__pu_val, __pu_ptr, __err, 4); break; \
+ case 8: __fn(__pu_val, __pu_ptr, __err, 8); break; \
+ default: __err = __put_user_bad(); break; \
+ } \
+ uaccess_restore(__ua_flags); \
+ } while (0)
+
+#define put_user(x, ptr) \
+({ \
+ int __pu_err = 0; \
+ __put_user_switch((x), (ptr), __pu_err, __put_user_check); \
+ __pu_err; \
+})
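+
+/*
+ * Illustrative usage, assuming a hypothetical driver copying one value
+ * each way; both macros return 0 on success and -EFAULT on a faulting
+ * access:
+ *
+ *     int val;
+ *
+ *     if (get_user(val, uptr))
+ *         return -EFAULT;
+ *     if (put_user(val + 1, uptr))
+ *         return -EFAULT;
+ */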
+
+#ifdef CONFIG_CPU_SPECTRE
+/*
+ * When mitigating Spectre variant 1.1, all accessors need to include
+ * verification of the address space.
+ */
+#define __put_user(x, ptr) put_user(x, ptr)
+
+#else
+#define __put_user(x, ptr) \
+({ \
+ long __pu_err = 0; \
+ __put_user_switch((x), (ptr), __pu_err, __put_user_nocheck); \
+ __pu_err; \
+})
+
+#define __put_user_nocheck(x, __pu_ptr, __err, __size) \
+ do { \
+ unsigned long __pu_addr = (unsigned long)__pu_ptr; \
+ __put_user_nocheck_##__size(x, __pu_addr, __err, TUSER());\
+ } while (0)
+
+#define __put_user_nocheck_1 __put_user_asm_byte
+#define __put_user_nocheck_2 __put_user_asm_half
+#define __put_user_nocheck_4 __put_user_asm_word
+#define __put_user_nocheck_8 __put_user_asm_dword
+
+#endif /* !CONFIG_CPU_SPECTRE */
+
+#define __put_user_asm(x, __pu_addr, err, instr) \
+ __asm__ __volatile__( \
+ "1: " instr " %1, [%2], #0\n" \
+ "2:\n" \
+ " .pushsection .text.fixup,\"ax\"\n" \
+ " .align 2\n" \
+ "3: mov %0, %3\n" \
+ " b 2b\n" \
+ " .popsection\n" \
+ " .pushsection __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .long 1b, 3b\n" \
+ " .popsection" \
+ : "+r" (err) \
+ : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \
+ : "cc")
+
+#define __put_user_asm_byte(x, __pu_addr, err, __t) \
+ __put_user_asm(x, __pu_addr, err, "strb" __t)
+
+#if __LINUX_ARM_ARCH__ >= 6
+
+#define __put_user_asm_half(x, __pu_addr, err, __t) \
+ __put_user_asm(x, __pu_addr, err, "strh" __t)
+
+#else
+
+#ifndef __ARMEB__
+#define __put_user_asm_half(x, __pu_addr, err, __t) \
+({ \
+ unsigned long __temp = (__force unsigned long)(x); \
+ __put_user_asm_byte(__temp, __pu_addr, err, __t); \
+ __put_user_asm_byte(__temp >> 8, __pu_addr + 1, err, __t);\
+})
+#else
+#define __put_user_asm_half(x, __pu_addr, err, __t) \
+({ \
+ unsigned long __temp = (__force unsigned long)(x); \
+ __put_user_asm_byte(__temp >> 8, __pu_addr, err, __t); \
+ __put_user_asm_byte(__temp, __pu_addr + 1, err, __t); \
+})
+#endif
+
+#endif /* __LINUX_ARM_ARCH__ >= 6 */
+
+#define __put_user_asm_word(x, __pu_addr, err, __t) \
+ __put_user_asm(x, __pu_addr, err, "str" __t)
+
+#ifndef __ARMEB__
+#define __reg_oper0 "%R2"
+#define __reg_oper1 "%Q2"
+#else
+#define __reg_oper0 "%Q2"
+#define __reg_oper1 "%R2"
+#endif
+
+#define __put_user_asm_dword(x, __pu_addr, err, __t) \
+ __asm__ __volatile__( \
+ ARM( "1: str" __t " " __reg_oper1 ", [%1], #4\n" ) \
+ ARM( "2: str" __t " " __reg_oper0 ", [%1]\n" ) \
+ THUMB( "1: str" __t " " __reg_oper1 ", [%1]\n" ) \
+ THUMB( "2: str" __t " " __reg_oper0 ", [%1, #4]\n" ) \
+ "3:\n" \
+ " .pushsection .text.fixup,\"ax\"\n" \
+ " .align 2\n" \
+ "4: mov %0, %3\n" \
+ " b 3b\n" \
+ " .popsection\n" \
+ " .pushsection __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .long 1b, 4b\n" \
+ " .long 2b, 4b\n" \
+ " .popsection" \
+ : "+r" (err), "+r" (__pu_addr) \
+ : "r" (x), "i" (-EFAULT) \
+ : "cc")
+
+#define __get_kernel_nofault(dst, src, type, err_label) \
+do { \
+ const type *__pk_ptr = (src); \
+ unsigned long __src = (unsigned long)(__pk_ptr); \
+ type __val; \
+ int __err = 0; \
+ switch (sizeof(type)) { \
+ case 1: __get_user_asm_byte(__val, __src, __err, ""); break; \
+ case 2: __get_user_asm_half(__val, __src, __err, ""); break; \
+ case 4: __get_user_asm_word(__val, __src, __err, ""); break; \
+ case 8: { \
+ u32 *__v32 = (u32*)&__val; \
+ __get_user_asm_word(__v32[0], __src, __err, ""); \
+ if (__err) \
+ break; \
+ __get_user_asm_word(__v32[1], __src+4, __err, ""); \
+ break; \
+ } \
+ default: __err = __get_user_bad(); break; \
+ } \
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) \
+ put_unaligned(__val, (type *)(dst)); \
+ else \
+ *(type *)(dst) = __val; /* aligned by caller */ \
+ if (__err) \
+ goto err_label; \
+} while (0)
+
+#define __put_kernel_nofault(dst, src, type, err_label) \
+do { \
+ const type *__pk_ptr = (dst); \
+ unsigned long __dst = (unsigned long)__pk_ptr; \
+ int __err = 0; \
+ type __val = IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) \
+ ? get_unaligned((type *)(src)) \
+ : *(type *)(src); /* aligned by caller */ \
+ switch (sizeof(type)) { \
+ case 1: __put_user_asm_byte(__val, __dst, __err, ""); break; \
+ case 2: __put_user_asm_half(__val, __dst, __err, ""); break; \
+ case 4: __put_user_asm_word(__val, __dst, __err, ""); break; \
+ case 8: __put_user_asm_dword(__val, __dst, __err, ""); break; \
+ default: __err = __put_user_bad(); break; \
+ } \
+ if (__err) \
+ goto err_label; \
+} while (0)
+
+#ifdef CONFIG_MMU
+extern unsigned long __must_check
+arm_copy_from_user(void *to, const void __user *from, unsigned long n);
+
+static inline unsigned long __must_check
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ unsigned int __ua_flags;
+
+ __ua_flags = uaccess_save_and_enable();
+ n = arm_copy_from_user(to, from, n);
+ uaccess_restore(__ua_flags);
+ return n;
+}
+
+extern unsigned long __must_check
+arm_copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __must_check
+__copy_to_user_std(void __user *to, const void *from, unsigned long n);
+
+static inline unsigned long __must_check
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+#ifndef CONFIG_UACCESS_WITH_MEMCPY
+ unsigned int __ua_flags;
+ __ua_flags = uaccess_save_and_enable();
+ n = arm_copy_to_user(to, from, n);
+ uaccess_restore(__ua_flags);
+ return n;
+#else
+ return arm_copy_to_user(to, from, n);
+#endif
+}
+
+extern unsigned long __must_check
+arm_clear_user(void __user *addr, unsigned long n);
+extern unsigned long __must_check
+__clear_user_std(void __user *addr, unsigned long n);
+
+static inline unsigned long __must_check
+__clear_user(void __user *addr, unsigned long n)
+{
+ unsigned int __ua_flags = uaccess_save_and_enable();
+ n = arm_clear_user(addr, n);
+ uaccess_restore(__ua_flags);
+ return n;
+}
+
+#else
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ memcpy(to, (const void __force *)from, n);
+ return 0;
+}
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ memcpy((void __force *)to, from, n);
+ return 0;
+}
+#define __clear_user(addr, n) (memset((void __force *)addr, 0, n), 0)
+#endif
+#define INLINE_COPY_TO_USER
+#define INLINE_COPY_FROM_USER
+
+static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
+{
+ if (access_ok(to, n))
+ n = __clear_user(to, n);
+ return n;
+}
+
+/* These are from lib/ code, and use __get_user() and friends */
+extern long strncpy_from_user(char *dest, const char __user *src, long count);
+
+extern __must_check long strnlen_user(const char __user *str, long n);
+
+#endif /* _ASMARM_UACCESS_H */
diff --git a/arch/arm/include/asm/ucontext.h b/arch/arm/include/asm/ucontext.h
new file mode 100644
index 0000000000..4048c92d9c
--- /dev/null
+++ b/arch/arm/include/asm/ucontext.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASMARM_UCONTEXT_H
+#define _ASMARM_UCONTEXT_H
+
+#include <asm/fpstate.h>
+#include <asm/user.h>
+
+/*
+ * struct sigcontext only has room for the basic registers, but struct
+ * ucontext now has room for all registers which need to be saved and
+ * restored. Coprocessor registers are stored in uc_regspace. Each
+ * coprocessor's saved state should start with a documented 32-bit magic
+ * number, followed by a 32-bit word giving the coprocessor's saved size.
+ * uc_regspace may be expanded if necessary, although this takes some
+ * coordination with glibc.
+ */
+
+struct ucontext {
+ unsigned long uc_flags;
+ struct ucontext *uc_link;
+ stack_t uc_stack;
+ struct sigcontext uc_mcontext;
+ sigset_t uc_sigmask;
+ /* Allow for uc_sigmask growth. Glibc uses a 1024-bit sigset_t. */
+ int __unused[32 - (sizeof (sigset_t) / sizeof (int))];
+ /* Last for extensibility. Eight byte aligned because some
+ coprocessors require eight byte alignment. */
+ unsigned long uc_regspace[128] __attribute__((__aligned__(8)));
+};
+
+#ifdef __KERNEL__
+
+/*
+ * Coprocessor save state. The magic values and each coprocessor's
+ * layout are part of the userspace ABI. Each one of
+ * these should be a multiple of eight bytes and aligned to eight
+ * bytes, to prevent unpredictable padding in the signal frame.
+ */
+
+/*
+ * Dummy padding block: if this magic is encountered, the block should
+ * be skipped using the corresponding size field.
+ */
+#define DUMMY_MAGIC 0xb0d9ed01
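+
+/*
+ * Illustrative sketch of the user-space view; handle_vfp_block() is a
+ * hypothetical handler. A signal handler can walk uc_regspace by
+ * reading each block's 32-bit magic and 32-bit size, skipping blocks it
+ * does not recognise, and stopping at an unknown word:
+ *
+ *     unsigned long *p = uc->uc_regspace;
+ *     for (;;) {
+ *         if (p[0] == VFP_MAGIC)
+ *             handle_vfp_block(p);
+ *         else if (p[0] != DUMMY_MAGIC)
+ *             break;
+ *         p += p[1] / sizeof(*p);
+ *     }
+ *
+ * where p[1], the size field, is in bytes.
+ */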
+
+#ifdef CONFIG_IWMMXT
+/* iwmmxt_area is 0x98 bytes long, preceded by 8 bytes of signature */
+#define IWMMXT_MAGIC 0x12ef842a
+#define IWMMXT_STORAGE_SIZE (IWMMXT_SIZE + 8)
+
+struct iwmmxt_sigframe {
+ unsigned long magic;
+ unsigned long size;
+ struct iwmmxt_struct storage;
+} __attribute__((__aligned__(8)));
+#endif /* CONFIG_IWMMXT */
+
+#ifdef CONFIG_VFP
+#define VFP_MAGIC 0x56465001
+
+struct vfp_sigframe
+{
+ unsigned long magic;
+ unsigned long size;
+ struct user_vfp ufp;
+ struct user_vfp_exc ufp_exc;
+} __attribute__((__aligned__(8)));
+
+/*
+ * 8 bytes for magic and size, 264 bytes for ufp, 12 bytes for ufp_exc,
+ * 4 bytes of padding.
+ */
+#define VFP_STORAGE_SIZE sizeof(struct vfp_sigframe)
+
+#endif /* CONFIG_VFP */
+
+/*
+ * Auxiliary signal frame. This saves stuff like FP state.
+ * The layout of this structure is not part of the user ABI,
+ * because the config options aren't. uc_regspace is really
+ * one of these.
+ */
+struct aux_sigframe {
+#ifdef CONFIG_IWMMXT
+ struct iwmmxt_sigframe iwmmxt;
+#endif
+#ifdef CONFIG_VFP
+ struct vfp_sigframe vfp;
+#endif
+ /* Something that isn't a valid magic number for any coprocessor. */
+ unsigned long end_magic;
+} __attribute__((__aligned__(8)));
+
+#endif
+
+#endif /* !_ASMARM_UCONTEXT_H */
diff --git a/arch/arm/include/asm/unified.h b/arch/arm/include/asm/unified.h
new file mode 100644
index 0000000000..ce9689118d
--- /dev/null
+++ b/arch/arm/include/asm/unified.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * include/asm-arm/unified.h - Unified Assembler Syntax helper macros
+ *
+ * Copyright (C) 2008 ARM Limited
+ */
+
+#ifndef __ASM_UNIFIED_H
+#define __ASM_UNIFIED_H
+
+#if defined(__ASSEMBLY__)
+ .syntax unified
+#else
+__asm__(".syntax unified");
+#endif
+
+#ifdef CONFIG_CPU_V7M
+#define AR_CLASS(x...)
+#define M_CLASS(x...) x
+#else
+#define AR_CLASS(x...) x
+#define M_CLASS(x...)
+#endif
+
+#ifdef CONFIG_THUMB2_KERNEL
+
+/* The CPSR bit describing the instruction set (Thumb) */
+#define PSR_ISETSTATE PSR_T_BIT
+
+#define ARM(x...)
+#define THUMB(x...) x
+#ifdef __ASSEMBLY__
+#define W(instr) instr.w
+#else
+#define WASM(instr) #instr ".w"
+#endif
+
+#else /* !CONFIG_THUMB2_KERNEL */
+
+/* The CPSR bit describing the instruction set (ARM) */
+#define PSR_ISETSTATE 0
+
+#define ARM(x...) x
+#define THUMB(x...)
+#ifdef __ASSEMBLY__
+#define W(instr) instr
+#else
+#define WASM(instr) #instr
+#endif
+
+#endif /* CONFIG_THUMB2_KERNEL */
+
+#endif /* !__ASM_UNIFIED_H */
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
new file mode 100644
index 0000000000..3676e82cf9
--- /dev/null
+++ b/arch/arm/include/asm/unistd.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/unistd.h
+ *
+ * Copyright (C) 2001-2005 Russell King
+ *
+ * Please forward _all_ changes to this file to rmk@arm.linux.org.uk,
+ * no matter what the change is. Thanks!
+ */
+#ifndef __ASM_ARM_UNISTD_H
+#define __ASM_ARM_UNISTD_H
+
+#include <uapi/asm/unistd.h>
+#include <asm/unistd-nr.h>
+
+#define __ARCH_WANT_NEW_STAT
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SYS_GETHOSTNAME
+#define __ARCH_WANT_SYS_PAUSE
+#define __ARCH_WANT_SYS_GETPGRP
+#define __ARCH_WANT_SYS_NICE
+#define __ARCH_WANT_SYS_SIGPENDING
+#define __ARCH_WANT_SYS_SIGPROCMASK
+#define __ARCH_WANT_SYS_OLD_MMAP
+#define __ARCH_WANT_SYS_OLD_SELECT
+#define __ARCH_WANT_SYS_UTIME32
+
+#if !defined(CONFIG_AEABI) || defined(CONFIG_OABI_COMPAT)
+#define __ARCH_WANT_SYS_TIME32
+#define __ARCH_WANT_SYS_IPC
+#define __ARCH_WANT_SYS_OLDUMOUNT
+#define __ARCH_WANT_SYS_ALARM
+#define __ARCH_WANT_SYS_OLD_GETRLIMIT
+#define __ARCH_WANT_OLD_READDIR
+#define __ARCH_WANT_SYS_SOCKETCALL
+#endif
+#define __ARCH_WANT_SYS_FORK
+#define __ARCH_WANT_SYS_VFORK
+#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_CLONE3
+
+/*
+ * Unimplemented (or alternatively implemented) syscalls
+ */
+#define __IGNORE_fadvise64_64
+
+#ifdef __ARM_EABI__
+/*
+ * The following syscalls are obsolete and no longer available for EABI:
+ * __NR_time
+ * __NR_umount
+ * __NR_stime
+ * __NR_alarm
+ * __NR_utime
+ * __NR_getrlimit
+ * __NR_select
+ * __NR_readdir
+ * __NR_mmap
+ * __NR_socketcall
+ * __NR_syscall
+ * __NR_ipc
+ */
+#define __IGNORE_getrlimit
+#endif
+
+#endif /* __ASM_ARM_UNISTD_H */
diff --git a/arch/arm/include/asm/unwind.h b/arch/arm/include/asm/unwind.h
new file mode 100644
index 0000000000..d60b09a5ac
--- /dev/null
+++ b/arch/arm/include/asm/unwind.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/unwind.h
+ *
+ * Copyright (C) 2008 ARM Limited
+ */
+
+#ifndef __ASM_UNWIND_H
+#define __ASM_UNWIND_H
+
+#ifndef __ASSEMBLY__
+
+/* Unwind reason code according to the ARM EABI documents */
+enum unwind_reason_code {
+ URC_OK = 0, /* operation completed successfully */
+ URC_CONTINUE_UNWIND = 8,
+ URC_FAILURE = 9 /* unspecified failure of some kind */
+};
+
+struct unwind_idx {
+ unsigned long addr_offset;
+ unsigned long insn;
+};
+
+struct unwind_table {
+ struct list_head list;
+ struct list_head mod_list;
+ const struct unwind_idx *start;
+ const struct unwind_idx *origin;
+ const struct unwind_idx *stop;
+ unsigned long begin_addr;
+ unsigned long end_addr;
+};
+
+extern struct unwind_table *unwind_table_add(unsigned long start,
+ unsigned long size,
+ unsigned long text_addr,
+ unsigned long text_size);
+extern void unwind_table_del(struct unwind_table *tab);
+extern void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk,
+ const char *loglvl);
+
+void __aeabi_unwind_cpp_pr0(void);
+void __aeabi_unwind_cpp_pr1(void);
+void __aeabi_unwind_cpp_pr2(void);
+
+#endif /* !__ASSEMBLY__ */
+
+#ifdef CONFIG_ARM_UNWIND
+#define UNWIND(code...) code
+#else
+#define UNWIND(code...)
+#endif
+
+#endif /* __ASM_UNWIND_H */
diff --git a/arch/arm/include/asm/uprobes.h b/arch/arm/include/asm/uprobes.h
new file mode 100644
index 0000000000..6a61b28749
--- /dev/null
+++ b/arch/arm/include/asm/uprobes.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Rabin Vincent <rabin at rab.in>
+ */
+
+#ifndef _ASM_UPROBES_H
+#define _ASM_UPROBES_H
+
+#include <asm/probes.h>
+#include <asm/opcodes.h>
+
+typedef u32 uprobe_opcode_t;
+
+#define MAX_UINSN_BYTES 4
+#define UPROBE_XOL_SLOT_BYTES 64
+
+#define UPROBE_SWBP_ARM_INSN 0xe7f001f9
+#define UPROBE_SS_ARM_INSN 0xe7f001fa
+#define UPROBE_SWBP_INSN __opcode_to_mem_arm(UPROBE_SWBP_ARM_INSN)
+#define UPROBE_SWBP_INSN_SIZE 4
+
+struct arch_uprobe_task {
+ u32 backup;
+ unsigned long saved_trap_no;
+};
+
+struct arch_uprobe {
+ u8 insn[MAX_UINSN_BYTES];
+ unsigned long ixol[2];
+ uprobe_opcode_t bpinsn;
+ bool simulate;
+ u32 pcreg;
+ void (*prehandler)(struct arch_uprobe *auprobe,
+ struct arch_uprobe_task *autask,
+ struct pt_regs *regs);
+ void (*posthandler)(struct arch_uprobe *auprobe,
+ struct arch_uprobe_task *autask,
+ struct pt_regs *regs);
+ struct arch_probes_insn asi;
+};
+
+#endif
diff --git a/arch/arm/include/asm/user.h b/arch/arm/include/asm/user.h
new file mode 100644
index 0000000000..167d44b550
--- /dev/null
+++ b/arch/arm/include/asm/user.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ARM_USER_H
+#define _ARM_USER_H
+
+#include <asm/page.h>
+#include <asm/ptrace.h>
+/* Core file format: The core file is written in such a way that gdb
+ can understand it and provide useful information to the user (under
+ linux we use the 'trad-core' bfd). There are quite a number of
+ obstacles to being able to view the contents of the floating point
+ registers, and until these are solved you will not be able to view the
+ contents of them. Actually, you can read in the core file and look at
+ the contents of the user struct to find out what the floating point
+ registers contain.
+ The actual file contents are as follows:
+ UPAGE: 1 page consisting of a user struct that tells gdb what is present
+ in the file. Directly after this is a copy of the task_struct, which
+ is currently not used by gdb, but it may come in useful at some point.
+ All of the registers are stored as part of the upage. The upage should
+ always be only one page.
+ DATA: The data area is stored. We use current->end_text to
+ current->brk to pick up all of the user variables, plus any memory
+ that may have been malloced. No attempt is made to determine if a page
+ is demand-zero or if a page is totally unused, we just cover the entire
+ range. All of the addresses are rounded in such a way that an integral
+ number of pages is written.
+ STACK: We need the stack information in order to get a meaningful
+ backtrace. We need to write the data from (sp) to
+ current->start_stack, so we round each of these off in order to be able
+ to write an integral number of pages.
+ The minimum core file size is 3 pages, or 12288 bytes.
+*/
+
+struct user_fp {
+ struct fp_reg {
+ unsigned int sign1:1;
+ unsigned int unused:15;
+ unsigned int sign2:1;
+ unsigned int exponent:14;
+ unsigned int j:1;
+ unsigned int mantissa1:31;
+ unsigned int mantissa0:32;
+ } fpregs[8];
+ unsigned int fpsr:32;
+ unsigned int fpcr:32;
+ unsigned char ftype[8];
+ unsigned int init_flag;
+};
+
+/* When the kernel dumps core, it starts by dumping the user struct -
+ this will be used by gdb to figure out where the data and stack segments
+ are within the file, and what virtual addresses to use. */
+struct user{
+/* We start with the registers, to mimic the way that "memory" is returned
+ from the ptrace(3,...) function. */
+ struct pt_regs regs; /* Where the registers are actually stored */
+/* ptrace does not yet supply these. Someday.... */
+  int u_fpvalid;		/* True if the math co-processor is being */
+				/* used; not yet filled in by ptrace. */
+/* The rest of this junk is to help gdb figure out what goes where */
+ unsigned long int u_tsize; /* Text segment size (pages). */
+ unsigned long int u_dsize; /* Data segment size (pages). */
+ unsigned long int u_ssize; /* Stack segment size (pages). */
+ unsigned long start_code; /* Starting virtual address of text. */
+ unsigned long start_stack; /* Starting virtual address of stack area.
+ This is actually the bottom of the stack,
+ the top of the stack is always found in the
+ sp register. */
+ long int signal; /* Signal that caused the core dump. */
+ int reserved; /* No longer used */
+ unsigned long u_ar0; /* Used by gdb to help find the values for */
+ /* the registers. */
+ unsigned long magic; /* To uniquely identify a core file */
+ char u_comm[32]; /* User command that was responsible */
+ int u_debugreg[8]; /* No longer used */
+ struct user_fp u_fp; /* FP state */
+ struct user_fp_struct * u_fp0;/* Used by gdb to help find the values for */
+ /* the FP registers. */
+};
+
+/*
+ * User specific VFP registers. If only VFPv2 is present, registers 16 to 31
+ * are ignored by the ptrace system call and the signal handler.
+ */
+struct user_vfp {
+ unsigned long long fpregs[32];
+ unsigned long fpscr;
+};
+
+/*
+ * VFP exception registers exposed to user space during signal delivery.
+ * Fields not relevant to the current VFP architecture are ignored.
+ */
+struct user_vfp_exc {
+ unsigned long fpexc;
+ unsigned long fpinst;
+ unsigned long fpinst2;
+};
+
+#endif /* _ARM_USER_H */
diff --git a/arch/arm/include/asm/v7m.h b/arch/arm/include/asm/v7m.h
new file mode 100644
index 0000000000..4512f7e191
--- /dev/null
+++ b/arch/arm/include/asm/v7m.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common defines for v7m cpus
+ */
+#define V7M_SCS_ICTR IOMEM(0xe000e004)
+#define V7M_SCS_ICTR_INTLINESNUM_MASK 0x0000000f
+
+#define BASEADDR_V7M_SCB IOMEM(0xe000ed00)
+
+#define V7M_SCB_CPUID 0x00
+
+#define V7M_SCB_ICSR 0x04
+#define V7M_SCB_ICSR_PENDSVSET (1 << 28)
+#define V7M_SCB_ICSR_PENDSVCLR (1 << 27)
+#define V7M_SCB_ICSR_RETTOBASE (1 << 11)
+#define V7M_SCB_ICSR_VECTACTIVE 0x000001ff
+
+#define V7M_SCB_VTOR 0x08
+
+#define V7M_SCB_AIRCR 0x0c
+#define V7M_SCB_AIRCR_VECTKEY (0x05fa << 16)
+#define V7M_SCB_AIRCR_SYSRESETREQ (1 << 2)
+
+#define V7M_SCB_SCR 0x10
+#define V7M_SCB_SCR_SLEEPDEEP (1 << 2)
+
+#define V7M_SCB_CCR 0x14
+#define V7M_SCB_CCR_STKALIGN (1 << 9)
+#define V7M_SCB_CCR_DC (1 << 16)
+#define V7M_SCB_CCR_IC (1 << 17)
+#define V7M_SCB_CCR_BP (1 << 18)
+
+#define V7M_SCB_SHPR2 0x1c
+#define V7M_SCB_SHPR3 0x20
+
+#define V7M_SCB_SHCSR 0x24
+#define V7M_SCB_SHCSR_USGFAULTENA (1 << 18)
+#define V7M_SCB_SHCSR_BUSFAULTENA (1 << 17)
+#define V7M_SCB_SHCSR_MEMFAULTENA (1 << 16)
+
+#define V7M_xPSR_FRAMEPTRALIGN 0x00000200
+#define V7M_xPSR_EXCEPTIONNO V7M_SCB_ICSR_VECTACTIVE
+
+/*
+ * Branching to an address with bits [31:28] == 0xf triggers an exception
+ * return. Bits [27:5] are reserved (SBOP). If the processor implements the
+ * FP extension, bit [4] defines whether the exception frame has space
+ * allocated for FP state information (it is SBOP otherwise). Bit [3] defines
+ * the mode that is returned to (0 -> handler mode; 1 -> thread mode). Bit [2]
+ * defines which sp is used (0 -> msp; 1 -> psp). Bits [1:0] are fixed to 0b01.
+ */
+#define EXC_RET_STACK_MASK 0x00000004
+#define EXC_RET_THREADMODE_PROCESSSTACK (3 << 2)
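+
+/*
+ * Worked example: EXC_RET_THREADMODE_PROCESSSTACK sets bits [3:2] to 0b11,
+ * i.e. return to thread mode (bit 3 = 1) using the process stack pointer
+ * PSP (bit 2 = 1).
+ */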
+
+/* Cache related definitions */
+
+#define V7M_SCB_CLIDR 0x78 /* Cache Level ID register */
+#define V7M_SCB_CTR 0x7c /* Cache Type register */
+#define V7M_SCB_CCSIDR 0x80 /* Cache size ID register */
+#define V7M_SCB_CSSELR 0x84 /* Cache size selection register */
+
+/* Memory-mapped MPU registers for M-class */
+#define MPU_TYPE 0x90
+#define MPU_CTRL 0x94
+#define MPU_CTRL_ENABLE 1
+#define MPU_CTRL_PRIVDEFENA (1 << 2)
+
+#define PMSAv7_RNR 0x98
+#define PMSAv7_RBAR 0x9c
+#define PMSAv7_RASR 0xa0
+
+#define PMSAv8_RNR 0x98
+#define PMSAv8_RBAR 0x9c
+#define PMSAv8_RLAR 0xa0
+#define PMSAv8_RBAR_A(n) (PMSAv8_RBAR + 8*(n))
+#define PMSAv8_RLAR_A(n) (PMSAv8_RLAR + 8*(n))
+#define PMSAv8_MAIR0 0xc0
+#define PMSAv8_MAIR1 0xc4
+
+/* Cache operations */
+#define V7M_SCB_ICIALLU 0x250 /* I-cache invalidate all to PoU */
+#define V7M_SCB_ICIMVAU 0x258 /* I-cache invalidate by MVA to PoU */
+#define V7M_SCB_DCIMVAC 0x25c /* D-cache invalidate by MVA to PoC */
+#define V7M_SCB_DCISW 0x260 /* D-cache invalidate by set-way */
+#define V7M_SCB_DCCMVAU 0x264 /* D-cache clean by MVA to PoU */
+#define V7M_SCB_DCCMVAC 0x268 /* D-cache clean by MVA to PoC */
+#define V7M_SCB_DCCSW 0x26c /* D-cache clean by set-way */
+#define V7M_SCB_DCCIMVAC 0x270 /* D-cache clean and invalidate by MVA to PoC */
+#define V7M_SCB_DCCISW 0x274 /* D-cache clean and invalidate by set-way */
+#define V7M_SCB_BPIALL		0x278		/* Branch predictor invalidate all */
+
+#ifndef __ASSEMBLY__
+
+enum reboot_mode;
+
+void armv7m_restart(enum reboot_mode mode, const char *cmd);
+
+#endif /* __ASSEMBLY__ */
diff --git a/arch/arm/include/asm/vdso.h b/arch/arm/include/asm/vdso.h
new file mode 100644
index 0000000000..422c3afa80
--- /dev/null
+++ b/arch/arm/include/asm/vdso.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSO_H
+#define __ASM_VDSO_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+struct mm_struct;
+
+#ifdef CONFIG_VDSO
+
+void arm_install_vdso(struct mm_struct *mm, unsigned long addr);
+
+extern unsigned int vdso_total_pages;
+
+#else /* CONFIG_VDSO */
+
+static inline void arm_install_vdso(struct mm_struct *mm, unsigned long addr)
+{
+}
+
+#define vdso_total_pages 0
+
+#endif /* CONFIG_VDSO */
+
+int __vdso_clock_gettime(clockid_t clock, struct old_timespec32 *ts);
+int __vdso_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts);
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz);
+int __vdso_clock_getres(clockid_t clock_id, struct old_timespec32 *res);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_VDSO_H */
diff --git a/arch/arm/include/asm/vdso/clocksource.h b/arch/arm/include/asm/vdso/clocksource.h
new file mode 100644
index 0000000000..50c0b19fb7
--- /dev/null
+++ b/arch/arm/include/asm/vdso/clocksource.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSOCLOCKSOURCE_H
+#define __ASM_VDSOCLOCKSOURCE_H
+
+#define VDSO_ARCH_CLOCKMODES \
+ VDSO_CLOCKMODE_ARCHTIMER
+
+#endif /* __ASM_VDSOCLOCKSOURCE_H */
diff --git a/arch/arm/include/asm/vdso/cp15.h b/arch/arm/include/asm/vdso/cp15.h
new file mode 100644
index 0000000000..bed16fa186
--- /dev/null
+++ b/arch/arm/include/asm/vdso/cp15.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 ARM Ltd.
+ */
+#ifndef __ASM_VDSO_CP15_H
+#define __ASM_VDSO_CP15_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_CPU_CP15
+
+#include <linux/stringify.h>
+
+#define __ACCESS_CP15(CRn, Op1, CRm, Op2) \
+ "mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32
+#define __ACCESS_CP15_64(Op1, CRm) \
+ "mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64
+
+#define __read_sysreg(r, w, c, t) ({ \
+ t __val; \
+ asm volatile(r " " c : "=r" (__val)); \
+ __val; \
+})
+#define read_sysreg(...) __read_sysreg(__VA_ARGS__)
+
+#define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v)))
+#define write_sysreg(v, ...) __write_sysreg(v, __VA_ARGS__)
+
+#define BPIALL __ACCESS_CP15(c7, 0, c5, 6)
+#define ICIALLU __ACCESS_CP15(c7, 0, c5, 0)
+
+#define CNTVCT __ACCESS_CP15_64(1, c14)
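+
+/*
+ * Usage sketch: the 64-bit virtual counter can be read via the accessors
+ * above, e.g. "u64 cnt = read_sysreg(CNTVCT);", which emits
+ * "mrrc p15, 1, <lo>, <hi>, c14".
+ */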
+
+#endif /* CONFIG_CPU_CP15 */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_CP15_H */
diff --git a/arch/arm/include/asm/vdso/gettimeofday.h b/arch/arm/include/asm/vdso/gettimeofday.h
new file mode 100644
index 0000000000..2134cbd546
--- /dev/null
+++ b/arch/arm/include/asm/vdso/gettimeofday.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 ARM Limited
+ */
+#ifndef __ASM_VDSO_GETTIMEOFDAY_H
+#define __ASM_VDSO_GETTIMEOFDAY_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/barrier.h>
+#include <asm/errno.h>
+#include <asm/unistd.h>
+#include <asm/vdso/cp15.h>
+#include <uapi/linux/time.h>
+
+#define VDSO_HAS_CLOCK_GETRES 1
+
+extern struct vdso_data *__get_datapage(void);
+
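+/*
+ * The fallbacks below issue the real system call following the ARM EABI
+ * convention: syscall number in r7, arguments in r0 onwards, trap via
+ * "swi #0", result in r0.
+ */
+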
+static __always_inline int gettimeofday_fallback(
+ struct __kernel_old_timeval *_tv,
+ struct timezone *_tz)
+{
+ register struct timezone *tz asm("r1") = _tz;
+ register struct __kernel_old_timeval *tv asm("r0") = _tv;
+ register long ret asm ("r0");
+ register long nr asm("r7") = __NR_gettimeofday;
+
+ asm volatile(
+ " swi #0\n"
+ : "=r" (ret)
+ : "r" (tv), "r" (tz), "r" (nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline long clock_gettime_fallback(
+ clockid_t _clkid,
+ struct __kernel_timespec *_ts)
+{
+ register struct __kernel_timespec *ts asm("r1") = _ts;
+ register clockid_t clkid asm("r0") = _clkid;
+ register long ret asm ("r0");
+ register long nr asm("r7") = __NR_clock_gettime64;
+
+ asm volatile(
+ " swi #0\n"
+ : "=r" (ret)
+ : "r" (clkid), "r" (ts), "r" (nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline long clock_gettime32_fallback(
+ clockid_t _clkid,
+ struct old_timespec32 *_ts)
+{
+ register struct old_timespec32 *ts asm("r1") = _ts;
+ register clockid_t clkid asm("r0") = _clkid;
+ register long ret asm ("r0");
+ register long nr asm("r7") = __NR_clock_gettime;
+
+ asm volatile(
+ " swi #0\n"
+ : "=r" (ret)
+ : "r" (clkid), "r" (ts), "r" (nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline int clock_getres_fallback(
+ clockid_t _clkid,
+ struct __kernel_timespec *_ts)
+{
+ register struct __kernel_timespec *ts asm("r1") = _ts;
+ register clockid_t clkid asm("r0") = _clkid;
+ register long ret asm ("r0");
+ register long nr asm("r7") = __NR_clock_getres_time64;
+
+ asm volatile(
+ " swi #0\n"
+ : "=r" (ret)
+ : "r" (clkid), "r" (ts), "r" (nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline int clock_getres32_fallback(
+ clockid_t _clkid,
+ struct old_timespec32 *_ts)
+{
+ register struct old_timespec32 *ts asm("r1") = _ts;
+ register clockid_t clkid asm("r0") = _clkid;
+ register long ret asm ("r0");
+ register long nr asm("r7") = __NR_clock_getres;
+
+ asm volatile(
+ " swi #0\n"
+ : "=r" (ret)
+ : "r" (clkid), "r" (ts), "r" (nr)
+ : "memory");
+
+ return ret;
+}
+
+static inline bool arm_vdso_hres_capable(void)
+{
+ return IS_ENABLED(CONFIG_ARM_ARCH_TIMER);
+}
+#define __arch_vdso_hres_capable arm_vdso_hres_capable
+
+static __always_inline u64 __arch_get_hw_counter(int clock_mode,
+ const struct vdso_data *vd)
+{
+#ifdef CONFIG_ARM_ARCH_TIMER
+ u64 cycle_now;
+
+	/*
+	 * The core code checks the mode before calling us, so landing here
+	 * means we raced against a concurrent update. Return something; the
+	 * core will do another round, see the mode change and fall back to
+	 * the syscall.
+	 */
+ if (clock_mode == VDSO_CLOCKMODE_NONE)
+ return 0;
+
+ isb();
+ cycle_now = read_sysreg(CNTVCT);
+
+ return cycle_now;
+#else
+ /* Make GCC happy. This is compiled out anyway */
+ return 0;
+#endif
+}
+
+static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
+{
+ return __get_datapage();
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
diff --git a/arch/arm/include/asm/vdso/processor.h b/arch/arm/include/asm/vdso/processor.h
new file mode 100644
index 0000000000..45efb3ff51
--- /dev/null
+++ b/arch/arm/include/asm/vdso/processor.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 ARM Ltd.
+ */
+#ifndef __ASM_VDSO_PROCESSOR_H
+#define __ASM_VDSO_PROCESSOR_H
+
+#ifndef __ASSEMBLY__
+
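+/*
+ * On ARMv6, and on CPUs affected by erratum 754327 (no automatic store
+ * buffer drain), a tight polling loop may never observe another CPU's
+ * store, so cpu_relax() must include a barrier there.
+ */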
+#if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327)
+#define cpu_relax() \
+ do { \
+ smp_mb(); \
+ __asm__ __volatile__("nop; nop; nop; nop; nop; nop; nop; nop; nop; nop;"); \
+ } while (0)
+#else
+#define cpu_relax() barrier()
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_PROCESSOR_H */
diff --git a/arch/arm/include/asm/vdso/vsyscall.h b/arch/arm/include/asm/vdso/vsyscall.h
new file mode 100644
index 0000000000..47e41ae8cc
--- /dev/null
+++ b/arch/arm/include/asm/vdso/vsyscall.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSO_VSYSCALL_H
+#define __ASM_VDSO_VSYSCALL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/timekeeper_internal.h>
+#include <vdso/datapage.h>
+#include <asm/cacheflush.h>
+
+extern struct vdso_data *vdso_data;
+extern bool cntvct_ok;
+
+/*
+ * Update the vDSO data page to keep in sync with kernel timekeeping.
+ */
+static __always_inline
+struct vdso_data *__arm_get_k_vdso_data(void)
+{
+ return vdso_data;
+}
+#define __arch_get_k_vdso_data __arm_get_k_vdso_data
+
+static __always_inline
+void __arm_sync_vdso_data(struct vdso_data *vdata)
+{
+ flush_dcache_page(virt_to_page(vdata));
+}
+#define __arch_sync_vdso_data __arm_sync_vdso_data
+
+/* The asm-generic header needs to be included after the definitions above */
+#include <asm-generic/vdso/vsyscall.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_VSYSCALL_H */
diff --git a/arch/arm/include/asm/vdso_datapage.h b/arch/arm/include/asm/vdso_datapage.h
new file mode 100644
index 0000000000..bef68f5992
--- /dev/null
+++ b/arch/arm/include/asm/vdso_datapage.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Adapted from arm64 version.
+ *
+ * Copyright (C) 2012 ARM Limited
+ */
+#ifndef __ASM_VDSO_DATAPAGE_H
+#define __ASM_VDSO_DATAPAGE_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+#include <vdso/datapage.h>
+#include <asm/page.h>
+
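+/*
+ * Pads the vdso_data array out to a full page; the kernel maps this page
+ * into user space as the vDSO data page.
+ */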
+union vdso_data_store {
+ struct vdso_data data[CS_BASES];
+ u8 page[PAGE_SIZE];
+};
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_VDSO_DATAPAGE_H */
diff --git a/arch/arm/include/asm/vermagic.h b/arch/arm/include/asm/vermagic.h
new file mode 100644
index 0000000000..62ce94e26a
--- /dev/null
+++ b/arch/arm/include/asm/vermagic.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_VERMAGIC_H
+#define _ASM_VERMAGIC_H
+
+#include <linux/stringify.h>
+
+/*
+ * Add the ARM architecture version to the version magic string
+ */
+#define MODULE_ARCH_VERMAGIC_ARMVSN "ARMv" __stringify(__LINUX_ARM_ARCH__) " "
+
+/* Add __virt_to_phys patching state as well */
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+#define MODULE_ARCH_VERMAGIC_P2V "p2v8 "
+#else
+#define MODULE_ARCH_VERMAGIC_P2V ""
+#endif
+
+/* Add instruction set architecture tag to distinguish ARM/Thumb kernels */
+#ifdef CONFIG_THUMB2_KERNEL
+#define MODULE_ARCH_VERMAGIC_ARMTHUMB "thumb2 "
+#else
+#define MODULE_ARCH_VERMAGIC_ARMTHUMB ""
+#endif
+
+#define MODULE_ARCH_VERMAGIC \
+ MODULE_ARCH_VERMAGIC_ARMVSN \
+ MODULE_ARCH_VERMAGIC_ARMTHUMB \
+ MODULE_ARCH_VERMAGIC_P2V
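+
+/*
+ * For example, a Thumb-2 ARMv7 kernel built with phys-to-virt patching
+ * carries the version magic suffix "ARMv7 thumb2 p2v8 " (illustrative).
+ */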
+
+#endif /* _ASM_VERMAGIC_H */
diff --git a/arch/arm/include/asm/vfp.h b/arch/arm/include/asm/vfp.h
new file mode 100644
index 0000000000..157ea34261
--- /dev/null
+++ b/arch/arm/include/asm/vfp.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * arch/arm/include/asm/vfp.h
+ *
+ * VFP register definitions.
+ * First, the standard VFP set.
+ */
+
+#ifndef __ASM_VFP_H
+#define __ASM_VFP_H
+
+#ifndef CONFIG_AS_VFP_VMRS_FPINST
+#define FPSID cr0
+#define FPSCR cr1
+#define MVFR1 cr6
+#define MVFR0 cr7
+#define FPEXC cr8
+#define FPINST cr9
+#define FPINST2 cr10
+#endif
+
+/* FPSID bits */
+#define FPSID_IMPLEMENTER_BIT (24)
+#define FPSID_IMPLEMENTER_MASK (0xff << FPSID_IMPLEMENTER_BIT)
+#define FPSID_SOFTWARE (1<<23)
+#define FPSID_FORMAT_BIT (21)
+#define FPSID_FORMAT_MASK (0x3 << FPSID_FORMAT_BIT)
+#define FPSID_NODOUBLE (1<<20)
+#define FPSID_ARCH_BIT (16)
+#define FPSID_ARCH_MASK (0xF << FPSID_ARCH_BIT)
+#define FPSID_CPUID_ARCH_MASK (0x7F << FPSID_ARCH_BIT)
+#define FPSID_PART_BIT (8)
+#define FPSID_PART_MASK (0xFF << FPSID_PART_BIT)
+#define FPSID_VARIANT_BIT (4)
+#define FPSID_VARIANT_MASK (0xF << FPSID_VARIANT_BIT)
+#define FPSID_REV_BIT (0)
+#define FPSID_REV_MASK (0xF << FPSID_REV_BIT)
+
+/* FPEXC bits */
+#define FPEXC_EX (1 << 31)
+#define FPEXC_EN (1 << 30)
+#define FPEXC_DEX (1 << 29)
+#define FPEXC_FP2V (1 << 28)
+#define FPEXC_VV (1 << 27)
+#define FPEXC_TFV (1 << 26)
+#define FPEXC_LENGTH_BIT (8)
+#define FPEXC_LENGTH_MASK (7 << FPEXC_LENGTH_BIT)
+#define FPEXC_IDF (1 << 7)
+#define FPEXC_IXF (1 << 4)
+#define FPEXC_UFF (1 << 3)
+#define FPEXC_OFF (1 << 2)
+#define FPEXC_DZF (1 << 1)
+#define FPEXC_IOF (1 << 0)
+#define FPEXC_TRAP_MASK (FPEXC_IDF|FPEXC_IXF|FPEXC_UFF|FPEXC_OFF|FPEXC_DZF|FPEXC_IOF)
+
+/* FPSCR bits */
+#define FPSCR_DEFAULT_NAN (1<<25)
+#define FPSCR_FLUSHTOZERO (1<<24)
+#define FPSCR_ROUND_NEAREST (0<<22)
+#define FPSCR_ROUND_PLUSINF (1<<22)
+#define FPSCR_ROUND_MINUSINF (2<<22)
+#define FPSCR_ROUND_TOZERO (3<<22)
+#define FPSCR_RMODE_BIT (22)
+#define FPSCR_RMODE_MASK (3 << FPSCR_RMODE_BIT)
+#define FPSCR_STRIDE_BIT (20)
+#define FPSCR_STRIDE_MASK (3 << FPSCR_STRIDE_BIT)
+#define FPSCR_LENGTH_BIT (16)
+#define FPSCR_LENGTH_MASK (7 << FPSCR_LENGTH_BIT)
+#define FPSCR_IOE (1<<8)
+#define FPSCR_DZE (1<<9)
+#define FPSCR_OFE (1<<10)
+#define FPSCR_UFE (1<<11)
+#define FPSCR_IXE (1<<12)
+#define FPSCR_IDE (1<<15)
+#define FPSCR_IOC (1<<0)
+#define FPSCR_DZC (1<<1)
+#define FPSCR_OFC (1<<2)
+#define FPSCR_UFC (1<<3)
+#define FPSCR_IXC (1<<4)
+#define FPSCR_IDC (1<<7)
+
+/* MVFR0 bits */
+#define MVFR0_A_SIMD_BIT (0)
+#define MVFR0_A_SIMD_MASK (0xf << MVFR0_A_SIMD_BIT)
+#define MVFR0_SP_BIT (4)
+#define MVFR0_SP_MASK (0xf << MVFR0_SP_BIT)
+#define MVFR0_DP_BIT (8)
+#define MVFR0_DP_MASK (0xf << MVFR0_DP_BIT)
+
+/* MVFR1 bits */
+#define MVFR1_ASIMDHP_BIT (20)
+#define MVFR1_ASIMDHP_MASK (0xf << MVFR1_ASIMDHP_BIT)
+#define MVFR1_FPHP_BIT (24)
+#define MVFR1_FPHP_MASK (0xf << MVFR1_FPHP_BIT)
+
+/* Bit patterns for decoding the packaged operation descriptors */
+#define VFPOPDESC_LENGTH_BIT (9)
+#define VFPOPDESC_LENGTH_MASK (0x07 << VFPOPDESC_LENGTH_BIT)
+#define VFPOPDESC_UNUSED_BIT (24)
+#define VFPOPDESC_UNUSED_MASK (0xFF << VFPOPDESC_UNUSED_BIT)
+#define VFPOPDESC_OPDESC_MASK (~(VFPOPDESC_LENGTH_MASK | VFPOPDESC_UNUSED_MASK))
+
+#ifndef __ASSEMBLY__
+void vfp_disable(void);
+#endif
+
+#endif /* __ASM_VFP_H */
diff --git a/arch/arm/include/asm/vfpmacros.h b/arch/arm/include/asm/vfpmacros.h
new file mode 100644
index 0000000000..ba0d4cb537
--- /dev/null
+++ b/arch/arm/include/asm/vfpmacros.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * arch/arm/include/asm/vfpmacros.h
+ *
+ * Assembler-only file containing VFP macros and register definitions.
+ */
+#include <asm/hwcap.h>
+
+#include <asm/vfp.h>
+
+#ifdef CONFIG_AS_VFP_VMRS_FPINST
+ .macro VFPFMRX, rd, sysreg, cond
+ vmrs\cond \rd, \sysreg
+ .endm
+
+ .macro VFPFMXR, sysreg, rd, cond
+ vmsr\cond \sysreg, \rd
+ .endm
+#else
+	@ Macros to allow building with old toolchains (with no VFP support)
+ .macro VFPFMRX, rd, sysreg, cond
+ MRC\cond p10, 7, \rd, \sysreg, cr0, 0 @ FMRX \rd, \sysreg
+ .endm
+
+ .macro VFPFMXR, sysreg, rd, cond
+ MCR\cond p10, 7, \rd, \sysreg, cr0, 0 @ FMXR \sysreg, \rd
+ .endm
+#endif
+
+ @ read all the working registers back into the VFP
+ .macro VFPFLDMIA, base, tmp
+ .fpu vfpv2
+#if __LINUX_ARM_ARCH__ < 6
+ fldmiax \base!, {d0-d15}
+#else
+ vldmia \base!, {d0-d15}
+#endif
+#ifdef CONFIG_VFPv3
+ .fpu vfpv3
+#if __LINUX_ARM_ARCH__ <= 6
+ ldr \tmp, =elf_hwcap @ may not have MVFR regs
+ ldr \tmp, [\tmp, #0]
+ tst \tmp, #HWCAP_VFPD32
+ vldmiane \base!, {d16-d31}
+ addeq \base, \base, #32*4 @ step over unused register space
+#else
+ VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
+ and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field
+ cmp \tmp, #2 @ 32 x 64bit registers?
+ vldmiaeq \base!, {d16-d31}
+ addne \base, \base, #32*4 @ step over unused register space
+#endif
+#endif
+ .endm
+
+ @ write all the working registers out of the VFP
+	.macro	VFPFSTMIA, base, tmp
+	.fpu	vfpv2
+#if __LINUX_ARM_ARCH__ < 6
+ fstmiax \base!, {d0-d15}
+#else
+ vstmia \base!, {d0-d15}
+#endif
+#ifdef CONFIG_VFPv3
+ .fpu vfpv3
+#if __LINUX_ARM_ARCH__ <= 6
+ ldr \tmp, =elf_hwcap @ may not have MVFR regs
+ ldr \tmp, [\tmp, #0]
+ tst \tmp, #HWCAP_VFPD32
+ vstmiane \base!, {d16-d31}
+ addeq \base, \base, #32*4 @ step over unused register space
+#else
+ VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
+ and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field
+ cmp \tmp, #2 @ 32 x 64bit registers?
+ vstmiaeq \base!, {d16-d31}
+ addne \base, \base, #32*4 @ step over unused register space
+#endif
+#endif
+ .endm
diff --git a/arch/arm/include/asm/vga.h b/arch/arm/include/asm/vga.h
new file mode 100644
index 0000000000..7c0bee5785
--- /dev/null
+++ b/arch/arm/include/asm/vga.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ASMARM_VGA_H
+#define ASMARM_VGA_H
+
+#include <linux/io.h>
+
+extern unsigned long vga_base;
+
+#define VGA_MAP_MEM(x,s) (vga_base + (x))
+
+#define vga_readb(x)	(*((volatile unsigned char *)(x)))
+#define vga_writeb(x,y)	(*((volatile unsigned char *)(y)) = (x))
+
+#endif
diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h
new file mode 100644
index 0000000000..dd9697b2bd
--- /dev/null
+++ b/arch/arm/include/asm/virt.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2012 Linaro Limited.
+ */
+
+#ifndef VIRT_H
+#define VIRT_H
+
+#include <asm/ptrace.h>
+
+/*
+ * Flag indicating that the kernel was not entered in the same mode on every
+ * CPU. The zImage loader stashes this value in an SPSR, so we need an
+ * architecturally defined flag bit here.
+ */
+#define BOOT_CPU_MODE_MISMATCH PSR_N_BIT
+
+#ifndef __ASSEMBLY__
+#include <asm/cacheflush.h>
+
+#ifdef CONFIG_ARM_VIRT_EXT
+/*
+ * __boot_cpu_mode records what mode the primary CPU was booted in.
+ * A correctly-implemented bootloader must start all CPUs in the same mode:
+ * if it fails to do this, the flag BOOT_CPU_MODE_MISMATCH is set to indicate
+ * that some CPU(s) were booted in a different mode.
+ *
+ * This allows the kernel to flag an error when the secondaries have come up.
+ */
+extern int __boot_cpu_mode;
+
+static inline void sync_boot_mode(void)
+{
+ /*
+ * As secondaries write to __boot_cpu_mode with caches disabled, we
+ * must flush the corresponding cache entries to ensure the visibility
+ * of their writes.
+ */
+ sync_cache_r(&__boot_cpu_mode);
+}
+
+#else
+#define __boot_cpu_mode (SVC_MODE)
+#define sync_boot_mode()
+#endif
+
+#ifndef ZIMAGE
+void hyp_mode_check(void);
+
+/* Reports the availability of HYP mode */
+static inline bool is_hyp_mode_available(void)
+{
+ return ((__boot_cpu_mode & MODE_MASK) == HYP_MODE &&
+ !(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH));
+}
+
+/* Check if the bootloader has booted CPUs in different modes */
+static inline bool is_hyp_mode_mismatched(void)
+{
+ return !!(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH);
+}
+
+static inline bool is_kernel_in_hyp_mode(void)
+{
+ return false;
+}
+
+#endif
+
+#else
+
+/* Only assembly code should need those */
+
+#define HVC_SET_VECTORS 0
+#define HVC_SOFT_RESTART 1
+
+#endif /* __ASSEMBLY__ */
+
+#define HVC_STUB_ERR 0xbadca11
+
+#endif /* ! VIRT_H */
diff --git a/arch/arm/include/asm/vmalloc.h b/arch/arm/include/asm/vmalloc.h
new file mode 100644
index 0000000000..a9b3718b86
--- /dev/null
+++ b/arch/arm/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
+#ifndef _ASM_ARM_VMALLOC_H
+#define _ASM_ARM_VMALLOC_H
+
+#endif /* _ASM_ARM_VMALLOC_H */
diff --git a/arch/arm/include/asm/vmlinux.lds.h b/arch/arm/include/asm/vmlinux.lds.h
new file mode 100644
index 0000000000..4c8632d5c4
--- /dev/null
+++ b/arch/arm/include/asm/vmlinux.lds.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <asm-generic/vmlinux.lds.h>
+
+#ifdef CONFIG_HOTPLUG_CPU
+#define ARM_CPU_DISCARD(x)
+#define ARM_CPU_KEEP(x) x
+#else
+#define ARM_CPU_DISCARD(x) x
+#define ARM_CPU_KEEP(x)
+#endif
+
+#if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
+ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_JUMP_LABEL)
+#define ARM_EXIT_KEEP(x) x
+#define ARM_EXIT_DISCARD(x)
+#else
+#define ARM_EXIT_KEEP(x)
+#define ARM_EXIT_DISCARD(x) x
+#endif
+
+#ifdef CONFIG_MMU
+#define ARM_MMU_KEEP(x) x
+#define ARM_MMU_DISCARD(x)
+#else
+#define ARM_MMU_KEEP(x)
+#define ARM_MMU_DISCARD(x) x
+#endif
+
+/*
+ * ld.lld does not support NOCROSSREFS:
+ * https://github.com/ClangBuiltLinux/linux/issues/1609
+ */
+#ifdef CONFIG_LD_IS_LLD
+#define NOCROSSREFS
+#endif
+
+/* Set start/end symbol names to the LMA for the section */
+#define ARM_LMA(sym, section) \
+ sym##_start = LOADADDR(section); \
+ sym##_end = LOADADDR(section) + SIZEOF(section)
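+
+/*
+ * For instance, ARM_LMA(__vectors, .vectors) expands to:
+ *	__vectors_start = LOADADDR(.vectors);
+ *	__vectors_end = LOADADDR(.vectors) + SIZEOF(.vectors)
+ */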
+
+#define PROC_INFO \
+ . = ALIGN(4); \
+ __proc_info_begin = .; \
+ *(.proc.info.init) \
+ __proc_info_end = .;
+
+#define IDMAP_TEXT \
+ ALIGN_FUNCTION(); \
+ __idmap_text_start = .; \
+ *(.idmap.text) \
+	__idmap_text_end = .;
+
+#define ARM_DISCARD \
+ *(.ARM.exidx.exit.text) \
+ *(.ARM.extab.exit.text) \
+ *(.ARM.exidx.text.exit) \
+ *(.ARM.extab.text.exit) \
+ ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text)) \
+ ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text)) \
+ ARM_EXIT_DISCARD(EXIT_TEXT) \
+ ARM_EXIT_DISCARD(EXIT_DATA) \
+ EXIT_CALL \
+ ARM_MMU_DISCARD(*(.text.fixup)) \
+ ARM_MMU_DISCARD(*(__ex_table)) \
+ COMMON_DISCARDS
+
+/*
+ * Sections that should stay zero-sized; it is safer to check this
+ * explicitly than to discard them blindly.
+ */
+#define ARM_ASSERTS \
+ .plt : { \
+		*(.iplt) *(.rel.iplt) *(.igot.plt) \
+ } \
+ ASSERT(SIZEOF(.plt) == 0, \
+ "Unexpected run-time procedure linkages detected!")
+
+#define ARM_DETAILS \
+ ELF_DETAILS \
+ .ARM.attributes 0 : { *(.ARM.attributes) }
+
+#define ARM_STUBS_TEXT \
+ *(.gnu.warning) \
+ *(.glue_7) \
+ *(.glue_7t) \
+ *(.vfp11_veneer) \
+ *(.v4_bx)
+
+#define ARM_TEXT \
+ IDMAP_TEXT \
+ __entry_text_start = .; \
+ *(.entry.text) \
+ __entry_text_end = .; \
+ IRQENTRY_TEXT \
+ SOFTIRQENTRY_TEXT \
+ TEXT_TEXT \
+ SCHED_TEXT \
+ LOCK_TEXT \
+ KPROBES_TEXT \
+ ARM_STUBS_TEXT \
+ . = ALIGN(4); \
+ *(.got) /* Global offset table */ \
+ ARM_CPU_KEEP(PROC_INFO)
+
+/* Stack unwinding tables */
+#define ARM_UNWIND_SECTIONS \
+ . = ALIGN(8); \
+ .ARM.unwind_idx : { \
+ __start_unwind_idx = .; \
+ *(.ARM.exidx*) \
+ __stop_unwind_idx = .; \
+ } \
+ .ARM.unwind_tab : { \
+ __start_unwind_tab = .; \
+ *(.ARM.extab*) \
+ __stop_unwind_tab = .; \
+ }
+
+/*
+ * The vectors and stubs are relocatable code, and the
+ * only thing that matters is their relative offsets
+ */
+#define ARM_VECTORS \
+ __vectors_lma = .; \
+ OVERLAY 0xffff0000 : NOCROSSREFS AT(__vectors_lma) { \
+ .vectors { \
+ *(.vectors) \
+ } \
+ .vectors.bhb.loop8 { \
+ *(.vectors.bhb.loop8) \
+ } \
+ .vectors.bhb.bpiall { \
+ *(.vectors.bhb.bpiall) \
+ } \
+ } \
+ ARM_LMA(__vectors, .vectors); \
+ ARM_LMA(__vectors_bhb_loop8, .vectors.bhb.loop8); \
+ ARM_LMA(__vectors_bhb_bpiall, .vectors.bhb.bpiall); \
+ . = __vectors_lma + SIZEOF(.vectors) + \
+ SIZEOF(.vectors.bhb.loop8) + \
+ SIZEOF(.vectors.bhb.bpiall); \
+ \
+ __stubs_lma = .; \
+ .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_lma) { \
+ *(.stubs) \
+ } \
+ ARM_LMA(__stubs, .stubs); \
+ . = __stubs_lma + SIZEOF(.stubs); \
+ \
+ PROVIDE(vector_fiq_offset = vector_fiq - ADDR(.vectors));
+
+#define ARM_TCM \
+ __itcm_start = ALIGN(4); \
+ .text_itcm ITCM_OFFSET : AT(__itcm_start - LOAD_OFFSET) { \
+ __sitcm_text = .; \
+ *(.tcm.text) \
+ *(.tcm.rodata) \
+ . = ALIGN(4); \
+ __eitcm_text = .; \
+ } \
+ . = __itcm_start + SIZEOF(.text_itcm); \
+ \
+ __dtcm_start = .; \
+ .data_dtcm DTCM_OFFSET : AT(__dtcm_start - LOAD_OFFSET) { \
+ __sdtcm_data = .; \
+ *(.tcm.data) \
+ . = ALIGN(4); \
+ __edtcm_data = .; \
+ } \
+ . = __dtcm_start + SIZEOF(.data_dtcm);
diff --git a/arch/arm/include/asm/word-at-a-time.h b/arch/arm/include/asm/word-at-a-time.h
new file mode 100644
index 0000000000..352ab21352
--- /dev/null
+++ b/arch/arm/include/asm/word-at-a-time.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_ARM_WORD_AT_A_TIME_H
+#define __ASM_ARM_WORD_AT_A_TIME_H
+
+#ifndef __ARMEB__
+
+/*
+ * Little-endian word-at-a-time zero byte handling.
+ * Heavily based on the x86 algorithm.
+ */
+#include <linux/kernel.h>
+
+struct word_at_a_time {
+ const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits,
+ const struct word_at_a_time *c)
+{
+ unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+ *bits = mask;
+ return mask;
+}
+
+#define prep_zero_mask(a, bits, c) (bits)
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+ bits = (bits - 1) & ~bits;
+ return bits >> 7;
+}
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+ unsigned long ret;
+
+#if __LINUX_ARM_ARCH__ >= 5
+ /* We have clz available. */
+ ret = fls(mask) >> 3;
+#else
+ /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
+ ret = (0x0ff0001 + mask) >> 23;
+ /* Fix the 1 for 00 case */
+ ret &= mask;
+#endif
+
+ return ret;
+}
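+
+/*
+ * Worked example (little-endian): for a = 0x44330061 (zero in byte 1),
+ * has_zero() yields ((a - 0x01010101) & ~a) & 0x80808080 = 0x00008000;
+ * create_zero_mask() turns that into 0x000000ff (ones covering the bytes
+ * below the zero), and find_zero() then reports byte index 1.
+ */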
+
+#define zero_bytemask(mask) (mask)
+
+#else /* __ARMEB__ */
+#include <asm-generic/word-at-a-time.h>
+#endif
+
+#ifdef CONFIG_DCACHE_WORD_ACCESS
+
+/*
+ * Load an unaligned word from kernel space.
+ *
+ * In the (very unlikely) case of the word being a page-crosser
+ * and the next page not being mapped, take the exception and
+ * return zeroes in the non-existing part.
+ */
+static inline unsigned long load_unaligned_zeropad(const void *addr)
+{
+ unsigned long ret, offset;
+
+ /* Load word from unaligned pointer addr */
+ asm(
+ "1: ldr %0, [%2]\n"
+ "2:\n"
+ " .pushsection .text.fixup,\"ax\"\n"
+ " .align 2\n"
+ "3: and %1, %2, #0x3\n"
+ " bic %2, %2, #0x3\n"
+ " ldr %0, [%2]\n"
+ " lsl %1, %1, #0x3\n"
+#ifndef __ARMEB__
+ " lsr %0, %0, %1\n"
+#else
+ " lsl %0, %0, %1\n"
+#endif
+ " b 2b\n"
+ " .popsection\n"
+ " .pushsection __ex_table,\"a\"\n"
+ " .align 3\n"
+ " .long 1b, 3b\n"
+ " .popsection"
+ : "=&r" (ret), "=&r" (offset)
+ : "r" (addr), "Qo" (*(unsigned long *)addr));
+
+ return ret;
+}
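+
+/*
+ * Example: for a little-endian load at an address with addr & 3 == 2 whose
+ * following page is unmapped, the fixup reloads the aligned word and shifts
+ * it right by 16 bits, so the two mapped bytes land in the low half and the
+ * missing bytes read back as zeroes.
+ */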
+
+#endif /* DCACHE_WORD_ACCESS */
+#endif /* __ASM_ARM_WORD_AT_A_TIME_H */
diff --git a/arch/arm/include/asm/xen/events.h b/arch/arm/include/asm/xen/events.h
new file mode 100644
index 0000000000..c83086f745
--- /dev/null
+++ b/arch/arm/include/asm/xen/events.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARM_XEN_EVENTS_H
+#define _ASM_ARM_XEN_EVENTS_H
+
+#include <asm/ptrace.h>
+#include <asm/atomic.h>
+
+enum ipi_vector {
+ XEN_PLACEHOLDER_VECTOR,
+
+ /* Xen IPIs go here */
+ XEN_NR_IPIS,
+};
+
+static inline int xen_irqs_disabled(struct pt_regs *regs)
+{
+ return raw_irqs_disabled_flags(regs->ARM_cpsr);
+}
+
+#define xchg_xen_ulong(ptr, val) atomic64_xchg(container_of((long long*)(ptr),\
+ atomic64_t, \
+ counter), (val))
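+
+/*
+ * This exchanges the 64-bit event word atomically by treating the caller's
+ * long long as the counter member of an atomic64_t (hence container_of()).
+ */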
+
+/* Rebind event channel is supported by default */
+static inline bool xen_support_evtchn_rebind(void)
+{
+ return true;
+}
+
+#endif /* _ASM_ARM_XEN_EVENTS_H */
diff --git a/arch/arm/include/asm/xen/hypercall.h b/arch/arm/include/asm/xen/hypercall.h
new file mode 100644
index 0000000000..3522cbaed3
--- /dev/null
+++ b/arch/arm/include/asm/xen/hypercall.h
@@ -0,0 +1 @@
+#include <xen/arm/hypercall.h>
diff --git a/arch/arm/include/asm/xen/hypervisor.h b/arch/arm/include/asm/xen/hypervisor.h
new file mode 100644
index 0000000000..d6e7709d06
--- /dev/null
+++ b/arch/arm/include/asm/xen/hypervisor.h
@@ -0,0 +1 @@
+#include <xen/arm/hypervisor.h>
diff --git a/arch/arm/include/asm/xen/interface.h b/arch/arm/include/asm/xen/interface.h
new file mode 100644
index 0000000000..88c0d75da1
--- /dev/null
+++ b/arch/arm/include/asm/xen/interface.h
@@ -0,0 +1 @@
+#include <xen/arm/interface.h>
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
new file mode 100644
index 0000000000..dc7f6e91aa
--- /dev/null
+++ b/arch/arm/include/asm/xen/page.h
@@ -0,0 +1,6 @@
+#include <xen/arm/page.h>
+
+static inline bool xen_kernel_unmapped_at_usr(void)
+{
+ return false;
+}
diff --git a/arch/arm/include/asm/xen/swiotlb-xen.h b/arch/arm/include/asm/xen/swiotlb-xen.h
new file mode 100644
index 0000000000..455ade5d53
--- /dev/null
+++ b/arch/arm/include/asm/xen/swiotlb-xen.h
@@ -0,0 +1 @@
+#include <xen/arm/swiotlb-xen.h>
diff --git a/arch/arm/include/asm/xen/xen-ops.h b/arch/arm/include/asm/xen/xen-ops.h
new file mode 100644
index 0000000000..7ebb7eb0bd
--- /dev/null
+++ b/arch/arm/include/asm/xen/xen-ops.h
@@ -0,0 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <xen/arm/xen-ops.h>
diff --git a/arch/arm/include/asm/xor.h b/arch/arm/include/asm/xor.h
new file mode 100644
index 0000000000..934b549905
--- /dev/null
+++ b/arch/arm/include/asm/xor.h
@@ -0,0 +1,225 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/xor.h
+ *
+ * Copyright (C) 2001 Russell King
+ */
+#include <linux/hardirq.h>
+#include <asm-generic/xor.h>
+#include <asm/hwcap.h>
+#include <asm/neon.h>
+
+#define __XOR(a1, a2) a1 ^= a2
+
+#define GET_BLOCK_2(dst) \
+ __asm__("ldmia %0, {%1, %2}" \
+ : "=r" (dst), "=r" (a1), "=r" (a2) \
+ : "0" (dst))
+
+#define GET_BLOCK_4(dst) \
+ __asm__("ldmia %0, {%1, %2, %3, %4}" \
+ : "=r" (dst), "=r" (a1), "=r" (a2), "=r" (a3), "=r" (a4) \
+ : "0" (dst))
+
+#define XOR_BLOCK_2(src) \
+ __asm__("ldmia %0!, {%1, %2}" \
+ : "=r" (src), "=r" (b1), "=r" (b2) \
+ : "0" (src)); \
+ __XOR(a1, b1); __XOR(a2, b2);
+
+#define XOR_BLOCK_4(src) \
+ __asm__("ldmia %0!, {%1, %2, %3, %4}" \
+ : "=r" (src), "=r" (b1), "=r" (b2), "=r" (b3), "=r" (b4) \
+ : "0" (src)); \
+ __XOR(a1, b1); __XOR(a2, b2); __XOR(a3, b3); __XOR(a4, b4)
+
+#define PUT_BLOCK_2(dst) \
+ __asm__ __volatile__("stmia %0!, {%2, %3}" \
+ : "=r" (dst) \
+ : "0" (dst), "r" (a1), "r" (a2))
+
+#define PUT_BLOCK_4(dst) \
+ __asm__ __volatile__("stmia %0!, {%2, %3, %4, %5}" \
+ : "=r" (dst) \
+ : "0" (dst), "r" (a1), "r" (a2), "r" (a3), "r" (a4))
+
+static void
+xor_arm4regs_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
+{
+ unsigned int lines = bytes / sizeof(unsigned long) / 4;
+ register unsigned int a1 __asm__("r4");
+ register unsigned int a2 __asm__("r5");
+ register unsigned int a3 __asm__("r6");
+ register unsigned int a4 __asm__("r10");
+ register unsigned int b1 __asm__("r8");
+ register unsigned int b2 __asm__("r9");
+ register unsigned int b3 __asm__("ip");
+ register unsigned int b4 __asm__("lr");
+
+ do {
+ GET_BLOCK_4(p1);
+ XOR_BLOCK_4(p2);
+ PUT_BLOCK_4(p1);
+ } while (--lines);
+}
+
+static void
+xor_arm4regs_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
+{
+ unsigned int lines = bytes / sizeof(unsigned long) / 4;
+ register unsigned int a1 __asm__("r4");
+ register unsigned int a2 __asm__("r5");
+ register unsigned int a3 __asm__("r6");
+ register unsigned int a4 __asm__("r10");
+ register unsigned int b1 __asm__("r8");
+ register unsigned int b2 __asm__("r9");
+ register unsigned int b3 __asm__("ip");
+ register unsigned int b4 __asm__("lr");
+
+ do {
+ GET_BLOCK_4(p1);
+ XOR_BLOCK_4(p2);
+ XOR_BLOCK_4(p3);
+ PUT_BLOCK_4(p1);
+ } while (--lines);
+}
+
+static void
+xor_arm4regs_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
+{
+ unsigned int lines = bytes / sizeof(unsigned long) / 2;
+ register unsigned int a1 __asm__("r8");
+ register unsigned int a2 __asm__("r9");
+ register unsigned int b1 __asm__("ip");
+ register unsigned int b2 __asm__("lr");
+
+ do {
+ GET_BLOCK_2(p1);
+ XOR_BLOCK_2(p2);
+ XOR_BLOCK_2(p3);
+ XOR_BLOCK_2(p4);
+ PUT_BLOCK_2(p1);
+ } while (--lines);
+}
+
+static void
+xor_arm4regs_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
+{
+ unsigned int lines = bytes / sizeof(unsigned long) / 2;
+ register unsigned int a1 __asm__("r8");
+ register unsigned int a2 __asm__("r9");
+ register unsigned int b1 __asm__("ip");
+ register unsigned int b2 __asm__("lr");
+
+ do {
+ GET_BLOCK_2(p1);
+ XOR_BLOCK_2(p2);
+ XOR_BLOCK_2(p3);
+ XOR_BLOCK_2(p4);
+ XOR_BLOCK_2(p5);
+ PUT_BLOCK_2(p1);
+ } while (--lines);
+}
+
+static struct xor_block_template xor_block_arm4regs = {
+ .name = "arm4regs",
+ .do_2 = xor_arm4regs_2,
+ .do_3 = xor_arm4regs_3,
+ .do_4 = xor_arm4regs_4,
+ .do_5 = xor_arm4regs_5,
+};
+
+#undef XOR_TRY_TEMPLATES
+#define XOR_TRY_TEMPLATES \
+ do { \
+ xor_speed(&xor_block_arm4regs); \
+ xor_speed(&xor_block_8regs); \
+ xor_speed(&xor_block_32regs); \
+ NEON_TEMPLATES; \
+ } while (0)
+
+#ifdef CONFIG_KERNEL_MODE_NEON
+
+extern struct xor_block_template const xor_block_neon_inner;
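+
+/*
+ * kernel_neon_begin()/kernel_neon_end() must not be called from interrupt
+ * context, so each wrapper below falls back to the scalar arm4regs
+ * implementation when in_interrupt() is true.
+ */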
+
+static void
+xor_neon_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
+{
+ if (in_interrupt()) {
+ xor_arm4regs_2(bytes, p1, p2);
+ } else {
+ kernel_neon_begin();
+ xor_block_neon_inner.do_2(bytes, p1, p2);
+ kernel_neon_end();
+ }
+}
+
+static void
+xor_neon_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
+{
+ if (in_interrupt()) {
+ xor_arm4regs_3(bytes, p1, p2, p3);
+ } else {
+ kernel_neon_begin();
+ xor_block_neon_inner.do_3(bytes, p1, p2, p3);
+ kernel_neon_end();
+ }
+}
+
+static void
+xor_neon_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
+{
+ if (in_interrupt()) {
+ xor_arm4regs_4(bytes, p1, p2, p3, p4);
+ } else {
+ kernel_neon_begin();
+ xor_block_neon_inner.do_4(bytes, p1, p2, p3, p4);
+ kernel_neon_end();
+ }
+}
+
+static void
+xor_neon_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
+{
+ if (in_interrupt()) {
+ xor_arm4regs_5(bytes, p1, p2, p3, p4, p5);
+ } else {
+ kernel_neon_begin();
+ xor_block_neon_inner.do_5(bytes, p1, p2, p3, p4, p5);
+ kernel_neon_end();
+ }
+}
+
+static struct xor_block_template xor_block_neon = {
+ .name = "neon",
+ .do_2 = xor_neon_2,
+ .do_3 = xor_neon_3,
+ .do_4 = xor_neon_4,
+ .do_5 = xor_neon_5
+};
+
+#define NEON_TEMPLATES \
+ do { if (cpu_has_neon()) xor_speed(&xor_block_neon); } while (0)
+#else
+#define NEON_TEMPLATES
+#endif