path: arch/arm64/kernel
Diffstat
-rw-r--r--  arch/arm64/kernel/.gitignore | 2
-rw-r--r--  arch/arm64/kernel/Makefile | 71
-rw-r--r--  arch/arm64/kernel/acpi.c | 408
-rw-r--r--  arch/arm64/kernel/acpi_numa.c | 132
-rw-r--r--  arch/arm64/kernel/acpi_parking_protocol.c | 131
-rw-r--r--  arch/arm64/kernel/alternative.c | 263
-rw-r--r--  arch/arm64/kernel/armv8_deprecated.c | 638
-rw-r--r--  arch/arm64/kernel/asm-offsets.c | 148
-rw-r--r--  arch/arm64/kernel/cacheinfo.c | 103
-rw-r--r--  arch/arm64/kernel/cpu-reset.S | 56
-rw-r--r--  arch/arm64/kernel/cpu-reset.h | 32
-rw-r--r--  arch/arm64/kernel/cpu_errata.c | 576
-rw-r--r--  arch/arm64/kernel/cpu_ops.c | 118
-rw-r--r--  arch/arm64/kernel/cpufeature.c | 2881
-rw-r--r--  arch/arm64/kernel/cpuidle.c | 105
-rw-r--r--  arch/arm64/kernel/cpuinfo.c | 425
-rw-r--r--  arch/arm64/kernel/crash_core.c | 33
-rw-r--r--  arch/arm64/kernel/crash_dump.c | 70
-rw-r--r--  arch/arm64/kernel/debug-monitors.c | 461
-rw-r--r--  arch/arm64/kernel/efi-entry.S | 68
-rw-r--r--  arch/arm64/kernel/efi-header.S | 152
-rw-r--r--  arch/arm64/kernel/efi-rt-wrapper.S | 59
-rw-r--r--  arch/arm64/kernel/efi.c | 172
-rw-r--r--  arch/arm64/kernel/entry-common.c | 485
-rw-r--r--  arch/arm64/kernel/entry-fpsimd.S | 74
-rw-r--r--  arch/arm64/kernel/entry-ftrace.S | 351
-rw-r--r--  arch/arm64/kernel/entry.S | 1240
-rw-r--r--  arch/arm64/kernel/fpsimd.c | 1449
-rw-r--r--  arch/arm64/kernel/ftrace.c | 315
-rw-r--r--  arch/arm64/kernel/head.S | 1005
-rw-r--r--  arch/arm64/kernel/hibernate-asm.S | 166
-rw-r--r--  arch/arm64/kernel/hibernate.c | 724
-rw-r--r--  arch/arm64/kernel/hw_breakpoint.c | 1031
-rw-r--r--  arch/arm64/kernel/hyp-stub.S | 120
-rw-r--r--  arch/arm64/kernel/image-vars.h | 119
-rw-r--r--  arch/arm64/kernel/image.h | 67
-rw-r--r--  arch/arm64/kernel/insn.c | 1699
-rw-r--r--  arch/arm64/kernel/io.c | 97
-rw-r--r--  arch/arm64/kernel/irq.c | 69
-rw-r--r--  arch/arm64/kernel/jump_label.c | 38
-rw-r--r--  arch/arm64/kernel/kaslr.c | 218
-rw-r--r--  arch/arm64/kernel/kexec_image.c | 147
-rw-r--r--  arch/arm64/kernel/kgdb.c | 357
-rw-r--r--  arch/arm64/kernel/kuser32.S | 64
-rw-r--r--  arch/arm64/kernel/machine_kexec.c | 353
-rw-r--r--  arch/arm64/kernel/machine_kexec_file.c | 348
-rw-r--r--  arch/arm64/kernel/module-plts.c | 378
-rw-r--r--  arch/arm64/kernel/module.c | 528
-rw-r--r--  arch/arm64/kernel/mte.c | 341
-rw-r--r--  arch/arm64/kernel/paravirt.c | 175
-rw-r--r--  arch/arm64/kernel/pci.c | 220
-rw-r--r--  arch/arm64/kernel/perf_callchain.c | 192
-rw-r--r--  arch/arm64/kernel/perf_event.c | 1311
-rw-r--r--  arch/arm64/kernel/perf_regs.c | 80
-rw-r--r--  arch/arm64/kernel/pointer_auth.c | 48
-rw-r--r--  arch/arm64/kernel/probes/Makefile | 6
-rw-r--r--  arch/arm64/kernel/probes/decode-insn.c | 168
-rw-r--r--  arch/arm64/kernel/probes/decode-insn.h | 33
-rw-r--r--  arch/arm64/kernel/probes/kprobes.c | 471
-rw-r--r--  arch/arm64/kernel/probes/kprobes_trampoline.S | 82
-rw-r--r--  arch/arm64/kernel/probes/simulate-insn.c | 201
-rw-r--r--  arch/arm64/kernel/probes/simulate-insn.h | 20
-rw-r--r--  arch/arm64/kernel/probes/uprobes.c | 208
-rw-r--r--  arch/arm64/kernel/process.c | 741
-rw-r--r--  arch/arm64/kernel/proton-pack.c | 1131
-rw-r--r--  arch/arm64/kernel/psci.c | 123
-rw-r--r--  arch/arm64/kernel/ptrace.c | 1913
-rw-r--r--  arch/arm64/kernel/reloc_test_core.c | 77
-rw-r--r--  arch/arm64/kernel/reloc_test_syms.S | 85
-rw-r--r--  arch/arm64/kernel/relocate_kernel.S | 115
-rw-r--r--  arch/arm64/kernel/return_address.c | 54
-rw-r--r--  arch/arm64/kernel/scs.c | 16
-rw-r--r--  arch/arm64/kernel/sdei.c | 242
-rw-r--r--  arch/arm64/kernel/setup.c | 440
-rw-r--r--  arch/arm64/kernel/signal.c | 981
-rw-r--r--  arch/arm64/kernel/signal32.c | 459
-rw-r--r--  arch/arm64/kernel/sigreturn32.S | 46
-rw-r--r--  arch/arm64/kernel/sleep.S | 149
-rw-r--r--  arch/arm64/kernel/smccc-call.S | 45
-rw-r--r--  arch/arm64/kernel/smp.c | 1131
-rw-r--r--  arch/arm64/kernel/smp_spin_table.c | 125
-rw-r--r--  arch/arm64/kernel/stacktrace.c | 221
-rw-r--r--  arch/arm64/kernel/suspend.c | 149
-rw-r--r--  arch/arm64/kernel/sys.c | 61
-rw-r--r--  arch/arm64/kernel/sys32.c | 135
-rw-r--r--  arch/arm64/kernel/sys_compat.c | 120
-rw-r--r--  arch/arm64/kernel/syscall.c | 203
-rw-r--r--  arch/arm64/kernel/time.c | 71
-rw-r--r--  arch/arm64/kernel/topology.c | 272
-rw-r--r--  arch/arm64/kernel/trace-events-emulation.h | 36
-rw-r--r--  arch/arm64/kernel/traps.c | 997
-rw-r--r--  arch/arm64/kernel/vdso.c | 500
-rw-r--r--  arch/arm64/kernel/vdso/.gitignore | 2
-rw-r--r--  arch/arm64/kernel/vdso/Makefile | 84
-rwxr-xr-x  arch/arm64/kernel/vdso/gen_vdso_offsets.sh | 16
-rw-r--r--  arch/arm64/kernel/vdso/note.S | 23
-rw-r--r--  arch/arm64/kernel/vdso/sigreturn.S | 80
-rw-r--r--  arch/arm64/kernel/vdso/vdso.S | 24
-rw-r--r--  arch/arm64/kernel/vdso/vdso.lds.S | 96
-rw-r--r--  arch/arm64/kernel/vdso/vgettimeofday.c | 25
-rw-r--r--  arch/arm64/kernel/vdso32/.gitignore | 3
-rw-r--r--  arch/arm64/kernel/vdso32/Makefile | 205
-rw-r--r--  arch/arm64/kernel/vdso32/note.c | 15
-rw-r--r--  arch/arm64/kernel/vdso32/vdso.S | 19
-rw-r--r--  arch/arm64/kernel/vdso32/vdso.lds.S | 73
-rw-r--r--  arch/arm64/kernel/vdso32/vgettimeofday.c | 45
-rw-r--r--  arch/arm64/kernel/vmlinux.lds.S | 308
107 files changed, 33758 insertions, 0 deletions
diff --git a/arch/arm64/kernel/.gitignore b/arch/arm64/kernel/.gitignore
new file mode 100644
index 000000000..bbb90f92d
--- /dev/null
+++ b/arch/arm64/kernel/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+vmlinux.lds
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
new file mode 100644
index 000000000..bbaf0bc4a
--- /dev/null
+++ b/arch/arm64/kernel/Makefile
@@ -0,0 +1,71 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the linux kernel.
+#
+
+CFLAGS_armv8_deprecated.o := -I$(src)
+
+CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_insn.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_return_address.o = $(CC_FLAGS_FTRACE)
+
+# Object file lists.
+obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
+ entry-common.o entry-fpsimd.o process.o ptrace.o \
+ setup.o signal.o sys.o stacktrace.o time.o traps.o \
+ io.o vdso.o hyp-stub.o psci.o cpu_ops.o insn.o \
+ return_address.o cpuinfo.o cpu_errata.o \
+ cpufeature.o alternative.o cacheinfo.o \
+ smp.o smp_spin_table.o topology.o smccc-call.o \
+ syscall.o proton-pack.o
+
+targets += efi-entry.o
+
+OBJCOPYFLAGS := --prefix-symbols=__efistub_
+$(obj)/%.stub.o: $(obj)/%.o FORCE
+ $(call if_changed,objcopy)
+
+obj-$(CONFIG_COMPAT) += sys32.o signal32.o \
+ sys_compat.o
+obj-$(CONFIG_COMPAT) += sigreturn32.o
+obj-$(CONFIG_KUSER_HELPERS) += kuser32.o
+obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
+obj-$(CONFIG_MODULES) += module.o
+obj-$(CONFIG_ARM64_MODULE_PLTS) += module-plts.o
+obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
+obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
+obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
+obj-$(CONFIG_CPU_PM) += sleep.o suspend.o
+obj-$(CONFIG_CPU_IDLE) += cpuidle.o
+obj-$(CONFIG_JUMP_LABEL) += jump_label.o
+obj-$(CONFIG_KGDB) += kgdb.o
+obj-$(CONFIG_EFI) += efi.o efi-entry.stub.o \
+ efi-rt-wrapper.o
+obj-$(CONFIG_PCI) += pci.o
+obj-$(CONFIG_ARMV8_DEPRECATED) += armv8_deprecated.o
+obj-$(CONFIG_ACPI) += acpi.o
+obj-$(CONFIG_ACPI_NUMA) += acpi_numa.o
+obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL) += acpi_parking_protocol.o
+obj-$(CONFIG_PARAVIRT) += paravirt.o
+obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
+obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate-asm.o
+obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o \
+ cpu-reset.o
+obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_image.o
+obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o
+arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
+obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+obj-$(CONFIG_CRASH_CORE) += crash_core.o
+obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o
+obj-$(CONFIG_ARM64_PTR_AUTH) += pointer_auth.o
+obj-$(CONFIG_SHADOW_CALL_STACK) += scs.o
+obj-$(CONFIG_ARM64_MTE) += mte.o
+
+obj-y += vdso/ probes/
+obj-$(CONFIG_COMPAT_VDSO) += vdso32/
+head-y := head.o
+extra-y += $(head-y) vmlinux.lds
+
+ifeq ($(CONFIG_DEBUG_EFI),y)
+AFLAGS_head.o += -DVMLINUX_PATH="\"$(realpath $(objtree)/vmlinux)\""
+endif
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
new file mode 100644
index 000000000..cada0b816
--- /dev/null
+++ b/arch/arm64/kernel/acpi.c
@@ -0,0 +1,408 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ARM64 Specific Low-Level ACPI Boot Support
+ *
+ * Copyright (C) 2013-2014, Linaro Ltd.
+ * Author: Al Stone <al.stone@linaro.org>
+ * Author: Graeme Gregory <graeme.gregory@linaro.org>
+ * Author: Hanjun Guo <hanjun.guo@linaro.org>
+ * Author: Tomasz Nowicki <tomasz.nowicki@linaro.org>
+ * Author: Naresh Bhat <naresh.bhat@linaro.org>
+ */
+
+#define pr_fmt(fmt) "ACPI: " fmt
+
+#include <linux/acpi.h>
+#include <linux/cpumask.h>
+#include <linux/efi.h>
+#include <linux/efi-bgrt.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/irq_work.h>
+#include <linux/memblock.h>
+#include <linux/of_fdt.h>
+#include <linux/smp.h>
+#include <linux/serial_core.h>
+#include <linux/pgtable.h>
+
+#include <acpi/ghes.h>
+#include <asm/cputype.h>
+#include <asm/cpu_ops.h>
+#include <asm/daifflags.h>
+#include <asm/smp_plat.h>
+
+int acpi_noirq = 1; /* skip ACPI IRQ initialization */
+int acpi_disabled = 1;
+EXPORT_SYMBOL(acpi_disabled);
+
+int acpi_pci_disabled = 1; /* skip ACPI PCI scan and IRQ initialization */
+EXPORT_SYMBOL(acpi_pci_disabled);
+
+static bool param_acpi_off __initdata;
+static bool param_acpi_on __initdata;
+static bool param_acpi_force __initdata;
+
+static int __init parse_acpi(char *arg)
+{
+ if (!arg)
+ return -EINVAL;
+
+ /* "acpi=off" disables both ACPI table parsing and interpreter */
+ if (strcmp(arg, "off") == 0)
+ param_acpi_off = true;
+ else if (strcmp(arg, "on") == 0) /* prefer ACPI over DT */
+ param_acpi_on = true;
+ else if (strcmp(arg, "force") == 0) /* force ACPI to be enabled */
+ param_acpi_force = true;
+ else
+ return -EINVAL; /* Core will print when we return error */
+
+ return 0;
+}
+early_param("acpi", parse_acpi);
+
+static int __init dt_scan_depth1_nodes(unsigned long node,
+ const char *uname, int depth,
+ void *data)
+{
+ /*
+ * Ignore anything not directly under the root node; we'll
+ * catch its parent instead.
+ */
+ if (depth != 1)
+ return 0;
+
+ if (strcmp(uname, "chosen") == 0)
+ return 0;
+
+ if (strcmp(uname, "hypervisor") == 0 &&
+ of_flat_dt_is_compatible(node, "xen,xen"))
+ return 0;
+
+ /*
+ * This node at depth 1 is neither a chosen node nor a xen node,
+ * which we do not expect.
+ */
+ return 1;
+}
+
+/*
+ * __acpi_map_table() will be called before paging_init(), so early_ioremap()
+ * or early_memremap() should be used here for ACPI table mapping.
+ */
+void __init __iomem *__acpi_map_table(unsigned long phys, unsigned long size)
+{
+ if (!size)
+ return NULL;
+
+ return early_memremap(phys, size);
+}
+
+void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
+{
+ if (!map || !size)
+ return;
+
+ early_memunmap(map, size);
+}
+
+bool __init acpi_psci_present(void)
+{
+ return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_COMPLIANT;
+}
+
+/* Whether HVC must be used instead of SMC as the PSCI conduit */
+bool acpi_psci_use_hvc(void)
+{
+ return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_USE_HVC;
+}
+
+/*
+ * acpi_fadt_sanity_check() - Check FADT presence and carry out sanity
+ * checks on it
+ *
+ * Return 0 on success, <0 on failure
+ */
+static int __init acpi_fadt_sanity_check(void)
+{
+ struct acpi_table_header *table;
+ struct acpi_table_fadt *fadt;
+ acpi_status status;
+ int ret = 0;
+
+ /*
+ * FADT is required on arm64; retrieve it to check its presence
+	 * and carry out revision and ACPI HW reduced compliance tests
+ */
+ status = acpi_get_table(ACPI_SIG_FADT, 0, &table);
+ if (ACPI_FAILURE(status)) {
+ const char *msg = acpi_format_exception(status);
+
+ pr_err("Failed to get FADT table, %s\n", msg);
+ return -ENODEV;
+ }
+
+ fadt = (struct acpi_table_fadt *)table;
+
+ /*
+	 * The revision in the table header is the FADT major revision; the
+	 * FADT minor revision was introduced by ACPI 5.1. We only deal with
+	 * ACPI 5.1 or newer revisions, in order to get the GIC and SMP boot
+	 * protocol configuration data.
+ */
+ if (table->revision < 5 ||
+ (table->revision == 5 && fadt->minor_revision < 1)) {
+ pr_err(FW_BUG "Unsupported FADT revision %d.%d, should be 5.1+\n",
+ table->revision, fadt->minor_revision);
+
+ if (!fadt->arm_boot_flags) {
+ ret = -EINVAL;
+ goto out;
+ }
+ pr_err("FADT has ARM boot flags set, assuming 5.1\n");
+ }
+
+ if (!(fadt->flags & ACPI_FADT_HW_REDUCED)) {
+ pr_err("FADT not ACPI hardware reduced compliant\n");
+ ret = -EINVAL;
+ }
+
+out:
+ /*
+ * acpi_get_table() creates FADT table mapping that
+ * should be released after parsing and before resuming boot
+ */
+ acpi_put_table(table);
+ return ret;
+}
+
+/*
+ * acpi_boot_table_init() called from setup_arch(), always.
+ * 1. find RSDP and get its address, and then find XSDT
+ * 2. extract all tables and checksum them all
+ * 3. check ACPI FADT revision
+ * 4. check ACPI FADT HW reduced flag
+ *
+ * We can parse ACPI boot-time tables such as MADT after
+ * this function is called.
+ *
+ * On return ACPI is enabled if either:
+ *
+ * - ACPI tables are initialized and sanity checks passed
+ * - acpi=force was passed in the command line and ACPI was not disabled
+ * explicitly through acpi=off command line parameter
+ *
+ * ACPI is disabled on function return otherwise
+ */
+void __init acpi_boot_table_init(void)
+{
+ /*
+ * Enable ACPI instead of device tree unless
+ * - ACPI has been disabled explicitly (acpi=off), or
+ * - the device tree is not empty (it has more than just a /chosen node,
+ * and a /hypervisor node when running on Xen)
+ * and ACPI has not been [force] enabled (acpi=on|force)
+ */
+ if (param_acpi_off ||
+ (!param_acpi_on && !param_acpi_force &&
+ of_scan_flat_dt(dt_scan_depth1_nodes, NULL)))
+ goto done;
+
+ /*
+ * ACPI is disabled at this point. Enable it in order to parse
+ * the ACPI tables and carry out sanity checks
+ */
+ enable_acpi();
+
+ /*
+ * If ACPI tables are initialized and FADT sanity checks passed,
+ * leave ACPI enabled and carry on booting; otherwise disable ACPI
+ * on initialization error.
+ * If acpi=force was passed on the command line it forces ACPI
+ * to be enabled even if its initialization failed.
+ */
+ if (acpi_table_init() || acpi_fadt_sanity_check()) {
+ pr_err("Failed to init ACPI tables\n");
+ if (!param_acpi_force)
+ disable_acpi();
+ }
+
+done:
+ if (acpi_disabled) {
+ if (earlycon_acpi_spcr_enable)
+ early_init_dt_scan_chosen_stdout();
+ } else {
+ acpi_parse_spcr(earlycon_acpi_spcr_enable, true);
+ if (IS_ENABLED(CONFIG_ACPI_BGRT))
+ acpi_table_parse(ACPI_SIG_BGRT, acpi_parse_bgrt);
+ }
+}
+
+pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
+{
+ /*
+ * According to "Table 8 Map: EFI memory types to AArch64 memory
+ * types" of UEFI 2.5 section 2.3.6.1, each EFI memory type is
+ * mapped to a corresponding MAIR attribute encoding.
+ * The EFI memory attribute advises all possible capabilities
+ * of a memory region. We use the most efficient capability.
+ */
+
+ u64 attr;
+
+ attr = efi_mem_attributes(addr);
+ if (attr & EFI_MEMORY_WB)
+ return PAGE_KERNEL;
+ if (attr & EFI_MEMORY_WT)
+ return __pgprot(PROT_NORMAL_WT);
+ if (attr & EFI_MEMORY_WC)
+ return __pgprot(PROT_NORMAL_NC);
+ return __pgprot(PROT_DEVICE_nGnRnE);
+}
+
+void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
+{
+ efi_memory_desc_t *md, *region = NULL;
+ pgprot_t prot;
+
+ if (WARN_ON_ONCE(!efi_enabled(EFI_MEMMAP)))
+ return NULL;
+
+ for_each_efi_memory_desc(md) {
+ u64 end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
+
+ if (phys < md->phys_addr || phys >= end)
+ continue;
+
+ if (phys + size > end) {
+ pr_warn(FW_BUG "requested region covers multiple EFI memory regions\n");
+ return NULL;
+ }
+ region = md;
+ break;
+ }
+
+ /*
+ * It is fine for AML to remap regions that are not represented in the
+ * EFI memory map at all, as it only describes normal memory, and MMIO
+ * regions that require a virtual mapping to make them accessible to
+ * the EFI runtime services.
+ */
+ prot = __pgprot(PROT_DEVICE_nGnRnE);
+ if (region) {
+ switch (region->type) {
+ case EFI_LOADER_CODE:
+ case EFI_LOADER_DATA:
+ case EFI_BOOT_SERVICES_CODE:
+ case EFI_BOOT_SERVICES_DATA:
+ case EFI_CONVENTIONAL_MEMORY:
+ case EFI_PERSISTENT_MEMORY:
+ if (memblock_is_map_memory(phys) ||
+ !memblock_is_region_memory(phys, size)) {
+ pr_warn(FW_BUG "requested region covers kernel memory @ %pa\n", &phys);
+ return NULL;
+ }
+ /*
+ * Mapping kernel memory is permitted if the region in
+ * question is covered by a single memblock with the
+ * NOMAP attribute set: this enables the use of ACPI
+ * table overrides passed via initramfs, which are
+ * reserved in memory using arch_reserve_mem_area()
+ * below. As this particular use case only requires
+ * read access, fall through to the R/O mapping case.
+ */
+ fallthrough;
+
+ case EFI_RUNTIME_SERVICES_CODE:
+ /*
+ * This would be unusual, but not problematic per se,
+ * as long as we take care not to create a writable
+ * mapping for executable code.
+ */
+ prot = PAGE_KERNEL_RO;
+ break;
+
+ case EFI_ACPI_RECLAIM_MEMORY:
+ /*
+ * ACPI reclaim memory is used to pass firmware tables
+ * and other data that is intended for consumption by
+ * the OS only, which may decide it wants to reclaim
+ * that memory and use it for something else. We never
+ * do that, but we usually add it to the linear map
+ * anyway, in which case we should use the existing
+ * mapping.
+ */
+ if (memblock_is_map_memory(phys))
+ return (void __iomem *)__phys_to_virt(phys);
+ fallthrough;
+
+ default:
+ if (region->attribute & EFI_MEMORY_WB)
+ prot = PAGE_KERNEL;
+ else if (region->attribute & EFI_MEMORY_WT)
+ prot = __pgprot(PROT_NORMAL_WT);
+ else if (region->attribute & EFI_MEMORY_WC)
+ prot = __pgprot(PROT_NORMAL_NC);
+ }
+ }
+ return __ioremap(phys, size, prot);
+}
+
+/*
+ * Claim Synchronous External Aborts as a firmware first notification.
+ *
+ * Used by KVM and the arch do_sea handler.
+ * @regs may be NULL when called from process context.
+ */
+int apei_claim_sea(struct pt_regs *regs)
+{
+ int err = -ENOENT;
+ bool return_to_irqs_enabled;
+ unsigned long current_flags;
+
+ if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
+ return err;
+
+ current_flags = local_daif_save_flags();
+
+ /* current_flags isn't useful here as daif doesn't tell us about pNMI */
+ return_to_irqs_enabled = !irqs_disabled_flags(arch_local_save_flags());
+
+ if (regs)
+ return_to_irqs_enabled = interrupts_enabled(regs);
+
+ /*
+ * SEA can interrupt SError, mask it and describe this as an NMI so
+ * that APEI defers the handling.
+ */
+ local_daif_restore(DAIF_ERRCTX);
+ nmi_enter();
+ err = ghes_notify_sea();
+ nmi_exit();
+
+ /*
+ * APEI NMI-like notifications are deferred to irq_work. Unless
+ * we interrupted irqs-masked code, we can do that now.
+ */
+ if (!err) {
+ if (return_to_irqs_enabled) {
+ local_daif_restore(DAIF_PROCCTX_NOIRQ);
+ __irq_enter();
+ irq_work_run();
+ __irq_exit();
+ } else {
+ pr_warn_ratelimited("APEI work queued but not completed");
+ err = -EINPROGRESS;
+ }
+ }
+
+ local_daif_restore(current_flags);
+
+ return err;
+}
+
+void arch_reserve_mem_area(acpi_physical_address addr, size_t size)
+{
+ memblock_mark_nomap(addr, size);
+}
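
As a point of reference for the comment above apei_claim_sea() ("Used by KVM and the arch do_sea handler"), a minimal caller sketch is shown below. The handler name and the fallback return are placeholders; only the apei_claim_sea() call itself comes from this file.

#include <linux/acpi.h>
#include <asm/acpi.h>		/* apei_claim_sea() declaration (assumed location) */
#include <asm/ptrace.h>

/* Hypothetical fault-handler fragment: let APEI/GHES claim a Synchronous
 * External Abort before falling back to the normal fatal-fault path. */
static int example_handle_sea(struct pt_regs *regs)
{
	if (apei_claim_sea(regs) == 0)
		return 0;	/* firmware-first handling claimed the error */

	return -ENOENT;		/* placeholder: take the usual fault path */
}
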
diff --git a/arch/arm64/kernel/acpi_numa.c b/arch/arm64/kernel/acpi_numa.c
new file mode 100644
index 000000000..7ff800045
--- /dev/null
+++ b/arch/arm64/kernel/acpi_numa.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ACPI 5.1 based NUMA setup for ARM64
+ * Lots of code was borrowed from arch/x86/mm/srat.c
+ *
+ * Copyright 2004 Andi Kleen, SuSE Labs.
+ * Copyright (C) 2013-2016, Linaro Ltd.
+ * Author: Hanjun Guo <hanjun.guo@linaro.org>
+ *
+ * Reads the ACPI SRAT table to figure out what memory belongs to which CPUs.
+ *
+ * Called from acpi_numa_init while reading the SRAT and SLIT tables.
+ * Assumes all memory regions belonging to a single proximity domain
+ * are in one chunk. Holes between them will be included in the node.
+ */
+
+#define pr_fmt(fmt) "ACPI: NUMA: " fmt
+
+#include <linux/acpi.h>
+#include <linux/bitmap.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/memblock.h>
+#include <linux/mmzone.h>
+#include <linux/module.h>
+#include <linux/topology.h>
+
+#include <asm/numa.h>
+
+static int acpi_early_node_map[NR_CPUS] __initdata = { NUMA_NO_NODE };
+
+int __init acpi_numa_get_nid(unsigned int cpu)
+{
+ return acpi_early_node_map[cpu];
+}
+
+static inline int get_cpu_for_acpi_id(u32 uid)
+{
+ int cpu;
+
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++)
+ if (uid == get_acpi_id_for_cpu(cpu))
+ return cpu;
+
+ return -EINVAL;
+}
+
+static int __init acpi_parse_gicc_pxm(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_srat_gicc_affinity *pa;
+ int cpu, pxm, node;
+
+ if (srat_disabled())
+ return -EINVAL;
+
+ pa = (struct acpi_srat_gicc_affinity *)header;
+ if (!pa)
+ return -EINVAL;
+
+ if (!(pa->flags & ACPI_SRAT_GICC_ENABLED))
+ return 0;
+
+ pxm = pa->proximity_domain;
+ node = pxm_to_node(pxm);
+
+ /*
+ * If we can't map the UID to a logical cpu this
+ * means that the UID is not part of possible cpus
+ * so we do not need a NUMA mapping for it, skip
+ * the SRAT entry and keep parsing.
+ */
+ cpu = get_cpu_for_acpi_id(pa->acpi_processor_uid);
+ if (cpu < 0)
+ return 0;
+
+ acpi_early_node_map[cpu] = node;
+ pr_info("SRAT: PXM %d -> MPIDR 0x%llx -> Node %d\n", pxm,
+ cpu_logical_map(cpu), node);
+
+ return 0;
+}
+
+void __init acpi_map_cpus_to_nodes(void)
+{
+ acpi_table_parse_entries(ACPI_SIG_SRAT, sizeof(struct acpi_table_srat),
+ ACPI_SRAT_TYPE_GICC_AFFINITY,
+ acpi_parse_gicc_pxm, 0);
+}
+
+/* Callback for Proximity Domain -> ACPI processor UID mapping */
+void __init acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa)
+{
+ int pxm, node;
+
+ if (srat_disabled())
+ return;
+
+ if (pa->header.length < sizeof(struct acpi_srat_gicc_affinity)) {
+ pr_err("SRAT: Invalid SRAT header length: %d\n",
+ pa->header.length);
+ bad_srat();
+ return;
+ }
+
+ if (!(pa->flags & ACPI_SRAT_GICC_ENABLED))
+ return;
+
+ pxm = pa->proximity_domain;
+ node = acpi_map_pxm_to_node(pxm);
+
+ if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
+ pr_err("SRAT: Too many proximity domains %d\n", pxm);
+ bad_srat();
+ return;
+ }
+
+ node_set(node, numa_nodes_parsed);
+}
+
+int __init arm64_acpi_numa_init(void)
+{
+ int ret;
+
+ ret = acpi_numa_init();
+ if (ret) {
+ pr_info("Failed to initialise from firmware\n");
+ return ret;
+ }
+
+ return srat_disabled() ? -EINVAL : 0;
+}
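
A hedged sketch of how the mapping built above is consumed: during early CPU enumeration each possible CPU's node can be looked up with acpi_numa_get_nid(). The loop below is illustrative only; early_map_cpu_to_node() is the arm64 helper assumed to record the result, and the surrounding function is hypothetical.

#include <linux/acpi.h>
#include <linux/cpumask.h>
#include <asm/numa.h>

static void __init example_map_possible_cpus(void)
{
	unsigned int cpu;

	acpi_map_cpus_to_nodes();	/* parse the SRAT GICC entries above */

	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		early_map_cpu_to_node(cpu, acpi_numa_get_nid(cpu));
}
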
diff --git a/arch/arm64/kernel/acpi_parking_protocol.c b/arch/arm64/kernel/acpi_parking_protocol.c
new file mode 100644
index 000000000..e7c941d83
--- /dev/null
+++ b/arch/arm64/kernel/acpi_parking_protocol.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ARM64 ACPI Parking Protocol implementation
+ *
+ * Authors: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ * Mark Salter <msalter@redhat.com>
+ */
+#include <linux/acpi.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+
+#include <asm/cpu_ops.h>
+
+struct parking_protocol_mailbox {
+ __le32 cpu_id;
+ __le32 reserved;
+ __le64 entry_point;
+};
+
+struct cpu_mailbox_entry {
+ struct parking_protocol_mailbox __iomem *mailbox;
+ phys_addr_t mailbox_addr;
+ u8 version;
+ u8 gic_cpu_id;
+};
+
+static struct cpu_mailbox_entry cpu_mailbox_entries[NR_CPUS];
+
+void __init acpi_set_mailbox_entry(int cpu,
+ struct acpi_madt_generic_interrupt *p)
+{
+ struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu];
+
+ cpu_entry->mailbox_addr = p->parked_address;
+ cpu_entry->version = p->parking_version;
+ cpu_entry->gic_cpu_id = p->cpu_interface_number;
+}
+
+bool acpi_parking_protocol_valid(int cpu)
+{
+ struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu];
+
+ return cpu_entry->mailbox_addr && cpu_entry->version;
+}
+
+static int acpi_parking_protocol_cpu_init(unsigned int cpu)
+{
+ pr_debug("%s: ACPI parked addr=%llx\n", __func__,
+ cpu_mailbox_entries[cpu].mailbox_addr);
+
+ return 0;
+}
+
+static int acpi_parking_protocol_cpu_prepare(unsigned int cpu)
+{
+ return 0;
+}
+
+static int acpi_parking_protocol_cpu_boot(unsigned int cpu)
+{
+ struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu];
+ struct parking_protocol_mailbox __iomem *mailbox;
+ u32 cpu_id;
+
+ /*
+	 * Map the mailbox memory with attribute Device-nGnRE (i.e. ioremap -
+	 * this deviates from the parking protocol specification, since the
+	 * mailboxes are required to be mapped nGnRnE; the attribute
+	 * discrepancy is harmless insofar as the protocol specification
+	 * is concerned).
+	 * If the mailbox was mistakenly allocated in the linear mapping by
+	 * FW, ioremap will fail, since such a mapping is prevented by the
+	 * kernel (it would clash with the linear mapping attributes).
+ */
+ mailbox = ioremap(cpu_entry->mailbox_addr, sizeof(*mailbox));
+ if (!mailbox)
+ return -EIO;
+
+ cpu_id = readl_relaxed(&mailbox->cpu_id);
+ /*
+ * Check if firmware has set-up the mailbox entry properly
+ * before kickstarting the respective cpu.
+ */
+ if (cpu_id != ~0U) {
+ iounmap(mailbox);
+ return -ENXIO;
+ }
+
+ /*
+ * stash the mailbox address mapping to use it for further FW
+ * checks in the postboot method
+ */
+ cpu_entry->mailbox = mailbox;
+
+ /*
+ * We write the entry point and cpu id as LE regardless of the
+ * native endianness of the kernel. Therefore, any boot-loaders
+ * that read this address need to convert this address to the
+ * Boot-Loader's endianness before jumping.
+ */
+ writeq_relaxed(__pa_symbol(secondary_entry), &mailbox->entry_point);
+ writel_relaxed(cpu_entry->gic_cpu_id, &mailbox->cpu_id);
+
+ arch_send_wakeup_ipi_mask(cpumask_of(cpu));
+
+ return 0;
+}
+
+static void acpi_parking_protocol_cpu_postboot(void)
+{
+ int cpu = smp_processor_id();
+ struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu];
+ struct parking_protocol_mailbox __iomem *mailbox = cpu_entry->mailbox;
+ u64 entry_point;
+
+ entry_point = readq_relaxed(&mailbox->entry_point);
+ /*
+ * Check if firmware has cleared the entry_point as expected
+ * by the protocol specification.
+ */
+ WARN_ON(entry_point);
+}
+
+const struct cpu_operations acpi_parking_protocol_ops = {
+ .name = "parking-protocol",
+ .cpu_init = acpi_parking_protocol_cpu_init,
+ .cpu_prepare = acpi_parking_protocol_cpu_prepare,
+ .cpu_boot = acpi_parking_protocol_cpu_boot,
+ .cpu_postboot = acpi_parking_protocol_cpu_postboot
+};
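
To make the handshake in acpi_parking_protocol_cpu_boot() easier to follow, here is an illustrative, non-normative sketch of the firmware side of the protocol. The field layout mirrors struct parking_protocol_mailbox above, but the polling loop is only an assumption about what a conforming firmware does; it is not code from this series.

/* Firmware-side sketch (assumption, illustration only): a parked CPU polls
 * its mailbox until the OS publishes an entry point and writes the CPU's
 * own id, then jumps to that entry point. */
struct pp_mailbox {
	volatile unsigned int cpu_id;			/* little-endian */
	unsigned int reserved;
	volatile unsigned long long entry_point;	/* little-endian */
};

static void parked_cpu_loop(struct pp_mailbox *mb, unsigned int my_id)
{
	while (mb->cpu_id != my_id)
		;	/* wait; the OS sends a wakeup IPI after the write */

	((void (*)(void))(unsigned long)mb->entry_point)();	/* enter OS */
}
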
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
new file mode 100644
index 000000000..5f8e4c2df
--- /dev/null
+++ b/arch/arm64/kernel/alternative.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * alternative runtime patching
+ * inspired by the x86 version
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ */
+
+#define pr_fmt(fmt) "alternatives: " fmt
+
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <asm/cacheflush.h>
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
+#include <asm/insn.h>
+#include <asm/sections.h>
+#include <linux/stop_machine.h>
+
+#define __ALT_PTR(a,f) ((void *)&(a)->f + (a)->f)
+#define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset)
+#define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset)
+
+static int all_alternatives_applied;
+
+static DECLARE_BITMAP(applied_alternatives, ARM64_NCAPS);
+
+struct alt_region {
+ struct alt_instr *begin;
+ struct alt_instr *end;
+};
+
+bool alternative_is_applied(u16 cpufeature)
+{
+ if (WARN_ON(cpufeature >= ARM64_NCAPS))
+ return false;
+
+ return test_bit(cpufeature, applied_alternatives);
+}
+
+/*
+ * Check whether the branch target lies outside the alternative replacement
+ * sequence, in which case the branch offset will need to be updated.
+ */
+static __always_inline bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
+{
+ unsigned long replptr = (unsigned long)ALT_REPL_PTR(alt);
+ return !(pc >= replptr && pc <= (replptr + alt->alt_len));
+}
+
+#define align_down(x, a) ((unsigned long)(x) & ~(((unsigned long)(a)) - 1))
+
+static __always_inline u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnptr)
+{
+ u32 insn;
+
+ insn = le32_to_cpu(*altinsnptr);
+
+ if (aarch64_insn_is_branch_imm(insn)) {
+ s32 offset = aarch64_get_branch_offset(insn);
+ unsigned long target;
+
+ target = (unsigned long)altinsnptr + offset;
+
+ /*
+ * If we're branching inside the alternate sequence,
+ * do not rewrite the instruction, as it is already
+ * correct. Otherwise, generate the new instruction.
+ */
+ if (branch_insn_requires_update(alt, target)) {
+ offset = target - (unsigned long)insnptr;
+ insn = aarch64_set_branch_offset(insn, offset);
+ }
+ } else if (aarch64_insn_is_adrp(insn)) {
+ s32 orig_offset, new_offset;
+ unsigned long target;
+
+ /*
+ * If we're replacing an adrp instruction, which uses PC-relative
+ * immediate addressing, adjust the offset to reflect the new
+ * PC. adrp operates on 4K aligned addresses.
+ */
+ orig_offset = aarch64_insn_adrp_get_offset(insn);
+ target = align_down(altinsnptr, SZ_4K) + orig_offset;
+ new_offset = target - align_down(insnptr, SZ_4K);
+ insn = aarch64_insn_adrp_set_offset(insn, new_offset);
+ } else if (aarch64_insn_uses_literal(insn)) {
+ /*
+ * Disallow patching unhandled instructions using PC relative
+ * literal addresses
+ */
+ BUG();
+ }
+
+ return insn;
+}
+
+static noinstr void patch_alternative(struct alt_instr *alt,
+ __le32 *origptr, __le32 *updptr, int nr_inst)
+{
+ __le32 *replptr;
+ int i;
+
+ replptr = ALT_REPL_PTR(alt);
+ for (i = 0; i < nr_inst; i++) {
+ u32 insn;
+
+ insn = get_alt_insn(alt, origptr + i, replptr + i);
+ updptr[i] = cpu_to_le32(insn);
+ }
+}
+
+/*
+ * We provide our own, private D-cache cleaning function so that we don't
+ * accidentally call into the cache.S code, which is patched by us at
+ * runtime.
+ */
+static void clean_dcache_range_nopatch(u64 start, u64 end)
+{
+ u64 cur, d_size, ctr_el0;
+
+ ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
+ d_size = 4 << cpuid_feature_extract_unsigned_field(ctr_el0,
+ CTR_DMINLINE_SHIFT);
+ cur = start & ~(d_size - 1);
+ do {
+ /*
+ * We must clean+invalidate to the PoC in order to avoid
+ * Cortex-A53 errata 826319, 827319, 824069 and 819472
+ * (this corresponds to ARM64_WORKAROUND_CLEAN_CACHE)
+ */
+ asm volatile("dc civac, %0" : : "r" (cur) : "memory");
+ } while (cur += d_size, cur < end);
+}
+
+static void __apply_alternatives(void *alt_region, bool is_module,
+ unsigned long *feature_mask)
+{
+ struct alt_instr *alt;
+ struct alt_region *region = alt_region;
+ __le32 *origptr, *updptr;
+ alternative_cb_t alt_cb;
+
+ for (alt = region->begin; alt < region->end; alt++) {
+ int nr_inst;
+
+ if (!test_bit(alt->cpufeature, feature_mask))
+ continue;
+
+ /* Use ARM64_CB_PATCH as an unconditional patch */
+ if (alt->cpufeature < ARM64_CB_PATCH &&
+ !cpus_have_cap(alt->cpufeature))
+ continue;
+
+ if (alt->cpufeature == ARM64_CB_PATCH)
+ BUG_ON(alt->alt_len != 0);
+ else
+ BUG_ON(alt->alt_len != alt->orig_len);
+
+ pr_info_once("patching kernel code\n");
+
+ origptr = ALT_ORIG_PTR(alt);
+ updptr = is_module ? origptr : lm_alias(origptr);
+ nr_inst = alt->orig_len / AARCH64_INSN_SIZE;
+
+ if (alt->cpufeature < ARM64_CB_PATCH)
+ alt_cb = patch_alternative;
+ else
+ alt_cb = ALT_REPL_PTR(alt);
+
+ alt_cb(alt, origptr, updptr, nr_inst);
+
+ if (!is_module) {
+ clean_dcache_range_nopatch((u64)origptr,
+ (u64)(origptr + nr_inst));
+ }
+ }
+
+ /*
+ * The core module code takes care of cache maintenance in
+ * flush_module_icache().
+ */
+ if (!is_module) {
+ dsb(ish);
+ __flush_icache_all();
+ isb();
+
+ /* Ignore ARM64_CB bit from feature mask */
+ bitmap_or(applied_alternatives, applied_alternatives,
+ feature_mask, ARM64_NCAPS);
+ bitmap_and(applied_alternatives, applied_alternatives,
+ cpu_hwcaps, ARM64_NCAPS);
+ }
+}
+
+/*
+ * We might be patching the stop_machine state machine, so implement a
+ * really simple polling protocol here.
+ */
+static int __apply_alternatives_multi_stop(void *unused)
+{
+ struct alt_region region = {
+ .begin = (struct alt_instr *)__alt_instructions,
+ .end = (struct alt_instr *)__alt_instructions_end,
+ };
+
+ /* We always have a CPU 0 at this point (__init) */
+ if (smp_processor_id()) {
+ while (!READ_ONCE(all_alternatives_applied))
+ cpu_relax();
+ isb();
+ } else {
+ DECLARE_BITMAP(remaining_capabilities, ARM64_NPATCHABLE);
+
+ bitmap_complement(remaining_capabilities, boot_capabilities,
+ ARM64_NPATCHABLE);
+
+ BUG_ON(all_alternatives_applied);
+ __apply_alternatives(&region, false, remaining_capabilities);
+ /* Barriers provided by the cache flushing */
+ WRITE_ONCE(all_alternatives_applied, 1);
+ }
+
+ return 0;
+}
+
+void __init apply_alternatives_all(void)
+{
+ /* better not try code patching on a live SMP system */
+ stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask);
+}
+
+/*
+ * This is called very early in the boot process (directly after we run
+ * a feature detect on the boot CPU). No need to worry about other CPUs
+ * here.
+ */
+void __init apply_boot_alternatives(void)
+{
+ struct alt_region region = {
+ .begin = (struct alt_instr *)__alt_instructions,
+ .end = (struct alt_instr *)__alt_instructions_end,
+ };
+
+ /* If called on non-boot cpu things could go wrong */
+ WARN_ON(smp_processor_id() != 0);
+
+ __apply_alternatives(&region, false, &boot_capabilities[0]);
+}
+
+#ifdef CONFIG_MODULES
+void apply_alternatives_module(void *start, size_t length)
+{
+ struct alt_region region = {
+ .begin = start,
+ .end = start + length,
+ };
+ DECLARE_BITMAP(all_capabilities, ARM64_NPATCHABLE);
+
+ bitmap_fill(all_capabilities, ARM64_NPATCHABLE);
+
+ __apply_alternatives(&region, true, &all_capabilities[0]);
+}
+#endif
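
For context, the alt_instr records that __apply_alternatives() walks are emitted by the ALTERNATIVE() macro at each patch site (defined in asm/alternative.h, not in this file). A minimal sketch of such a site follows; the capability name ARM64_EXAMPLE_CAP is a placeholder.

#include <asm/alternative.h>
#include <asm/cpucaps.h>

static inline void example_patched_barrier(void)
{
	/*
	 * Emits a "nop" into .text and an "isb" into the replacement
	 * section, plus an alt_instr record. If the (placeholder)
	 * ARM64_EXAMPLE_CAP capability is detected at boot,
	 * __apply_alternatives() rewrites the nop with the isb.
	 */
	asm volatile(ALTERNATIVE("nop", "isb", ARM64_EXAMPLE_CAP));
}
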
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
new file mode 100644
index 000000000..f0ba854f0
--- /dev/null
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -0,0 +1,638 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 ARM Limited
+ */
+
+#include <linux/cpu.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/perf_event.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/sysctl.h>
+#include <linux/uaccess.h>
+
+#include <asm/cpufeature.h>
+#include <asm/insn.h>
+#include <asm/sysreg.h>
+#include <asm/system_misc.h>
+#include <asm/traps.h>
+
+#define CREATE_TRACE_POINTS
+#include "trace-events-emulation.h"
+
+/*
+ * The runtime support for deprecated instructions can be in one of the
+ * following three states -
+ *
+ * 0 = undef
+ * 1 = emulate (software emulation)
+ * 2 = hw (supported in hardware)
+ */
+enum insn_emulation_mode {
+ INSN_UNDEF,
+ INSN_EMULATE,
+ INSN_HW,
+};
+
+enum legacy_insn_status {
+ INSN_DEPRECATED,
+ INSN_OBSOLETE,
+ INSN_UNAVAILABLE,
+};
+
+struct insn_emulation {
+ const char *name;
+ enum legacy_insn_status status;
+ bool (*try_emulate)(struct pt_regs *regs,
+ u32 insn);
+ int (*set_hw_mode)(bool enable);
+
+ int current_mode;
+ int min;
+ int max;
+
+ /*
+	 * sysctl for this emulation + a sentinel entry.
+ */
+ struct ctl_table sysctl[2];
+};
+
+#define ARM_OPCODE_CONDTEST_FAIL 0
+#define ARM_OPCODE_CONDTEST_PASS 1
+#define ARM_OPCODE_CONDTEST_UNCOND 2
+
+#define ARM_OPCODE_CONDITION_UNCOND 0xf
+
+static unsigned int __maybe_unused aarch32_check_condition(u32 opcode, u32 psr)
+{
+ u32 cc_bits = opcode >> 28;
+
+ if (cc_bits != ARM_OPCODE_CONDITION_UNCOND) {
+ if ((*aarch32_opcode_cond_checks[cc_bits])(psr))
+ return ARM_OPCODE_CONDTEST_PASS;
+ else
+ return ARM_OPCODE_CONDTEST_FAIL;
+ }
+ return ARM_OPCODE_CONDTEST_UNCOND;
+}
+
+#ifdef CONFIG_SWP_EMULATION
+/*
+ * Implement emulation of the SWP/SWPB instructions using load-exclusive and
+ * store-exclusive.
+ *
+ * Syntax of SWP{B} instruction: SWP{B}<c> <Rt>, <Rt2>, [<Rn>]
+ * Where: Rt = destination
+ * Rt2 = source
+ * Rn = address
+ */
+
+/*
+ * Error-checking SWP macros implemented using ldxr{b}/stxr{b}
+ */
+
+/* Arbitrary constant to ensure forward-progress of the LL/SC loop */
+#define __SWP_LL_SC_LOOPS 4
+
+#define __user_swpX_asm(data, addr, res, temp, temp2, B) \
+do { \
+ uaccess_enable(); \
+ __asm__ __volatile__( \
+ " mov %w3, %w7\n" \
+ "0: ldxr"B" %w2, [%4]\n" \
+ "1: stxr"B" %w0, %w1, [%4]\n" \
+ " cbz %w0, 2f\n" \
+ " sub %w3, %w3, #1\n" \
+ " cbnz %w3, 0b\n" \
+ " mov %w0, %w5\n" \
+ " b 3f\n" \
+ "2:\n" \
+ " mov %w1, %w2\n" \
+ "3:\n" \
+ " .pushsection .fixup,\"ax\"\n" \
+ " .align 2\n" \
+ "4: mov %w0, %w6\n" \
+ " b 3b\n" \
+ " .popsection" \
+ _ASM_EXTABLE(0b, 4b) \
+ _ASM_EXTABLE(1b, 4b) \
+ : "=&r" (res), "+r" (data), "=&r" (temp), "=&r" (temp2) \
+ : "r" ((unsigned long)addr), "i" (-EAGAIN), \
+ "i" (-EFAULT), \
+ "i" (__SWP_LL_SC_LOOPS) \
+ : "memory"); \
+ uaccess_disable(); \
+} while (0)
+
+#define __user_swp_asm(data, addr, res, temp, temp2) \
+ __user_swpX_asm(data, addr, res, temp, temp2, "")
+#define __user_swpb_asm(data, addr, res, temp, temp2) \
+ __user_swpX_asm(data, addr, res, temp, temp2, "b")
+
+/*
+ * Bit 22 of the instruction encoding distinguishes between
+ * the SWP and SWPB variants (bit set means SWPB).
+ */
+#define TYPE_SWPB (1 << 22)
+
+static int emulate_swpX(unsigned int address, unsigned int *data,
+ unsigned int type)
+{
+ unsigned int res = 0;
+
+ if ((type != TYPE_SWPB) && (address & 0x3)) {
+ /* SWP to unaligned address not permitted */
+ pr_debug("SWP instruction on unaligned pointer!\n");
+ return -EFAULT;
+ }
+
+ while (1) {
+ unsigned long temp, temp2;
+
+ if (type == TYPE_SWPB)
+ __user_swpb_asm(*data, address, res, temp, temp2);
+ else
+ __user_swp_asm(*data, address, res, temp, temp2);
+
+ if (likely(res != -EAGAIN) || signal_pending(current))
+ break;
+
+ cond_resched();
+ }
+
+ return res;
+}
+
+/*
+ * swp_handler logs the id of the calling process, dissects the instruction,
+ * sanity-checks the memory location, calls emulate_swpX for the actual
+ * operation and deals with fixup/error handling before returning.
+ */
+static int swp_handler(struct pt_regs *regs, u32 instr)
+{
+ u32 destreg, data, type, address = 0;
+ const void __user *user_ptr;
+ int rn, rt2, res = 0;
+
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);
+
+ type = instr & TYPE_SWPB;
+
+ switch (aarch32_check_condition(instr, regs->pstate)) {
+ case ARM_OPCODE_CONDTEST_PASS:
+ break;
+ case ARM_OPCODE_CONDTEST_FAIL:
+ /* Condition failed - return to next instruction */
+ goto ret;
+ case ARM_OPCODE_CONDTEST_UNCOND:
+ /* If unconditional encoding - not a SWP, undef */
+ return -EFAULT;
+ default:
+ return -EINVAL;
+ }
+
+ rn = aarch32_insn_extract_reg_num(instr, A32_RN_OFFSET);
+ rt2 = aarch32_insn_extract_reg_num(instr, A32_RT2_OFFSET);
+
+ address = (u32)regs->user_regs.regs[rn];
+ data = (u32)regs->user_regs.regs[rt2];
+ destreg = aarch32_insn_extract_reg_num(instr, A32_RT_OFFSET);
+
+ pr_debug("addr in r%d->0x%08x, dest is r%d, source in r%d->0x%08x)\n",
+ rn, address, destreg,
+ aarch32_insn_extract_reg_num(instr, A32_RT2_OFFSET), data);
+
+ /* Check access in reasonable access range for both SWP and SWPB */
+ user_ptr = (const void __user *)(unsigned long)(address & ~3);
+ if (!access_ok(user_ptr, 4)) {
+ pr_debug("SWP{B} emulation: access to 0x%08x not allowed!\n",
+ address);
+ goto fault;
+ }
+
+ res = emulate_swpX(address, &data, type);
+ if (res == -EFAULT)
+ goto fault;
+ else if (res == 0)
+ regs->user_regs.regs[destreg] = data;
+
+ret:
+ if (type == TYPE_SWPB)
+ trace_instruction_emulation("swpb", regs->pc);
+ else
+ trace_instruction_emulation("swp", regs->pc);
+
+ pr_warn_ratelimited("\"%s\" (%ld) uses obsolete SWP{B} instruction at 0x%llx\n",
+ current->comm, (unsigned long)current->pid, regs->pc);
+
+ arm64_skip_faulting_instruction(regs, 4);
+ return 0;
+
+fault:
+ pr_debug("SWP{B} emulation: access caused memory abort!\n");
+ arm64_notify_segfault(address);
+
+ return 0;
+}
+
+static bool try_emulate_swp(struct pt_regs *regs, u32 insn)
+{
+ /* SWP{B} only exists in ARM state and does not exist in Thumb */
+ if (!compat_user_mode(regs) || compat_thumb_mode(regs))
+ return false;
+
+ if ((insn & 0x0fb00ff0) != 0x01000090)
+ return false;
+
+ return swp_handler(regs, insn) == 0;
+}
+
+static struct insn_emulation insn_swp = {
+ .name = "swp",
+ .status = INSN_OBSOLETE,
+ .try_emulate = try_emulate_swp,
+ .set_hw_mode = NULL,
+};
+#endif /* CONFIG_SWP_EMULATION */
+
+#ifdef CONFIG_CP15_BARRIER_EMULATION
+static int cp15barrier_handler(struct pt_regs *regs, u32 instr)
+{
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);
+
+ switch (aarch32_check_condition(instr, regs->pstate)) {
+ case ARM_OPCODE_CONDTEST_PASS:
+ break;
+ case ARM_OPCODE_CONDTEST_FAIL:
+ /* Condition failed - return to next instruction */
+ goto ret;
+ case ARM_OPCODE_CONDTEST_UNCOND:
+ /* If unconditional encoding - not a barrier instruction */
+ return -EFAULT;
+ default:
+ return -EINVAL;
+ }
+
+ switch (aarch32_insn_mcr_extract_crm(instr)) {
+ case 10:
+ /*
+ * dmb - mcr p15, 0, Rt, c7, c10, 5
+ * dsb - mcr p15, 0, Rt, c7, c10, 4
+ */
+ if (aarch32_insn_mcr_extract_opc2(instr) == 5) {
+ dmb(sy);
+ trace_instruction_emulation(
+ "mcr p15, 0, Rt, c7, c10, 5 ; dmb", regs->pc);
+ } else {
+ dsb(sy);
+ trace_instruction_emulation(
+ "mcr p15, 0, Rt, c7, c10, 4 ; dsb", regs->pc);
+ }
+ break;
+ case 5:
+ /*
+ * isb - mcr p15, 0, Rt, c7, c5, 4
+ *
+ * Taking an exception or returning from one acts as an
+ * instruction barrier. So no explicit barrier needed here.
+ */
+ trace_instruction_emulation(
+ "mcr p15, 0, Rt, c7, c5, 4 ; isb", regs->pc);
+ break;
+ }
+
+ret:
+ pr_warn_ratelimited("\"%s\" (%ld) uses deprecated CP15 Barrier instruction at 0x%llx\n",
+ current->comm, (unsigned long)current->pid, regs->pc);
+
+ arm64_skip_faulting_instruction(regs, 4);
+ return 0;
+}
+
+static int cp15_barrier_set_hw_mode(bool enable)
+{
+ if (enable)
+ sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_CP15BEN);
+ else
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_CP15BEN, 0);
+ return 0;
+}
+
+static bool try_emulate_cp15_barrier(struct pt_regs *regs, u32 insn)
+{
+ if (!compat_user_mode(regs) || compat_thumb_mode(regs))
+ return false;
+
+ if ((insn & 0x0fff0fdf) == 0x0e070f9a)
+ return cp15barrier_handler(regs, insn) == 0;
+
+ if ((insn & 0x0fff0fff) == 0x0e070f95)
+ return cp15barrier_handler(regs, insn) == 0;
+
+ return false;
+}
+
+static struct insn_emulation insn_cp15_barrier = {
+ .name = "cp15_barrier",
+ .status = INSN_DEPRECATED,
+ .try_emulate = try_emulate_cp15_barrier,
+ .set_hw_mode = cp15_barrier_set_hw_mode,
+};
+#endif /* CONFIG_CP15_BARRIER_EMULATION */
+
+#ifdef CONFIG_SETEND_EMULATION
+static int setend_set_hw_mode(bool enable)
+{
+ if (!cpu_supports_mixed_endian_el0())
+ return -EINVAL;
+
+ if (enable)
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_SED, 0);
+ else
+ sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_SED);
+ return 0;
+}
+
+static int compat_setend_handler(struct pt_regs *regs, u32 big_endian)
+{
+ char *insn;
+
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);
+
+ if (big_endian) {
+ insn = "setend be";
+ regs->pstate |= PSR_AA32_E_BIT;
+ } else {
+ insn = "setend le";
+ regs->pstate &= ~PSR_AA32_E_BIT;
+ }
+
+ trace_instruction_emulation(insn, regs->pc);
+ pr_warn_ratelimited("\"%s\" (%ld) uses deprecated setend instruction at 0x%llx\n",
+ current->comm, (unsigned long)current->pid, regs->pc);
+
+ return 0;
+}
+
+static int a32_setend_handler(struct pt_regs *regs, u32 instr)
+{
+ int rc = compat_setend_handler(regs, (instr >> 9) & 1);
+ arm64_skip_faulting_instruction(regs, 4);
+ return rc;
+}
+
+static int t16_setend_handler(struct pt_regs *regs, u32 instr)
+{
+ int rc = compat_setend_handler(regs, (instr >> 3) & 1);
+ arm64_skip_faulting_instruction(regs, 2);
+ return rc;
+}
+
+static bool try_emulate_setend(struct pt_regs *regs, u32 insn)
+{
+ if (compat_thumb_mode(regs) &&
+ (insn & 0xfffffff7) == 0x0000b650)
+ return t16_setend_handler(regs, insn) == 0;
+
+ if (compat_user_mode(regs) &&
+ (insn & 0xfffffdff) == 0xf1010000)
+ return a32_setend_handler(regs, insn) == 0;
+
+ return false;
+}
+
+static struct insn_emulation insn_setend = {
+ .name = "setend",
+ .status = INSN_DEPRECATED,
+ .try_emulate = try_emulate_setend,
+ .set_hw_mode = setend_set_hw_mode,
+};
+#endif /* CONFIG_SETEND_EMULATION */
+
+static struct insn_emulation *insn_emulations[] = {
+#ifdef CONFIG_SWP_EMULATION
+ &insn_swp,
+#endif
+#ifdef CONFIG_CP15_BARRIER_EMULATION
+ &insn_cp15_barrier,
+#endif
+#ifdef CONFIG_SETEND_EMULATION
+ &insn_setend,
+#endif
+};
+
+static DEFINE_MUTEX(insn_emulation_mutex);
+
+static void enable_insn_hw_mode(void *data)
+{
+ struct insn_emulation *insn = (struct insn_emulation *)data;
+ if (insn->set_hw_mode)
+ insn->set_hw_mode(true);
+}
+
+static void disable_insn_hw_mode(void *data)
+{
+ struct insn_emulation *insn = (struct insn_emulation *)data;
+ if (insn->set_hw_mode)
+ insn->set_hw_mode(false);
+}
+
+/* Run set_hw_mode(mode) on all active CPUs */
+static int run_all_cpu_set_hw_mode(struct insn_emulation *insn, bool enable)
+{
+ if (!insn->set_hw_mode)
+ return -EINVAL;
+ if (enable)
+ on_each_cpu(enable_insn_hw_mode, (void *)insn, true);
+ else
+ on_each_cpu(disable_insn_hw_mode, (void *)insn, true);
+ return 0;
+}
+
+/*
+ * Run set_hw_mode for all insns on a starting CPU.
+ * Returns:
+ * 0 - If all the hooks ran successfully.
+ * -EINVAL - At least one hook is not supported by the CPU.
+ */
+static int run_all_insn_set_hw_mode(unsigned int cpu)
+{
+ int i;
+ int rc = 0;
+ unsigned long flags;
+
+ /*
+ * Disable IRQs to serialize against an IPI from
+ * run_all_cpu_set_hw_mode(), ensuring the HW is programmed to the most
+ * recent enablement state if the two race with one another.
+ */
+ local_irq_save(flags);
+ for (i = 0; i < ARRAY_SIZE(insn_emulations); i++) {
+ struct insn_emulation *insn = insn_emulations[i];
+ bool enable = READ_ONCE(insn->current_mode) == INSN_HW;
+ if (insn->set_hw_mode && insn->set_hw_mode(enable)) {
+ pr_warn("CPU[%u] cannot support the emulation of %s",
+ cpu, insn->name);
+ rc = -EINVAL;
+ }
+ }
+ local_irq_restore(flags);
+
+ return rc;
+}
+
+static int update_insn_emulation_mode(struct insn_emulation *insn,
+ enum insn_emulation_mode prev)
+{
+ int ret = 0;
+
+ switch (prev) {
+ case INSN_UNDEF: /* Nothing to be done */
+ break;
+ case INSN_EMULATE:
+ break;
+ case INSN_HW:
+ if (!run_all_cpu_set_hw_mode(insn, false))
+ pr_notice("Disabled %s support\n", insn->name);
+ break;
+ }
+
+ switch (insn->current_mode) {
+ case INSN_UNDEF:
+ break;
+ case INSN_EMULATE:
+ break;
+ case INSN_HW:
+ ret = run_all_cpu_set_hw_mode(insn, true);
+ if (!ret)
+ pr_notice("Enabled %s support\n", insn->name);
+ break;
+ }
+
+ return ret;
+}
+
+static int emulation_proc_handler(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ int ret = 0;
+ struct insn_emulation *insn = container_of(table->data, struct insn_emulation, current_mode);
+ enum insn_emulation_mode prev_mode = insn->current_mode;
+
+ mutex_lock(&insn_emulation_mutex);
+ ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+
+ if (ret || !write || prev_mode == insn->current_mode)
+ goto ret;
+
+ ret = update_insn_emulation_mode(insn, prev_mode);
+ if (ret) {
+ /* Mode change failed, revert to previous mode. */
+ WRITE_ONCE(insn->current_mode, prev_mode);
+ update_insn_emulation_mode(insn, INSN_UNDEF);
+ }
+ret:
+ mutex_unlock(&insn_emulation_mutex);
+ return ret;
+}
+
+static void __init register_insn_emulation(struct insn_emulation *insn)
+{
+ struct ctl_table *sysctl;
+
+ insn->min = INSN_UNDEF;
+
+ switch (insn->status) {
+ case INSN_DEPRECATED:
+ insn->current_mode = INSN_EMULATE;
+ /* Disable the HW mode if it was turned on at early boot time */
+ run_all_cpu_set_hw_mode(insn, false);
+ insn->max = INSN_HW;
+ break;
+ case INSN_OBSOLETE:
+ insn->current_mode = INSN_UNDEF;
+ insn->max = INSN_EMULATE;
+ break;
+ case INSN_UNAVAILABLE:
+ insn->current_mode = INSN_UNDEF;
+ insn->max = INSN_UNDEF;
+ break;
+ }
+
+ /* Program the HW if required */
+ update_insn_emulation_mode(insn, INSN_UNDEF);
+
+ if (insn->status != INSN_UNAVAILABLE) {
+ sysctl = &insn->sysctl[0];
+
+ sysctl->mode = 0644;
+ sysctl->maxlen = sizeof(int);
+
+ sysctl->procname = insn->name;
+ sysctl->data = &insn->current_mode;
+ sysctl->extra1 = &insn->min;
+ sysctl->extra2 = &insn->max;
+ sysctl->proc_handler = emulation_proc_handler;
+
+ register_sysctl("abi", sysctl);
+ }
+}
+
+bool try_emulate_armv8_deprecated(struct pt_regs *regs, u32 insn)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(insn_emulations); i++) {
+ struct insn_emulation *ie = insn_emulations[i];
+
+ if (ie->status == INSN_UNAVAILABLE)
+ continue;
+
+ /*
+ * A trap may race with the mode being changed
+ * INSN_EMULATE<->INSN_HW. Try to emulate the instruction to
+ * avoid a spurious UNDEF.
+ */
+ if (READ_ONCE(ie->current_mode) == INSN_UNDEF)
+ continue;
+
+ if (ie->try_emulate(regs, insn))
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Invoked as core_initcall, which guarantees that the instruction
+ * emulation is ready for userspace.
+ */
+static int __init armv8_deprecated_init(void)
+{
+ int i;
+
+#ifdef CONFIG_SETEND_EMULATION
+ if (!system_supports_mixed_endian_el0()) {
+ insn_setend.status = INSN_UNAVAILABLE;
+ pr_info("setend instruction emulation is not supported on this system\n");
+ }
+
+#endif
+ for (i = 0; i < ARRAY_SIZE(insn_emulations); i++) {
+ struct insn_emulation *ie = insn_emulations[i];
+
+ if (ie->status == INSN_UNAVAILABLE)
+ continue;
+
+ register_insn_emulation(ie);
+ }
+
+ cpuhp_setup_state_nocalls(CPUHP_AP_ARM64_ISNDEP_STARTING,
+ "arm64/isndep:starting",
+ run_all_insn_set_hw_mode, NULL);
+ return 0;
+}
+
+core_initcall(armv8_deprecated_init);
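
Because register_insn_emulation() hooks each handler into the "abi" sysctl table, the emulation mode can be changed at runtime from userspace. A small usage sketch follows (userspace C; the /proc/sys/abi/<name> path is assumed from register_sysctl("abi", ...) above); the written value tracks enum insn_emulation_mode: 0 = undef, 1 = emulate, 2 = hw.

#include <stdio.h>

int main(void)
{
	/* assumed sysctl path for the "swp" emulation registered above */
	FILE *f = fopen("/proc/sys/abi/swp", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "1\n");	/* INSN_EMULATE: trap and emulate SWP/SWPB */
	return fclose(f) ? 1 : 0;
}
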
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
new file mode 100644
index 000000000..7d32fc959
--- /dev/null
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Based on arch/arm/kernel/asm-offsets.c
+ *
+ * Copyright (C) 1995-2003 Russell King
+ * 2001-2002 Keith Owens
+ * Copyright (C) 2012 ARM Ltd.
+ */
+
+#include <linux/arm_sdei.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/kvm_host.h>
+#include <linux/preempt.h>
+#include <linux/suspend.h>
+#include <asm/cpufeature.h>
+#include <asm/fixmap.h>
+#include <asm/thread_info.h>
+#include <asm/memory.h>
+#include <asm/signal32.h>
+#include <asm/smp_plat.h>
+#include <asm/suspend.h>
+#include <linux/kbuild.h>
+#include <linux/arm-smccc.h>
+
+int main(void)
+{
+ DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
+ BLANK();
+ DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags));
+ DEFINE(TSK_TI_PREEMPT, offsetof(struct task_struct, thread_info.preempt_count));
+ DEFINE(TSK_TI_ADDR_LIMIT, offsetof(struct task_struct, thread_info.addr_limit));
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+ DEFINE(TSK_TI_TTBR0, offsetof(struct task_struct, thread_info.ttbr0));
+#endif
+#ifdef CONFIG_SHADOW_CALL_STACK
+ DEFINE(TSK_TI_SCS_BASE, offsetof(struct task_struct, thread_info.scs_base));
+ DEFINE(TSK_TI_SCS_SP, offsetof(struct task_struct, thread_info.scs_sp));
+#endif
+ DEFINE(TSK_STACK, offsetof(struct task_struct, stack));
+#ifdef CONFIG_STACKPROTECTOR
+ DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
+#endif
+ BLANK();
+ DEFINE(THREAD_CPU_CONTEXT, offsetof(struct task_struct, thread.cpu_context));
+#ifdef CONFIG_ARM64_PTR_AUTH
+ DEFINE(THREAD_KEYS_USER, offsetof(struct task_struct, thread.keys_user));
+ DEFINE(THREAD_KEYS_KERNEL, offsetof(struct task_struct, thread.keys_kernel));
+#endif
+ BLANK();
+ DEFINE(S_X0, offsetof(struct pt_regs, regs[0]));
+ DEFINE(S_X2, offsetof(struct pt_regs, regs[2]));
+ DEFINE(S_X4, offsetof(struct pt_regs, regs[4]));
+ DEFINE(S_X6, offsetof(struct pt_regs, regs[6]));
+ DEFINE(S_X8, offsetof(struct pt_regs, regs[8]));
+ DEFINE(S_X10, offsetof(struct pt_regs, regs[10]));
+ DEFINE(S_X12, offsetof(struct pt_regs, regs[12]));
+ DEFINE(S_X14, offsetof(struct pt_regs, regs[14]));
+ DEFINE(S_X16, offsetof(struct pt_regs, regs[16]));
+ DEFINE(S_X18, offsetof(struct pt_regs, regs[18]));
+ DEFINE(S_X20, offsetof(struct pt_regs, regs[20]));
+ DEFINE(S_X22, offsetof(struct pt_regs, regs[22]));
+ DEFINE(S_X24, offsetof(struct pt_regs, regs[24]));
+ DEFINE(S_X26, offsetof(struct pt_regs, regs[26]));
+ DEFINE(S_X28, offsetof(struct pt_regs, regs[28]));
+ DEFINE(S_FP, offsetof(struct pt_regs, regs[29]));
+ DEFINE(S_LR, offsetof(struct pt_regs, regs[30]));
+ DEFINE(S_SP, offsetof(struct pt_regs, sp));
+ DEFINE(S_PSTATE, offsetof(struct pt_regs, pstate));
+ DEFINE(S_PC, offsetof(struct pt_regs, pc));
+ DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno));
+ DEFINE(S_ORIG_ADDR_LIMIT, offsetof(struct pt_regs, orig_addr_limit));
+ DEFINE(S_PMR_SAVE, offsetof(struct pt_regs, pmr_save));
+ DEFINE(S_STACKFRAME, offsetof(struct pt_regs, stackframe));
+ DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
+ BLANK();
+#ifdef CONFIG_COMPAT
+ DEFINE(COMPAT_SIGFRAME_REGS_OFFSET, offsetof(struct compat_sigframe, uc.uc_mcontext.arm_r0));
+ DEFINE(COMPAT_RT_SIGFRAME_REGS_OFFSET, offsetof(struct compat_rt_sigframe, sig.uc.uc_mcontext.arm_r0));
+ BLANK();
+#endif
+ DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id.counter));
+ BLANK();
+ DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm));
+ DEFINE(VMA_VM_FLAGS, offsetof(struct vm_area_struct, vm_flags));
+ BLANK();
+ DEFINE(VM_EXEC, VM_EXEC);
+ BLANK();
+ DEFINE(PAGE_SZ, PAGE_SIZE);
+ BLANK();
+ DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE);
+ DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE);
+ BLANK();
+ DEFINE(PREEMPT_DISABLE_OFFSET, PREEMPT_DISABLE_OFFSET);
+ BLANK();
+ DEFINE(CPU_BOOT_STACK, offsetof(struct secondary_data, stack));
+ DEFINE(CPU_BOOT_TASK, offsetof(struct secondary_data, task));
+ BLANK();
+#ifdef CONFIG_KVM
+ DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt));
+ DEFINE(VCPU_FAULT_DISR, offsetof(struct kvm_vcpu, arch.fault.disr_el1));
+ DEFINE(VCPU_WORKAROUND_FLAGS, offsetof(struct kvm_vcpu, arch.workaround_flags));
+ DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2));
+ DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_cpu_context, regs));
+ DEFINE(CPU_APIAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APIAKEYLO_EL1]));
+ DEFINE(CPU_APIBKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APIBKEYLO_EL1]));
+ DEFINE(CPU_APDAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APDAKEYLO_EL1]));
+ DEFINE(CPU_APDBKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APDBKEYLO_EL1]));
+ DEFINE(CPU_APGAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APGAKEYLO_EL1]));
+ DEFINE(HOST_CONTEXT_VCPU, offsetof(struct kvm_cpu_context, __hyp_running_vcpu));
+ DEFINE(HOST_DATA_CONTEXT, offsetof(struct kvm_host_data, host_ctxt));
+#endif
+#ifdef CONFIG_CPU_PM
+ DEFINE(CPU_CTX_SP, offsetof(struct cpu_suspend_ctx, sp));
+ DEFINE(MPIDR_HASH_MASK, offsetof(struct mpidr_hash, mask));
+ DEFINE(MPIDR_HASH_SHIFTS, offsetof(struct mpidr_hash, shift_aff));
+ DEFINE(SLEEP_STACK_DATA_SYSTEM_REGS, offsetof(struct sleep_stack_data, system_regs));
+ DEFINE(SLEEP_STACK_DATA_CALLEE_REGS, offsetof(struct sleep_stack_data, callee_saved_regs));
+#endif
+ DEFINE(ARM_SMCCC_RES_X0_OFFS, offsetof(struct arm_smccc_res, a0));
+ DEFINE(ARM_SMCCC_RES_X2_OFFS, offsetof(struct arm_smccc_res, a2));
+ DEFINE(ARM_SMCCC_QUIRK_ID_OFFS, offsetof(struct arm_smccc_quirk, id));
+ DEFINE(ARM_SMCCC_QUIRK_STATE_OFFS, offsetof(struct arm_smccc_quirk, state));
+ BLANK();
+ DEFINE(HIBERN_PBE_ORIG, offsetof(struct pbe, orig_address));
+ DEFINE(HIBERN_PBE_ADDR, offsetof(struct pbe, address));
+ DEFINE(HIBERN_PBE_NEXT, offsetof(struct pbe, next));
+ DEFINE(ARM64_FTR_SYSVAL, offsetof(struct arm64_ftr_reg, sys_val));
+ BLANK();
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ DEFINE(TRAMP_VALIAS, TRAMP_VALIAS);
+#endif
+#ifdef CONFIG_ARM_SDE_INTERFACE
+ DEFINE(SDEI_EVENT_INTREGS, offsetof(struct sdei_registered_event, interrupted_regs));
+ DEFINE(SDEI_EVENT_PRIORITY, offsetof(struct sdei_registered_event, priority));
+#endif
+#ifdef CONFIG_ARM64_PTR_AUTH
+ DEFINE(PTRAUTH_USER_KEY_APIA, offsetof(struct ptrauth_keys_user, apia));
+ DEFINE(PTRAUTH_USER_KEY_APIB, offsetof(struct ptrauth_keys_user, apib));
+ DEFINE(PTRAUTH_USER_KEY_APDA, offsetof(struct ptrauth_keys_user, apda));
+ DEFINE(PTRAUTH_USER_KEY_APDB, offsetof(struct ptrauth_keys_user, apdb));
+ DEFINE(PTRAUTH_USER_KEY_APGA, offsetof(struct ptrauth_keys_user, apga));
+ DEFINE(PTRAUTH_KERNEL_KEY_APIA, offsetof(struct ptrauth_keys_kernel, apia));
+ BLANK();
+#endif
+ return 0;
+}
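
For readers unfamiliar with the mechanism: main() above is never executed. The file is compiled to assembly and the DEFINE()/BLANK() markers are scraped by Kbuild into the generated asm-offsets.h header. The sketch below approximates the real helpers from include/linux/kbuild.h; the exact marker format is an assumption here.

/* Approximation of the kbuild helpers: each DEFINE() embeds a "->NAME value"
 * marker in the generated assembly, which the build turns into
 * "#define NAME value" in asm-offsets.h. */
#define DEFINE(sym, val) \
	asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

#define BLANK() \
	asm volatile("\n.ascii \"->\"" : : )
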
diff --git a/arch/arm64/kernel/cacheinfo.c b/arch/arm64/kernel/cacheinfo.c
new file mode 100644
index 000000000..97c42be71
--- /dev/null
+++ b/arch/arm64/kernel/cacheinfo.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ARM64 cacheinfo support
+ *
+ * Copyright (C) 2015 ARM Ltd.
+ * All Rights Reserved
+ */
+
+#include <linux/acpi.h>
+#include <linux/cacheinfo.h>
+#include <linux/of.h>
+
+#define MAX_CACHE_LEVEL		7	/* Max 7 levels supported */
+/* Ctypen, bits[3(n - 1) + 2 : 3(n - 1)], for n = 1 to 7 */
+#define CLIDR_CTYPE_SHIFT(level) (3 * (level - 1))
+#define CLIDR_CTYPE_MASK(level) (7 << CLIDR_CTYPE_SHIFT(level))
+#define CLIDR_CTYPE(clidr, level) \
+ (((clidr) & CLIDR_CTYPE_MASK(level)) >> CLIDR_CTYPE_SHIFT(level))
+
+int cache_line_size(void)
+{
+ if (coherency_max_size != 0)
+ return coherency_max_size;
+
+ return cache_line_size_of_cpu();
+}
+EXPORT_SYMBOL_GPL(cache_line_size);
+
+static inline enum cache_type get_cache_type(int level)
+{
+ u64 clidr;
+
+ if (level > MAX_CACHE_LEVEL)
+ return CACHE_TYPE_NOCACHE;
+ clidr = read_sysreg(clidr_el1);
+ return CLIDR_CTYPE(clidr, level);
+}
+
+static void ci_leaf_init(struct cacheinfo *this_leaf,
+ enum cache_type type, unsigned int level)
+{
+ this_leaf->level = level;
+ this_leaf->type = type;
+}
+
+int init_cache_level(unsigned int cpu)
+{
+ unsigned int ctype, level, leaves;
+ int fw_level;
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+
+ for (level = 1, leaves = 0; level <= MAX_CACHE_LEVEL; level++) {
+ ctype = get_cache_type(level);
+ if (ctype == CACHE_TYPE_NOCACHE) {
+ level--;
+ break;
+ }
+ /* Separate instruction and data caches */
+ leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1;
+ }
+
+ if (acpi_disabled)
+ fw_level = of_find_last_cache_level(cpu);
+ else
+ fw_level = acpi_find_last_cache_level(cpu);
+
+ if (fw_level < 0)
+ return fw_level;
+
+ if (level < fw_level) {
+ /*
+ * Some external caches are not specified in CLIDR_EL1;
+ * the information may be available in the device tree.
+ * Only unified external caches are considered here.
+ */
+ leaves += (fw_level - level);
+ level = fw_level;
+ }
+
+ this_cpu_ci->num_levels = level;
+ this_cpu_ci->num_leaves = leaves;
+ return 0;
+}
+
+int populate_cache_leaves(unsigned int cpu)
+{
+ unsigned int level, idx;
+ enum cache_type type;
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ struct cacheinfo *this_leaf = this_cpu_ci->info_list;
+
+ for (idx = 0, level = 1; level <= this_cpu_ci->num_levels &&
+ idx < this_cpu_ci->num_leaves; idx++, level++) {
+ type = get_cache_type(level);
+ if (type == CACHE_TYPE_SEPARATE) {
+ ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
+ ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
+ } else {
+ ci_leaf_init(this_leaf++, type, level);
+ }
+ }
+ return 0;
+}
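
As an aside, the CLIDR_EL1 decoding used above is easy to check in isolation. The sketch below is not part of the patch: it is a stand-alone user-space illustration with an invented CLIDR value, mirroring the CLIDR_CTYPE() arithmetic (3 bits per level) and the level/leaf counting done in init_cache_level().

#include <stdio.h>

/* Mirrors the kernel's CLIDR_EL1 Ctype<n> decoding: 3 bits per level. */
#define CLIDR_CTYPE_SHIFT(level)  (3 * ((level) - 1))
#define CLIDR_CTYPE(clidr, level) (((clidr) >> CLIDR_CTYPE_SHIFT(level)) & 7)

enum { NOCACHE = 0, INST_ONLY = 1, DATA_ONLY = 2, SEPARATE = 3, UNIFIED = 4 };

int main(void)
{
	/* Hypothetical CLIDR: L1 separate I/D, L2 unified, nothing above. */
	unsigned long long clidr = (4ULL << CLIDR_CTYPE_SHIFT(2)) |
				   (3ULL << CLIDR_CTYPE_SHIFT(1));
	unsigned int level, leaves = 0;

	for (level = 1; level <= 7; level++) {
		unsigned int ctype = CLIDR_CTYPE(clidr, level);

		if (ctype == NOCACHE)
			break;
		/* A separate cache contributes an I leaf and a D leaf. */
		leaves += (ctype == SEPARATE) ? 2 : 1;
	}
	printf("levels=%u leaves=%u\n", level - 1, leaves); /* levels=2 leaves=3 */
	return 0;
}

For the invented value, the separate L1 contributes two leaves and the unified L2 one more, which is exactly what populate_cache_leaves() would later describe.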
diff --git a/arch/arm64/kernel/cpu-reset.S b/arch/arm64/kernel/cpu-reset.S
new file mode 100644
index 000000000..37721eb6f
--- /dev/null
+++ b/arch/arm64/kernel/cpu-reset.S
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * CPU reset routines
+ *
+ * Copyright (C) 2001 Deep Blue Solutions Ltd.
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2015 Huawei Futurewei Technologies.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/sysreg.h>
+#include <asm/virt.h>
+
+.text
+.pushsection .idmap.text, "awx"
+
+/*
+ * __cpu_soft_restart(el2_switch, entry, arg0, arg1, arg2) - Helper for
+ * cpu_soft_restart.
+ *
+ * @el2_switch: Flag to indicate a switch to EL2 is needed.
+ * @entry: Location to jump to for soft reset.
+ * arg0: First argument passed to @entry. (relocation list)
+ * arg1: Second argument passed to @entry. (physical kernel entry)
+ * arg2: Third argument passed to @entry. (physical dtb address)
+ *
+ * Put the CPU into the same state as it would be if it had been reset, and
+ * branch to what would be the reset vector. It must be executed with the
+ * flat identity mapping.
+ */
+SYM_CODE_START(__cpu_soft_restart)
+ /* Clear sctlr_el1 flags. */
+ mrs x12, sctlr_el1
+ mov_q x13, SCTLR_ELx_FLAGS
+ bic x12, x12, x13
+ pre_disable_mmu_workaround
+ /*
+ * Either disable the EL1&0 translation regime, or the EL2&0
+ * translation regime if HCR_EL2.E2H == 1.
+ */
+ msr sctlr_el1, x12
+ isb
+
+ cbz x0, 1f // el2_switch?
+ mov x0, #HVC_SOFT_RESTART
+ hvc #0 // no return
+
+1: mov x8, x1 // entry
+ mov x0, x2 // arg0
+ mov x1, x3 // arg1
+ mov x2, x4 // arg2
+ br x8
+SYM_CODE_END(__cpu_soft_restart)
+
+.popsection
diff --git a/arch/arm64/kernel/cpu-reset.h b/arch/arm64/kernel/cpu-reset.h
new file mode 100644
index 000000000..ed50e9587
--- /dev/null
+++ b/arch/arm64/kernel/cpu-reset.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * CPU reset routines
+ *
+ * Copyright (C) 2015 Huawei Futurewei Technologies.
+ */
+
+#ifndef _ARM64_CPU_RESET_H
+#define _ARM64_CPU_RESET_H
+
+#include <asm/virt.h>
+
+void __cpu_soft_restart(unsigned long el2_switch, unsigned long entry,
+ unsigned long arg0, unsigned long arg1, unsigned long arg2);
+
+static inline void __noreturn cpu_soft_restart(unsigned long entry,
+ unsigned long arg0,
+ unsigned long arg1,
+ unsigned long arg2)
+{
+ typeof(__cpu_soft_restart) *restart;
+
+ unsigned long el2_switch = !is_kernel_in_hyp_mode() &&
+ is_hyp_mode_available();
+ restart = (void *)__pa_symbol(__cpu_soft_restart);
+
+ cpu_install_idmap();
+ restart(el2_switch, entry, arg0, arg1, arg2);
+ unreachable();
+}
+
+#endif
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
new file mode 100644
index 000000000..5d6f19bc6
--- /dev/null
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -0,0 +1,576 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Contains CPU specific errata definitions
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/types.h>
+#include <linux/cpu.h>
+#include <asm/cpu.h>
+#include <asm/cputype.h>
+#include <asm/cpufeature.h>
+#include <asm/kvm_asm.h>
+#include <asm/smp_plat.h>
+
+static bool __maybe_unused
+is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
+{
+ const struct arm64_midr_revidr *fix;
+ u32 midr = read_cpuid_id(), revidr;
+
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+ if (!is_midr_in_range(midr, &entry->midr_range))
+ return false;
+
+ midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
+ revidr = read_cpuid(REVIDR_EL1);
+ for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
+ if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
+ return false;
+
+ return true;
+}
+
+static bool __maybe_unused
+is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
+ int scope)
+{
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+ return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
+}
+
+static bool __maybe_unused
+is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
+{
+ u32 model;
+
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+
+ model = read_cpuid_id();
+ model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
+ MIDR_ARCHITECTURE_MASK;
+
+ return model == entry->midr_range.model;
+}
+
+static bool
+has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
+ int scope)
+{
+ u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
+ u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
+ u64 ctr_raw, ctr_real;
+
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+
+ /*
+ * We want to make sure that all the CPUs in the system expose
+ * a consistent CTR_EL0 so that applications behave correctly
+ * when migrated between CPUs.
+ *
+ * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0:
+ *
+ * 1) It is safe if the system doesn't support IDC, as the CPU
+ * anyway reports IDC = 0, consistent with the rest.
+ *
+ * 2) If the system has IDC, it is still safe as we trap CTR_EL0
+ * access on this CPU via the ARM64_HAS_CACHE_IDC capability.
+ *
+ * So, we need to make sure either the raw CTR_EL0 or the effective
+ * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
+ */
+ ctr_raw = read_cpuid_cachetype() & mask;
+ ctr_real = read_cpuid_effective_cachetype() & mask;
+
+ return (ctr_real != sys) && (ctr_raw != sys);
+}
+
+static void
+cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
+{
+ u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
+ bool enable_uct_trap = false;
+
+ /* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
+ if ((read_cpuid_cachetype() & mask) !=
+ (arm64_ftr_reg_ctrel0.sys_val & mask))
+ enable_uct_trap = true;
+
+ /* ... or if the system is affected by an erratum */
+ if (cap->capability == ARM64_WORKAROUND_1542419)
+ enable_uct_trap = true;
+
+ if (enable_uct_trap)
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
+}
+
+#ifdef CONFIG_ARM64_ERRATUM_1463225
+DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
+
+static bool
+has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
+ int scope)
+{
+ return is_affected_midr_range_list(entry, scope) && is_kernel_in_hyp_mode();
+}
+#endif
+
+static void __maybe_unused
+cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
+{
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
+}
+
+#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \
+ .matches = is_affected_midr_range, \
+ .midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)
+
+#define CAP_MIDR_ALL_VERSIONS(model) \
+ .matches = is_affected_midr_range, \
+ .midr_range = MIDR_ALL_VERSIONS(model)
+
+#define MIDR_FIXED(rev, revidr_mask) \
+ .fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}
+
+#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \
+ CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)
+
+#define CAP_MIDR_RANGE_LIST(list) \
+ .matches = is_affected_midr_range_list, \
+ .midr_range_list = list
+
+/* Errata affecting a range of revisions of a given model variant */
+#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max) \
+ ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)
+
+/* Errata affecting a single variant/revision of a model */
+#define ERRATA_MIDR_REV(model, var, rev) \
+ ERRATA_MIDR_RANGE(model, var, rev, var, rev)
+
+/* Errata affecting all variants/revisions of a given model */
+#define ERRATA_MIDR_ALL_VERSIONS(model) \
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \
+ CAP_MIDR_ALL_VERSIONS(model)
+
+/* Errata affecting a list of midr ranges, with the same workaround */
+#define ERRATA_MIDR_RANGE_LIST(midr_list) \
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \
+ CAP_MIDR_RANGE_LIST(midr_list)
+
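
The MIDR_RANGE()-based macros above all boil down to a variant/revision ("rVpR") range test on MIDR_EL1. The following stand-alone sketch is only an illustration of that idea, not the kernel's implementation: it packs the architectural Variant [23:20] and Revision [3:0] fields and checks them against a range, using a made-up MIDR value.

#include <stdio.h>
#include <stdbool.h>

/* MIDR_EL1 field extraction (architectural layout). */
#define MIDR_REVISION(midr) ((midr) & 0xf)
#define MIDR_VARIANT(midr)  (((midr) >> 20) & 0xf)

/* "rVpR" range check in the spirit of is_affected_midr_range(). */
static bool in_rev_range(unsigned int midr,
			 unsigned int v_min, unsigned int r_min,
			 unsigned int v_max, unsigned int r_max)
{
	unsigned int vr = (MIDR_VARIANT(midr) << 4) | MIDR_REVISION(midr);

	return vr >= ((v_min << 4) | r_min) && vr <= ((v_max << 4) | r_max);
}

int main(void)
{
	/* Made-up MIDR with variant = 2, revision = 1, i.e. r2p1. */
	unsigned int midr = (2u << 20) | 1u;

	/* Is r2p1 within r0p0..r3p1?  Prints 1. */
	printf("%d\n", in_rev_range(midr, 0, 0, 3, 1));
	return 0;
}

The real helpers also match the implementer/part number and, via MIDR_FIXED(), can exclude revisions that REVIDR_EL1 reports as already fixed.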
+static const __maybe_unused struct midr_range tx2_family_cpus[] = {
+ MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+ MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
+ {},
+};
+
+static bool __maybe_unused
+needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
+ int scope)
+{
+ int i;
+
+ if (!is_affected_midr_range_list(entry, scope) ||
+ !is_hyp_mode_available())
+ return false;
+
+ for_each_possible_cpu(i) {
+ if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
+ return true;
+ }
+
+ return false;
+}
+
+static bool __maybe_unused
+has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
+ int scope)
+{
+ u32 midr = read_cpuid_id();
+ bool has_dic = read_cpuid_cachetype() & BIT(CTR_DIC_SHIFT);
+ const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);
+
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+ return is_midr_in_range(midr, &range) && has_dic;
+}
+
+#ifdef CONFIG_RANDOMIZE_BASE
+
+static const struct midr_range ca57_a72[] = {
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+ {},
+};
+
+#endif
+
+#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
+static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
+#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
+ {
+ ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
+ },
+ {
+ .midr_range.model = MIDR_QCOM_KRYO,
+ .matches = is_kryo_midr,
+ },
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_1286807
+ {
+ ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
+ },
+ {
+ /* Kryo4xx Gold (rcpe to rfpe) => (r0p0 to r3p0) */
+ ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
+ },
+#endif
+ {},
+};
+#endif
+
+#ifdef CONFIG_CAVIUM_ERRATUM_27456
+const struct midr_range cavium_erratum_27456_cpus[] = {
+ /* Cavium ThunderX, T88 pass 1.x - 2.1 */
+ MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
+ /* Cavium ThunderX, T81 pass 1.0 */
+ MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
+ {},
+};
+#endif
+
+#ifdef CONFIG_CAVIUM_ERRATUM_30115
+static const struct midr_range cavium_erratum_30115_cpus[] = {
+ /* Cavium ThunderX, T88 pass 1.x - 2.2 */
+ MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
+ /* Cavium ThunderX, T81 pass 1.0 - 1.2 */
+ MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
+ /* Cavium ThunderX, T83 pass 1.0 */
+ MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
+ {},
+};
+#endif
+
+#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
+static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
+ {
+ ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
+ },
+ {
+ .midr_range.model = MIDR_QCOM_KRYO,
+ .matches = is_kryo_midr,
+ },
+ {},
+};
+#endif
+
+#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
+static const struct midr_range workaround_clean_cache[] = {
+#if defined(CONFIG_ARM64_ERRATUM_826319) || \
+ defined(CONFIG_ARM64_ERRATUM_827319) || \
+ defined(CONFIG_ARM64_ERRATUM_824069)
+ /* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
+ MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_819472
+ /* Cortex-A53 r0p[01] : ARM errata 819472 */
+ MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
+#endif
+ {},
+};
+#endif
+
+#ifdef CONFIG_ARM64_ERRATUM_1418040
+/*
+ * - 1188873 affects r0p0 to r2p0
+ * - 1418040 affects r0p0 to r3p1
+ */
+static const struct midr_range erratum_1418040_list[] = {
+ /* Cortex-A76 r0p0 to r3p1 */
+ MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
+ /* Neoverse-N1 r0p0 to r3p1 */
+ MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
+ /* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
+ MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
+ {},
+};
+#endif
+
+#ifdef CONFIG_ARM64_ERRATUM_845719
+static const struct midr_range erratum_845719_list[] = {
+ /* Cortex-A53 r0p[01234] */
+ MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
+ /* Brahma-B53 r0p[0] */
+ MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
+ /* Kryo2XX Silver rAp4 */
+ MIDR_REV(MIDR_QCOM_KRYO_2XX_SILVER, 0xa, 0x4),
+ {},
+};
+#endif
+
+#ifdef CONFIG_ARM64_ERRATUM_843419
+static const struct arm64_cpu_capabilities erratum_843419_list[] = {
+ {
+ /* Cortex-A53 r0p[01234] */
+ .matches = is_affected_midr_range,
+ ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
+ MIDR_FIXED(0x4, BIT(8)),
+ },
+ {
+ /* Brahma-B53 r0p[0] */
+ .matches = is_affected_midr_range,
+ ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
+ },
+ {},
+};
+#endif
+
+#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
+static const struct midr_range erratum_speculative_at_list[] = {
+#ifdef CONFIG_ARM64_ERRATUM_1165522
+ /* Cortex A76 r0p0 to r2p0 */
+ MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_1319367
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_1530923
+ /* Cortex A55 r0p0 to r2p0 */
+ MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
+ /* Kryo4xx Silver (rdpe => r1p0) */
+ MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
+#endif
+ {},
+};
+#endif
+
+#ifdef CONFIG_ARM64_ERRATUM_1463225
+static const struct midr_range erratum_1463225[] = {
+ /* Cortex-A76 r0p0 - r3p1 */
+ MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
+ /* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
+ MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
+ {},
+};
+#endif
+
+#ifdef CONFIG_ARM64_ERRATUM_1742098
+static struct midr_range broken_aarch32_aes[] = {
+ MIDR_RANGE(MIDR_CORTEX_A57, 0, 1, 0xf, 0xf),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+ {},
+};
+#endif
+
+const struct arm64_cpu_capabilities arm64_errata[] = {
+#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
+ {
+ .desc = "ARM errata 826319, 827319, 824069, or 819472",
+ .capability = ARM64_WORKAROUND_CLEAN_CACHE,
+ ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
+ .cpu_enable = cpu_enable_cache_maint_trap,
+ },
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_832075
+ {
+ /* Cortex-A57 r0p0 - r1p2 */
+ .desc = "ARM erratum 832075",
+ .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
+ ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
+ 0, 0,
+ 1, 2),
+ },
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_834220
+ {
+ /* Cortex-A57 r0p0 - r1p2 */
+ .desc = "ARM erratum 834220",
+ .capability = ARM64_WORKAROUND_834220,
+ ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
+ 0, 0,
+ 1, 2),
+ },
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_843419
+ {
+ .desc = "ARM erratum 843419",
+ .capability = ARM64_WORKAROUND_843419,
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ .matches = cpucap_multi_entry_cap_matches,
+ .match_list = erratum_843419_list,
+ },
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_845719
+ {
+ .desc = "ARM erratum 845719",
+ .capability = ARM64_WORKAROUND_845719,
+ ERRATA_MIDR_RANGE_LIST(erratum_845719_list),
+ },
+#endif
+#ifdef CONFIG_CAVIUM_ERRATUM_23154
+ {
+ /* Cavium ThunderX, pass 1.x */
+ .desc = "Cavium erratum 23154",
+ .capability = ARM64_WORKAROUND_CAVIUM_23154,
+ ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
+ },
+#endif
+#ifdef CONFIG_CAVIUM_ERRATUM_27456
+ {
+ .desc = "Cavium erratum 27456",
+ .capability = ARM64_WORKAROUND_CAVIUM_27456,
+ ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
+ },
+#endif
+#ifdef CONFIG_CAVIUM_ERRATUM_30115
+ {
+ .desc = "Cavium erratum 30115",
+ .capability = ARM64_WORKAROUND_CAVIUM_30115,
+ ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
+ },
+#endif
+ {
+ .desc = "Mismatched cache type (CTR_EL0)",
+ .capability = ARM64_MISMATCHED_CACHE_TYPE,
+ .matches = has_mismatched_cache_type,
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ .cpu_enable = cpu_enable_trap_ctr_access,
+ },
+#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
+ {
+ .desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
+ .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ .matches = cpucap_multi_entry_cap_matches,
+ .match_list = qcom_erratum_1003_list,
+ },
+#endif
+#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
+ {
+ .desc = "Qualcomm erratum 1009, or ARM erratum 1286807",
+ .capability = ARM64_WORKAROUND_REPEAT_TLBI,
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ .matches = cpucap_multi_entry_cap_matches,
+ .match_list = arm64_repeat_tlbi_list,
+ },
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_858921
+ {
+ /* Cortex-A73 all versions */
+ .desc = "ARM erratum 858921",
+ .capability = ARM64_WORKAROUND_858921,
+ ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+ },
+#endif
+ {
+ .desc = "Spectre-v2",
+ .capability = ARM64_SPECTRE_V2,
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ .matches = has_spectre_v2,
+ .cpu_enable = spectre_v2_enable_mitigation,
+ },
+#ifdef CONFIG_RANDOMIZE_BASE
+ {
+ .desc = "EL2 vector hardening",
+ .capability = ARM64_HARDEN_EL2_VECTORS,
+ ERRATA_MIDR_RANGE_LIST(ca57_a72),
+ },
+#endif
+ {
+ .desc = "Spectre-v4",
+ .capability = ARM64_SPECTRE_V4,
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ .matches = has_spectre_v4,
+ .cpu_enable = spectre_v4_enable_mitigation,
+ },
+ {
+ .desc = "Spectre-BHB",
+ .capability = ARM64_SPECTRE_BHB,
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ .matches = is_spectre_bhb_affected,
+ .cpu_enable = spectre_bhb_enable_mitigation,
+ },
+#ifdef CONFIG_ARM64_ERRATUM_1418040
+ {
+ .desc = "ARM erratum 1418040",
+ .capability = ARM64_WORKAROUND_1418040,
+ ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
+ /*
+ * We need to allow affected CPUs to come in late, but
+ * also need the non-affected CPUs to be able to come
+ * in at any point in time. Wonderful.
+ */
+ .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
+ },
+#endif
+#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
+ {
+ .desc = "ARM errata 1165522, 1319367, or 1530923",
+ .capability = ARM64_WORKAROUND_SPECULATIVE_AT,
+ ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_list),
+ },
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_1463225
+ {
+ .desc = "ARM erratum 1463225",
+ .capability = ARM64_WORKAROUND_1463225,
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ .matches = has_cortex_a76_erratum_1463225,
+ .midr_range_list = erratum_1463225,
+ },
+#endif
+#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
+ {
+ .desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
+ .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
+ ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
+ .matches = needs_tx2_tvm_workaround,
+ },
+ {
+ .desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
+ .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
+ ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
+ },
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_1542419
+ {
+ /* we depend on the firmware portion for correctness */
+ .desc = "ARM erratum 1542419 (kernel portion)",
+ .capability = ARM64_WORKAROUND_1542419,
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ .matches = has_neoverse_n1_erratum_1542419,
+ .cpu_enable = cpu_enable_trap_ctr_access,
+ },
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_1508412
+ {
+ /* we depend on the firmware portion for correctness */
+ .desc = "ARM erratum 1508412 (kernel portion)",
+ .capability = ARM64_WORKAROUND_1508412,
+ ERRATA_MIDR_RANGE(MIDR_CORTEX_A77,
+ 0, 0,
+ 1, 0),
+ },
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_2457168
+ {
+ .desc = "ARM erratum 2457168",
+ .capability = ARM64_WORKAROUND_2457168,
+ .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
+ /* Cortex-A510 r0p0-r1p1 */
+ CAP_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1)
+ },
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_1742098
+ {
+ .desc = "ARM erratum 1742098",
+ .capability = ARM64_WORKAROUND_1742098,
+ CAP_MIDR_RANGE_LIST(broken_aarch32_aes),
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ },
+#endif
+ {
+ }
+};
diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
new file mode 100644
index 000000000..e133011f6
--- /dev/null
+++ b/arch/arm64/kernel/cpu_ops.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * CPU kernel entry/exit control
+ *
+ * Copyright (C) 2013 ARM Ltd.
+ */
+
+#include <linux/acpi.h>
+#include <linux/cache.h>
+#include <linux/errno.h>
+#include <linux/of.h>
+#include <linux/string.h>
+#include <asm/acpi.h>
+#include <asm/cpu_ops.h>
+#include <asm/smp_plat.h>
+
+extern const struct cpu_operations smp_spin_table_ops;
+#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
+extern const struct cpu_operations acpi_parking_protocol_ops;
+#endif
+extern const struct cpu_operations cpu_psci_ops;
+
+static const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init;
+
+static const struct cpu_operations *const dt_supported_cpu_ops[] __initconst = {
+ &smp_spin_table_ops,
+ &cpu_psci_ops,
+ NULL,
+};
+
+static const struct cpu_operations *const acpi_supported_cpu_ops[] __initconst = {
+#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
+ &acpi_parking_protocol_ops,
+#endif
+ &cpu_psci_ops,
+ NULL,
+};
+
+static const struct cpu_operations * __init cpu_get_ops(const char *name)
+{
+ const struct cpu_operations *const *ops;
+
+ ops = acpi_disabled ? dt_supported_cpu_ops : acpi_supported_cpu_ops;
+
+ while (*ops) {
+ if (!strcmp(name, (*ops)->name))
+ return *ops;
+
+ ops++;
+ }
+
+ return NULL;
+}
+
+static const char *__init cpu_read_enable_method(int cpu)
+{
+ const char *enable_method;
+
+ if (acpi_disabled) {
+ struct device_node *dn = of_get_cpu_node(cpu, NULL);
+
+ if (!dn) {
+ if (!cpu)
+ pr_err("Failed to find device node for boot cpu\n");
+ return NULL;
+ }
+
+ enable_method = of_get_property(dn, "enable-method", NULL);
+ if (!enable_method) {
+ /*
+ * The boot CPU may not have an enable method (e.g.
+ * when spin-table is used for secondaries).
+ * Don't warn spuriously.
+ */
+ if (cpu != 0)
+ pr_err("%pOF: missing enable-method property\n",
+ dn);
+ }
+ of_node_put(dn);
+ } else {
+ enable_method = acpi_get_enable_method(cpu);
+ if (!enable_method) {
+ /*
+ * In ACPI systems the boot CPU does not require
+ * checking the enable method since for some
+ * boot protocol (ie parking protocol) it need not
+ * be initialized. Don't warn spuriously.
+ */
+ if (cpu != 0)
+ pr_err("Unsupported ACPI enable-method\n");
+ }
+ }
+
+ return enable_method;
+}
+/*
+ * Read a cpu's enable method and record it in cpu_ops.
+ */
+int __init init_cpu_ops(int cpu)
+{
+ const char *enable_method = cpu_read_enable_method(cpu);
+
+ if (!enable_method)
+ return -ENODEV;
+
+ cpu_ops[cpu] = cpu_get_ops(enable_method);
+ if (!cpu_ops[cpu]) {
+ pr_warn("Unsupported enable-method: %s\n", enable_method);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+const struct cpu_operations *get_cpu_ops(int cpu)
+{
+ return cpu_ops[cpu];
+}
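
For readers unfamiliar with the enable-method plumbing above: the selection reduces to a string match over a NULL-terminated table of named operations, chosen from the DT "enable-method" property or the ACPI tables. Below is a stripped-down, stand-alone sketch of that pattern; the names and boot stubs are invented for illustration and are not the kernel's.

#include <stdio.h>
#include <string.h>

struct ops {
	const char *name;
	void (*boot)(void);
};

static void psci_boot(void)  { puts("boot via PSCI"); }
static void table_boot(void) { puts("boot via spin-table"); }

static const struct ops spin_table_ops = { "spin-table", table_boot };
static const struct ops psci_ops       = { "psci",       psci_boot  };

/* NULL-terminated table, mirroring the shape of dt_supported_cpu_ops[]. */
static const struct ops *const supported[] = {
	&spin_table_ops,
	&psci_ops,
	NULL,
};

static const struct ops *get_ops(const char *name)
{
	const struct ops *const *ops;

	for (ops = supported; *ops; ops++)
		if (!strcmp(name, (*ops)->name))
			return *ops;
	return NULL;
}

int main(void)
{
	const struct ops *o = get_ops("psci"); /* e.g. read from "enable-method" */

	if (o)
		o->boot();
	return 0;
}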
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
new file mode 100644
index 000000000..1f0a2deaf
--- /dev/null
+++ b/arch/arm64/kernel/cpufeature.c
@@ -0,0 +1,2881 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Contains CPU feature definitions
+ *
+ * Copyright (C) 2015 ARM Ltd.
+ *
+ * A note for the weary kernel hacker: the code here is confusing and hard to
+ * follow! That's partly because it's solving a nasty problem, but also because
+ * there's a little bit of over-abstraction that tends to obscure what's going
+ * on behind a maze of helper functions and macros.
+ *
+ * The basic problem is that hardware folks have started gluing together CPUs
+ * with distinct architectural features; in some cases even creating SoCs where
+ * user-visible instructions are available only on a subset of the available
+ * cores. We try to address this by snapshotting the feature registers of the
+ * boot CPU and comparing these with the feature registers of each secondary
+ * CPU when bringing them up. If there is a mismatch, then we update the
+ * snapshot state to indicate the lowest-common denominator of the feature,
+ * known as the "safe" value. This snapshot state can be queried to view the
+ * "sanitised" value of a feature register.
+ *
+ * The sanitised register values are used to decide which capabilities we
+ * have in the system. These may be in the form of traditional "hwcaps"
+ * advertised to userspace or internal "cpucaps" which are used to configure
+ * things like alternative patching and static keys. While a feature mismatch
+ * may result in a TAINT_CPU_OUT_OF_SPEC kernel taint, a capability mismatch
+ * may prevent a CPU from being onlined at all.
+ *
+ * Some implementation details worth remembering:
+ *
+ * - Mismatched features are *always* sanitised to a "safe" value, which
+ * usually indicates that the feature is not supported.
+ *
+ * - A mismatched feature marked with FTR_STRICT will cause a "SANITY CHECK"
+ * warning when onlining an offending CPU and the kernel will be tainted
+ * with TAINT_CPU_OUT_OF_SPEC.
+ *
+ * - Features marked as FTR_VISIBLE have their sanitised value visible to
+ * userspace. FTR_VISIBLE features in registers that are only visible
+ * to EL0 by trapping *must* have a corresponding HWCAP so that late
+ * onlining of CPUs cannot lead to features disappearing at runtime.
+ *
+ * - A "feature" is typically a 4-bit register field. A "capability" is the
+ * high-level description derived from the sanitised field value.
+ *
+ * - Read the Arm ARM (DDI 0487F.a) section D13.1.3 ("Principles of the ID
+ * scheme for fields in ID registers") to understand when feature fields
+ * may be signed or unsigned (FTR_SIGNED and FTR_UNSIGNED accordingly).
+ *
+ * - KVM exposes its own view of the feature registers to guest operating
+ * systems regardless of FTR_VISIBLE. This is typically driven from the
+ * sanitised register values to allow virtual CPUs to be migrated between
+ * arbitrary physical CPUs, but some features not present on the host are
+ * also advertised and emulated. Look at sys_reg_descs[] for the gory
+ * details.
+ *
+ * - If the arm64_ftr_bits[] for a register has a missing field, then this
+ * field is treated as STRICT RES0, including for read_sanitised_ftr_reg().
+ * This is stronger than FTR_HIDDEN and can be used to hide features from
+ * KVM guests.
+ */
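
To make the "safe value" idea above concrete: for an unsigned FTR_LOWER_SAFE field, sanitisation is just a field-wise minimum across CPUs. The sketch below is not kernel code and the register values are invented; it only illustrates that one policy, whereas the real code also handles signed fields and the other policies (FTR_EXACT, FTR_HIGHER_SAFE, and so on).

#include <stdio.h>
#include <stdint.h>

/* Take the lower (safer) value of every 4-bit field, FTR_LOWER_SAFE style. */
static uint64_t sanitise_lower_safe(uint64_t boot, uint64_t secondary)
{
	uint64_t out = 0;
	unsigned int shift;

	for (shift = 0; shift < 64; shift += 4) {
		uint64_t a = (boot      >> shift) & 0xf;
		uint64_t b = (secondary >> shift) & 0xf;

		out |= (a < b ? a : b) << shift;
	}
	return out;
}

int main(void)
{
	/* Invented values: the boot CPU advertises more than the secondary. */
	uint64_t boot      = 0x0000000000002211ULL;
	uint64_t secondary = 0x0000000000001221ULL;

	/* Field-wise minimum: prints 0x1211. */
	printf("sanitised = %#llx\n",
	       (unsigned long long)sanitise_lower_safe(boot, secondary));
	return 0;
}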
+
+#define pr_fmt(fmt) "CPU features: " fmt
+
+#include <linux/bsearch.h>
+#include <linux/cpumask.h>
+#include <linux/crash_dump.h>
+#include <linux/percpu.h>
+#include <linux/sort.h>
+#include <linux/stop_machine.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/cpu.h>
+
+#include <asm/cpu.h>
+#include <asm/cpufeature.h>
+#include <asm/cpu_ops.h>
+#include <asm/fpsimd.h>
+#include <asm/hwcap.h>
+#include <asm/mmu_context.h>
+#include <asm/mte.h>
+#include <asm/processor.h>
+#include <asm/sysreg.h>
+#include <asm/traps.h>
+#include <asm/vectors.h>
+#include <asm/virt.h>
+
+/* Kernel representation of AT_HWCAP and AT_HWCAP2 */
+static unsigned long elf_hwcap __read_mostly;
+
+#ifdef CONFIG_COMPAT
+#define COMPAT_ELF_HWCAP_DEFAULT \
+ (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
+ COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
+ COMPAT_HWCAP_TLS|COMPAT_HWCAP_IDIV|\
+ COMPAT_HWCAP_LPAE)
+unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
+unsigned int compat_elf_hwcap2 __read_mostly;
+#endif
+
+DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
+EXPORT_SYMBOL(cpu_hwcaps);
+static struct arm64_cpu_capabilities const __ro_after_init *cpu_hwcaps_ptrs[ARM64_NCAPS];
+
+/* We also need a bit for ARM64_CB_PATCH */
+DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);
+
+bool arm64_use_ng_mappings = false;
+EXPORT_SYMBOL(arm64_use_ng_mappings);
+
+DEFINE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector) = vectors;
+
+/*
+ * Flag to indicate if we have computed the system wide
+ * capabilities based on the boot time active CPUs. This
+ * will be used to determine if a new booting CPU should
+ * go through the verification process to make sure that it
+ * supports the system capabilities, without using a hotplug
+ * notifier. This is also used to decide if we could use
+ * the fast path for checking constant CPU caps.
+ */
+DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
+EXPORT_SYMBOL(arm64_const_caps_ready);
+static inline void finalize_system_capabilities(void)
+{
+ static_branch_enable(&arm64_const_caps_ready);
+}
+
+void dump_cpu_features(void)
+{
+ /* file-wide pr_fmt adds "CPU features: " prefix */
+ pr_emerg("0x%*pb\n", ARM64_NCAPS, &cpu_hwcaps);
+}
+
+DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
+EXPORT_SYMBOL(cpu_hwcap_keys);
+
+#define __ARM64_FTR_BITS(SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
+ { \
+ .sign = SIGNED, \
+ .visible = VISIBLE, \
+ .strict = STRICT, \
+ .type = TYPE, \
+ .shift = SHIFT, \
+ .width = WIDTH, \
+ .safe_val = SAFE_VAL, \
+ }
+
+/* Define a feature with unsigned values */
+#define ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
+ __ARM64_FTR_BITS(FTR_UNSIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
+
+/* Define a feature with a signed value */
+#define S_ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
+ __ARM64_FTR_BITS(FTR_SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
+
+#define ARM64_FTR_END \
+ { \
+ .width = 0, \
+ }
+
+/* meta feature for alternatives */
+static bool __maybe_unused
+cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused);
+
+static void cpu_enable_cnp(struct arm64_cpu_capabilities const *cap);
+
+static bool __system_matches_cap(unsigned int n);
+
+/*
+ * NOTE: Any changes to the visibility of features should be kept in
+ * sync with the documentation of the CPU feature register ABI.
+ */
+static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RNDR_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TLB_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TS_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM4_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM3_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA3_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA1_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_AES_SHIFT, 4, 0),
+ ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_I8MM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DGH_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_BF16_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SPECRES_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SB_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FRINTTS_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_GPI_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_GPA_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
+ FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_API_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
+ FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_APA_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
+ ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_CLEARBHB_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_RPRES_SHIFT, 4, 0),
+ ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_DIT_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_AMU_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_MPAM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SEL2_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_RAS_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
+ S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
+ S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
+ ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MPAMFRAC_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_RASFRAC_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_MTE),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MTE_SHIFT, 4, ID_AA64PFR1_MTE_NI),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_BTI),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_BT_SHIFT, 4, 0),
+ ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_F64MM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_F32MM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_I8MM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SM4_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SHA3_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BF16_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BITPERM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_AES_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SVEVER_SHIFT, 4, 0),
+ ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ECV_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_FGT_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EXS_SHIFT, 4, 0),
+ /*
+ * Page size not being supported at Stage-2 is not fatal. You
+ * just give up KVM if PAGE_SIZE isn't supported there. Go fix
+ * your favourite nesting hypervisor.
+ *
+ * There is a small corner case where the hypervisor explicitly
+ * advertises a given granule size at Stage-2 (value 2) on some
+ * vCPUs, and uses the fallback to Stage-1 (value 0) for other
+ * vCPUs. Although this is not forbidden by the architecture, it
+ * indicates that the hypervisor is being silly (or buggy).
+ *
+ * We make no effort to cope with this and pretend that if these
+ * fields are inconsistent across vCPUs, then it isn't worth
+ * trying to bring KVM up.
+ */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN4_2_SHIFT, 4, 1),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN64_2_SHIFT, 4, 1),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN16_2_SHIFT, 4, 1),
+ /*
+ * We already refuse to boot CPUs that don't support our configured
+ * page size, so we can only detect mismatches for a page size other
+ * than the one we're currently using. Unfortunately, SoCs like this
+ * exist in the wild so, even though we don't like it, we'll have to go
+ * along with it and treat them as non-strict.
+ */
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
+
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
+ /* Linux shouldn't care about secure memory */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
+ /*
+ * Differing PARange is fine as long as all peripherals and memory are mapped
+ * within the minimum PARange of all CPUs
+ */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
+ ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_AFP_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_ETS_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_TWED_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_XNX_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64MMFR1_SPECSEI_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
+ ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_E0PD_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EVT_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_BBM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_TTL_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_FWB_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IDS_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_ST_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_NV_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CCIDX_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CNP_SHIFT, 4, 0),
+ ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_ctr[] = {
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DIC_SHIFT, 1, 1),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IDC_SHIFT, 1, 1),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_CWG_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_ERG_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
+ /*
+ * Linux can handle differing I-cache policies. Userspace JITs will
+ * make use of *minLine.
+ * If we have differing I-cache policies, report it as the weakest - VIPT.
+ */
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, CTR_L1IP_SHIFT, 2, ICACHE_POLICY_VIPT), /* L1Ip */
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IMINLINE_SHIFT, 4, 0),
+ ARM64_FTR_END,
+};
+
+struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = {
+ .name = "SYS_CTR_EL0",
+ .ftr_bits = ftr_ctr
+};
+
+static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_INNERSHR_SHIFT, 4, 0xf),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_FCSE_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_MMFR0_AUXREG_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_TCM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_SHARELVL_SHIFT, 4, 0),
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_OUTERSHR_SHIFT, 4, 0xf),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_PMSA_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_VMSA_SHIFT, 4, 0),
+ ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_DOUBLELOCK_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_PMSVER_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
+ /*
+ * We can instantiate multiple PMU instances with different levels
+ * of support.
+ */
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
+ ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_mvfr2[] = {
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR2_FPMISC_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR2_SIMDMISC_SHIFT, 4, 0),
+ ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_dczid[] = {
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, DCZID_DZP_SHIFT, 1, 1),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, DCZID_BS_SHIFT, 4, 0),
+ ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_id_isar0[] = {
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_DIVIDE_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_DEBUG_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_COPROC_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_CMPBRANCH_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_BITFIELD_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_BITCOUNT_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_SWAP_SHIFT, 4, 0),
+ ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_id_isar5[] = {
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_RDM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_CRC32_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA2_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA1_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_AES_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SEVL_SHIFT, 4, 0),
+ ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_id_mmfr4[] = {
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EVT_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_CCIDX_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_LSM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_HPDS_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_CNP_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_XNX_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_AC2_SHIFT, 4, 0),
+
+ /*
+ * SpecSEI = 1 indicates that the PE might generate an SError on an
+ * external abort on a speculative read. It is safer to assume that an
+ * SError might be generated than that it will not be. Hence it has
+ * been classified as FTR_HIGHER_SAFE.
+ */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_MMFR4_SPECSEI_SHIFT, 4, 0),
+ ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_id_isar4[] = {
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_SWP_FRAC_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_PSR_M_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_SYNCH_PRIM_FRAC_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_BARRIER_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_SMC_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_WRITEBACK_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_WITHSHIFTS_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_UNPRIV_SHIFT, 4, 0),
+ ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_id_mmfr5[] = {
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR5_ETS_SHIFT, 4, 0),
+ ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_id_isar6[] = {
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_I8MM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_BF16_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_SPECRES_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_SB_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_FHM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_DP_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_JSCVT_SHIFT, 4, 0),
+ ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_id_pfr0[] = {
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_DIT_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR0_CSV2_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_STATE3_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_STATE2_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_STATE1_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_STATE0_SHIFT, 4, 0),
+ ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_id_pfr1[] = {
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_GIC_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_VIRT_FRAC_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_SEC_FRAC_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_GENTIMER_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_VIRTUALIZATION_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_MPROGMOD_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_SECURITY_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_PROGMOD_SHIFT, 4, 0),
+ ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_id_pfr2[] = {
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_SSBS_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_CSV3_SHIFT, 4, 0),
+ ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_id_dfr0[] = {
+ /* [31:28] TraceFilt */
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_DFR0_PERFMON_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_MPROFDBG_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_MMAPTRC_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_COPTRC_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_MMAPDBG_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_COPSDBG_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_COPDBG_SHIFT, 4, 0),
+ ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_id_dfr1[] = {
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR1_MTPMU_SHIFT, 4, 0),
+ ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_zcr[] = {
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE,
+ ZCR_ELx_LEN_SHIFT, ZCR_ELx_LEN_SIZE, 0), /* LEN */
+ ARM64_FTR_END,
+};
+
+/*
+ * Common ftr bits for a 32bit register with all hidden, strict
+ * attributes, with 4bit feature fields and a default safe value of
+ * 0. Covers the following 32bit registers:
+ * id_isar[1-4], id_mmfr[1-3], id_pfr1, mvfr[0-1]
+ */
+static const struct arm64_ftr_bits ftr_generic_32bits[] = {
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
+ ARM64_FTR_END,
+};
+
+/* Table for a single 32bit feature value */
+static const struct arm64_ftr_bits ftr_single32[] = {
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 32, 0),
+ ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_raz[] = {
+ ARM64_FTR_END,
+};
+
+#define ARM64_FTR_REG(id, table) { \
+ .sys_id = id, \
+ .reg = &(struct arm64_ftr_reg){ \
+ .name = #id, \
+ .ftr_bits = &((table)[0]), \
+ }}
+
+static const struct __ftr_reg_entry {
+ u32 sys_id;
+ struct arm64_ftr_reg *reg;
+} arm64_ftr_regs[] = {
+
+ /* Op1 = 0, CRn = 0, CRm = 1 */
+ ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0),
+ ARM64_FTR_REG(SYS_ID_PFR1_EL1, ftr_id_pfr1),
+ ARM64_FTR_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0),
+ ARM64_FTR_REG(SYS_ID_MMFR0_EL1, ftr_id_mmfr0),
+ ARM64_FTR_REG(SYS_ID_MMFR1_EL1, ftr_generic_32bits),
+ ARM64_FTR_REG(SYS_ID_MMFR2_EL1, ftr_generic_32bits),
+ ARM64_FTR_REG(SYS_ID_MMFR3_EL1, ftr_generic_32bits),
+
+ /* Op1 = 0, CRn = 0, CRm = 2 */
+ ARM64_FTR_REG(SYS_ID_ISAR0_EL1, ftr_id_isar0),
+ ARM64_FTR_REG(SYS_ID_ISAR1_EL1, ftr_generic_32bits),
+ ARM64_FTR_REG(SYS_ID_ISAR2_EL1, ftr_generic_32bits),
+ ARM64_FTR_REG(SYS_ID_ISAR3_EL1, ftr_generic_32bits),
+ ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_id_isar4),
+ ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5),
+ ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4),
+ ARM64_FTR_REG(SYS_ID_ISAR6_EL1, ftr_id_isar6),
+
+ /* Op1 = 0, CRn = 0, CRm = 3 */
+ ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits),
+ ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_generic_32bits),
+ ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2),
+ ARM64_FTR_REG(SYS_ID_PFR2_EL1, ftr_id_pfr2),
+ ARM64_FTR_REG(SYS_ID_DFR1_EL1, ftr_id_dfr1),
+ ARM64_FTR_REG(SYS_ID_MMFR5_EL1, ftr_id_mmfr5),
+
+ /* Op1 = 0, CRn = 0, CRm = 4 */
+ ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
+ ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1),
+ ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0),
+
+ /* Op1 = 0, CRn = 0, CRm = 5 */
+ ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
+ ARM64_FTR_REG(SYS_ID_AA64DFR1_EL1, ftr_raz),
+
+ /* Op1 = 0, CRn = 0, CRm = 6 */
+ ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
+ ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1),
+ ARM64_FTR_REG(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2),
+
+ /* Op1 = 0, CRn = 0, CRm = 7 */
+ ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
+ ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1),
+ ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),
+
+ /* Op1 = 0, CRn = 1, CRm = 2 */
+ ARM64_FTR_REG(SYS_ZCR_EL1, ftr_zcr),
+
+ /* Op1 = 3, CRn = 0, CRm = 0 */
+ { SYS_CTR_EL0, &arm64_ftr_reg_ctrel0 },
+ ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid),
+
+ /* Op1 = 3, CRn = 14, CRm = 0 */
+ ARM64_FTR_REG(SYS_CNTFRQ_EL0, ftr_single32),
+};
+
+static int search_cmp_ftr_reg(const void *id, const void *regp)
+{
+ return (int)(unsigned long)id - (int)((const struct __ftr_reg_entry *)regp)->sys_id;
+}
+
+/*
+ * get_arm64_ftr_reg_nowarn - Looks up a feature register entry using
+ * its sys_reg() encoding. With the array arm64_ftr_regs sorted in
+ * ascending order of sys_id, we use binary search to find a matching
+ * entry.
+ *
+ * returns - Upon success, matching ftr_reg entry for id.
+ * - NULL on failure. It is up to the caller to decide
+ * the impact of a failure.
+ */
+static struct arm64_ftr_reg *get_arm64_ftr_reg_nowarn(u32 sys_id)
+{
+ const struct __ftr_reg_entry *ret;
+
+ ret = bsearch((const void *)(unsigned long)sys_id,
+ arm64_ftr_regs,
+ ARRAY_SIZE(arm64_ftr_regs),
+ sizeof(arm64_ftr_regs[0]),
+ search_cmp_ftr_reg);
+ if (ret)
+ return ret->reg;
+ return NULL;
+}
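
The lookup above is a plain binary search keyed on the sys_reg() encoding. Here is a self-contained illustration of the same comparator pattern using the C library's bsearch(); the table entries and IDs are made up, and the only requirement carried over from the kernel code is that the array be sorted by ID in ascending order.

#include <stdio.h>
#include <stdlib.h>

struct reg_entry {
	unsigned int sys_id;
	const char *name;
};

/* Must be sorted by sys_id, ascending, for bsearch() to work. */
static const struct reg_entry regs[] = {
	{ 0x10, "REG_A" },
	{ 0x20, "REG_B" },
	{ 0x30, "REG_C" },
};

/* The key is the encoded ID itself, smuggled through the pointer. */
static int cmp_id(const void *id, const void *entry)
{
	return (int)(unsigned long)id -
	       (int)((const struct reg_entry *)entry)->sys_id;
}

int main(void)
{
	const struct reg_entry *found;

	found = bsearch((const void *)(unsigned long)0x20, regs,
			sizeof(regs) / sizeof(regs[0]), sizeof(regs[0]), cmp_id);
	printf("%s\n", found ? found->name : "not found"); /* REG_B */
	return 0;
}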
+
+/*
+ * get_arm64_ftr_reg - Looks up a feature register entry using
+ * its sys_reg() encoding. This calls get_arm64_ftr_reg_nowarn().
+ *
+ * returns - Upon success, matching ftr_reg entry for id.
+ * - NULL on failure, but with a WARN_ON().
+ */
+static struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id)
+{
+ struct arm64_ftr_reg *reg;
+
+ reg = get_arm64_ftr_reg_nowarn(sys_id);
+
+ /*
+	 * Searching for a non-existent register is an error. Warn
+	 * and let the caller handle it.
+ */
+ WARN_ON(!reg);
+ return reg;
+}
+
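+/* Insert ftr_val into reg at the field described by ftrp. */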
+static u64 arm64_ftr_set_value(const struct arm64_ftr_bits *ftrp, s64 reg,
+ s64 ftr_val)
+{
+ u64 mask = arm64_ftr_mask(ftrp);
+
+ reg &= ~mask;
+ reg |= (ftr_val << ftrp->shift) & mask;
+ return reg;
+}
+
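+/*
+ * Pick the system-safe value for a mismatched feature field, based on the
+ * field's FTR_* policy type.
+ */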
+static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
+ s64 cur)
+{
+ s64 ret = 0;
+
+ switch (ftrp->type) {
+ case FTR_EXACT:
+ ret = ftrp->safe_val;
+ break;
+ case FTR_LOWER_SAFE:
+ ret = new < cur ? new : cur;
+ break;
+ case FTR_HIGHER_OR_ZERO_SAFE:
+ if (!cur || !new)
+ break;
+ fallthrough;
+ case FTR_HIGHER_SAFE:
+ ret = new > cur ? new : cur;
+ break;
+ default:
+ BUG();
+ }
+
+ return ret;
+}
+
+static void __init sort_ftr_regs(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(arm64_ftr_regs); i++) {
+ const struct arm64_ftr_reg *ftr_reg = arm64_ftr_regs[i].reg;
+ const struct arm64_ftr_bits *ftr_bits = ftr_reg->ftr_bits;
+ unsigned int j = 0;
+
+ /*
+ * Features here must be sorted in descending order with respect
+ * to their shift values and should not overlap with each other.
+ */
+ for (; ftr_bits->width != 0; ftr_bits++, j++) {
+ unsigned int width = ftr_reg->ftr_bits[j].width;
+ unsigned int shift = ftr_reg->ftr_bits[j].shift;
+ unsigned int prev_shift;
+
+ WARN((shift + width) > 64,
+ "%s has invalid feature at shift %d\n",
+ ftr_reg->name, shift);
+
+ /*
+ * Skip the first feature. There is nothing to
+ * compare against for now.
+ */
+ if (j == 0)
+ continue;
+
+ prev_shift = ftr_reg->ftr_bits[j - 1].shift;
+ WARN((shift + width) > prev_shift,
+ "%s has feature overlap at shift %d\n",
+ ftr_reg->name, shift);
+ }
+
+ /*
+ * Skip the first register. There is nothing to
+ * compare against for now.
+ */
+ if (i == 0)
+ continue;
+ /*
+ * Registers here must be sorted in ascending order with respect
+ * to sys_id for subsequent binary search in get_arm64_ftr_reg()
+ * to work correctly.
+ */
+ BUG_ON(arm64_ftr_regs[i].sys_id < arm64_ftr_regs[i - 1].sys_id);
+ }
+}
+
+/*
+ * Initialise the CPU feature register from Boot CPU values.
+ * Also initialises the strict_mask for the register.
+ * Any bits that are not covered by an arm64_ftr_bits entry are considered
+ * RES0 for the system-wide value, and must strictly match.
+ */
+static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
+{
+ u64 val = 0;
+ u64 strict_mask = ~0x0ULL;
+ u64 user_mask = 0;
+ u64 valid_mask = 0;
+
+ const struct arm64_ftr_bits *ftrp;
+ struct arm64_ftr_reg *reg = get_arm64_ftr_reg(sys_reg);
+
+ if (!reg)
+ return;
+
+ for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
+ u64 ftr_mask = arm64_ftr_mask(ftrp);
+ s64 ftr_new = arm64_ftr_value(ftrp, new);
+
+ val = arm64_ftr_set_value(ftrp, val, ftr_new);
+
+ valid_mask |= ftr_mask;
+ if (!ftrp->strict)
+ strict_mask &= ~ftr_mask;
+ if (ftrp->visible)
+ user_mask |= ftr_mask;
+ else
+ reg->user_val = arm64_ftr_set_value(ftrp,
+ reg->user_val,
+ ftrp->safe_val);
+ }
+
+ val &= valid_mask;
+
+ reg->sys_val = val;
+ reg->strict_mask = strict_mask;
+ reg->user_mask = user_mask;
+}
+
+extern const struct arm64_cpu_capabilities arm64_errata[];
+static const struct arm64_cpu_capabilities arm64_features[];
+
+static void __init
+init_cpu_hwcaps_indirect_list_from_array(const struct arm64_cpu_capabilities *caps)
+{
+ for (; caps->matches; caps++) {
+ if (WARN(caps->capability >= ARM64_NCAPS,
+ "Invalid capability %d\n", caps->capability))
+ continue;
+ if (WARN(cpu_hwcaps_ptrs[caps->capability],
+ "Duplicate entry for capability %d\n",
+ caps->capability))
+ continue;
+ cpu_hwcaps_ptrs[caps->capability] = caps;
+ }
+}
+
+static void __init init_cpu_hwcaps_indirect_list(void)
+{
+ init_cpu_hwcaps_indirect_list_from_array(arm64_features);
+ init_cpu_hwcaps_indirect_list_from_array(arm64_errata);
+}
+
+static void __init setup_boot_cpu_capabilities(void);
+
+void __init init_cpu_features(struct cpuinfo_arm64 *info)
+{
+	/* Before we start using the table, make sure it is sorted */
+ sort_ftr_regs();
+
+ init_cpu_ftr_reg(SYS_CTR_EL0, info->reg_ctr);
+ init_cpu_ftr_reg(SYS_DCZID_EL0, info->reg_dczid);
+ init_cpu_ftr_reg(SYS_CNTFRQ_EL0, info->reg_cntfrq);
+ init_cpu_ftr_reg(SYS_ID_AA64DFR0_EL1, info->reg_id_aa64dfr0);
+ init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
+ init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
+ init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
+ init_cpu_ftr_reg(SYS_ID_AA64ISAR2_EL1, info->reg_id_aa64isar2);
+ init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
+ init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
+ init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
+ init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
+ init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
+ init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0);
+
+ if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
+ init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
+ init_cpu_ftr_reg(SYS_ID_DFR1_EL1, info->reg_id_dfr1);
+ init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
+ init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
+ init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
+ init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
+ init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
+ init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
+ init_cpu_ftr_reg(SYS_ID_ISAR6_EL1, info->reg_id_isar6);
+ init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
+ init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
+ init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
+ init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
+ init_cpu_ftr_reg(SYS_ID_MMFR4_EL1, info->reg_id_mmfr4);
+ init_cpu_ftr_reg(SYS_ID_MMFR5_EL1, info->reg_id_mmfr5);
+ init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
+ init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
+ init_cpu_ftr_reg(SYS_ID_PFR2_EL1, info->reg_id_pfr2);
+ init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
+ init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
+ init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
+ }
+
+ if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
+ init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
+ sve_init_vq_map();
+ }
+
+ /*
+ * Initialize the indirect array of CPU hwcaps capabilities pointers
+ * before we handle the boot CPU below.
+ */
+ init_cpu_hwcaps_indirect_list();
+
+ /*
+ * Detect and enable early CPU capabilities based on the boot CPU,
+ * after we have initialised the CPU feature infrastructure.
+ */
+ setup_boot_cpu_capabilities();
+}
+
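+/* Fold the feature values seen on a new CPU into the system-wide safe values. */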
+static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
+{
+ const struct arm64_ftr_bits *ftrp;
+
+ for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
+ s64 ftr_cur = arm64_ftr_value(ftrp, reg->sys_val);
+ s64 ftr_new = arm64_ftr_value(ftrp, new);
+
+ if (ftr_cur == ftr_new)
+ continue;
+ /* Find a safe value */
+ ftr_new = arm64_ftr_safe_value(ftrp, ftr_new, ftr_cur);
+ reg->sys_val = arm64_ftr_set_value(ftrp, reg->sys_val, ftr_new);
+ }
+
+}
+
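+/*
+ * Update the system-wide value for sys_id with the value seen on this CPU,
+ * and return 1 if the strictly-checked bits differ from those of the boot CPU.
+ */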
+static int check_update_ftr_reg(u32 sys_id, int cpu, u64 val, u64 boot)
+{
+ struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);
+
+ if (!regp)
+ return 0;
+
+ update_cpu_ftr_reg(regp, val);
+ if ((boot & regp->strict_mask) == (val & regp->strict_mask))
+ return 0;
+ pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016llx, CPU%d: %#016llx\n",
+ regp->name, boot, cpu, val);
+ return 1;
+}
+
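+/* Stop treating mismatches in the given field of sys_id as a sanity-check failure. */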
+static void relax_cpu_ftr_reg(u32 sys_id, int field)
+{
+ const struct arm64_ftr_bits *ftrp;
+ struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);
+
+ if (!regp)
+ return;
+
+ for (ftrp = regp->ftr_bits; ftrp->width; ftrp++) {
+ if (ftrp->shift == field) {
+ regp->strict_mask &= ~arm64_ftr_mask(ftrp);
+ break;
+ }
+ }
+
+ /* Bogus field? */
+ WARN_ON(!ftrp->width);
+}
+
+static int update_32bit_cpu_features(int cpu, struct cpuinfo_arm64 *info,
+ struct cpuinfo_arm64 *boot)
+{
+ int taint = 0;
+ u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+
+ /*
+ * If we don't have AArch32 at all then skip the checks entirely
+ * as the register values may be UNKNOWN and we're not going to be
+ * using them for anything.
+ */
+ if (!id_aa64pfr0_32bit_el0(pfr0))
+ return taint;
+
+ /*
+ * If we don't have AArch32 at EL1, then relax the strictness of
+ * EL1-dependent register fields to avoid spurious sanity check fails.
+ */
+ if (!id_aa64pfr0_32bit_el1(pfr0)) {
+ relax_cpu_ftr_reg(SYS_ID_ISAR4_EL1, ID_ISAR4_SMC_SHIFT);
+ relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_VIRT_FRAC_SHIFT);
+ relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_SEC_FRAC_SHIFT);
+ relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_VIRTUALIZATION_SHIFT);
+ relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_SECURITY_SHIFT);
+ relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_PROGMOD_SHIFT);
+ }
+
+ taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
+ info->reg_id_dfr0, boot->reg_id_dfr0);
+ taint |= check_update_ftr_reg(SYS_ID_DFR1_EL1, cpu,
+ info->reg_id_dfr1, boot->reg_id_dfr1);
+ taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
+ info->reg_id_isar0, boot->reg_id_isar0);
+ taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu,
+ info->reg_id_isar1, boot->reg_id_isar1);
+ taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu,
+ info->reg_id_isar2, boot->reg_id_isar2);
+ taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu,
+ info->reg_id_isar3, boot->reg_id_isar3);
+ taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu,
+ info->reg_id_isar4, boot->reg_id_isar4);
+ taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
+ info->reg_id_isar5, boot->reg_id_isar5);
+ taint |= check_update_ftr_reg(SYS_ID_ISAR6_EL1, cpu,
+ info->reg_id_isar6, boot->reg_id_isar6);
+
+ /*
+ * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
+ * ACTLR formats could differ across CPUs and therefore would have to
+ * be trapped for virtualization anyway.
+ */
+ taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu,
+ info->reg_id_mmfr0, boot->reg_id_mmfr0);
+ taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu,
+ info->reg_id_mmfr1, boot->reg_id_mmfr1);
+ taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu,
+ info->reg_id_mmfr2, boot->reg_id_mmfr2);
+ taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu,
+ info->reg_id_mmfr3, boot->reg_id_mmfr3);
+ taint |= check_update_ftr_reg(SYS_ID_MMFR4_EL1, cpu,
+ info->reg_id_mmfr4, boot->reg_id_mmfr4);
+ taint |= check_update_ftr_reg(SYS_ID_MMFR5_EL1, cpu,
+ info->reg_id_mmfr5, boot->reg_id_mmfr5);
+ taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu,
+ info->reg_id_pfr0, boot->reg_id_pfr0);
+ taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu,
+ info->reg_id_pfr1, boot->reg_id_pfr1);
+ taint |= check_update_ftr_reg(SYS_ID_PFR2_EL1, cpu,
+ info->reg_id_pfr2, boot->reg_id_pfr2);
+ taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu,
+ info->reg_mvfr0, boot->reg_mvfr0);
+ taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu,
+ info->reg_mvfr1, boot->reg_mvfr1);
+ taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu,
+ info->reg_mvfr2, boot->reg_mvfr2);
+
+ return taint;
+}
+
+/*
+ * Update system wide CPU feature registers with the values from a
+ * non-boot CPU. Also performs SANITY checks to make sure that there
+ * aren't any insane variations from that of the boot CPU.
+ */
+void update_cpu_features(int cpu,
+ struct cpuinfo_arm64 *info,
+ struct cpuinfo_arm64 *boot)
+{
+ int taint = 0;
+
+ /*
+ * The kernel can handle differing I-cache policies, but otherwise
+ * caches should look identical. Userspace JITs will make use of
+ * *minLine.
+ */
+ taint |= check_update_ftr_reg(SYS_CTR_EL0, cpu,
+ info->reg_ctr, boot->reg_ctr);
+
+ /*
+ * Userspace may perform DC ZVA instructions. Mismatched block sizes
+ * could result in too much or too little memory being zeroed if a
+ * process is preempted and migrated between CPUs.
+ */
+ taint |= check_update_ftr_reg(SYS_DCZID_EL0, cpu,
+ info->reg_dczid, boot->reg_dczid);
+
+ /* If different, timekeeping will be broken (especially with KVM) */
+ taint |= check_update_ftr_reg(SYS_CNTFRQ_EL0, cpu,
+ info->reg_cntfrq, boot->reg_cntfrq);
+
+ /*
+ * The kernel uses self-hosted debug features and expects CPUs to
+ * support identical debug features. We presently need CTX_CMPs, WRPs,
+ * and BRPs to be identical.
+ * ID_AA64DFR1 is currently RES0.
+ */
+ taint |= check_update_ftr_reg(SYS_ID_AA64DFR0_EL1, cpu,
+ info->reg_id_aa64dfr0, boot->reg_id_aa64dfr0);
+ taint |= check_update_ftr_reg(SYS_ID_AA64DFR1_EL1, cpu,
+ info->reg_id_aa64dfr1, boot->reg_id_aa64dfr1);
+ /*
+ * Even in big.LITTLE, processors should be identical instruction-set
+ * wise.
+ */
+ taint |= check_update_ftr_reg(SYS_ID_AA64ISAR0_EL1, cpu,
+ info->reg_id_aa64isar0, boot->reg_id_aa64isar0);
+ taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
+ info->reg_id_aa64isar1, boot->reg_id_aa64isar1);
+ taint |= check_update_ftr_reg(SYS_ID_AA64ISAR2_EL1, cpu,
+ info->reg_id_aa64isar2, boot->reg_id_aa64isar2);
+
+ /*
+ * Differing PARange support is fine as long as all peripherals and
+ * memory are mapped within the minimum PARange of all CPUs.
+ * Linux should not care about secure memory.
+ */
+ taint |= check_update_ftr_reg(SYS_ID_AA64MMFR0_EL1, cpu,
+ info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0);
+ taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu,
+ info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1);
+ taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
+ info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);
+
+ taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
+ info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
+ taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
+ info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1);
+
+ taint |= check_update_ftr_reg(SYS_ID_AA64ZFR0_EL1, cpu,
+ info->reg_id_aa64zfr0, boot->reg_id_aa64zfr0);
+
+ if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
+ taint |= check_update_ftr_reg(SYS_ZCR_EL1, cpu,
+ info->reg_zcr, boot->reg_zcr);
+
+ /* Probe vector lengths, unless we already gave up on SVE */
+ if (id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
+ !system_capabilities_finalized())
+ sve_update_vq_map();
+ }
+
+ /*
+ * This relies on a sanitised view of the AArch64 ID registers
+ * (e.g. SYS_ID_AA64PFR0_EL1), so we call it last.
+ */
+ taint |= update_32bit_cpu_features(cpu, info, boot);
+
+ /*
+ * Mismatched CPU features are a recipe for disaster. Don't even
+ * pretend to support them.
+ */
+ if (taint) {
+ pr_warn_once("Unsupported CPU feature variation detected.\n");
+ add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
+ }
+}
+
+u64 read_sanitised_ftr_reg(u32 id)
+{
+ struct arm64_ftr_reg *regp = get_arm64_ftr_reg(id);
+
+ if (!regp)
+ return 0;
+ return regp->sys_val;
+}
+EXPORT_SYMBOL_GPL(read_sanitised_ftr_reg);
+
+#define read_sysreg_case(r) \
+ case r: return read_sysreg_s(r)
+
+/*
+ * __read_sysreg_by_encoding() - Used by a STARTING cpu before cpuinfo is populated.
+ * Read the system register on the current CPU
+ */
+static u64 __read_sysreg_by_encoding(u32 sys_id)
+{
+ switch (sys_id) {
+ read_sysreg_case(SYS_ID_PFR0_EL1);
+ read_sysreg_case(SYS_ID_PFR1_EL1);
+ read_sysreg_case(SYS_ID_PFR2_EL1);
+ read_sysreg_case(SYS_ID_DFR0_EL1);
+ read_sysreg_case(SYS_ID_DFR1_EL1);
+ read_sysreg_case(SYS_ID_MMFR0_EL1);
+ read_sysreg_case(SYS_ID_MMFR1_EL1);
+ read_sysreg_case(SYS_ID_MMFR2_EL1);
+ read_sysreg_case(SYS_ID_MMFR3_EL1);
+ read_sysreg_case(SYS_ID_MMFR4_EL1);
+ read_sysreg_case(SYS_ID_MMFR5_EL1);
+ read_sysreg_case(SYS_ID_ISAR0_EL1);
+ read_sysreg_case(SYS_ID_ISAR1_EL1);
+ read_sysreg_case(SYS_ID_ISAR2_EL1);
+ read_sysreg_case(SYS_ID_ISAR3_EL1);
+ read_sysreg_case(SYS_ID_ISAR4_EL1);
+ read_sysreg_case(SYS_ID_ISAR5_EL1);
+ read_sysreg_case(SYS_ID_ISAR6_EL1);
+ read_sysreg_case(SYS_MVFR0_EL1);
+ read_sysreg_case(SYS_MVFR1_EL1);
+ read_sysreg_case(SYS_MVFR2_EL1);
+
+ read_sysreg_case(SYS_ID_AA64PFR0_EL1);
+ read_sysreg_case(SYS_ID_AA64PFR1_EL1);
+ read_sysreg_case(SYS_ID_AA64ZFR0_EL1);
+ read_sysreg_case(SYS_ID_AA64DFR0_EL1);
+ read_sysreg_case(SYS_ID_AA64DFR1_EL1);
+ read_sysreg_case(SYS_ID_AA64MMFR0_EL1);
+ read_sysreg_case(SYS_ID_AA64MMFR1_EL1);
+ read_sysreg_case(SYS_ID_AA64MMFR2_EL1);
+ read_sysreg_case(SYS_ID_AA64ISAR0_EL1);
+ read_sysreg_case(SYS_ID_AA64ISAR1_EL1);
+ read_sysreg_case(SYS_ID_AA64ISAR2_EL1);
+
+ read_sysreg_case(SYS_CNTFRQ_EL0);
+ read_sysreg_case(SYS_CTR_EL0);
+ read_sysreg_case(SYS_DCZID_EL0);
+
+ default:
+ BUG();
+ return 0;
+ }
+}
+
+#include <linux/irqchip/arm-gic-v3.h>
+
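+/* Test whether the ID register field described by the entry meets its minimum value. */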
+static bool
+feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
+{
+ int val = cpuid_feature_extract_field(reg, entry->field_pos, entry->sign);
+
+ return val >= entry->min_field_value;
+}
+
+static bool
+has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
+{
+ u64 val;
+
+ WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
+ if (scope == SCOPE_SYSTEM)
+ val = read_sanitised_ftr_reg(entry->sys_reg);
+ else
+ val = __read_sysreg_by_encoding(entry->sys_reg);
+
+ return feature_matches(val, entry);
+}
+
+static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry, int scope)
+{
+ bool has_sre;
+
+ if (!has_cpuid_feature(entry, scope))
+ return false;
+
+ has_sre = gic_enable_sre();
+ if (!has_sre)
+ pr_warn_once("%s present but disabled by higher exception level\n",
+ entry->desc);
+
+ return has_sre;
+}
+
+static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int __unused)
+{
+ u32 midr = read_cpuid_id();
+
+ /* Cavium ThunderX pass 1.x and 2.x */
+ return midr_is_cpu_model_range(midr, MIDR_THUNDERX,
+ MIDR_CPU_VAR_REV(0, 0),
+ MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
+}
+
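+/* FP/SIMD is treated as absent when the sanitised FP field is negative. */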
+static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unused)
+{
+ u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+
+ return cpuid_feature_extract_signed_field(pfr0,
+ ID_AA64PFR0_FP_SHIFT) < 0;
+}
+
+static bool has_cache_idc(const struct arm64_cpu_capabilities *entry,
+ int scope)
+{
+ u64 ctr;
+
+ if (scope == SCOPE_SYSTEM)
+ ctr = arm64_ftr_reg_ctrel0.sys_val;
+ else
+ ctr = read_cpuid_effective_cachetype();
+
+ return ctr & BIT(CTR_IDC_SHIFT);
+}
+
+static void cpu_emulate_effective_ctr(const struct arm64_cpu_capabilities *__unused)
+{
+ /*
+ * If the CPU exposes raw CTR_EL0.IDC = 0, while effectively
+ * CTR_EL0.IDC = 1 (from CLIDR values), we need to trap accesses
+ * to the CTR_EL0 on this CPU and emulate it with the real/safe
+ * value.
+ */
+ if (!(read_cpuid_cachetype() & BIT(CTR_IDC_SHIFT)))
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
+}
+
+static bool has_cache_dic(const struct arm64_cpu_capabilities *entry,
+ int scope)
+{
+ u64 ctr;
+
+ if (scope == SCOPE_SYSTEM)
+ ctr = arm64_ftr_reg_ctrel0.sys_val;
+ else
+ ctr = read_cpuid_cachetype();
+
+ return ctr & BIT(CTR_DIC_SHIFT);
+}
+
+static bool __maybe_unused
+has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
+{
+ /*
+	 * Kdump isn't guaranteed to power-off all secondary CPUs, so CNP
+	 * may share TLB entries with a CPU stuck in the crashed
+	 * kernel.
+ */
+ if (is_kdump_kernel())
+ return false;
+
+ return has_cpuid_feature(entry, scope);
+}
+
+/*
+ * This check is triggered during early boot, before the cpufeature
+ * framework is initialised. Checking the status on the local CPU allows
+ * the boot CPU to detect the need for non-global mappings and thus avoid
+ * a pagetable rewrite after all the CPUs are booted. The check is run
+ * again on each individual CPU anyway, so we still reach a consistent
+ * state once the SMP CPUs are up and switch to non-global mappings if
+ * required.
+ */
+bool kaslr_requires_kpti(void)
+{
+ if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+ return false;
+
+ /*
+ * E0PD does a similar job to KPTI so can be used instead
+ * where available.
+ */
+ if (IS_ENABLED(CONFIG_ARM64_E0PD)) {
+ u64 mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
+ if (cpuid_feature_extract_unsigned_field(mmfr2,
+ ID_AA64MMFR2_E0PD_SHIFT))
+ return false;
+ }
+
+ /*
+	 * Systems affected by Cavium erratum 27456 are incompatible
+ * with KPTI.
+ */
+ if (IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
+ extern const struct midr_range cavium_erratum_27456_cpus[];
+
+ if (is_midr_in_range_list(read_cpuid_id(),
+ cavium_erratum_27456_cpus))
+ return false;
+ }
+
+ return kaslr_offset() > 0;
+}
+
+static bool __meltdown_safe = true;
+static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
+
+static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
+ int scope)
+{
+ /* List of CPUs that are not vulnerable and don't need KPTI */
+ static const struct midr_range kpti_safe_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
+ MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+ MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+ MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
+ MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
+ MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_GOLD),
+ MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
+ MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
+ MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
+ { /* sentinel */ }
+ };
+ char const *str = "kpti command line option";
+ bool meltdown_safe;
+
+ meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list);
+
+ /* Defer to CPU feature registers */
+ if (has_cpuid_feature(entry, scope))
+ meltdown_safe = true;
+
+ if (!meltdown_safe)
+ __meltdown_safe = false;
+
+ /*
+ * For reasons that aren't entirely clear, enabling KPTI on Cavium
+ * ThunderX leads to apparent I-cache corruption of kernel text, which
+ * ends as well as you might imagine. Don't even try.
+ */
+ if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
+ str = "ARM64_WORKAROUND_CAVIUM_27456";
+ __kpti_forced = -1;
+ }
+
+ /* Useful for KASLR robustness */
+ if (kaslr_requires_kpti()) {
+ if (!__kpti_forced) {
+ str = "KASLR";
+ __kpti_forced = 1;
+ }
+ }
+
+ if (cpu_mitigations_off() && !__kpti_forced) {
+ str = "mitigations=off";
+ __kpti_forced = -1;
+ }
+
+ if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) {
+ pr_info_once("kernel page table isolation disabled by kernel configuration\n");
+ return false;
+ }
+
+ /* Forced? */
+ if (__kpti_forced) {
+ pr_info_once("kernel page table isolation forced %s by %s\n",
+ __kpti_forced > 0 ? "ON" : "OFF", str);
+ return __kpti_forced > 0;
+ }
+
+ return !meltdown_safe;
+}
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+static void
+kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
+{
+ typedef void (kpti_remap_fn)(int, int, phys_addr_t);
+ extern kpti_remap_fn idmap_kpti_install_ng_mappings;
+ kpti_remap_fn *remap_fn;
+
+ int cpu = smp_processor_id();
+
+ if (__this_cpu_read(this_cpu_vector) == vectors) {
+ const char *v = arm64_get_bp_hardening_vector(EL1_VECTOR_KPTI);
+
+ __this_cpu_write(this_cpu_vector, v);
+ }
+
+ /*
+ * We don't need to rewrite the page-tables if either we've done
+ * it already or we have KASLR enabled and therefore have not
+ * created any global mappings at all.
+ */
+ if (arm64_use_ng_mappings)
+ return;
+
+ remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
+
+ cpu_install_idmap();
+ remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir));
+ cpu_uninstall_idmap();
+
+ if (!cpu)
+ arm64_use_ng_mappings = true;
+
+ return;
+}
+#else
+static void
+kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
+{
+}
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+
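+/* Handle the "kpti=" command line option to force KPTI on or off. */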
+static int __init parse_kpti(char *str)
+{
+ bool enabled;
+ int ret = strtobool(str, &enabled);
+
+ if (ret)
+ return ret;
+
+ __kpti_forced = enabled ? 1 : -1;
+ return 0;
+}
+early_param("kpti", parse_kpti);
+
+#ifdef CONFIG_ARM64_HW_AFDBM
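+/* Set TCR_EL1.HD to let hardware manage the dirty state of page-table entries. */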
+static inline void __cpu_enable_hw_dbm(void)
+{
+ u64 tcr = read_sysreg(tcr_el1) | TCR_HD;
+
+ write_sysreg(tcr, tcr_el1);
+ isb();
+ local_flush_tlb_all();
+}
+
+static bool cpu_has_broken_dbm(void)
+{
+ /* List of CPUs which have broken DBM support. */
+ static const struct midr_range cpus[] = {
+#ifdef CONFIG_ARM64_ERRATUM_1024718
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+ /* Kryo4xx Silver (rdpe => r1p0) */
+ MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
+#endif
+ {},
+ };
+
+ return is_midr_in_range_list(read_cpuid_id(), cpus);
+}
+
+static bool cpu_can_use_dbm(const struct arm64_cpu_capabilities *cap)
+{
+ return has_cpuid_feature(cap, SCOPE_LOCAL_CPU) &&
+ !cpu_has_broken_dbm();
+}
+
+static void cpu_enable_hw_dbm(struct arm64_cpu_capabilities const *cap)
+{
+ if (cpu_can_use_dbm(cap))
+ __cpu_enable_hw_dbm();
+}
+
+static bool has_hw_dbm(const struct arm64_cpu_capabilities *cap,
+ int __unused)
+{
+ static bool detected = false;
+ /*
+	 * DBM is a non-conflicting feature, i.e. the kernel can safely
+	 * run a mix of CPUs with and without the feature. So, we
+	 * unconditionally enable the capability to allow any late CPU
+	 * to use the feature. We only enable the control bits on a
+	 * CPU if it actually supports the feature.
+	 *
+	 * We have to make sure we print the "feature" detection only
+	 * when at least one CPU actually uses it. So check if this CPU
+	 * can actually use it and print the message exactly once.
+	 *
+	 * This is safe as all CPUs (including secondary CPUs - due to the
+	 * LOCAL_CPU scope - and the hotplugged CPUs - via verification)
+	 * go through the "matches" check exactly once. Also, if a CPU
+	 * matches the criteria, it is guaranteed that the CPU will turn
+	 * DBM on, as the capability is unconditionally enabled.
+ */
+ if (!detected && cpu_can_use_dbm(cap)) {
+ detected = true;
+ pr_info("detected: Hardware dirty bit management\n");
+ }
+
+ return true;
+}
+
+#endif
+
+#ifdef CONFIG_ARM64_AMU_EXTN
+
+/*
+ * The "amu_cpus" cpumask only signals that the CPU implementation for the
+ * flagged CPUs supports the Activity Monitors Unit (AMU) but does not provide
+ * information regarding all the events that it supports. When a CPU bit is
+ * set in the cpumask, the user of this feature can only rely on the presence
+ * of the 4 fixed counters for that CPU. But this does not guarantee that the
+ * counters are enabled or that access to these counters has been enabled
+ * by code executed at higher exception levels (firmware).
+ */
+static struct cpumask amu_cpus __read_mostly;
+
+bool cpu_has_amu_feat(int cpu)
+{
+ return cpumask_test_cpu(cpu, &amu_cpus);
+}
+
+/* Initialize the use of AMU counters for frequency invariance */
+extern void init_cpu_freq_invariance_counters(void);
+
+static void cpu_amu_enable(struct arm64_cpu_capabilities const *cap)
+{
+ if (has_cpuid_feature(cap, SCOPE_LOCAL_CPU)) {
+ pr_info("detected CPU%d: Activity Monitors Unit (AMU)\n",
+ smp_processor_id());
+ cpumask_set_cpu(smp_processor_id(), &amu_cpus);
+
+ /* 0 reference values signal broken/disabled counters */
+ if (!this_cpu_has_cap(ARM64_WORKAROUND_2457168))
+ init_cpu_freq_invariance_counters();
+ }
+}
+
+static bool has_amu(const struct arm64_cpu_capabilities *cap,
+ int __unused)
+{
+ /*
+ * The AMU extension is a non-conflicting feature: the kernel can
+ * safely run a mix of CPUs with and without support for the
+ * activity monitors extension. Therefore, unconditionally enable
+ * the capability to allow any late CPU to use the feature.
+ *
+ * With this feature unconditionally enabled, the cpu_enable
+ * function will be called for all CPUs that match the criteria,
+ * including secondary and hotplugged, marking this feature as
+ * present on that respective CPU. The enable function will also
+ * print a detection message.
+ */
+
+ return true;
+}
+#endif
+
+#ifdef CONFIG_ARM64_VHE
+static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused)
+{
+ return is_kernel_in_hyp_mode();
+}
+
+static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused)
+{
+ /*
+ * Copy register values that aren't redirected by hardware.
+ *
+	 * Before code patching, we only set tpidr_el1, so all CPUs need to copy
+ * this value to tpidr_el2 before we patch the code. Once we've done
+ * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to
+ * do anything here.
+ */
+ if (!alternative_is_applied(ARM64_HAS_VIRT_HOST_EXTN))
+ write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
+}
+#endif
+
+static void cpu_has_fwb(const struct arm64_cpu_capabilities *__unused)
+{
+ u64 val = read_sysreg_s(SYS_CLIDR_EL1);
+
+ /* Check that CLIDR_EL1.LOU{U,IS} are both 0 */
+ WARN_ON(val & (7 << 27 | 7 << 21));
+}
+
+#ifdef CONFIG_ARM64_PAN
+static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
+{
+ /*
+ * We modify PSTATE. This won't work from irq context as the PSTATE
+ * is discarded once we return from the exception.
+ */
+ WARN_ON_ONCE(in_interrupt());
+
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_SPAN, 0);
+ asm(SET_PSTATE_PAN(1));
+}
+#endif /* CONFIG_ARM64_PAN */
+
+#ifdef CONFIG_ARM64_RAS_EXTN
+static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
+{
+ /* Firmware may have left a deferred SError in this register. */
+ write_sysreg_s(0, SYS_DISR_EL1);
+}
+#endif /* CONFIG_ARM64_RAS_EXTN */
+
+#ifdef CONFIG_ARM64_PTR_AUTH
+static bool has_address_auth_cpucap(const struct arm64_cpu_capabilities *entry, int scope)
+{
+ int boot_val, sec_val;
+
+ /* We don't expect to be called with SCOPE_SYSTEM */
+ WARN_ON(scope == SCOPE_SYSTEM);
+ /*
+	 * The ptr-auth feature levels are not intercompatible with lower
+	 * levels. Hence we must match the ptr-auth feature level of the
+	 * secondary CPUs with that of the boot CPU. The level of the boot
+	 * CPU is fetched from the sanitised register, whereas a direct
+	 * register read is done for the secondary CPUs.
+	 * The sanitised feature state is guaranteed to match that of the
+	 * boot CPU, as a mismatched secondary CPU is parked before it gets
+	 * a chance to update the state with the capability.
+ */
+ boot_val = cpuid_feature_extract_field(read_sanitised_ftr_reg(entry->sys_reg),
+ entry->field_pos, entry->sign);
+ if (scope & SCOPE_BOOT_CPU)
+ return boot_val >= entry->min_field_value;
+ /* Now check for the secondary CPUs with SCOPE_LOCAL_CPU scope */
+ sec_val = cpuid_feature_extract_field(__read_sysreg_by_encoding(entry->sys_reg),
+ entry->field_pos, entry->sign);
+ return sec_val == boot_val;
+}
+
+static bool has_address_auth_metacap(const struct arm64_cpu_capabilities *entry,
+ int scope)
+{
+ return has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_ARCH], scope) ||
+ has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_IMP_DEF], scope);
+}
+
+static bool has_generic_auth(const struct arm64_cpu_capabilities *entry,
+ int __unused)
+{
+ return __system_matches_cap(ARM64_HAS_GENERIC_AUTH_ARCH) ||
+ __system_matches_cap(ARM64_HAS_GENERIC_AUTH_IMP_DEF);
+}
+#endif /* CONFIG_ARM64_PTR_AUTH */
+
+#ifdef CONFIG_ARM64_E0PD
+static void cpu_enable_e0pd(struct arm64_cpu_capabilities const *cap)
+{
+ if (this_cpu_has_cap(ARM64_HAS_E0PD))
+ sysreg_clear_set(tcr_el1, 0, TCR_E0PD1);
+}
+#endif /* CONFIG_ARM64_E0PD */
+
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+static bool enable_pseudo_nmi;
+
+static int __init early_enable_pseudo_nmi(char *p)
+{
+ return strtobool(p, &enable_pseudo_nmi);
+}
+early_param("irqchip.gicv3_pseudo_nmi", early_enable_pseudo_nmi);
+
+static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry,
+ int scope)
+{
+ return enable_pseudo_nmi && has_useable_gicv3_cpuif(entry, scope);
+}
+#endif
+
+#ifdef CONFIG_ARM64_BTI
+static void bti_enable(const struct arm64_cpu_capabilities *__unused)
+{
+ /*
+ * Use of X16/X17 for tail-calls and trampolines that jump to
+ * function entry points using BR is a requirement for
+ * marking binaries with GNU_PROPERTY_AARCH64_FEATURE_1_BTI.
+ * So, be strict and forbid other BRs using other registers to
+ * jump onto a PACIxSP instruction:
+ */
+ sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_BT0 | SCTLR_EL1_BT1);
+ isb();
+}
+#endif /* CONFIG_ARM64_BTI */
+
+#ifdef CONFIG_ARM64_MTE
+static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
+{
+ /*
+ * Clear the tags in the zero page. This needs to be done via the
+ * linear map which has the Tagged attribute.
+ */
+ if (!test_and_set_bit(PG_mte_tagged, &ZERO_PAGE(0)->flags))
+ mte_clear_page_tags(lm_alias(empty_zero_page));
+}
+#endif /* CONFIG_ARM64_MTE */
+
+static void elf_hwcap_fixup(void)
+{
+#ifdef CONFIG_ARM64_ERRATUM_1742098
+ if (cpus_have_const_cap(ARM64_WORKAROUND_1742098))
+ compat_elf_hwcap2 &= ~COMPAT_HWCAP2_AES;
+#endif /* ARM64_ERRATUM_1742098 */
+}
+
+/* Internal helper functions to match cpu capability type */
+static bool
+cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
+{
+ return !!(cap->type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU);
+}
+
+static bool
+cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap)
+{
+ return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU);
+}
+
+static bool
+cpucap_panic_on_conflict(const struct arm64_cpu_capabilities *cap)
+{
+ return !!(cap->type & ARM64_CPUCAP_PANIC_ON_CONFLICT);
+}
+
+static const struct arm64_cpu_capabilities arm64_features[] = {
+ {
+ .desc = "GIC system register CPU interface",
+ .capability = ARM64_HAS_SYSREG_GIC_CPUIF,
+ .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
+ .matches = has_useable_gicv3_cpuif,
+ .sys_reg = SYS_ID_AA64PFR0_EL1,
+ .field_pos = ID_AA64PFR0_GIC_SHIFT,
+ .sign = FTR_UNSIGNED,
+ .min_field_value = 1,
+ },
+#ifdef CONFIG_ARM64_PAN
+ {
+ .desc = "Privileged Access Never",
+ .capability = ARM64_HAS_PAN,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cpuid_feature,
+ .sys_reg = SYS_ID_AA64MMFR1_EL1,
+ .field_pos = ID_AA64MMFR1_PAN_SHIFT,
+ .sign = FTR_UNSIGNED,
+ .min_field_value = 1,
+ .cpu_enable = cpu_enable_pan,
+ },
+#endif /* CONFIG_ARM64_PAN */
+#ifdef CONFIG_ARM64_LSE_ATOMICS
+ {
+ .desc = "LSE atomic instructions",
+ .capability = ARM64_HAS_LSE_ATOMICS,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cpuid_feature,
+ .sys_reg = SYS_ID_AA64ISAR0_EL1,
+ .field_pos = ID_AA64ISAR0_ATOMICS_SHIFT,
+ .sign = FTR_UNSIGNED,
+ .min_field_value = 2,
+ },
+#endif /* CONFIG_ARM64_LSE_ATOMICS */
+ {
+ .desc = "Software prefetching using PRFM",
+ .capability = ARM64_HAS_NO_HW_PREFETCH,
+ .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
+ .matches = has_no_hw_prefetch,
+ },
+#ifdef CONFIG_ARM64_UAO
+ {
+ .desc = "User Access Override",
+ .capability = ARM64_HAS_UAO,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cpuid_feature,
+ .sys_reg = SYS_ID_AA64MMFR2_EL1,
+ .field_pos = ID_AA64MMFR2_UAO_SHIFT,
+ .min_field_value = 1,
+ /*
+ * We rely on stop_machine() calling uao_thread_switch() to set
+ * UAO immediately after patching.
+ */
+ },
+#endif /* CONFIG_ARM64_UAO */
+#ifdef CONFIG_ARM64_PAN
+ {
+ .capability = ARM64_ALT_PAN_NOT_UAO,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = cpufeature_pan_not_uao,
+ },
+#endif /* CONFIG_ARM64_PAN */
+#ifdef CONFIG_ARM64_VHE
+ {
+ .desc = "Virtualization Host Extensions",
+ .capability = ARM64_HAS_VIRT_HOST_EXTN,
+ .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
+ .matches = runs_at_el2,
+ .cpu_enable = cpu_copy_el2regs,
+ },
+#endif /* CONFIG_ARM64_VHE */
+ {
+ .desc = "32-bit EL0 Support",
+ .capability = ARM64_HAS_32BIT_EL0,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cpuid_feature,
+ .sys_reg = SYS_ID_AA64PFR0_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64PFR0_EL0_SHIFT,
+ .min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
+ },
+#ifdef CONFIG_KVM
+ {
+ .desc = "32-bit EL1 Support",
+ .capability = ARM64_HAS_32BIT_EL1,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cpuid_feature,
+ .sys_reg = SYS_ID_AA64PFR0_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64PFR0_EL1_SHIFT,
+ .min_field_value = ID_AA64PFR0_EL1_32BIT_64BIT,
+ },
+#endif
+ {
+ .desc = "Kernel page table isolation (KPTI)",
+ .capability = ARM64_UNMAP_KERNEL_AT_EL0,
+ .type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
+ /*
+ * The ID feature fields below are used to indicate that
+ * the CPU doesn't need KPTI. See unmap_kernel_at_el0 for
+ * more details.
+ */
+ .sys_reg = SYS_ID_AA64PFR0_EL1,
+ .field_pos = ID_AA64PFR0_CSV3_SHIFT,
+ .min_field_value = 1,
+ .matches = unmap_kernel_at_el0,
+ .cpu_enable = kpti_install_ng_mappings,
+ },
+ {
+ /* FP/SIMD is not implemented */
+ .capability = ARM64_HAS_NO_FPSIMD,
+ .type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
+ .min_field_value = 0,
+ .matches = has_no_fpsimd,
+ },
+#ifdef CONFIG_ARM64_PMEM
+ {
+ .desc = "Data cache clean to Point of Persistence",
+ .capability = ARM64_HAS_DCPOP,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cpuid_feature,
+ .sys_reg = SYS_ID_AA64ISAR1_EL1,
+ .field_pos = ID_AA64ISAR1_DPB_SHIFT,
+ .min_field_value = 1,
+ },
+ {
+ .desc = "Data cache clean to Point of Deep Persistence",
+ .capability = ARM64_HAS_DCPODP,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cpuid_feature,
+ .sys_reg = SYS_ID_AA64ISAR1_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64ISAR1_DPB_SHIFT,
+ .min_field_value = 2,
+ },
+#endif
+#ifdef CONFIG_ARM64_SVE
+ {
+ .desc = "Scalable Vector Extension",
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .capability = ARM64_SVE,
+ .sys_reg = SYS_ID_AA64PFR0_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64PFR0_SVE_SHIFT,
+ .min_field_value = ID_AA64PFR0_SVE,
+ .matches = has_cpuid_feature,
+ .cpu_enable = sve_kernel_enable,
+ },
+#endif /* CONFIG_ARM64_SVE */
+#ifdef CONFIG_ARM64_RAS_EXTN
+ {
+ .desc = "RAS Extension Support",
+ .capability = ARM64_HAS_RAS_EXTN,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cpuid_feature,
+ .sys_reg = SYS_ID_AA64PFR0_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64PFR0_RAS_SHIFT,
+ .min_field_value = ID_AA64PFR0_RAS_V1,
+ .cpu_enable = cpu_clear_disr,
+ },
+#endif /* CONFIG_ARM64_RAS_EXTN */
+#ifdef CONFIG_ARM64_AMU_EXTN
+ {
+ /*
+ * The feature is enabled by default if CONFIG_ARM64_AMU_EXTN=y.
+ * Therefore, don't provide .desc as we don't want the detection
+ * message to be shown until at least one CPU is detected to
+ * support the feature.
+ */
+ .capability = ARM64_HAS_AMU_EXTN,
+ .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
+ .matches = has_amu,
+ .sys_reg = SYS_ID_AA64PFR0_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64PFR0_AMU_SHIFT,
+ .min_field_value = ID_AA64PFR0_AMU,
+ .cpu_enable = cpu_amu_enable,
+ },
+#endif /* CONFIG_ARM64_AMU_EXTN */
+ {
+ .desc = "Data cache clean to the PoU not required for I/D coherence",
+ .capability = ARM64_HAS_CACHE_IDC,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cache_idc,
+ .cpu_enable = cpu_emulate_effective_ctr,
+ },
+ {
+ .desc = "Instruction cache invalidation not required for I/D coherence",
+ .capability = ARM64_HAS_CACHE_DIC,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cache_dic,
+ },
+ {
+ .desc = "Stage-2 Force Write-Back",
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .capability = ARM64_HAS_STAGE2_FWB,
+ .sys_reg = SYS_ID_AA64MMFR2_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64MMFR2_FWB_SHIFT,
+ .min_field_value = 1,
+ .matches = has_cpuid_feature,
+ .cpu_enable = cpu_has_fwb,
+ },
+ {
+ .desc = "ARMv8.4 Translation Table Level",
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .capability = ARM64_HAS_ARMv8_4_TTL,
+ .sys_reg = SYS_ID_AA64MMFR2_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64MMFR2_TTL_SHIFT,
+ .min_field_value = 1,
+ .matches = has_cpuid_feature,
+ },
+ {
+ .desc = "TLB range maintenance instructions",
+ .capability = ARM64_HAS_TLB_RANGE,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cpuid_feature,
+ .sys_reg = SYS_ID_AA64ISAR0_EL1,
+ .field_pos = ID_AA64ISAR0_TLB_SHIFT,
+ .sign = FTR_UNSIGNED,
+ .min_field_value = ID_AA64ISAR0_TLB_RANGE,
+ },
+#ifdef CONFIG_ARM64_HW_AFDBM
+ {
+ /*
+		 * Since we always turn this on, we don't want the user to
+ * think that the feature is available when it may not be.
+ * So hide the description.
+ *
+ * .desc = "Hardware pagetable Dirty Bit Management",
+ *
+ */
+ .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
+ .capability = ARM64_HW_DBM,
+ .sys_reg = SYS_ID_AA64MMFR1_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64MMFR1_HADBS_SHIFT,
+ .min_field_value = 2,
+ .matches = has_hw_dbm,
+ .cpu_enable = cpu_enable_hw_dbm,
+ },
+#endif
+ {
+ .desc = "CRC32 instructions",
+ .capability = ARM64_HAS_CRC32,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cpuid_feature,
+ .sys_reg = SYS_ID_AA64ISAR0_EL1,
+ .field_pos = ID_AA64ISAR0_CRC32_SHIFT,
+ .min_field_value = 1,
+ },
+ {
+ .desc = "Speculative Store Bypassing Safe (SSBS)",
+ .capability = ARM64_SSBS,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cpuid_feature,
+ .sys_reg = SYS_ID_AA64PFR1_EL1,
+ .field_pos = ID_AA64PFR1_SSBS_SHIFT,
+ .sign = FTR_UNSIGNED,
+ .min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
+ },
+#ifdef CONFIG_ARM64_CNP
+ {
+ .desc = "Common not Private translations",
+ .capability = ARM64_HAS_CNP,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_useable_cnp,
+ .sys_reg = SYS_ID_AA64MMFR2_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64MMFR2_CNP_SHIFT,
+ .min_field_value = 1,
+ .cpu_enable = cpu_enable_cnp,
+ },
+#endif
+ {
+ .desc = "Speculation barrier (SB)",
+ .capability = ARM64_HAS_SB,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cpuid_feature,
+ .sys_reg = SYS_ID_AA64ISAR1_EL1,
+ .field_pos = ID_AA64ISAR1_SB_SHIFT,
+ .sign = FTR_UNSIGNED,
+ .min_field_value = 1,
+ },
+#ifdef CONFIG_ARM64_PTR_AUTH
+ {
+ .desc = "Address authentication (architected algorithm)",
+ .capability = ARM64_HAS_ADDRESS_AUTH_ARCH,
+ .type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
+ .sys_reg = SYS_ID_AA64ISAR1_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64ISAR1_APA_SHIFT,
+ .min_field_value = ID_AA64ISAR1_APA_ARCHITECTED,
+ .matches = has_address_auth_cpucap,
+ },
+ {
+ .desc = "Address authentication (IMP DEF algorithm)",
+ .capability = ARM64_HAS_ADDRESS_AUTH_IMP_DEF,
+ .type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
+ .sys_reg = SYS_ID_AA64ISAR1_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64ISAR1_API_SHIFT,
+ .min_field_value = ID_AA64ISAR1_API_IMP_DEF,
+ .matches = has_address_auth_cpucap,
+ },
+ {
+ .capability = ARM64_HAS_ADDRESS_AUTH,
+ .type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
+ .matches = has_address_auth_metacap,
+ },
+ {
+ .desc = "Generic authentication (architected algorithm)",
+ .capability = ARM64_HAS_GENERIC_AUTH_ARCH,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .sys_reg = SYS_ID_AA64ISAR1_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64ISAR1_GPA_SHIFT,
+ .min_field_value = ID_AA64ISAR1_GPA_ARCHITECTED,
+ .matches = has_cpuid_feature,
+ },
+ {
+ .desc = "Generic authentication (IMP DEF algorithm)",
+ .capability = ARM64_HAS_GENERIC_AUTH_IMP_DEF,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .sys_reg = SYS_ID_AA64ISAR1_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64ISAR1_GPI_SHIFT,
+ .min_field_value = ID_AA64ISAR1_GPI_IMP_DEF,
+ .matches = has_cpuid_feature,
+ },
+ {
+ .capability = ARM64_HAS_GENERIC_AUTH,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_generic_auth,
+ },
+#endif /* CONFIG_ARM64_PTR_AUTH */
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+ {
+ /*
+ * Depends on having GICv3
+ */
+ .desc = "IRQ priority masking",
+ .capability = ARM64_HAS_IRQ_PRIO_MASKING,
+ .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
+ .matches = can_use_gic_priorities,
+ .sys_reg = SYS_ID_AA64PFR0_EL1,
+ .field_pos = ID_AA64PFR0_GIC_SHIFT,
+ .sign = FTR_UNSIGNED,
+ .min_field_value = 1,
+ },
+#endif
+#ifdef CONFIG_ARM64_E0PD
+ {
+ .desc = "E0PD",
+ .capability = ARM64_HAS_E0PD,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .sys_reg = SYS_ID_AA64MMFR2_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64MMFR2_E0PD_SHIFT,
+ .matches = has_cpuid_feature,
+ .min_field_value = 1,
+ .cpu_enable = cpu_enable_e0pd,
+ },
+#endif
+#ifdef CONFIG_ARCH_RANDOM
+ {
+ .desc = "Random Number Generator",
+ .capability = ARM64_HAS_RNG,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cpuid_feature,
+ .sys_reg = SYS_ID_AA64ISAR0_EL1,
+ .field_pos = ID_AA64ISAR0_RNDR_SHIFT,
+ .sign = FTR_UNSIGNED,
+ .min_field_value = 1,
+ },
+#endif
+#ifdef CONFIG_ARM64_BTI
+ {
+ .desc = "Branch Target Identification",
+ .capability = ARM64_BTI,
+#ifdef CONFIG_ARM64_BTI_KERNEL
+ .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
+#else
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+#endif
+ .matches = has_cpuid_feature,
+ .cpu_enable = bti_enable,
+ .sys_reg = SYS_ID_AA64PFR1_EL1,
+ .field_pos = ID_AA64PFR1_BT_SHIFT,
+ .min_field_value = ID_AA64PFR1_BT_BTI,
+ .sign = FTR_UNSIGNED,
+ },
+#endif
+#ifdef CONFIG_ARM64_MTE
+ {
+ .desc = "Memory Tagging Extension",
+ .capability = ARM64_MTE,
+ .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
+ .matches = has_cpuid_feature,
+ .sys_reg = SYS_ID_AA64PFR1_EL1,
+ .field_pos = ID_AA64PFR1_MTE_SHIFT,
+ .min_field_value = ID_AA64PFR1_MTE,
+ .sign = FTR_UNSIGNED,
+ .cpu_enable = cpu_enable_mte,
+ },
+#endif /* CONFIG_ARM64_MTE */
+ {},
+};
+
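+/*
+ * Helpers for describing ELF hwcaps in terms of a single ID register field
+ * (HWCAP_CAP), a list of alternative fields (HWCAP_MULTI_CAP) or a custom
+ * match function (HWCAP_CAP_MATCH).
+ */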
+#define HWCAP_CPUID_MATCH(reg, field, s, min_value) \
+ .matches = has_cpuid_feature, \
+ .sys_reg = reg, \
+ .field_pos = field, \
+ .sign = s, \
+ .min_field_value = min_value,
+
+#define __HWCAP_CAP(name, cap_type, cap) \
+ .desc = name, \
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE, \
+ .hwcap_type = cap_type, \
+ .hwcap = cap, \
+
+#define HWCAP_CAP(reg, field, s, min_value, cap_type, cap) \
+ { \
+ __HWCAP_CAP(#cap, cap_type, cap) \
+ HWCAP_CPUID_MATCH(reg, field, s, min_value) \
+ }
+
+#define HWCAP_MULTI_CAP(list, cap_type, cap) \
+ { \
+ __HWCAP_CAP(#cap, cap_type, cap) \
+ .matches = cpucap_multi_entry_cap_matches, \
+ .match_list = list, \
+ }
+
+#define HWCAP_CAP_MATCH(match, cap_type, cap) \
+ { \
+ __HWCAP_CAP(#cap, cap_type, cap) \
+ .matches = match, \
+ }
+
+#ifdef CONFIG_ARM64_PTR_AUTH
+static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = {
+ {
+ HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_APA_SHIFT,
+ FTR_UNSIGNED, ID_AA64ISAR1_APA_ARCHITECTED)
+ },
+ {
+ HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_API_SHIFT,
+ FTR_UNSIGNED, ID_AA64ISAR1_API_IMP_DEF)
+ },
+ {},
+};
+
+static const struct arm64_cpu_capabilities ptr_auth_hwcap_gen_matches[] = {
+ {
+ HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_GPA_SHIFT,
+ FTR_UNSIGNED, ID_AA64ISAR1_GPA_ARCHITECTED)
+ },
+ {
+ HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_GPI_SHIFT,
+ FTR_UNSIGNED, ID_AA64ISAR1_GPI_IMP_DEF)
+ },
+ {},
+};
+#endif
+
+static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_PMULL),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AES),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA1),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA2),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_SHA512),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_CRC32),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ATOMICS),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDRDM),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA3),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM3),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM4),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDDP),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDFHM),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FLAGM),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RNDR_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RNG),
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_FP),
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP),
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD),
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP),
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_DIT_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DIT),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DCPOP),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_DCPODP),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_JSCVT),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FCMA),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_LRCPC),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ILRCPC),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FRINTTS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FRINT),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_SB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SB),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_BF16_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_BF16),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DGH_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DGH),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_I8MM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_I8MM),
+ HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_USCAT),
+#ifdef CONFIG_ARM64_SVE
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, KERNEL_HWCAP_SVE),
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SVEVER_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SVEVER_SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2),
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_AES_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_AES, CAP_HWCAP, KERNEL_HWCAP_SVEAES),
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_AES_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_AES_PMULL, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL),
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_BITPERM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_BITPERM, CAP_HWCAP, KERNEL_HWCAP_SVEBITPERM),
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_BF16_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_BF16, CAP_HWCAP, KERNEL_HWCAP_SVEBF16),
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SHA3_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SHA3, CAP_HWCAP, KERNEL_HWCAP_SVESHA3),
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SM4_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SM4, CAP_HWCAP, KERNEL_HWCAP_SVESM4),
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_I8MM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_I8MM, CAP_HWCAP, KERNEL_HWCAP_SVEI8MM),
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_F32MM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_F32MM, CAP_HWCAP, KERNEL_HWCAP_SVEF32MM),
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_F64MM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_F64MM, CAP_HWCAP, KERNEL_HWCAP_SVEF64MM),
+#endif
+ HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, KERNEL_HWCAP_SSBS),
+#ifdef CONFIG_ARM64_BTI
+ HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_BT_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_BT_BTI, CAP_HWCAP, KERNEL_HWCAP_BTI),
+#endif
+#ifdef CONFIG_ARM64_PTR_AUTH
+ HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, KERNEL_HWCAP_PACA),
+ HWCAP_MULTI_CAP(ptr_auth_hwcap_gen_matches, CAP_HWCAP, KERNEL_HWCAP_PACG),
+#endif
+#ifdef CONFIG_ARM64_MTE
+ HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_MTE_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_MTE, CAP_HWCAP, KERNEL_HWCAP_MTE),
+#endif /* CONFIG_ARM64_MTE */
+ HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_ECV_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV),
+ HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_AFP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AFP),
+ HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_RPRES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RPRES),
+ {},
+};
+
+#ifdef CONFIG_COMPAT
+static bool compat_has_neon(const struct arm64_cpu_capabilities *cap, int scope)
+{
+ /*
+ * Check that all of MVFR1_EL1.{SIMDSP, SIMDInt, SIMDLS} are available,
+	 * in line with what arm32 does in vfp_init(). We make the check
+	 * future proof by requiring the value to be non-zero.
+ */
+ u32 mvfr1;
+
+ WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
+ if (scope == SCOPE_SYSTEM)
+ mvfr1 = read_sanitised_ftr_reg(SYS_MVFR1_EL1);
+ else
+ mvfr1 = read_sysreg_s(SYS_MVFR1_EL1);
+
+ return cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDSP_SHIFT) &&
+ cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDINT_SHIFT) &&
+ cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDLS_SHIFT);
+}
+#endif
+
+static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
+#ifdef CONFIG_COMPAT
+ HWCAP_CAP_MATCH(compat_has_neon, CAP_COMPAT_HWCAP, COMPAT_HWCAP_NEON),
+ HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_SIMDFMAC_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv4),
+ /* Arm v8 mandates MVFR0.FPDP == {0, 2}. So, piggy back on this for the presence of VFP support */
+ HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFP),
+ HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv3),
+ HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
+ HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
+ HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
+ HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2),
+ HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32),
+#endif
+ {},
+};
+
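+/* Expose a detected capability to userspace via the corresponding ELF hwcap. */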
+static void __init cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap)
+{
+ switch (cap->hwcap_type) {
+ case CAP_HWCAP:
+ cpu_set_feature(cap->hwcap);
+ break;
+#ifdef CONFIG_COMPAT
+ case CAP_COMPAT_HWCAP:
+ compat_elf_hwcap |= (u32)cap->hwcap;
+ break;
+ case CAP_COMPAT_HWCAP2:
+ compat_elf_hwcap2 |= (u32)cap->hwcap;
+ break;
+#endif
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+/* Check if we have a particular HWCAP enabled */
+static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap)
+{
+ bool rc;
+
+ switch (cap->hwcap_type) {
+ case CAP_HWCAP:
+ rc = cpu_have_feature(cap->hwcap);
+ break;
+#ifdef CONFIG_COMPAT
+ case CAP_COMPAT_HWCAP:
+ rc = (compat_elf_hwcap & (u32)cap->hwcap) != 0;
+ break;
+ case CAP_COMPAT_HWCAP2:
+ rc = (compat_elf_hwcap2 & (u32)cap->hwcap) != 0;
+ break;
+#endif
+ default:
+ WARN_ON(1);
+ rc = false;
+ }
+
+ return rc;
+}
+
+static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
+{
+ /* We support emulation of accesses to CPU ID feature registers */
+ cpu_set_named_feature(CPUID);
+ for (; hwcaps->matches; hwcaps++)
+ if (hwcaps->matches(hwcaps, cpucap_default_scope(hwcaps)))
+ cap_set_elf_hwcap(hwcaps);
+}
+
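+/*
+ * Scan the indirect capability list and record any capability within
+ * scope_mask that matches, using each capability's default scope.
+ */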
+static void update_cpu_capabilities(u16 scope_mask)
+{
+ int i;
+ const struct arm64_cpu_capabilities *caps;
+
+ scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
+ for (i = 0; i < ARM64_NCAPS; i++) {
+ caps = cpu_hwcaps_ptrs[i];
+ if (!caps || !(caps->type & scope_mask) ||
+ cpus_have_cap(caps->capability) ||
+ !caps->matches(caps, cpucap_default_scope(caps)))
+ continue;
+
+ if (caps->desc)
+ pr_info("detected: %s\n", caps->desc);
+ cpus_set_cap(caps->capability);
+
+ if ((scope_mask & SCOPE_BOOT_CPU) && (caps->type & SCOPE_BOOT_CPU))
+ set_bit(caps->capability, boot_capabilities);
+ }
+}
+
+/*
+ * Enable all the available capabilities on this CPU. The capabilities
+ * with BOOT_CPU scope are handled separately and hence skipped here.
+ */
+static int cpu_enable_non_boot_scope_capabilities(void *__unused)
+{
+ int i;
+ u16 non_boot_scope = SCOPE_ALL & ~SCOPE_BOOT_CPU;
+
+ for_each_available_cap(i) {
+ const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[i];
+
+ if (WARN_ON(!cap))
+ continue;
+
+ if (!(cap->type & non_boot_scope))
+ continue;
+
+ if (cap->cpu_enable)
+ cap->cpu_enable(cap);
+ }
+ return 0;
+}
+
+/*
+ * Run through the enabled capabilities and enable them on all active
+ * CPUs.
+ */
+static void __init enable_cpu_capabilities(u16 scope_mask)
+{
+ int i;
+ const struct arm64_cpu_capabilities *caps;
+ bool boot_scope;
+
+ scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
+ boot_scope = !!(scope_mask & SCOPE_BOOT_CPU);
+
+ for (i = 0; i < ARM64_NCAPS; i++) {
+ unsigned int num;
+
+ caps = cpu_hwcaps_ptrs[i];
+ if (!caps || !(caps->type & scope_mask))
+ continue;
+ num = caps->capability;
+ if (!cpus_have_cap(num))
+ continue;
+
+ /* Ensure cpus_have_const_cap(num) works */
+ static_branch_enable(&cpu_hwcap_keys[num]);
+
+ if (boot_scope && caps->cpu_enable)
+ /*
+ * Capabilities with SCOPE_BOOT_CPU scope are finalised
+ * before any secondary CPU boots. Thus, each secondary
+ * will enable the capability as appropriate via
+ * check_local_cpu_capabilities(). The only exception is
+ * the boot CPU, for which the capability must be
+ * enabled here. This approach avoids costly
+ * stop_machine() calls for this case.
+ */
+ caps->cpu_enable(caps);
+ }
+
+ /*
+ * For all non-boot scope capabilities, use stop_machine()
+ * as it schedules the work allowing us to modify PSTATE,
+ * instead of on_each_cpu() which uses an IPI, giving us a
+ * PSTATE that disappears when we return.
+ */
+ if (!boot_scope)
+ stop_machine(cpu_enable_non_boot_scope_capabilities,
+ NULL, cpu_online_mask);
+}
+
+/*
+ * Run through the list of capabilities to check for conflicts.
+ * If the system has already detected a capability, take necessary
+ * action on this CPU.
+ */
+static void verify_local_cpu_caps(u16 scope_mask)
+{
+ int i;
+ bool cpu_has_cap, system_has_cap;
+ const struct arm64_cpu_capabilities *caps;
+
+ scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
+
+ for (i = 0; i < ARM64_NCAPS; i++) {
+ caps = cpu_hwcaps_ptrs[i];
+ if (!caps || !(caps->type & scope_mask))
+ continue;
+
+ cpu_has_cap = caps->matches(caps, SCOPE_LOCAL_CPU);
+ system_has_cap = cpus_have_cap(caps->capability);
+
+ if (system_has_cap) {
+ /*
+ * Check if the new CPU misses an advertised feature,
+ * which is not safe to miss.
+ */
+ if (!cpu_has_cap && !cpucap_late_cpu_optional(caps))
+ break;
+ /*
+ * We have to issue cpu_enable() irrespective of
+ * whether the CPU has it or not, as it is enabled
+ * system wide. It is up to the callback to take
+ * appropriate action on this CPU.
+ */
+ if (caps->cpu_enable)
+ caps->cpu_enable(caps);
+ } else {
+ /*
+ * Check whether this CPU has the capability even though
+ * the system doesn't, and whether that is permitted.
+ */
+ if (cpu_has_cap && !cpucap_late_cpu_permitted(caps))
+ break;
+ }
+ }
+
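+ /*
+ * Breaking out of the loop early means the capability at index i
+ * conflicts with the system-wide state; report the conflict and
+ * either panic or park this CPU.
+ */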
+ if (i < ARM64_NCAPS) {
+ pr_crit("CPU%d: Detected conflict for capability %d (%s), System: %d, CPU: %d\n",
+ smp_processor_id(), caps->capability,
+ caps->desc, system_has_cap, cpu_has_cap);
+
+ if (cpucap_panic_on_conflict(caps))
+ cpu_panic_kernel();
+ else
+ cpu_die_early();
+ }
+}
+
+/*
+ * Check for CPU features that are used in early boot
+ * based on the Boot CPU value.
+ */
+static void check_early_cpu_features(void)
+{
+ verify_cpu_asid_bits();
+
+ verify_local_cpu_caps(SCOPE_BOOT_CPU);
+}
+
+static void
+verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
+{
+
+ for (; caps->matches; caps++)
+ if (cpus_have_elf_hwcap(caps) && !caps->matches(caps, SCOPE_LOCAL_CPU)) {
+ pr_crit("CPU%d: missing HWCAP: %s\n",
+ smp_processor_id(), caps->desc);
+ cpu_die_early();
+ }
+}
+
+static void verify_sve_features(void)
+{
+ u64 safe_zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
+ u64 zcr = read_zcr_features();
+
+ unsigned int safe_len = safe_zcr & ZCR_ELx_LEN_MASK;
+ unsigned int len = zcr & ZCR_ELx_LEN_MASK;
+
+ if (len < safe_len || sve_verify_vq_map()) {
+ pr_crit("CPU%d: SVE: vector length support mismatch\n",
+ smp_processor_id());
+ cpu_die_early();
+ }
+
+ /* Add checks on other ZCR bits here if necessary */
+}
+
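+/*
+ * With KVM enabled, check that a late CPU does not shrink the
+ * virtualisation limits the system already advertises: its VMID width
+ * and maximum IPA size must be at least as large as the system-wide
+ * values.
+ */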
+static void verify_hyp_capabilities(void)
+{
+ u64 safe_mmfr1, mmfr0, mmfr1;
+ int parange, ipa_max;
+ unsigned int safe_vmid_bits, vmid_bits;
+
+ if (!IS_ENABLED(CONFIG_KVM))
+ return;
+
+ safe_mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
+ mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
+ mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
+
+ /* Verify VMID bits */
+ safe_vmid_bits = get_vmid_bits(safe_mmfr1);
+ vmid_bits = get_vmid_bits(mmfr1);
+ if (vmid_bits < safe_vmid_bits) {
+ pr_crit("CPU%d: VMID width mismatch\n", smp_processor_id());
+ cpu_die_early();
+ }
+
+ /* Verify IPA range */
+ parange = cpuid_feature_extract_unsigned_field(mmfr0,
+ ID_AA64MMFR0_PARANGE_SHIFT);
+ ipa_max = id_aa64mmfr0_parange_to_phys_shift(parange);
+ if (ipa_max < get_kvm_ipa_limit()) {
+ pr_crit("CPU%d: IPA range mismatch\n", smp_processor_id());
+ cpu_die_early();
+ }
+}
+
+/*
+ * Run through the enabled system capabilities and enable() them on this CPU.
+ * The capabilities were decided based on the CPUs available at boot time.
+ * Any new CPU must match the system-wide status of each capability. If the
+ * new CPU lacks a capability which the system has already enabled, we cannot
+ * do anything to fix it up and it could cause unexpected failures, so we
+ * park the CPU.
+ */
+static void verify_local_cpu_capabilities(void)
+{
+ /*
+ * The capabilities with SCOPE_BOOT_CPU are checked from
+ * check_early_cpu_features(), as they need to be verified
+ * on all secondary CPUs.
+ */
+ verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU);
+
+ verify_local_elf_hwcaps(arm64_elf_hwcaps);
+
+ if (system_supports_32bit_el0())
+ verify_local_elf_hwcaps(compat_elf_hwcaps);
+
+ if (system_supports_sve())
+ verify_sve_features();
+
+ if (is_hyp_mode_available())
+ verify_hyp_capabilities();
+}
+
+void check_local_cpu_capabilities(void)
+{
+ /*
+ * All secondary CPUs should conform to the early CPU features
+ * in use by the kernel based on boot CPU.
+ */
+ check_early_cpu_features();
+
+ /*
+ * If we haven't finalised the system capabilities, this CPU gets
+ * a chance to update the errata workarounds and local features.
+ * Otherwise, this CPU should verify that it has all the
+ * system-advertised capabilities.
+ */
+ if (!system_capabilities_finalized())
+ update_cpu_capabilities(SCOPE_LOCAL_CPU);
+ else
+ verify_local_cpu_capabilities();
+}
+
+static void __init setup_boot_cpu_capabilities(void)
+{
+ /* Detect capabilities with either SCOPE_BOOT_CPU or SCOPE_LOCAL_CPU */
+ update_cpu_capabilities(SCOPE_BOOT_CPU | SCOPE_LOCAL_CPU);
+ /* Enable the SCOPE_BOOT_CPU capabilities alone right away */
+ enable_cpu_capabilities(SCOPE_BOOT_CPU);
+}
+
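+/*
+ * Check whether the CPU we are currently running on provides capability @n.
+ * The SCOPE_LOCAL_CPU match reads this CPU's ID registers, so the caller
+ * must not be preemptible across the check.
+ */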
+bool this_cpu_has_cap(unsigned int n)
+{
+ if (!WARN_ON(preemptible()) && n < ARM64_NCAPS) {
+ const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[n];
+
+ if (cap)
+ return cap->matches(cap, SCOPE_LOCAL_CPU);
+ }
+
+ return false;
+}
+
+/*
+ * This helper function is used in a narrow window when:
+ * - the system-wide safe registers have been set up from all the SMP CPUs, and
+ * - the SYSTEM_FEATURE cpu_hwcaps may not have been set yet.
+ * In all other cases cpus_have_{const_}cap() should be used.
+ */
+static bool __system_matches_cap(unsigned int n)
+{
+ if (n < ARM64_NCAPS) {
+ const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[n];
+
+ if (cap)
+ return cap->matches(cap, SCOPE_SYSTEM);
+ }
+ return false;
+}
+
+void cpu_set_feature(unsigned int num)
+{
+ WARN_ON(num >= MAX_CPU_FEATURES);
+ elf_hwcap |= BIT(num);
+}
+EXPORT_SYMBOL_GPL(cpu_set_feature);
+
+bool cpu_have_feature(unsigned int num)
+{
+ WARN_ON(num >= MAX_CPU_FEATURES);
+ return elf_hwcap & BIT(num);
+}
+EXPORT_SYMBOL_GPL(cpu_have_feature);
+
+unsigned long cpu_get_elf_hwcap(void)
+{
+ /*
+ * We currently only populate the first 32 bits of AT_HWCAP. Please
+ * note that for userspace compatibility we guarantee that bits 62
+ * and 63 will always be returned as 0.
+ */
+ return lower_32_bits(elf_hwcap);
+}
+
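+/*
+ * The second hwcap word (AT_HWCAP2) is taken from bits [63:32] of the
+ * kernel's internal hwcap bitmap, so up to 64 feature bits can be exposed
+ * to userspace across the two words.
+ */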
+unsigned long cpu_get_elf_hwcap2(void)
+{
+ return upper_32_bits(elf_hwcap);
+}
+
+static void __init setup_system_capabilities(void)
+{
+ /*
+ * We have finalised the system-wide safe feature
+ * registers; now finalise the capabilities that depend
+ * on them, and enable any available capabilities that
+ * are not enabled already.
+ */
+ update_cpu_capabilities(SCOPE_SYSTEM);
+ enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU);
+}
+
+void __init setup_cpu_features(void)
+{
+ u32 cwg;
+
+ setup_system_capabilities();
+ setup_elf_hwcaps(arm64_elf_hwcaps);
+
+ if (system_supports_32bit_el0()) {
+ setup_elf_hwcaps(compat_elf_hwcaps);
+ elf_hwcap_fixup();
+ }
+
+ if (system_uses_ttbr0_pan())
+ pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
+
+ sve_setup();
+ minsigstksz_setup();
+
+ /* Advertise that we have computed the system capabilities */
+ finalize_system_capabilities();
+
+ /*
+ * Check for sane CTR_EL0.CWG value.
+ */
+ cwg = cache_type_cwg();
+ if (!cwg)
+ pr_warn("No Cache Writeback Granule information, assuming %d\n",
+ ARCH_DMA_MINALIGN);
+}
+
+static bool __maybe_unused
+cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
+{
+ return (__system_matches_cap(ARM64_HAS_PAN) && !__system_matches_cap(ARM64_HAS_UAO));
+}
+
+static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap)
+{
+ cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
+}
+
+/*
+ * We emulate only the following system register space.
+ * Op0 = 0x3, CRn = 0x0, Op1 = 0x0, CRm = [0, 4 - 7]
+ * See Table C5-6 System instruction encodings for System register accesses,
+ * ARMv8 ARM (ARM DDI 0487A.f), for more details.
+ */
+static inline bool __attribute_const__ is_emulated(u32 id)
+{
+ return (sys_reg_Op0(id) == 0x3 &&
+ sys_reg_CRn(id) == 0x0 &&
+ sys_reg_Op1(id) == 0x0 &&
+ (sys_reg_CRm(id) == 0 ||
+ ((sys_reg_CRm(id) >= 4) && (sys_reg_CRm(id) <= 7))));
+}
+
+/*
+ * With CRm == 0, reg should be one of:
+ * MIDR_EL1, MPIDR_EL1 or REVIDR_EL1.
+ */
+static inline int emulate_id_reg(u32 id, u64 *valp)
+{
+ switch (id) {
+ case SYS_MIDR_EL1:
+ *valp = read_cpuid_id();
+ break;
+ case SYS_MPIDR_EL1:
+ *valp = SYS_MPIDR_SAFE_VAL;
+ break;
+ case SYS_REVIDR_EL1:
+ /* IMPLEMENTATION DEFINED values are emulated with 0 */
+ *valp = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
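+/*
+ * Emulate an MRS access to one of the registers listed above. For the
+ * feature ID space (CRm == 4 - 7), return the sanitised, user-visible
+ * value rather than the raw value of the register on this CPU.
+ */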
+static int emulate_sys_reg(u32 id, u64 *valp)
+{
+ struct arm64_ftr_reg *regp;
+
+ if (!is_emulated(id))
+ return -EINVAL;
+
+ if (sys_reg_CRm(id) == 0)
+ return emulate_id_reg(id, valp);
+
+ regp = get_arm64_ftr_reg_nowarn(id);
+ if (regp)
+ *valp = arm64_ftr_reg_user_value(regp);
+ else
+ /*
+ * The untracked registers are either IMPLEMENTATION DEFINED
+ * (e.g., ID_AFR0_EL1) or reserved RAZ.
+ */
+ *valp = 0;
+ return 0;
+}
+
+int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt)
+{
+ int rc;
+ u64 val;
+
+ rc = emulate_sys_reg(sys_reg, &val);
+ if (!rc) {
+ pt_regs_write_reg(regs, rt, val);
+ arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
+ }
+ return rc;
+}
+
+bool try_emulate_mrs(struct pt_regs *regs, u32 insn)
+{
+ u32 sys_reg, rt;
+
+ if (compat_user_mode(regs) || !aarch64_insn_is_mrs(insn))
+ return false;
+
+ /*
+ * sys_reg values are defined as used in mrs/msr instructions.
+ * Shift the imm value to get the encoding.
+ */
+ sys_reg = (u32)aarch64_insn_decode_immediate(AARCH64_INSN_IMM_16, insn) << 5;
+ rt = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn);
+ return do_emulate_mrs(regs, sys_reg, rt) == 0;
+}
+
+ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ if (__meltdown_safe)
+ return sprintf(buf, "Not affected\n");
+
+ if (arm64_kernel_unmapped_at_el0())
+ return sprintf(buf, "Mitigation: PTI\n");
+
+ return sprintf(buf, "Vulnerable\n");
+}
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
new file mode 100644
index 000000000..d4ff9ae67
--- /dev/null
+++ b/arch/arm64/kernel/cpuidle.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ARM64 CPU idle arch support
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/cpuidle.h>
+#include <linux/cpu_pm.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/psci.h>
+
+#include <asm/cpuidle.h>
+#include <asm/cpu_ops.h>
+
+int arm_cpuidle_init(unsigned int cpu)
+{
+ const struct cpu_operations *ops = get_cpu_ops(cpu);
+ int ret = -EOPNOTSUPP;
+
+ if (ops && ops->cpu_suspend && ops->cpu_init_idle)
+ ret = ops->cpu_init_idle(cpu);
+
+ return ret;
+}
+
+/**
+ * arm_cpuidle_suspend() - function to enter a low-power idle state
+ * @index: argument to pass to CPU suspend operations
+ *
+ * Return: 0 on success, or a CPU operations back-end error code otherwise.
+ */
+int arm_cpuidle_suspend(int index)
+{
+ int cpu = smp_processor_id();
+ const struct cpu_operations *ops = get_cpu_ops(cpu);
+
+ return ops->cpu_suspend(index);
+}
+
+#ifdef CONFIG_ACPI
+
+#include <acpi/processor.h>
+
+#define ARM64_LPI_IS_RETENTION_STATE(arch_flags) (!(arch_flags))
+
+static int psci_acpi_cpu_init_idle(unsigned int cpu)
+{
+ int i, count;
+ struct acpi_lpi_state *lpi;
+ struct acpi_processor *pr = per_cpu(processors, cpu);
+
+ if (unlikely(!pr || !pr->flags.has_lpi))
+ return -EINVAL;
+
+ /*
+ * If the PSCI cpu_suspend function hook has not been initialized,
+ * idle states must not be enabled, so bail out.
+ */
+ if (!psci_ops.cpu_suspend)
+ return -EOPNOTSUPP;
+
+ count = pr->power.count - 1;
+ if (count <= 0)
+ return -ENODEV;
+
+ for (i = 0; i < count; i++) {
+ u32 state;
+
+ lpi = &pr->power.lpi_states[i + 1];
+ /*
+ * Only bits[31:0] represent a PSCI power_state while
+ * bits[63:32] must be 0x0 as per ARM ACPI FFH Specification
+ */
+ state = lpi->address;
+ if (!psci_power_state_is_valid(state)) {
+ pr_warn("Invalid PSCI power state %#x\n", state);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int acpi_processor_ffh_lpi_probe(unsigned int cpu)
+{
+ return psci_acpi_cpu_init_idle(cpu);
+}
+
+int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
+{
+ u32 state = lpi->address;
+
+ if (ARM64_LPI_IS_RETENTION_STATE(lpi->arch_flags))
+ return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(psci_cpu_suspend_enter,
+ lpi->index, state);
+ else
+ return CPU_PM_CPU_IDLE_ENTER_PARAM(psci_cpu_suspend_enter,
+ lpi->index, state);
+}
+#endif
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
new file mode 100644
index 000000000..4c0e72781
--- /dev/null
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -0,0 +1,425 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Record and handle CPU attributes.
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ */
+#include <asm/arch_timer.h>
+#include <asm/cache.h>
+#include <asm/cpu.h>
+#include <asm/cputype.h>
+#include <asm/cpufeature.h>
+#include <asm/fpsimd.h>
+
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/compat.h>
+#include <linux/elf.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/personality.h>
+#include <linux/preempt.h>
+#include <linux/printk.h>
+#include <linux/seq_file.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/delay.h>
+
+/*
+ * In case the boot CPU is hotpluggable, we record its initial state and
+ * current state separately. Certain system registers may contain different
+ * values depending on configuration at or after reset.
+ */
+DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data);
+static struct cpuinfo_arm64 boot_cpu_data;
+
+static const char *icache_policy_str[] = {
+ [ICACHE_POLICY_VPIPT] = "VPIPT",
+ [ICACHE_POLICY_RESERVED] = "RESERVED/UNKNOWN",
+ [ICACHE_POLICY_VIPT] = "VIPT",
+ [ICACHE_POLICY_PIPT] = "PIPT",
+};
+
+unsigned long __icache_flags;
+
+static const char *const hwcap_str[] = {
+ [KERNEL_HWCAP_FP] = "fp",
+ [KERNEL_HWCAP_ASIMD] = "asimd",
+ [KERNEL_HWCAP_EVTSTRM] = "evtstrm",
+ [KERNEL_HWCAP_AES] = "aes",
+ [KERNEL_HWCAP_PMULL] = "pmull",
+ [KERNEL_HWCAP_SHA1] = "sha1",
+ [KERNEL_HWCAP_SHA2] = "sha2",
+ [KERNEL_HWCAP_CRC32] = "crc32",
+ [KERNEL_HWCAP_ATOMICS] = "atomics",
+ [KERNEL_HWCAP_FPHP] = "fphp",
+ [KERNEL_HWCAP_ASIMDHP] = "asimdhp",
+ [KERNEL_HWCAP_CPUID] = "cpuid",
+ [KERNEL_HWCAP_ASIMDRDM] = "asimdrdm",
+ [KERNEL_HWCAP_JSCVT] = "jscvt",
+ [KERNEL_HWCAP_FCMA] = "fcma",
+ [KERNEL_HWCAP_LRCPC] = "lrcpc",
+ [KERNEL_HWCAP_DCPOP] = "dcpop",
+ [KERNEL_HWCAP_SHA3] = "sha3",
+ [KERNEL_HWCAP_SM3] = "sm3",
+ [KERNEL_HWCAP_SM4] = "sm4",
+ [KERNEL_HWCAP_ASIMDDP] = "asimddp",
+ [KERNEL_HWCAP_SHA512] = "sha512",
+ [KERNEL_HWCAP_SVE] = "sve",
+ [KERNEL_HWCAP_ASIMDFHM] = "asimdfhm",
+ [KERNEL_HWCAP_DIT] = "dit",
+ [KERNEL_HWCAP_USCAT] = "uscat",
+ [KERNEL_HWCAP_ILRCPC] = "ilrcpc",
+ [KERNEL_HWCAP_FLAGM] = "flagm",
+ [KERNEL_HWCAP_SSBS] = "ssbs",
+ [KERNEL_HWCAP_SB] = "sb",
+ [KERNEL_HWCAP_PACA] = "paca",
+ [KERNEL_HWCAP_PACG] = "pacg",
+ [KERNEL_HWCAP_DCPODP] = "dcpodp",
+ [KERNEL_HWCAP_SVE2] = "sve2",
+ [KERNEL_HWCAP_SVEAES] = "sveaes",
+ [KERNEL_HWCAP_SVEPMULL] = "svepmull",
+ [KERNEL_HWCAP_SVEBITPERM] = "svebitperm",
+ [KERNEL_HWCAP_SVESHA3] = "svesha3",
+ [KERNEL_HWCAP_SVESM4] = "svesm4",
+ [KERNEL_HWCAP_FLAGM2] = "flagm2",
+ [KERNEL_HWCAP_FRINT] = "frint",
+ [KERNEL_HWCAP_SVEI8MM] = "svei8mm",
+ [KERNEL_HWCAP_SVEF32MM] = "svef32mm",
+ [KERNEL_HWCAP_SVEF64MM] = "svef64mm",
+ [KERNEL_HWCAP_SVEBF16] = "svebf16",
+ [KERNEL_HWCAP_I8MM] = "i8mm",
+ [KERNEL_HWCAP_BF16] = "bf16",
+ [KERNEL_HWCAP_DGH] = "dgh",
+ [KERNEL_HWCAP_RNG] = "rng",
+ [KERNEL_HWCAP_BTI] = "bti",
+ [KERNEL_HWCAP_MTE] = "mte",
+ [KERNEL_HWCAP_ECV] = "ecv",
+ [KERNEL_HWCAP_AFP] = "afp",
+ [KERNEL_HWCAP_RPRES] = "rpres",
+};
+
+#ifdef CONFIG_COMPAT
+#define COMPAT_KERNEL_HWCAP(x) const_ilog2(COMPAT_HWCAP_ ## x)
+static const char *const compat_hwcap_str[] = {
+ [COMPAT_KERNEL_HWCAP(SWP)] = "swp",
+ [COMPAT_KERNEL_HWCAP(HALF)] = "half",
+ [COMPAT_KERNEL_HWCAP(THUMB)] = "thumb",
+ [COMPAT_KERNEL_HWCAP(26BIT)] = NULL, /* Not possible on arm64 */
+ [COMPAT_KERNEL_HWCAP(FAST_MULT)] = "fastmult",
+ [COMPAT_KERNEL_HWCAP(FPA)] = NULL, /* Not possible on arm64 */
+ [COMPAT_KERNEL_HWCAP(VFP)] = "vfp",
+ [COMPAT_KERNEL_HWCAP(EDSP)] = "edsp",
+ [COMPAT_KERNEL_HWCAP(JAVA)] = NULL, /* Not possible on arm64 */
+ [COMPAT_KERNEL_HWCAP(IWMMXT)] = NULL, /* Not possible on arm64 */
+ [COMPAT_KERNEL_HWCAP(CRUNCH)] = NULL, /* Not possible on arm64 */
+ [COMPAT_KERNEL_HWCAP(THUMBEE)] = NULL, /* Not possible on arm64 */
+ [COMPAT_KERNEL_HWCAP(NEON)] = "neon",
+ [COMPAT_KERNEL_HWCAP(VFPv3)] = "vfpv3",
+ [COMPAT_KERNEL_HWCAP(VFPV3D16)] = NULL, /* Not possible on arm64 */
+ [COMPAT_KERNEL_HWCAP(TLS)] = "tls",
+ [COMPAT_KERNEL_HWCAP(VFPv4)] = "vfpv4",
+ [COMPAT_KERNEL_HWCAP(IDIVA)] = "idiva",
+ [COMPAT_KERNEL_HWCAP(IDIVT)] = "idivt",
+ [COMPAT_KERNEL_HWCAP(VFPD32)] = NULL, /* Not possible on arm64 */
+ [COMPAT_KERNEL_HWCAP(LPAE)] = "lpae",
+ [COMPAT_KERNEL_HWCAP(EVTSTRM)] = "evtstrm",
+};
+
+#define COMPAT_KERNEL_HWCAP2(x) const_ilog2(COMPAT_HWCAP2_ ## x)
+static const char *const compat_hwcap2_str[] = {
+ [COMPAT_KERNEL_HWCAP2(AES)] = "aes",
+ [COMPAT_KERNEL_HWCAP2(PMULL)] = "pmull",
+ [COMPAT_KERNEL_HWCAP2(SHA1)] = "sha1",
+ [COMPAT_KERNEL_HWCAP2(SHA2)] = "sha2",
+ [COMPAT_KERNEL_HWCAP2(CRC32)] = "crc32",
+};
+#endif /* CONFIG_COMPAT */
+
+static int c_show(struct seq_file *m, void *v)
+{
+ int i, j;
+ bool compat = personality(current->personality) == PER_LINUX32;
+
+ for_each_online_cpu(i) {
+ struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
+ u32 midr = cpuinfo->reg_midr;
+
+ /*
+ * glibc reads /proc/cpuinfo to determine the number of
+ * online processors, looking for lines beginning with
+ * "processor". Give glibc what it expects.
+ */
+ seq_printf(m, "processor\t: %d\n", i);
+ if (compat)
+ seq_printf(m, "model name\t: ARMv8 Processor rev %d (%s)\n",
+ MIDR_REVISION(midr), COMPAT_ELF_PLATFORM);
+
+ seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
+ loops_per_jiffy / (500000UL/HZ),
+ loops_per_jiffy / (5000UL/HZ) % 100);
+
+ /*
+ * Dump out the common processor features in a single line.
+ * Userspace should read the hwcaps with getauxval(AT_HWCAP)
+ * rather than attempting to parse this, but there's a body of
+ * software which does already (at least for 32-bit).
+ */
+ seq_puts(m, "Features\t:");
+ if (compat) {
+#ifdef CONFIG_COMPAT
+ for (j = 0; j < ARRAY_SIZE(compat_hwcap_str); j++) {
+ if (compat_elf_hwcap & (1 << j)) {
+ /*
+ * Warn once if a feature that cannot exist
+ * on arm64 is reported as present.
+ */
+ if (WARN_ON_ONCE(!compat_hwcap_str[j]))
+ continue;
+
+ seq_printf(m, " %s", compat_hwcap_str[j]);
+ }
+ }
+
+ for (j = 0; j < ARRAY_SIZE(compat_hwcap2_str); j++)
+ if (compat_elf_hwcap2 & (1 << j))
+ seq_printf(m, " %s", compat_hwcap2_str[j]);
+#endif /* CONFIG_COMPAT */
+ } else {
+ for (j = 0; j < ARRAY_SIZE(hwcap_str); j++)
+ if (cpu_have_feature(j))
+ seq_printf(m, " %s", hwcap_str[j]);
+ }
+ seq_puts(m, "\n");
+
+ seq_printf(m, "CPU implementer\t: 0x%02x\n",
+ MIDR_IMPLEMENTOR(midr));
+ seq_printf(m, "CPU architecture: 8\n");
+ seq_printf(m, "CPU variant\t: 0x%x\n", MIDR_VARIANT(midr));
+ seq_printf(m, "CPU part\t: 0x%03x\n", MIDR_PARTNUM(midr));
+ seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr));
+ }
+
+ return 0;
+}
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+ return *pos < 1 ? (void *)1 : NULL;
+}
+
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ ++*pos;
+ return NULL;
+}
+
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+const struct seq_operations cpuinfo_op = {
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = c_show
+};
+
+
+static struct kobj_type cpuregs_kobj_type = {
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+/*
+ * The ARM ARM uses the phrase "32-bit register" to describe a register
+ * whose upper 32 bits are RES0 (per C5.1.1, ARM DDI 0487A.i), however
+ * no statement is made as to whether the upper 32 bits will or will not
+ * be made use of in future, and between ARM DDI 0487A.c and ARM DDI
+ * 0487A.d CLIDR_EL1 was expanded from 32-bit to 64-bit.
+ *
+ * Thus, while both MIDR_EL1 and REVIDR_EL1 are described as 32-bit
+ * registers, we expose them both as 64 bit values to cater for possible
+ * future expansion without an ABI break.
+ */
+#define kobj_to_cpuinfo(kobj) container_of(kobj, struct cpuinfo_arm64, kobj)
+#define CPUREGS_ATTR_RO(_name, _field) \
+ static ssize_t _name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf) \
+ { \
+ struct cpuinfo_arm64 *info = kobj_to_cpuinfo(kobj); \
+ \
+ if (info->reg_midr) \
+ return sprintf(buf, "0x%016x\n", info->reg_##_field); \
+ else \
+ return 0; \
+ } \
+ static struct kobj_attribute cpuregs_attr_##_name = __ATTR_RO(_name)
+
+CPUREGS_ATTR_RO(midr_el1, midr);
+CPUREGS_ATTR_RO(revidr_el1, revidr);
+
+static struct attribute *cpuregs_id_attrs[] = {
+ &cpuregs_attr_midr_el1.attr,
+ &cpuregs_attr_revidr_el1.attr,
+ NULL
+};
+
+static const struct attribute_group cpuregs_attr_group = {
+ .attrs = cpuregs_id_attrs,
+ .name = "identification"
+};
+
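+/*
+ * CPU hotplug callbacks: expose the recorded MIDR_EL1/REVIDR_EL1 values
+ * under /sys/devices/system/cpu/cpuN/regs/identification/ while the CPU
+ * is online.
+ */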
+static int cpuid_cpu_online(unsigned int cpu)
+{
+ int rc;
+ struct device *dev;
+ struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
+
+ dev = get_cpu_device(cpu);
+ if (!dev) {
+ rc = -ENODEV;
+ goto out;
+ }
+ rc = kobject_add(&info->kobj, &dev->kobj, "regs");
+ if (rc)
+ goto out;
+ rc = sysfs_create_group(&info->kobj, &cpuregs_attr_group);
+ if (rc)
+ kobject_del(&info->kobj);
+out:
+ return rc;
+}
+
+static int cpuid_cpu_offline(unsigned int cpu)
+{
+ struct device *dev;
+ struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
+
+ dev = get_cpu_device(cpu);
+ if (!dev)
+ return -ENODEV;
+ if (info->kobj.parent) {
+ sysfs_remove_group(&info->kobj, &cpuregs_attr_group);
+ kobject_del(&info->kobj);
+ }
+
+ return 0;
+}
+
+static int __init cpuinfo_regs_init(void)
+{
+ int cpu, ret;
+
+ for_each_possible_cpu(cpu) {
+ struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
+
+ kobject_init(&info->kobj, &cpuregs_kobj_type);
+ }
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "arm64/cpuinfo:online",
+ cpuid_cpu_online, cpuid_cpu_offline);
+ if (ret < 0) {
+ pr_err("cpuinfo: failed to register hotplug callbacks.\n");
+ return ret;
+ }
+ return 0;
+}
+device_initcall(cpuinfo_regs_init);
+
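+/*
+ * Decode the L1 instruction cache policy from CTR_EL0 and record whether
+ * the I-cache may alias (VIPT or unknown policies) or is VMID-aware PIPT
+ * (VPIPT).
+ */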
+static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
+{
+ unsigned int cpu = smp_processor_id();
+ u32 l1ip = CTR_L1IP(info->reg_ctr);
+
+ switch (l1ip) {
+ case ICACHE_POLICY_PIPT:
+ break;
+ case ICACHE_POLICY_VPIPT:
+ set_bit(ICACHEF_VPIPT, &__icache_flags);
+ break;
+ case ICACHE_POLICY_RESERVED:
+ case ICACHE_POLICY_VIPT:
+ /* Assume aliasing */
+ set_bit(ICACHEF_ALIASING, &__icache_flags);
+ break;
+ }
+
+ pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
+}
+
+static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
+{
+ info->reg_cntfrq = arch_timer_get_cntfrq();
+ /*
+ * Use the effective value of CTR_EL0 rather than the raw value
+ * exposed by the CPU. The CTR_EL0.IDC field must be interpreted
+ * together with the CLIDR_EL1 fields to avoid triggering false
+ * warnings when there is a mismatch across the CPUs. Keep track
+ * of the effective value of CTR_EL0 in our internal records for
+ * accurate sanity checks and feature enablement.
+ */
+ info->reg_ctr = read_cpuid_effective_cachetype();
+ info->reg_dczid = read_cpuid(DCZID_EL0);
+ info->reg_midr = read_cpuid_id();
+ info->reg_revidr = read_cpuid(REVIDR_EL1);
+
+ info->reg_id_aa64dfr0 = read_cpuid(ID_AA64DFR0_EL1);
+ info->reg_id_aa64dfr1 = read_cpuid(ID_AA64DFR1_EL1);
+ info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1);
+ info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1);
+ info->reg_id_aa64isar2 = read_cpuid(ID_AA64ISAR2_EL1);
+ info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
+ info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
+ info->reg_id_aa64mmfr2 = read_cpuid(ID_AA64MMFR2_EL1);
+ info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
+ info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
+ info->reg_id_aa64zfr0 = read_cpuid(ID_AA64ZFR0_EL1);
+
+ /* Update the 32bit ID registers only if AArch32 is implemented */
+ if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
+ info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
+ info->reg_id_dfr1 = read_cpuid(ID_DFR1_EL1);
+ info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
+ info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
+ info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
+ info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
+ info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
+ info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
+ info->reg_id_isar6 = read_cpuid(ID_ISAR6_EL1);
+ info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
+ info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
+ info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
+ info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
+ info->reg_id_mmfr4 = read_cpuid(ID_MMFR4_EL1);
+ info->reg_id_mmfr5 = read_cpuid(ID_MMFR5_EL1);
+ info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
+ info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
+ info->reg_id_pfr2 = read_cpuid(ID_PFR2_EL1);
+
+ info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
+ info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
+ info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
+ }
+
+ if (IS_ENABLED(CONFIG_ARM64_SVE) &&
+ id_aa64pfr0_sve(info->reg_id_aa64pfr0))
+ info->reg_zcr = read_zcr_features();
+
+ cpuinfo_detect_icache_policy(info);
+}
+
+void cpuinfo_store_cpu(void)
+{
+ struct cpuinfo_arm64 *info = this_cpu_ptr(&cpu_data);
+ __cpuinfo_store_cpu(info);
+ update_cpu_features(smp_processor_id(), info, &boot_cpu_data);
+}
+
+void __init cpuinfo_store_boot_cpu(void)
+{
+ struct cpuinfo_arm64 *info = &per_cpu(cpu_data, 0);
+ __cpuinfo_store_cpu(info);
+
+ boot_cpu_data = *info;
+ init_cpu_features(&boot_cpu_data);
+}
diff --git a/arch/arm64/kernel/crash_core.c b/arch/arm64/kernel/crash_core.c
new file mode 100644
index 000000000..314391a15
--- /dev/null
+++ b/arch/arm64/kernel/crash_core.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) Linaro.
+ * Copyright (C) Huawei Futurewei Technologies.
+ */
+
+#include <linux/crash_core.h>
+#include <asm/cpufeature.h>
+#include <asm/memory.h>
+#include <asm/pgtable-hwdef.h>
+
+static inline u64 get_tcr_el1_t1sz(void);
+
+static inline u64 get_tcr_el1_t1sz(void)
+{
+ return (read_sysreg(tcr_el1) & TCR_T1SZ_MASK) >> TCR_T1SZ_OFFSET;
+}
+
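+/*
+ * Export the arm64 address-space parameters (VA_BITS, kimage_voffset,
+ * PHYS_OFFSET, TCR_EL1.T1SZ, the KASLR offset and the kernel PAC mask)
+ * that vmcore post-processing tools need to translate kernel virtual
+ * addresses in the dump.
+ */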
+void arch_crash_save_vmcoreinfo(void)
+{
+ VMCOREINFO_NUMBER(VA_BITS);
+ /* Please note VMCOREINFO_NUMBER() uses "%d", not "%x" */
+ vmcoreinfo_append_str("NUMBER(kimage_voffset)=0x%llx\n",
+ kimage_voffset);
+ vmcoreinfo_append_str("NUMBER(PHYS_OFFSET)=0x%llx\n",
+ PHYS_OFFSET);
+ vmcoreinfo_append_str("NUMBER(TCR_EL1_T1SZ)=0x%llx\n",
+ get_tcr_el1_t1sz());
+ vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
+ vmcoreinfo_append_str("NUMBER(KERNELPACMASK)=0x%llx\n",
+ system_supports_address_auth() ?
+ ptrauth_kernel_pac_mask() : 0);
+}
diff --git a/arch/arm64/kernel/crash_dump.c b/arch/arm64/kernel/crash_dump.c
new file mode 100644
index 000000000..58303a9ec
--- /dev/null
+++ b/arch/arm64/kernel/crash_dump.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Routines for doing kexec-based kdump
+ *
+ * Copyright (C) 2017 Linaro Limited
+ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
+ */
+
+#include <linux/crash_dump.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/memblock.h>
+#include <linux/uaccess.h>
+#include <asm/memory.h>
+
+/**
+ * copy_oldmem_page() - copy one page from old kernel memory
+ * @pfn: page frame number to be copied
+ * @buf: buffer where the copied page is placed
+ * @csize: number of bytes to copy
+ * @offset: offset in bytes into the page
+ * @userbuf: if set, @buf is in a user address space
+ *
+ * This function copies one page from old kernel memory into the buffer
+ * pointed to by @buf. If @buf is in userspace, set @userbuf to %1. Returns
+ * the number of bytes copied, or a negative error code on failure.
+ */
+ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+ size_t csize, unsigned long offset,
+ int userbuf)
+{
+ void *vaddr;
+
+ if (!csize)
+ return 0;
+
+ vaddr = memremap(__pfn_to_phys(pfn), PAGE_SIZE, MEMREMAP_WB);
+ if (!vaddr)
+ return -ENOMEM;
+
+ if (userbuf) {
+ if (copy_to_user((char __user *)buf, vaddr + offset, csize)) {
+ memunmap(vaddr);
+ return -EFAULT;
+ }
+ } else {
+ memcpy(buf, vaddr + offset, csize);
+ }
+
+ memunmap(vaddr);
+
+ return csize;
+}
+
+/**
+ * elfcorehdr_read - read from ELF core header
+ * @buf: buffer where the data is placed
+ * @count: number of bytes to read
+ * @ppos: physical address to read from, advanced by @count on return
+ *
+ * This function reads @count bytes from the ELF core header, which
+ * resides in the crash dump kernel's memory.
+ */
+ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
+{
+ memcpy(buf, phys_to_virt((phys_addr_t)*ppos), count);
+ *ppos += count;
+
+ return count;
+}
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
new file mode 100644
index 000000000..38a0213fd
--- /dev/null
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -0,0 +1,461 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ARMv8 single-step debug support and mdscr context switching.
+ *
+ * Copyright (C) 2012 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/cpu.h>
+#include <linux/debugfs.h>
+#include <linux/hardirq.h>
+#include <linux/init.h>
+#include <linux/ptrace.h>
+#include <linux/kprobes.h>
+#include <linux/stat.h>
+#include <linux/uaccess.h>
+#include <linux/sched/task_stack.h>
+
+#include <asm/cpufeature.h>
+#include <asm/cputype.h>
+#include <asm/daifflags.h>
+#include <asm/debug-monitors.h>
+#include <asm/system_misc.h>
+#include <asm/traps.h>
+
+/* Determine debug architecture. */
+u8 debug_monitors_arch(void)
+{
+ return cpuid_feature_extract_unsigned_field(read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1),
+ ID_AA64DFR0_DEBUGVER_SHIFT);
+}
+
+/*
+ * MDSCR access routines.
+ */
+static void mdscr_write(u32 mdscr)
+{
+ unsigned long flags;
+ flags = local_daif_save();
+ write_sysreg(mdscr, mdscr_el1);
+ local_daif_restore(flags);
+}
+NOKPROBE_SYMBOL(mdscr_write);
+
+static u32 mdscr_read(void)
+{
+ return read_sysreg(mdscr_el1);
+}
+NOKPROBE_SYMBOL(mdscr_read);
+
+/*
+ * Allow root to disable self-hosted debug from userspace.
+ * This is useful if you want to connect an external JTAG debugger.
+ */
+static bool debug_enabled = true;
+
+static int create_debug_debugfs_entry(void)
+{
+ debugfs_create_bool("debug_enabled", 0644, NULL, &debug_enabled);
+ return 0;
+}
+fs_initcall(create_debug_debugfs_entry);
+
+static int __init early_debug_disable(char *buf)
+{
+ debug_enabled = false;
+ return 0;
+}
+
+early_param("nodebugmon", early_debug_disable);
+
+/*
+ * Keep track of debug users on each core.
+ * The ref counts are per-CPU and callers are expected to run with
+ * preemption disabled, so plain per-CPU counters suffice.
+ */
+static DEFINE_PER_CPU(int, mde_ref_count);
+static DEFINE_PER_CPU(int, kde_ref_count);
+
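+/*
+ * MDSCR_EL1.MDE enables the monitor debug events (breakpoints and
+ * watchpoints), while MDSCR_EL1.KDE additionally allows debug exceptions
+ * to be taken from EL1. Both are reference counted per CPU so independent
+ * users can nest enable/disable calls.
+ */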
+void enable_debug_monitors(enum dbg_active_el el)
+{
+ u32 mdscr, enable = 0;
+
+ WARN_ON(preemptible());
+
+ if (this_cpu_inc_return(mde_ref_count) == 1)
+ enable = DBG_MDSCR_MDE;
+
+ if (el == DBG_ACTIVE_EL1 &&
+ this_cpu_inc_return(kde_ref_count) == 1)
+ enable |= DBG_MDSCR_KDE;
+
+ if (enable && debug_enabled) {
+ mdscr = mdscr_read();
+ mdscr |= enable;
+ mdscr_write(mdscr);
+ }
+}
+NOKPROBE_SYMBOL(enable_debug_monitors);
+
+void disable_debug_monitors(enum dbg_active_el el)
+{
+ u32 mdscr, disable = 0;
+
+ WARN_ON(preemptible());
+
+ if (this_cpu_dec_return(mde_ref_count) == 0)
+ disable = ~DBG_MDSCR_MDE;
+
+ if (el == DBG_ACTIVE_EL1 &&
+ this_cpu_dec_return(kde_ref_count) == 0)
+ disable &= ~DBG_MDSCR_KDE;
+
+ if (disable) {
+ mdscr = mdscr_read();
+ mdscr &= disable;
+ mdscr_write(mdscr);
+ }
+}
+NOKPROBE_SYMBOL(disable_debug_monitors);
+
+/*
+ * OS lock clearing.
+ */
+static int clear_os_lock(unsigned int cpu)
+{
+ write_sysreg(0, osdlr_el1);
+ write_sysreg(0, oslar_el1);
+ isb();
+ return 0;
+}
+
+static int __init debug_monitors_init(void)
+{
+ return cpuhp_setup_state(CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
+ "arm64/debug_monitors:starting",
+ clear_os_lock, NULL);
+}
+postcore_initcall(debug_monitors_init);
+
+/*
+ * Single step API and exception handling.
+ */
+static void set_user_regs_spsr_ss(struct user_pt_regs *regs)
+{
+ regs->pstate |= DBG_SPSR_SS;
+}
+NOKPROBE_SYMBOL(set_user_regs_spsr_ss);
+
+static void clear_user_regs_spsr_ss(struct user_pt_regs *regs)
+{
+ regs->pstate &= ~DBG_SPSR_SS;
+}
+NOKPROBE_SYMBOL(clear_user_regs_spsr_ss);
+
+#define set_regs_spsr_ss(r) set_user_regs_spsr_ss(&(r)->user_regs)
+#define clear_regs_spsr_ss(r) clear_user_regs_spsr_ss(&(r)->user_regs)
+
+static DEFINE_SPINLOCK(debug_hook_lock);
+static LIST_HEAD(user_step_hook);
+static LIST_HEAD(kernel_step_hook);
+
+static void register_debug_hook(struct list_head *node, struct list_head *list)
+{
+ spin_lock(&debug_hook_lock);
+ list_add_rcu(node, list);
+ spin_unlock(&debug_hook_lock);
+
+}
+
+static void unregister_debug_hook(struct list_head *node)
+{
+ spin_lock(&debug_hook_lock);
+ list_del_rcu(node);
+ spin_unlock(&debug_hook_lock);
+ synchronize_rcu();
+}
+
+void register_user_step_hook(struct step_hook *hook)
+{
+ register_debug_hook(&hook->node, &user_step_hook);
+}
+
+void unregister_user_step_hook(struct step_hook *hook)
+{
+ unregister_debug_hook(&hook->node);
+}
+
+void register_kernel_step_hook(struct step_hook *hook)
+{
+ register_debug_hook(&hook->node, &kernel_step_hook);
+}
+
+void unregister_kernel_step_hook(struct step_hook *hook)
+{
+ unregister_debug_hook(&hook->node);
+}
+
+/*
+ * Call the registered single-step handlers.
+ * There is no syndrome information to determine which handler applies,
+ * so call every registered handler in turn until one handles the
+ * exception and returns zero (DBG_HOOK_HANDLED).
+ */
+static int call_step_hook(struct pt_regs *regs, unsigned int esr)
+{
+ struct step_hook *hook;
+ struct list_head *list;
+ int retval = DBG_HOOK_ERROR;
+
+ list = user_mode(regs) ? &user_step_hook : &kernel_step_hook;
+
+ /*
+ * Since the single-step exception keeps interrupts disabled, this
+ * function cannot be preempted, so it is safe to walk the RCU list here.
+ */
+ list_for_each_entry_rcu(hook, list, node) {
+ retval = hook->fn(regs, esr);
+ if (retval == DBG_HOOK_HANDLED)
+ break;
+ }
+
+ return retval;
+}
+NOKPROBE_SYMBOL(call_step_hook);
+
+static void send_user_sigtrap(int si_code)
+{
+ struct pt_regs *regs = current_pt_regs();
+
+ if (WARN_ON(!user_mode(regs)))
+ return;
+
+ if (interrupts_enabled(regs))
+ local_irq_enable();
+
+ arm64_force_sig_fault(SIGTRAP, si_code,
+ (void __user *)instruction_pointer(regs),
+ "User debug trap");
+}
+
+static int single_step_handler(unsigned long unused, unsigned int esr,
+ struct pt_regs *regs)
+{
+ bool handler_found = false;
+
+ /*
+ * If we are stepping a pending breakpoint, call the hw_breakpoint
+ * handler first.
+ */
+ if (!reinstall_suspended_bps(regs))
+ return 0;
+
+ if (!handler_found && call_step_hook(regs, esr) == DBG_HOOK_HANDLED)
+ handler_found = true;
+
+ if (!handler_found && user_mode(regs)) {
+ send_user_sigtrap(TRAP_TRACE);
+
+ /*
+ * ptrace will disable single step unless explicitly
+ * asked to re-enable it. For other clients, it makes
+ * sense to leave it enabled (i.e. rewind the controls
+ * to the active-not-pending state).
+ */
+ user_rewind_single_step(current);
+ } else if (!handler_found) {
+ pr_warn("Unexpected kernel single-step exception at EL1\n");
+ /*
+ * Re-enable stepping since we know that we will be
+ * returning to regs.
+ */
+ set_regs_spsr_ss(regs);
+ }
+
+ return 0;
+}
+NOKPROBE_SYMBOL(single_step_handler);
+
+static LIST_HEAD(user_break_hook);
+static LIST_HEAD(kernel_break_hook);
+
+void register_user_break_hook(struct break_hook *hook)
+{
+ register_debug_hook(&hook->node, &user_break_hook);
+}
+
+void unregister_user_break_hook(struct break_hook *hook)
+{
+ unregister_debug_hook(&hook->node);
+}
+
+void register_kernel_break_hook(struct break_hook *hook)
+{
+ register_debug_hook(&hook->node, &kernel_break_hook);
+}
+
+void unregister_kernel_break_hook(struct break_hook *hook)
+{
+ unregister_debug_hook(&hook->node);
+}
+
+static int call_break_hook(struct pt_regs *regs, unsigned int esr)
+{
+ struct break_hook *hook;
+ struct list_head *list;
+ int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL;
+
+ list = user_mode(regs) ? &user_break_hook : &kernel_break_hook;
+
+ /*
+ * Since the brk exception keeps interrupts disabled, this function
+ * cannot be preempted, so it is safe to walk the RCU list here.
+ */
+ list_for_each_entry_rcu(hook, list, node) {
+ unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;
+
+ if ((comment & ~hook->mask) == hook->imm)
+ fn = hook->fn;
+ }
+
+ return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
+}
+NOKPROBE_SYMBOL(call_break_hook);
+
+static int brk_handler(unsigned long unused, unsigned int esr,
+ struct pt_regs *regs)
+{
+ if (call_break_hook(regs, esr) == DBG_HOOK_HANDLED)
+ return 0;
+
+ if (user_mode(regs)) {
+ send_user_sigtrap(TRAP_BRKPT);
+ } else {
+ pr_warn("Unexpected kernel BRK exception at EL1\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+NOKPROBE_SYMBOL(brk_handler);
+
+int aarch32_break_handler(struct pt_regs *regs)
+{
+ u32 arm_instr;
+ u16 thumb_instr;
+ bool bp = false;
+ void __user *pc = (void __user *)instruction_pointer(regs);
+
+ if (!compat_user_mode(regs))
+ return -EFAULT;
+
+ if (compat_thumb_mode(regs)) {
+ /* get 16-bit Thumb instruction */
+ __le16 instr;
+ get_user(instr, (__le16 __user *)pc);
+ thumb_instr = le16_to_cpu(instr);
+ if (thumb_instr == AARCH32_BREAK_THUMB2_LO) {
+ /* get second half of 32-bit Thumb-2 instruction */
+ get_user(instr, (__le16 __user *)(pc + 2));
+ thumb_instr = le16_to_cpu(instr);
+ bp = thumb_instr == AARCH32_BREAK_THUMB2_HI;
+ } else {
+ bp = thumb_instr == AARCH32_BREAK_THUMB;
+ }
+ } else {
+ /* 32-bit ARM instruction */
+ __le32 instr;
+ get_user(instr, (__le32 __user *)pc);
+ arm_instr = le32_to_cpu(instr);
+ bp = (arm_instr & ~0xf0000000) == AARCH32_BREAK_ARM;
+ }
+
+ if (!bp)
+ return -EFAULT;
+
+ send_user_sigtrap(TRAP_BRKPT);
+ return 0;
+}
+NOKPROBE_SYMBOL(aarch32_break_handler);
+
+void __init debug_traps_init(void)
+{
+ hook_debug_fault_code(DBG_ESR_EVT_HWSS, single_step_handler, SIGTRAP,
+ TRAP_TRACE, "single-step handler");
+ hook_debug_fault_code(DBG_ESR_EVT_BRK, brk_handler, SIGTRAP,
+ TRAP_BRKPT, "BRK handler");
+}
+
+/* Re-enable single step for syscall restarting. */
+void user_rewind_single_step(struct task_struct *task)
+{
+ /*
+ * If single step is active for this thread, then set SPSR.SS
+ * to 1 to avoid returning to the active-pending state.
+ */
+ if (test_tsk_thread_flag(task, TIF_SINGLESTEP))
+ set_regs_spsr_ss(task_pt_regs(task));
+}
+NOKPROBE_SYMBOL(user_rewind_single_step);
+
+void user_fastforward_single_step(struct task_struct *task)
+{
+ if (test_tsk_thread_flag(task, TIF_SINGLESTEP))
+ clear_regs_spsr_ss(task_pt_regs(task));
+}
+
+void user_regs_reset_single_step(struct user_pt_regs *regs,
+ struct task_struct *task)
+{
+ if (test_tsk_thread_flag(task, TIF_SINGLESTEP))
+ set_user_regs_spsr_ss(regs);
+ else
+ clear_user_regs_spsr_ss(regs);
+}
+
+/* Kernel API */
+void kernel_enable_single_step(struct pt_regs *regs)
+{
+ WARN_ON(!irqs_disabled());
+ set_regs_spsr_ss(regs);
+ mdscr_write(mdscr_read() | DBG_MDSCR_SS);
+ enable_debug_monitors(DBG_ACTIVE_EL1);
+}
+NOKPROBE_SYMBOL(kernel_enable_single_step);
+
+void kernel_disable_single_step(void)
+{
+ WARN_ON(!irqs_disabled());
+ mdscr_write(mdscr_read() & ~DBG_MDSCR_SS);
+ disable_debug_monitors(DBG_ACTIVE_EL1);
+}
+NOKPROBE_SYMBOL(kernel_disable_single_step);
+
+int kernel_active_single_step(void)
+{
+ WARN_ON(!irqs_disabled());
+ return mdscr_read() & DBG_MDSCR_SS;
+}
+NOKPROBE_SYMBOL(kernel_active_single_step);
+
+void kernel_rewind_single_step(struct pt_regs *regs)
+{
+ set_regs_spsr_ss(regs);
+}
+
+/* ptrace API */
+void user_enable_single_step(struct task_struct *task)
+{
+ struct thread_info *ti = task_thread_info(task);
+
+ if (!test_and_set_ti_thread_flag(ti, TIF_SINGLESTEP))
+ set_regs_spsr_ss(task_pt_regs(task));
+}
+NOKPROBE_SYMBOL(user_enable_single_step);
+
+void user_disable_single_step(struct task_struct *task)
+{
+ clear_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP);
+}
+NOKPROBE_SYMBOL(user_disable_single_step);
diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S
new file mode 100644
index 000000000..0073b24b5
--- /dev/null
+++ b/arch/arm64/kernel/efi-entry.S
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * EFI entry point.
+ *
+ * Copyright (C) 2013, 2014 Red Hat, Inc.
+ * Author: Mark Salter <msalter@redhat.com>
+ */
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+#include <asm/assembler.h>
+
+ __INIT
+
+SYM_CODE_START(efi_enter_kernel)
+ /*
+ * efi_pe_entry() will have copied the kernel image if necessary and we
+ * end up here with device tree address in x1 and the kernel entry
+ * point stored in x0. Save those values in registers which are
+ * callee preserved.
+ */
+ ldr w2, =primary_entry_offset
+ add x19, x0, x2 // relocated Image entrypoint
+ mov x20, x1 // DTB address
+
+ /*
+ * Clean the copied Image to the PoC, and ensure it is not shadowed by
+ * stale icache entries from before relocation.
+ */
+ ldr w1, =kernel_size
+ bl __clean_dcache_area_poc
+ ic ialluis
+
+ /*
+ * Clean the remainder of this routine to the PoC
+ * so that we can safely disable the MMU and caches.
+ */
+ adr x0, 0f
+ ldr w1, 3f
+ bl __clean_dcache_area_poc
+0:
+ /* Turn off Dcache and MMU */
+ mrs x0, CurrentEL
+ cmp x0, #CurrentEL_EL2
+ b.ne 1f
+ mrs x0, sctlr_el2
+ bic x0, x0, #1 << 0 // clear SCTLR.M
+ bic x0, x0, #1 << 2 // clear SCTLR.C
+ pre_disable_mmu_workaround
+ msr sctlr_el2, x0
+ isb
+ b 2f
+1:
+ mrs x0, sctlr_el1
+ bic x0, x0, #1 << 0 // clear SCTLR.M
+ bic x0, x0, #1 << 2 // clear SCTLR.C
+ pre_disable_mmu_workaround
+ msr sctlr_el1, x0
+ isb
+2:
+ /* Jump to kernel entry point */
+ mov x0, x20
+ mov x1, xzr
+ mov x2, xzr
+ mov x3, xzr
+ br x19
+SYM_CODE_END(efi_enter_kernel)
+3: .long . - 0b
diff --git a/arch/arm64/kernel/efi-header.S b/arch/arm64/kernel/efi-header.S
new file mode 100644
index 000000000..a71844fb9
--- /dev/null
+++ b/arch/arm64/kernel/efi-header.S
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 - 2017 Linaro, Ltd.
+ * Copyright (C) 2013, 2014 Red Hat, Inc.
+ */
+
+#include <linux/pe.h>
+#include <linux/sizes.h>
+
+ .macro __EFI_PE_HEADER
+ .long PE_MAGIC
+coff_header:
+ .short IMAGE_FILE_MACHINE_ARM64 // Machine
+ .short section_count // NumberOfSections
+ .long 0 // TimeDateStamp
+ .long 0 // PointerToSymbolTable
+ .long 0 // NumberOfSymbols
+ .short section_table - optional_header // SizeOfOptionalHeader
+ .short IMAGE_FILE_DEBUG_STRIPPED | \
+ IMAGE_FILE_EXECUTABLE_IMAGE | \
+ IMAGE_FILE_LINE_NUMS_STRIPPED // Characteristics
+
+optional_header:
+ .short PE_OPT_MAGIC_PE32PLUS // PE32+ format
+ .byte 0x02 // MajorLinkerVersion
+ .byte 0x14 // MinorLinkerVersion
+ .long __initdata_begin - efi_header_end // SizeOfCode
+ .long __pecoff_data_size // SizeOfInitializedData
+ .long 0 // SizeOfUninitializedData
+ .long __efistub_efi_pe_entry - _head // AddressOfEntryPoint
+ .long efi_header_end - _head // BaseOfCode
+
+extra_header_fields:
+ .quad 0 // ImageBase
+ .long SEGMENT_ALIGN // SectionAlignment
+ .long PECOFF_FILE_ALIGNMENT // FileAlignment
+ .short 0 // MajorOperatingSystemVersion
+ .short 0 // MinorOperatingSystemVersion
+ .short LINUX_EFISTUB_MAJOR_VERSION // MajorImageVersion
+ .short LINUX_EFISTUB_MINOR_VERSION // MinorImageVersion
+ .short 0 // MajorSubsystemVersion
+ .short 0 // MinorSubsystemVersion
+ .long 0 // Win32VersionValue
+
+ .long _end - _head // SizeOfImage
+
+ // Everything before the kernel image is considered part of the header
+ .long efi_header_end - _head // SizeOfHeaders
+ .long 0 // CheckSum
+ .short IMAGE_SUBSYSTEM_EFI_APPLICATION // Subsystem
+ .short 0 // DllCharacteristics
+ .quad 0 // SizeOfStackReserve
+ .quad 0 // SizeOfStackCommit
+ .quad 0 // SizeOfHeapReserve
+ .quad 0 // SizeOfHeapCommit
+ .long 0 // LoaderFlags
+ .long (section_table - .) / 8 // NumberOfRvaAndSizes
+
+ .quad 0 // ExportTable
+ .quad 0 // ImportTable
+ .quad 0 // ResourceTable
+ .quad 0 // ExceptionTable
+ .quad 0 // CertificationTable
+ .quad 0 // BaseRelocationTable
+
+#ifdef CONFIG_DEBUG_EFI
+ .long efi_debug_table - _head // DebugTable
+ .long efi_debug_table_size
+#endif
+
+ // Section table
+section_table:
+ .ascii ".text\0\0\0"
+ .long __initdata_begin - efi_header_end // VirtualSize
+ .long efi_header_end - _head // VirtualAddress
+ .long __initdata_begin - efi_header_end // SizeOfRawData
+ .long efi_header_end - _head // PointerToRawData
+
+ .long 0 // PointerToRelocations
+ .long 0 // PointerToLineNumbers
+ .short 0 // NumberOfRelocations
+ .short 0 // NumberOfLineNumbers
+ .long IMAGE_SCN_CNT_CODE | \
+ IMAGE_SCN_MEM_READ | \
+ IMAGE_SCN_MEM_EXECUTE // Characteristics
+
+ .ascii ".data\0\0\0"
+ .long __pecoff_data_size // VirtualSize
+ .long __initdata_begin - _head // VirtualAddress
+ .long __pecoff_data_rawsize // SizeOfRawData
+ .long __initdata_begin - _head // PointerToRawData
+
+ .long 0 // PointerToRelocations
+ .long 0 // PointerToLineNumbers
+ .short 0 // NumberOfRelocations
+ .short 0 // NumberOfLineNumbers
+ .long IMAGE_SCN_CNT_INITIALIZED_DATA | \
+ IMAGE_SCN_MEM_READ | \
+ IMAGE_SCN_MEM_WRITE // Characteristics
+
+ .set section_count, (. - section_table) / 40
+
+#ifdef CONFIG_DEBUG_EFI
+ /*
+ * The debug table is referenced via its Relative Virtual Address (RVA),
+ * which is only defined for those parts of the image that are covered
+ * by a section declaration. Since this header is not covered by any
+ * section, the debug table must be emitted elsewhere. So stick it in
+ * the .init.rodata section instead.
+ *
+ * Note that the EFI debug entry itself may legally have a zero RVA,
+ * which means we can simply put it right after the section headers.
+ */
+ __INITRODATA
+
+ .align 2
+efi_debug_table:
+ // EFI_IMAGE_DEBUG_DIRECTORY_ENTRY
+ .long 0 // Characteristics
+ .long 0 // TimeDateStamp
+ .short 0 // MajorVersion
+ .short 0 // MinorVersion
+ .long IMAGE_DEBUG_TYPE_CODEVIEW // Type
+ .long efi_debug_entry_size // SizeOfData
+ .long 0 // RVA
+ .long efi_debug_entry - _head // FileOffset
+
+ .set efi_debug_table_size, . - efi_debug_table
+ .previous
+
+efi_debug_entry:
+ // EFI_IMAGE_DEBUG_CODEVIEW_NB10_ENTRY
+ .ascii "NB10" // Signature
+ .long 0 // Unknown
+ .long 0 // Unknown2
+ .long 0 // Unknown3
+
+ .asciz VMLINUX_PATH
+
+ .set efi_debug_entry_size, . - efi_debug_entry
+#endif
+
+ /*
+ * EFI will load .text onwards at the 4k section alignment
+ * described in the PE/COFF header. To ensure that instruction
+ * sequences using an adrp and a :lo12: immediate will function
+ * correctly at this alignment, we must ensure that .text is
+ * placed at a 4k boundary in the Image to begin with.
+ */
+ .balign SEGMENT_ALIGN
+efi_header_end:
+ .endm
diff --git a/arch/arm64/kernel/efi-rt-wrapper.S b/arch/arm64/kernel/efi-rt-wrapper.S
new file mode 100644
index 000000000..2d3c4b023
--- /dev/null
+++ b/arch/arm64/kernel/efi-rt-wrapper.S
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2018 Linaro Ltd <ard.biesheuvel@linaro.org>
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+SYM_FUNC_START(__efi_rt_asm_wrapper)
+ stp x29, x30, [sp, #-32]!
+ mov x29, sp
+
+ /*
+ * Register x18 is designated as the 'platform' register by the AAPCS,
+ * which means firmware running at the same exception level as the OS
+ * (such as UEFI) should never touch it.
+ */
+ stp x1, x18, [sp, #16]
+
+ ldr_l x16, efi_rt_stack_top
+ mov sp, x16
+#ifdef CONFIG_SHADOW_CALL_STACK
+ str x18, [sp, #-16]!
+#endif
+
+ /*
+ * We are lucky enough that no EFI runtime services take more than
+ * 5 arguments, so all are passed in registers rather than via the
+ * stack.
+ */
+ mov x8, x0
+ mov x0, x2
+ mov x1, x3
+ mov x2, x4
+ mov x3, x5
+ mov x4, x6
+ blr x8
+
+ mov sp, x29
+ ldp x1, x2, [sp, #16]
+ cmp x2, x18
+ ldp x29, x30, [sp], #32
+ b.ne 0f
+ ret
+0:
+ /*
+ * With CONFIG_SHADOW_CALL_STACK, the kernel uses x18 to store a
+ * shadow stack pointer, which we need to restore before returning to
+ * potentially instrumented code. This is safe because the wrapper is
+ * called with preemption disabled and a separate shadow stack is used
+ * for interrupts.
+ */
+#ifdef CONFIG_SHADOW_CALL_STACK
+ ldr_l x18, efi_rt_stack_top
+ ldr x18, [x18, #-16]
+#endif
+
+ b efi_handle_corrupted_x18 // tail call
+SYM_FUNC_END(__efi_rt_asm_wrapper)
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
new file mode 100644
index 000000000..3ee3b3dac
--- /dev/null
+++ b/arch/arm64/kernel/efi.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Extensible Firmware Interface
+ *
+ * Based on Extensible Firmware Interface Specification version 2.4
+ *
+ * Copyright (C) 2013, 2014 Linaro Ltd.
+ */
+
+#include <linux/efi.h>
+#include <linux/init.h>
+
+#include <asm/efi.h>
+
+static bool region_is_misaligned(const efi_memory_desc_t *md)
+{
+ if (PAGE_SIZE == EFI_PAGE_SIZE)
+ return false;
+ return !PAGE_ALIGNED(md->phys_addr) ||
+ !PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT);
+}
+
+/*
+ * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
+ * executable; everything else can be mapped with the XN bits
+ * set. Also take the new (optional) RO/XP bits into account.
+ */
+static __init pteval_t create_mapping_protection(efi_memory_desc_t *md)
+{
+ u64 attr = md->attribute;
+ u32 type = md->type;
+
+ if (type == EFI_MEMORY_MAPPED_IO)
+ return PROT_DEVICE_nGnRE;
+
+ if (region_is_misaligned(md)) {
+ static bool __initdata code_is_misaligned;
+
+ /*
+ * Regions that are not aligned to the OS page size cannot be
+ * mapped with strict permissions, as those might interfere
+ * with the permissions that are needed by the adjacent
+ * region's mapping. However, if we haven't encountered any
+ * misaligned runtime code regions so far, we can safely use
+ * non-executable permissions for non-code regions.
+ */
+ code_is_misaligned |= (type == EFI_RUNTIME_SERVICES_CODE);
+
+ return code_is_misaligned ? pgprot_val(PAGE_KERNEL_EXEC)
+ : pgprot_val(PAGE_KERNEL);
+ }
+
+ /* R-- */
+ if ((attr & (EFI_MEMORY_XP | EFI_MEMORY_RO)) ==
+ (EFI_MEMORY_XP | EFI_MEMORY_RO))
+ return pgprot_val(PAGE_KERNEL_RO);
+
+ /* R-X */
+ if (attr & EFI_MEMORY_RO)
+ return pgprot_val(PAGE_KERNEL_ROX);
+
+ /* RW- */
+ if (((attr & (EFI_MEMORY_RP | EFI_MEMORY_WP | EFI_MEMORY_XP)) ==
+ EFI_MEMORY_XP) ||
+ type != EFI_RUNTIME_SERVICES_CODE)
+ return pgprot_val(PAGE_KERNEL);
+
+ /* RWX */
+ return pgprot_val(PAGE_KERNEL_EXEC);
+}
+
+/* we will fill this structure from the stub, so don't put it in .bss */
+struct screen_info screen_info __section(".data");
+
+int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
+{
+ pteval_t prot_val = create_mapping_protection(md);
+ bool page_mappings_only = (md->type == EFI_RUNTIME_SERVICES_CODE ||
+ md->type == EFI_RUNTIME_SERVICES_DATA);
+
+ /*
+ * If this region is not aligned to the page size used by the OS, the
+ * mapping will be rounded outwards, and may end up sharing a page
+ * frame with an adjacent runtime memory region. Given that the page
+ * table descriptor covering the shared page will be rewritten when the
+ * adjacent region gets mapped, we must avoid block mappings here so we
+ * don't have to worry about splitting them when that happens.
+ */
+ if (region_is_misaligned(md))
+ page_mappings_only = true;
+
+ create_pgd_mapping(mm, md->phys_addr, md->virt_addr,
+ md->num_pages << EFI_PAGE_SHIFT,
+ __pgprot(prot_val | PTE_NG), page_mappings_only);
+ return 0;
+}
+
+static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data)
+{
+ efi_memory_desc_t *md = data;
+ pte_t pte = READ_ONCE(*ptep);
+
+ if (md->attribute & EFI_MEMORY_RO)
+ pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
+ if (md->attribute & EFI_MEMORY_XP)
+ pte = set_pte_bit(pte, __pgprot(PTE_PXN));
+ set_pte(ptep, pte);
+ return 0;
+}
+
+int __init efi_set_mapping_permissions(struct mm_struct *mm,
+ efi_memory_desc_t *md)
+{
+ BUG_ON(md->type != EFI_RUNTIME_SERVICES_CODE &&
+ md->type != EFI_RUNTIME_SERVICES_DATA);
+
+ if (region_is_misaligned(md))
+ return 0;
+
+ /*
+ * Calling apply_to_page_range() is only safe on regions that are
+ * guaranteed to be mapped down to pages. Since we are only called
+ * for regions that have been mapped using efi_create_mapping() above
+ * (and this is checked by the generic Memory Attributes table parsing
+ * routines), there is no need to check that again here.
+ */
+ return apply_to_page_range(mm, md->virt_addr,
+ md->num_pages << EFI_PAGE_SHIFT,
+ set_permissions, md);
+}
+
+/*
+ * UpdateCapsule() depends on the system being shutdown via
+ * ResetSystem().
+ */
+bool efi_poweroff_required(void)
+{
+ return efi_enabled(EFI_RUNTIME_SERVICES);
+}
+
+asmlinkage efi_status_t efi_handle_corrupted_x18(efi_status_t s, const char *f)
+{
+ pr_err_ratelimited(FW_BUG "register x18 corrupted by EFI %s\n", f);
+ return s;
+}
+
+DEFINE_RAW_SPINLOCK(efi_rt_lock);
+
+asmlinkage u64 *efi_rt_stack_top __ro_after_init;
+
+/* EFI requires 8 KiB of stack space for runtime services */
+static_assert(THREAD_SIZE >= SZ_8K);
+
+static int __init arm64_efi_rt_init(void)
+{
+ void *p;
+
+ if (!efi_enabled(EFI_RUNTIME_SERVICES))
+ return 0;
+
+ p = __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, GFP_KERNEL,
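+ /*
+ * The final __vmalloc_node() argument is the "caller" recorded for
+ * vmalloc accounting; the computed label address &&l is used for
+ * that purpose here.
+ */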
+ NUMA_NO_NODE, &&l);
+l: if (!p) {
+ pr_warn("Failed to allocate EFI runtime stack\n");
+ clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+ return -ENOMEM;
+ }
+
+ efi_rt_stack_top = p + THREAD_SIZE;
+ return 0;
+}
+core_initcall(arm64_efi_rt_init);
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
new file mode 100644
index 000000000..7a8cd856e
--- /dev/null
+++ b/arch/arm64/kernel/entry-common.c
@@ -0,0 +1,485 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Exception handling code
+ *
+ * Copyright (C) 2019 ARM Ltd.
+ */
+
+#include <linux/context_tracking.h>
+#include <linux/ptrace.h>
+#include <linux/thread_info.h>
+
+#include <asm/cpufeature.h>
+#include <asm/daifflags.h>
+#include <asm/esr.h>
+#include <asm/exception.h>
+#include <asm/kprobes.h>
+#include <asm/mmu.h>
+#include <asm/sysreg.h>
+
+/*
+ * This is intended to match the logic in irqentry_enter(), handling the kernel
+ * mode transitions only.
+ */
+static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
+{
+ regs->exit_rcu = false;
+
+ if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
+ lockdep_hardirqs_off(CALLER_ADDR0);
+ rcu_irq_enter();
+ trace_hardirqs_off_finish();
+
+ regs->exit_rcu = true;
+ return;
+ }
+
+ lockdep_hardirqs_off(CALLER_ADDR0);
+ rcu_irq_enter_check_tick();
+ trace_hardirqs_off_finish();
+}
+
+/*
+ * This is intended to match the logic in irqentry_exit(), handling the kernel
+ * mode transitions only, and with preemption handled elsewhere.
+ */
+static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
+{
+ lockdep_assert_irqs_disabled();
+
+ if (interrupts_enabled(regs)) {
+ if (regs->exit_rcu) {
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+ rcu_irq_exit();
+ lockdep_hardirqs_on(CALLER_ADDR0);
+ return;
+ }
+
+ trace_hardirqs_on();
+ } else {
+ if (regs->exit_rcu)
+ rcu_irq_exit();
+ }
+}
+
+void noinstr arm64_enter_nmi(struct pt_regs *regs)
+{
+ regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
+
+ __nmi_enter();
+ lockdep_hardirqs_off(CALLER_ADDR0);
+ lockdep_hardirq_enter();
+ rcu_nmi_enter();
+
+ trace_hardirqs_off_finish();
+ ftrace_nmi_enter();
+}
+
+void noinstr arm64_exit_nmi(struct pt_regs *regs)
+{
+ bool restore = regs->lockdep_hardirqs;
+
+ ftrace_nmi_exit();
+ if (restore) {
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+ }
+
+ rcu_nmi_exit();
+ lockdep_hardirq_exit();
+ if (restore)
+ lockdep_hardirqs_on(CALLER_ADDR0);
+ __nmi_exit();
+}
+
+asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
+{
+ if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
+ arm64_enter_nmi(regs);
+ else
+ enter_from_kernel_mode(regs);
+}
+
+asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
+{
+ if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
+ arm64_exit_nmi(regs);
+ else
+ exit_to_kernel_mode(regs);
+}
+
+static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
+{
+ unsigned long far = read_sysreg(far_el1);
+
+ enter_from_kernel_mode(regs);
+ local_daif_inherit(regs);
+ far = untagged_addr(far);
+ do_mem_abort(far, esr, regs);
+ local_daif_mask();
+ exit_to_kernel_mode(regs);
+}
+
+static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
+{
+ unsigned long far = read_sysreg(far_el1);
+
+ enter_from_kernel_mode(regs);
+ local_daif_inherit(regs);
+ do_sp_pc_abort(far, esr, regs);
+ local_daif_mask();
+ exit_to_kernel_mode(regs);
+}
+
+static void noinstr el1_undef(struct pt_regs *regs, unsigned long esr)
+{
+ enter_from_kernel_mode(regs);
+ local_daif_inherit(regs);
+ do_el1_undef(regs, esr);
+ local_daif_mask();
+ exit_to_kernel_mode(regs);
+}
+
+static void noinstr el1_bti(struct pt_regs *regs, unsigned long esr)
+{
+ enter_from_kernel_mode(regs);
+ local_daif_inherit(regs);
+ do_el1_bti(regs, esr);
+ local_daif_mask();
+ exit_to_kernel_mode(regs);
+}
+
+static void noinstr el1_inv(struct pt_regs *regs, unsigned long esr)
+{
+ enter_from_kernel_mode(regs);
+ local_daif_inherit(regs);
+ bad_mode(regs, 0, esr);
+ local_daif_mask();
+ exit_to_kernel_mode(regs);
+}
+
+static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
+{
+ regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
+
+ lockdep_hardirqs_off(CALLER_ADDR0);
+ rcu_nmi_enter();
+
+ trace_hardirqs_off_finish();
+}
+
+static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
+{
+ bool restore = regs->lockdep_hardirqs;
+
+ if (restore) {
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+ }
+
+ rcu_nmi_exit();
+ if (restore)
+ lockdep_hardirqs_on(CALLER_ADDR0);
+}
+
+static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
+{
+ unsigned long far = read_sysreg(far_el1);
+
+ arm64_enter_el1_dbg(regs);
+ do_debug_exception(far, esr, regs);
+ arm64_exit_el1_dbg(regs);
+}
+
+static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
+{
+ enter_from_kernel_mode(regs);
+ local_daif_inherit(regs);
+ do_el1_fpac(regs, esr);
+ local_daif_mask();
+ exit_to_kernel_mode(regs);
+}
+
+asmlinkage void noinstr el1_sync_handler(struct pt_regs *regs)
+{
+ unsigned long esr = read_sysreg(esr_el1);
+
+ switch (ESR_ELx_EC(esr)) {
+ case ESR_ELx_EC_DABT_CUR:
+ case ESR_ELx_EC_IABT_CUR:
+ el1_abort(regs, esr);
+ break;
+ /*
+ * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
+ * recursive exception when trying to push the initial pt_regs.
+ */
+ case ESR_ELx_EC_PC_ALIGN:
+ el1_pc(regs, esr);
+ break;
+ case ESR_ELx_EC_SYS64:
+ case ESR_ELx_EC_UNKNOWN:
+ el1_undef(regs, esr);
+ break;
+ case ESR_ELx_EC_BTI:
+ el1_bti(regs, esr);
+ break;
+ case ESR_ELx_EC_BREAKPT_CUR:
+ case ESR_ELx_EC_SOFTSTP_CUR:
+ case ESR_ELx_EC_WATCHPT_CUR:
+ case ESR_ELx_EC_BRK64:
+ el1_dbg(regs, esr);
+ break;
+ case ESR_ELx_EC_FPAC:
+ el1_fpac(regs, esr);
+ break;
+ default:
+ el1_inv(regs, esr);
+ }
+}
+
+asmlinkage void noinstr enter_from_user_mode(void)
+{
+ lockdep_hardirqs_off(CALLER_ADDR0);
+ CT_WARN_ON(ct_state() != CONTEXT_USER);
+ user_exit_irqoff();
+ trace_hardirqs_off_finish();
+}
+
+asmlinkage void noinstr exit_to_user_mode(void)
+{
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+ user_enter_irqoff();
+ lockdep_hardirqs_on(CALLER_ADDR0);
+}
+
+static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
+{
+ unsigned long far = read_sysreg(far_el1);
+
+ enter_from_user_mode();
+ local_daif_restore(DAIF_PROCCTX);
+ far = untagged_addr(far);
+ do_mem_abort(far, esr, regs);
+}
+
+static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
+{
+ unsigned long far = read_sysreg(far_el1);
+
+ /*
+ * We've taken an instruction abort from userspace and not yet
+ * re-enabled IRQs. If the address is a kernel address, apply
+ * BP hardening prior to enabling IRQs and pre-emption.
+ */
+ if (!is_ttbr0_addr(far))
+ arm64_apply_bp_hardening();
+
+ enter_from_user_mode();
+ local_daif_restore(DAIF_PROCCTX);
+ do_mem_abort(far, esr, regs);
+}
+
+static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
+{
+ enter_from_user_mode();
+ local_daif_restore(DAIF_PROCCTX);
+ do_fpsimd_acc(esr, regs);
+}
+
+static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
+{
+ enter_from_user_mode();
+ local_daif_restore(DAIF_PROCCTX);
+ do_sve_acc(esr, regs);
+}
+
+static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
+{
+ enter_from_user_mode();
+ local_daif_restore(DAIF_PROCCTX);
+ do_fpsimd_exc(esr, regs);
+}
+
+static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
+{
+ enter_from_user_mode();
+ local_daif_restore(DAIF_PROCCTX);
+ do_el0_sys(esr, regs);
+}
+
+static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
+{
+ unsigned long far = read_sysreg(far_el1);
+
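+ /* As in el0_ia(), apply BP hardening if the faulting PC is a kernel address */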
+ if (!is_ttbr0_addr(instruction_pointer(regs)))
+ arm64_apply_bp_hardening();
+
+ enter_from_user_mode();
+ local_daif_restore(DAIF_PROCCTX);
+ do_sp_pc_abort(far, esr, regs);
+}
+
+static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
+{
+ enter_from_user_mode();
+ local_daif_restore(DAIF_PROCCTX);
+ do_sp_pc_abort(regs->sp, esr, regs);
+}
+
+static void noinstr el0_undef(struct pt_regs *regs, unsigned long esr)
+{
+ enter_from_user_mode();
+ local_daif_restore(DAIF_PROCCTX);
+ do_el0_undef(regs, esr);
+}
+
+static void noinstr el0_bti(struct pt_regs *regs)
+{
+ enter_from_user_mode();
+ local_daif_restore(DAIF_PROCCTX);
+ do_el0_bti(regs);
+}
+
+static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
+{
+ enter_from_user_mode();
+ local_daif_restore(DAIF_PROCCTX);
+ bad_el0_sync(regs, 0, esr);
+}
+
+static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
+{
+ /* Only watchpoints write FAR_EL1, otherwise it's UNKNOWN */
+ unsigned long far = read_sysreg(far_el1);
+
+ enter_from_user_mode();
+ do_debug_exception(far, esr, regs);
+ local_daif_restore(DAIF_PROCCTX_NOIRQ);
+}
+
+static void noinstr el0_svc(struct pt_regs *regs)
+{
+ enter_from_user_mode();
+ do_el0_svc(regs);
+}
+
+static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
+{
+ enter_from_user_mode();
+ local_daif_restore(DAIF_PROCCTX);
+ do_el0_fpac(regs, esr);
+}
+
+asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs)
+{
+ unsigned long esr = read_sysreg(esr_el1);
+
+ switch (ESR_ELx_EC(esr)) {
+ case ESR_ELx_EC_SVC64:
+ el0_svc(regs);
+ break;
+ case ESR_ELx_EC_DABT_LOW:
+ el0_da(regs, esr);
+ break;
+ case ESR_ELx_EC_IABT_LOW:
+ el0_ia(regs, esr);
+ break;
+ case ESR_ELx_EC_FP_ASIMD:
+ el0_fpsimd_acc(regs, esr);
+ break;
+ case ESR_ELx_EC_SVE:
+ el0_sve_acc(regs, esr);
+ break;
+ case ESR_ELx_EC_FP_EXC64:
+ el0_fpsimd_exc(regs, esr);
+ break;
+ case ESR_ELx_EC_SYS64:
+ case ESR_ELx_EC_WFx:
+ el0_sys(regs, esr);
+ break;
+ case ESR_ELx_EC_SP_ALIGN:
+ el0_sp(regs, esr);
+ break;
+ case ESR_ELx_EC_PC_ALIGN:
+ el0_pc(regs, esr);
+ break;
+ case ESR_ELx_EC_UNKNOWN:
+ el0_undef(regs, esr);
+ break;
+ case ESR_ELx_EC_BTI:
+ el0_bti(regs);
+ break;
+ case ESR_ELx_EC_BREAKPT_LOW:
+ case ESR_ELx_EC_SOFTSTP_LOW:
+ case ESR_ELx_EC_WATCHPT_LOW:
+ case ESR_ELx_EC_BRK64:
+ el0_dbg(regs, esr);
+ break;
+ case ESR_ELx_EC_FPAC:
+ el0_fpac(regs, esr);
+ break;
+ default:
+ el0_inv(regs, esr);
+ }
+}
+
+#ifdef CONFIG_COMPAT
+static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
+{
+ enter_from_user_mode();
+ local_daif_restore(DAIF_PROCCTX);
+ do_el0_cp15(esr, regs);
+}
+
+static void noinstr el0_svc_compat(struct pt_regs *regs)
+{
+ enter_from_user_mode();
+ do_el0_svc_compat(regs);
+}
+
+asmlinkage void noinstr el0_sync_compat_handler(struct pt_regs *regs)
+{
+ unsigned long esr = read_sysreg(esr_el1);
+
+ switch (ESR_ELx_EC(esr)) {
+ case ESR_ELx_EC_SVC32:
+ el0_svc_compat(regs);
+ break;
+ case ESR_ELx_EC_DABT_LOW:
+ el0_da(regs, esr);
+ break;
+ case ESR_ELx_EC_IABT_LOW:
+ el0_ia(regs, esr);
+ break;
+ case ESR_ELx_EC_FP_ASIMD:
+ el0_fpsimd_acc(regs, esr);
+ break;
+ case ESR_ELx_EC_FP_EXC32:
+ el0_fpsimd_exc(regs, esr);
+ break;
+ case ESR_ELx_EC_PC_ALIGN:
+ el0_pc(regs, esr);
+ break;
+ case ESR_ELx_EC_UNKNOWN:
+ case ESR_ELx_EC_CP14_MR:
+ case ESR_ELx_EC_CP14_LS:
+ case ESR_ELx_EC_CP14_64:
+ el0_undef(regs, esr);
+ break;
+ case ESR_ELx_EC_CP15_32:
+ case ESR_ELx_EC_CP15_64:
+ el0_cp15(regs, esr);
+ break;
+ case ESR_ELx_EC_BREAKPT_LOW:
+ case ESR_ELx_EC_SOFTSTP_LOW:
+ case ESR_ELx_EC_WATCHPT_LOW:
+ case ESR_ELx_EC_BKPT32:
+ el0_dbg(regs, esr);
+ break;
+ default:
+ el0_inv(regs, esr);
+ }
+}
+#endif /* CONFIG_COMPAT */
diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S
new file mode 100644
index 000000000..2ca395c25
--- /dev/null
+++ b/arch/arm64/kernel/entry-fpsimd.S
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * FP/SIMD state saving and restoring
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Catalin Marinas <catalin.marinas@arm.com>
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/assembler.h>
+#include <asm/fpsimdmacros.h>
+
+/*
+ * Save the FP registers.
+ *
+ * x0 - pointer to struct fpsimd_state
+ */
+SYM_FUNC_START(fpsimd_save_state)
+ fpsimd_save x0, 8
+ ret
+SYM_FUNC_END(fpsimd_save_state)
+
+/*
+ * Load the FP registers.
+ *
+ * x0 - pointer to struct fpsimd_state
+ */
+SYM_FUNC_START(fpsimd_load_state)
+ fpsimd_restore x0, 8
+ ret
+SYM_FUNC_END(fpsimd_load_state)
+
+#ifdef CONFIG_ARM64_SVE
+
+SYM_FUNC_START(sve_save_state)
+ sve_save 0, x1, 2
+ ret
+SYM_FUNC_END(sve_save_state)
+
+SYM_FUNC_START(sve_load_state)
+ sve_load 0, x1, x2, 3, x4
+ ret
+SYM_FUNC_END(sve_load_state)
+
+SYM_FUNC_START(sve_get_vl)
+ _sve_rdvl 0, 1
+ ret
+SYM_FUNC_END(sve_get_vl)
+
+/*
+ * Load SVE state from FPSIMD state.
+ *
+ * x0 = pointer to struct fpsimd_state
+ * x1 = VQ - 1
+ *
+ * Each SVE vector will be loaded with the first 128-bits taken from FPSIMD
+ * and the rest zeroed. All the other SVE registers will be zeroed.
+ */
+SYM_FUNC_START(sve_load_from_fpsimd_state)
+ sve_load_vq x1, x2, x3
+ fpsimd_restore x0, 8
+ _for n, 0, 15, _sve_pfalse \n
+ _sve_wrffr 0
+ ret
+SYM_FUNC_END(sve_load_from_fpsimd_state)
+
+/* Zero all SVE registers but the first 128-bits of each vector */
+SYM_FUNC_START(sve_flush_live)
+ sve_flush
+ ret
+SYM_FUNC_END(sve_flush_live)
+
+#endif /* CONFIG_ARM64_SVE */
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
new file mode 100644
index 000000000..67f68c9ef
--- /dev/null
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -0,0 +1,351 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm64/kernel/entry-ftrace.S
+ *
+ * Copyright (C) 2013 Linaro Limited
+ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
+ */
+
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/assembler.h>
+#include <asm/ftrace.h>
+#include <asm/insn.h>
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+/*
+ * Due to -fpatchable-function-entry=2, the compiler has placed two NOPs before
+ * the regular function prologue. For an enabled callsite, ftrace_init_nop() and
+ * ftrace_make_call() have patched those NOPs to:
+ *
+ * MOV X9, LR
+ * BL <entry>
+ *
+ * ... where <entry> is either ftrace_caller or ftrace_regs_caller.
+ *
+ * Each instrumented function follows the AAPCS, so here x0-x8 and x18-x30 are
+ * live (x18 holds the Shadow Call Stack pointer), and x9-x17 are safe to
+ * clobber.
+ *
+ * We save the callsite's context into a pt_regs before invoking any ftrace
+ * callbacks. So that we can get a sensible backtrace, we create a stack record
+ * for the callsite and the ftrace entry assembly. This is not sufficient for
+ * reliable stacktrace: until we create the callsite stack record, its caller
+ * is missing from the LR and existing chain of frame records.
+ */
+ .macro ftrace_regs_entry, allregs=0
+ /* Make room for pt_regs, plus a callee frame */
+ sub sp, sp, #(S_FRAME_SIZE + 16)
+
+ /* Save function arguments (and x9 for simplicity) */
+ stp x0, x1, [sp, #S_X0]
+ stp x2, x3, [sp, #S_X2]
+ stp x4, x5, [sp, #S_X4]
+ stp x6, x7, [sp, #S_X6]
+ stp x8, x9, [sp, #S_X8]
+
+ /* Optionally save the callee-saved registers, always save the FP */
+ .if \allregs == 1
+ stp x10, x11, [sp, #S_X10]
+ stp x12, x13, [sp, #S_X12]
+ stp x14, x15, [sp, #S_X14]
+ stp x16, x17, [sp, #S_X16]
+ stp x18, x19, [sp, #S_X18]
+ stp x20, x21, [sp, #S_X20]
+ stp x22, x23, [sp, #S_X22]
+ stp x24, x25, [sp, #S_X24]
+ stp x26, x27, [sp, #S_X26]
+ stp x28, x29, [sp, #S_X28]
+ .else
+ str x29, [sp, #S_FP]
+ .endif
+
+ /* Save the callsite's SP and LR */
+ add x10, sp, #(S_FRAME_SIZE + 16)
+ stp x9, x10, [sp, #S_LR]
+
+ /* Save the PC after the ftrace callsite */
+ str x30, [sp, #S_PC]
+
+ /* Create a frame record for the callsite above pt_regs */
+ stp x29, x9, [sp, #S_FRAME_SIZE]
+ add x29, sp, #S_FRAME_SIZE
+
+ /* Create our frame record within pt_regs. */
+ stp x29, x30, [sp, #S_STACKFRAME]
+ add x29, sp, #S_STACKFRAME
+ .endm
+
+SYM_CODE_START(ftrace_regs_caller)
+#ifdef BTI_C
+ BTI_C
+#endif
+ ftrace_regs_entry 1
+ b ftrace_common
+SYM_CODE_END(ftrace_regs_caller)
+
+SYM_CODE_START(ftrace_caller)
+#ifdef BTI_C
+ BTI_C
+#endif
+ ftrace_regs_entry 0
+ b ftrace_common
+SYM_CODE_END(ftrace_caller)
+
+SYM_CODE_START(ftrace_common)
+ sub x0, x30, #AARCH64_INSN_SIZE // ip (callsite's BL insn)
+ mov x1, x9 // parent_ip (callsite's LR)
+ ldr_l x2, function_trace_op // op
+ mov x3, sp // regs
+
+SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
+ bl ftrace_stub
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) // ftrace_graph_caller();
+ nop // If enabled, this will be replaced
+ // "b ftrace_graph_caller"
+#endif
+
+/*
+ * At the callsite x0-x8 and x19-x30 were live. Any C code will have preserved
+ * x19-x29 per the AAPCS, and we created frame records upon entry, so we need
+ * to restore x0-x8, x29, and x30.
+ */
+ftrace_common_return:
+ /* Restore function arguments */
+ ldp x0, x1, [sp]
+ ldp x2, x3, [sp, #S_X2]
+ ldp x4, x5, [sp, #S_X4]
+ ldp x6, x7, [sp, #S_X6]
+ ldr x8, [sp, #S_X8]
+
+ /* Restore the callsite's FP, LR, PC */
+ ldr x29, [sp, #S_FP]
+ ldr x30, [sp, #S_LR]
+ ldr x9, [sp, #S_PC]
+
+ /* Restore the callsite's SP */
+ add sp, sp, #S_FRAME_SIZE + 16
+
+ ret x9
+SYM_CODE_END(ftrace_common)
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+SYM_CODE_START(ftrace_graph_caller)
+ ldr x0, [sp, #S_PC]
+ sub x0, x0, #AARCH64_INSN_SIZE // ip (callsite's BL insn)
+ add x1, sp, #S_LR // parent_ip (callsite's LR)
+ ldr x2, [sp, #S_FRAME_SIZE] // parent fp (callsite's FP)
+ bl prepare_ftrace_return
+ b ftrace_common_return
+SYM_CODE_END(ftrace_graph_caller)
+#endif
+
+#else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
+
+/*
+ * GCC with -pg puts the following code at the beginning of each function:
+ * mov x0, x30
+ * bl _mcount
+ * [function's body ...]
+ * The "bl _mcount" may be replaced with "bl ftrace_caller" or a NOP if
+ * dynamic ftrace is enabled.
+ *
+ * Note that the x0 argument is not used here, because the instrumented
+ * function's lr (x30) can be recovered at any time by unwinding the call
+ * stack, as long as the kernel is compiled without -fomit-frame-pointer
+ * (i.e. with CONFIG_FRAME_POINTER, which is forced on arm64).
+ *
+ * stack layout after mcount_enter in _mcount():
+ *
+ * current sp/fp => 0:+-----+
+ * in _mcount() | x29 | -> instrumented function's fp
+ * +-----+
+ * | x30 | -> _mcount()'s lr (= instrumented function's pc)
+ * old sp => +16:+-----+
+ * when instrumented | |
+ * function calls | ... |
+ * _mcount() | |
+ * | |
+ * instrumented => +xx:+-----+
+ * function's fp | x29 | -> parent's fp
+ * +-----+
+ * | x30 | -> instrumented function's lr (= parent's pc)
+ * +-----+
+ * | ... |
+ */
+
+ .macro mcount_enter
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
+ .endm
+
+ .macro mcount_exit
+ ldp x29, x30, [sp], #16
+ ret
+ .endm
+
+ .macro mcount_adjust_addr rd, rn
+ sub \rd, \rn, #AARCH64_INSN_SIZE
+ .endm
+
+ /* for instrumented function's parent */
+ .macro mcount_get_parent_fp reg
+ ldr \reg, [x29]
+ ldr \reg, [\reg]
+ .endm
+
+ /* for instrumented function */
+ .macro mcount_get_pc0 reg
+ mcount_adjust_addr \reg, x30
+ .endm
+
+ .macro mcount_get_pc reg
+ ldr \reg, [x29, #8]
+ mcount_adjust_addr \reg, \reg
+ .endm
+
+ .macro mcount_get_lr reg
+ ldr \reg, [x29]
+ ldr \reg, [\reg, #8]
+ .endm
+
+ .macro mcount_get_lr_addr reg
+ ldr \reg, [x29]
+ add \reg, \reg, #8
+ .endm
+
+#ifndef CONFIG_DYNAMIC_FTRACE
+/*
+ * void _mcount(unsigned long return_address)
+ * @return_address: return address to instrumented function
+ *
+ * This function makes calls, if enabled, to:
+ * - tracer function to probe instrumented function's entry,
+ * - ftrace_graph_caller to set up an exit hook
+ */
+SYM_FUNC_START(_mcount)
+ mcount_enter
+
+ ldr_l x2, ftrace_trace_function
+ adr x0, ftrace_stub
+ cmp x0, x2 // if (ftrace_trace_function
+ b.eq skip_ftrace_call // != ftrace_stub) {
+
+ mcount_get_pc x0 // function's pc
+ mcount_get_lr x1 // function's lr (= parent's pc)
+ blr x2 // (*ftrace_trace_function)(pc, lr);
+
+skip_ftrace_call: // }
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ ldr_l x2, ftrace_graph_return
+ cmp x0, x2 // if ((ftrace_graph_return
+ b.ne ftrace_graph_caller // != ftrace_stub)
+
+ ldr_l x2, ftrace_graph_entry // || (ftrace_graph_entry
+ adr_l x0, ftrace_graph_entry_stub // != ftrace_graph_entry_stub))
+ cmp x0, x2
+ b.ne ftrace_graph_caller // ftrace_graph_caller();
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+ mcount_exit
+SYM_FUNC_END(_mcount)
+EXPORT_SYMBOL(_mcount)
+NOKPROBE(_mcount)
+
+#else /* CONFIG_DYNAMIC_FTRACE */
+/*
+ * _mcount() is used when the kernel is built with the -pg option, but all
+ * branch instructions to _mcount() are initially replaced with NOPs at
+ * kernel start up, and later patched from NOP to a branch to ftrace_caller()
+ * when enabled, or back to NOP when disabled, on a per-function basis.
+ */
+SYM_FUNC_START(_mcount)
+ ret
+SYM_FUNC_END(_mcount)
+EXPORT_SYMBOL(_mcount)
+NOKPROBE(_mcount)
+
+/*
+ * void ftrace_caller(unsigned long return_address)
+ * @return_address: return address to instrumented function
+ *
+ * This function is a counterpart of _mcount() in 'static' ftrace, and
+ * makes calls to:
+ * - tracer function to probe instrumented function's entry,
+ * - ftrace_graph_caller to set up an exit hook
+ */
+SYM_FUNC_START(ftrace_caller)
+ mcount_enter
+
+ mcount_get_pc0 x0 // function's pc
+ mcount_get_lr x1 // function's lr
+
+SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL) // tracer(pc, lr);
+ nop // This will be replaced with "bl xxx"
+ // where xxx can be any kind of tracer.
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) // ftrace_graph_caller();
+ nop // If enabled, this will be replaced
+ // "b ftrace_graph_caller"
+#endif
+
+ mcount_exit
+SYM_FUNC_END(ftrace_caller)
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+/*
+ * void ftrace_graph_caller(void)
+ *
+ * Called from _mcount() or ftrace_caller() when function_graph tracer is
+ * selected.
+ * This function w/ prepare_ftrace_return() fakes link register's value on
+ * the call stack in order to intercept instrumented function's return path
+ * and run return_to_handler() later on its exit.
+ */
+SYM_FUNC_START(ftrace_graph_caller)
+ mcount_get_pc x0 // function's pc
+ mcount_get_lr_addr x1 // pointer to function's saved lr
+ mcount_get_parent_fp x2 // parent's fp
+ bl prepare_ftrace_return // prepare_ftrace_return(pc, &lr, fp)
+
+ mcount_exit
+SYM_FUNC_END(ftrace_graph_caller)
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
+
+SYM_FUNC_START(ftrace_stub)
+ ret
+SYM_FUNC_END(ftrace_stub)
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+/*
+ * void return_to_handler(void)
+ *
+ * Run ftrace_return_to_handler() before going back to parent.
+ * @fp is checked against the value passed by ftrace_graph_caller().
+ */
+SYM_CODE_START(return_to_handler)
+ /* save return value regs */
+ sub sp, sp, #64
+ stp x0, x1, [sp]
+ stp x2, x3, [sp, #16]
+ stp x4, x5, [sp, #32]
+ stp x6, x7, [sp, #48]
+
+ mov x0, x29 // parent's fp
+ bl ftrace_return_to_handler // addr = ftrace_return_to_handler(fp);
+ mov x30, x0 // restore the original return address
+
+ /* restore return value regs */
+ ldp x0, x1, [sp]
+ ldp x2, x3, [sp, #16]
+ ldp x4, x5, [sp, #32]
+ ldp x6, x7, [sp, #48]
+ add sp, sp, #64
+
+ ret
+SYM_CODE_END(return_to_handler)
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
new file mode 100644
index 000000000..a94acea77
--- /dev/null
+++ b/arch/arm64/kernel/entry.S
@@ -0,0 +1,1240 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Low-level exception handling code
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Authors: Catalin Marinas <catalin.marinas@arm.com>
+ * Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/init.h>
+#include <linux/linkage.h>
+
+#include <asm/alternative.h>
+#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
+#include <asm/asm_pointer_auth.h>
+#include <asm/bug.h>
+#include <asm/cpufeature.h>
+#include <asm/errno.h>
+#include <asm/esr.h>
+#include <asm/irq.h>
+#include <asm/memory.h>
+#include <asm/mmu.h>
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+#include <asm/scs.h>
+#include <asm/thread_info.h>
+#include <asm/asm-uaccess.h>
+#include <asm/unistd.h>
+
+/*
+ * Context tracking and irqflag tracing need to instrument transitions between
+ * user and kernel mode.
+ */
+ .macro user_exit_irqoff
+#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
+ bl enter_from_user_mode
+#endif
+ .endm
+
+ .macro user_enter_irqoff
+#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
+ bl exit_to_user_mode
+#endif
+ .endm
+
+ .macro clear_gp_regs
+ .irp n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
+ mov x\n, xzr
+ .endr
+ .endm
+
+/*
+ * Bad Abort numbers
+ *-----------------
+ */
+#define BAD_SYNC 0
+#define BAD_IRQ 1
+#define BAD_FIQ 2
+#define BAD_ERROR 3
+
+ .macro kernel_ventry, el, label, regsize = 64
+ .align 7
+.Lventry_start\@:
+ .if \el == 0
+ /*
+ * This must be the first instruction of the EL0 vector entries. It is
+ * skipped by the trampoline vectors, to trigger the cleanup.
+ */
+ b .Lskip_tramp_vectors_cleanup\@
+ .if \regsize == 64
+ mrs x30, tpidrro_el0
+ msr tpidrro_el0, xzr
+ .else
+ mov x30, xzr
+ .endif
+.Lskip_tramp_vectors_cleanup\@:
+ .endif
+
+ sub sp, sp, #S_FRAME_SIZE
+#ifdef CONFIG_VMAP_STACK
+ /*
+ * Test whether the SP has overflowed, without corrupting a GPR.
+ * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
+ * should always be zero.
+ */
+ add sp, sp, x0 // sp' = sp + x0
+ sub x0, sp, x0 // x0' = sp' - x0 = (sp + x0) - x0 = sp
+ tbnz x0, #THREAD_SHIFT, 0f
+ sub x0, sp, x0 // x0'' = sp' - x0' = (sp + x0) - sp = x0
+ sub sp, sp, x0 // sp'' = sp' - x0 = (sp + x0) - x0 = sp
+ b el\()\el\()_\label
+
+0:
+ /*
+ * Either we've just detected an overflow, or we've taken an exception
+ * while on the overflow stack. Either way, we won't return to
+ * userspace, and can clobber EL0 registers to free up GPRs.
+ */
+
+ /* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */
+ msr tpidr_el0, x0
+
+ /* Recover the original x0 value and stash it in tpidrro_el0 */
+ sub x0, sp, x0
+ msr tpidrro_el0, x0
+
+ /* Switch to the overflow stack */
+ adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0
+
+ /*
+ * Check whether we were already on the overflow stack. This may happen
+ * after panic() re-enables interrupts.
+ */
+ mrs x0, tpidr_el0 // sp of interrupted context
+ sub x0, sp, x0 // delta with top of overflow stack
+ tst x0, #~(OVERFLOW_STACK_SIZE - 1) // within range?
+ b.ne __bad_stack // no? -> bad stack pointer
+
+ /* We were already on the overflow stack. Restore sp/x0 and carry on. */
+ sub sp, sp, x0
+ mrs x0, tpidrro_el0
+#endif
+ b el\()\el\()_\label
+.org .Lventry_start\@ + 128 // Did we overflow the ventry slot?
+ .endm
+
+ .macro tramp_alias, dst, sym, tmp
+ mov_q \dst, TRAMP_VALIAS
+ adr_l \tmp, \sym
+ add \dst, \dst, \tmp
+ adr_l \tmp, .entry.tramp.text
+ sub \dst, \dst, \tmp
+ .endm
+
+ /*
+ * This macro corrupts x0-x3. It is the caller's duty to save/restore
+ * them if required.
+ */
+ .macro apply_ssbd, state, tmp1, tmp2
+alternative_cb spectre_v4_patch_fw_mitigation_enable
+ b .L__asm_ssbd_skip\@ // Patched to NOP
+alternative_cb_end
+ ldr_this_cpu \tmp2, arm64_ssbd_callback_required, \tmp1
+ cbz \tmp2, .L__asm_ssbd_skip\@
+ ldr \tmp2, [tsk, #TSK_TI_FLAGS]
+ tbnz \tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
+ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
+ mov w1, #\state
+alternative_cb smccc_patch_fw_mitigation_conduit
+ nop // Patched to SMC/HVC #0
+alternative_cb_end
+.L__asm_ssbd_skip\@:
+ .endm
+
+ /* Check for MTE asynchronous tag check faults */
+ .macro check_mte_async_tcf, tmp, ti_flags
+#ifdef CONFIG_ARM64_MTE
+ .arch_extension lse
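+ /* 'stset' below is an LSE atomic, hence the .arch_extension directive above */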
+alternative_if_not ARM64_MTE
+ b 1f
+alternative_else_nop_endif
+ mrs_s \tmp, SYS_TFSRE0_EL1
+ tbz \tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
+ /* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
+ mov \tmp, #_TIF_MTE_ASYNC_FAULT
+ add \ti_flags, tsk, #TSK_TI_FLAGS
+ stset \tmp, [\ti_flags]
+ msr_s SYS_TFSRE0_EL1, xzr
+1:
+#endif
+ .endm
+
+ /* Clear the MTE asynchronous tag check faults */
+ .macro clear_mte_async_tcf
+#ifdef CONFIG_ARM64_MTE
+alternative_if ARM64_MTE
+ dsb ish
+ msr_s SYS_TFSRE0_EL1, xzr
+alternative_else_nop_endif
+#endif
+ .endm
+
+ .macro kernel_entry, el, regsize = 64
+ .if \regsize == 32
+ mov w0, w0 // zero upper 32 bits of x0
+ .endif
+ stp x0, x1, [sp, #16 * 0]
+ stp x2, x3, [sp, #16 * 1]
+ stp x4, x5, [sp, #16 * 2]
+ stp x6, x7, [sp, #16 * 3]
+ stp x8, x9, [sp, #16 * 4]
+ stp x10, x11, [sp, #16 * 5]
+ stp x12, x13, [sp, #16 * 6]
+ stp x14, x15, [sp, #16 * 7]
+ stp x16, x17, [sp, #16 * 8]
+ stp x18, x19, [sp, #16 * 9]
+ stp x20, x21, [sp, #16 * 10]
+ stp x22, x23, [sp, #16 * 11]
+ stp x24, x25, [sp, #16 * 12]
+ stp x26, x27, [sp, #16 * 13]
+ stp x28, x29, [sp, #16 * 14]
+
+ .if \el == 0
+ clear_gp_regs
+ mrs x21, sp_el0
+ ldr_this_cpu tsk, __entry_task, x20
+ msr sp_el0, tsk
+
+ /*
+ * Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
+ * when scheduling.
+ */
+ ldr x19, [tsk, #TSK_TI_FLAGS]
+ disable_step_tsk x19, x20
+
+ /* Check for asynchronous tag check faults in user space */
+ check_mte_async_tcf x22, x23
+ apply_ssbd 1, x22, x23
+
+ ptrauth_keys_install_kernel tsk, x20, x22, x23
+
+ scs_load_current
+ .else
+ add x21, sp, #S_FRAME_SIZE
+ get_current_task tsk
+ /* Save the task's original addr_limit and set USER_DS */
+ ldr x20, [tsk, #TSK_TI_ADDR_LIMIT]
+ str x20, [sp, #S_ORIG_ADDR_LIMIT]
+ mov x20, #USER_DS
+ str x20, [tsk, #TSK_TI_ADDR_LIMIT]
+ /* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
+ .endif /* \el == 0 */
+ mrs x22, elr_el1
+ mrs x23, spsr_el1
+ stp lr, x21, [sp, #S_LR]
+
+ /*
+ * In order to be able to dump the contents of struct pt_regs at the
+ * time the exception was taken (in case we attempt to walk the call
+ * stack later), chain it together with the stack frames.
+ */
+ .if \el == 0
+ stp xzr, xzr, [sp, #S_STACKFRAME]
+ .else
+ stp x29, x22, [sp, #S_STACKFRAME]
+ .endif
+ add x29, sp, #S_STACKFRAME
+
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+alternative_if_not ARM64_HAS_PAN
+ bl __swpan_entry_el\el
+alternative_else_nop_endif
+#endif
+
+ stp x22, x23, [sp, #S_PC]
+
+ /* Not in a syscall by default (el0_svc overwrites for real syscall) */
+ .if \el == 0
+ mov w21, #NO_SYSCALL
+ str w21, [sp, #S_SYSCALLNO]
+ .endif
+
+ /* Save pmr */
+alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+ mrs_s x20, SYS_ICC_PMR_EL1
+ str x20, [sp, #S_PMR_SAVE]
+ mov x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET
+ msr_s SYS_ICC_PMR_EL1, x20
+alternative_else_nop_endif
+
+ /* Re-enable tag checking (TCO set on exception entry) */
+#ifdef CONFIG_ARM64_MTE
+alternative_if ARM64_MTE
+ SET_PSTATE_TCO(0)
+alternative_else_nop_endif
+#endif
+
+ /*
+ * Registers that may be useful after this macro is invoked:
+ *
+ * x20 - ICC_PMR_EL1
+ * x21 - aborted SP
+ * x22 - aborted PC
+ * x23 - aborted PSTATE
+ */
+ .endm
+
+ .macro kernel_exit, el
+ .if \el != 0
+ disable_daif
+
+ /* Restore the task's original addr_limit. */
+ ldr x20, [sp, #S_ORIG_ADDR_LIMIT]
+ str x20, [tsk, #TSK_TI_ADDR_LIMIT]
+
+ /* No need to restore UAO, it will be restored from SPSR_EL1 */
+ .endif
+
+ /* Restore pmr */
+alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+ ldr x20, [sp, #S_PMR_SAVE]
+ msr_s SYS_ICC_PMR_EL1, x20
+ mrs_s x21, SYS_ICC_CTLR_EL1
+ tbz x21, #6, .L__skip_pmr_sync\@ // Check for ICC_CTLR_EL1.PMHE
+ dsb sy // Ensure priority change is seen by redistributor
+.L__skip_pmr_sync\@:
+alternative_else_nop_endif
+
+ ldp x21, x22, [sp, #S_PC] // load ELR, SPSR
+
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+alternative_if_not ARM64_HAS_PAN
+ bl __swpan_exit_el\el
+alternative_else_nop_endif
+#endif
+
+ .if \el == 0
+ ldr x23, [sp, #S_SP] // load return stack pointer
+ msr sp_el0, x23
+ tst x22, #PSR_MODE32_BIT // native task?
+ b.eq 3f
+
+#ifdef CONFIG_ARM64_ERRATUM_845719
+alternative_if ARM64_WORKAROUND_845719
+#ifdef CONFIG_PID_IN_CONTEXTIDR
+ mrs x29, contextidr_el1
+ msr contextidr_el1, x29
+#else
+ msr contextidr_el1, xzr
+#endif
+alternative_else_nop_endif
+#endif
+3:
+ scs_save tsk, x0
+
+ /* No kernel C function calls after this as user keys are set. */
+ ptrauth_keys_install_user tsk, x0, x1, x2
+
+ apply_ssbd 0, x0, x1
+ .endif
+
+ msr elr_el1, x21 // set up the return data
+ msr spsr_el1, x22
+ ldp x0, x1, [sp, #16 * 0]
+ ldp x2, x3, [sp, #16 * 1]
+ ldp x4, x5, [sp, #16 * 2]
+ ldp x6, x7, [sp, #16 * 3]
+ ldp x8, x9, [sp, #16 * 4]
+ ldp x10, x11, [sp, #16 * 5]
+ ldp x12, x13, [sp, #16 * 6]
+ ldp x14, x15, [sp, #16 * 7]
+ ldp x16, x17, [sp, #16 * 8]
+ ldp x18, x19, [sp, #16 * 9]
+ ldp x20, x21, [sp, #16 * 10]
+ ldp x22, x23, [sp, #16 * 11]
+ ldp x24, x25, [sp, #16 * 12]
+ ldp x26, x27, [sp, #16 * 13]
+ ldp x28, x29, [sp, #16 * 14]
+
+ .if \el == 0
+alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
+ ldr lr, [sp, #S_LR]
+ add sp, sp, #S_FRAME_SIZE // restore sp
+ eret
+alternative_else_nop_endif
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ bne 4f
+ msr far_el1, x29
+ tramp_alias x30, tramp_exit_native, x29
+ br x30
+4:
+ tramp_alias x30, tramp_exit_compat, x29
+ br x30
+#endif
+ .else
+ ldr lr, [sp, #S_LR]
+ add sp, sp, #S_FRAME_SIZE // restore sp
+
+ /* Ensure any device/NC reads complete */
+ alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412
+
+ eret
+ .endif
+ sb
+ .endm
+
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+ /*
+ * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
+ * EL0, there is no need to check the state of TTBR0_EL1 since
+ * accesses are always enabled.
+ * Note that the meaning of this bit differs from the ARMv8.1 PAN
+ * feature as all TTBR0_EL1 accesses are disabled, not just those to
+ * user mappings.
+ */
+SYM_CODE_START_LOCAL(__swpan_entry_el1)
+ mrs x21, ttbr0_el1
+ tst x21, #TTBR_ASID_MASK // Check for the reserved ASID
+ orr x23, x23, #PSR_PAN_BIT // Set the emulated PAN in the saved SPSR
+ b.eq 1f // TTBR0 access already disabled
+ and x23, x23, #~PSR_PAN_BIT // Clear the emulated PAN in the saved SPSR
+SYM_INNER_LABEL(__swpan_entry_el0, SYM_L_LOCAL)
+ __uaccess_ttbr0_disable x21
+1: ret
+SYM_CODE_END(__swpan_entry_el1)
+
+ /*
+ * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
+ * PAN bit checking.
+ */
+SYM_CODE_START_LOCAL(__swpan_exit_el1)
+ tbnz x22, #22, 1f // Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
+ __uaccess_ttbr0_enable x0, x1
+1: and x22, x22, #~PSR_PAN_BIT // ARMv8.0 CPUs do not understand this bit
+ ret
+SYM_CODE_END(__swpan_exit_el1)
+
+SYM_CODE_START_LOCAL(__swpan_exit_el0)
+ __uaccess_ttbr0_enable x0, x1
+ /*
+ * Enable errata workarounds only if returning to user. The only
+ * workaround currently required for TTBR0_EL1 changes are for the
+ * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
+ * corruption).
+ */
+ b post_ttbr_update_workaround
+SYM_CODE_END(__swpan_exit_el0)
+#endif
+
+ .macro irq_stack_entry
+ mov x19, sp // preserve the original sp
+ scs_save tsk // preserve the original shadow stack
+
+ /*
+ * Compare sp with the base of the task stack.
+ * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
+ * and should switch to the irq stack.
+ */
+ ldr x25, [tsk, TSK_STACK]
+ eor x25, x25, x19
+ and x25, x25, #~(THREAD_SIZE - 1)
+ cbnz x25, 9998f
+
+ ldr_this_cpu x25, irq_stack_ptr, x26
+ mov x26, #IRQ_STACK_SIZE
+ add x26, x25, x26
+
+ /* switch to the irq stack */
+ mov sp, x26
+
+#ifdef CONFIG_SHADOW_CALL_STACK
+ /* also switch to the irq shadow stack */
+ adr_this_cpu scs_sp, irq_shadow_call_stack, x26
+#endif
+
+9998:
+ .endm
+
+ /*
+ * The callee-saved regs (x19-x29) should be preserved between
+ * irq_stack_entry and irq_stack_exit, but note that kernel_entry
+ * uses x20-x23 to store data for later use.
+ */
+ .macro irq_stack_exit
+ mov sp, x19
+ scs_load_current
+ .endm
+
+/* GPRs used by entry code */
+tsk .req x28 // current thread_info
+
+/*
+ * Interrupt handling.
+ */
+ .macro irq_handler, handler:req
+ ldr_l x1, \handler
+ mov x0, sp
+ irq_stack_entry
+ blr x1
+ irq_stack_exit
+ .endm
+
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+ /*
+ * Set res to 0 if irqs were unmasked in interrupted context.
+ * Otherwise set res to non-0 value.
+ */
+ .macro test_irqs_unmasked res:req, pmr:req
+alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+ sub \res, \pmr, #GIC_PRIO_IRQON
+alternative_else
+ mov \res, xzr
+alternative_endif
+ .endm
+#endif
+
+ .macro gic_prio_kentry_setup, tmp:req
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+ alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+ mov \tmp, #(GIC_PRIO_PSR_I_SET | GIC_PRIO_IRQON)
+ msr_s SYS_ICC_PMR_EL1, \tmp
+ alternative_else_nop_endif
+#endif
+ .endm
+
+ .macro el1_interrupt_handler, handler:req
+ enable_da_f
+
+ mov x0, sp
+ bl enter_el1_irq_or_nmi
+
+ irq_handler \handler
+
+#ifdef CONFIG_PREEMPTION
+ ldr x24, [tsk, #TSK_TI_PREEMPT] // get preempt count
+alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+ /*
+ * DA_F was cleared at the start of handling. If anything is set in DAIF,
+ * we are returning from an NMI, so skip preemption.
+ */
+ mrs x0, daif
+ orr x24, x24, x0
+alternative_else_nop_endif
+ cbnz x24, 1f // preempt count != 0 || NMI return path
+ bl arm64_preempt_schedule_irq // irq en/disable is done inside
+1:
+#endif
+
+ mov x0, sp
+ bl exit_el1_irq_or_nmi
+ .endm
+
+ .macro el0_interrupt_handler, handler:req
+ user_exit_irqoff
+ enable_da_f
+
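+ /* x22 holds the interrupted PC; bit 55 set means a TTBR1 (kernel) address, so apply BP hardening */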
+ tbz x22, #55, 1f
+ bl do_el0_irq_bp_hardening
+1:
+ irq_handler \handler
+ .endm
+
+ .text
+
+/*
+ * Exception vectors.
+ */
+ .pushsection ".entry.text", "ax"
+
+ .align 11
+SYM_CODE_START(vectors)
+ kernel_ventry 1, sync_invalid // Synchronous EL1t
+ kernel_ventry 1, irq_invalid // IRQ EL1t
+ kernel_ventry 1, fiq_invalid // FIQ EL1t
+ kernel_ventry 1, error_invalid // Error EL1t
+
+ kernel_ventry 1, sync // Synchronous EL1h
+ kernel_ventry 1, irq // IRQ EL1h
+ kernel_ventry 1, fiq_invalid // FIQ EL1h
+ kernel_ventry 1, error // Error EL1h
+
+ kernel_ventry 0, sync // Synchronous 64-bit EL0
+ kernel_ventry 0, irq // IRQ 64-bit EL0
+ kernel_ventry 0, fiq_invalid // FIQ 64-bit EL0
+ kernel_ventry 0, error // Error 64-bit EL0
+
+#ifdef CONFIG_COMPAT
+ kernel_ventry 0, sync_compat, 32 // Synchronous 32-bit EL0
+ kernel_ventry 0, irq_compat, 32 // IRQ 32-bit EL0
+ kernel_ventry 0, fiq_invalid_compat, 32 // FIQ 32-bit EL0
+ kernel_ventry 0, error_compat, 32 // Error 32-bit EL0
+#else
+ kernel_ventry 0, sync_invalid, 32 // Synchronous 32-bit EL0
+ kernel_ventry 0, irq_invalid, 32 // IRQ 32-bit EL0
+ kernel_ventry 0, fiq_invalid, 32 // FIQ 32-bit EL0
+ kernel_ventry 0, error_invalid, 32 // Error 32-bit EL0
+#endif
+SYM_CODE_END(vectors)
+
+#ifdef CONFIG_VMAP_STACK
+ /*
+ * We detected an overflow in kernel_ventry, which switched to the
+ * overflow stack. Stash the exception regs, and head to our overflow
+ * handler.
+ */
+__bad_stack:
+ /* Restore the original x0 value */
+ mrs x0, tpidrro_el0
+
+ /*
+ * Store the original GPRs to the new stack. The original SP (minus
+ * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
+ */
+ sub sp, sp, #S_FRAME_SIZE
+ kernel_entry 1
+ mrs x0, tpidr_el0
+ add x0, x0, #S_FRAME_SIZE
+ str x0, [sp, #S_SP]
+
+ /* Stash the regs for handle_bad_stack */
+ mov x0, sp
+
+ /* Time to die */
+ bl handle_bad_stack
+ ASM_BUG()
+#endif /* CONFIG_VMAP_STACK */
+
+/*
+ * Invalid mode handlers
+ */
+ .macro inv_entry, el, reason, regsize = 64
+ kernel_entry \el, \regsize
+ mov x0, sp
+ mov x1, #\reason
+ mrs x2, esr_el1
+ bl bad_mode
+ ASM_BUG()
+ .endm
+
+SYM_CODE_START_LOCAL(el0_sync_invalid)
+ inv_entry 0, BAD_SYNC
+SYM_CODE_END(el0_sync_invalid)
+
+SYM_CODE_START_LOCAL(el0_irq_invalid)
+ inv_entry 0, BAD_IRQ
+SYM_CODE_END(el0_irq_invalid)
+
+SYM_CODE_START_LOCAL(el0_fiq_invalid)
+ inv_entry 0, BAD_FIQ
+SYM_CODE_END(el0_fiq_invalid)
+
+SYM_CODE_START_LOCAL(el0_error_invalid)
+ inv_entry 0, BAD_ERROR
+SYM_CODE_END(el0_error_invalid)
+
+#ifdef CONFIG_COMPAT
+SYM_CODE_START_LOCAL(el0_fiq_invalid_compat)
+ inv_entry 0, BAD_FIQ, 32
+SYM_CODE_END(el0_fiq_invalid_compat)
+#endif
+
+SYM_CODE_START_LOCAL(el1_sync_invalid)
+ inv_entry 1, BAD_SYNC
+SYM_CODE_END(el1_sync_invalid)
+
+SYM_CODE_START_LOCAL(el1_irq_invalid)
+ inv_entry 1, BAD_IRQ
+SYM_CODE_END(el1_irq_invalid)
+
+SYM_CODE_START_LOCAL(el1_fiq_invalid)
+ inv_entry 1, BAD_FIQ
+SYM_CODE_END(el1_fiq_invalid)
+
+SYM_CODE_START_LOCAL(el1_error_invalid)
+ inv_entry 1, BAD_ERROR
+SYM_CODE_END(el1_error_invalid)
+
+/*
+ * EL1 mode handlers.
+ */
+ .align 6
+SYM_CODE_START_LOCAL_NOALIGN(el1_sync)
+ kernel_entry 1
+ mov x0, sp
+ bl el1_sync_handler
+ kernel_exit 1
+SYM_CODE_END(el1_sync)
+
+ .align 6
+SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
+ kernel_entry 1
+ el1_interrupt_handler handle_arch_irq
+ kernel_exit 1
+SYM_CODE_END(el1_irq)
+
+/*
+ * EL0 mode handlers.
+ */
+ .align 6
+SYM_CODE_START_LOCAL_NOALIGN(el0_sync)
+ kernel_entry 0
+ mov x0, sp
+ bl el0_sync_handler
+ b ret_to_user
+SYM_CODE_END(el0_sync)
+
+#ifdef CONFIG_COMPAT
+ .align 6
+SYM_CODE_START_LOCAL_NOALIGN(el0_sync_compat)
+ kernel_entry 0, 32
+ mov x0, sp
+ bl el0_sync_compat_handler
+ b ret_to_user
+SYM_CODE_END(el0_sync_compat)
+
+ .align 6
+SYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat)
+ kernel_entry 0, 32
+ b el0_irq_naked
+SYM_CODE_END(el0_irq_compat)
+
+SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat)
+ kernel_entry 0, 32
+ b el0_error_naked
+SYM_CODE_END(el0_error_compat)
+#endif
+
+ .align 6
+SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
+ kernel_entry 0
+el0_irq_naked:
+ el0_interrupt_handler handle_arch_irq
+ b ret_to_user
+SYM_CODE_END(el0_irq)
+
+SYM_CODE_START_LOCAL(el1_error)
+ kernel_entry 1
+ mrs x1, esr_el1
+ enable_dbg
+ mov x0, sp
+ bl do_serror
+ kernel_exit 1
+SYM_CODE_END(el1_error)
+
+SYM_CODE_START_LOCAL(el0_error)
+ kernel_entry 0
+el0_error_naked:
+ mrs x25, esr_el1
+ user_exit_irqoff
+ enable_dbg
+ mov x0, sp
+ mov x1, x25
+ bl do_serror
+ enable_da_f
+ b ret_to_user
+SYM_CODE_END(el0_error)
+
+/*
+ * "slow" syscall return path.
+ */
+SYM_CODE_START_LOCAL(ret_to_user)
+ disable_daif
+ gic_prio_kentry_setup tmp=x3
+#ifdef CONFIG_TRACE_IRQFLAGS
+ bl trace_hardirqs_off
+#endif
+ ldr x19, [tsk, #TSK_TI_FLAGS]
+ and x2, x19, #_TIF_WORK_MASK
+ cbnz x2, work_pending
+finish_ret_to_user:
+ user_enter_irqoff
+ /* Ignore asynchronous tag check faults in the uaccess routines */
+ clear_mte_async_tcf
+ enable_step_tsk x19, x2
+#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+ bl stackleak_erase
+#endif
+ kernel_exit 0
+
+/*
+ * Ok, we need to do extra processing, enter the slow path.
+ */
+work_pending:
+ mov x0, sp // 'regs'
+ mov x1, x19
+ bl do_notify_resume
+ ldr x19, [tsk, #TSK_TI_FLAGS] // re-check for single-step
+ b finish_ret_to_user
+SYM_CODE_END(ret_to_user)
+
+ .popsection // .entry.text
+
+ // Move from tramp_pg_dir to swapper_pg_dir
+ .macro tramp_map_kernel, tmp
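+ /* tramp_pg_dir and swapper_pg_dir are kept a fixed 2 * PAGE_SIZE apart */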
+ mrs \tmp, ttbr1_el1
+ add \tmp, \tmp, #(2 * PAGE_SIZE)
+ bic \tmp, \tmp, #USER_ASID_FLAG
+ msr ttbr1_el1, \tmp
+#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
+alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
+ /* ASID already in \tmp[63:48] */
+ movk \tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
+ movk \tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
+ /* 2MB boundary containing the vectors, so we nobble the walk cache */
+ movk \tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
+ isb
+ tlbi vae1, \tmp
+ dsb nsh
+alternative_else_nop_endif
+#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
+ .endm
+
+ // Move from swapper_pg_dir to tramp_pg_dir
+ .macro tramp_unmap_kernel, tmp
+ mrs \tmp, ttbr1_el1
+ sub \tmp, \tmp, #(2 * PAGE_SIZE)
+ orr \tmp, \tmp, #USER_ASID_FLAG
+ msr ttbr1_el1, \tmp
+ /*
+ * We avoid running the post_ttbr_update_workaround here because
+ * it's only needed by Cavium ThunderX, which requires KPTI to be
+ * disabled.
+ */
+ .endm
+
+ .macro tramp_data_page dst
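+ /* The trampoline data page is mapped one page below .entry.tramp.text */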
+ adr_l \dst, .entry.tramp.text
+ sub \dst, \dst, PAGE_SIZE
+ .endm
+
+ .macro tramp_data_read_var dst, var
+#ifdef CONFIG_RANDOMIZE_BASE
+ tramp_data_page \dst
+ add \dst, \dst, #:lo12:__entry_tramp_data_\var
+ ldr \dst, [\dst]
+#else
+ ldr \dst, =\var
+#endif
+ .endm
+
+#define BHB_MITIGATION_NONE 0
+#define BHB_MITIGATION_LOOP 1
+#define BHB_MITIGATION_FW 2
+#define BHB_MITIGATION_INSN 3
+
+ .macro tramp_ventry, vector_start, regsize, kpti, bhb
+ .align 7
+1:
+ .if \regsize == 64
+ msr tpidrro_el0, x30 // Restored in kernel_ventry
+ .endif
+
+ .if \bhb == BHB_MITIGATION_LOOP
+ /*
+ * This sequence must appear before the first indirect branch. i.e. the
+ * ret out of tramp_ventry. It appears here because x30 is free.
+ */
+ __mitigate_spectre_bhb_loop x30
+ .endif // \bhb == BHB_MITIGATION_LOOP
+
+ .if \bhb == BHB_MITIGATION_INSN
+ clearbhb
+ isb
+ .endif // \bhb == BHB_MITIGATION_INSN
+
+ .if \kpti == 1
+ /*
+ * Defend against branch aliasing attacks by pushing a dummy
+ * entry onto the return stack and using a RET instruction to
+ * enter the full-fat kernel vectors.
+ */
+ bl 2f
+ b .
+2:
+ tramp_map_kernel x30
+alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
+ tramp_data_read_var x30, vectors
+alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
+ prfm plil1strm, [x30, #(1b - \vector_start)]
+alternative_else_nop_endif
+
+ msr vbar_el1, x30
+ isb
+ .else
+ ldr x30, =vectors
+ .endif // \kpti == 1
+
+ .if \bhb == BHB_MITIGATION_FW
+ /*
+ * The firmware sequence must appear before the first indirect branch.
+ * i.e. the ret out of tramp_ventry. But it also needs the stack to be
+ * mapped to save/restore the registers the SMC clobbers.
+ */
+ __mitigate_spectre_bhb_fw
+ .endif // \bhb == BHB_MITIGATION_FW
+
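+ /*
+ * Branch to the matching slot in vectors, skipping its first instruction
+ * so the tpidrro_el0 cleanup in kernel_ventry runs.
+ */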
+ add x30, x30, #(1b - \vector_start + 4)
+ ret
+.org 1b + 128 // Did we overflow the ventry slot?
+ .endm
+
+ .macro tramp_exit, regsize = 64
+ tramp_data_read_var x30, this_cpu_vector
+ this_cpu_offset x29
+ ldr x30, [x30, x29]
+
+ msr vbar_el1, x30
+ ldr lr, [sp, #S_LR]
+ tramp_unmap_kernel x29
+ .if \regsize == 64
+ mrs x29, far_el1
+ .endif
+ add sp, sp, #S_FRAME_SIZE // restore sp
+ eret
+ sb
+ .endm
+
+ .macro generate_tramp_vector, kpti, bhb
+.Lvector_start\@:
+ .space 0x400
+
+ .rept 4
+ tramp_ventry .Lvector_start\@, 64, \kpti, \bhb
+ .endr
+ .rept 4
+ tramp_ventry .Lvector_start\@, 32, \kpti, \bhb
+ .endr
+ .endm
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+/*
+ * Exception vectors trampoline.
+ * The order must match __bp_harden_el1_vectors and the
+ * arm64_bp_harden_el1_vectors enum.
+ */
+ .pushsection ".entry.tramp.text", "ax"
+ .align 11
+SYM_CODE_START_NOALIGN(tramp_vectors)
+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+ generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_LOOP
+ generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_FW
+ generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_INSN
+#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
+ generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_NONE
+SYM_CODE_END(tramp_vectors)
+
+SYM_CODE_START(tramp_exit_native)
+ tramp_exit
+SYM_CODE_END(tramp_exit_native)
+
+SYM_CODE_START(tramp_exit_compat)
+ tramp_exit 32
+SYM_CODE_END(tramp_exit_compat)
+
+ .ltorg
+ .popsection // .entry.tramp.text
+#ifdef CONFIG_RANDOMIZE_BASE
+ .pushsection ".rodata", "a"
+ .align PAGE_SHIFT
+SYM_DATA_START(__entry_tramp_data_start)
+__entry_tramp_data_vectors:
+ .quad vectors
+#ifdef CONFIG_ARM_SDE_INTERFACE
+__entry_tramp_data___sdei_asm_handler:
+ .quad __sdei_asm_handler
+#endif /* CONFIG_ARM_SDE_INTERFACE */
+__entry_tramp_data_this_cpu_vector:
+ .quad this_cpu_vector
+SYM_DATA_END(__entry_tramp_data_start)
+ .popsection // .rodata
+#endif /* CONFIG_RANDOMIZE_BASE */
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+
+/*
+ * Exception vectors for spectre mitigations on entry from EL1 when
+ * kpti is not in use.
+ */
+ .macro generate_el1_vector, bhb
+.Lvector_start\@:
+ kernel_ventry 1, sync_invalid // Synchronous EL1t
+ kernel_ventry 1, irq_invalid // IRQ EL1t
+ kernel_ventry 1, fiq_invalid // FIQ EL1t
+ kernel_ventry 1, error_invalid // Error EL1t
+
+ kernel_ventry 1, sync // Synchronous EL1h
+ kernel_ventry 1, irq // IRQ EL1h
+ kernel_ventry 1, fiq_invalid // FIQ EL1h
+ kernel_ventry 1, error // Error EL1h
+
+ .rept 4
+ tramp_ventry .Lvector_start\@, 64, 0, \bhb
+ .endr
+ .rept 4
+ tramp_ventry .Lvector_start\@, 32, 0, \bhb
+ .endr
+ .endm
+
+/* The order must match tramp_vecs and the arm64_bp_harden_el1_vectors enum. */
+ .pushsection ".entry.text", "ax"
+ .align 11
+SYM_CODE_START(__bp_harden_el1_vectors)
+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+ generate_el1_vector bhb=BHB_MITIGATION_LOOP
+ generate_el1_vector bhb=BHB_MITIGATION_FW
+ generate_el1_vector bhb=BHB_MITIGATION_INSN
+#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
+SYM_CODE_END(__bp_harden_el1_vectors)
+ .popsection
+
+
+/*
+ * Register switch for AArch64. The callee-saved registers need to be saved
+ * and restored. On entry:
+ * x0 = previous task_struct (must be preserved across the switch)
+ * x1 = next task_struct
+ * Previous and next are guaranteed not to be the same.
+ *
+ */
+SYM_FUNC_START(cpu_switch_to)
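+ // THREAD_CPU_CONTEXT is the offset of thread.cpu_context within task_struct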
+ mov x10, #THREAD_CPU_CONTEXT
+ add x8, x0, x10
+ mov x9, sp
+ stp x19, x20, [x8], #16 // store callee-saved registers
+ stp x21, x22, [x8], #16
+ stp x23, x24, [x8], #16
+ stp x25, x26, [x8], #16
+ stp x27, x28, [x8], #16
+ stp x29, x9, [x8], #16
+ str lr, [x8]
+ add x8, x1, x10
+ ldp x19, x20, [x8], #16 // restore callee-saved registers
+ ldp x21, x22, [x8], #16
+ ldp x23, x24, [x8], #16
+ ldp x25, x26, [x8], #16
+ ldp x27, x28, [x8], #16
+ ldp x29, x9, [x8], #16
+ ldr lr, [x8]
+ mov sp, x9
+ msr sp_el0, x1
+ ptrauth_keys_install_kernel x1, x8, x9, x10
+ scs_save x0, x8
+ scs_load_current
+ ret
+SYM_FUNC_END(cpu_switch_to)
+NOKPROBE(cpu_switch_to)
+
+/*
+ * This is how we return from a fork.
+ */
+SYM_CODE_START(ret_from_fork)
+ bl schedule_tail
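+ // For a kernel thread, copy_thread() left the entry point in x19 and its argument in x20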
+ cbz x19, 1f // not a kernel thread
+ mov x0, x20
+ blr x19
+1: get_current_task tsk
+ b ret_to_user
+SYM_CODE_END(ret_from_fork)
+NOKPROBE(ret_from_fork)
+
+#ifdef CONFIG_ARM_SDE_INTERFACE
+
+#include <asm/sdei.h>
+#include <uapi/linux/arm_sdei.h>
+
+.macro sdei_handler_exit exit_mode
+ /* On success, this call never returns... */
+ cmp \exit_mode, #SDEI_EXIT_SMC
+ b.ne 99f
+ smc #0
+ b .
+99: hvc #0
+ b .
+.endm
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+/*
+ * The regular SDEI entry point may have been unmapped along with the rest of
+ * the kernel. This trampoline restores the kernel mapping to make the x1 memory
+ * argument accessible.
+ *
+ * This clobbers x4, __sdei_handler() will restore this from firmware's
+ * copy.
+ */
+.ltorg
+.pushsection ".entry.tramp.text", "ax"
+SYM_CODE_START(__sdei_asm_entry_trampoline)
+ mrs x4, ttbr1_el1
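+ // The kernel is already mapped if TTBR1 does not carry the user ASID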
+ tbz x4, #USER_ASID_BIT, 1f
+
+ tramp_map_kernel tmp=x4
+ isb
+ mov x4, xzr
+
+ /*
+ * Use reg->interrupted_regs.addr_limit to remember whether to unmap
+ * the kernel on exit.
+ */
+1: str x4, [x1, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
+
+ tramp_data_read_var x4, __sdei_asm_handler
+ br x4
+SYM_CODE_END(__sdei_asm_entry_trampoline)
+NOKPROBE(__sdei_asm_entry_trampoline)
+
+/*
+ * Make the exit call and restore the original ttbr1_el1
+ *
+ * x0 & x1: setup for the exit API call
+ * x2: exit_mode
+ * x4: struct sdei_registered_event argument from registration time.
+ */
+SYM_CODE_START(__sdei_asm_exit_trampoline)
+ ldr x4, [x4, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
+ cbnz x4, 1f
+
+ tramp_unmap_kernel tmp=x4
+
+1: sdei_handler_exit exit_mode=x2
+SYM_CODE_END(__sdei_asm_exit_trampoline)
+NOKPROBE(__sdei_asm_exit_trampoline)
+ .ltorg
+.popsection // .entry.tramp.text
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+
+/*
+ * Software Delegated Exception entry point.
+ *
+ * x0: Event number
+ * x1: struct sdei_registered_event argument from registration time.
+ * x2: interrupted PC
+ * x3: interrupted PSTATE
+ * x4: maybe clobbered by the trampoline
+ *
+ * Firmware has preserved x0->x17 for us, we must save/restore the rest to
+ * follow SMC-CC. We save (or retrieve) all the registers as the handler may
+ * want them.
+ */
+SYM_CODE_START(__sdei_asm_handler)
+ stp x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
+ stp x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
+ stp x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
+ stp x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
+ stp x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
+ stp x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
+ stp x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
+ stp x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
+ stp x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
+ stp x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
+ stp x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
+ stp x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
+ stp x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
+ stp x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
+ mov x4, sp
+ stp lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]
+
+ mov x19, x1
+
+ /* Store the registered-event for crash_smp_send_stop() */
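+ /* Priority is 0 for normal events, non-zero for critical events */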
+ ldrb w4, [x19, #SDEI_EVENT_PRIORITY]
+ cbnz w4, 1f
+ adr_this_cpu dst=x5, sym=sdei_active_normal_event, tmp=x6
+ b 2f
+1: adr_this_cpu dst=x5, sym=sdei_active_critical_event, tmp=x6
+2: str x19, [x5]
+
+#ifdef CONFIG_VMAP_STACK
+ /*
+ * entry.S may have been using sp as a scratch register, find whether
+ * this is a normal or critical event and switch to the appropriate
+ * stack for this CPU.
+ */
+ cbnz w4, 1f
+ ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
+ b 2f
+1: ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
+2: mov x6, #SDEI_STACK_SIZE
+ add x5, x5, x6
+ mov sp, x5
+#endif
+
+#ifdef CONFIG_SHADOW_CALL_STACK
+ /* Use a separate shadow call stack for normal and critical events */
+ cbnz w4, 3f
+ adr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_normal, tmp=x6
+ b 4f
+3: adr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_critical, tmp=x6
+4:
+#endif
+
+ /*
+ * We may have interrupted userspace, or a guest, or exit-from or
+ * return-to either of these. We can't trust sp_el0, restore it.
+ */
+ mrs x28, sp_el0
+ ldr_this_cpu dst=x0, sym=__entry_task, tmp=x1
+ msr sp_el0, x0
+
+ /* If we interrupted the kernel point to the previous stack/frame. */
+ and x0, x3, #0xc
+ mrs x1, CurrentEL
+ cmp x0, x1
+ csel x29, x29, xzr, eq // fp, or zero
+ csel x4, x2, xzr, eq // elr, or zero
+
+ stp x29, x4, [sp, #-16]!
+ mov x29, sp
+
+ add x0, x19, #SDEI_EVENT_INTREGS
+ mov x1, x19
+ bl __sdei_handler
+
+ msr sp_el0, x28
+ /* restore regs >x17 that we clobbered */
+ mov x4, x19 // keep x4 for __sdei_asm_exit_trampoline
+ ldp x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
+ ldp x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
+ ldp lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]
+ mov sp, x1
+
+ mov x1, x0 // address to complete_and_resume
+ /* x0 = (x0 <= 1) ? EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME */
+ cmp x0, #1
+ mov_q x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
+ mov_q x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
+ csel x0, x2, x3, ls
+
+ ldr_l x2, sdei_exit_mode
+
+ /* Clear the registered-event seen by crash_smp_send_stop() */
+ ldrb w3, [x4, #SDEI_EVENT_PRIORITY]
+ cbnz w3, 1f
+ adr_this_cpu dst=x5, sym=sdei_active_normal_event, tmp=x6
+ b 2f
+1: adr_this_cpu dst=x5, sym=sdei_active_critical_event, tmp=x6
+2: str xzr, [x5]
+
+alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
+ sdei_handler_exit exit_mode=x2
+alternative_else_nop_endif
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ tramp_alias dst=x5, sym=__sdei_asm_exit_trampoline, tmp=x3
+ br x5
+#endif
+SYM_CODE_END(__sdei_asm_handler)
+NOKPROBE(__sdei_asm_handler)
+
+SYM_CODE_START(__sdei_handler_abort)
+ mov_q x0, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
+ adr x1, 1f
+ ldr_l x2, sdei_exit_mode
+ sdei_handler_exit exit_mode=x2
+ // exit the handler and jump to the next instruction.
+ // Exit will stomp x0-x17, PSTATE, ELR_ELx, and SPSR_ELx.
+1: ret
+SYM_CODE_END(__sdei_handler_abort)
+NOKPROBE(__sdei_handler_abort)
+#endif /* CONFIG_ARM_SDE_INTERFACE */
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
new file mode 100644
index 000000000..a9bbfb800
--- /dev/null
+++ b/arch/arm64/kernel/fpsimd.c
@@ -0,0 +1,1449 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * FP/SIMD context switching and fault handling
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Catalin Marinas <catalin.marinas@arm.com>
+ */
+
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/bottom_half.h>
+#include <linux/bug.h>
+#include <linux/cache.h>
+#include <linux/compat.h>
+#include <linux/compiler.h>
+#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
+#include <linux/kernel.h>
+#include <linux/linkage.h>
+#include <linux/irqflags.h>
+#include <linux/init.h>
+#include <linux/percpu.h>
+#include <linux/prctl.h>
+#include <linux/preempt.h>
+#include <linux/ptrace.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/task_stack.h>
+#include <linux/signal.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/sysctl.h>
+#include <linux/swab.h>
+
+#include <asm/esr.h>
+#include <asm/exception.h>
+#include <asm/fpsimd.h>
+#include <asm/cpufeature.h>
+#include <asm/cputype.h>
+#include <asm/neon.h>
+#include <asm/processor.h>
+#include <asm/simd.h>
+#include <asm/sigcontext.h>
+#include <asm/sysreg.h>
+#include <asm/traps.h>
+#include <asm/virt.h>
+
+#define FPEXC_IOF (1 << 0)
+#define FPEXC_DZF (1 << 1)
+#define FPEXC_OFF (1 << 2)
+#define FPEXC_UFF (1 << 3)
+#define FPEXC_IXF (1 << 4)
+#define FPEXC_IDF (1 << 7)
+
+/*
+ * (Note: in this discussion, statements about FPSIMD apply equally to SVE.)
+ *
+ * In order to reduce the number of times the FPSIMD state is needlessly saved
+ * and restored, we need to keep track of two things:
+ * (a) for each task, we need to remember which CPU was the last one to have
+ * the task's FPSIMD state loaded into its FPSIMD registers;
+ * (b) for each CPU, we need to remember which task's userland FPSIMD state has
+ * been loaded into its FPSIMD registers most recently, or whether it has
+ * been used to perform kernel mode NEON in the meantime.
+ *
+ * For (a), we add a fpsimd_cpu field to thread_struct, which gets updated to
+ * the id of the current CPU every time the state is loaded onto a CPU. For (b),
+ * we add the per-cpu variable 'fpsimd_last_state' (below), which contains the
+ * address of the userland FPSIMD state of the task that was loaded onto the CPU
+ * the most recently, or NULL if kernel mode NEON has been performed after that.
+ *
+ * With this in place, we no longer have to restore the next FPSIMD state right
+ * when switching between tasks. Instead, we can defer this check to userland
+ * resume, at which time we verify whether the CPU's fpsimd_last_state and the
+ * task's fpsimd_cpu are still mutually in sync. If this is the case, we
+ * can omit the FPSIMD restore.
+ *
+ * As an optimization, we use the thread_info flag TIF_FOREIGN_FPSTATE to
+ * indicate whether or not the userland FPSIMD state of the current task is
+ * present in the registers. The flag is set unless the FPSIMD registers of this
+ * CPU currently contain the most recent userland FPSIMD state of the current
+ * task.
+ *
+ * In order to allow softirq handlers to use FPSIMD, kernel_neon_begin() may
+ * save the task's FPSIMD context back to task_struct from softirq context.
+ * To prevent this from racing with the manipulation of the task's FPSIMD state
+ * from task context and thereby corrupting the state, it is necessary to
+ * protect any manipulation of a task's fpsimd_state or TIF_FOREIGN_FPSTATE
+ * flag with {, __}get_cpu_fpsimd_context(). This will still allow softirqs to
+ * run but prevent them from using FPSIMD.
+ *
+ * For a certain task, the sequence may look something like this:
+ * - the task gets scheduled in; if both the task's fpsimd_cpu field
+ * contains the id of the current CPU, and the CPU's fpsimd_last_state per-cpu
+ * variable points to the task's fpsimd_state, the TIF_FOREIGN_FPSTATE flag is
+ * cleared, otherwise it is set;
+ *
+ * - the task returns to userland; if TIF_FOREIGN_FPSTATE is set, the task's
+ * userland FPSIMD state is copied from memory to the registers, the task's
+ * fpsimd_cpu field is set to the id of the current CPU, the current
+ * CPU's fpsimd_last_state pointer is set to this task's fpsimd_state and the
+ * TIF_FOREIGN_FPSTATE flag is cleared;
+ *
+ * - the task executes an ordinary syscall; upon return to userland, the
+ * TIF_FOREIGN_FPSTATE flag will still be cleared, so no FPSIMD state is
+ * restored;
+ *
+ * - the task executes a syscall which executes some NEON instructions; this is
+ * preceded by a call to kernel_neon_begin(), which copies the task's FPSIMD
+ * register contents to memory, clears the fpsimd_last_state per-cpu variable
+ * and sets the TIF_FOREIGN_FPSTATE flag;
+ *
+ * - the task gets preempted after kernel_neon_end() is called; as we have not
+ * returned from the 2nd syscall yet, TIF_FOREIGN_FPSTATE is still set so
+ * whatever is in the FPSIMD registers is not saved to memory, but discarded.
+ */
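+/*
+ * A minimal sketch of the "still in sync" check described above (illustrative
+ * only; it mirrors the wrong_task/wrong_cpu test in fpsimd_thread_switch()
+ * further down):
+ *
+ *	bool in_sync =
+ *		__this_cpu_read(fpsimd_last_state.st) ==
+ *			&current->thread.uw.fpsimd_state &&
+ *		current->thread.fpsimd_cpu == smp_processor_id();
+ *
+ * Only when in_sync is false does the state need to be reloaded from memory
+ * before returning to userland.
+ */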
+struct fpsimd_last_state_struct {
+ struct user_fpsimd_state *st;
+ void *sve_state;
+ unsigned int sve_vl;
+};
+
+static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state);
+
+/* Default VL for tasks that don't set it explicitly: */
+static int __sve_default_vl = -1;
+
+static int get_sve_default_vl(void)
+{
+ return READ_ONCE(__sve_default_vl);
+}
+
+#ifdef CONFIG_ARM64_SVE
+
+static void set_sve_default_vl(int val)
+{
+ WRITE_ONCE(__sve_default_vl, val);
+}
+
+/* Maximum supported vector length across all CPUs (initially poisoned) */
+int __ro_after_init sve_max_vl = SVE_VL_MIN;
+int __ro_after_init sve_max_virtualisable_vl = SVE_VL_MIN;
+
+/*
+ * Set of available vector lengths,
+ * where length vq is encoded as bit __vq_to_bit(vq):
+ */
+__ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
+/* Set of vector lengths present on at least one cpu: */
+static __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX);
+
+static void __percpu *efi_sve_state;
+
+#else /* ! CONFIG_ARM64_SVE */
+
+/* Dummy declaration for code that will be optimised out: */
+extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
+extern __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX);
+extern void __percpu *efi_sve_state;
+
+#endif /* ! CONFIG_ARM64_SVE */
+
+DEFINE_PER_CPU(bool, fpsimd_context_busy);
+EXPORT_PER_CPU_SYMBOL(fpsimd_context_busy);
+
+static void __get_cpu_fpsimd_context(void)
+{
+ bool busy = __this_cpu_xchg(fpsimd_context_busy, true);
+
+ WARN_ON(busy);
+}
+
+/*
+ * Claim ownership of the CPU FPSIMD context for use by the calling context.
+ *
+ * The caller may freely manipulate the FPSIMD context metadata until
+ * put_cpu_fpsimd_context() is called.
+ *
+ * The double-underscore version must only be called if you know the task
+ * can't be preempted.
+ */
+static void get_cpu_fpsimd_context(void)
+{
+ preempt_disable();
+ __get_cpu_fpsimd_context();
+}
+
+static void __put_cpu_fpsimd_context(void)
+{
+ bool busy = __this_cpu_xchg(fpsimd_context_busy, false);
+
+ WARN_ON(!busy); /* No matching get_cpu_fpsimd_context()? */
+}
+
+/*
+ * Release the CPU FPSIMD context.
+ *
+ * Must be called from a context in which get_cpu_fpsimd_context() was
+ * previously called, with no call to put_cpu_fpsimd_context() in the
+ * meantime.
+ */
+static void put_cpu_fpsimd_context(void)
+{
+ __put_cpu_fpsimd_context();
+ preempt_enable();
+}
+
+static bool have_cpu_fpsimd_context(void)
+{
+ return !preemptible() && __this_cpu_read(fpsimd_context_busy);
+}
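+
+/*
+ * Typical usage of the helpers above (an illustrative sketch, not a new
+ * caller):
+ *
+ *	get_cpu_fpsimd_context();
+ *	fpsimd_save();			// or other FPSIMD/SVE state manipulation
+ *	put_cpu_fpsimd_context();
+ *
+ * The double-underscore variants are reserved for contexts where preemption
+ * is already disabled, e.g. fpsimd_thread_switch() below.
+ */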
+
+/*
+ * Call __sve_free() directly only if you know the task can't be scheduled
+ * or preempted.
+ */
+static void __sve_free(struct task_struct *task)
+{
+ kfree(task->thread.sve_state);
+ task->thread.sve_state = NULL;
+}
+
+static void sve_free(struct task_struct *task)
+{
+ WARN_ON(test_tsk_thread_flag(task, TIF_SVE));
+
+ __sve_free(task);
+}
+
+/*
+ * TIF_SVE controls whether a task can use SVE without trapping while
+ * in userspace, and also the way a task's FPSIMD/SVE state is stored
+ * in thread_struct.
+ *
+ * The kernel uses this flag to track whether a user task is actively
+ * using SVE, and therefore whether full SVE register state needs to
+ * be tracked. If not, the cheaper FPSIMD context handling code can
+ * be used instead of the more costly SVE equivalents.
+ *
+ * * TIF_SVE set:
+ *
+ * The task can execute SVE instructions while in userspace without
+ * trapping to the kernel.
+ *
+ * When stored, Z0-Z31 (incorporating Vn in bits[127:0] or the
+ * corresponding Zn), P0-P15 and FFR are encoded in
+ * task->thread.sve_state, formatted appropriately for vector
+ * length task->thread.sve_vl.
+ *
+ * task->thread.sve_state must point to a valid buffer at least
+ * sve_state_size(task) bytes in size.
+ *
+ * During any syscall, the kernel may optionally clear TIF_SVE and
+ * discard the vector state except for the FPSIMD subset.
+ *
+ * * TIF_SVE clear:
+ *
+ * An attempt by the user task to execute an SVE instruction causes
+ * do_sve_acc() to be called, which does some preparation and then
+ * sets TIF_SVE.
+ *
+ * When stored, FPSIMD registers V0-V31 are encoded in
+ * task->thread.uw.fpsimd_state; bits [max : 128] for each of Z0-Z31 are
+ * logically zero but not stored anywhere; P0-P15 and FFR are not
+ * stored and have unspecified values from userspace's point of
+ * view. For hygiene purposes, the kernel zeroes them on next use,
+ * but userspace is discouraged from relying on this.
+ *
+ * task->thread.sve_state does not need to be non-NULL, valid or any
+ * particular size: it must not be dereferenced.
+ *
+ * * FPSR and FPCR are always stored in task->thread.uw.fpsimd_state
+ * irrespective of whether TIF_SVE is clear or set, since these are
+ * not vector length dependent.
+ */
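+
+/*
+ * Illustrative summary of the rules above: TIF_SVE selects which in-memory
+ * format is authoritative when saving a task's registers. This is the pattern
+ * followed by fpsimd_save() and task_fpsimd_load() below (a sketch only):
+ *
+ *	if (test_thread_flag(TIF_SVE))
+ *		sve_save_state(sve_pffr(&current->thread),
+ *			       &current->thread.uw.fpsimd_state.fpsr);
+ *	else
+ *		fpsimd_save_state(&current->thread.uw.fpsimd_state);
+ */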
+
+/*
+ * Update current's FPSIMD/SVE registers from thread_struct.
+ *
+ * This function should be called only when the FPSIMD/SVE state in
+ * thread_struct is known to be up to date, when preparing to enter
+ * userspace.
+ */
+static void task_fpsimd_load(void)
+{
+ WARN_ON(!system_supports_fpsimd());
+ WARN_ON(!have_cpu_fpsimd_context());
+
+ if (system_supports_sve() && test_thread_flag(TIF_SVE))
+ sve_load_state(sve_pffr(&current->thread),
+ &current->thread.uw.fpsimd_state.fpsr,
+ sve_vq_from_vl(current->thread.sve_vl) - 1);
+ else
+ fpsimd_load_state(&current->thread.uw.fpsimd_state);
+}
+
+/*
+ * Ensure FPSIMD/SVE storage in memory for the loaded context is up to
+ * date with respect to the CPU registers.
+ */
+static void fpsimd_save(void)
+{
+ struct fpsimd_last_state_struct const *last =
+ this_cpu_ptr(&fpsimd_last_state);
+ /* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */
+
+ WARN_ON(!system_supports_fpsimd());
+ WARN_ON(!have_cpu_fpsimd_context());
+
+ if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
+ if (system_supports_sve() && test_thread_flag(TIF_SVE)) {
+ if (WARN_ON(sve_get_vl() != last->sve_vl)) {
+ /*
+ * Can't save the user regs, so current would
+ * re-enter user with corrupt state.
+ * There's no way to recover, so kill it:
+ */
+ force_signal_inject(SIGKILL, SI_KERNEL, 0, 0);
+ return;
+ }
+
+ sve_save_state((char *)last->sve_state +
+ sve_ffr_offset(last->sve_vl),
+ &last->st->fpsr);
+ } else
+ fpsimd_save_state(last->st);
+ }
+}
+
+/*
+ * All vector length selection from userspace comes through here.
+ * We're on a slow path, so some sanity-checks are included.
+ * If things go wrong there's a bug somewhere, but try to fall back to a
+ * safe choice.
+ */
+static unsigned int find_supported_vector_length(unsigned int vl)
+{
+ int bit;
+ int max_vl = sve_max_vl;
+
+ if (WARN_ON(!sve_vl_valid(vl)))
+ vl = SVE_VL_MIN;
+
+ if (WARN_ON(!sve_vl_valid(max_vl)))
+ max_vl = SVE_VL_MIN;
+
+ if (vl > max_vl)
+ vl = max_vl;
+
+ bit = find_next_bit(sve_vq_map, SVE_VQ_MAX,
+ __vq_to_bit(sve_vq_from_vl(vl)));
+ return sve_vl_from_vq(__bit_to_vq(bit));
+}
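+
+/*
+ * For example (illustrative): on a system whose sve_vq_map advertises vector
+ * lengths {16, 32, 64} bytes, a request for vl == 48 yields 32, i.e. the
+ * largest supported length not exceeding the request after clamping to
+ * sve_max_vl.
+ */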
+
+#if defined(CONFIG_ARM64_SVE) && defined(CONFIG_SYSCTL)
+
+static int sve_proc_do_default_vl(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ int ret;
+ int vl = get_sve_default_vl();
+ struct ctl_table tmp_table = {
+ .data = &vl,
+ .maxlen = sizeof(vl),
+ };
+
+ ret = proc_dointvec(&tmp_table, write, buffer, lenp, ppos);
+ if (ret || !write)
+ return ret;
+
+ /* Writing -1 has the special meaning "set to max": */
+ if (vl == -1)
+ vl = sve_max_vl;
+
+ if (!sve_vl_valid(vl))
+ return -EINVAL;
+
+ set_sve_default_vl(find_supported_vector_length(vl));
+ return 0;
+}
+
+static struct ctl_table sve_default_vl_table[] = {
+ {
+ .procname = "sve_default_vector_length",
+ .mode = 0644,
+ .proc_handler = sve_proc_do_default_vl,
+ },
+ { }
+};
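+
+/*
+ * Registering the table above exposes
+ * /proc/sys/abi/sve_default_vector_length. For example (illustrative),
+ * writing "-1" there selects sve_max_vl, while any other valid value is
+ * rounded down to a supported VL by find_supported_vector_length().
+ */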
+
+static int __init sve_sysctl_init(void)
+{
+ if (system_supports_sve())
+ if (!register_sysctl("abi", sve_default_vl_table))
+ return -EINVAL;
+
+ return 0;
+}
+
+#else /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */
+static int __init sve_sysctl_init(void) { return 0; }
+#endif /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */
+
+#define ZREG(sve_state, vq, n) ((char *)(sve_state) + \
+ (SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+static __uint128_t arm64_cpu_to_le128(__uint128_t x)
+{
+ u64 a = swab64(x);
+ u64 b = swab64(x >> 64);
+
+ return ((__uint128_t)a << 64) | b;
+}
+#else
+static __uint128_t arm64_cpu_to_le128(__uint128_t x)
+{
+ return x;
+}
+#endif
+
+#define arm64_le128_to_cpu(x) arm64_cpu_to_le128(x)
+
+static void __fpsimd_to_sve(void *sst, struct user_fpsimd_state const *fst,
+ unsigned int vq)
+{
+ unsigned int i;
+ __uint128_t *p;
+
+ for (i = 0; i < SVE_NUM_ZREGS; ++i) {
+ p = (__uint128_t *)ZREG(sst, vq, i);
+ *p = arm64_cpu_to_le128(fst->vregs[i]);
+ }
+}
+
+/*
+ * Transfer the FPSIMD state in task->thread.uw.fpsimd_state to
+ * task->thread.sve_state.
+ *
+ * Task can be a non-runnable task, or current. In the latter case,
+ * the caller must have ownership of the cpu FPSIMD context before calling
+ * this function.
+ * task->thread.sve_state must point to at least sve_state_size(task)
+ * bytes of allocated kernel memory.
+ * task->thread.uw.fpsimd_state must be up to date before calling this
+ * function.
+ */
+static void fpsimd_to_sve(struct task_struct *task)
+{
+ unsigned int vq;
+ void *sst = task->thread.sve_state;
+ struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
+
+ if (!system_supports_sve())
+ return;
+
+ vq = sve_vq_from_vl(task->thread.sve_vl);
+ __fpsimd_to_sve(sst, fst, vq);
+}
+
+/*
+ * Transfer the SVE state in task->thread.sve_state to
+ * task->thread.uw.fpsimd_state.
+ *
+ * Task can be a non-runnable task, or current. In the latter case,
+ * the caller must have ownership of the cpu FPSIMD context before calling
+ * this function.
+ * task->thread.sve_state must point to at least sve_state_size(task)
+ * bytes of allocated kernel memory.
+ * task->thread.sve_state must be up to date before calling this function.
+ */
+static void sve_to_fpsimd(struct task_struct *task)
+{
+ unsigned int vq;
+ void const *sst = task->thread.sve_state;
+ struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state;
+ unsigned int i;
+ __uint128_t const *p;
+
+ if (!system_supports_sve())
+ return;
+
+ vq = sve_vq_from_vl(task->thread.sve_vl);
+ for (i = 0; i < SVE_NUM_ZREGS; ++i) {
+ p = (__uint128_t const *)ZREG(sst, vq, i);
+ fst->vregs[i] = arm64_le128_to_cpu(*p);
+ }
+}
+
+#ifdef CONFIG_ARM64_SVE
+
+/*
+ * Return how many bytes of memory are required to store the full SVE
+ * state for task, given task's currently configured vector length.
+ */
+size_t sve_state_size(struct task_struct const *task)
+{
+ return SVE_SIG_REGS_SIZE(sve_vq_from_vl(task->thread.sve_vl));
+}
+
+/*
+ * Ensure that task->thread.sve_state is allocated and sufficiently large.
+ *
+ * This function should be used only in preparation for replacing
+ * task->thread.sve_state with new data. The memory is always zeroed
+ * here to prevent stale data from showing through: this is done in
+ * the interest of testability and predictability: except in the
+ * do_sve_acc() case, there is no ABI requirement to hide stale data
+ * written previously by the task.
+ */
+void sve_alloc(struct task_struct *task)
+{
+ if (task->thread.sve_state) {
+ memset(task->thread.sve_state, 0, sve_state_size(task));
+ return;
+ }
+
+ /* This is a small allocation (maximum ~8KB) and Should Not Fail. */
+ task->thread.sve_state =
+ kzalloc(sve_state_size(task), GFP_KERNEL);
+
+ /*
+ * If future SVE revisions can have larger vectors though,
+ * this may cease to be true:
+ */
+ BUG_ON(!task->thread.sve_state);
+}
+
+
+/*
+ * Ensure that task->thread.sve_state is up to date with respect to
+ * the user task, irrespective of whether SVE is in use or not.
+ *
+ * This should only be called by ptrace. task must be non-runnable.
+ * task->thread.sve_state must point to at least sve_state_size(task)
+ * bytes of allocated kernel memory.
+ */
+void fpsimd_sync_to_sve(struct task_struct *task)
+{
+ if (!test_tsk_thread_flag(task, TIF_SVE))
+ fpsimd_to_sve(task);
+}
+
+/*
+ * Ensure that task->thread.uw.fpsimd_state is up to date with respect to
+ * the user task, irrespective of whether SVE is in use or not.
+ *
+ * This should only be called by ptrace. task must be non-runnable.
+ * task->thread.sve_state must point to at least sve_state_size(task)
+ * bytes of allocated kernel memory.
+ */
+void sve_sync_to_fpsimd(struct task_struct *task)
+{
+ if (test_tsk_thread_flag(task, TIF_SVE))
+ sve_to_fpsimd(task);
+}
+
+/*
+ * Ensure that task->thread.sve_state is up to date with respect to
+ * the task->thread.uw.fpsimd_state.
+ *
+ * This should only be called by ptrace to merge new FPSIMD register
+ * values into a task for which SVE is currently active.
+ * task must be non-runnable.
+ * task->thread.sve_state must point to at least sve_state_size(task)
+ * bytes of allocated kernel memory.
+ * task->thread.uw.fpsimd_state must already have been initialised with
+ * the new FPSIMD register values to be merged in.
+ */
+void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
+{
+ unsigned int vq;
+ void *sst = task->thread.sve_state;
+ struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
+
+ if (!test_tsk_thread_flag(task, TIF_SVE))
+ return;
+
+ vq = sve_vq_from_vl(task->thread.sve_vl);
+
+ memset(sst, 0, SVE_SIG_REGS_SIZE(vq));
+ __fpsimd_to_sve(sst, fst, vq);
+}
+
+int sve_set_vector_length(struct task_struct *task,
+ unsigned long vl, unsigned long flags)
+{
+ if (flags & ~(unsigned long)(PR_SVE_VL_INHERIT |
+ PR_SVE_SET_VL_ONEXEC))
+ return -EINVAL;
+
+ if (!sve_vl_valid(vl))
+ return -EINVAL;
+
+ /*
+ * Clamp to the maximum vector length that VL-agnostic SVE code can
+ * work with. A flag may be assigned in the future to allow setting
+ * of larger vector lengths without confusing older software.
+ */
+ if (vl > SVE_VL_ARCH_MAX)
+ vl = SVE_VL_ARCH_MAX;
+
+ vl = find_supported_vector_length(vl);
+
+ if (flags & (PR_SVE_VL_INHERIT |
+ PR_SVE_SET_VL_ONEXEC))
+ task->thread.sve_vl_onexec = vl;
+ else
+ /* Reset VL to system default on next exec: */
+ task->thread.sve_vl_onexec = 0;
+
+ /* Only actually set the VL if not deferred: */
+ if (flags & PR_SVE_SET_VL_ONEXEC)
+ goto out;
+
+ if (vl == task->thread.sve_vl)
+ goto out;
+
+ /*
+ * To ensure the FPSIMD bits of the SVE vector registers are preserved,
+ * write any live register state back to task_struct, and convert to a
+ * non-SVE thread.
+ */
+ if (task == current) {
+ get_cpu_fpsimd_context();
+
+ fpsimd_save();
+ }
+
+ fpsimd_flush_task_state(task);
+ if (test_and_clear_tsk_thread_flag(task, TIF_SVE))
+ sve_to_fpsimd(task);
+
+ if (task == current)
+ put_cpu_fpsimd_context();
+
+ /*
+ * Force reallocation of task SVE state to the correct size
+ * on next use:
+ */
+ sve_free(task);
+
+ task->thread.sve_vl = vl;
+
+out:
+ update_tsk_thread_flag(task, TIF_SVE_VL_INHERIT,
+ flags & PR_SVE_VL_INHERIT);
+
+ return 0;
+}
+
+/*
+ * Encode the current vector length and flags for return.
+ * This is only required for prctl(): ptrace has separate fields
+ *
+ * flags are as for sve_set_vector_length().
+ */
+static int sve_prctl_status(unsigned long flags)
+{
+ int ret;
+
+ if (flags & PR_SVE_SET_VL_ONEXEC)
+ ret = current->thread.sve_vl_onexec;
+ else
+ ret = current->thread.sve_vl;
+
+ if (test_thread_flag(TIF_SVE_VL_INHERIT))
+ ret |= PR_SVE_VL_INHERIT;
+
+ return ret;
+}
+
+/* PR_SVE_SET_VL */
+int sve_set_current_vl(unsigned long arg)
+{
+ unsigned long vl, flags;
+ int ret;
+
+ vl = arg & PR_SVE_VL_LEN_MASK;
+ flags = arg & ~vl;
+
+ if (!system_supports_sve() || is_compat_task())
+ return -EINVAL;
+
+ ret = sve_set_vector_length(current, vl, flags);
+ if (ret)
+ return ret;
+
+ return sve_prctl_status(flags);
+}
+
+/* PR_SVE_GET_VL */
+int sve_get_current_vl(void)
+{
+ if (!system_supports_sve() || is_compat_task())
+ return -EINVAL;
+
+ return sve_prctl_status(0);
+}
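+
+/*
+ * Illustrative userspace usage of the prctl interface above (a sketch; see
+ * Documentation/arm64/sve.rst for the authoritative description):
+ *
+ *	#include <stdio.h>
+ *	#include <sys/prctl.h>
+ *
+ *	int ret = prctl(PR_SVE_SET_VL, 32 | PR_SVE_VL_INHERIT);
+ *	if (ret >= 0)
+ *		printf("VL is now %d bytes\n", ret & PR_SVE_VL_LEN_MASK);
+ *
+ * The return value uses the same encoding as sve_prctl_status(): the chosen
+ * VL in PR_SVE_VL_LEN_MASK, plus any flags in effect.
+ */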
+
+static void sve_probe_vqs(DECLARE_BITMAP(map, SVE_VQ_MAX))
+{
+ unsigned int vq, vl;
+ unsigned long zcr;
+
+ bitmap_zero(map, SVE_VQ_MAX);
+
+ zcr = ZCR_ELx_LEN_MASK;
+ zcr = read_sysreg_s(SYS_ZCR_EL1) & ~zcr;
+
+ for (vq = SVE_VQ_MAX; vq >= SVE_VQ_MIN; --vq) {
+ write_sysreg_s(zcr | (vq - 1), SYS_ZCR_EL1); /* self-syncing */
+ vl = sve_get_vl();
+ vq = sve_vq_from_vl(vl); /* skip intervening lengths */
+ set_bit(__vq_to_bit(vq), map);
+ }
+}
+
+/*
+ * Initialise the set of known supported VQs for the boot CPU.
+ * This is called during kernel boot, before secondary CPUs are brought up.
+ */
+void __init sve_init_vq_map(void)
+{
+ sve_probe_vqs(sve_vq_map);
+ bitmap_copy(sve_vq_partial_map, sve_vq_map, SVE_VQ_MAX);
+}
+
+/*
+ * If we haven't committed to the set of supported VQs yet, filter out
+ * those not supported by the current CPU.
+ * This function is called during the bring-up of early secondary CPUs only.
+ */
+void sve_update_vq_map(void)
+{
+ DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
+
+ sve_probe_vqs(tmp_map);
+ bitmap_and(sve_vq_map, sve_vq_map, tmp_map, SVE_VQ_MAX);
+ bitmap_or(sve_vq_partial_map, sve_vq_partial_map, tmp_map, SVE_VQ_MAX);
+}
+
+/*
+ * Check whether the current CPU supports all VQs in the committed set.
+ * This function is called during the bring-up of late secondary CPUs only.
+ */
+int sve_verify_vq_map(void)
+{
+ DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
+ unsigned long b;
+
+ sve_probe_vqs(tmp_map);
+
+ bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX);
+ if (bitmap_intersects(tmp_map, sve_vq_map, SVE_VQ_MAX)) {
+ pr_warn("SVE: cpu%d: Required vector length(s) missing\n",
+ smp_processor_id());
+ return -EINVAL;
+ }
+
+ if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available())
+ return 0;
+
+ /*
+ * For KVM, it is necessary to ensure that this CPU doesn't
+ * support any vector length that guests may have probed as
+ * unsupported.
+ */
+
+ /* Recover the set of supported VQs: */
+ bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX);
+ /* Find VQs supported that are not globally supported: */
+ bitmap_andnot(tmp_map, tmp_map, sve_vq_map, SVE_VQ_MAX);
+
+ /* Find the lowest such VQ, if any: */
+ b = find_last_bit(tmp_map, SVE_VQ_MAX);
+ if (b >= SVE_VQ_MAX)
+ return 0; /* no mismatches */
+
+ /*
+ * Mismatches above sve_max_virtualisable_vl are fine, since
+ * no guest is allowed to configure ZCR_EL2.LEN to exceed this:
+ */
+ if (sve_vl_from_vq(__bit_to_vq(b)) <= sve_max_virtualisable_vl) {
+ pr_warn("SVE: cpu%d: Unsupported vector length(s) present\n",
+ smp_processor_id());
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void __init sve_efi_setup(void)
+{
+ if (!IS_ENABLED(CONFIG_EFI))
+ return;
+
+ /*
+ * alloc_percpu() warns and prints a backtrace if this goes wrong.
+ * This is evidence of a crippled system and we are returning void,
+ * so no attempt is made to handle this situation here.
+ */
+ if (!sve_vl_valid(sve_max_vl))
+ goto fail;
+
+ efi_sve_state = __alloc_percpu(
+ SVE_SIG_REGS_SIZE(sve_vq_from_vl(sve_max_vl)), SVE_VQ_BYTES);
+ if (!efi_sve_state)
+ goto fail;
+
+ return;
+
+fail:
+ panic("Cannot allocate percpu memory for EFI SVE save/restore");
+}
+
+/*
+ * Enable SVE for EL1.
+ * Intended for use by the cpufeatures code during CPU boot.
+ */
+void sve_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
+{
+ write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_ZEN_EL1EN, CPACR_EL1);
+ isb();
+}
+
+/*
+ * Read the pseudo-ZCR used by cpufeatures to identify the supported SVE
+ * vector length.
+ *
+ * Use only if SVE is present.
+ * This function clobbers the SVE vector length.
+ */
+u64 read_zcr_features(void)
+{
+ u64 zcr;
+ unsigned int vq_max;
+
+ /*
+ * Set the maximum possible VL, and write zeroes to all other
+ * bits to see if they stick.
+ */
+ sve_kernel_enable(NULL);
+ write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL1);
+
+ zcr = read_sysreg_s(SYS_ZCR_EL1);
+ zcr &= ~(u64)ZCR_ELx_LEN_MASK; /* find sticky 1s outside LEN field */
+ vq_max = sve_vq_from_vl(sve_get_vl());
+ zcr |= vq_max - 1; /* set LEN field to maximum effective value */
+
+ return zcr;
+}
+
+void __init sve_setup(void)
+{
+ u64 zcr;
+ DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
+ unsigned long b;
+
+ if (!system_supports_sve())
+ return;
+
+ /*
+ * The SVE architecture mandates support for 128-bit vectors,
+ * so sve_vq_map must have at least SVE_VQ_MIN set.
+ * If something went wrong, at least try to patch it up:
+ */
+ if (WARN_ON(!test_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map)))
+ set_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map);
+
+ zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
+ sve_max_vl = sve_vl_from_vq((zcr & ZCR_ELx_LEN_MASK) + 1);
+
+ /*
+ * Sanity-check that the max VL we determined through CPU features
+ * corresponds properly to sve_vq_map. If not, do our best:
+ */
+ if (WARN_ON(sve_max_vl != find_supported_vector_length(sve_max_vl)))
+ sve_max_vl = find_supported_vector_length(sve_max_vl);
+
+ /*
+ * For the default VL, pick the maximum supported value <= 64.
+ * VL == 64 is guaranteed not to grow the signal frame.
+ */
+ set_sve_default_vl(find_supported_vector_length(64));
+
+ bitmap_andnot(tmp_map, sve_vq_partial_map, sve_vq_map,
+ SVE_VQ_MAX);
+
+ b = find_last_bit(tmp_map, SVE_VQ_MAX);
+ if (b >= SVE_VQ_MAX)
+ /* No non-virtualisable VLs found */
+ sve_max_virtualisable_vl = SVE_VQ_MAX;
+ else if (WARN_ON(b == SVE_VQ_MAX - 1))
+ /* No virtualisable VLs? This is architecturally forbidden. */
+ sve_max_virtualisable_vl = SVE_VQ_MIN;
+ else /* b + 1 < SVE_VQ_MAX */
+ sve_max_virtualisable_vl = sve_vl_from_vq(__bit_to_vq(b + 1));
+
+ if (sve_max_virtualisable_vl > sve_max_vl)
+ sve_max_virtualisable_vl = sve_max_vl;
+
+ pr_info("SVE: maximum available vector length %u bytes per vector\n",
+ sve_max_vl);
+ pr_info("SVE: default vector length %u bytes per vector\n",
+ get_sve_default_vl());
+
+ /* KVM decides whether to support mismatched systems. Just warn here: */
+ if (sve_max_virtualisable_vl < sve_max_vl)
+ pr_warn("SVE: unvirtualisable vector lengths present\n");
+
+ sve_efi_setup();
+}
+
+/*
+ * Called from the put_task_struct() path, which cannot get here
+ * unless dead_task is really dead and not schedulable.
+ */
+void fpsimd_release_task(struct task_struct *dead_task)
+{
+ __sve_free(dead_task);
+}
+
+#endif /* CONFIG_ARM64_SVE */
+
+/*
+ * Trapped SVE access
+ *
+ * Storage is allocated for the full SVE state, the current FPSIMD
+ * register contents are migrated across, and TIF_SVE is set so that
+ * the SVE access trap will be disabled the next time this task
+ * reaches ret_to_user.
+ *
+ * TIF_SVE should be clear on entry: otherwise, fpsimd_restore_current_state()
+ * would have disabled the SVE access trap for userspace during
+ * ret_to_user, making an SVE access trap impossible in that case.
+ */
+void do_sve_acc(unsigned int esr, struct pt_regs *regs)
+{
+ /* Even if we chose not to use SVE, the hardware could still trap: */
+ if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) {
+ force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
+ return;
+ }
+
+ sve_alloc(current);
+
+ get_cpu_fpsimd_context();
+
+ fpsimd_save();
+
+ /* Force ret_to_user to reload the registers: */
+ fpsimd_flush_task_state(current);
+
+ fpsimd_to_sve(current);
+ if (test_and_set_thread_flag(TIF_SVE))
+ WARN_ON(1); /* SVE access shouldn't have trapped */
+
+ put_cpu_fpsimd_context();
+}
+
+/*
+ * Trapped FP/ASIMD access.
+ */
+void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
+{
+ /* TODO: implement lazy context saving/restoring */
+ WARN_ON(1);
+}
+
+/*
+ * Raise a SIGFPE for the current process.
+ */
+void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
+{
+ unsigned int si_code = FPE_FLTUNK;
+
+ if (esr & ESR_ELx_FP_EXC_TFV) {
+ if (esr & FPEXC_IOF)
+ si_code = FPE_FLTINV;
+ else if (esr & FPEXC_DZF)
+ si_code = FPE_FLTDIV;
+ else if (esr & FPEXC_OFF)
+ si_code = FPE_FLTOVF;
+ else if (esr & FPEXC_UFF)
+ si_code = FPE_FLTUND;
+ else if (esr & FPEXC_IXF)
+ si_code = FPE_FLTRES;
+ }
+
+ send_sig_fault(SIGFPE, si_code,
+ (void __user *)instruction_pointer(regs),
+ current);
+}
+
+void fpsimd_thread_switch(struct task_struct *next)
+{
+ bool wrong_task, wrong_cpu;
+
+ if (!system_supports_fpsimd())
+ return;
+
+ __get_cpu_fpsimd_context();
+
+ /* Save unsaved fpsimd state, if any: */
+ fpsimd_save();
+
+ /*
+ * Fix up TIF_FOREIGN_FPSTATE to correctly describe next's
+ * state. For kernel threads, FPSIMD registers are never loaded
+ * and wrong_task and wrong_cpu will always be true.
+ */
+ wrong_task = __this_cpu_read(fpsimd_last_state.st) !=
+ &next->thread.uw.fpsimd_state;
+ wrong_cpu = next->thread.fpsimd_cpu != smp_processor_id();
+
+ update_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE,
+ wrong_task || wrong_cpu);
+
+ __put_cpu_fpsimd_context();
+}
+
+void fpsimd_flush_thread(void)
+{
+ int vl, supported_vl;
+
+ if (!system_supports_fpsimd())
+ return;
+
+ get_cpu_fpsimd_context();
+
+ fpsimd_flush_task_state(current);
+ memset(&current->thread.uw.fpsimd_state, 0,
+ sizeof(current->thread.uw.fpsimd_state));
+
+ if (system_supports_sve()) {
+ clear_thread_flag(TIF_SVE);
+ sve_free(current);
+
+ /*
+ * Reset the task vector length as required.
+ * This is where we ensure that all user tasks have a valid
+ * vector length configured: no kernel task can become a user
+ * task without an exec and hence a call to this function.
+ * By the time the first call to this function is made, all
+ * early hardware probing is complete, so __sve_default_vl
+ * should be valid.
+ * If a bug causes this to go wrong, we make some noise and
+ * try to fudge thread.sve_vl to a safe value here.
+ */
+ vl = current->thread.sve_vl_onexec ?
+ current->thread.sve_vl_onexec : get_sve_default_vl();
+
+ if (WARN_ON(!sve_vl_valid(vl)))
+ vl = SVE_VL_MIN;
+
+ supported_vl = find_supported_vector_length(vl);
+ if (WARN_ON(supported_vl != vl))
+ vl = supported_vl;
+
+ current->thread.sve_vl = vl;
+
+ /*
+ * If the task is not set to inherit, ensure that the vector
+ * length will be reset by a subsequent exec:
+ */
+ if (!test_thread_flag(TIF_SVE_VL_INHERIT))
+ current->thread.sve_vl_onexec = 0;
+ }
+
+ put_cpu_fpsimd_context();
+}
+
+/*
+ * Save the userland FPSIMD state of 'current' to memory, but only if the state
+ * currently held in the registers does in fact belong to 'current'
+ */
+void fpsimd_preserve_current_state(void)
+{
+ if (!system_supports_fpsimd())
+ return;
+
+ get_cpu_fpsimd_context();
+ fpsimd_save();
+ put_cpu_fpsimd_context();
+}
+
+/*
+ * Like fpsimd_preserve_current_state(), but ensure that
+ * current->thread.uw.fpsimd_state is updated so that it can be copied to
+ * the signal frame.
+ */
+void fpsimd_signal_preserve_current_state(void)
+{
+ fpsimd_preserve_current_state();
+ if (system_supports_sve() && test_thread_flag(TIF_SVE))
+ sve_to_fpsimd(current);
+}
+
+/*
+ * Associate current's FPSIMD context with this cpu
+ * The caller must have ownership of the cpu FPSIMD context before calling
+ * this function.
+ */
+void fpsimd_bind_task_to_cpu(void)
+{
+ struct fpsimd_last_state_struct *last =
+ this_cpu_ptr(&fpsimd_last_state);
+
+ WARN_ON(!system_supports_fpsimd());
+ last->st = &current->thread.uw.fpsimd_state;
+ last->sve_state = current->thread.sve_state;
+ last->sve_vl = current->thread.sve_vl;
+ current->thread.fpsimd_cpu = smp_processor_id();
+
+ if (system_supports_sve()) {
+ /* Toggle SVE trapping for userspace if needed */
+ if (test_thread_flag(TIF_SVE))
+ sve_user_enable();
+ else
+ sve_user_disable();
+
+ /* Serialised by exception return to user */
+ }
+}
+
+void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
+ unsigned int sve_vl)
+{
+ struct fpsimd_last_state_struct *last =
+ this_cpu_ptr(&fpsimd_last_state);
+
+ WARN_ON(!system_supports_fpsimd());
+ WARN_ON(!in_softirq() && !irqs_disabled());
+
+ last->st = st;
+ last->sve_state = sve_state;
+ last->sve_vl = sve_vl;
+}
+
+/*
+ * Load the userland FPSIMD state of 'current' from memory, but only if the
+ * FPSIMD state already held in the registers is /not/ the most recent FPSIMD
+ * state of 'current'
+ */
+void fpsimd_restore_current_state(void)
+{
+ /*
+ * For the tasks that were created before we detected the absence of
+ * FP/SIMD, the TIF_FOREIGN_FPSTATE could be set via fpsimd_thread_switch(),
+ * e.g. init. This could then be inherited by child processes.
+ * If we later detect that the system doesn't support FP/SIMD,
+ * we must clear the flag for all the tasks to indicate that the
+ * FPSTATE is clean (as we can't have one) to avoid looping forever in
+ * do_notify_resume().
+ */
+ if (!system_supports_fpsimd()) {
+ clear_thread_flag(TIF_FOREIGN_FPSTATE);
+ return;
+ }
+
+ get_cpu_fpsimd_context();
+
+ if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
+ task_fpsimd_load();
+ fpsimd_bind_task_to_cpu();
+ }
+
+ put_cpu_fpsimd_context();
+}
+
+/*
+ * Load an updated userland FPSIMD state for 'current' from memory and set the
+ * flag that indicates that the FPSIMD register contents are the most recent
+ * FPSIMD state of 'current'
+ */
+void fpsimd_update_current_state(struct user_fpsimd_state const *state)
+{
+ if (WARN_ON(!system_supports_fpsimd()))
+ return;
+
+ get_cpu_fpsimd_context();
+
+ current->thread.uw.fpsimd_state = *state;
+ if (system_supports_sve() && test_thread_flag(TIF_SVE))
+ fpsimd_to_sve(current);
+
+ task_fpsimd_load();
+ fpsimd_bind_task_to_cpu();
+
+ clear_thread_flag(TIF_FOREIGN_FPSTATE);
+
+ put_cpu_fpsimd_context();
+}
+
+/*
+ * Invalidate live CPU copies of task t's FPSIMD state
+ *
+ * This function may be called with preemption enabled. The barrier()
+ * ensures that the assignment to fpsimd_cpu is visible to any
+ * preemption/softirq that could race with set_tsk_thread_flag(), so
+ * that TIF_FOREIGN_FPSTATE cannot be spuriously re-cleared.
+ *
+ * The final barrier ensures that TIF_FOREIGN_FPSTATE is seen set by any
+ * subsequent code.
+ */
+void fpsimd_flush_task_state(struct task_struct *t)
+{
+ t->thread.fpsimd_cpu = NR_CPUS;
+ /*
+ * If we don't support fpsimd, bail out after we have
+ * reset the fpsimd_cpu for this task and clear the
+ * FPSTATE.
+ */
+ if (!system_supports_fpsimd())
+ return;
+ barrier();
+ set_tsk_thread_flag(t, TIF_FOREIGN_FPSTATE);
+
+ barrier();
+}
+
+/*
+ * Invalidate any task's FPSIMD state that is present on this cpu.
+ * The FPSIMD context should be acquired with get_cpu_fpsimd_context()
+ * before calling this function.
+ */
+static void fpsimd_flush_cpu_state(void)
+{
+ WARN_ON(!system_supports_fpsimd());
+ __this_cpu_write(fpsimd_last_state.st, NULL);
+ set_thread_flag(TIF_FOREIGN_FPSTATE);
+}
+
+/*
+ * Save the FPSIMD state to memory and invalidate cpu view.
+ * This function must be called with preemption disabled.
+ */
+void fpsimd_save_and_flush_cpu_state(void)
+{
+ if (!system_supports_fpsimd())
+ return;
+ WARN_ON(preemptible());
+ __get_cpu_fpsimd_context();
+ fpsimd_save();
+ fpsimd_flush_cpu_state();
+ __put_cpu_fpsimd_context();
+}
+
+#ifdef CONFIG_KERNEL_MODE_NEON
+
+/*
+ * Kernel-side NEON support functions
+ */
+
+/*
+ * kernel_neon_begin(): obtain the CPU FPSIMD registers for use by the calling
+ * context
+ *
+ * Must not be called unless may_use_simd() returns true.
+ * Task context in the FPSIMD registers is saved back to memory as necessary.
+ *
+ * A matching call to kernel_neon_end() must be made before returning from the
+ * calling context.
+ *
+ * The caller may freely use the FPSIMD registers until kernel_neon_end() is
+ * called.
+ */
+void kernel_neon_begin(void)
+{
+ if (WARN_ON(!system_supports_fpsimd()))
+ return;
+
+ BUG_ON(!may_use_simd());
+
+ get_cpu_fpsimd_context();
+
+ /* Save unsaved fpsimd state, if any: */
+ fpsimd_save();
+
+ /* Invalidate any task state remaining in the fpsimd regs: */
+ fpsimd_flush_cpu_state();
+}
+EXPORT_SYMBOL(kernel_neon_begin);
+
+/*
+ * kernel_neon_end(): give the CPU FPSIMD registers back to the current task
+ *
+ * Must be called from a context in which kernel_neon_begin() was previously
+ * called, with no call to kernel_neon_end() in the meantime.
+ *
+ * The caller must not use the FPSIMD registers after this function is called,
+ * unless kernel_neon_begin() is called again in the meantime.
+ */
+void kernel_neon_end(void)
+{
+ if (!system_supports_fpsimd())
+ return;
+
+ put_cpu_fpsimd_context();
+}
+EXPORT_SYMBOL(kernel_neon_end);
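+
+/*
+ * Illustrative usage of the API above by other kernel code (a sketch,
+ * e.g. a crypto driver; no new caller is added here):
+ *
+ *	if (may_use_simd()) {
+ *		kernel_neon_begin();
+ *		// ... use FPSIMD/NEON registers ...
+ *		kernel_neon_end();
+ *	} else {
+ *		// ... fall back to a scalar implementation ...
+ *	}
+ */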
+
+#ifdef CONFIG_EFI
+
+static DEFINE_PER_CPU(struct user_fpsimd_state, efi_fpsimd_state);
+static DEFINE_PER_CPU(bool, efi_fpsimd_state_used);
+static DEFINE_PER_CPU(bool, efi_sve_state_used);
+
+/*
+ * EFI runtime services support functions
+ *
+ * The ABI for EFI runtime services allows EFI to use FPSIMD during the call.
+ * This means that for EFI (and only for EFI), we have to assume that FPSIMD
+ * is always used rather than being an optional accelerator.
+ *
+ * These functions provide the necessary support for ensuring FPSIMD
+ * save/restore in the contexts from which EFI is used.
+ *
+ * Do not use them for any other purpose -- if tempted to do so, you are
+ * either doing something wrong or you need to propose some refactoring.
+ */
+
+/*
+ * __efi_fpsimd_begin(): prepare FPSIMD for making an EFI runtime services call
+ */
+void __efi_fpsimd_begin(void)
+{
+ if (!system_supports_fpsimd())
+ return;
+
+ WARN_ON(preemptible());
+
+ if (may_use_simd()) {
+ kernel_neon_begin();
+ } else {
+ /*
+ * If !efi_sve_state, SVE can't be in use yet and doesn't need
+ * preserving:
+ */
+ if (system_supports_sve() && likely(efi_sve_state)) {
+ char *sve_state = this_cpu_ptr(efi_sve_state);
+
+ __this_cpu_write(efi_sve_state_used, true);
+
+ sve_save_state(sve_state + sve_ffr_offset(sve_max_vl),
+ &this_cpu_ptr(&efi_fpsimd_state)->fpsr);
+ } else {
+ fpsimd_save_state(this_cpu_ptr(&efi_fpsimd_state));
+ }
+
+ __this_cpu_write(efi_fpsimd_state_used, true);
+ }
+}
+
+/*
+ * __efi_fpsimd_end(): clean up FPSIMD after an EFI runtime services call
+ */
+void __efi_fpsimd_end(void)
+{
+ if (!system_supports_fpsimd())
+ return;
+
+ if (!__this_cpu_xchg(efi_fpsimd_state_used, false)) {
+ kernel_neon_end();
+ } else {
+ if (system_supports_sve() &&
+ likely(__this_cpu_read(efi_sve_state_used))) {
+ char const *sve_state = this_cpu_ptr(efi_sve_state);
+
+ sve_load_state(sve_state + sve_ffr_offset(sve_max_vl),
+ &this_cpu_ptr(&efi_fpsimd_state)->fpsr,
+ sve_vq_from_vl(sve_get_vl()) - 1);
+
+ __this_cpu_write(efi_sve_state_used, false);
+ } else {
+ fpsimd_load_state(this_cpu_ptr(&efi_fpsimd_state));
+ }
+ }
+}
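+
+/*
+ * Illustrative pairing of the two helpers above, as done around an EFI
+ * runtime service call (a sketch of the pattern, not a new caller):
+ *
+ *	__efi_fpsimd_begin();
+ *	// ... invoke the EFI runtime service ...
+ *	__efi_fpsimd_end();
+ */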
+
+#endif /* CONFIG_EFI */
+
+#endif /* CONFIG_KERNEL_MODE_NEON */
+
+#ifdef CONFIG_CPU_PM
+static int fpsimd_cpu_pm_notifier(struct notifier_block *self,
+ unsigned long cmd, void *v)
+{
+ switch (cmd) {
+ case CPU_PM_ENTER:
+ fpsimd_save_and_flush_cpu_state();
+ break;
+ case CPU_PM_EXIT:
+ break;
+ case CPU_PM_ENTER_FAILED:
+ default:
+ return NOTIFY_DONE;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block fpsimd_cpu_pm_notifier_block = {
+ .notifier_call = fpsimd_cpu_pm_notifier,
+};
+
+static void __init fpsimd_pm_init(void)
+{
+ cpu_pm_register_notifier(&fpsimd_cpu_pm_notifier_block);
+}
+
+#else
+static inline void fpsimd_pm_init(void) { }
+#endif /* CONFIG_CPU_PM */
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int fpsimd_cpu_dead(unsigned int cpu)
+{
+ per_cpu(fpsimd_last_state.st, cpu) = NULL;
+ return 0;
+}
+
+static inline void fpsimd_hotplug_init(void)
+{
+ cpuhp_setup_state_nocalls(CPUHP_ARM64_FPSIMD_DEAD, "arm64/fpsimd:dead",
+ NULL, fpsimd_cpu_dead);
+}
+
+#else
+static inline void fpsimd_hotplug_init(void) { }
+#endif
+
+/*
+ * FP/SIMD support code initialisation.
+ */
+static int __init fpsimd_init(void)
+{
+ if (cpu_have_named_feature(FP)) {
+ fpsimd_pm_init();
+ fpsimd_hotplug_init();
+ } else {
+ pr_notice("Floating-point is not implemented\n");
+ }
+
+ if (!cpu_have_named_feature(ASIMD))
+ pr_notice("Advanced SIMD is not implemented\n");
+
+ return sve_sysctl_init();
+}
+core_initcall(fpsimd_init);
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
new file mode 100644
index 000000000..402a24f84
--- /dev/null
+++ b/arch/arm64/kernel/ftrace.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * arch/arm64/kernel/ftrace.c
+ *
+ * Copyright (C) 2013 Linaro Limited
+ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
+ */
+
+#include <linux/ftrace.h>
+#include <linux/module.h>
+#include <linux/swab.h>
+#include <linux/uaccess.h>
+
+#include <asm/cacheflush.h>
+#include <asm/debug-monitors.h>
+#include <asm/ftrace.h>
+#include <asm/insn.h>
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+/*
+ * Replace a single instruction, which may be a branch or NOP.
+ * If @validate == true, a replaced instruction is checked against 'old'.
+ */
+static int ftrace_modify_code(unsigned long pc, u32 old, u32 new,
+ bool validate)
+{
+ u32 replaced;
+
+ /*
+ * Note:
+ * We are paranoid about modifying text, as if a bug were to happen, it
+ * could cause us to read or write to someplace that could cause harm.
+ * Carefully read and modify the code with aarch64_insn_*() which uses
+ * probe_kernel_*(), and make sure what we read is what we expected it
+ * to be before modifying it.
+ */
+ if (validate) {
+ if (aarch64_insn_read((void *)pc, &replaced))
+ return -EFAULT;
+
+ if (replaced != old)
+ return -EINVAL;
+ }
+ if (aarch64_insn_patch_text_nosync((void *)pc, new))
+ return -EPERM;
+
+ return 0;
+}
+
+/*
+ * Replace tracer function in ftrace_caller()
+ */
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+ unsigned long pc;
+ u32 new;
+
+ pc = (unsigned long)&ftrace_call;
+ new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func,
+ AARCH64_INSN_BRANCH_LINK);
+
+ return ftrace_modify_code(pc, 0, new, false);
+}
+
+static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr)
+{
+#ifdef CONFIG_ARM64_MODULE_PLTS
+ struct plt_entry *plt = mod->arch.ftrace_trampolines;
+
+ if (addr == FTRACE_ADDR)
+ return &plt[FTRACE_PLT_IDX];
+ if (addr == FTRACE_REGS_ADDR &&
+ IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
+ return &plt[FTRACE_REGS_PLT_IDX];
+#endif
+ return NULL;
+}
+
+/*
+ * Find the address the callsite must branch to in order to reach '*addr'.
+ *
+ * Due to the limited range of 'BL' instructions, modules may be placed too far
+ * away to branch directly and must use a PLT.
+ *
+ * Returns true when '*addr' contains a reachable target address, or has been
+ * modified to contain a PLT address. Returns false otherwise.
+ */
+static bool ftrace_find_callable_addr(struct dyn_ftrace *rec,
+ struct module *mod,
+ unsigned long *addr)
+{
+ unsigned long pc = rec->ip;
+ long offset = (long)*addr - (long)pc;
+ struct plt_entry *plt;
+
+ /*
+ * When the target is within range of the 'BL' instruction, use 'addr'
+ * as-is and branch to that directly.
+ */
+ if (offset >= -SZ_128M && offset < SZ_128M)
+ return true;
+
+ /*
+ * When the target is outside of the range of a 'BL' instruction, we
+ * must use a PLT to reach it. We can only place PLTs for modules, and
+ * only when module PLT support is built-in.
+ */
+ if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
+ return false;
+
+ /*
+ * 'mod' is only set at module load time, but if we end up
+ * dealing with an out-of-range condition, we can assume it
+ * is due to a module being loaded far away from the kernel.
+ *
+ * NOTE: __module_text_address() must be called with preemption
+ * disabled, but we can rely on ftrace_lock to ensure that 'mod'
+ * retains its validity throughout the remainder of this code.
+ */
+ if (!mod) {
+ preempt_disable();
+ mod = __module_text_address(pc);
+ preempt_enable();
+ }
+
+ if (WARN_ON(!mod))
+ return false;
+
+ plt = get_ftrace_plt(mod, *addr);
+ if (!plt) {
+ pr_err("ftrace: no module PLT for %ps\n", (void *)*addr);
+ return false;
+ }
+
+ *addr = (unsigned long)plt;
+ return true;
+}
+
+/*
+ * Turn on the call to ftrace_caller() in instrumented function
+ */
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned long pc = rec->ip;
+ u32 old, new;
+
+ if (!ftrace_find_callable_addr(rec, NULL, &addr))
+ return -EINVAL;
+
+ old = aarch64_insn_gen_nop();
+ new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
+
+ return ftrace_modify_code(pc, old, new, true);
+}
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+ unsigned long addr)
+{
+ unsigned long pc = rec->ip;
+ u32 old, new;
+
+ if (!ftrace_find_callable_addr(rec, NULL, &old_addr))
+ return -EINVAL;
+ if (!ftrace_find_callable_addr(rec, NULL, &addr))
+ return -EINVAL;
+
+ old = aarch64_insn_gen_branch_imm(pc, old_addr,
+ AARCH64_INSN_BRANCH_LINK);
+ new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
+
+ return ftrace_modify_code(pc, old, new, true);
+}
+
+/*
+ * The compiler has inserted two NOPs before the regular function prologue.
+ * All instrumented functions follow the AAPCS, so x0-x8 and x19-x30 are live,
+ * and x9-x18 are free for our use.
+ *
+ * At runtime we want to be able to swing a single NOP <-> BL to enable or
+ * disable the ftrace call. The BL requires us to save the original LR value,
+ * so here we insert a <MOV X9, LR> over the first NOP so the instructions
+ * before the regular prologue are:
+ *
+ * | Compiled | Disabled | Enabled |
+ * +----------+------------+------------+
+ * | NOP | MOV X9, LR | MOV X9, LR |
+ * | NOP | NOP | BL <entry> |
+ *
+ * The LR value will be recovered by ftrace_regs_entry, and restored into LR
+ * before returning to the regular function prologue. When a function is not
+ * being traced, the MOV is not harmful given x9 is not live per the AAPCS.
+ *
+ * Note: ftrace_process_locs() has pre-adjusted rec->ip to be the address of
+ * the BL.
+ */
+int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
+{
+ unsigned long pc = rec->ip - AARCH64_INSN_SIZE;
+ u32 old, new;
+
+ old = aarch64_insn_gen_nop();
+ new = aarch64_insn_gen_move_reg(AARCH64_INSN_REG_9,
+ AARCH64_INSN_REG_LR,
+ AARCH64_INSN_VARIANT_64BIT);
+ return ftrace_modify_code(pc, old, new, true);
+}
+#endif
+
+/*
+ * Turn off the call to ftrace_caller() in instrumented function
+ */
+int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
+ unsigned long addr)
+{
+ unsigned long pc = rec->ip;
+ u32 old = 0, new;
+
+ new = aarch64_insn_gen_nop();
+
+ /*
+ * When using mcount, callsites in modules may have been initialized to
+ * call an arbitrary module PLT (which redirects to the _mcount stub)
+ * rather than the ftrace PLT we'll use at runtime (which redirects to
+ * the ftrace trampoline). We can ignore the old PLT when initializing
+ * the callsite.
+ *
+ * Note: 'mod' is only set at module load time.
+ */
+ if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) &&
+ IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && mod) {
+ return aarch64_insn_patch_text_nosync((void *)pc, new);
+ }
+
+ if (!ftrace_find_callable_addr(rec, mod, &addr))
+ return -EINVAL;
+
+ old = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
+
+ return ftrace_modify_code(pc, old, new, true);
+}
+
+void arch_ftrace_update_code(int command)
+{
+ command |= FTRACE_MAY_SLEEP;
+ ftrace_modify_all_code(command);
+}
+
+int __init ftrace_dyn_arch_init(void)
+{
+ return 0;
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+/*
+ * function_graph tracer expects ftrace_return_to_handler() to be called
+ * on the way back to parent. For this purpose, this function is called
+ * in _mcount() or ftrace_caller() to replace the return address (*parent) on
+ * the call stack with return_to_handler.
+ *
+ * Note that @frame_pointer is used only for sanity check later.
+ */
+void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
+ unsigned long frame_pointer)
+{
+ unsigned long return_hooker = (unsigned long)&return_to_handler;
+ unsigned long old;
+
+ if (unlikely(atomic_read(&current->tracing_graph_pause)))
+ return;
+
+ /*
+ * Note:
+ * No protection against faulting at *parent, which may be seen
+ * on other archs. It's unlikely on AArch64.
+ */
+ old = *parent;
+
+ if (!function_graph_enter(old, self_addr, frame_pointer, NULL))
+ *parent = return_hooker;
+}
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+/*
+ * Turn on/off the call to ftrace_graph_caller() in ftrace_caller()
+ * depending on @enable.
+ */
+static int ftrace_modify_graph_caller(bool enable)
+{
+ unsigned long pc = (unsigned long)&ftrace_graph_call;
+ u32 branch, nop;
+
+ branch = aarch64_insn_gen_branch_imm(pc,
+ (unsigned long)ftrace_graph_caller,
+ AARCH64_INSN_BRANCH_NOLINK);
+ nop = aarch64_insn_gen_nop();
+
+ if (enable)
+ return ftrace_modify_code(pc, nop, branch, true);
+ else
+ return ftrace_modify_code(pc, branch, nop, true);
+}
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+ return ftrace_modify_graph_caller(true);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+ return ftrace_modify_graph_caller(false);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
new file mode 100644
index 000000000..351ee64c7
--- /dev/null
+++ b/arch/arm64/kernel/head.S
@@ -0,0 +1,1005 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Low-level CPU initialisation
+ * Based on arch/arm/kernel/head.S
+ *
+ * Copyright (C) 1994-2002 Russell King
+ * Copyright (C) 2003-2012 ARM Ltd.
+ * Authors: Catalin Marinas <catalin.marinas@arm.com>
+ * Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <linux/irqchip/arm-gic-v3.h>
+#include <linux/pgtable.h>
+
+#include <asm/asm_pointer_auth.h>
+#include <asm/assembler.h>
+#include <asm/boot.h>
+#include <asm/ptrace.h>
+#include <asm/asm-offsets.h>
+#include <asm/cache.h>
+#include <asm/cputype.h>
+#include <asm/elf.h>
+#include <asm/image.h>
+#include <asm/kernel-pgtable.h>
+#include <asm/kvm_arm.h>
+#include <asm/memory.h>
+#include <asm/pgtable-hwdef.h>
+#include <asm/page.h>
+#include <asm/scs.h>
+#include <asm/smp.h>
+#include <asm/sysreg.h>
+#include <asm/thread_info.h>
+#include <asm/virt.h>
+
+#include "efi-header.S"
+
+#define __PHYS_OFFSET KERNEL_START
+
+#if (PAGE_OFFSET & 0x1fffff) != 0
+#error PAGE_OFFSET must be at least 2MB aligned
+#endif
+
+/*
+ * Kernel startup entry point.
+ * ---------------------------
+ *
+ * The requirements are:
+ * MMU = off, D-cache = off, I-cache = on or off,
+ * x0 = physical address to the FDT blob.
+ *
+ * This code is mostly position independent so you call this at
+ * __pa(PAGE_OFFSET).
+ *
+ * Note that the callee-saved registers are used for storing variables
+ * that are useful before the MMU is enabled. The allocations are described
+ * in the entry routines.
+ */
+ __HEAD
+_head:
+ /*
+ * DO NOT MODIFY. Image header expected by Linux boot-loaders.
+ */
+#ifdef CONFIG_EFI
+ /*
+ * This add instruction has no meaningful effect except that
+ * its opcode forms the magic "MZ" signature required by UEFI.
+ */
+ add x13, x18, #0x16
+ b primary_entry
+#else
+ b primary_entry // branch to kernel start, magic
+ .long 0 // reserved
+#endif
+ .quad 0 // Image load offset from start of RAM, little-endian
+ le64sym _kernel_size_le // Effective size of kernel image, little-endian
+ le64sym _kernel_flags_le // Informative flags, little-endian
+ .quad 0 // reserved
+ .quad 0 // reserved
+ .quad 0 // reserved
+ .ascii ARM64_IMAGE_MAGIC // Magic number
+#ifdef CONFIG_EFI
+ .long pe_header - _head // Offset to the PE header.
+
+pe_header:
+ __EFI_PE_HEADER
+#else
+ .long 0 // reserved
+#endif
+
+ __INIT
+
+ /*
+ * The following callee-saved general-purpose registers are used on the
+ * primary low-level boot path:
+ *
+ * Register Scope Purpose
+ * x21 primary_entry() .. start_kernel() FDT pointer passed at boot in x0
+ * x23 primary_entry() .. start_kernel() physical misalignment/KASLR offset
+ * x28 __create_page_tables() callee preserved temp register
+ * x19/x20 __primary_switch() callee preserved temp registers
+ * x24 __primary_switch() .. relocate_kernel() current RELR displacement
+ */
+SYM_CODE_START(primary_entry)
+ bl preserve_boot_args
+ bl el2_setup // Drop to EL1, w0=cpu_boot_mode
+ adrp x23, __PHYS_OFFSET
+ and x23, x23, MIN_KIMG_ALIGN - 1 // KASLR offset, defaults to 0
+ bl set_cpu_boot_mode_flag
+ bl __create_page_tables
+ /*
+ * The following calls CPU setup code, see arch/arm64/mm/proc.S for
+ * details.
+ * On return, the CPU will be ready for the MMU to be turned on and
+ * the TCR will have been set.
+ */
+ bl __cpu_setup // initialise processor
+ b __primary_switch
+SYM_CODE_END(primary_entry)
+
+/*
+ * Preserve the arguments passed by the bootloader in x0 .. x3
+ */
+SYM_CODE_START_LOCAL(preserve_boot_args)
+ mov x21, x0 // x21=FDT
+
+ adr_l x0, boot_args // record the contents of
+ stp x21, x1, [x0] // x0 .. x3 at kernel entry
+ stp x2, x3, [x0, #16]
+
+ dmb sy // needed before dc ivac with
+ // MMU off
+
+ mov x1, #0x20 // 4 x 8 bytes
+ b __inval_dcache_area // tail call
+SYM_CODE_END(preserve_boot_args)
+
+/*
+ * Macro to create a table entry to the next page.
+ *
+ * tbl: page table address
+ * virt: virtual address
+ * shift: #imm page table shift
+ * ptrs: #imm pointers per table page
+ *
+ * Preserves: virt
+ * Corrupts: ptrs, tmp1, tmp2
+ * Returns: tbl -> next level table page address
+ */
+ .macro create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
+ add \tmp1, \tbl, #PAGE_SIZE
+ phys_to_pte \tmp2, \tmp1
+ orr \tmp2, \tmp2, #PMD_TYPE_TABLE // address of next table and entry type
+ lsr \tmp1, \virt, #\shift
+ sub \ptrs, \ptrs, #1
+ and \tmp1, \tmp1, \ptrs // table index
+ str \tmp2, [\tbl, \tmp1, lsl #3]
+ add \tbl, \tbl, #PAGE_SIZE // next level table page
+ .endm
+
+/*
+ * Macro to populate page table entries, these entries can be pointers to the next level
+ * or last level entries pointing to physical memory.
+ *
+ * tbl: page table address
+ * rtbl: pointer to page table or physical memory
+ * index: start index to write
+ * eindex: end index to write - [index, eindex] written to
+ * flags: flags for pagetable entry to or in
+ * inc: increment to rtbl between each entry
+ * tmp1: temporary variable
+ *
+ * Preserves: tbl, eindex, flags, inc
+ * Corrupts: index, tmp1
+ * Returns: rtbl
+ */
+ .macro populate_entries, tbl, rtbl, index, eindex, flags, inc, tmp1
+.Lpe\@: phys_to_pte \tmp1, \rtbl
+ orr \tmp1, \tmp1, \flags // tmp1 = table entry
+ str \tmp1, [\tbl, \index, lsl #3]
+ add \rtbl, \rtbl, \inc // rtbl = pa next level
+ add \index, \index, #1
+ cmp \index, \eindex
+ b.ls .Lpe\@
+ .endm
+
+/*
+ * Compute indices of table entries from virtual address range. If multiple entries
+ * were needed in the previous page table level then the next page table level is assumed
+ * to be composed of multiple pages. (This effectively scales the end index).
+ *
+ * vstart: virtual address of start of range
+ * vend: virtual address of end of range - we map [vstart, vend]
+ * shift: shift used to transform virtual address into index
+ * ptrs: number of entries in page table
+ * istart: index in table corresponding to vstart
+ * iend: index in table corresponding to vend
+ * count: On entry: how many extra entries were required in previous level, scales
+ * our end index.
+ * On exit: returns how many extra entries required for next page table level
+ *
+ * Preserves: vstart, vend, shift, ptrs
+ * Returns: istart, iend, count
+ */
+ .macro compute_indices, vstart, vend, shift, ptrs, istart, iend, count
+ lsr \iend, \vend, \shift
+ mov \istart, \ptrs
+ sub \istart, \istart, #1
+ and \iend, \iend, \istart // iend = (vend >> shift) & (ptrs - 1)
+ mov \istart, \ptrs
+ mul \istart, \istart, \count
+ add \iend, \iend, \istart // iend += (count - 1) * ptrs
+ // our entries span multiple tables
+
+ lsr \istart, \vstart, \shift
+ mov \count, \ptrs
+ sub \count, \count, #1
+ and \istart, \istart, \count
+
+ sub \count, \iend, \istart
+ .endm
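+
+/*
+ * Worked example (illustrative): with 4K pages and 48-bit VAs, PGDIR_SHIFT is
+ * 39 and \ptrs is 512 at the top level. Mapping a 2MB image that lies within
+ * a single 512GB PGD slot, with \count == 0 on entry, gives istart == iend
+ * and returns \count == 0, so only one next-level table page is required.
+ */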
+
+/*
+ * Map memory for the specified virtual address range. Each level of page table needed supports
+ * multiple entries. If a level requires n entries the next page table level is assumed to be
+ * formed from n pages.
+ *
+ * tbl: location of page table
+ * rtbl: address to be used for first level page table entry (typically tbl + PAGE_SIZE)
+ * vstart: virtual address of start of range
+ * vend: virtual address of end of range - we map [vstart, vend - 1]
+ * flags: flags to use to map last level entries
+ * phys: physical address corresponding to vstart - physical memory is contiguous
+ * pgds: the number of pgd entries
+ *
+ * Temporaries: istart, iend, tmp, count, sv - these need to be different registers
+ * Preserves: vstart, flags
+ * Corrupts: tbl, rtbl, vend, istart, iend, tmp, count, sv
+ */
+ .macro map_memory, tbl, rtbl, vstart, vend, flags, phys, pgds, istart, iend, tmp, count, sv
+ sub \vend, \vend, #1
+ add \rtbl, \tbl, #PAGE_SIZE
+ mov \sv, \rtbl
+ mov \count, #0
+ compute_indices \vstart, \vend, #PGDIR_SHIFT, \pgds, \istart, \iend, \count
+ populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
+ mov \tbl, \sv
+ mov \sv, \rtbl
+
+#if SWAPPER_PGTABLE_LEVELS > 3
+ compute_indices \vstart, \vend, #PUD_SHIFT, #PTRS_PER_PUD, \istart, \iend, \count
+ populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
+ mov \tbl, \sv
+ mov \sv, \rtbl
+#endif
+
+#if SWAPPER_PGTABLE_LEVELS > 2
+ compute_indices \vstart, \vend, #SWAPPER_TABLE_SHIFT, #PTRS_PER_PMD, \istart, \iend, \count
+ populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
+ mov \tbl, \sv
+#endif
+
+ compute_indices \vstart, \vend, #SWAPPER_BLOCK_SHIFT, #PTRS_PER_PTE, \istart, \iend, \count
+ bic \count, \phys, #SWAPPER_BLOCK_SIZE - 1
+ populate_entries \tbl, \count, \istart, \iend, \flags, #SWAPPER_BLOCK_SIZE, \tmp
+ .endm
+
+/*
+ * Set up the initial page tables. We only set up the barest amount which is
+ * required to get the kernel running. The following sections are required:
+ * - identity mapping to enable the MMU (low address, TTBR0)
+ * - first few MB of the kernel linear mapping to jump to once the MMU has
+ * been enabled
+ */
+SYM_FUNC_START_LOCAL(__create_page_tables)
+ mov x28, lr
+
+ /*
+ * Invalidate the init page tables to avoid potential dirty cache lines
+ * being evicted. Other page tables are allocated in rodata as part of
+ * the kernel image, and thus are clean to the PoC per the boot
+ * protocol.
+ */
+ adrp x0, init_pg_dir
+ adrp x1, init_pg_end
+ sub x1, x1, x0
+ bl __inval_dcache_area
+
+ /*
+ * Clear the init page tables.
+ */
+ adrp x0, init_pg_dir
+ adrp x1, init_pg_end
+ sub x1, x1, x0
+1: stp xzr, xzr, [x0], #16
+ stp xzr, xzr, [x0], #16
+ stp xzr, xzr, [x0], #16
+ stp xzr, xzr, [x0], #16
+ subs x1, x1, #64
+ b.ne 1b
+
+ mov x7, SWAPPER_MM_MMUFLAGS
+
+ /*
+ * Create the identity mapping.
+ */
+ adrp x0, idmap_pg_dir
+ adrp x3, __idmap_text_start // __pa(__idmap_text_start)
+
+#ifdef CONFIG_ARM64_VA_BITS_52
+ mrs_s x6, SYS_ID_AA64MMFR2_EL1
+ and x6, x6, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
+ mov x5, #52
+ cbnz x6, 1f
+#endif
+ mov x5, #VA_BITS_MIN
+1:
+ adr_l x6, vabits_actual
+ str x5, [x6]
+ dmb sy
+ dc ivac, x6 // Invalidate potentially stale cache line
+
+ /*
+ * VA_BITS may be too small to allow for an ID mapping to be created
+ * that covers system RAM if that is located sufficiently high in the
+ * physical address space. So for the ID map, use an extended virtual
+ * range in that case, and configure an additional translation level
+ * if needed.
+ *
+ * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
+ * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
+ * this number conveniently equals the number of leading zeroes in
+ * the physical address of __idmap_text_end.
+ */
+ adrp x5, __idmap_text_end
+ clz x5, x5
+ cmp x5, TCR_T0SZ(VA_BITS_MIN) // default T0SZ small enough?
+ b.ge 1f // .. then skip VA range extension
+
+ adr_l x6, idmap_t0sz
+ str x5, [x6]
+ dmb sy
+ dc ivac, x6 // Invalidate potentially stale cache line
+
+#if (VA_BITS < 48)
+#define EXTRA_SHIFT (PGDIR_SHIFT + PAGE_SHIFT - 3)
+#define EXTRA_PTRS (1 << (PHYS_MASK_SHIFT - EXTRA_SHIFT))
+
+ /*
+ * If VA_BITS < 48, we have to configure an additional table level.
+ * First, we have to verify our assumption that the current value of
+ * VA_BITS was chosen such that all translation levels are fully
+ * utilised, and that lowering T0SZ will always result in an additional
+ * translation level to be configured.
+ */
+#if VA_BITS != EXTRA_SHIFT
+#error "Mismatch between VA_BITS and page size/number of translation levels"
+#endif
+
+ mov x4, EXTRA_PTRS
+ create_table_entry x0, x3, EXTRA_SHIFT, x4, x5, x6
+#else
+ /*
+ * If VA_BITS == 48, we don't have to configure an additional
+ * translation level, but the top-level table has more entries.
+ */
+ mov x4, #1 << (PHYS_MASK_SHIFT - PGDIR_SHIFT)
+ str_l x4, idmap_ptrs_per_pgd, x5
+#endif
+1:
+ ldr_l x4, idmap_ptrs_per_pgd
+ mov x5, x3 // __pa(__idmap_text_start)
+ adr_l x6, __idmap_text_end // __pa(__idmap_text_end)
+
+ map_memory x0, x1, x3, x6, x7, x3, x4, x10, x11, x12, x13, x14
+
+ /*
+ * Map the kernel image (starting with PHYS_OFFSET).
+ */
+ adrp x0, init_pg_dir
+ mov_q x5, KIMAGE_VADDR // compile time __va(_text)
+ add x5, x5, x23 // add KASLR displacement
+ mov x4, PTRS_PER_PGD
+ adrp x6, _end // runtime __pa(_end)
+ adrp x3, _text // runtime __pa(_text)
+ sub x6, x6, x3 // _end - _text
+ add x6, x6, x5 // runtime __va(_end)
+
+ map_memory x0, x1, x5, x6, x7, x3, x4, x10, x11, x12, x13, x14
+
+ /*
+ * Since the page tables have been populated with non-cacheable
+ * accesses (MMU disabled), invalidate those tables again to
+ * remove any speculatively loaded cache lines.
+ */
+ dmb sy
+
+ adrp x0, idmap_pg_dir
+ adrp x1, idmap_pg_end
+ sub x1, x1, x0
+ bl __inval_dcache_area
+
+ adrp x0, init_pg_dir
+ adrp x1, init_pg_end
+ sub x1, x1, x0
+ bl __inval_dcache_area
+
+ ret x28
+SYM_FUNC_END(__create_page_tables)
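
As a worked example of the T0SZ trick in __create_page_tables above: if
__idmap_text_end happened to sit at physical address 0x80_0000_0000 (a purely
hypothetical placement), count-leading-zeroes yields 24, so T0SZ becomes 24
and the ID map spans a 40-bit address range.

    #include <stdio.h>

    int main(void)
    {
            unsigned long long pa = 0x8000000000ULL; /* hypothetical __idmap_text_end */
            int t0sz = __builtin_clzll(pa);          /* 24 leading zeroes */

            printf("T0SZ=%d -> ID map covers %d address bits\n", t0sz, 64 - t0sz);
            return 0;
    }
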
+
+/*
+ * The following fragment of code is executed with the MMU enabled.
+ *
+ * x0 = __PHYS_OFFSET
+ */
+SYM_FUNC_START_LOCAL(__primary_switched)
+ adrp x4, init_thread_union
+ add sp, x4, #THREAD_SIZE
+ adr_l x5, init_task
+ msr sp_el0, x5 // Save thread_info
+
+#ifdef CONFIG_ARM64_PTR_AUTH
+ __ptrauth_keys_init_cpu x5, x6, x7, x8
+#endif
+
+ adr_l x8, vectors // load VBAR_EL1 with virtual
+ msr vbar_el1, x8 // vector table address
+ isb
+
+ stp xzr, x30, [sp, #-16]!
+ mov x29, sp
+
+#ifdef CONFIG_SHADOW_CALL_STACK
+ adr_l scs_sp, init_shadow_call_stack // Set shadow call stack
+#endif
+
+ str_l x21, __fdt_pointer, x5 // Save FDT pointer
+
+ ldr_l x4, kimage_vaddr // Save the offset between
+ sub x4, x4, x0 // the kernel virtual and
+ str_l x4, kimage_voffset, x5 // physical mappings
+
+ // Clear BSS
+ adr_l x0, __bss_start
+ mov x1, xzr
+ adr_l x2, __bss_stop
+ sub x2, x2, x0
+ bl __pi_memset
+ dsb ishst // Make zero page visible to PTW
+
+#ifdef CONFIG_KASAN
+ bl kasan_early_init
+#endif
+#ifdef CONFIG_RANDOMIZE_BASE
+ tst x23, ~(MIN_KIMG_ALIGN - 1) // already running randomized?
+ b.ne 0f
+ mov x0, x21 // pass FDT address in x0
+ bl kaslr_early_init // parse FDT for KASLR options
+ cbz x0, 0f // KASLR disabled? just proceed
+ orr x23, x23, x0 // record KASLR offset
+ ldp x29, x30, [sp], #16 // we must enable KASLR, return
+ ret // to __primary_switch()
+0:
+#endif
+ add sp, sp, #16
+ mov x29, #0
+ mov x30, #0
+ b start_kernel
+SYM_FUNC_END(__primary_switched)
+
+ .pushsection ".rodata", "a"
+SYM_DATA_START(kimage_vaddr)
+ .quad _text
+SYM_DATA_END(kimage_vaddr)
+EXPORT_SYMBOL(kimage_vaddr)
+ .popsection
+
+/*
+ * end early head section, begin head code that is also used for
+ * hotplug and needs to have the same protections as the text region
+ */
+ .section ".idmap.text","awx"
+
+/*
+ * If we're fortunate enough to boot at EL2, ensure that the world is
+ * sane before dropping to EL1.
+ *
+ * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in w0 if
+ * booted in EL1 or EL2 respectively.
+ */
+SYM_FUNC_START(el2_setup)
+ msr SPsel, #1 // We want to use SP_EL{1,2}
+ mrs x0, CurrentEL
+ cmp x0, #CurrentEL_EL2
+ b.eq 1f
+ mov_q x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1)
+ msr sctlr_el1, x0
+ mov w0, #BOOT_CPU_MODE_EL1 // This cpu booted in EL1
+ isb
+ ret
+
+1: mov_q x0, (SCTLR_EL2_RES1 | ENDIAN_SET_EL2)
+ msr sctlr_el2, x0
+
+#ifdef CONFIG_ARM64_VHE
+ /*
+ * Check for VHE being present. For the rest of the EL2 setup,
+ * x2 being non-zero indicates that we do have VHE, and that the
+ * kernel is intended to run at EL2.
+ */
+ mrs x2, id_aa64mmfr1_el1
+ ubfx x2, x2, #ID_AA64MMFR1_VHE_SHIFT, #4
+#else
+ mov x2, xzr
+#endif
+
+ /* Hyp configuration. */
+ mov_q x0, HCR_HOST_NVHE_FLAGS
+ cbz x2, set_hcr
+ mov_q x0, HCR_HOST_VHE_FLAGS
+set_hcr:
+ msr hcr_el2, x0
+ isb
+
+ /*
+ * Allow Non-secure EL1 and EL0 to access physical timer and counter.
+ * This is not necessary for VHE, since the host kernel runs in EL2,
+ * and EL0 accesses are configured in a later stage of the boot process.
+ * Note that when HCR_EL2.E2H == 1, CNTHCTL_EL2 has the same bit layout
+ * as CNTKCTL_EL1, and CNTKCTL_EL1 accessing instructions are redefined
+ * to access CNTHCTL_EL2. This allows the kernel designed to run at EL1
+ * to transparently mess with the EL0 bits via CNTKCTL_EL1 access in
+ * EL2.
+ */
+ cbnz x2, 1f
+ mrs x0, cnthctl_el2
+ orr x0, x0, #3 // Enable EL1 physical timers
+ msr cnthctl_el2, x0
+1:
+ msr cntvoff_el2, xzr // Clear virtual offset
+
+#ifdef CONFIG_ARM_GIC_V3
+ /* GICv3 system register access */
+ mrs x0, id_aa64pfr0_el1
+ ubfx x0, x0, #ID_AA64PFR0_GIC_SHIFT, #4
+ cbz x0, 3f
+
+ mrs_s x0, SYS_ICC_SRE_EL2
+ orr x0, x0, #ICC_SRE_EL2_SRE // Set ICC_SRE_EL2.SRE==1
+ orr x0, x0, #ICC_SRE_EL2_ENABLE // Set ICC_SRE_EL2.Enable==1
+ msr_s SYS_ICC_SRE_EL2, x0
+ isb // Make sure SRE is now set
+ mrs_s x0, SYS_ICC_SRE_EL2 // Read SRE back,
+ tbz x0, #0, 3f // and check that it sticks
+ msr_s SYS_ICH_HCR_EL2, xzr // Reset ICC_HCR_EL2 to defaults
+
+3:
+#endif
+
+ /* Populate ID registers. */
+ mrs x0, midr_el1
+ mrs x1, mpidr_el1
+ msr vpidr_el2, x0
+ msr vmpidr_el2, x1
+
+#ifdef CONFIG_COMPAT
+ msr hstr_el2, xzr // Disable CP15 traps to EL2
+#endif
+
+ /* EL2 debug */
+ mrs x1, id_aa64dfr0_el1
+ sbfx x0, x1, #ID_AA64DFR0_PMUVER_SHIFT, #4
+ cmp x0, #1
+ b.lt 4f // Skip if no PMU present
+ mrs x0, pmcr_el0 // Disable debug access traps
+ ubfx x0, x0, #11, #5 // to EL2 and allow access to
+4:
+ csel x3, xzr, x0, lt // all PMU counters from EL1
+
+ /* Statistical profiling */
+ ubfx x0, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4
+ cbz x0, 7f // Skip if SPE not present
+ cbnz x2, 6f // VHE?
+ mrs_s x4, SYS_PMBIDR_EL1 // If SPE available at EL2,
+ and x4, x4, #(1 << SYS_PMBIDR_EL1_P_SHIFT)
+ cbnz x4, 5f // then permit sampling of physical
+ mov x4, #(1 << SYS_PMSCR_EL2_PCT_SHIFT | \
+ 1 << SYS_PMSCR_EL2_PA_SHIFT)
+ msr_s SYS_PMSCR_EL2, x4 // addresses and physical counter
+5:
+ mov x1, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
+ orr x3, x3, x1 // If we don't have VHE, then
+ b 7f // use EL1&0 translation.
+6: // For VHE, use EL2 translation
+ orr x3, x3, #MDCR_EL2_TPMS // and disable access from EL1
+7:
+ msr mdcr_el2, x3 // Configure debug traps
+
+ /* LORegions */
+ mrs x1, id_aa64mmfr1_el1
+ ubfx x0, x1, #ID_AA64MMFR1_LOR_SHIFT, 4
+ cbz x0, 1f
+ msr_s SYS_LORC_EL1, xzr
+1:
+
+ /* Stage-2 translation */
+ msr vttbr_el2, xzr
+
+ cbz x2, install_el2_stub
+
+ mov w0, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2
+ isb
+ ret
+
+SYM_INNER_LABEL(install_el2_stub, SYM_L_LOCAL)
+ /*
+ * When VHE is not in use, early init of EL2 and EL1 needs to be
+ * done here.
+ * When VHE _is_ in use, EL1 will not be used in the host and
+ * requires no configuration, and all non-hyp-specific EL2 setup
+ * will be done via the _EL1 system register aliases in __cpu_setup.
+ */
+ mov_q x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1)
+ msr sctlr_el1, x0
+
+ /* Coprocessor traps. */
+ mov x0, #0x33ff
+ msr cptr_el2, x0 // Disable copro. traps to EL2
+
+ /* SVE register access */
+ mrs x1, id_aa64pfr0_el1
+ ubfx x1, x1, #ID_AA64PFR0_SVE_SHIFT, #4
+ cbz x1, 7f
+
+ bic x0, x0, #CPTR_EL2_TZ // Also disable SVE traps
+ msr cptr_el2, x0 // Disable copro. traps to EL2
+ isb
+ mov x1, #ZCR_ELx_LEN_MASK // SVE: Enable full vector
+ msr_s SYS_ZCR_EL2, x1 // length for EL1.
+
+ /* Hypervisor stub */
+7: adr_l x0, __hyp_stub_vectors
+ msr vbar_el2, x0
+
+ /* spsr */
+ mov x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
+ PSR_MODE_EL1h)
+ msr spsr_el2, x0
+ msr elr_el2, lr
+ mov w0, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2
+ eret
+SYM_FUNC_END(el2_setup)
+
+/*
+ * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
+ * in w0. See arch/arm64/include/asm/virt.h for more info.
+ */
+SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag)
+ adr_l x1, __boot_cpu_mode
+ cmp w0, #BOOT_CPU_MODE_EL2
+ b.ne 1f
+ add x1, x1, #4
+1: str w0, [x1] // This CPU has booted in EL1
+ dmb sy
+ dc ivac, x1 // Invalidate potentially stale cache line
+ ret
+SYM_FUNC_END(set_cpu_boot_mode_flag)
+
+/*
+ * These values are written with the MMU off, but read with the MMU on.
+ * Writers will invalidate the corresponding address, discarding up to a
+ * 'Cache Writeback Granule' (CWG) worth of data. The linker script ensures
+ * sufficient alignment that the CWG doesn't overlap another section.
+ */
+ .pushsection ".mmuoff.data.write", "aw"
+/*
+ * We need to find out the CPU boot mode long after boot, so we need to
+ * store it in a writable variable.
+ *
+ * This is not in .bss, because we set it sufficiently early that the boot-time
+ * zeroing of .bss would clobber it.
+ */
+SYM_DATA_START(__boot_cpu_mode)
+ .long BOOT_CPU_MODE_EL2
+ .long BOOT_CPU_MODE_EL1
+SYM_DATA_END(__boot_cpu_mode)
+/*
+ * The booting CPU updates the failed status @__early_cpu_boot_status,
+ * with MMU turned off.
+ */
+SYM_DATA_START(__early_cpu_boot_status)
+ .quad 0
+SYM_DATA_END(__early_cpu_boot_status)
+
+ .popsection
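
A CPU that enters at EL1 overwrites the first word above with
BOOT_CPU_MODE_EL1, while a CPU that enters at EL2 overwrites the second word
with BOOT_CPU_MODE_EL2, so later code can tell whether every CPU booted at
EL2. A minimal C sketch of how such a flag pair is typically consumed
(patterned after the arm64 asm/virt.h helpers; the constant values below are
assumptions taken from that header):

    #include <stdbool.h>
    #include <stdint.h>

    #define BOOT_CPU_MODE_EL1   0xe11   /* assumed, per asm/virt.h */
    #define BOOT_CPU_MODE_EL2   0xe12   /* assumed, per asm/virt.h */

    extern uint32_t __boot_cpu_mode[2]; /* mirrors the two .long words above */

    /* all CPUs entered at EL2: word 0 was never downgraded to EL1 and
     * word 1 was upgraded to EL2 by at least one CPU */
    static bool hyp_mode_available(void)
    {
            return __boot_cpu_mode[0] == BOOT_CPU_MODE_EL2 &&
                   __boot_cpu_mode[1] == BOOT_CPU_MODE_EL2;
    }

    /* some CPUs entered at EL1 while others entered at EL2 */
    static bool hyp_mode_mismatched(void)
    {
            return __boot_cpu_mode[0] != __boot_cpu_mode[1];
    }
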
+
+ /*
+ * This provides a "holding pen" for platforms to hold all secondary
+ * cores until we're ready for them to initialise.
+ */
+SYM_FUNC_START(secondary_holding_pen)
+ bl el2_setup // Drop to EL1, w0=cpu_boot_mode
+ bl set_cpu_boot_mode_flag
+ mrs x0, mpidr_el1
+ mov_q x1, MPIDR_HWID_BITMASK
+ and x0, x0, x1
+ adr_l x3, secondary_holding_pen_release
+pen: ldr x4, [x3]
+ cmp x4, x0
+ b.eq secondary_startup
+ wfe
+ b pen
+SYM_FUNC_END(secondary_holding_pen)
+
+ /*
+ * Secondary entry point that jumps straight into the kernel. Only to
+ * be used where CPUs are brought online dynamically by the kernel.
+ */
+SYM_FUNC_START(secondary_entry)
+ bl el2_setup // Drop to EL1
+ bl set_cpu_boot_mode_flag
+ b secondary_startup
+SYM_FUNC_END(secondary_entry)
+
+SYM_FUNC_START_LOCAL(secondary_startup)
+ /*
+ * Common entry point for secondary CPUs.
+ */
+ bl __cpu_secondary_check52bitva
+ bl __cpu_setup // initialise processor
+ adrp x1, swapper_pg_dir
+ bl __enable_mmu
+ ldr x8, =__secondary_switched
+ br x8
+SYM_FUNC_END(secondary_startup)
+
+SYM_FUNC_START_LOCAL(__secondary_switched)
+ adr_l x5, vectors
+ msr vbar_el1, x5
+ isb
+
+ adr_l x0, secondary_data
+ ldr x1, [x0, #CPU_BOOT_STACK] // get secondary_data.stack
+ cbz x1, __secondary_too_slow
+ mov sp, x1
+ ldr x2, [x0, #CPU_BOOT_TASK]
+ cbz x2, __secondary_too_slow
+ msr sp_el0, x2
+ scs_load_current
+ mov x29, #0
+ mov x30, #0
+
+#ifdef CONFIG_ARM64_PTR_AUTH
+ ptrauth_keys_init_cpu x2, x3, x4, x5
+#endif
+
+ b secondary_start_kernel
+SYM_FUNC_END(__secondary_switched)
+
+SYM_FUNC_START_LOCAL(__secondary_too_slow)
+ wfe
+ wfi
+ b __secondary_too_slow
+SYM_FUNC_END(__secondary_too_slow)
+
+/*
+ * The booting CPU updates the failed status @__early_cpu_boot_status,
+ * with MMU turned off.
+ *
+ * update_early_cpu_boot_status status, tmp1, tmp2
+ * - Corrupts tmp1, tmp2
+ * - Writes 'status' to __early_cpu_boot_status and makes sure
+ * it is committed to memory.
+ */
+
+ .macro update_early_cpu_boot_status status, tmp1, tmp2
+ mov \tmp2, #\status
+ adr_l \tmp1, __early_cpu_boot_status
+ str \tmp2, [\tmp1]
+ dmb sy
+ dc ivac, \tmp1 // Invalidate potentially stale cache line
+ .endm
+
+/*
+ * Enable the MMU.
+ *
+ * x0 = SCTLR_EL1 value for turning on the MMU.
+ * x1 = TTBR1_EL1 value
+ *
+ * Returns to the caller via x30/lr. This requires the caller to be covered
+ * by the .idmap.text section.
+ *
+ * Checks if the selected granule size is supported by the CPU.
+ * If it isn't, park the CPU.
+ */
+SYM_FUNC_START(__enable_mmu)
+ mrs x2, ID_AA64MMFR0_EL1
+ ubfx x2, x2, #ID_AA64MMFR0_TGRAN_SHIFT, 4
+ cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED_MIN
+ b.lt __no_granule_support
+ cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED_MAX
+ b.gt __no_granule_support
+ update_early_cpu_boot_status 0, x2, x3
+ adrp x2, idmap_pg_dir
+ phys_to_ttbr x1, x1
+ phys_to_ttbr x2, x2
+ msr ttbr0_el1, x2 // load TTBR0
+ offset_ttbr1 x1, x3
+ msr ttbr1_el1, x1 // load TTBR1
+ isb
+ msr sctlr_el1, x0
+ isb
+ /*
+ * Invalidate the local I-cache so that any instructions fetched
+ * speculatively from the PoC are discarded, since they may have
+ * been dynamically patched at the PoU.
+ */
+ ic iallu
+ dsb nsh
+ isb
+ ret
+SYM_FUNC_END(__enable_mmu)
+
+SYM_FUNC_START(__cpu_secondary_check52bitva)
+#ifdef CONFIG_ARM64_VA_BITS_52
+ ldr_l x0, vabits_actual
+ cmp x0, #52
+ b.ne 2f
+
+ mrs_s x0, SYS_ID_AA64MMFR2_EL1
+ and x0, x0, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
+ cbnz x0, 2f
+
+ update_early_cpu_boot_status \
+ CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_52_BIT_VA, x0, x1
+1: wfe
+ wfi
+ b 1b
+
+#endif
+2: ret
+SYM_FUNC_END(__cpu_secondary_check52bitva)
+
+SYM_FUNC_START_LOCAL(__no_granule_support)
+ /* Indicate that this CPU can't boot and is stuck in the kernel */
+ update_early_cpu_boot_status \
+ CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_NO_GRAN, x1, x2
+1:
+ wfe
+ wfi
+ b 1b
+SYM_FUNC_END(__no_granule_support)
+
+#ifdef CONFIG_RELOCATABLE
+SYM_FUNC_START_LOCAL(__relocate_kernel)
+ /*
+ * Iterate over each entry in the relocation table, and apply the
+ * relocations in place.
+ */
+ ldr w9, =__rela_offset // offset to reloc table
+ ldr w10, =__rela_size // size of reloc table
+
+ mov_q x11, KIMAGE_VADDR // default virtual offset
+ add x11, x11, x23 // actual virtual offset
+ add x9, x9, x11 // __va(.rela)
+ add x10, x9, x10 // __va(.rela) + sizeof(.rela)
+
+0: cmp x9, x10
+ b.hs 1f
+ ldp x12, x13, [x9], #24
+ ldr x14, [x9, #-8]
+ cmp w13, #R_AARCH64_RELATIVE
+ b.ne 0b
+ add x14, x14, x23 // relocate
+ str x14, [x12, x23]
+ b 0b
+
+1:
+#ifdef CONFIG_RELR
+ /*
+ * Apply RELR relocations.
+ *
+ * RELR is a compressed format for storing relative relocations. The
+ * encoded sequence of entries looks like:
+ * [ AAAAAAAA BBBBBBB1 BBBBBBB1 ... AAAAAAAA BBBBBB1 ... ]
+ *
+ * i.e. start with an address, followed by any number of bitmaps. The
+ * address entry encodes 1 relocation. The subsequent bitmap entries
+ * encode up to 63 relocations each, at subsequent offsets following
+ * the last address entry.
+ *
+ * The bitmap entries must have 1 in the least significant bit. The
+ * assumption here is that an address cannot have 1 in lsb. Odd
+ * addresses are not supported. Any odd addresses are stored in the RELA
+ * section, which is handled above.
+ *
+ * Excluding the least significant bit in the bitmap, each non-zero
+ * bit in the bitmap represents a relocation to be applied to
+ * a corresponding machine word that follows the base address
+ * word. The second least significant bit represents the machine
+ * word immediately following the initial address, and each bit
+ * that follows represents the next word, in linear order. As such,
+ * a single bitmap can encode up to 63 relocations in a 64-bit object.
+ *
+ * In this implementation we store the address of the next RELR table
+ * entry in x9, the address being relocated by the current address or
+ * bitmap entry in x13 and the address being relocated by the current
+ * bit in x14.
+ *
+ * Because addends are stored in place in the binary, RELR relocations
+ * cannot be applied idempotently. We use x24 to keep track of the
+ * currently applied displacement so that we can correctly relocate if
+ * __relocate_kernel is called twice with non-zero displacements (i.e.
+ * if there is both a physical misalignment and a KASLR displacement).
+ */
+ ldr w9, =__relr_offset // offset to reloc table
+ ldr w10, =__relr_size // size of reloc table
+ add x9, x9, x11 // __va(.relr)
+ add x10, x9, x10 // __va(.relr) + sizeof(.relr)
+
+ sub x15, x23, x24 // delta from previous offset
+ cbz x15, 7f // nothing to do if unchanged
+ mov x24, x23 // save new offset
+
+2: cmp x9, x10
+ b.hs 7f
+ ldr x11, [x9], #8
+ tbnz x11, #0, 3f // branch to handle bitmaps
+ add x13, x11, x23
+ ldr x12, [x13] // relocate address entry
+ add x12, x12, x15
+ str x12, [x13], #8 // adjust to start of bitmap
+ b 2b
+
+3: mov x14, x13
+4: lsr x11, x11, #1
+ cbz x11, 6f
+ tbz x11, #0, 5f // skip bit if not set
+ ldr x12, [x14] // relocate bit
+ add x12, x12, x15
+ str x12, [x14]
+
+5: add x14, x14, #8 // move to next bit's address
+ b 4b
+
+6: /*
+ * Move to the next bitmap's address. 8 is the word size, and 63 is the
+ * number of significant bits in a bitmap entry.
+ */
+ add x13, x13, #(8 * 63)
+ b 2b
+
+7:
+#endif
+ ret
+
+SYM_FUNC_END(__relocate_kernel)
+#endif
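
For reference, here is a minimal user-space C model of the RELR decoding loop
implemented above (an illustrative sketch only: load_bias and delta stand in
for the x11 virtual offset and the x23/x24 displacement delta, and the
idempotence bookkeeping is omitted):

    #include <stdint.h>
    #include <stddef.h>

    static void apply_relr(const uint64_t *relr, size_t nents,
                           uintptr_t load_bias, uint64_t delta)
    {
            uint64_t *where = NULL;

            for (size_t i = 0; i < nents; i++) {
                    uint64_t entry = relr[i];

                    if (!(entry & 1)) {
                            /* address entry: relocate it, remember the base */
                            where = (uint64_t *)(entry + load_bias);
                            *where += delta;
                            where++;            /* bitmap bits start here */
                    } else {
                            /* bitmap entry: bit n covers the n'th following word */
                            uint64_t *p = where;

                            for (entry >>= 1; entry; entry >>= 1, p++)
                                    if (entry & 1)
                                            *p += delta;
                            where += 63;        /* each bitmap spans 63 words */
                    }
            }
    }
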
+
+SYM_FUNC_START_LOCAL(__primary_switch)
+#ifdef CONFIG_RANDOMIZE_BASE
+ mov x19, x0 // preserve new SCTLR_EL1 value
+ mrs x20, sctlr_el1 // preserve old SCTLR_EL1 value
+#endif
+
+ adrp x1, init_pg_dir
+ bl __enable_mmu
+#ifdef CONFIG_RELOCATABLE
+#ifdef CONFIG_RELR
+ mov x24, #0 // no RELR displacement yet
+#endif
+ bl __relocate_kernel
+#ifdef CONFIG_RANDOMIZE_BASE
+ ldr x8, =__primary_switched
+ adrp x0, __PHYS_OFFSET
+ blr x8
+
+ /*
+ * If we return here, we have a KASLR displacement in x23 which we need
+ * to take into account by discarding the current kernel mapping and
+ * creating a new one.
+ */
+ pre_disable_mmu_workaround
+ msr sctlr_el1, x20 // disable the MMU
+ isb
+ bl __create_page_tables // recreate kernel mapping
+
+ tlbi vmalle1 // Remove any stale TLB entries
+ dsb nsh
+ isb
+
+ msr sctlr_el1, x19 // re-enable the MMU
+ isb
+ ic iallu // flush instructions fetched
+ dsb nsh // via old mapping
+ isb
+
+ bl __relocate_kernel
+#endif
+#endif
+ ldr x8, =__primary_switched
+ adrp x0, __PHYS_OFFSET
+ br x8
+SYM_FUNC_END(__primary_switch)
diff --git a/arch/arm64/kernel/hibernate-asm.S b/arch/arm64/kernel/hibernate-asm.S
new file mode 100644
index 000000000..8ccca6600
--- /dev/null
+++ b/arch/arm64/kernel/hibernate-asm.S
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Hibernate low-level support
+ *
+ * Copyright (C) 2016 ARM Ltd.
+ * Author: James Morse <james.morse@arm.com>
+ */
+#include <linux/linkage.h>
+#include <linux/errno.h>
+
+#include <asm/asm-offsets.h>
+#include <asm/assembler.h>
+#include <asm/cputype.h>
+#include <asm/memory.h>
+#include <asm/page.h>
+#include <asm/virt.h>
+
+/*
+ * To prevent old and new partial table walks from being visible in the TLB,
+ * switch the ttbr to a zero page when we invalidate the old records (see
+ * D4.7.1 'General TLB maintenance requirements' in ARM DDI 0487A.i). Even
+ * switching to our copied tables will cause a changed output address at
+ * each stage of the walk.
+ */
+.macro break_before_make_ttbr_switch zero_page, page_table, tmp, tmp2
+ phys_to_ttbr \tmp, \zero_page
+ msr ttbr1_el1, \tmp
+ isb
+ tlbi vmalle1
+ dsb nsh
+ phys_to_ttbr \tmp, \page_table
+ offset_ttbr1 \tmp, \tmp2
+ msr ttbr1_el1, \tmp
+ isb
+.endm
+
+
+/*
+ * Resume from hibernate
+ *
+ * Loads temporary page tables then restores the memory image.
+ * Finally branches to cpu_resume() to restore the state saved by
+ * swsusp_arch_suspend().
+ *
+ * Because this code has to be copied to a 'safe' page, it can't call out to
+ * other functions by PC-relative address. Also remember that it may be
+ * mid-way through over-writing other functions. For this reason it contains
+ * code from flush_icache_range() and uses the copy_page() macro.
+ *
+ * This 'safe' page is mapped via ttbr0, and executed from there. This function
+ * switches to a copy of the linear map in ttbr1, performs the restore, then
+ * switches ttbr1 to the original kernel's swapper_pg_dir.
+ *
+ * All of memory gets written to, including code. We need to clean the kernel
+ * text to the Point of Coherence (PoC) before secondary cores can be booted.
+ * Because the kernel modules and executable pages mapped to user space are
+ * also written as data, we clean all pages we touch to the Point of
+ * Unification (PoU).
+ *
+ * x0: physical address of temporary page tables
+ * x1: physical address of swapper page tables
+ * x2: address of cpu_resume
+ * x3: linear map address of restore_pblist in the current kernel
+ * x4: physical address of __hyp_stub_vectors, or 0
+ * x5: physical address of a zero page that remains zero after resume
+ */
+.pushsection ".hibernate_exit.text", "ax"
+SYM_CODE_START(swsusp_arch_suspend_exit)
+ /*
+ * We execute from ttbr0, change ttbr1 to our copied linear map tables
+ * with a break-before-make via the zero page
+ */
+ break_before_make_ttbr_switch x5, x0, x6, x8
+
+ mov x21, x1
+ mov x30, x2
+ mov x24, x4
+ mov x25, x5
+
+ /* walk the restore_pblist and use copy_page() to over-write memory */
+ mov x19, x3
+
+1: ldr x10, [x19, #HIBERN_PBE_ORIG]
+ mov x0, x10
+ ldr x1, [x19, #HIBERN_PBE_ADDR]
+
+ copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
+
+ add x1, x10, #PAGE_SIZE
+ /* Clean the copied page to PoU - based on flush_icache_range() */
+ raw_dcache_line_size x2, x3
+ sub x3, x2, #1
+ bic x4, x10, x3
+2: dc cvau, x4 /* clean D line / unified line */
+ add x4, x4, x2
+ cmp x4, x1
+ b.lo 2b
+
+ ldr x19, [x19, #HIBERN_PBE_NEXT]
+ cbnz x19, 1b
+ dsb ish /* wait for PoU cleaning to finish */
+
+ /* switch to the restored kernel's page tables */
+ break_before_make_ttbr_switch x25, x21, x6, x8
+
+ ic ialluis
+ dsb ish
+ isb
+
+ cbz x24, 3f /* Do we need to re-initialise EL2? */
+ hvc #0
+3: ret
+SYM_CODE_END(swsusp_arch_suspend_exit)
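
At C level the restore walk performed above amounts to the sketch below (an
illustrative model only; the struct pbe layout is assumed to follow
include/linux/suspend.h and the cache maintenance is reduced to a comment):

    #include <string.h>

    #define PAGE_SIZE 4096          /* 4K pages assumed for the sketch */

    struct pbe {
            void *address;          /* safe copy of the page */
            void *orig_address;     /* where the page must end up */
            struct pbe *next;
    };

    static void restore_image(struct pbe *restore_pblist)
    {
            for (struct pbe *pbe = restore_pblist; pbe; pbe = pbe->next) {
                    memcpy(pbe->orig_address, pbe->address, PAGE_SIZE);
                    /* the assembly also cleans each destination page to the
                     * PoU so the restored code is visible to the I-side */
            }
    }
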
+
+/*
+ * Restore the hyp stub.
+ * This must be done before the hibernate page is unmapped by _cpu_resume(),
+ * but happens before any of the hyp-stub's code is cleaned to PoC.
+ *
+ * x24: The physical address of __hyp_stub_vectors
+ */
+SYM_CODE_START_LOCAL(el1_sync)
+ msr vbar_el2, x24
+ eret
+SYM_CODE_END(el1_sync)
+
+.macro invalid_vector label
+SYM_CODE_START_LOCAL(\label)
+ b \label
+SYM_CODE_END(\label)
+.endm
+
+ invalid_vector el2_sync_invalid
+ invalid_vector el2_irq_invalid
+ invalid_vector el2_fiq_invalid
+ invalid_vector el2_error_invalid
+ invalid_vector el1_sync_invalid
+ invalid_vector el1_irq_invalid
+ invalid_vector el1_fiq_invalid
+ invalid_vector el1_error_invalid
+
+/* el2 vectors - switch el2 here while we restore the memory image. */
+ .align 11
+SYM_CODE_START(hibernate_el2_vectors)
+ ventry el2_sync_invalid // Synchronous EL2t
+ ventry el2_irq_invalid // IRQ EL2t
+ ventry el2_fiq_invalid // FIQ EL2t
+ ventry el2_error_invalid // Error EL2t
+
+ ventry el2_sync_invalid // Synchronous EL2h
+ ventry el2_irq_invalid // IRQ EL2h
+ ventry el2_fiq_invalid // FIQ EL2h
+ ventry el2_error_invalid // Error EL2h
+
+ ventry el1_sync // Synchronous 64-bit EL1
+ ventry el1_irq_invalid // IRQ 64-bit EL1
+ ventry el1_fiq_invalid // FIQ 64-bit EL1
+ ventry el1_error_invalid // Error 64-bit EL1
+
+ ventry el1_sync_invalid // Synchronous 32-bit EL1
+ ventry el1_irq_invalid // IRQ 32-bit EL1
+ ventry el1_fiq_invalid // FIQ 32-bit EL1
+ ventry el1_error_invalid // Error 32-bit EL1
+SYM_CODE_END(hibernate_el2_vectors)
+
+.popsection
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
new file mode 100644
index 000000000..42003774d
--- /dev/null
+++ b/arch/arm64/kernel/hibernate.c
@@ -0,0 +1,724 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Hibernate support specific for ARM64
+ *
+ * Derived from work on ARM hibernation support by:
+ *
+ * Ubuntu project, hibernation support for mach-dove
+ * Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu)
+ * Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.)
+ * https://lkml.org/lkml/2010/6/18/4
+ * https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html
+ * https://patchwork.kernel.org/patch/96442/
+ *
+ * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
+ */
+#define pr_fmt(x) "hibernate: " x
+#include <linux/cpu.h>
+#include <linux/kvm_host.h>
+#include <linux/mm.h>
+#include <linux/pm.h>
+#include <linux/sched.h>
+#include <linux/suspend.h>
+#include <linux/utsname.h>
+
+#include <asm/barrier.h>
+#include <asm/cacheflush.h>
+#include <asm/cputype.h>
+#include <asm/daifflags.h>
+#include <asm/irqflags.h>
+#include <asm/kexec.h>
+#include <asm/memory.h>
+#include <asm/mmu_context.h>
+#include <asm/mte.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable-hwdef.h>
+#include <asm/sections.h>
+#include <asm/smp.h>
+#include <asm/smp_plat.h>
+#include <asm/suspend.h>
+#include <asm/sysreg.h>
+#include <asm/virt.h>
+
+/*
+ * Hibernate core relies on this value being 0 on resume, and marks it
+ * __nosavedata assuming it will keep the resume kernel's '0' value. This
+ * doesn't happen when either kernel uses KASLR.
+ *
+ * defined as "__visible int in_suspend __nosavedata" in
+ * kernel/power/hibernate.c
+ */
+extern int in_suspend;
+
+/* Do we need to reset el2? */
+#define el2_reset_needed() (is_hyp_mode_available() && !is_kernel_in_hyp_mode())
+
+/* temporary el2 vectors in the __hibernate_exit_text section. */
+extern char hibernate_el2_vectors[];
+
+/* hyp-stub vectors, used to restore el2 during resume from hibernate. */
+extern char __hyp_stub_vectors[];
+
+/*
+ * The logical cpu number we should resume on, initialised to a non-cpu
+ * number.
+ */
+static int sleep_cpu = -EINVAL;
+
+/*
+ * Values that may not change over hibernate/resume. We put the build number
+ * and date in here so that we guarantee not to resume with a different
+ * kernel.
+ */
+struct arch_hibernate_hdr_invariants {
+ char uts_version[__NEW_UTS_LEN + 1];
+};
+
+/* These values need to be known across a hibernate/restore. */
+static struct arch_hibernate_hdr {
+ struct arch_hibernate_hdr_invariants invariants;
+
+ /* These are needed to find the relocated kernel if built with kaslr */
+ phys_addr_t ttbr1_el1;
+ void (*reenter_kernel)(void);
+
+ /*
+ * We need to know where the __hyp_stub_vectors are after restore to
+ * re-configure el2.
+ */
+ phys_addr_t __hyp_stub_vectors;
+
+ u64 sleep_cpu_mpidr;
+} resume_hdr;
+
+static inline void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i)
+{
+ memset(i, 0, sizeof(*i));
+ memcpy(i->uts_version, init_utsname()->version, sizeof(i->uts_version));
+}
+
+int pfn_is_nosave(unsigned long pfn)
+{
+ unsigned long nosave_begin_pfn = sym_to_pfn(&__nosave_begin);
+ unsigned long nosave_end_pfn = sym_to_pfn(&__nosave_end - 1);
+
+ return ((pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn)) ||
+ crash_is_nosave(pfn);
+}
+
+void notrace save_processor_state(void)
+{
+ WARN_ON(num_online_cpus() != 1);
+}
+
+void notrace restore_processor_state(void)
+{
+}
+
+int arch_hibernation_header_save(void *addr, unsigned int max_size)
+{
+ struct arch_hibernate_hdr *hdr = addr;
+
+ if (max_size < sizeof(*hdr))
+ return -EOVERFLOW;
+
+ arch_hdr_invariants(&hdr->invariants);
+ hdr->ttbr1_el1 = __pa_symbol(swapper_pg_dir);
+ hdr->reenter_kernel = _cpu_resume;
+
+ /* We can't use __hyp_get_vectors() because kvm may still be loaded */
+ if (el2_reset_needed())
+ hdr->__hyp_stub_vectors = __pa_symbol(__hyp_stub_vectors);
+ else
+ hdr->__hyp_stub_vectors = 0;
+
+ /* Save the mpidr of the cpu we called cpu_suspend() on... */
+ if (sleep_cpu < 0) {
+ pr_err("Failing to hibernate on an unknown CPU.\n");
+ return -ENODEV;
+ }
+ hdr->sleep_cpu_mpidr = cpu_logical_map(sleep_cpu);
+ pr_info("Hibernating on CPU %d [mpidr:0x%llx]\n", sleep_cpu,
+ hdr->sleep_cpu_mpidr);
+
+ return 0;
+}
+EXPORT_SYMBOL(arch_hibernation_header_save);
+
+int arch_hibernation_header_restore(void *addr)
+{
+ int ret;
+ struct arch_hibernate_hdr_invariants invariants;
+ struct arch_hibernate_hdr *hdr = addr;
+
+ arch_hdr_invariants(&invariants);
+ if (memcmp(&hdr->invariants, &invariants, sizeof(invariants))) {
+ pr_crit("Hibernate image not generated by this kernel!\n");
+ return -EINVAL;
+ }
+
+ sleep_cpu = get_logical_index(hdr->sleep_cpu_mpidr);
+ pr_info("Hibernated on CPU %d [mpidr:0x%llx]\n", sleep_cpu,
+ hdr->sleep_cpu_mpidr);
+ if (sleep_cpu < 0) {
+ pr_crit("Hibernated on a CPU not known to this kernel!\n");
+ sleep_cpu = -EINVAL;
+ return -EINVAL;
+ }
+
+ ret = bringup_hibernate_cpu(sleep_cpu);
+ if (ret) {
+ sleep_cpu = -EINVAL;
+ return ret;
+ }
+
+ resume_hdr = *hdr;
+
+ return 0;
+}
+EXPORT_SYMBOL(arch_hibernation_header_restore);
+
+static int trans_pgd_map_page(pgd_t *trans_pgd, void *page,
+ unsigned long dst_addr,
+ pgprot_t pgprot)
+{
+ pgd_t *pgdp;
+ p4d_t *p4dp;
+ pud_t *pudp;
+ pmd_t *pmdp;
+ pte_t *ptep;
+
+ pgdp = pgd_offset_pgd(trans_pgd, dst_addr);
+ if (pgd_none(READ_ONCE(*pgdp))) {
+ pudp = (void *)get_safe_page(GFP_ATOMIC);
+ if (!pudp)
+ return -ENOMEM;
+ pgd_populate(&init_mm, pgdp, pudp);
+ }
+
+ p4dp = p4d_offset(pgdp, dst_addr);
+ if (p4d_none(READ_ONCE(*p4dp))) {
+ pudp = (void *)get_safe_page(GFP_ATOMIC);
+ if (!pudp)
+ return -ENOMEM;
+ p4d_populate(&init_mm, p4dp, pudp);
+ }
+
+ pudp = pud_offset(p4dp, dst_addr);
+ if (pud_none(READ_ONCE(*pudp))) {
+ pmdp = (void *)get_safe_page(GFP_ATOMIC);
+ if (!pmdp)
+ return -ENOMEM;
+ pud_populate(&init_mm, pudp, pmdp);
+ }
+
+ pmdp = pmd_offset(pudp, dst_addr);
+ if (pmd_none(READ_ONCE(*pmdp))) {
+ ptep = (void *)get_safe_page(GFP_ATOMIC);
+ if (!ptep)
+ return -ENOMEM;
+ pmd_populate_kernel(&init_mm, pmdp, ptep);
+ }
+
+ ptep = pte_offset_kernel(pmdp, dst_addr);
+ set_pte(ptep, pfn_pte(virt_to_pfn(page), PAGE_KERNEL_EXEC));
+
+ return 0;
+}
+
+/*
+ * Copies length bytes, starting at src_start, into a new page, performs
+ * cache maintenance, then maps it at the specified low address as
+ * executable.
+ *
+ * This is used by hibernate to copy the code it needs to execute when
+ * overwriting the kernel text. This function generates a new set of page
+ * tables, which it loads into ttbr0.
+ *
+ * Length is provided as we probably only want 4K of data, even on a 64K
+ * page system.
+ */
+static int create_safe_exec_page(void *src_start, size_t length,
+ unsigned long dst_addr,
+ phys_addr_t *phys_dst_addr)
+{
+ void *page = (void *)get_safe_page(GFP_ATOMIC);
+ pgd_t *trans_pgd;
+ int rc;
+
+ if (!page)
+ return -ENOMEM;
+
+ memcpy(page, src_start, length);
+ __flush_icache_range((unsigned long)page, (unsigned long)page + length);
+
+ trans_pgd = (void *)get_safe_page(GFP_ATOMIC);
+ if (!trans_pgd)
+ return -ENOMEM;
+
+ rc = trans_pgd_map_page(trans_pgd, page, dst_addr,
+ PAGE_KERNEL_EXEC);
+ if (rc)
+ return rc;
+
+ /*
+ * Load our new page tables. A strict BBM approach requires that we
+ * ensure that TLBs are free of any entries that may overlap with the
+ * global mappings we are about to install.
+ *
+ * For a real hibernate/resume cycle TTBR0 currently points to a zero
+ * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI
+ * runtime services), while for a userspace-driven test_resume cycle it
+ * points to userspace page tables (and we must point it at a zero page
+ * ourselves). Elsewhere we only (un)install the idmap with preemption
+ * disabled, so T0SZ should be as required regardless.
+ */
+ cpu_set_reserved_ttbr0();
+ local_flush_tlb_all();
+ write_sysreg(phys_to_ttbr(virt_to_phys(trans_pgd)), ttbr0_el1);
+ isb();
+
+ *phys_dst_addr = virt_to_phys(page);
+
+ return 0;
+}
+
+#define dcache_clean_range(start, end) __flush_dcache_area(start, (end - start))
+
+#ifdef CONFIG_ARM64_MTE
+
+static DEFINE_XARRAY(mte_pages);
+
+static int save_tags(struct page *page, unsigned long pfn)
+{
+ void *tag_storage, *ret;
+
+ tag_storage = mte_allocate_tag_storage();
+ if (!tag_storage)
+ return -ENOMEM;
+
+ mte_save_page_tags(page_address(page), tag_storage);
+
+ ret = xa_store(&mte_pages, pfn, tag_storage, GFP_KERNEL);
+ if (WARN(xa_is_err(ret), "Failed to store MTE tags")) {
+ mte_free_tag_storage(tag_storage);
+ return xa_err(ret);
+ } else if (WARN(ret, "swsusp: %s: Duplicate entry", __func__)) {
+ mte_free_tag_storage(ret);
+ }
+
+ return 0;
+}
+
+static void swsusp_mte_free_storage(void)
+{
+ XA_STATE(xa_state, &mte_pages, 0);
+ void *tags;
+
+ xa_lock(&mte_pages);
+ xas_for_each(&xa_state, tags, ULONG_MAX) {
+ mte_free_tag_storage(tags);
+ }
+ xa_unlock(&mte_pages);
+
+ xa_destroy(&mte_pages);
+}
+
+static int swsusp_mte_save_tags(void)
+{
+ struct zone *zone;
+ unsigned long pfn, max_zone_pfn;
+ int ret = 0;
+ int n = 0;
+
+ if (!system_supports_mte())
+ return 0;
+
+ for_each_populated_zone(zone) {
+ max_zone_pfn = zone_end_pfn(zone);
+ for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
+ struct page *page = pfn_to_online_page(pfn);
+
+ if (!page)
+ continue;
+
+ if (!test_bit(PG_mte_tagged, &page->flags))
+ continue;
+
+ ret = save_tags(page, pfn);
+ if (ret) {
+ swsusp_mte_free_storage();
+ goto out;
+ }
+
+ n++;
+ }
+ }
+ pr_info("Saved %d MTE pages\n", n);
+
+out:
+ return ret;
+}
+
+static void swsusp_mte_restore_tags(void)
+{
+ XA_STATE(xa_state, &mte_pages, 0);
+ int n = 0;
+ void *tags;
+
+ xa_lock(&mte_pages);
+ xas_for_each(&xa_state, tags, ULONG_MAX) {
+ unsigned long pfn = xa_state.xa_index;
+ struct page *page = pfn_to_online_page(pfn);
+
+ mte_restore_page_tags(page_address(page), tags);
+
+ mte_free_tag_storage(tags);
+ n++;
+ }
+ xa_unlock(&mte_pages);
+
+ pr_info("Restored %d MTE pages\n", n);
+
+ xa_destroy(&mte_pages);
+}
+
+#else /* CONFIG_ARM64_MTE */
+
+static int swsusp_mte_save_tags(void)
+{
+ return 0;
+}
+
+static void swsusp_mte_restore_tags(void)
+{
+}
+
+#endif /* CONFIG_ARM64_MTE */
+
+int swsusp_arch_suspend(void)
+{
+ int ret = 0;
+ unsigned long flags;
+ struct sleep_stack_data state;
+
+ if (cpus_are_stuck_in_kernel()) {
+ pr_err("Can't hibernate: no mechanism to offline secondary CPUs.\n");
+ return -EBUSY;
+ }
+
+ flags = local_daif_save();
+
+ if (__cpu_suspend_enter(&state)) {
+ /* make the crash dump kernel image visible/saveable */
+ crash_prepare_suspend();
+
+ ret = swsusp_mte_save_tags();
+ if (ret)
+ return ret;
+
+ sleep_cpu = smp_processor_id();
+ ret = swsusp_save();
+ } else {
+ /* Clean kernel core startup/idle code to PoC */
+ dcache_clean_range(__mmuoff_data_start, __mmuoff_data_end);
+ dcache_clean_range(__idmap_text_start, __idmap_text_end);
+
+ /* Clean kvm setup code to PoC? */
+ if (el2_reset_needed()) {
+ dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);
+ dcache_clean_range(__hyp_text_start, __hyp_text_end);
+ }
+
+ swsusp_mte_restore_tags();
+
+ /* make the crash dump kernel image protected again */
+ crash_post_resume();
+
+ /*
+ * Tell the hibernation core that we've just restored
+ * the memory
+ */
+ in_suspend = 0;
+
+ sleep_cpu = -EINVAL;
+ __cpu_suspend_exit();
+
+ /*
+ * Just in case the boot kernel did turn the SSBD
+ * mitigation off behind our back, let's set the state
+ * to what we expect it to be.
+ */
+ spectre_v4_enable_mitigation(NULL);
+ }
+
+ local_daif_restore(flags);
+
+ return ret;
+}
+
+static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr)
+{
+ pte_t pte = READ_ONCE(*src_ptep);
+
+ if (pte_valid(pte)) {
+ /*
+ * Resume will overwrite areas that may be marked
+ * read only (code, rodata). Clear the RDONLY bit from
+ * the temporary mappings we use during restore.
+ */
+ set_pte(dst_ptep, pte_mkwrite(pte));
+ } else if (debug_pagealloc_enabled() && !pte_none(pte)) {
+ /*
+ * debug_pagealloc will have removed the PTE_VALID bit if
+ * the page isn't in use by the resume kernel. It may have
+ * been in use by the original kernel, in which case we need
+ * to put it back in our copy to do the restore.
+ *
+ * Before marking this entry valid, check that the pfn
+ * should be mapped.
+ */
+ BUG_ON(!pfn_valid(pte_pfn(pte)));
+
+ set_pte(dst_ptep, pte_mkpresent(pte_mkwrite(pte)));
+ }
+}
+
+static int copy_pte(pmd_t *dst_pmdp, pmd_t *src_pmdp, unsigned long start,
+ unsigned long end)
+{
+ pte_t *src_ptep;
+ pte_t *dst_ptep;
+ unsigned long addr = start;
+
+ dst_ptep = (pte_t *)get_safe_page(GFP_ATOMIC);
+ if (!dst_ptep)
+ return -ENOMEM;
+ pmd_populate_kernel(&init_mm, dst_pmdp, dst_ptep);
+ dst_ptep = pte_offset_kernel(dst_pmdp, start);
+
+ src_ptep = pte_offset_kernel(src_pmdp, start);
+ do {
+ _copy_pte(dst_ptep, src_ptep, addr);
+ } while (dst_ptep++, src_ptep++, addr += PAGE_SIZE, addr != end);
+
+ return 0;
+}
+
+static int copy_pmd(pud_t *dst_pudp, pud_t *src_pudp, unsigned long start,
+ unsigned long end)
+{
+ pmd_t *src_pmdp;
+ pmd_t *dst_pmdp;
+ unsigned long next;
+ unsigned long addr = start;
+
+ if (pud_none(READ_ONCE(*dst_pudp))) {
+ dst_pmdp = (pmd_t *)get_safe_page(GFP_ATOMIC);
+ if (!dst_pmdp)
+ return -ENOMEM;
+ pud_populate(&init_mm, dst_pudp, dst_pmdp);
+ }
+ dst_pmdp = pmd_offset(dst_pudp, start);
+
+ src_pmdp = pmd_offset(src_pudp, start);
+ do {
+ pmd_t pmd = READ_ONCE(*src_pmdp);
+
+ next = pmd_addr_end(addr, end);
+ if (pmd_none(pmd))
+ continue;
+ if (pmd_table(pmd)) {
+ if (copy_pte(dst_pmdp, src_pmdp, addr, next))
+ return -ENOMEM;
+ } else {
+ set_pmd(dst_pmdp,
+ __pmd(pmd_val(pmd) & ~PMD_SECT_RDONLY));
+ }
+ } while (dst_pmdp++, src_pmdp++, addr = next, addr != end);
+
+ return 0;
+}
+
+static int copy_pud(p4d_t *dst_p4dp, p4d_t *src_p4dp, unsigned long start,
+ unsigned long end)
+{
+ pud_t *dst_pudp;
+ pud_t *src_pudp;
+ unsigned long next;
+ unsigned long addr = start;
+
+ if (p4d_none(READ_ONCE(*dst_p4dp))) {
+ dst_pudp = (pud_t *)get_safe_page(GFP_ATOMIC);
+ if (!dst_pudp)
+ return -ENOMEM;
+ p4d_populate(&init_mm, dst_p4dp, dst_pudp);
+ }
+ dst_pudp = pud_offset(dst_p4dp, start);
+
+ src_pudp = pud_offset(src_p4dp, start);
+ do {
+ pud_t pud = READ_ONCE(*src_pudp);
+
+ next = pud_addr_end(addr, end);
+ if (pud_none(pud))
+ continue;
+ if (pud_table(pud)) {
+ if (copy_pmd(dst_pudp, src_pudp, addr, next))
+ return -ENOMEM;
+ } else {
+ set_pud(dst_pudp,
+ __pud(pud_val(pud) & ~PUD_SECT_RDONLY));
+ }
+ } while (dst_pudp++, src_pudp++, addr = next, addr != end);
+
+ return 0;
+}
+
+static int copy_p4d(pgd_t *dst_pgdp, pgd_t *src_pgdp, unsigned long start,
+ unsigned long end)
+{
+ p4d_t *dst_p4dp;
+ p4d_t *src_p4dp;
+ unsigned long next;
+ unsigned long addr = start;
+
+ dst_p4dp = p4d_offset(dst_pgdp, start);
+ src_p4dp = p4d_offset(src_pgdp, start);
+ do {
+ next = p4d_addr_end(addr, end);
+ if (p4d_none(READ_ONCE(*src_p4dp)))
+ continue;
+ if (copy_pud(dst_p4dp, src_p4dp, addr, next))
+ return -ENOMEM;
+ } while (dst_p4dp++, src_p4dp++, addr = next, addr != end);
+
+ return 0;
+}
+
+static int copy_page_tables(pgd_t *dst_pgdp, unsigned long start,
+ unsigned long end)
+{
+ unsigned long next;
+ unsigned long addr = start;
+ pgd_t *src_pgdp = pgd_offset_k(start);
+
+ dst_pgdp = pgd_offset_pgd(dst_pgdp, start);
+ do {
+ next = pgd_addr_end(addr, end);
+ if (pgd_none(READ_ONCE(*src_pgdp)))
+ continue;
+ if (copy_p4d(dst_pgdp, src_pgdp, addr, next))
+ return -ENOMEM;
+ } while (dst_pgdp++, src_pgdp++, addr = next, addr != end);
+
+ return 0;
+}
+
+static int trans_pgd_create_copy(pgd_t **dst_pgdp, unsigned long start,
+ unsigned long end)
+{
+ int rc;
+ pgd_t *trans_pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
+
+ if (!trans_pgd) {
+ pr_err("Failed to allocate memory for temporary page tables.\n");
+ return -ENOMEM;
+ }
+
+ rc = copy_page_tables(trans_pgd, start, end);
+ if (!rc)
+ *dst_pgdp = trans_pgd;
+
+ return rc;
+}
+
+/*
+ * Set up and resume from the hibernate image using swsusp_arch_suspend_exit().
+ *
+ * Memory allocated by get_safe_page() will be dealt with by the hibernate
+ * code; we don't need to free it here.
+ */
+int swsusp_arch_resume(void)
+{
+ int rc;
+ void *zero_page;
+ size_t exit_size;
+ pgd_t *tmp_pg_dir;
+ phys_addr_t phys_hibernate_exit;
+ void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *,
+ void *, phys_addr_t, phys_addr_t);
+
+ /*
+ * Restoring the memory image will overwrite the ttbr1 page tables.
+ * Create a second copy of just the linear map, and use this when
+ * restoring.
+ */
+ rc = trans_pgd_create_copy(&tmp_pg_dir, PAGE_OFFSET, PAGE_END);
+ if (rc)
+ return rc;
+
+ /*
+ * We need a zero page that is zero before & after resume in order to
+ * break-before-make on the ttbr1 page tables.
+ */
+ zero_page = (void *)get_safe_page(GFP_ATOMIC);
+ if (!zero_page) {
+ pr_err("Failed to allocate zero page.\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Locate the exit code in the bottom-but-one page, so that *NULL
+ * still has disastrous effects.
+ */
+ hibernate_exit = (void *)PAGE_SIZE;
+ exit_size = __hibernate_exit_text_end - __hibernate_exit_text_start;
+ /*
+ * Copy swsusp_arch_suspend_exit() to a safe page. This will generate
+ * a new set of ttbr0 page tables and load them.
+ */
+ rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size,
+ (unsigned long)hibernate_exit,
+ &phys_hibernate_exit);
+ if (rc) {
+ pr_err("Failed to create safe executable page for hibernate_exit code.\n");
+ return rc;
+ }
+
+ /*
+ * The hibernate exit text contains a set of el2 vectors that will
+ * be executed at el2 with the mmu off in order to reload hyp-stub.
+ */
+ __flush_dcache_area(hibernate_exit, exit_size);
+
+ /*
+ * KASLR will cause the el2 vectors to be in a different location in
+ * the resumed kernel. Load hibernate's temporary copy into el2.
+ *
+ * We can skip this step if we booted at EL1, or are running with VHE.
+ */
+ if (el2_reset_needed()) {
+ phys_addr_t el2_vectors = phys_hibernate_exit; /* base */
+ el2_vectors += hibernate_el2_vectors -
+ __hibernate_exit_text_start; /* offset */
+
+ __hyp_set_vectors(el2_vectors);
+ }
+
+ hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
+ resume_hdr.reenter_kernel, restore_pblist,
+ resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));
+
+ return 0;
+}
+
+int hibernate_resume_nonboot_cpu_disable(void)
+{
+ if (sleep_cpu < 0) {
+ pr_err("Failing to resume from hibernate on an unknown CPU.\n");
+ return -ENODEV;
+ }
+
+ return freeze_secondary_cpus(sleep_cpu);
+}
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
new file mode 100644
index 000000000..e5a0c38f1
--- /dev/null
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -0,0 +1,1031 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
+ * using the CPU's debug registers.
+ *
+ * Copyright (C) 2012 ARM Limited
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#define pr_fmt(fmt) "hw-breakpoint: " fmt
+
+#include <linux/compat.h>
+#include <linux/cpu_pm.h>
+#include <linux/errno.h>
+#include <linux/hw_breakpoint.h>
+#include <linux/kprobes.h>
+#include <linux/perf_event.h>
+#include <linux/ptrace.h>
+#include <linux/smp.h>
+#include <linux/uaccess.h>
+
+#include <asm/current.h>
+#include <asm/debug-monitors.h>
+#include <asm/hw_breakpoint.h>
+#include <asm/traps.h>
+#include <asm/cputype.h>
+#include <asm/system_misc.h>
+
+/* Breakpoint currently in use for each BRP. */
+static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);
+
+/* Watchpoint currently in use for each WRP. */
+static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);
+
+/* Currently stepping a per-CPU kernel breakpoint. */
+static DEFINE_PER_CPU(int, stepping_kernel_bp);
+
+/* Number of BRP/WRP registers on this CPU. */
+static int core_num_brps;
+static int core_num_wrps;
+
+int hw_breakpoint_slots(int type)
+{
+ /*
+ * We can be called early, so don't rely on
+ * our static variables being initialised.
+ */
+ switch (type) {
+ case TYPE_INST:
+ return get_num_brps();
+ case TYPE_DATA:
+ return get_num_wrps();
+ default:
+ pr_warn("unknown slot type: %d\n", type);
+ return 0;
+ }
+}
+
+#define READ_WB_REG_CASE(OFF, N, REG, VAL) \
+ case (OFF + N): \
+ AARCH64_DBG_READ(N, REG, VAL); \
+ break
+
+#define WRITE_WB_REG_CASE(OFF, N, REG, VAL) \
+ case (OFF + N): \
+ AARCH64_DBG_WRITE(N, REG, VAL); \
+ break
+
+#define GEN_READ_WB_REG_CASES(OFF, REG, VAL) \
+ READ_WB_REG_CASE(OFF, 0, REG, VAL); \
+ READ_WB_REG_CASE(OFF, 1, REG, VAL); \
+ READ_WB_REG_CASE(OFF, 2, REG, VAL); \
+ READ_WB_REG_CASE(OFF, 3, REG, VAL); \
+ READ_WB_REG_CASE(OFF, 4, REG, VAL); \
+ READ_WB_REG_CASE(OFF, 5, REG, VAL); \
+ READ_WB_REG_CASE(OFF, 6, REG, VAL); \
+ READ_WB_REG_CASE(OFF, 7, REG, VAL); \
+ READ_WB_REG_CASE(OFF, 8, REG, VAL); \
+ READ_WB_REG_CASE(OFF, 9, REG, VAL); \
+ READ_WB_REG_CASE(OFF, 10, REG, VAL); \
+ READ_WB_REG_CASE(OFF, 11, REG, VAL); \
+ READ_WB_REG_CASE(OFF, 12, REG, VAL); \
+ READ_WB_REG_CASE(OFF, 13, REG, VAL); \
+ READ_WB_REG_CASE(OFF, 14, REG, VAL); \
+ READ_WB_REG_CASE(OFF, 15, REG, VAL)
+
+#define GEN_WRITE_WB_REG_CASES(OFF, REG, VAL) \
+ WRITE_WB_REG_CASE(OFF, 0, REG, VAL); \
+ WRITE_WB_REG_CASE(OFF, 1, REG, VAL); \
+ WRITE_WB_REG_CASE(OFF, 2, REG, VAL); \
+ WRITE_WB_REG_CASE(OFF, 3, REG, VAL); \
+ WRITE_WB_REG_CASE(OFF, 4, REG, VAL); \
+ WRITE_WB_REG_CASE(OFF, 5, REG, VAL); \
+ WRITE_WB_REG_CASE(OFF, 6, REG, VAL); \
+ WRITE_WB_REG_CASE(OFF, 7, REG, VAL); \
+ WRITE_WB_REG_CASE(OFF, 8, REG, VAL); \
+ WRITE_WB_REG_CASE(OFF, 9, REG, VAL); \
+ WRITE_WB_REG_CASE(OFF, 10, REG, VAL); \
+ WRITE_WB_REG_CASE(OFF, 11, REG, VAL); \
+ WRITE_WB_REG_CASE(OFF, 12, REG, VAL); \
+ WRITE_WB_REG_CASE(OFF, 13, REG, VAL); \
+ WRITE_WB_REG_CASE(OFF, 14, REG, VAL); \
+ WRITE_WB_REG_CASE(OFF, 15, REG, VAL)
+
+static u64 read_wb_reg(int reg, int n)
+{
+ u64 val = 0;
+
+ switch (reg + n) {
+ GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val);
+ GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val);
+ GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
+ GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
+ default:
+ pr_warn("attempt to read from unknown breakpoint register %d\n", n);
+ }
+
+ return val;
+}
+NOKPROBE_SYMBOL(read_wb_reg);
+
+static void write_wb_reg(int reg, int n, u64 val)
+{
+ switch (reg + n) {
+ GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val);
+ GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val);
+ GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
+ GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
+ default:
+ pr_warn("attempt to write to unknown breakpoint register %d\n", n);
+ }
+ isb();
+}
+NOKPROBE_SYMBOL(write_wb_reg);
+
+/*
+ * Convert a breakpoint privilege level to the corresponding exception
+ * level.
+ */
+static enum dbg_active_el debug_exception_level(int privilege)
+{
+ switch (privilege) {
+ case AARCH64_BREAKPOINT_EL0:
+ return DBG_ACTIVE_EL0;
+ case AARCH64_BREAKPOINT_EL1:
+ return DBG_ACTIVE_EL1;
+ default:
+ pr_warn("invalid breakpoint privilege level %d\n", privilege);
+ return -EINVAL;
+ }
+}
+NOKPROBE_SYMBOL(debug_exception_level);
+
+enum hw_breakpoint_ops {
+ HW_BREAKPOINT_INSTALL,
+ HW_BREAKPOINT_UNINSTALL,
+ HW_BREAKPOINT_RESTORE
+};
+
+static int is_compat_bp(struct perf_event *bp)
+{
+ struct task_struct *tsk = bp->hw.target;
+
+ /*
+ * tsk can be NULL for per-cpu (non-ptrace) breakpoints.
+ * In this case, use the native interface, since we don't have
+ * the notion of a "compat CPU" and could end up relying on
+ * deprecated behaviour if we use unaligned watchpoints in
+ * AArch64 state.
+ */
+ return tsk && is_compat_thread(task_thread_info(tsk));
+}
+
+/**
+ * hw_breakpoint_slot_setup - Find and setup a perf slot according to
+ * operations
+ *
+ * @slots: pointer to array of slots
+ * @max_slots: max number of slots
+ * @bp: perf_event to setup
+ * @ops: operation to be carried out on the slot
+ *
+ * Return:
+ * slot index on success
+ * -ENOSPC if no slot is available/matches
+ * -EINVAL on wrong operations parameter
+ */
+static int hw_breakpoint_slot_setup(struct perf_event **slots, int max_slots,
+ struct perf_event *bp,
+ enum hw_breakpoint_ops ops)
+{
+ int i;
+ struct perf_event **slot;
+
+ for (i = 0; i < max_slots; ++i) {
+ slot = &slots[i];
+ switch (ops) {
+ case HW_BREAKPOINT_INSTALL:
+ if (!*slot) {
+ *slot = bp;
+ return i;
+ }
+ break;
+ case HW_BREAKPOINT_UNINSTALL:
+ if (*slot == bp) {
+ *slot = NULL;
+ return i;
+ }
+ break;
+ case HW_BREAKPOINT_RESTORE:
+ if (*slot == bp)
+ return i;
+ break;
+ default:
+ pr_warn_once("Unhandled hw breakpoint ops %d\n", ops);
+ return -EINVAL;
+ }
+ }
+ return -ENOSPC;
+}
+
+static int hw_breakpoint_control(struct perf_event *bp,
+ enum hw_breakpoint_ops ops)
+{
+ struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+ struct perf_event **slots;
+ struct debug_info *debug_info = &current->thread.debug;
+ int i, max_slots, ctrl_reg, val_reg, reg_enable;
+ enum dbg_active_el dbg_el = debug_exception_level(info->ctrl.privilege);
+ u32 ctrl;
+
+ if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
+ /* Breakpoint */
+ ctrl_reg = AARCH64_DBG_REG_BCR;
+ val_reg = AARCH64_DBG_REG_BVR;
+ slots = this_cpu_ptr(bp_on_reg);
+ max_slots = core_num_brps;
+ reg_enable = !debug_info->bps_disabled;
+ } else {
+ /* Watchpoint */
+ ctrl_reg = AARCH64_DBG_REG_WCR;
+ val_reg = AARCH64_DBG_REG_WVR;
+ slots = this_cpu_ptr(wp_on_reg);
+ max_slots = core_num_wrps;
+ reg_enable = !debug_info->wps_disabled;
+ }
+
+ i = hw_breakpoint_slot_setup(slots, max_slots, bp, ops);
+
+ if (WARN_ONCE(i < 0, "Can't find any breakpoint slot"))
+ return i;
+
+ switch (ops) {
+ case HW_BREAKPOINT_INSTALL:
+ /*
+ * Ensure debug monitors are enabled at the correct exception
+ * level.
+ */
+ enable_debug_monitors(dbg_el);
+ fallthrough;
+ case HW_BREAKPOINT_RESTORE:
+ /* Setup the address register. */
+ write_wb_reg(val_reg, i, info->address);
+
+ /* Setup the control register. */
+ ctrl = encode_ctrl_reg(info->ctrl);
+ write_wb_reg(ctrl_reg, i,
+ reg_enable ? ctrl | 0x1 : ctrl & ~0x1);
+ break;
+ case HW_BREAKPOINT_UNINSTALL:
+ /* Reset the control register. */
+ write_wb_reg(ctrl_reg, i, 0);
+
+ /*
+ * Release the debug monitors for the correct exception
+ * level.
+ */
+ disable_debug_monitors(dbg_el);
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * Install a perf counter breakpoint.
+ */
+int arch_install_hw_breakpoint(struct perf_event *bp)
+{
+ return hw_breakpoint_control(bp, HW_BREAKPOINT_INSTALL);
+}
+
+void arch_uninstall_hw_breakpoint(struct perf_event *bp)
+{
+ hw_breakpoint_control(bp, HW_BREAKPOINT_UNINSTALL);
+}
+
+static int get_hbp_len(u8 hbp_len)
+{
+ unsigned int len_in_bytes = 0;
+
+ switch (hbp_len) {
+ case ARM_BREAKPOINT_LEN_1:
+ len_in_bytes = 1;
+ break;
+ case ARM_BREAKPOINT_LEN_2:
+ len_in_bytes = 2;
+ break;
+ case ARM_BREAKPOINT_LEN_3:
+ len_in_bytes = 3;
+ break;
+ case ARM_BREAKPOINT_LEN_4:
+ len_in_bytes = 4;
+ break;
+ case ARM_BREAKPOINT_LEN_5:
+ len_in_bytes = 5;
+ break;
+ case ARM_BREAKPOINT_LEN_6:
+ len_in_bytes = 6;
+ break;
+ case ARM_BREAKPOINT_LEN_7:
+ len_in_bytes = 7;
+ break;
+ case ARM_BREAKPOINT_LEN_8:
+ len_in_bytes = 8;
+ break;
+ }
+
+ return len_in_bytes;
+}
+
+/*
+ * Check whether bp virtual address is in kernel space.
+ */
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
+{
+ unsigned int len;
+ unsigned long va;
+
+ va = hw->address;
+ len = get_hbp_len(hw->ctrl.len);
+
+ return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
+}
+
+/*
+ * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl.
+ * Hopefully this will disappear when ptrace can bypass the conversion
+ * to generic breakpoint descriptions.
+ */
+int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
+ int *gen_len, int *gen_type, int *offset)
+{
+ /* Type */
+ switch (ctrl.type) {
+ case ARM_BREAKPOINT_EXECUTE:
+ *gen_type = HW_BREAKPOINT_X;
+ break;
+ case ARM_BREAKPOINT_LOAD:
+ *gen_type = HW_BREAKPOINT_R;
+ break;
+ case ARM_BREAKPOINT_STORE:
+ *gen_type = HW_BREAKPOINT_W;
+ break;
+ case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE:
+ *gen_type = HW_BREAKPOINT_RW;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!ctrl.len)
+ return -EINVAL;
+ *offset = __ffs(ctrl.len);
+
+ /* Len */
+ switch (ctrl.len >> *offset) {
+ case ARM_BREAKPOINT_LEN_1:
+ *gen_len = HW_BREAKPOINT_LEN_1;
+ break;
+ case ARM_BREAKPOINT_LEN_2:
+ *gen_len = HW_BREAKPOINT_LEN_2;
+ break;
+ case ARM_BREAKPOINT_LEN_3:
+ *gen_len = HW_BREAKPOINT_LEN_3;
+ break;
+ case ARM_BREAKPOINT_LEN_4:
+ *gen_len = HW_BREAKPOINT_LEN_4;
+ break;
+ case ARM_BREAKPOINT_LEN_5:
+ *gen_len = HW_BREAKPOINT_LEN_5;
+ break;
+ case ARM_BREAKPOINT_LEN_6:
+ *gen_len = HW_BREAKPOINT_LEN_6;
+ break;
+ case ARM_BREAKPOINT_LEN_7:
+ *gen_len = HW_BREAKPOINT_LEN_7;
+ break;
+ case ARM_BREAKPOINT_LEN_8:
+ *gen_len = HW_BREAKPOINT_LEN_8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Construct an arch_hw_breakpoint from a perf_event.
+ */
+static int arch_build_bp_info(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
+{
+ /* Type */
+ switch (attr->bp_type) {
+ case HW_BREAKPOINT_X:
+ hw->ctrl.type = ARM_BREAKPOINT_EXECUTE;
+ break;
+ case HW_BREAKPOINT_R:
+ hw->ctrl.type = ARM_BREAKPOINT_LOAD;
+ break;
+ case HW_BREAKPOINT_W:
+ hw->ctrl.type = ARM_BREAKPOINT_STORE;
+ break;
+ case HW_BREAKPOINT_RW:
+ hw->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Len */
+ switch (attr->bp_len) {
+ case HW_BREAKPOINT_LEN_1:
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_1;
+ break;
+ case HW_BREAKPOINT_LEN_2:
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_2;
+ break;
+ case HW_BREAKPOINT_LEN_3:
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_3;
+ break;
+ case HW_BREAKPOINT_LEN_4:
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_4;
+ break;
+ case HW_BREAKPOINT_LEN_5:
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_5;
+ break;
+ case HW_BREAKPOINT_LEN_6:
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_6;
+ break;
+ case HW_BREAKPOINT_LEN_7:
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_7;
+ break;
+ case HW_BREAKPOINT_LEN_8:
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * On AArch64, we only permit breakpoints of length 4, whereas
+ * AArch32 also requires breakpoints of length 2 for Thumb.
+ * Watchpoints can be of length 1, 2, 4 or 8 bytes.
+ */
+ if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
+ if (is_compat_bp(bp)) {
+ if (hw->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
+ hw->ctrl.len != ARM_BREAKPOINT_LEN_4)
+ return -EINVAL;
+ } else if (hw->ctrl.len != ARM_BREAKPOINT_LEN_4) {
+ /*
+ * FIXME: Some tools (I'm looking at you perf) assume
+ * that breakpoints should be sizeof(long). This
+ * is nonsense. For now, we fix up the parameter
+ * but we should probably return -EINVAL instead.
+ */
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_4;
+ }
+ }
+
+ /* Address */
+ hw->address = attr->bp_addr;
+
+ /*
+ * Privilege
+ * Note that we disallow combined EL0/EL1 breakpoints because
+ * that would complicate the stepping code.
+ */
+ if (arch_check_bp_in_kernelspace(hw))
+ hw->ctrl.privilege = AARCH64_BREAKPOINT_EL1;
+ else
+ hw->ctrl.privilege = AARCH64_BREAKPOINT_EL0;
+
+ /* Enabled? */
+ hw->ctrl.enabled = !attr->disabled;
+
+ return 0;
+}
+
+/*
+ * Validate the arch-specific HW Breakpoint register settings.
+ */
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
+{
+ int ret;
+ u64 alignment_mask, offset;
+
+ /* Build the arch_hw_breakpoint. */
+ ret = arch_build_bp_info(bp, attr, hw);
+ if (ret)
+ return ret;
+
+ /*
+ * Check address alignment.
+ * We don't do any clever alignment correction for watchpoints
+ * because using 64-bit unaligned addresses is deprecated for
+ * AArch64.
+ *
+ * AArch32 tasks expect some simple alignment fixups, so emulate
+ * that here.
+ */
+ if (is_compat_bp(bp)) {
+ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_8)
+ alignment_mask = 0x7;
+ else
+ alignment_mask = 0x3;
+ offset = hw->address & alignment_mask;
+ switch (offset) {
+ case 0:
+ /* Aligned */
+ break;
+ case 1:
+ case 2:
+ /* Allow halfword watchpoints and breakpoints. */
+ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
+ break;
+
+ fallthrough;
+ case 3:
+ /* Allow single byte watchpoint. */
+ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
+ break;
+
+ fallthrough;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE)
+ alignment_mask = 0x3;
+ else
+ alignment_mask = 0x7;
+ offset = hw->address & alignment_mask;
+ }
+
+ hw->address &= ~alignment_mask;
+ hw->ctrl.len <<= offset;
+
+ /*
+ * Disallow per-task kernel breakpoints since these would
+ * complicate the stepping code.
+ */
+ if (hw->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target)
+ return -EINVAL;
+
+ return 0;
+}
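
A standalone sketch (illustrative only; the example address and mask are made up) of the fixup at the end of hw_breakpoint_arch_parse(): the address is rounded down to the alignment boundary and the byte-address-select bits are shifted up so they still cover the requested bytes:

#include <stdio.h>

int main(void)
{
	unsigned long addr = 0x1002;	/* requested (unaligned) watch address */
	unsigned int len = 0x3;		/* 2-byte watch: BAS = 0b11 */
	unsigned long mask = 0x3;	/* AArch32-style word alignment mask */
	unsigned long offset = addr & mask;

	addr &= ~mask;			/* hw->address &= ~alignment_mask */
	len <<= offset;			/* hw->ctrl.len <<= offset */

	/* Prints base=0x1000 bas=0xc, i.e. bytes 2-3 of the aligned word. */
	printf("base=%#lx bas=%#x\n", addr, len);
	return 0;
}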
+
+/*
+ * Enable/disable all of the breakpoints active at the specified
+ * exception level at the register level.
+ * This is used when single-stepping after a breakpoint exception.
+ */
+static void toggle_bp_registers(int reg, enum dbg_active_el el, int enable)
+{
+ int i, max_slots, privilege;
+ u32 ctrl;
+ struct perf_event **slots;
+
+ switch (reg) {
+ case AARCH64_DBG_REG_BCR:
+ slots = this_cpu_ptr(bp_on_reg);
+ max_slots = core_num_brps;
+ break;
+ case AARCH64_DBG_REG_WCR:
+ slots = this_cpu_ptr(wp_on_reg);
+ max_slots = core_num_wrps;
+ break;
+ default:
+ return;
+ }
+
+ for (i = 0; i < max_slots; ++i) {
+ if (!slots[i])
+ continue;
+
+ privilege = counter_arch_bp(slots[i])->ctrl.privilege;
+ if (debug_exception_level(privilege) != el)
+ continue;
+
+ ctrl = read_wb_reg(reg, i);
+ if (enable)
+ ctrl |= 0x1;
+ else
+ ctrl &= ~0x1;
+ write_wb_reg(reg, i, ctrl);
+ }
+}
+NOKPROBE_SYMBOL(toggle_bp_registers);
+
+/*
+ * Debug exception handlers.
+ */
+static int breakpoint_handler(unsigned long unused, unsigned int esr,
+ struct pt_regs *regs)
+{
+ int i, step = 0, *kernel_step;
+ u32 ctrl_reg;
+ u64 addr, val;
+ struct perf_event *bp, **slots;
+ struct debug_info *debug_info;
+ struct arch_hw_breakpoint_ctrl ctrl;
+
+ slots = this_cpu_ptr(bp_on_reg);
+ addr = instruction_pointer(regs);
+ debug_info = &current->thread.debug;
+
+ for (i = 0; i < core_num_brps; ++i) {
+ rcu_read_lock();
+
+ bp = slots[i];
+
+ if (bp == NULL)
+ goto unlock;
+
+ /* Check if the breakpoint value matches. */
+ val = read_wb_reg(AARCH64_DBG_REG_BVR, i);
+ if (val != (addr & ~0x3))
+ goto unlock;
+
+ /* Possible match, check the byte address select to confirm. */
+ ctrl_reg = read_wb_reg(AARCH64_DBG_REG_BCR, i);
+ decode_ctrl_reg(ctrl_reg, &ctrl);
+ if (!((1 << (addr & 0x3)) & ctrl.len))
+ goto unlock;
+
+ counter_arch_bp(bp)->trigger = addr;
+ perf_bp_event(bp, regs);
+
+ /* Do we need to handle the stepping? */
+ if (uses_default_overflow_handler(bp))
+ step = 1;
+unlock:
+ rcu_read_unlock();
+ }
+
+ if (!step)
+ return 0;
+
+ if (user_mode(regs)) {
+ debug_info->bps_disabled = 1;
+ toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 0);
+
+ /* If we're already stepping a watchpoint, just return. */
+ if (debug_info->wps_disabled)
+ return 0;
+
+ if (test_thread_flag(TIF_SINGLESTEP))
+ debug_info->suspended_step = 1;
+ else
+ user_enable_single_step(current);
+ } else {
+ toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 0);
+ kernel_step = this_cpu_ptr(&stepping_kernel_bp);
+
+ if (*kernel_step != ARM_KERNEL_STEP_NONE)
+ return 0;
+
+ if (kernel_active_single_step()) {
+ *kernel_step = ARM_KERNEL_STEP_SUSPEND;
+ } else {
+ *kernel_step = ARM_KERNEL_STEP_ACTIVE;
+ kernel_enable_single_step(regs);
+ }
+ }
+
+ return 0;
+}
+NOKPROBE_SYMBOL(breakpoint_handler);
+
+/*
+ * Arm64 hardware does not always report a watchpoint hit address that matches
+ * one of the watchpoints set. It can also report an address "near" the
+ * watchpoint if a single instruction accesses both watched and unwatched
+ * addresses. There is no straightforward way, short of disassembling the
+ * offending instruction, to map that address back to the watchpoint. This
+ * function computes the distance of the memory access from the watchpoint as a
+ * heuristic for the likelihood that a given access triggered the watchpoint.
+ *
+ * See Section D2.10.5 "Determining the memory location that caused a Watchpoint
+ * exception" of the ARMv8 Architecture Reference Manual for details.
+ *
+ * The function returns the distance of the address from the bytes watched by
+ * the watchpoint. In case of an exact match, it returns 0.
+ */
+static u64 get_distance_from_watchpoint(unsigned long addr, u64 val,
+ struct arch_hw_breakpoint_ctrl *ctrl)
+{
+ u64 wp_low, wp_high;
+ u32 lens, lene;
+
+ addr = untagged_addr(addr);
+
+ lens = __ffs(ctrl->len);
+ lene = __fls(ctrl->len);
+
+ wp_low = val + lens;
+ wp_high = val + lene;
+ if (addr < wp_low)
+ return wp_low - addr;
+ else if (addr > wp_high)
+ return addr - wp_high;
+ else
+ return 0;
+}
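
The heuristic can be reproduced in a self-contained user-space sketch (illustrative only; the example values are made up): the watched byte range is derived from the lowest and highest set bits of the byte-address-select mask, and the reported address is compared against that range:

#include <stdio.h>

static unsigned long long distance(unsigned long long addr,
				   unsigned long long wvr, unsigned int bas)
{
	unsigned int lens = __builtin_ctz(bas);		/* lowest selected byte */
	unsigned int lene = 31 - __builtin_clz(bas);	/* highest selected byte */
	unsigned long long wp_low = wvr + lens;
	unsigned long long wp_high = wvr + lene;

	if (addr < wp_low)
		return wp_low - addr;
	if (addr > wp_high)
		return addr - wp_high;
	return 0;
}

int main(void)
{
	/*
	 * A 4-byte watchpoint covering 0x1004..0x1007 (WVR = 0x1000,
	 * BAS = 0xf0); an access reported at 0x1002 is 2 bytes away.
	 */
	printf("%llu\n", distance(0x1002, 0x1000, 0xf0));
	return 0;
}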
+
+static int watchpoint_report(struct perf_event *wp, unsigned long addr,
+ struct pt_regs *regs)
+{
+ int step = uses_default_overflow_handler(wp);
+ struct arch_hw_breakpoint *info = counter_arch_bp(wp);
+
+ info->trigger = addr;
+
+ /*
+ * If we triggered a user watchpoint from a uaccess routine, then
+ * handle the stepping ourselves since userspace really can't help
+ * us with this.
+ */
+ if (!user_mode(regs) && info->ctrl.privilege == AARCH64_BREAKPOINT_EL0)
+ step = 1;
+ else
+ perf_bp_event(wp, regs);
+
+ return step;
+}
+
+static int watchpoint_handler(unsigned long addr, unsigned int esr,
+ struct pt_regs *regs)
+{
+ int i, step = 0, *kernel_step, access, closest_match = 0;
+ u64 min_dist = -1, dist;
+ u32 ctrl_reg;
+ u64 val;
+ struct perf_event *wp, **slots;
+ struct debug_info *debug_info;
+ struct arch_hw_breakpoint_ctrl ctrl;
+
+ slots = this_cpu_ptr(wp_on_reg);
+ debug_info = &current->thread.debug;
+
+ /*
+	 * Find all watchpoints that match the reported address. If no exact
+	 * match is found, attribute the hit to the closest watchpoint.
+ */
+ rcu_read_lock();
+ for (i = 0; i < core_num_wrps; ++i) {
+ wp = slots[i];
+ if (wp == NULL)
+ continue;
+
+ /*
+ * Check that the access type matches.
+ * 0 => load, otherwise => store
+ */
+ access = (esr & AARCH64_ESR_ACCESS_MASK) ? HW_BREAKPOINT_W :
+ HW_BREAKPOINT_R;
+ if (!(access & hw_breakpoint_type(wp)))
+ continue;
+
+ /* Check if the watchpoint value and byte select match. */
+ val = read_wb_reg(AARCH64_DBG_REG_WVR, i);
+ ctrl_reg = read_wb_reg(AARCH64_DBG_REG_WCR, i);
+ decode_ctrl_reg(ctrl_reg, &ctrl);
+ dist = get_distance_from_watchpoint(addr, val, &ctrl);
+ if (dist < min_dist) {
+ min_dist = dist;
+ closest_match = i;
+ }
+ /* Is this an exact match? */
+ if (dist != 0)
+ continue;
+
+ step = watchpoint_report(wp, addr, regs);
+ }
+
+ /* No exact match found? */
+ if (min_dist > 0 && min_dist != -1)
+ step = watchpoint_report(slots[closest_match], addr, regs);
+
+ rcu_read_unlock();
+
+ if (!step)
+ return 0;
+
+ /*
+ * We always disable EL0 watchpoints because the kernel can
+ * cause these to fire via an unprivileged access.
+ */
+ toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 0);
+
+ if (user_mode(regs)) {
+ debug_info->wps_disabled = 1;
+
+ /* If we're already stepping a breakpoint, just return. */
+ if (debug_info->bps_disabled)
+ return 0;
+
+ if (test_thread_flag(TIF_SINGLESTEP))
+ debug_info->suspended_step = 1;
+ else
+ user_enable_single_step(current);
+ } else {
+ toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 0);
+ kernel_step = this_cpu_ptr(&stepping_kernel_bp);
+
+ if (*kernel_step != ARM_KERNEL_STEP_NONE)
+ return 0;
+
+ if (kernel_active_single_step()) {
+ *kernel_step = ARM_KERNEL_STEP_SUSPEND;
+ } else {
+ *kernel_step = ARM_KERNEL_STEP_ACTIVE;
+ kernel_enable_single_step(regs);
+ }
+ }
+
+ return 0;
+}
+NOKPROBE_SYMBOL(watchpoint_handler);
+
+/*
+ * Handle single-step exception.
+ */
+int reinstall_suspended_bps(struct pt_regs *regs)
+{
+ struct debug_info *debug_info = &current->thread.debug;
+ int handled_exception = 0, *kernel_step;
+
+ kernel_step = this_cpu_ptr(&stepping_kernel_bp);
+
+ /*
+ * Called from single-step exception handler.
+ * Return 0 if execution can resume, 1 if a SIGTRAP should be
+ * reported.
+ */
+ if (user_mode(regs)) {
+ if (debug_info->bps_disabled) {
+ debug_info->bps_disabled = 0;
+ toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 1);
+ handled_exception = 1;
+ }
+
+ if (debug_info->wps_disabled) {
+ debug_info->wps_disabled = 0;
+ toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1);
+ handled_exception = 1;
+ }
+
+ if (handled_exception) {
+ if (debug_info->suspended_step) {
+ debug_info->suspended_step = 0;
+ /* Allow exception handling to fall-through. */
+ handled_exception = 0;
+ } else {
+ user_disable_single_step(current);
+ }
+ }
+ } else if (*kernel_step != ARM_KERNEL_STEP_NONE) {
+ toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 1);
+ toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 1);
+
+ if (!debug_info->wps_disabled)
+ toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1);
+
+ if (*kernel_step != ARM_KERNEL_STEP_SUSPEND) {
+ kernel_disable_single_step();
+ handled_exception = 1;
+ } else {
+ handled_exception = 0;
+ }
+
+ *kernel_step = ARM_KERNEL_STEP_NONE;
+ }
+
+ return !handled_exception;
+}
+NOKPROBE_SYMBOL(reinstall_suspended_bps);
+
+/*
+ * Context-switcher for restoring suspended breakpoints.
+ */
+void hw_breakpoint_thread_switch(struct task_struct *next)
+{
+ /*
+ * current next
+ * disabled: 0 0 => The usual case, NOTIFY_DONE
+ * 0 1 => Disable the registers
+ * 1 0 => Enable the registers
+ * 1 1 => NOTIFY_DONE. per-task bps will
+ * get taken care of by perf.
+ */
+
+ struct debug_info *current_debug_info, *next_debug_info;
+
+ current_debug_info = &current->thread.debug;
+ next_debug_info = &next->thread.debug;
+
+ /* Update breakpoints. */
+ if (current_debug_info->bps_disabled != next_debug_info->bps_disabled)
+ toggle_bp_registers(AARCH64_DBG_REG_BCR,
+ DBG_ACTIVE_EL0,
+ !next_debug_info->bps_disabled);
+
+ /* Update watchpoints. */
+ if (current_debug_info->wps_disabled != next_debug_info->wps_disabled)
+ toggle_bp_registers(AARCH64_DBG_REG_WCR,
+ DBG_ACTIVE_EL0,
+ !next_debug_info->wps_disabled);
+}
+
+/*
+ * CPU initialisation.
+ */
+static int hw_breakpoint_reset(unsigned int cpu)
+{
+ int i;
+ struct perf_event **slots;
+ /*
+	 * When a CPU goes through cold boot, it does not have any installed
+	 * slots, so it is safe to share the same function for restoring and
+	 * resetting breakpoints; when a CPU is hotplugged in, it walks the
+	 * slots, which are all empty, and hence just resets control and value
+	 * for the debug registers.
+	 * When this function is triggered on warm boot through a CPU PM
+	 * notifier, some slots might be initialized; if so, they are
+	 * reprogrammed according to the debug slot contents.
+ */
+ for (slots = this_cpu_ptr(bp_on_reg), i = 0; i < core_num_brps; ++i) {
+ if (slots[i]) {
+ hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
+ } else {
+ write_wb_reg(AARCH64_DBG_REG_BCR, i, 0UL);
+ write_wb_reg(AARCH64_DBG_REG_BVR, i, 0UL);
+ }
+ }
+
+ for (slots = this_cpu_ptr(wp_on_reg), i = 0; i < core_num_wrps; ++i) {
+ if (slots[i]) {
+ hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
+ } else {
+ write_wb_reg(AARCH64_DBG_REG_WCR, i, 0UL);
+ write_wb_reg(AARCH64_DBG_REG_WVR, i, 0UL);
+ }
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_CPU_PM
+extern void cpu_suspend_set_dbg_restorer(int (*hw_bp_restore)(unsigned int));
+#else
+static inline void cpu_suspend_set_dbg_restorer(int (*hw_bp_restore)(unsigned int))
+{
+}
+#endif
+
+/*
+ * One-time initialisation.
+ */
+static int __init arch_hw_breakpoint_init(void)
+{
+ int ret;
+
+ core_num_brps = get_num_brps();
+ core_num_wrps = get_num_wrps();
+
+ pr_info("found %d breakpoint and %d watchpoint registers.\n",
+ core_num_brps, core_num_wrps);
+
+ /* Register debug fault handlers. */
+ hook_debug_fault_code(DBG_ESR_EVT_HWBP, breakpoint_handler, SIGTRAP,
+ TRAP_HWBKPT, "hw-breakpoint handler");
+ hook_debug_fault_code(DBG_ESR_EVT_HWWP, watchpoint_handler, SIGTRAP,
+ TRAP_HWBKPT, "hw-watchpoint handler");
+
+ /*
+ * Reset the breakpoint resources. We assume that a halting
+ * debugger will leave the world in a nice state for us.
+ */
+ ret = cpuhp_setup_state(CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
+ "perf/arm64/hw_breakpoint:starting",
+ hw_breakpoint_reset, NULL);
+ if (ret)
+ pr_err("failed to register CPU hotplug notifier: %d\n", ret);
+
+ /* Register cpu_suspend hw breakpoint restore hook */
+ cpu_suspend_set_dbg_restorer(hw_breakpoint_reset);
+
+ return ret;
+}
+arch_initcall(arch_hw_breakpoint_init);
+
+void hw_breakpoint_pmu_read(struct perf_event *bp)
+{
+}
+
+/*
+ * Dummy function to register with die_notifier.
+ */
+int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
+ unsigned long val, void *data)
+{
+ return NOTIFY_DONE;
+}
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
new file mode 100644
index 000000000..160f5881a
--- /dev/null
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Hypervisor stub
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <linux/irqchip/arm-gic-v3.h>
+
+#include <asm/assembler.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
+#include <asm/ptrace.h>
+#include <asm/virt.h>
+
+ .text
+ .pushsection .hyp.text, "ax"
+
+ .align 11
+
+SYM_CODE_START(__hyp_stub_vectors)
+ ventry el2_sync_invalid // Synchronous EL2t
+ ventry el2_irq_invalid // IRQ EL2t
+ ventry el2_fiq_invalid // FIQ EL2t
+ ventry el2_error_invalid // Error EL2t
+
+ ventry el2_sync_invalid // Synchronous EL2h
+ ventry el2_irq_invalid // IRQ EL2h
+ ventry el2_fiq_invalid // FIQ EL2h
+ ventry el2_error_invalid // Error EL2h
+
+ ventry el1_sync // Synchronous 64-bit EL1
+ ventry el1_irq_invalid // IRQ 64-bit EL1
+ ventry el1_fiq_invalid // FIQ 64-bit EL1
+ ventry el1_error_invalid // Error 64-bit EL1
+
+ ventry el1_sync_invalid // Synchronous 32-bit EL1
+ ventry el1_irq_invalid // IRQ 32-bit EL1
+ ventry el1_fiq_invalid // FIQ 32-bit EL1
+ ventry el1_error_invalid // Error 32-bit EL1
+SYM_CODE_END(__hyp_stub_vectors)
+
+ .align 11
+
+SYM_CODE_START_LOCAL(el1_sync)
+ cmp x0, #HVC_SET_VECTORS
+ b.ne 2f
+ msr vbar_el2, x1
+ b 9f
+
+2: cmp x0, #HVC_SOFT_RESTART
+ b.ne 3f
+ mov x0, x2
+ mov x2, x4
+ mov x4, x1
+ mov x1, x3
+ br x4 // no return
+
+3: cmp x0, #HVC_RESET_VECTORS
+ beq 9f // Nothing to reset!
+
+ /* Someone called kvm_call_hyp() against the hyp-stub... */
+ mov_q x0, HVC_STUB_ERR
+ eret
+
+9: mov x0, xzr
+ eret
+SYM_CODE_END(el1_sync)
+
+.macro invalid_vector label
+SYM_CODE_START_LOCAL(\label)
+ b \label
+SYM_CODE_END(\label)
+.endm
+
+ invalid_vector el2_sync_invalid
+ invalid_vector el2_irq_invalid
+ invalid_vector el2_fiq_invalid
+ invalid_vector el2_error_invalid
+ invalid_vector el1_sync_invalid
+ invalid_vector el1_irq_invalid
+ invalid_vector el1_fiq_invalid
+ invalid_vector el1_error_invalid
+
+/*
+ * __hyp_set_vectors: Call this after boot to set the initial hypervisor
+ * vectors as part of hypervisor installation. On an SMP system, this should
+ * be called on each CPU.
+ *
+ * x0 must be the physical address of the new vector table, and must be
+ * 2KB aligned.
+ *
+ * Before calling this, you must check that the stub hypervisor is installed
+ * everywhere, by waiting for any secondary CPUs to be brought up and then
+ * checking that is_hyp_mode_available() is true.
+ *
+ * If not, there is a pre-existing hypervisor, some CPUs failed to boot, or
+ * something else went wrong... in such cases, trying to install a new
+ * hypervisor is unlikely to work as desired.
+ *
+ * When you call into your shiny new hypervisor, sp_el2 will contain junk,
+ * so you will need to set that to something sensible at the new hypervisor's
+ * initialisation entry point.
+ */
+
+SYM_FUNC_START(__hyp_set_vectors)
+ mov x1, x0
+ mov x0, #HVC_SET_VECTORS
+ hvc #0
+ ret
+SYM_FUNC_END(__hyp_set_vectors)
+
+SYM_FUNC_START(__hyp_reset_vectors)
+ mov x0, #HVC_RESET_VECTORS
+ hvc #0
+ ret
+SYM_FUNC_END(__hyp_reset_vectors)
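
A hedged in-kernel sketch (not from this patch; install_my_hypervisor() and its argument are hypothetical) of how a caller might follow the comment above when installing its own vectors through the stub:

#include <linux/types.h>
#include <asm/virt.h>

/* Hypothetical helper, not part of this patch. */
static void install_my_hypervisor(phys_addr_t vectors_pa)
{
	/*
	 * Per the comment above: only proceed once every CPU is known to be
	 * running the stub, i.e. after the secondaries have been brought up.
	 */
	if (!is_hyp_mode_available())
		return;

	/* Issues HVC_SET_VECTORS, handled by el1_sync in the stub. */
	__hyp_set_vectors(vectors_pa);
}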
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
new file mode 100644
index 000000000..48e43b29a
--- /dev/null
+++ b/arch/arm64/kernel/image-vars.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Linker script variables to be set after section resolution, as
+ * ld.lld does not like variables assigned before SECTIONS is processed.
+ */
+#ifndef __ARM64_KERNEL_IMAGE_VARS_H
+#define __ARM64_KERNEL_IMAGE_VARS_H
+
+#ifndef LINKER_SCRIPT
+#error This file should only be included in vmlinux.lds.S
+#endif
+
+#ifdef CONFIG_EFI
+
+__efistub_kernel_size = _edata - _text;
+__efistub_primary_entry_offset = primary_entry - _text;
+
+
+/*
+ * The EFI stub has its own symbol namespace prefixed by __efistub_, to
+ * isolate it from the kernel proper. The following symbols are legally
+ * accessed by the stub, so provide some aliases to make them accessible.
+ * Only include data symbols here, or text symbols of functions that are
+ * guaranteed to be safe when executed at another offset than they were
+ * linked at. The routines below are all implemented in assembler in a
+ * position independent manner
+ */
+__efistub_memcmp = __pi_memcmp;
+__efistub_memchr = __pi_memchr;
+__efistub_memcpy = __pi_memcpy;
+__efistub_memmove = __pi_memmove;
+__efistub_memset = __pi_memset;
+__efistub_strlen = __pi_strlen;
+__efistub_strnlen = __pi_strnlen;
+__efistub_strcmp = __pi_strcmp;
+__efistub_strncmp = __pi_strncmp;
+__efistub_strrchr = __pi_strrchr;
+__efistub___clean_dcache_area_poc = __pi___clean_dcache_area_poc;
+
+#ifdef CONFIG_KASAN
+__efistub___memcpy = __pi_memcpy;
+__efistub___memmove = __pi_memmove;
+__efistub___memset = __pi_memset;
+#endif
+
+__efistub__text = _text;
+__efistub__end = _end;
+__efistub__edata = _edata;
+__efistub_screen_info = screen_info;
+__efistub__ctype = _ctype;
+
+#endif
+
+#ifdef CONFIG_KVM
+
+/*
+ * KVM nVHE code has its own symbol namespace prefixed with __kvm_nvhe_, to
+ * separate it from the kernel proper. The following symbols are legally
+ * accessed by it, therefore provide aliases to make them linkable.
+ * Do not include symbols which may not be safely accessed under hypervisor
+ * memory mappings.
+ */
+
+/* Alternative callbacks for init-time patching of nVHE hyp code. */
+KVM_NVHE_ALIAS(kvm_patch_vector_branch);
+KVM_NVHE_ALIAS(kvm_update_va_mask);
+
+/* Global kernel state accessed by nVHE hyp code. */
+KVM_NVHE_ALIAS(kvm_vgic_global_state);
+
+/* Kernel constant needed to compute idmap addresses. */
+KVM_NVHE_ALIAS(kimage_voffset);
+
+/* Kernel symbols used to call panic() from nVHE hyp code (via ERET). */
+KVM_NVHE_ALIAS(__hyp_panic_string);
+KVM_NVHE_ALIAS(panic);
+
+/* Vectors installed by hyp-init on reset HVC. */
+KVM_NVHE_ALIAS(__hyp_stub_vectors);
+
+/* IDMAP TCR_EL1.T0SZ as computed by the EL1 init code */
+KVM_NVHE_ALIAS(idmap_t0sz);
+
+/* Kernel symbol used by icache_is_vpipt(). */
+KVM_NVHE_ALIAS(__icache_flags);
+
+/* Kernel symbols needed for cpus_have_final/const_caps checks. */
+KVM_NVHE_ALIAS(arm64_const_caps_ready);
+KVM_NVHE_ALIAS(cpu_hwcap_keys);
+
+/* Static keys which are set if a vGIC trap should be handled in hyp. */
+KVM_NVHE_ALIAS(vgic_v2_cpuif_trap);
+KVM_NVHE_ALIAS(vgic_v3_cpuif_trap);
+
+/* Static key checked in pmr_sync(). */
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+KVM_NVHE_ALIAS(gic_pmr_sync);
+/* Static key checked in GIC_PRIO_IRQOFF. */
+KVM_NVHE_ALIAS(gic_nonsecure_priorities);
+#endif
+
+/* EL2 exception handling */
+KVM_NVHE_ALIAS(__start___kvm_ex_table);
+KVM_NVHE_ALIAS(__stop___kvm_ex_table);
+
+/* Position-independent library routines */
+KVM_NVHE_ALIAS_HYP(clear_page, __pi_clear_page);
+KVM_NVHE_ALIAS_HYP(copy_page, __pi_copy_page);
+KVM_NVHE_ALIAS_HYP(memcpy, __pi_memcpy);
+KVM_NVHE_ALIAS_HYP(memset, __pi_memset);
+
+#ifdef CONFIG_KASAN
+KVM_NVHE_ALIAS_HYP(__memcpy, __pi_memcpy);
+KVM_NVHE_ALIAS_HYP(__memset, __pi_memset);
+#endif
+
+#endif /* CONFIG_KVM */
+
+#endif /* __ARM64_KERNEL_IMAGE_VARS_H */
diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h
new file mode 100644
index 000000000..7bc3ba897
--- /dev/null
+++ b/arch/arm64/kernel/image.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Linker script macros to generate Image header fields.
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ */
+#ifndef __ARM64_KERNEL_IMAGE_H
+#define __ARM64_KERNEL_IMAGE_H
+
+#ifndef LINKER_SCRIPT
+#error This file should only be included in vmlinux.lds.S
+#endif
+
+#include <asm/image.h>
+
+/*
+ * There aren't any ELF relocations we can use to endian-swap values known only
+ * at link time (e.g. the subtraction of two symbol addresses), so we must get
+ * the linker to endian-swap certain values before emitting them.
+ *
+ * Note that, in order for this to work when building the ELF64 PIE executable
+ * (for KASLR), these values should not be referenced via R_AARCH64_ABS64
+ * relocations, since these are fixed up at runtime rather than at build time
+ * when PIE is in effect. So we need to split them up into 32-bit high and
+ * low words.
+ */
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define DATA_LE32(data) \
+ ((((data) & 0x000000ff) << 24) | \
+ (((data) & 0x0000ff00) << 8) | \
+ (((data) & 0x00ff0000) >> 8) | \
+ (((data) & 0xff000000) >> 24))
+#else
+#define DATA_LE32(data) ((data) & 0xffffffff)
+#endif
+
+#define DEFINE_IMAGE_LE64(sym, data) \
+ sym##_lo32 = DATA_LE32((data) & 0xffffffff); \
+ sym##_hi32 = DATA_LE32((data) >> 32)
+
+#define __HEAD_FLAG(field) (__HEAD_FLAG_##field << \
+ ARM64_IMAGE_FLAG_##field##_SHIFT)
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define __HEAD_FLAG_BE ARM64_IMAGE_FLAG_BE
+#else
+#define __HEAD_FLAG_BE ARM64_IMAGE_FLAG_LE
+#endif
+
+#define __HEAD_FLAG_PAGE_SIZE ((PAGE_SHIFT - 10) / 2)
+
+#define __HEAD_FLAG_PHYS_BASE 1
+
+#define __HEAD_FLAGS (__HEAD_FLAG(BE) | \
+ __HEAD_FLAG(PAGE_SIZE) | \
+ __HEAD_FLAG(PHYS_BASE))
+
+/*
+ * These will be output as part of the Image header, which should be
+ * little-endian regardless of the endianness of the kernel. While constant
+ * values could be endian-swapped in head.S, all are done here for consistency.
+ */
+#define HEAD_SYMBOLS \
+ DEFINE_IMAGE_LE64(_kernel_size_le, _end - _text); \
+ DEFINE_IMAGE_LE64(_kernel_flags_le, __HEAD_FLAGS);
+
+#endif /* __ARM64_KERNEL_IMAGE_H */
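
A user-space sketch (illustrative only; the size value is arbitrary) of what DEFINE_IMAGE_LE64 arranges at link time: the 64-bit value is split into two 32-bit words, each byte-swapped on a big-endian build so the emitted Image header stays little-endian:

#include <stdint.h>
#include <stdio.h>

static uint32_t data_le32(uint32_t data)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return ((data & 0x000000ff) << 24) | ((data & 0x0000ff00) << 8) |
	       ((data & 0x00ff0000) >> 8)  | ((data & 0xff000000) >> 24);
#else
	return data;
#endif
}

int main(void)
{
	uint64_t kernel_size = 0x1a00000ULL;	/* hypothetical _end - _text */
	uint32_t lo32 = data_le32(kernel_size & 0xffffffff);
	uint32_t hi32 = data_le32(kernel_size >> 32);

	printf("lo32=%#x hi32=%#x\n", (unsigned int)lo32, (unsigned int)hi32);
	return 0;
}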
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
new file mode 100644
index 000000000..7d4fdf974
--- /dev/null
+++ b/arch/arm64/kernel/insn.c
@@ -0,0 +1,1699 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Huawei Ltd.
+ * Author: Jiang Liu <liuj97@gmail.com>
+ *
+ * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
+ */
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/stop_machine.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+#include <asm/cacheflush.h>
+#include <asm/debug-monitors.h>
+#include <asm/fixmap.h>
+#include <asm/insn.h>
+#include <asm/kprobes.h>
+#include <asm/sections.h>
+
+#define AARCH64_INSN_SF_BIT BIT(31)
+#define AARCH64_INSN_N_BIT BIT(22)
+#define AARCH64_INSN_LSL_12 BIT(22)
+
+static const int aarch64_insn_encoding_class[] = {
+ AARCH64_INSN_CLS_UNKNOWN,
+ AARCH64_INSN_CLS_UNKNOWN,
+ AARCH64_INSN_CLS_UNKNOWN,
+ AARCH64_INSN_CLS_UNKNOWN,
+ AARCH64_INSN_CLS_LDST,
+ AARCH64_INSN_CLS_DP_REG,
+ AARCH64_INSN_CLS_LDST,
+ AARCH64_INSN_CLS_DP_FPSIMD,
+ AARCH64_INSN_CLS_DP_IMM,
+ AARCH64_INSN_CLS_DP_IMM,
+ AARCH64_INSN_CLS_BR_SYS,
+ AARCH64_INSN_CLS_BR_SYS,
+ AARCH64_INSN_CLS_LDST,
+ AARCH64_INSN_CLS_DP_REG,
+ AARCH64_INSN_CLS_LDST,
+ AARCH64_INSN_CLS_DP_FPSIMD,
+};
+
+enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
+{
+ return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
+}
+
+bool __kprobes aarch64_insn_is_steppable_hint(u32 insn)
+{
+ if (!aarch64_insn_is_hint(insn))
+ return false;
+
+ switch (insn & 0xFE0) {
+ case AARCH64_INSN_HINT_XPACLRI:
+ case AARCH64_INSN_HINT_PACIA_1716:
+ case AARCH64_INSN_HINT_PACIB_1716:
+ case AARCH64_INSN_HINT_PACIAZ:
+ case AARCH64_INSN_HINT_PACIASP:
+ case AARCH64_INSN_HINT_PACIBZ:
+ case AARCH64_INSN_HINT_PACIBSP:
+ case AARCH64_INSN_HINT_BTI:
+ case AARCH64_INSN_HINT_BTIC:
+ case AARCH64_INSN_HINT_BTIJ:
+ case AARCH64_INSN_HINT_BTIJC:
+ case AARCH64_INSN_HINT_NOP:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool aarch64_insn_is_branch_imm(u32 insn)
+{
+ return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
+ aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
+ aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
+ aarch64_insn_is_bcond(insn));
+}
+
+static DEFINE_RAW_SPINLOCK(patch_lock);
+
+static bool is_exit_text(unsigned long addr)
+{
+ /* discarded with init text/data */
+ return system_state < SYSTEM_RUNNING &&
+ addr >= (unsigned long)__exittext_begin &&
+ addr < (unsigned long)__exittext_end;
+}
+
+static bool is_image_text(unsigned long addr)
+{
+ return core_kernel_text(addr) || is_exit_text(addr);
+}
+
+static void __kprobes *patch_map(void *addr, int fixmap)
+{
+ unsigned long uintaddr = (uintptr_t) addr;
+ bool image = is_image_text(uintaddr);
+ struct page *page;
+
+ if (image)
+ page = phys_to_page(__pa_symbol(addr));
+ else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
+ page = vmalloc_to_page(addr);
+ else
+ return addr;
+
+ BUG_ON(!page);
+ return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
+ (uintaddr & ~PAGE_MASK));
+}
+
+static void __kprobes patch_unmap(int fixmap)
+{
+ clear_fixmap(fixmap);
+}
+
+/*
+ * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
+ * little-endian.
+ */
+int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
+{
+ int ret;
+ __le32 val;
+
+ ret = copy_from_kernel_nofault(&val, addr, AARCH64_INSN_SIZE);
+ if (!ret)
+ *insnp = le32_to_cpu(val);
+
+ return ret;
+}
+
+static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
+{
+ void *waddr = addr;
+ unsigned long flags = 0;
+ int ret;
+
+ raw_spin_lock_irqsave(&patch_lock, flags);
+ waddr = patch_map(addr, FIX_TEXT_POKE0);
+
+ ret = copy_to_kernel_nofault(waddr, &insn, AARCH64_INSN_SIZE);
+
+ patch_unmap(FIX_TEXT_POKE0);
+ raw_spin_unlock_irqrestore(&patch_lock, flags);
+
+ return ret;
+}
+
+int __kprobes aarch64_insn_write(void *addr, u32 insn)
+{
+ return __aarch64_insn_write(addr, cpu_to_le32(insn));
+}
+
+bool __kprobes aarch64_insn_uses_literal(u32 insn)
+{
+	/* ldr/ldrsw (literal), adr/adrp, prfm (literal) */
+
+ return aarch64_insn_is_ldr_lit(insn) ||
+ aarch64_insn_is_ldrsw_lit(insn) ||
+ aarch64_insn_is_adr_adrp(insn) ||
+ aarch64_insn_is_prfm_lit(insn);
+}
+
+bool __kprobes aarch64_insn_is_branch(u32 insn)
+{
+ /* b, bl, cb*, tb*, ret*, b.cond, br*, blr* */
+
+ return aarch64_insn_is_b(insn) ||
+ aarch64_insn_is_bl(insn) ||
+ aarch64_insn_is_cbz(insn) ||
+ aarch64_insn_is_cbnz(insn) ||
+ aarch64_insn_is_tbz(insn) ||
+ aarch64_insn_is_tbnz(insn) ||
+ aarch64_insn_is_ret(insn) ||
+ aarch64_insn_is_ret_auth(insn) ||
+ aarch64_insn_is_br(insn) ||
+ aarch64_insn_is_br_auth(insn) ||
+ aarch64_insn_is_blr(insn) ||
+ aarch64_insn_is_blr_auth(insn) ||
+ aarch64_insn_is_bcond(insn);
+}
+
+int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
+{
+ u32 *tp = addr;
+ int ret;
+
+ /* A64 instructions must be word aligned */
+ if ((uintptr_t)tp & 0x3)
+ return -EINVAL;
+
+ ret = aarch64_insn_write(tp, insn);
+ if (ret == 0)
+ __flush_icache_range((uintptr_t)tp,
+ (uintptr_t)tp + AARCH64_INSN_SIZE);
+
+ return ret;
+}
+
+struct aarch64_insn_patch {
+ void **text_addrs;
+ u32 *new_insns;
+ int insn_cnt;
+ atomic_t cpu_count;
+};
+
+static int __kprobes aarch64_insn_patch_text_cb(void *arg)
+{
+ int i, ret = 0;
+ struct aarch64_insn_patch *pp = arg;
+
+ /* The last CPU becomes master */
+ if (atomic_inc_return(&pp->cpu_count) == num_online_cpus()) {
+ for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
+ ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
+ pp->new_insns[i]);
+ /* Notify other processors with an additional increment. */
+ atomic_inc(&pp->cpu_count);
+ } else {
+ while (atomic_read(&pp->cpu_count) <= num_online_cpus())
+ cpu_relax();
+ isb();
+ }
+
+ return ret;
+}
+
+int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
+{
+ struct aarch64_insn_patch patch = {
+ .text_addrs = addrs,
+ .new_insns = insns,
+ .insn_cnt = cnt,
+ .cpu_count = ATOMIC_INIT(0),
+ };
+
+ if (cnt <= 0)
+ return -EINVAL;
+
+ return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
+ cpu_online_mask);
+}
+
+static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
+ u32 *maskp, int *shiftp)
+{
+ u32 mask;
+ int shift;
+
+ switch (type) {
+ case AARCH64_INSN_IMM_26:
+ mask = BIT(26) - 1;
+ shift = 0;
+ break;
+ case AARCH64_INSN_IMM_19:
+ mask = BIT(19) - 1;
+ shift = 5;
+ break;
+ case AARCH64_INSN_IMM_16:
+ mask = BIT(16) - 1;
+ shift = 5;
+ break;
+ case AARCH64_INSN_IMM_14:
+ mask = BIT(14) - 1;
+ shift = 5;
+ break;
+ case AARCH64_INSN_IMM_12:
+ mask = BIT(12) - 1;
+ shift = 10;
+ break;
+ case AARCH64_INSN_IMM_9:
+ mask = BIT(9) - 1;
+ shift = 12;
+ break;
+ case AARCH64_INSN_IMM_7:
+ mask = BIT(7) - 1;
+ shift = 15;
+ break;
+ case AARCH64_INSN_IMM_6:
+ case AARCH64_INSN_IMM_S:
+ mask = BIT(6) - 1;
+ shift = 10;
+ break;
+ case AARCH64_INSN_IMM_R:
+ mask = BIT(6) - 1;
+ shift = 16;
+ break;
+ case AARCH64_INSN_IMM_N:
+ mask = 1;
+ shift = 22;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *maskp = mask;
+ *shiftp = shift;
+
+ return 0;
+}
+
+#define ADR_IMM_HILOSPLIT 2
+#define ADR_IMM_SIZE SZ_2M
+#define ADR_IMM_LOMASK ((1 << ADR_IMM_HILOSPLIT) - 1)
+#define ADR_IMM_HIMASK ((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
+#define ADR_IMM_LOSHIFT 29
+#define ADR_IMM_HISHIFT 5
+
+u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
+{
+ u32 immlo, immhi, mask;
+ int shift;
+
+ switch (type) {
+ case AARCH64_INSN_IMM_ADR:
+ shift = 0;
+ immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
+ immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
+ insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
+ mask = ADR_IMM_SIZE - 1;
+ break;
+ default:
+ if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
+ pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
+ type);
+ return 0;
+ }
+ }
+
+ return (insn >> shift) & mask;
+}
+
+u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
+ u32 insn, u64 imm)
+{
+ u32 immlo, immhi, mask;
+ int shift;
+
+ if (insn == AARCH64_BREAK_FAULT)
+ return AARCH64_BREAK_FAULT;
+
+ switch (type) {
+ case AARCH64_INSN_IMM_ADR:
+ shift = 0;
+ immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
+ imm >>= ADR_IMM_HILOSPLIT;
+ immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
+ imm = immlo | immhi;
+ mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
+ (ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
+ break;
+ default:
+ if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
+ pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
+ type);
+ return AARCH64_BREAK_FAULT;
+ }
+ }
+
+ /* Update the immediate field. */
+ insn &= ~(mask << shift);
+ insn |= (imm & mask) << shift;
+
+ return insn;
+}
+
+u32 aarch64_insn_decode_register(enum aarch64_insn_register_type type,
+ u32 insn)
+{
+ int shift;
+
+ switch (type) {
+ case AARCH64_INSN_REGTYPE_RT:
+ case AARCH64_INSN_REGTYPE_RD:
+ shift = 0;
+ break;
+ case AARCH64_INSN_REGTYPE_RN:
+ shift = 5;
+ break;
+ case AARCH64_INSN_REGTYPE_RT2:
+ case AARCH64_INSN_REGTYPE_RA:
+ shift = 10;
+ break;
+ case AARCH64_INSN_REGTYPE_RM:
+ shift = 16;
+ break;
+ default:
+ pr_err("%s: unknown register type encoding %d\n", __func__,
+ type);
+ return 0;
+ }
+
+ return (insn >> shift) & GENMASK(4, 0);
+}
+
+static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
+ u32 insn,
+ enum aarch64_insn_register reg)
+{
+ int shift;
+
+ if (insn == AARCH64_BREAK_FAULT)
+ return AARCH64_BREAK_FAULT;
+
+ if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
+ pr_err("%s: unknown register encoding %d\n", __func__, reg);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ switch (type) {
+ case AARCH64_INSN_REGTYPE_RT:
+ case AARCH64_INSN_REGTYPE_RD:
+ shift = 0;
+ break;
+ case AARCH64_INSN_REGTYPE_RN:
+ shift = 5;
+ break;
+ case AARCH64_INSN_REGTYPE_RT2:
+ case AARCH64_INSN_REGTYPE_RA:
+ shift = 10;
+ break;
+ case AARCH64_INSN_REGTYPE_RM:
+ case AARCH64_INSN_REGTYPE_RS:
+ shift = 16;
+ break;
+ default:
+ pr_err("%s: unknown register type encoding %d\n", __func__,
+ type);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ insn &= ~(GENMASK(4, 0) << shift);
+ insn |= reg << shift;
+
+ return insn;
+}
+
+static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
+ u32 insn)
+{
+ u32 size;
+
+ switch (type) {
+ case AARCH64_INSN_SIZE_8:
+ size = 0;
+ break;
+ case AARCH64_INSN_SIZE_16:
+ size = 1;
+ break;
+ case AARCH64_INSN_SIZE_32:
+ size = 2;
+ break;
+ case AARCH64_INSN_SIZE_64:
+ size = 3;
+ break;
+ default:
+ pr_err("%s: unknown size encoding %d\n", __func__, type);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ insn &= ~GENMASK(31, 30);
+ insn |= size << 30;
+
+ return insn;
+}
+
+static inline long branch_imm_common(unsigned long pc, unsigned long addr,
+ long range)
+{
+ long offset;
+
+ if ((pc & 0x3) || (addr & 0x3)) {
+ pr_err("%s: A64 instructions must be word aligned\n", __func__);
+ return range;
+ }
+
+ offset = ((long)addr - (long)pc);
+
+ if (offset < -range || offset >= range) {
+ pr_err("%s: offset out of range\n", __func__);
+ return range;
+ }
+
+ return offset;
+}
+
+u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
+ enum aarch64_insn_branch_type type)
+{
+ u32 insn;
+ long offset;
+
+ /*
+	 * B and BL support a [-128M, 128M) offset.
+	 * The ARM64 virtual address arrangement guarantees that all kernel and
+	 * module text is within +/-128M.
+ */
+ offset = branch_imm_common(pc, addr, SZ_128M);
+ if (offset >= SZ_128M)
+ return AARCH64_BREAK_FAULT;
+
+ switch (type) {
+ case AARCH64_INSN_BRANCH_LINK:
+ insn = aarch64_insn_get_bl_value();
+ break;
+ case AARCH64_INSN_BRANCH_NOLINK:
+ insn = aarch64_insn_get_b_value();
+ break;
+ default:
+ pr_err("%s: unknown branch encoding %d\n", __func__, type);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
+ offset >> 2);
+}
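
A hedged in-kernel usage sketch (patch_branch() is hypothetical; pc and target are assumed to be word-aligned kernel text addresses): generate a direct branch and patch it in with the helpers defined in this file, the pattern followed by callers such as ftrace:

#include <linux/errno.h>
#include <linux/types.h>
#include <asm/debug-monitors.h>
#include <asm/insn.h>

/* Hypothetical helper, not part of this patch. */
static int patch_branch(unsigned long pc, unsigned long target)
{
	u32 insn;

	insn = aarch64_insn_gen_branch_imm(pc, target,
					   AARCH64_INSN_BRANCH_NOLINK);
	if (insn == AARCH64_BREAK_FAULT)
		return -EINVAL;

	/* Writes the new instruction and flushes the icache for that word. */
	return aarch64_insn_patch_text_nosync((void *)pc, insn);
}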
+
+u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
+ enum aarch64_insn_register reg,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_branch_type type)
+{
+ u32 insn;
+ long offset;
+
+ offset = branch_imm_common(pc, addr, SZ_1M);
+ if (offset >= SZ_1M)
+ return AARCH64_BREAK_FAULT;
+
+ switch (type) {
+ case AARCH64_INSN_BRANCH_COMP_ZERO:
+ insn = aarch64_insn_get_cbz_value();
+ break;
+ case AARCH64_INSN_BRANCH_COMP_NONZERO:
+ insn = aarch64_insn_get_cbnz_value();
+ break;
+ default:
+ pr_err("%s: unknown branch encoding %d\n", __func__, type);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ switch (variant) {
+ case AARCH64_INSN_VARIANT_32BIT:
+ break;
+ case AARCH64_INSN_VARIANT_64BIT:
+ insn |= AARCH64_INSN_SF_BIT;
+ break;
+ default:
+ pr_err("%s: unknown variant encoding %d\n", __func__, variant);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
+
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
+ offset >> 2);
+}
+
+u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
+ enum aarch64_insn_condition cond)
+{
+ u32 insn;
+ long offset;
+
+ offset = branch_imm_common(pc, addr, SZ_1M);
+
+ insn = aarch64_insn_get_bcond_value();
+
+ if (cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL) {
+ pr_err("%s: unknown condition encoding %d\n", __func__, cond);
+ return AARCH64_BREAK_FAULT;
+ }
+ insn |= cond;
+
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
+ offset >> 2);
+}
+
+u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_cr_op op)
+{
+ return aarch64_insn_get_hint_value() | op;
+}
+
+u32 __kprobes aarch64_insn_gen_nop(void)
+{
+ return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
+}
+
+u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
+ enum aarch64_insn_branch_type type)
+{
+ u32 insn;
+
+ switch (type) {
+ case AARCH64_INSN_BRANCH_NOLINK:
+ insn = aarch64_insn_get_br_value();
+ break;
+ case AARCH64_INSN_BRANCH_LINK:
+ insn = aarch64_insn_get_blr_value();
+ break;
+ case AARCH64_INSN_BRANCH_RETURN:
+ insn = aarch64_insn_get_ret_value();
+ break;
+ default:
+ pr_err("%s: unknown branch encoding %d\n", __func__, type);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
+}
+
+u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
+ enum aarch64_insn_register base,
+ enum aarch64_insn_register offset,
+ enum aarch64_insn_size_type size,
+ enum aarch64_insn_ldst_type type)
+{
+ u32 insn;
+
+ switch (type) {
+ case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
+ insn = aarch64_insn_get_ldr_reg_value();
+ break;
+ case AARCH64_INSN_LDST_STORE_REG_OFFSET:
+ insn = aarch64_insn_get_str_reg_value();
+ break;
+ default:
+ pr_err("%s: unknown load/store encoding %d\n", __func__, type);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ insn = aarch64_insn_encode_ldst_size(size, insn);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
+ base);
+
+ return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
+ offset);
+}
+
+u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
+ enum aarch64_insn_register reg2,
+ enum aarch64_insn_register base,
+ int offset,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_ldst_type type)
+{
+ u32 insn;
+ int shift;
+
+ switch (type) {
+ case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
+ insn = aarch64_insn_get_ldp_pre_value();
+ break;
+ case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
+ insn = aarch64_insn_get_stp_pre_value();
+ break;
+ case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
+ insn = aarch64_insn_get_ldp_post_value();
+ break;
+ case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
+ insn = aarch64_insn_get_stp_post_value();
+ break;
+ default:
+ pr_err("%s: unknown load/store encoding %d\n", __func__, type);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ switch (variant) {
+ case AARCH64_INSN_VARIANT_32BIT:
+ if ((offset & 0x3) || (offset < -256) || (offset > 252)) {
+			pr_err("%s: offset must be a multiple of 4 in the range of [-256, 252] %d\n",
+ __func__, offset);
+ return AARCH64_BREAK_FAULT;
+ }
+ shift = 2;
+ break;
+ case AARCH64_INSN_VARIANT_64BIT:
+ if ((offset & 0x7) || (offset < -512) || (offset > 504)) {
+			pr_err("%s: offset must be a multiple of 8 in the range of [-512, 504] %d\n",
+ __func__, offset);
+ return AARCH64_BREAK_FAULT;
+ }
+ shift = 3;
+ insn |= AARCH64_INSN_SF_BIT;
+ break;
+ default:
+ pr_err("%s: unknown variant encoding %d\n", __func__, variant);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
+ reg1);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
+ reg2);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
+ base);
+
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
+ offset >> shift);
+}
+
+u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
+ enum aarch64_insn_register base,
+ enum aarch64_insn_register state,
+ enum aarch64_insn_size_type size,
+ enum aarch64_insn_ldst_type type)
+{
+ u32 insn;
+
+ switch (type) {
+ case AARCH64_INSN_LDST_LOAD_EX:
+ insn = aarch64_insn_get_load_ex_value();
+ break;
+ case AARCH64_INSN_LDST_STORE_EX:
+ insn = aarch64_insn_get_store_ex_value();
+ break;
+ default:
+ pr_err("%s: unknown load/store exclusive encoding %d\n", __func__, type);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ insn = aarch64_insn_encode_ldst_size(size, insn);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
+ reg);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
+ base);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
+ AARCH64_INSN_REG_ZR);
+
+ return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
+ state);
+}
+
+u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
+ enum aarch64_insn_register address,
+ enum aarch64_insn_register value,
+ enum aarch64_insn_size_type size)
+{
+ u32 insn = aarch64_insn_get_ldadd_value();
+
+ switch (size) {
+ case AARCH64_INSN_SIZE_32:
+ case AARCH64_INSN_SIZE_64:
+ break;
+ default:
+ pr_err("%s: unimplemented size encoding %d\n", __func__, size);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ insn = aarch64_insn_encode_ldst_size(size, insn);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
+ result);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
+ address);
+
+ return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
+ value);
+}
+
+u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
+ enum aarch64_insn_register value,
+ enum aarch64_insn_size_type size)
+{
+ /*
+ * STADD is simply encoded as an alias for LDADD with XZR as
+ * the destination register.
+ */
+ return aarch64_insn_gen_ldadd(AARCH64_INSN_REG_ZR, address,
+ value, size);
+}
+
+static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type,
+ enum aarch64_insn_prfm_target target,
+ enum aarch64_insn_prfm_policy policy,
+ u32 insn)
+{
+ u32 imm_type = 0, imm_target = 0, imm_policy = 0;
+
+ switch (type) {
+ case AARCH64_INSN_PRFM_TYPE_PLD:
+ break;
+ case AARCH64_INSN_PRFM_TYPE_PLI:
+ imm_type = BIT(0);
+ break;
+ case AARCH64_INSN_PRFM_TYPE_PST:
+ imm_type = BIT(1);
+ break;
+ default:
+ pr_err("%s: unknown prfm type encoding %d\n", __func__, type);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ switch (target) {
+ case AARCH64_INSN_PRFM_TARGET_L1:
+ break;
+ case AARCH64_INSN_PRFM_TARGET_L2:
+ imm_target = BIT(0);
+ break;
+ case AARCH64_INSN_PRFM_TARGET_L3:
+ imm_target = BIT(1);
+ break;
+ default:
+ pr_err("%s: unknown prfm target encoding %d\n", __func__, target);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ switch (policy) {
+ case AARCH64_INSN_PRFM_POLICY_KEEP:
+ break;
+ case AARCH64_INSN_PRFM_POLICY_STRM:
+ imm_policy = BIT(0);
+ break;
+ default:
+ pr_err("%s: unknown prfm policy encoding %d\n", __func__, policy);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ /* In this case, imm5 is encoded into Rt field. */
+ insn &= ~GENMASK(4, 0);
+ insn |= imm_policy | (imm_target << 1) | (imm_type << 3);
+
+ return insn;
+}
+
+u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
+ enum aarch64_insn_prfm_type type,
+ enum aarch64_insn_prfm_target target,
+ enum aarch64_insn_prfm_policy policy)
+{
+ u32 insn = aarch64_insn_get_prfm_value();
+
+ insn = aarch64_insn_encode_ldst_size(AARCH64_INSN_SIZE_64, insn);
+
+ insn = aarch64_insn_encode_prfm_imm(type, target, policy, insn);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
+ base);
+
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, 0);
+}
+
+u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
+ enum aarch64_insn_register src,
+ int imm, enum aarch64_insn_variant variant,
+ enum aarch64_insn_adsb_type type)
+{
+ u32 insn;
+
+ switch (type) {
+ case AARCH64_INSN_ADSB_ADD:
+ insn = aarch64_insn_get_add_imm_value();
+ break;
+ case AARCH64_INSN_ADSB_SUB:
+ insn = aarch64_insn_get_sub_imm_value();
+ break;
+ case AARCH64_INSN_ADSB_ADD_SETFLAGS:
+ insn = aarch64_insn_get_adds_imm_value();
+ break;
+ case AARCH64_INSN_ADSB_SUB_SETFLAGS:
+ insn = aarch64_insn_get_subs_imm_value();
+ break;
+ default:
+ pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ switch (variant) {
+ case AARCH64_INSN_VARIANT_32BIT:
+ break;
+ case AARCH64_INSN_VARIANT_64BIT:
+ insn |= AARCH64_INSN_SF_BIT;
+ break;
+ default:
+ pr_err("%s: unknown variant encoding %d\n", __func__, variant);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ /* We can't encode more than a 24bit value (12bit + 12bit shift) */
+ if (imm & ~(BIT(24) - 1))
+ goto out;
+
+ /* If we have something in the top 12 bits... */
+ if (imm & ~(SZ_4K - 1)) {
+ /* ... and in the low 12 bits -> error */
+ if (imm & (SZ_4K - 1))
+ goto out;
+
+ imm >>= 12;
+ insn |= AARCH64_INSN_LSL_12;
+ }
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
+
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
+
+out:
+ pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
+ return AARCH64_BREAK_FAULT;
+}
+
+u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
+ enum aarch64_insn_register src,
+ int immr, int imms,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_bitfield_type type)
+{
+ u32 insn;
+ u32 mask;
+
+ switch (type) {
+ case AARCH64_INSN_BITFIELD_MOVE:
+ insn = aarch64_insn_get_bfm_value();
+ break;
+ case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
+ insn = aarch64_insn_get_ubfm_value();
+ break;
+ case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
+ insn = aarch64_insn_get_sbfm_value();
+ break;
+ default:
+ pr_err("%s: unknown bitfield encoding %d\n", __func__, type);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ switch (variant) {
+ case AARCH64_INSN_VARIANT_32BIT:
+ mask = GENMASK(4, 0);
+ break;
+ case AARCH64_INSN_VARIANT_64BIT:
+ insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
+ mask = GENMASK(5, 0);
+ break;
+ default:
+ pr_err("%s: unknown variant encoding %d\n", __func__, variant);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ if (immr & ~mask) {
+ pr_err("%s: invalid immr encoding %d\n", __func__, immr);
+ return AARCH64_BREAK_FAULT;
+ }
+ if (imms & ~mask) {
+ pr_err("%s: invalid imms encoding %d\n", __func__, imms);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
+
+ insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
+
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
+}
+
+u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
+ int imm, int shift,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_movewide_type type)
+{
+ u32 insn;
+
+ switch (type) {
+ case AARCH64_INSN_MOVEWIDE_ZERO:
+ insn = aarch64_insn_get_movz_value();
+ break;
+ case AARCH64_INSN_MOVEWIDE_KEEP:
+ insn = aarch64_insn_get_movk_value();
+ break;
+ case AARCH64_INSN_MOVEWIDE_INVERSE:
+ insn = aarch64_insn_get_movn_value();
+ break;
+ default:
+ pr_err("%s: unknown movewide encoding %d\n", __func__, type);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ if (imm & ~(SZ_64K - 1)) {
+ pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ switch (variant) {
+ case AARCH64_INSN_VARIANT_32BIT:
+ if (shift != 0 && shift != 16) {
+ pr_err("%s: invalid shift encoding %d\n", __func__,
+ shift);
+ return AARCH64_BREAK_FAULT;
+ }
+ break;
+ case AARCH64_INSN_VARIANT_64BIT:
+ insn |= AARCH64_INSN_SF_BIT;
+ if (shift != 0 && shift != 16 && shift != 32 && shift != 48) {
+ pr_err("%s: invalid shift encoding %d\n", __func__,
+ shift);
+ return AARCH64_BREAK_FAULT;
+ }
+ break;
+ default:
+ pr_err("%s: unknown variant encoding %d\n", __func__, variant);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ insn |= (shift >> 4) << 21;
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
+
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
+}
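
A hedged sketch (gen_mov_imm64() is hypothetical; the register and immediate are arbitrary) of the usual caller pattern for aarch64_insn_gen_movewide(): building a 64-bit constant as one MOVZ followed by three MOVKs, 16 bits at a time:

#include <linux/types.h>
#include <asm/insn.h>

/* Hypothetical helper, not part of this patch. */
static void gen_mov_imm64(u32 insns[4], enum aarch64_insn_register dst, u64 imm)
{
	int i;

	/* MOVZ dst, #imm[15:0] */
	insns[0] = aarch64_insn_gen_movewide(dst, imm & 0xffff, 0,
					     AARCH64_INSN_VARIANT_64BIT,
					     AARCH64_INSN_MOVEWIDE_ZERO);

	/* MOVK dst, #imm[16*i+15:16*i], lsl #(16*i) for the upper chunks. */
	for (i = 1; i < 4; i++)
		insns[i] = aarch64_insn_gen_movewide(dst,
						     (imm >> (16 * i)) & 0xffff,
						     16 * i,
						     AARCH64_INSN_VARIANT_64BIT,
						     AARCH64_INSN_MOVEWIDE_KEEP);
}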
+
+u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
+ enum aarch64_insn_register src,
+ enum aarch64_insn_register reg,
+ int shift,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_adsb_type type)
+{
+ u32 insn;
+
+ switch (type) {
+ case AARCH64_INSN_ADSB_ADD:
+ insn = aarch64_insn_get_add_value();
+ break;
+ case AARCH64_INSN_ADSB_SUB:
+ insn = aarch64_insn_get_sub_value();
+ break;
+ case AARCH64_INSN_ADSB_ADD_SETFLAGS:
+ insn = aarch64_insn_get_adds_value();
+ break;
+ case AARCH64_INSN_ADSB_SUB_SETFLAGS:
+ insn = aarch64_insn_get_subs_value();
+ break;
+ default:
+ pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ switch (variant) {
+ case AARCH64_INSN_VARIANT_32BIT:
+ if (shift & ~(SZ_32 - 1)) {
+ pr_err("%s: invalid shift encoding %d\n", __func__,
+ shift);
+ return AARCH64_BREAK_FAULT;
+ }
+ break;
+ case AARCH64_INSN_VARIANT_64BIT:
+ insn |= AARCH64_INSN_SF_BIT;
+ if (shift & ~(SZ_64 - 1)) {
+ pr_err("%s: invalid shift encoding %d\n", __func__,
+ shift);
+ return AARCH64_BREAK_FAULT;
+ }
+ break;
+ default:
+ pr_err("%s: unknown variant encoding %d\n", __func__, variant);
+ return AARCH64_BREAK_FAULT;
+ }
+
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
+
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
+}
+
+u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
+ enum aarch64_insn_register src,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_data1_type type)
+{
+ u32 insn;
+
+ switch (type) {
+ case AARCH64_INSN_DATA1_REVERSE_16:
+ insn = aarch64_insn_get_rev16_value();
+ break;
+ case AARCH64_INSN_DATA1_REVERSE_32:
+ insn = aarch64_insn_get_rev32_value();
+ break;
+ case AARCH64_INSN_DATA1_REVERSE_64:
+ if (variant != AARCH64_INSN_VARIANT_64BIT) {
+ pr_err("%s: invalid variant for reverse64 %d\n",
+ __func__, variant);
+ return AARCH64_BREAK_FAULT;
+ }
+ insn = aarch64_insn_get_rev64_value();
+ break;
+ default:
+ pr_err("%s: unknown data1 encoding %d\n", __func__, type);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ switch (variant) {
+ case AARCH64_INSN_VARIANT_32BIT:
+ break;
+ case AARCH64_INSN_VARIANT_64BIT:
+ insn |= AARCH64_INSN_SF_BIT;
+ break;
+ default:
+ pr_err("%s: unknown variant encoding %d\n", __func__, variant);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
+
+ return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
+}
+
+u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
+ enum aarch64_insn_register src,
+ enum aarch64_insn_register reg,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_data2_type type)
+{
+ u32 insn;
+
+ switch (type) {
+ case AARCH64_INSN_DATA2_UDIV:
+ insn = aarch64_insn_get_udiv_value();
+ break;
+ case AARCH64_INSN_DATA2_SDIV:
+ insn = aarch64_insn_get_sdiv_value();
+ break;
+ case AARCH64_INSN_DATA2_LSLV:
+ insn = aarch64_insn_get_lslv_value();
+ break;
+ case AARCH64_INSN_DATA2_LSRV:
+ insn = aarch64_insn_get_lsrv_value();
+ break;
+ case AARCH64_INSN_DATA2_ASRV:
+ insn = aarch64_insn_get_asrv_value();
+ break;
+ case AARCH64_INSN_DATA2_RORV:
+ insn = aarch64_insn_get_rorv_value();
+ break;
+ default:
+ pr_err("%s: unknown data2 encoding %d\n", __func__, type);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ switch (variant) {
+ case AARCH64_INSN_VARIANT_32BIT:
+ break;
+ case AARCH64_INSN_VARIANT_64BIT:
+ insn |= AARCH64_INSN_SF_BIT;
+ break;
+ default:
+ pr_err("%s: unknown variant encoding %d\n", __func__, variant);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
+
+ return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
+}
+
+u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
+ enum aarch64_insn_register src,
+ enum aarch64_insn_register reg1,
+ enum aarch64_insn_register reg2,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_data3_type type)
+{
+ u32 insn;
+
+ switch (type) {
+ case AARCH64_INSN_DATA3_MADD:
+ insn = aarch64_insn_get_madd_value();
+ break;
+ case AARCH64_INSN_DATA3_MSUB:
+ insn = aarch64_insn_get_msub_value();
+ break;
+ default:
+ pr_err("%s: unknown data3 encoding %d\n", __func__, type);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ switch (variant) {
+ case AARCH64_INSN_VARIANT_32BIT:
+ break;
+ case AARCH64_INSN_VARIANT_64BIT:
+ insn |= AARCH64_INSN_SF_BIT;
+ break;
+ default:
+ pr_err("%s: unknown variant encoding %d\n", __func__, variant);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
+ reg1);
+
+ return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
+ reg2);
+}
+
+u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
+ enum aarch64_insn_register src,
+ enum aarch64_insn_register reg,
+ int shift,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_logic_type type)
+{
+ u32 insn;
+
+ switch (type) {
+ case AARCH64_INSN_LOGIC_AND:
+ insn = aarch64_insn_get_and_value();
+ break;
+ case AARCH64_INSN_LOGIC_BIC:
+ insn = aarch64_insn_get_bic_value();
+ break;
+ case AARCH64_INSN_LOGIC_ORR:
+ insn = aarch64_insn_get_orr_value();
+ break;
+ case AARCH64_INSN_LOGIC_ORN:
+ insn = aarch64_insn_get_orn_value();
+ break;
+ case AARCH64_INSN_LOGIC_EOR:
+ insn = aarch64_insn_get_eor_value();
+ break;
+ case AARCH64_INSN_LOGIC_EON:
+ insn = aarch64_insn_get_eon_value();
+ break;
+ case AARCH64_INSN_LOGIC_AND_SETFLAGS:
+ insn = aarch64_insn_get_ands_value();
+ break;
+ case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
+ insn = aarch64_insn_get_bics_value();
+ break;
+ default:
+ pr_err("%s: unknown logical encoding %d\n", __func__, type);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ switch (variant) {
+ case AARCH64_INSN_VARIANT_32BIT:
+ if (shift & ~(SZ_32 - 1)) {
+ pr_err("%s: invalid shift encoding %d\n", __func__,
+ shift);
+ return AARCH64_BREAK_FAULT;
+ }
+ break;
+ case AARCH64_INSN_VARIANT_64BIT:
+ insn |= AARCH64_INSN_SF_BIT;
+ if (shift & ~(SZ_64 - 1)) {
+ pr_err("%s: invalid shift encoding %d\n", __func__,
+ shift);
+ return AARCH64_BREAK_FAULT;
+ }
+ break;
+ default:
+ pr_err("%s: unknown variant encoding %d\n", __func__, variant);
+ return AARCH64_BREAK_FAULT;
+ }
+
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
+
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
+}
+
+/*
+ * MOV (register) is architecturally an alias of ORR (shifted register) where
+ * MOV <*d>, <*m> is equivalent to ORR <*d>, <*ZR>, <*m>
+ */
+u32 aarch64_insn_gen_move_reg(enum aarch64_insn_register dst,
+ enum aarch64_insn_register src,
+ enum aarch64_insn_variant variant)
+{
+ return aarch64_insn_gen_logical_shifted_reg(dst, AARCH64_INSN_REG_ZR,
+ src, 0, variant,
+ AARCH64_INSN_LOGIC_ORR);
+}
+
+u32 aarch64_insn_gen_adr(unsigned long pc, unsigned long addr,
+ enum aarch64_insn_register reg,
+ enum aarch64_insn_adr_type type)
+{
+ u32 insn;
+ s32 offset;
+
+ switch (type) {
+ case AARCH64_INSN_ADR_TYPE_ADR:
+ insn = aarch64_insn_get_adr_value();
+ offset = addr - pc;
+ break;
+ case AARCH64_INSN_ADR_TYPE_ADRP:
+ insn = aarch64_insn_get_adrp_value();
+ offset = (addr - ALIGN_DOWN(pc, SZ_4K)) >> 12;
+ break;
+ default:
+ pr_err("%s: unknown adr encoding %d\n", __func__, type);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ if (offset < -SZ_1M || offset >= SZ_1M)
+ return AARCH64_BREAK_FAULT;
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, reg);
+
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn, offset);
+}
+
+/*
+ * Decode the imm field of a branch, and return the byte offset as a
+ * signed value (so it can be used when computing a new branch
+ * target).
+ */
+s32 aarch64_get_branch_offset(u32 insn)
+{
+ s32 imm;
+
+ if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
+ imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
+ return (imm << 6) >> 4;
+ }
+
+ if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
+ aarch64_insn_is_bcond(insn)) {
+ imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
+ return (imm << 13) >> 11;
+ }
+
+ if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
+ imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
+ return (imm << 18) >> 16;
+ }
+
+ /* Unhandled instruction */
+ BUG();
+}
+
+/*
+ * Encode the displacement of a branch in the imm field and return the
+ * updated instruction.
+ */
+u32 aarch64_set_branch_offset(u32 insn, s32 offset)
+{
+ if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
+ offset >> 2);
+
+ if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
+ aarch64_insn_is_bcond(insn))
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
+ offset >> 2);
+
+ if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
+ offset >> 2);
+
+ /* Unhandled instruction */
+ BUG();
+}
+
+s32 aarch64_insn_adrp_get_offset(u32 insn)
+{
+ BUG_ON(!aarch64_insn_is_adrp(insn));
+ return aarch64_insn_decode_immediate(AARCH64_INSN_IMM_ADR, insn) << 12;
+}
+
+u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset)
+{
+ BUG_ON(!aarch64_insn_is_adrp(insn));
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn,
+ offset >> 12);
+}
+
+/*
+ * Extract the Op/CR data from a msr/mrs instruction.
+ */
+u32 aarch64_insn_extract_system_reg(u32 insn)
+{
+ return (insn & 0x1FFFE0) >> 5;
+}
+
+bool aarch32_insn_is_wide(u32 insn)
+{
+ return insn >= 0xe800;
+}
+
+/*
+ * Macros/defines for extracting register numbers from instruction.
+ */
+u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
+{
+ return (insn & (0xf << offset)) >> offset;
+}
+
+#define OPC2_MASK 0x7
+#define OPC2_OFFSET 5
+u32 aarch32_insn_mcr_extract_opc2(u32 insn)
+{
+ return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
+}
+
+#define CRM_MASK 0xf
+u32 aarch32_insn_mcr_extract_crm(u32 insn)
+{
+ return insn & CRM_MASK;
+}
+
+static bool __kprobes __check_eq(unsigned long pstate)
+{
+ return (pstate & PSR_Z_BIT) != 0;
+}
+
+static bool __kprobes __check_ne(unsigned long pstate)
+{
+ return (pstate & PSR_Z_BIT) == 0;
+}
+
+static bool __kprobes __check_cs(unsigned long pstate)
+{
+ return (pstate & PSR_C_BIT) != 0;
+}
+
+static bool __kprobes __check_cc(unsigned long pstate)
+{
+ return (pstate & PSR_C_BIT) == 0;
+}
+
+static bool __kprobes __check_mi(unsigned long pstate)
+{
+ return (pstate & PSR_N_BIT) != 0;
+}
+
+static bool __kprobes __check_pl(unsigned long pstate)
+{
+ return (pstate & PSR_N_BIT) == 0;
+}
+
+static bool __kprobes __check_vs(unsigned long pstate)
+{
+ return (pstate & PSR_V_BIT) != 0;
+}
+
+static bool __kprobes __check_vc(unsigned long pstate)
+{
+ return (pstate & PSR_V_BIT) == 0;
+}
+
+static bool __kprobes __check_hi(unsigned long pstate)
+{
+ pstate &= ~(pstate >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
+ return (pstate & PSR_C_BIT) != 0;
+}
+
+static bool __kprobes __check_ls(unsigned long pstate)
+{
+ pstate &= ~(pstate >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
+ return (pstate & PSR_C_BIT) == 0;
+}
+
+static bool __kprobes __check_ge(unsigned long pstate)
+{
+ pstate ^= (pstate << 3); /* PSR_N_BIT ^= PSR_V_BIT */
+ return (pstate & PSR_N_BIT) == 0;
+}
+
+static bool __kprobes __check_lt(unsigned long pstate)
+{
+ pstate ^= (pstate << 3); /* PSR_N_BIT ^= PSR_V_BIT */
+ return (pstate & PSR_N_BIT) != 0;
+}
+
+static bool __kprobes __check_gt(unsigned long pstate)
+{
+ /*PSR_N_BIT ^= PSR_V_BIT */
+ unsigned long temp = pstate ^ (pstate << 3);
+
+ temp |= (pstate << 1); /*PSR_N_BIT |= PSR_Z_BIT */
+ return (temp & PSR_N_BIT) == 0;
+}
+
+static bool __kprobes __check_le(unsigned long pstate)
+{
+ /*PSR_N_BIT ^= PSR_V_BIT */
+ unsigned long temp = pstate ^ (pstate << 3);
+
+ temp |= (pstate << 1); /*PSR_N_BIT |= PSR_Z_BIT */
+ return (temp & PSR_N_BIT) != 0;
+}
+
+static bool __kprobes __check_al(unsigned long pstate)
+{
+ return true;
+}
+
+/*
+ * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
+ * it behaves identically to 0b1110 ("al").
+ */
+pstate_check_t * const aarch32_opcode_cond_checks[16] = {
+ __check_eq, __check_ne, __check_cs, __check_cc,
+ __check_mi, __check_pl, __check_vs, __check_vc,
+ __check_hi, __check_ls, __check_ge, __check_lt,
+ __check_gt, __check_le, __check_al, __check_al
+};
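/*
 * Illustrative sketch (not part of the patch): why the shift tricks in the
 * __check_* helpers above work. NZCV live in PSTATE bits 31..28, so shifting
 * the flags right by 1 lines Z up with C, and shifting left by 3 lines V (or
 * Z, for a shift by 1) up with N. Standalone userspace C using the same bit
 * positions.
 */
#include <stdbool.h>
#include <stdio.h>

#define PSR_N_BIT	(1UL << 31)
#define PSR_Z_BIT	(1UL << 30)
#define PSR_C_BIT	(1UL << 29)
#define PSR_V_BIT	(1UL << 28)

static bool check_hi(unsigned long pstate)	/* "hi": C set and Z clear */
{
	pstate &= ~(pstate >> 1);		/* clear C wherever Z is set */
	return (pstate & PSR_C_BIT) != 0;
}

int main(void)
{
	printf("C=1 Z=0 -> %d\n", check_hi(PSR_C_BIT));			/* 1 */
	printf("C=1 Z=1 -> %d\n", check_hi(PSR_C_BIT | PSR_Z_BIT));	/* 0 */
	return 0;
}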
+
+static bool range_of_ones(u64 val)
+{
+ /* Doesn't handle full ones or full zeroes */
+ u64 sval = val >> __ffs64(val);
+
+ /* One of Sean Eron Anderson's bithack tricks */
+ return ((sval + 1) & (sval)) == 0;
+}
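/*
 * Illustrative sketch (not part of the patch): range_of_ones() above strips
 * the trailing zeroes and then checks that what remains has the form 2^n - 1,
 * i.e. a single contiguous run of ones. __builtin_ctzll() is a GCC/Clang
 * builtin standing in for __ffs64(); standalone userspace C.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool range_of_ones_demo(uint64_t val)
{
	uint64_t sval = val >> __builtin_ctzll(val);

	return ((sval + 1) & sval) == 0;
}

int main(void)
{
	printf("0x0ff0 -> %d\n", range_of_ones_demo(0x0ff0));	/* 1: one run  */
	printf("0x0f0f -> %d\n", range_of_ones_demo(0x0f0f));	/* 0: two runs */
	return 0;
}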
+
+static u32 aarch64_encode_immediate(u64 imm,
+ enum aarch64_insn_variant variant,
+ u32 insn)
+{
+ unsigned int immr, imms, n, ones, ror, esz, tmp;
+ u64 mask;
+
+ switch (variant) {
+ case AARCH64_INSN_VARIANT_32BIT:
+ esz = 32;
+ break;
+ case AARCH64_INSN_VARIANT_64BIT:
+ insn |= AARCH64_INSN_SF_BIT;
+ esz = 64;
+ break;
+ default:
+ pr_err("%s: unknown variant encoding %d\n", __func__, variant);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ mask = GENMASK(esz - 1, 0);
+
+ /* Can't encode full zeroes, full ones, or value wider than the mask */
+ if (!imm || imm == mask || imm & ~mask)
+ return AARCH64_BREAK_FAULT;
+
+ /*
+ * Inverse of Replicate(). Try to spot a repeating pattern
+ * with a pow2 stride.
+ */
+ for (tmp = esz / 2; tmp >= 2; tmp /= 2) {
+ u64 emask = BIT(tmp) - 1;
+
+ if ((imm & emask) != ((imm >> tmp) & emask))
+ break;
+
+ esz = tmp;
+ mask = emask;
+ }
+
+ /* N is only set if we're encoding a 64bit value */
+ n = esz == 64;
+
+ /* Trim imm to the element size */
+ imm &= mask;
+
+ /* That's how many ones we need to encode */
+ ones = hweight64(imm);
+
+ /*
+ * imms is set to (ones - 1), prefixed with a string of ones
+ * and a zero if they fit. Cap it to 6 bits.
+ */
+ imms = ones - 1;
+ imms |= 0xf << ffs(esz);
+ imms &= BIT(6) - 1;
+
+ /* Compute the rotation */
+ if (range_of_ones(imm)) {
+ /*
+ * Pattern: 0..01..10..0
+ *
+ * Compute how many rotate we need to align it right
+ */
+ ror = __ffs64(imm);
+ } else {
+ /*
+ * Pattern: 0..01..10..01..1
+ *
+ * Fill the unused top bits with ones, and check if
+ * the result is a valid immediate (all ones with a
+ * contiguous range of zeroes).
+ */
+ imm |= ~mask;
+ if (!range_of_ones(~imm))
+ return AARCH64_BREAK_FAULT;
+
+ /*
+ * Compute the rotation to get a continuous set of
+ * ones, with the first bit set at position 0
+ */
+ ror = fls(~imm);
+ }
+
+ /*
+ * immr is the number of bits we need to rotate back to the
+ * original set of ones. Note that this is relative to the
+ * element size...
+ */
+ immr = (esz - ror) % esz;
+
+ insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, n);
+ insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
+}
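/*
 * Illustrative sketch (not part of the patch): the inverse of the encoder
 * above, in the style of the ARM ARM's DecodeBitMasks(). Feeding it the
 * (N, immr, imms) triple that aarch64_encode_immediate() should produce for
 * 0x00ff00ff00ff00ff (N=0, immr=0, imms=0x27) gets the original value back.
 * __builtin_clz() is a GCC/Clang builtin; a valid encoding is assumed, so
 * N:NOT(imms) is never zero. Standalone userspace C.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t ones64(unsigned int n)
{
	return n >= 64 ? ~0ULL : (1ULL << n) - 1;
}

static uint64_t decode_bit_masks(unsigned int n, unsigned int immr,
				 unsigned int imms)
{
	/* element size = 2^(index of the highest set bit of N:NOT(imms)) */
	unsigned int len = 31 - __builtin_clz((n << 6) | (~imms & 0x3f));
	unsigned int esz = 1u << len;
	uint64_t elem = ones64((imms & (esz - 1)) + 1);
	uint64_t ret = 0;
	unsigned int i;

	/* rotate the run of ones right by immr, within one element */
	immr &= esz - 1;
	if (immr)
		elem = ((elem >> immr) | (elem << (esz - immr))) & ones64(esz);

	/* replicate the element across the 64-bit register */
	for (i = 0; i < 64; i += esz)
		ret |= elem << i;
	return ret;
}

int main(void)
{
	printf("0x%016llx\n",
	       (unsigned long long)decode_bit_masks(0, 0, 0x27));
	return 0;
}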
+
+u32 aarch64_insn_gen_logical_immediate(enum aarch64_insn_logic_type type,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_register Rn,
+ enum aarch64_insn_register Rd,
+ u64 imm)
+{
+ u32 insn;
+
+ switch (type) {
+ case AARCH64_INSN_LOGIC_AND:
+ insn = aarch64_insn_get_and_imm_value();
+ break;
+ case AARCH64_INSN_LOGIC_ORR:
+ insn = aarch64_insn_get_orr_imm_value();
+ break;
+ case AARCH64_INSN_LOGIC_EOR:
+ insn = aarch64_insn_get_eor_imm_value();
+ break;
+ case AARCH64_INSN_LOGIC_AND_SETFLAGS:
+ insn = aarch64_insn_get_ands_imm_value();
+ break;
+ default:
+ pr_err("%s: unknown logical encoding %d\n", __func__, type);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
+ return aarch64_encode_immediate(imm, variant, insn);
+}
+
+u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
+ enum aarch64_insn_register Rm,
+ enum aarch64_insn_register Rn,
+ enum aarch64_insn_register Rd,
+ u8 lsb)
+{
+ u32 insn;
+
+ insn = aarch64_insn_get_extr_value();
+
+ switch (variant) {
+ case AARCH64_INSN_VARIANT_32BIT:
+ if (lsb > 31)
+ return AARCH64_BREAK_FAULT;
+ break;
+ case AARCH64_INSN_VARIANT_64BIT:
+ if (lsb > 63)
+ return AARCH64_BREAK_FAULT;
+ insn |= AARCH64_INSN_SF_BIT;
+ insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, 1);
+ break;
+ default:
+ pr_err("%s: unknown variant encoding %d\n", __func__, variant);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, lsb);
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
+ return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm);
+}
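/*
 * Illustrative sketch (not part of the patch): EXTR extracts a 64-bit window
 * from the concatenation Rn:Rm starting at bit <lsb>; when Rn == Rm this is a
 * rotate right, which is why ROR (immediate) is an alias of EXTR. Standalone
 * userspace C.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t extr64(uint64_t rn, uint64_t rm, unsigned int lsb)
{
	/* lsb == 0 would make the left shift undefined, so special-case it */
	return lsb ? (rm >> lsb) | (rn << (64 - lsb)) : rm;
}

int main(void)
{
	uint64_t x = 0x0123456789abcdefULL;

	/* EXTR Xd, Xs, Xs, #8 is ROR Xd, Xs, #8 */
	printf("0x%016llx\n", (unsigned long long)extr64(x, x, 8));
	return 0;
}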
diff --git a/arch/arm64/kernel/io.c b/arch/arm64/kernel/io.c
new file mode 100644
index 000000000..aa7a4ec6a
--- /dev/null
+++ b/arch/arm64/kernel/io.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Based on arch/arm/kernel/io.c
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ */
+
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/io.h>
+
+/*
+ * Copy data from IO memory space to "real" memory space.
+ */
+void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count)
+{
+ while (count && !IS_ALIGNED((unsigned long)from, 8)) {
+ *(u8 *)to = __raw_readb(from);
+ from++;
+ to++;
+ count--;
+ }
+
+ while (count >= 8) {
+ *(u64 *)to = __raw_readq(from);
+ from += 8;
+ to += 8;
+ count -= 8;
+ }
+
+ while (count) {
+ *(u8 *)to = __raw_readb(from);
+ from++;
+ to++;
+ count--;
+ }
+}
+EXPORT_SYMBOL(__memcpy_fromio);
+
+/*
+ * Copy data from "real" memory space to IO memory space.
+ */
+void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count)
+{
+ while (count && !IS_ALIGNED((unsigned long)to, 8)) {
+ __raw_writeb(*(u8 *)from, to);
+ from++;
+ to++;
+ count--;
+ }
+
+ while (count >= 8) {
+ __raw_writeq(*(u64 *)from, to);
+ from += 8;
+ to += 8;
+ count -= 8;
+ }
+
+ while (count) {
+ __raw_writeb(*(u8 *)from, to);
+ from++;
+ to++;
+ count--;
+ }
+}
+EXPORT_SYMBOL(__memcpy_toio);
+
+/*
+ * "memset" on IO memory space.
+ */
+void __memset_io(volatile void __iomem *dst, int c, size_t count)
+{
+ u64 qc = (u8)c;
+
+ qc |= qc << 8;
+ qc |= qc << 16;
+ qc |= qc << 32;
+
+ while (count && !IS_ALIGNED((unsigned long)dst, 8)) {
+ __raw_writeb(c, dst);
+ dst++;
+ count--;
+ }
+
+ while (count >= 8) {
+ __raw_writeq(qc, dst);
+ dst += 8;
+ count -= 8;
+ }
+
+ while (count) {
+ __raw_writeb(c, dst);
+ dst++;
+ count--;
+ }
+}
+EXPORT_SYMBOL(__memset_io);
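/*
 * Illustrative sketch (not part of the patch): the qc computation above
 * replicates the fill byte across all eight bytes of a 64-bit word so the
 * bulk loop can emit one 8-byte write per iteration. Standalone userspace C.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int c = 0xab;			/* arbitrary fill byte */
	uint64_t qc = (uint8_t)c;

	qc |= qc << 8;
	qc |= qc << 16;
	qc |= qc << 32;

	printf("0x%016llx\n", (unsigned long long)qc);	/* 0xabababababababab */
	return 0;
}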
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
new file mode 100644
index 000000000..60456a62d
--- /dev/null
+++ b/arch/arm64/kernel/irq.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Based on arch/arm/kernel/irq.c
+ *
+ * Copyright (C) 1992 Linus Torvalds
+ * Modifications for ARM processor Copyright (C) 1995-2000 Russell King.
+ * Support for Dynamic Tick Timer Copyright (C) 2004-2005 Nokia Corporation.
+ * Dynamic Tick Timer written by Tony Lindgren <tony@atomide.com> and
+ * Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>.
+ * Copyright (C) 2012 ARM Ltd.
+ */
+
+#include <linux/irq.h>
+#include <linux/memory.h>
+#include <linux/smp.h>
+#include <linux/hardirq.h>
+#include <linux/init.h>
+#include <linux/irqchip.h>
+#include <linux/kprobes.h>
+#include <linux/seq_file.h>
+#include <linux/vmalloc.h>
+#include <asm/daifflags.h>
+#include <asm/vmap_stack.h>
+
+/* Only access this in an NMI enter/exit */
+DEFINE_PER_CPU(struct nmi_ctx, nmi_contexts);
+
+DEFINE_PER_CPU(unsigned long *, irq_stack_ptr);
+
+#ifdef CONFIG_VMAP_STACK
+static void init_irq_stacks(void)
+{
+ int cpu;
+ unsigned long *p;
+
+ for_each_possible_cpu(cpu) {
+ p = arch_alloc_vmap_stack(IRQ_STACK_SIZE, cpu_to_node(cpu));
+ per_cpu(irq_stack_ptr, cpu) = p;
+ }
+}
+#else
+/* irq stack only needs to be 16 byte aligned - not IRQ_STACK_SIZE aligned. */
+DEFINE_PER_CPU_ALIGNED(unsigned long [IRQ_STACK_SIZE/sizeof(long)], irq_stack);
+
+static void init_irq_stacks(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ per_cpu(irq_stack_ptr, cpu) = per_cpu(irq_stack, cpu);
+}
+#endif
+
+void __init init_IRQ(void)
+{
+ init_irq_stacks();
+ irqchip_init();
+ if (!handle_arch_irq)
+ panic("No interrupt controller found.");
+
+ if (system_uses_irq_prio_masking()) {
+ /*
+ * Now that we have a stack for our IRQ handler, set
+ * the PMR/PSR pair to a consistent state.
+ */
+ WARN_ON(read_sysreg(daif) & PSR_A_BIT);
+ local_daif_restore(DAIF_PROCCTX_NOIRQ);
+ }
+}
diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c
new file mode 100644
index 000000000..9a8a0ae1e
--- /dev/null
+++ b/arch/arm64/kernel/jump_label.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Huawei Ltd.
+ * Author: Jiang Liu <liuj97@gmail.com>
+ *
+ * Based on arch/arm/kernel/jump_label.c
+ */
+#include <linux/kernel.h>
+#include <linux/jump_label.h>
+#include <asm/insn.h>
+
+void arch_jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type)
+{
+ void *addr = (void *)jump_entry_code(entry);
+ u32 insn;
+
+ if (type == JUMP_LABEL_JMP) {
+ insn = aarch64_insn_gen_branch_imm(jump_entry_code(entry),
+ jump_entry_target(entry),
+ AARCH64_INSN_BRANCH_NOLINK);
+ } else {
+ insn = aarch64_insn_gen_nop();
+ }
+
+ aarch64_insn_patch_text_nosync(addr, insn);
+}
+
+void arch_jump_label_transform_static(struct jump_entry *entry,
+ enum jump_label_type type)
+{
+ /*
+ * We use the architected A64 NOP in arch_static_branch, so there's no
+ * need to patch an identical A64 NOP over the top of it here. The core
+ * will call arch_jump_label_transform from a module notifier if the
+ * NOP needs to be replaced by a branch.
+ */
+}
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
new file mode 100644
index 000000000..b181e0544
--- /dev/null
+++ b/arch/arm64/kernel/kaslr.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
+ */
+
+#include <linux/cache.h>
+#include <linux/crc32.h>
+#include <linux/init.h>
+#include <linux/libfdt.h>
+#include <linux/mm_types.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/pgtable.h>
+#include <linux/random.h>
+
+#include <asm/cacheflush.h>
+#include <asm/fixmap.h>
+#include <asm/kernel-pgtable.h>
+#include <asm/memory.h>
+#include <asm/mmu.h>
+#include <asm/sections.h>
+
+enum kaslr_status {
+ KASLR_ENABLED,
+ KASLR_DISABLED_CMDLINE,
+ KASLR_DISABLED_NO_SEED,
+ KASLR_DISABLED_FDT_REMAP,
+};
+
+static enum kaslr_status __initdata kaslr_status;
+u64 __ro_after_init module_alloc_base;
+u16 __initdata memstart_offset_seed;
+
+static __init u64 get_kaslr_seed(void *fdt)
+{
+ int node, len;
+ fdt64_t *prop;
+ u64 ret;
+
+ node = fdt_path_offset(fdt, "/chosen");
+ if (node < 0)
+ return 0;
+
+ prop = fdt_getprop_w(fdt, node, "kaslr-seed", &len);
+ if (!prop || len != sizeof(u64))
+ return 0;
+
+ ret = fdt64_to_cpu(*prop);
+ *prop = 0;
+ return ret;
+}
+
+static __init const u8 *kaslr_get_cmdline(void *fdt)
+{
+ static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE;
+
+ if (!IS_ENABLED(CONFIG_CMDLINE_FORCE)) {
+ int node;
+ const u8 *prop;
+
+ node = fdt_path_offset(fdt, "/chosen");
+ if (node < 0)
+ goto out;
+
+ prop = fdt_getprop(fdt, node, "bootargs", NULL);
+ if (!prop)
+ goto out;
+ return prop;
+ }
+out:
+ return default_cmdline;
+}
+
+/*
+ * This routine will be executed with the kernel mapped at its default virtual
+ * address, and if it returns successfully, the kernel will be remapped, and
+ * start_kernel() will be executed from a randomized virtual offset. The
+ * relocation will result in all absolute references (e.g., static variables
+ * containing function pointers) to be reinitialized, and zero-initialized
+ * .bss variables will be reset to 0.
+ */
+u64 __init kaslr_early_init(u64 dt_phys)
+{
+ void *fdt;
+ u64 seed, offset, mask, module_range;
+ const u8 *cmdline, *str;
+ unsigned long raw;
+ int size;
+
+ /*
+ * Set a reasonable default for module_alloc_base in case
+ * we end up running with module randomization disabled.
+ */
+ module_alloc_base = (u64)_etext - MODULES_VSIZE;
+ __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
+
+ /*
+ * Try to map the FDT early. If this fails, we simply bail,
+ * and proceed with KASLR disabled. We will make another
+ * attempt at mapping the FDT in setup_machine()
+ */
+ early_fixmap_init();
+ fdt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
+ if (!fdt) {
+ kaslr_status = KASLR_DISABLED_FDT_REMAP;
+ return 0;
+ }
+
+ /*
+ * Retrieve (and wipe) the seed from the FDT
+ */
+ seed = get_kaslr_seed(fdt);
+
+ /*
+ * Check if 'nokaslr' appears on the command line, and
+ * return 0 if that is the case.
+ */
+ cmdline = kaslr_get_cmdline(fdt);
+ str = strstr(cmdline, "nokaslr");
+ if (str == cmdline || (str > cmdline && *(str - 1) == ' ')) {
+ kaslr_status = KASLR_DISABLED_CMDLINE;
+ return 0;
+ }
+
+ /*
+ * Mix in any entropy obtainable architecturally if enabled
+ * and supported.
+ */
+
+ if (arch_get_random_seed_long_early(&raw))
+ seed ^= raw;
+
+ if (!seed) {
+ kaslr_status = KASLR_DISABLED_NO_SEED;
+ return 0;
+ }
+
+ /*
+ * OK, so we are proceeding with KASLR enabled. Calculate a suitable
+ * kernel image offset from the seed. Let's place the kernel in the
+ * middle half of the VMALLOC area (VA_BITS_MIN - 2), and stay clear of
+ * the lower and upper quarters to avoid colliding with other
+ * allocations.
+ * Even if we could randomize at page granularity for 16k and 64k pages,
+ * let's always round to 2 MB so we don't interfere with the ability to
+ * map using contiguous PTEs
+ */
+ mask = ((1UL << (VA_BITS_MIN - 2)) - 1) & ~(SZ_2M - 1);
+ offset = BIT(VA_BITS_MIN - 3) + (seed & mask);
+
+ /* use the top 16 bits to randomize the linear region */
+ memstart_offset_seed = seed >> 48;
+
+ if (IS_ENABLED(CONFIG_KASAN))
+ /*
+ * KASAN does not expect the module region to intersect the
+ * vmalloc region, since shadow memory is allocated for each
+ * module at load time, whereas the vmalloc region is shadowed
+ * by KASAN zero pages. So keep modules out of the vmalloc
+ * region if KASAN is enabled, and put the kernel well within
+ * 4 GB of the module region.
+ */
+ return offset % SZ_2G;
+
+ if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
+ /*
+ * Randomize the module region over a 2 GB window covering the
+ * kernel. This reduces the risk of modules leaking information
+ * about the address of the kernel itself, but results in
+ * branches between modules and the core kernel that are
+ * resolved via PLTs. (Branches between modules will be
+ * resolved normally.)
+ */
+ module_range = SZ_2G - (u64)(_end - _stext);
+ module_alloc_base = max((u64)_end + offset - SZ_2G,
+ (u64)MODULES_VADDR);
+ } else {
+ /*
+ * Randomize the module region by setting module_alloc_base to
+ * a PAGE_SIZE multiple in the range [_etext - MODULES_VSIZE,
+ * _stext) . This guarantees that the resulting region still
+ * covers [_stext, _etext], and that all relative branches can
+ * be resolved without veneers.
+ */
+ module_range = MODULES_VSIZE - (u64)(_etext - _stext);
+ module_alloc_base = (u64)_etext + offset - MODULES_VSIZE;
+ }
+
+ /* use the lower 21 bits to randomize the base of the module region */
+ module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
+ module_alloc_base &= PAGE_MASK;
+
+ __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
+ __flush_dcache_area(&memstart_offset_seed, sizeof(memstart_offset_seed));
+
+ return offset;
+}
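/*
 * Illustrative sketch (not part of the patch): the offset arithmetic used in
 * kaslr_early_init() above, assuming VA_BITS_MIN is 48 and using a made-up
 * seed. The kernel ends up in the middle half of that range, rounded to
 * 2 MiB. Standalone userspace C.
 */
#include <stdint.h>
#include <stdio.h>

#define VA_BITS_MIN	48			/* assumption for this example */
#define SZ_2M		0x200000ULL

int main(void)
{
	uint64_t seed = 0x123456789abcdef0ULL;	/* made-up seed */
	uint64_t mask, offset;

	mask = ((1ULL << (VA_BITS_MIN - 2)) - 1) & ~(SZ_2M - 1);
	offset = (1ULL << (VA_BITS_MIN - 3)) + (seed & mask);

	printf("offset = 0x%016llx\n", (unsigned long long)offset);
	printf("2M aligned: %d\n", (offset & (SZ_2M - 1)) == 0);	/* 1 */
	return 0;
}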
+
+static int __init kaslr_init(void)
+{
+ switch (kaslr_status) {
+ case KASLR_ENABLED:
+ pr_info("KASLR enabled\n");
+ break;
+ case KASLR_DISABLED_CMDLINE:
+ pr_info("KASLR disabled on command line\n");
+ break;
+ case KASLR_DISABLED_NO_SEED:
+ pr_warn("KASLR disabled due to lack of seed\n");
+ break;
+ case KASLR_DISABLED_FDT_REMAP:
+ pr_warn("KASLR disabled due to FDT remapping failure\n");
+ break;
+ }
+
+ return 0;
+}
+core_initcall(kaslr_init)
diff --git a/arch/arm64/kernel/kexec_image.c b/arch/arm64/kernel/kexec_image.c
new file mode 100644
index 000000000..9ec34690e
--- /dev/null
+++ b/arch/arm64/kernel/kexec_image.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Kexec image loader
+ *
+ * Copyright (C) 2018 Linaro Limited
+ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
+ */
+
+#define pr_fmt(fmt) "kexec_file(Image): " fmt
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/kexec.h>
+#include <linux/pe.h>
+#include <linux/string.h>
+#include <linux/verification.h>
+#include <asm/byteorder.h>
+#include <asm/cpufeature.h>
+#include <asm/image.h>
+#include <asm/memory.h>
+
+static int image_probe(const char *kernel_buf, unsigned long kernel_len)
+{
+ const struct arm64_image_header *h =
+ (const struct arm64_image_header *)(kernel_buf);
+
+ if (!h || (kernel_len < sizeof(*h)))
+ return -EINVAL;
+
+ if (memcmp(&h->magic, ARM64_IMAGE_MAGIC, sizeof(h->magic)))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void *image_load(struct kimage *image,
+ char *kernel, unsigned long kernel_len,
+ char *initrd, unsigned long initrd_len,
+ char *cmdline, unsigned long cmdline_len)
+{
+ struct arm64_image_header *h;
+ u64 flags, value;
+ bool be_image, be_kernel;
+ struct kexec_buf kbuf;
+ unsigned long text_offset, kernel_segment_number;
+ struct kexec_segment *kernel_segment;
+ int ret;
+
+ /*
+ * We require a kernel with an unambiguous Image header. Per
+ * Documentation/arm64/booting.rst, this is the case when image_size
+ * is non-zero (practically speaking, since v3.17).
+ */
+ h = (struct arm64_image_header *)kernel;
+ if (!h->image_size)
+ return ERR_PTR(-EINVAL);
+
+ /* Check cpu features */
+ flags = le64_to_cpu(h->flags);
+ be_image = arm64_image_flag_field(flags, ARM64_IMAGE_FLAG_BE);
+ be_kernel = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
+ if ((be_image != be_kernel) && !system_supports_mixed_endian())
+ return ERR_PTR(-EINVAL);
+
+ value = arm64_image_flag_field(flags, ARM64_IMAGE_FLAG_PAGE_SIZE);
+ if (((value == ARM64_IMAGE_FLAG_PAGE_SIZE_4K) &&
+ !system_supports_4kb_granule()) ||
+ ((value == ARM64_IMAGE_FLAG_PAGE_SIZE_64K) &&
+ !system_supports_64kb_granule()) ||
+ ((value == ARM64_IMAGE_FLAG_PAGE_SIZE_16K) &&
+ !system_supports_16kb_granule()))
+ return ERR_PTR(-EINVAL);
+
+ /* Load the kernel */
+ kbuf.image = image;
+ kbuf.buf_min = 0;
+ kbuf.buf_max = ULONG_MAX;
+ kbuf.top_down = false;
+
+ kbuf.buffer = kernel;
+ kbuf.bufsz = kernel_len;
+ kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
+ kbuf.memsz = le64_to_cpu(h->image_size);
+ text_offset = le64_to_cpu(h->text_offset);
+ kbuf.buf_align = MIN_KIMG_ALIGN;
+
+ /* Adjust kernel segment with TEXT_OFFSET */
+ kbuf.memsz += text_offset;
+
+ kernel_segment_number = image->nr_segments;
+
+ /*
+ * The location of the kernel segment may make it impossible to satisfy
+ * the other segment requirements, so we try repeatedly to find a
+ * location that will work.
+ */
+ while ((ret = kexec_add_buffer(&kbuf)) == 0) {
+ /* Try to load additional data */
+ kernel_segment = &image->segment[kernel_segment_number];
+ ret = load_other_segments(image, kernel_segment->mem,
+ kernel_segment->memsz, initrd,
+ initrd_len, cmdline);
+ if (!ret)
+ break;
+
+ /*
+ * We couldn't find space for the other segments; erase the
+ * kernel segment and try the next available hole.
+ */
+ image->nr_segments -= 1;
+ kbuf.buf_min = kernel_segment->mem + kernel_segment->memsz;
+ kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
+ }
+
+ if (ret) {
+ pr_err("Could not find any suitable kernel location!");
+ return ERR_PTR(ret);
+ }
+
+ kernel_segment = &image->segment[kernel_segment_number];
+ kernel_segment->mem += text_offset;
+ kernel_segment->memsz -= text_offset;
+ image->start = kernel_segment->mem;
+
+ pr_debug("Loaded kernel at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
+ kernel_segment->mem, kbuf.bufsz,
+ kernel_segment->memsz);
+
+ return NULL;
+}
+
+#ifdef CONFIG_KEXEC_IMAGE_VERIFY_SIG
+static int image_verify_sig(const char *kernel, unsigned long kernel_len)
+{
+ return verify_pefile_signature(kernel, kernel_len, NULL,
+ VERIFYING_KEXEC_PE_SIGNATURE);
+}
+#endif
+
+const struct kexec_file_ops kexec_image_ops = {
+ .probe = image_probe,
+ .load = image_load,
+#ifdef CONFIG_KEXEC_IMAGE_VERIFY_SIG
+ .verify_sig = image_verify_sig,
+#endif
+};
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
new file mode 100644
index 000000000..e4e95821b
--- /dev/null
+++ b/arch/arm64/kernel/kgdb.c
@@ -0,0 +1,357 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AArch64 KGDB support
+ *
+ * Based on arch/arm/kernel/kgdb.c
+ *
+ * Copyright (C) 2013 Cavium Inc.
+ * Author: Vijaya Kumar K <vijaya.kumar@caviumnetworks.com>
+ */
+
+#include <linux/bug.h>
+#include <linux/irq.h>
+#include <linux/kdebug.h>
+#include <linux/kgdb.h>
+#include <linux/kprobes.h>
+#include <linux/sched/task_stack.h>
+
+#include <asm/debug-monitors.h>
+#include <asm/insn.h>
+#include <asm/traps.h>
+
+struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
+ { "x0", 8, offsetof(struct pt_regs, regs[0])},
+ { "x1", 8, offsetof(struct pt_regs, regs[1])},
+ { "x2", 8, offsetof(struct pt_regs, regs[2])},
+ { "x3", 8, offsetof(struct pt_regs, regs[3])},
+ { "x4", 8, offsetof(struct pt_regs, regs[4])},
+ { "x5", 8, offsetof(struct pt_regs, regs[5])},
+ { "x6", 8, offsetof(struct pt_regs, regs[6])},
+ { "x7", 8, offsetof(struct pt_regs, regs[7])},
+ { "x8", 8, offsetof(struct pt_regs, regs[8])},
+ { "x9", 8, offsetof(struct pt_regs, regs[9])},
+ { "x10", 8, offsetof(struct pt_regs, regs[10])},
+ { "x11", 8, offsetof(struct pt_regs, regs[11])},
+ { "x12", 8, offsetof(struct pt_regs, regs[12])},
+ { "x13", 8, offsetof(struct pt_regs, regs[13])},
+ { "x14", 8, offsetof(struct pt_regs, regs[14])},
+ { "x15", 8, offsetof(struct pt_regs, regs[15])},
+ { "x16", 8, offsetof(struct pt_regs, regs[16])},
+ { "x17", 8, offsetof(struct pt_regs, regs[17])},
+ { "x18", 8, offsetof(struct pt_regs, regs[18])},
+ { "x19", 8, offsetof(struct pt_regs, regs[19])},
+ { "x20", 8, offsetof(struct pt_regs, regs[20])},
+ { "x21", 8, offsetof(struct pt_regs, regs[21])},
+ { "x22", 8, offsetof(struct pt_regs, regs[22])},
+ { "x23", 8, offsetof(struct pt_regs, regs[23])},
+ { "x24", 8, offsetof(struct pt_regs, regs[24])},
+ { "x25", 8, offsetof(struct pt_regs, regs[25])},
+ { "x26", 8, offsetof(struct pt_regs, regs[26])},
+ { "x27", 8, offsetof(struct pt_regs, regs[27])},
+ { "x28", 8, offsetof(struct pt_regs, regs[28])},
+ { "x29", 8, offsetof(struct pt_regs, regs[29])},
+ { "x30", 8, offsetof(struct pt_regs, regs[30])},
+ { "sp", 8, offsetof(struct pt_regs, sp)},
+ { "pc", 8, offsetof(struct pt_regs, pc)},
+ /*
+ * struct pt_regs thinks PSTATE is 64-bits wide but gdb remote
+ * protocol disagrees. Therefore we must extract only the lower
+ * 32-bits. Look for the big comment in asm/kgdb.h for more
+ * detail.
+ */
+ { "pstate", 4, offsetof(struct pt_regs, pstate)
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ + 4
+#endif
+ },
+ { "v0", 16, -1 },
+ { "v1", 16, -1 },
+ { "v2", 16, -1 },
+ { "v3", 16, -1 },
+ { "v4", 16, -1 },
+ { "v5", 16, -1 },
+ { "v6", 16, -1 },
+ { "v7", 16, -1 },
+ { "v8", 16, -1 },
+ { "v9", 16, -1 },
+ { "v10", 16, -1 },
+ { "v11", 16, -1 },
+ { "v12", 16, -1 },
+ { "v13", 16, -1 },
+ { "v14", 16, -1 },
+ { "v15", 16, -1 },
+ { "v16", 16, -1 },
+ { "v17", 16, -1 },
+ { "v18", 16, -1 },
+ { "v19", 16, -1 },
+ { "v20", 16, -1 },
+ { "v21", 16, -1 },
+ { "v22", 16, -1 },
+ { "v23", 16, -1 },
+ { "v24", 16, -1 },
+ { "v25", 16, -1 },
+ { "v26", 16, -1 },
+ { "v27", 16, -1 },
+ { "v28", 16, -1 },
+ { "v29", 16, -1 },
+ { "v30", 16, -1 },
+ { "v31", 16, -1 },
+ { "fpsr", 4, -1 },
+ { "fpcr", 4, -1 },
+};
+
+char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ if (regno >= DBG_MAX_REG_NUM || regno < 0)
+ return NULL;
+
+ if (dbg_reg_def[regno].offset != -1)
+ memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
+ dbg_reg_def[regno].size);
+ else
+ memset(mem, 0, dbg_reg_def[regno].size);
+ return dbg_reg_def[regno].name;
+}
+
+int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ if (regno >= DBG_MAX_REG_NUM || regno < 0)
+ return -EINVAL;
+
+ if (dbg_reg_def[regno].offset != -1)
+ memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
+ dbg_reg_def[regno].size);
+ return 0;
+}
+
+void
+sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
+{
+ struct cpu_context *cpu_context = &task->thread.cpu_context;
+
+ /* Initialize to zero */
+ memset((char *)gdb_regs, 0, NUMREGBYTES);
+
+ gdb_regs[19] = cpu_context->x19;
+ gdb_regs[20] = cpu_context->x20;
+ gdb_regs[21] = cpu_context->x21;
+ gdb_regs[22] = cpu_context->x22;
+ gdb_regs[23] = cpu_context->x23;
+ gdb_regs[24] = cpu_context->x24;
+ gdb_regs[25] = cpu_context->x25;
+ gdb_regs[26] = cpu_context->x26;
+ gdb_regs[27] = cpu_context->x27;
+ gdb_regs[28] = cpu_context->x28;
+ gdb_regs[29] = cpu_context->fp;
+
+ gdb_regs[31] = cpu_context->sp;
+ gdb_regs[32] = cpu_context->pc;
+}
+
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+{
+ regs->pc = pc;
+}
+
+static int compiled_break;
+
+static void kgdb_arch_update_addr(struct pt_regs *regs,
+ char *remcom_in_buffer)
+{
+ unsigned long addr;
+ char *ptr;
+
+ ptr = &remcom_in_buffer[1];
+ if (kgdb_hex2long(&ptr, &addr))
+ kgdb_arch_set_pc(regs, addr);
+ else if (compiled_break == 1)
+ kgdb_arch_set_pc(regs, regs->pc + 4);
+
+ compiled_break = 0;
+}
+
+int kgdb_arch_handle_exception(int exception_vector, int signo,
+ int err_code, char *remcom_in_buffer,
+ char *remcom_out_buffer,
+ struct pt_regs *linux_regs)
+{
+ int err;
+
+ switch (remcom_in_buffer[0]) {
+ case 'D':
+ case 'k':
+ /*
+ * Packet D (Detach), k (kill). No special handling
+ * is required here. Handle same as c packet.
+ */
+ case 'c':
+ /*
+ * Packet c (Continue) to continue executing.
+ * Try to read the optional parameter and set pc to the
+ * required address.
+ * If this was a compiled breakpoint, we need to move
+ * to the next instruction; otherwise we will just hit
+ * the same breakpoint over and over again.
+ */
+ kgdb_arch_update_addr(linux_regs, remcom_in_buffer);
+ atomic_set(&kgdb_cpu_doing_single_step, -1);
+ kgdb_single_step = 0;
+
+ /*
+ * Received continue command, disable single step
+ */
+ if (kernel_active_single_step())
+ kernel_disable_single_step();
+
+ err = 0;
+ break;
+ case 's':
+ /*
+ * Update step address value with address passed
+ * with step packet.
+ * On debug exception return, PC is copied to ELR,
+ * so just update PC.
+ * If no step address is passed, resume from the address
+ * pointed to by PC and do not update PC.
+ */
+ kgdb_arch_update_addr(linux_regs, remcom_in_buffer);
+ atomic_set(&kgdb_cpu_doing_single_step, raw_smp_processor_id());
+ kgdb_single_step = 1;
+
+ /*
+ * Enable single step handling
+ */
+ if (!kernel_active_single_step())
+ kernel_enable_single_step(linux_regs);
+ else
+ kernel_rewind_single_step(linux_regs);
+ err = 0;
+ break;
+ default:
+ err = -1;
+ }
+ return err;
+}
+
+static int kgdb_brk_fn(struct pt_regs *regs, unsigned int esr)
+{
+ kgdb_handle_exception(1, SIGTRAP, 0, regs);
+ return DBG_HOOK_HANDLED;
+}
+NOKPROBE_SYMBOL(kgdb_brk_fn);
+
+static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr)
+{
+ compiled_break = 1;
+ kgdb_handle_exception(1, SIGTRAP, 0, regs);
+
+ return DBG_HOOK_HANDLED;
+}
+NOKPROBE_SYMBOL(kgdb_compiled_brk_fn);
+
+static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
+{
+ if (!kgdb_single_step)
+ return DBG_HOOK_ERROR;
+
+ kgdb_handle_exception(0, SIGTRAP, 0, regs);
+ return DBG_HOOK_HANDLED;
+}
+NOKPROBE_SYMBOL(kgdb_step_brk_fn);
+
+static struct break_hook kgdb_brkpt_hook = {
+ .fn = kgdb_brk_fn,
+ .imm = KGDB_DYN_DBG_BRK_IMM,
+};
+
+static struct break_hook kgdb_compiled_brkpt_hook = {
+ .fn = kgdb_compiled_brk_fn,
+ .imm = KGDB_COMPILED_DBG_BRK_IMM,
+};
+
+static struct step_hook kgdb_step_hook = {
+ .fn = kgdb_step_brk_fn
+};
+
+static int __kgdb_notify(struct die_args *args, unsigned long cmd)
+{
+ struct pt_regs *regs = args->regs;
+
+ if (kgdb_handle_exception(1, args->signr, cmd, regs))
+ return NOTIFY_DONE;
+ return NOTIFY_STOP;
+}
+
+static int
+kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
+{
+ unsigned long flags;
+ int ret;
+
+ local_irq_save(flags);
+ ret = __kgdb_notify(ptr, cmd);
+ local_irq_restore(flags);
+
+ return ret;
+}
+
+static struct notifier_block kgdb_notifier = {
+ .notifier_call = kgdb_notify,
+ /*
+ * Want to be lowest priority
+ */
+ .priority = -INT_MAX,
+};
+
+/*
+ * kgdb_arch_init - Perform any architecture specific initialization.
+ * This function will handle the initialization of any architecture
+ * specific callbacks.
+ */
+int kgdb_arch_init(void)
+{
+ int ret = register_die_notifier(&kgdb_notifier);
+
+ if (ret != 0)
+ return ret;
+
+ register_kernel_break_hook(&kgdb_brkpt_hook);
+ register_kernel_break_hook(&kgdb_compiled_brkpt_hook);
+ register_kernel_step_hook(&kgdb_step_hook);
+ return 0;
+}
+
+/*
+ * kgdb_arch_exit - Perform any architecture specific uninitialization.
+ * This function will handle the uninitialization of any architecture
+ * specific callbacks, for dynamic registration and unregistration.
+ */
+void kgdb_arch_exit(void)
+{
+ unregister_kernel_break_hook(&kgdb_brkpt_hook);
+ unregister_kernel_break_hook(&kgdb_compiled_brkpt_hook);
+ unregister_kernel_step_hook(&kgdb_step_hook);
+ unregister_die_notifier(&kgdb_notifier);
+}
+
+const struct kgdb_arch arch_kgdb_ops;
+
+int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
+{
+ int err;
+
+ BUILD_BUG_ON(AARCH64_INSN_SIZE != BREAK_INSTR_SIZE);
+
+ err = aarch64_insn_read((void *)bpt->bpt_addr, (u32 *)bpt->saved_instr);
+ if (err)
+ return err;
+
+ return aarch64_insn_write((void *)bpt->bpt_addr,
+ (u32)AARCH64_BREAK_KGDB_DYN_DBG);
+}
+
+int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
+{
+ return aarch64_insn_write((void *)bpt->bpt_addr,
+ *(u32 *)bpt->saved_instr);
+}
diff --git a/arch/arm64/kernel/kuser32.S b/arch/arm64/kernel/kuser32.S
new file mode 100644
index 000000000..42bd8c0c6
--- /dev/null
+++ b/arch/arm64/kernel/kuser32.S
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * AArch32 user helpers.
+ * Based on the kuser helpers in arch/arm/kernel/entry-armv.S.
+ *
+ * Copyright (C) 2005-2011 Nicolas Pitre <nico@fluxnic.net>
+ * Copyright (C) 2012-2018 ARM Ltd.
+ *
+ * The kuser helpers below are mapped at a fixed address by
+ * aarch32_setup_additional_pages() and are provided for compatibility
+ * reasons with 32 bit (aarch32) applications that need them.
+ *
+ * See Documentation/arm/kernel_user_helpers.rst for formal definitions.
+ */
+
+#include <asm/unistd.h>
+
+ .align 5
+ .globl __kuser_helper_start
+__kuser_helper_start:
+
+__kuser_cmpxchg64: // 0xffff0f60
+ .inst 0xe92d00f0 // push {r4, r5, r6, r7}
+ .inst 0xe1c040d0 // ldrd r4, r5, [r0]
+ .inst 0xe1c160d0 // ldrd r6, r7, [r1]
+ .inst 0xe1b20f9f // 1: ldrexd r0, r1, [r2]
+ .inst 0xe0303004 // eors r3, r0, r4
+ .inst 0x00313005 // eoreqs r3, r1, r5
+ .inst 0x01a23e96 // stlexdeq r3, r6, [r2]
+ .inst 0x03330001 // teqeq r3, #1
+ .inst 0x0afffff9 // beq 1b
+ .inst 0xf57ff05b // dmb ish
+ .inst 0xe2730000 // rsbs r0, r3, #0
+ .inst 0xe8bd00f0 // pop {r4, r5, r6, r7}
+ .inst 0xe12fff1e // bx lr
+
+ .align 5
+__kuser_memory_barrier: // 0xffff0fa0
+ .inst 0xf57ff05b // dmb ish
+ .inst 0xe12fff1e // bx lr
+
+ .align 5
+__kuser_cmpxchg: // 0xffff0fc0
+ .inst 0xe1923f9f // 1: ldrex r3, [r2]
+ .inst 0xe0533000 // subs r3, r3, r0
+ .inst 0x01823e91 // stlexeq r3, r1, [r2]
+ .inst 0x03330001 // teqeq r3, #1
+ .inst 0x0afffffa // beq 1b
+ .inst 0xf57ff05b // dmb ish
+ .inst 0xe2730000 // rsbs r0, r3, #0
+ .inst 0xe12fff1e // bx lr
+
+ .align 5
+__kuser_get_tls: // 0xffff0fe0
+ .inst 0xee1d0f70 // mrc p15, 0, r0, c13, c0, 3
+ .inst 0xe12fff1e // bx lr
+ .rep 5
+ .word 0
+ .endr
+
+__kuser_helper_version: // 0xffff0ffc
+ .word ((__kuser_helper_end - __kuser_helper_start) >> 5)
+ .globl __kuser_helper_end
+__kuser_helper_end:
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
new file mode 100644
index 000000000..a0b144cfa
--- /dev/null
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * kexec for arm64
+ *
+ * Copyright (C) Linaro.
+ * Copyright (C) Huawei Futurewei Technologies.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/kexec.h>
+#include <linux/page-flags.h>
+#include <linux/smp.h>
+
+#include <asm/cacheflush.h>
+#include <asm/cpu_ops.h>
+#include <asm/daifflags.h>
+#include <asm/memory.h>
+#include <asm/mmu.h>
+#include <asm/mmu_context.h>
+#include <asm/page.h>
+
+#include "cpu-reset.h"
+
+/* Global variables for the arm64_relocate_new_kernel routine. */
+extern const unsigned char arm64_relocate_new_kernel[];
+extern const unsigned long arm64_relocate_new_kernel_size;
+
+/**
+ * kexec_image_info - For debugging output.
+ */
+#define kexec_image_info(_i) _kexec_image_info(__func__, __LINE__, _i)
+static void _kexec_image_info(const char *func, int line,
+ const struct kimage *kimage)
+{
+ unsigned long i;
+
+ pr_debug("%s:%d:\n", func, line);
+ pr_debug(" kexec kimage info:\n");
+ pr_debug(" type: %d\n", kimage->type);
+ pr_debug(" start: %lx\n", kimage->start);
+ pr_debug(" head: %lx\n", kimage->head);
+ pr_debug(" nr_segments: %lu\n", kimage->nr_segments);
+
+ for (i = 0; i < kimage->nr_segments; i++) {
+ pr_debug(" segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n",
+ i,
+ kimage->segment[i].mem,
+ kimage->segment[i].mem + kimage->segment[i].memsz,
+ kimage->segment[i].memsz,
+ kimage->segment[i].memsz / PAGE_SIZE);
+ }
+}
+
+void machine_kexec_cleanup(struct kimage *kimage)
+{
+ /* Empty routine needed to avoid build errors. */
+}
+
+/**
+ * machine_kexec_prepare - Prepare for a kexec reboot.
+ *
+ * Called from the core kexec code when a kernel image is loaded.
+ * Forbid loading a kexec kernel if we have no way of hotplugging cpus or cpus
+ * are stuck in the kernel. This avoids a panic once we hit machine_kexec().
+ */
+int machine_kexec_prepare(struct kimage *kimage)
+{
+ kexec_image_info(kimage);
+
+ if (kimage->type != KEXEC_TYPE_CRASH && cpus_are_stuck_in_kernel()) {
+ pr_err("Can't kexec: CPUs are stuck in the kernel.\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/**
+ * kexec_list_flush - Helper to flush the kimage list and source pages to PoC.
+ */
+static void kexec_list_flush(struct kimage *kimage)
+{
+ kimage_entry_t *entry;
+
+ for (entry = &kimage->head; ; entry++) {
+ unsigned int flag;
+ void *addr;
+
+ /* flush the list entries. */
+ __flush_dcache_area(entry, sizeof(kimage_entry_t));
+
+ flag = *entry & IND_FLAGS;
+ if (flag == IND_DONE)
+ break;
+
+ addr = phys_to_virt(*entry & PAGE_MASK);
+
+ switch (flag) {
+ case IND_INDIRECTION:
+ /* Set entry point just before the new list page. */
+ entry = (kimage_entry_t *)addr - 1;
+ break;
+ case IND_SOURCE:
+ /* flush the source pages. */
+ __flush_dcache_area(addr, PAGE_SIZE);
+ break;
+ case IND_DESTINATION:
+ break;
+ default:
+ BUG();
+ }
+ }
+}
+
+/**
+ * kexec_segment_flush - Helper to flush the kimage segments to PoC.
+ */
+static void kexec_segment_flush(const struct kimage *kimage)
+{
+ unsigned long i;
+
+ pr_debug("%s:\n", __func__);
+
+ for (i = 0; i < kimage->nr_segments; i++) {
+ pr_debug(" segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n",
+ i,
+ kimage->segment[i].mem,
+ kimage->segment[i].mem + kimage->segment[i].memsz,
+ kimage->segment[i].memsz,
+ kimage->segment[i].memsz / PAGE_SIZE);
+
+ __flush_dcache_area(phys_to_virt(kimage->segment[i].mem),
+ kimage->segment[i].memsz);
+ }
+}
+
+/**
+ * machine_kexec - Do the kexec reboot.
+ *
+ * Called from the core kexec code for a sys_reboot with LINUX_REBOOT_CMD_KEXEC.
+ */
+void machine_kexec(struct kimage *kimage)
+{
+ phys_addr_t reboot_code_buffer_phys;
+ void *reboot_code_buffer;
+ bool in_kexec_crash = (kimage == kexec_crash_image);
+ bool stuck_cpus = cpus_are_stuck_in_kernel();
+
+ /*
+ * New cpus may have become stuck_in_kernel after we loaded the image.
+ */
+ BUG_ON(!in_kexec_crash && (stuck_cpus || (num_online_cpus() > 1)));
+ WARN(in_kexec_crash && (stuck_cpus || smp_crash_stop_failed()),
+ "Some CPUs may be stale, kdump will be unreliable.\n");
+
+ reboot_code_buffer_phys = page_to_phys(kimage->control_code_page);
+ reboot_code_buffer = phys_to_virt(reboot_code_buffer_phys);
+
+ kexec_image_info(kimage);
+
+ /*
+ * Copy arm64_relocate_new_kernel to the reboot_code_buffer for use
+ * after the kernel is shut down.
+ */
+ memcpy(reboot_code_buffer, arm64_relocate_new_kernel,
+ arm64_relocate_new_kernel_size);
+
+ /* Flush the reboot_code_buffer in preparation for its execution. */
+ __flush_dcache_area(reboot_code_buffer, arm64_relocate_new_kernel_size);
+
+ /*
+ * Although we've killed off the secondary CPUs, we don't update
+ * the online mask if we're handling a crash kernel and consequently
+ * need to avoid flush_icache_range(), which will attempt to IPI
+ * the offline CPUs. Therefore, we must use the __* variant here.
+ */
+ __flush_icache_range((uintptr_t)reboot_code_buffer,
+ (uintptr_t)reboot_code_buffer +
+ arm64_relocate_new_kernel_size);
+
+ /* Flush the kimage list and its buffers. */
+ kexec_list_flush(kimage);
+
+ /* Flush the new image if already in place. */
+ if ((kimage != kexec_crash_image) && (kimage->head & IND_DONE))
+ kexec_segment_flush(kimage);
+
+ pr_info("Bye!\n");
+
+ local_daif_mask();
+
+ /*
+ * cpu_soft_restart will shutdown the MMU, disable data caches, then
+ * transfer control to the reboot_code_buffer which contains a copy of
+ * the arm64_relocate_new_kernel routine. arm64_relocate_new_kernel
+ * uses physical addressing to relocate the new image to its final
+ * position and transfers control to the image entry point when the
+ * relocation is complete.
+ * In kexec case, kimage->start points to purgatory assuming that
+ * kernel entry and dtb address are embedded in purgatory by
+ * userspace (kexec-tools).
+ * In kexec_file case, the kernel starts directly without purgatory.
+ */
+ cpu_soft_restart(reboot_code_buffer_phys, kimage->head, kimage->start,
+#ifdef CONFIG_KEXEC_FILE
+ kimage->arch.dtb_mem);
+#else
+ 0);
+#endif
+
+ BUG(); /* Should never get here. */
+}
+
+static void machine_kexec_mask_interrupts(void)
+{
+ unsigned int i;
+ struct irq_desc *desc;
+
+ for_each_irq_desc(i, desc) {
+ struct irq_chip *chip;
+ int ret;
+
+ chip = irq_desc_get_chip(desc);
+ if (!chip)
+ continue;
+
+ /*
+ * First try to remove the active state. If this
+ * fails, try to EOI the interrupt.
+ */
+ ret = irq_set_irqchip_state(i, IRQCHIP_STATE_ACTIVE, false);
+
+ if (ret && irqd_irq_inprogress(&desc->irq_data) &&
+ chip->irq_eoi)
+ chip->irq_eoi(&desc->irq_data);
+
+ if (chip->irq_mask)
+ chip->irq_mask(&desc->irq_data);
+
+ if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
+ chip->irq_disable(&desc->irq_data);
+ }
+}
+
+/**
+ * machine_crash_shutdown - shutdown non-crashing cpus and save registers
+ */
+void machine_crash_shutdown(struct pt_regs *regs)
+{
+ local_irq_disable();
+
+ /* shutdown non-crashing cpus */
+ crash_smp_send_stop();
+
+ /* for crashing cpu */
+ crash_save_cpu(regs, smp_processor_id());
+ machine_kexec_mask_interrupts();
+
+ pr_info("Starting crashdump kernel...\n");
+}
+
+void arch_kexec_protect_crashkres(void)
+{
+ int i;
+
+ kexec_segment_flush(kexec_crash_image);
+
+ for (i = 0; i < kexec_crash_image->nr_segments; i++)
+ set_memory_valid(
+ __phys_to_virt(kexec_crash_image->segment[i].mem),
+ kexec_crash_image->segment[i].memsz >> PAGE_SHIFT, 0);
+}
+
+void arch_kexec_unprotect_crashkres(void)
+{
+ int i;
+
+ for (i = 0; i < kexec_crash_image->nr_segments; i++)
+ set_memory_valid(
+ __phys_to_virt(kexec_crash_image->segment[i].mem),
+ kexec_crash_image->segment[i].memsz >> PAGE_SHIFT, 1);
+}
+
+#ifdef CONFIG_HIBERNATION
+/*
+ * To preserve the crash dump kernel image, the relevant memory segments
+ * should be mapped again around the hibernation.
+ */
+void crash_prepare_suspend(void)
+{
+ if (kexec_crash_image)
+ arch_kexec_unprotect_crashkres();
+}
+
+void crash_post_resume(void)
+{
+ if (kexec_crash_image)
+ arch_kexec_protect_crashkres();
+}
+
+/*
+ * crash_is_nosave
+ *
+ * Return true only if a page is part of reserved memory for crash dump kernel,
+ * but does not hold any data of loaded kernel image.
+ *
+ * Note that all the pages in crash dump kernel memory have been initially
+ * marked as Reserved as memory was allocated via memblock_reserve().
+ *
+ * In hibernation, the pages which are Reserved and yet "nosave" are excluded
+ * from the hibernation image. crash_is_nosave() does this check for the crash
+ * dump kernel and will reduce the total size of hibernation image.
+ */
+
+bool crash_is_nosave(unsigned long pfn)
+{
+ int i;
+ phys_addr_t addr;
+
+ if (!crashk_res.end)
+ return false;
+
+ /* in reserved memory? */
+ addr = __pfn_to_phys(pfn);
+ if ((addr < crashk_res.start) || (crashk_res.end < addr))
+ return false;
+
+ if (!kexec_crash_image)
+ return true;
+
+ /* not part of loaded kernel image? */
+ for (i = 0; i < kexec_crash_image->nr_segments; i++)
+ if (addr >= kexec_crash_image->segment[i].mem &&
+ addr < (kexec_crash_image->segment[i].mem +
+ kexec_crash_image->segment[i].memsz))
+ return false;
+
+ return true;
+}
+
+void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
+{
+ unsigned long addr;
+ struct page *page;
+
+ for (addr = begin; addr < end; addr += PAGE_SIZE) {
+ page = phys_to_page(addr);
+ free_reserved_page(page);
+ }
+}
+#endif /* CONFIG_HIBERNATION */
diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c
new file mode 100644
index 000000000..0cde47a63
--- /dev/null
+++ b/arch/arm64/kernel/machine_kexec_file.c
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * kexec_file for arm64
+ *
+ * Copyright (C) 2018 Linaro Limited
+ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
+ *
+ * Most code is derived from arm64 port of kexec-tools
+ */
+
+#define pr_fmt(fmt) "kexec_file: " fmt
+
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/kexec.h>
+#include <linux/libfdt.h>
+#include <linux/memblock.h>
+#include <linux/of_fdt.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <asm/byteorder.h>
+
+/* relevant device tree properties */
+#define FDT_PROP_KEXEC_ELFHDR "linux,elfcorehdr"
+#define FDT_PROP_MEM_RANGE "linux,usable-memory-range"
+#define FDT_PROP_INITRD_START "linux,initrd-start"
+#define FDT_PROP_INITRD_END "linux,initrd-end"
+#define FDT_PROP_BOOTARGS "bootargs"
+#define FDT_PROP_KASLR_SEED "kaslr-seed"
+#define FDT_PROP_RNG_SEED "rng-seed"
+#define RNG_SEED_SIZE 128
+
+const struct kexec_file_ops * const kexec_file_loaders[] = {
+ &kexec_image_ops,
+ NULL
+};
+
+int arch_kimage_file_post_load_cleanup(struct kimage *image)
+{
+ vfree(image->arch.dtb);
+ image->arch.dtb = NULL;
+
+ vfree(image->arch.elf_headers);
+ image->arch.elf_headers = NULL;
+ image->arch.elf_headers_sz = 0;
+
+ return kexec_image_post_load_cleanup_default(image);
+}
+
+static int setup_dtb(struct kimage *image,
+ unsigned long initrd_load_addr, unsigned long initrd_len,
+ char *cmdline, void *dtb)
+{
+ int off, ret;
+
+ ret = fdt_path_offset(dtb, "/chosen");
+ if (ret < 0)
+ goto out;
+
+ off = ret;
+
+ ret = fdt_delprop(dtb, off, FDT_PROP_KEXEC_ELFHDR);
+ if (ret && ret != -FDT_ERR_NOTFOUND)
+ goto out;
+ ret = fdt_delprop(dtb, off, FDT_PROP_MEM_RANGE);
+ if (ret && ret != -FDT_ERR_NOTFOUND)
+ goto out;
+
+ if (image->type == KEXEC_TYPE_CRASH) {
+ /* add linux,elfcorehdr */
+ ret = fdt_appendprop_addrrange(dtb, 0, off,
+ FDT_PROP_KEXEC_ELFHDR,
+ image->arch.elf_headers_mem,
+ image->arch.elf_headers_sz);
+ if (ret)
+ return (ret == -FDT_ERR_NOSPACE ? -ENOMEM : -EINVAL);
+
+ /* add linux,usable-memory-range */
+ ret = fdt_appendprop_addrrange(dtb, 0, off,
+ FDT_PROP_MEM_RANGE,
+ crashk_res.start,
+ crashk_res.end - crashk_res.start + 1);
+ if (ret)
+ return (ret == -FDT_ERR_NOSPACE ? -ENOMEM : -EINVAL);
+ }
+
+ /* add bootargs */
+ if (cmdline) {
+ ret = fdt_setprop_string(dtb, off, FDT_PROP_BOOTARGS, cmdline);
+ if (ret)
+ goto out;
+ } else {
+ ret = fdt_delprop(dtb, off, FDT_PROP_BOOTARGS);
+ if (ret && (ret != -FDT_ERR_NOTFOUND))
+ goto out;
+ }
+
+ /* add initrd-* */
+ if (initrd_load_addr) {
+ ret = fdt_setprop_u64(dtb, off, FDT_PROP_INITRD_START,
+ initrd_load_addr);
+ if (ret)
+ goto out;
+
+ ret = fdt_setprop_u64(dtb, off, FDT_PROP_INITRD_END,
+ initrd_load_addr + initrd_len);
+ if (ret)
+ goto out;
+ } else {
+ ret = fdt_delprop(dtb, off, FDT_PROP_INITRD_START);
+ if (ret && (ret != -FDT_ERR_NOTFOUND))
+ goto out;
+
+ ret = fdt_delprop(dtb, off, FDT_PROP_INITRD_END);
+ if (ret && (ret != -FDT_ERR_NOTFOUND))
+ goto out;
+ }
+
+ /* add kaslr-seed */
+ ret = fdt_delprop(dtb, off, FDT_PROP_KASLR_SEED);
+ if (ret == -FDT_ERR_NOTFOUND)
+ ret = 0;
+ else if (ret)
+ goto out;
+
+ if (rng_is_initialized()) {
+ u64 seed = get_random_u64();
+ ret = fdt_setprop_u64(dtb, off, FDT_PROP_KASLR_SEED, seed);
+ if (ret)
+ goto out;
+ } else {
+ pr_notice("RNG is not initialised: omitting \"%s\" property\n",
+ FDT_PROP_KASLR_SEED);
+ }
+
+ /* add rng-seed */
+ if (rng_is_initialized()) {
+ void *rng_seed;
+ ret = fdt_setprop_placeholder(dtb, off, FDT_PROP_RNG_SEED,
+ RNG_SEED_SIZE, &rng_seed);
+ if (ret)
+ goto out;
+ get_random_bytes(rng_seed, RNG_SEED_SIZE);
+ } else {
+ pr_notice("RNG is not initialised: omitting \"%s\" property\n",
+ FDT_PROP_RNG_SEED);
+ }
+
+out:
+ if (ret)
+ return (ret == -FDT_ERR_NOSPACE) ? -ENOMEM : -EINVAL;
+
+ return 0;
+}
+
+/*
+ * More space needed so that we can add initrd, bootargs, kaslr-seed,
+ * rng-seed, usable-memory-range and elfcorehdr.
+ */
+#define DTB_EXTRA_SPACE 0x1000
+
+static int create_dtb(struct kimage *image,
+ unsigned long initrd_load_addr, unsigned long initrd_len,
+ char *cmdline, void **dtb)
+{
+ void *buf;
+ size_t buf_size;
+ size_t cmdline_len;
+ int ret;
+
+ cmdline_len = cmdline ? strlen(cmdline) : 0;
+ buf_size = fdt_totalsize(initial_boot_params)
+ + cmdline_len + DTB_EXTRA_SPACE;
+
+ for (;;) {
+ buf = vmalloc(buf_size);
+ if (!buf)
+ return -ENOMEM;
+
+ /* duplicate a device tree blob */
+ ret = fdt_open_into(initial_boot_params, buf, buf_size);
+ if (ret) {
+ vfree(buf);
+ return -EINVAL;
+ }
+
+ ret = setup_dtb(image, initrd_load_addr, initrd_len,
+ cmdline, buf);
+ if (ret) {
+ vfree(buf);
+ if (ret == -ENOMEM) {
+ /* unlikely, but just in case */
+ buf_size += DTB_EXTRA_SPACE;
+ continue;
+ } else {
+ return ret;
+ }
+ }
+
+ /* trim it */
+ fdt_pack(buf);
+ *dtb = buf;
+
+ return 0;
+ }
+}
+
+static int prepare_elf_headers(void **addr, unsigned long *sz)
+{
+ struct crash_mem *cmem;
+ unsigned int nr_ranges;
+ int ret;
+ u64 i;
+ phys_addr_t start, end;
+
+ nr_ranges = 1; /* for exclusion of crashkernel region */
+ for_each_mem_range(i, &start, &end)
+ nr_ranges++;
+
+ cmem = kmalloc(struct_size(cmem, ranges, nr_ranges), GFP_KERNEL);
+ if (!cmem)
+ return -ENOMEM;
+
+ cmem->max_nr_ranges = nr_ranges;
+ cmem->nr_ranges = 0;
+ for_each_mem_range(i, &start, &end) {
+ cmem->ranges[cmem->nr_ranges].start = start;
+ cmem->ranges[cmem->nr_ranges].end = end - 1;
+ cmem->nr_ranges++;
+ }
+
+ /* Exclude crashkernel region */
+ ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
+
+ if (!ret)
+ ret = crash_prepare_elf64_headers(cmem, true, addr, sz);
+
+ kfree(cmem);
+ return ret;
+}
+
+/*
+ * Tries to add the initrd and DTB to the image. If it is not possible to find
+ * valid locations, this function will undo changes to the image and return
+ * non-zero.
+ */
+int load_other_segments(struct kimage *image,
+ unsigned long kernel_load_addr,
+ unsigned long kernel_size,
+ char *initrd, unsigned long initrd_len,
+ char *cmdline)
+{
+ struct kexec_buf kbuf;
+ void *headers, *dtb = NULL;
+ unsigned long headers_sz, initrd_load_addr = 0, dtb_len,
+ orig_segments = image->nr_segments;
+ int ret = 0;
+
+ kbuf.image = image;
+ /* do not allocate anything below the kernel */
+ kbuf.buf_min = kernel_load_addr + kernel_size;
+
+ /* load elf core header */
+ if (image->type == KEXEC_TYPE_CRASH) {
+ ret = prepare_elf_headers(&headers, &headers_sz);
+ if (ret) {
+ pr_err("Preparing elf core header failed\n");
+ goto out_err;
+ }
+
+ kbuf.buffer = headers;
+ kbuf.bufsz = headers_sz;
+ kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
+ kbuf.memsz = headers_sz;
+ kbuf.buf_align = SZ_64K; /* largest supported page size */
+ kbuf.buf_max = ULONG_MAX;
+ kbuf.top_down = true;
+
+ ret = kexec_add_buffer(&kbuf);
+ if (ret) {
+ vfree(headers);
+ goto out_err;
+ }
+ image->arch.elf_headers = headers;
+ image->arch.elf_headers_mem = kbuf.mem;
+ image->arch.elf_headers_sz = headers_sz;
+
+ pr_debug("Loaded elf core header at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
+ image->arch.elf_headers_mem, kbuf.bufsz, kbuf.memsz);
+ }
+
+ /* load initrd */
+ if (initrd) {
+ kbuf.buffer = initrd;
+ kbuf.bufsz = initrd_len;
+ kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
+ kbuf.memsz = initrd_len;
+ kbuf.buf_align = 0;
+ /* within 1GB-aligned window of up to 32GB in size */
+ kbuf.buf_max = round_down(kernel_load_addr, SZ_1G)
+ + (unsigned long)SZ_1G * 32;
+ kbuf.top_down = false;
+
+ ret = kexec_add_buffer(&kbuf);
+ if (ret)
+ goto out_err;
+ initrd_load_addr = kbuf.mem;
+
+ pr_debug("Loaded initrd at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
+ initrd_load_addr, kbuf.bufsz, kbuf.memsz);
+ }
+
+ /* load dtb */
+ ret = create_dtb(image, initrd_load_addr, initrd_len, cmdline, &dtb);
+ if (ret) {
+ pr_err("Preparing for new dtb failed\n");
+ goto out_err;
+ }
+
+ dtb_len = fdt_totalsize(dtb);
+ kbuf.buffer = dtb;
+ kbuf.bufsz = dtb_len;
+ kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
+ kbuf.memsz = dtb_len;
+ /* not across 2MB boundary */
+ kbuf.buf_align = SZ_2M;
+ kbuf.buf_max = ULONG_MAX;
+ kbuf.top_down = true;
+
+ ret = kexec_add_buffer(&kbuf);
+ if (ret)
+ goto out_err;
+ image->arch.dtb = dtb;
+ image->arch.dtb_mem = kbuf.mem;
+
+ pr_debug("Loaded dtb at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
+ kbuf.mem, kbuf.bufsz, kbuf.memsz);
+
+ return 0;
+
+out_err:
+ image->nr_segments = orig_segments;
+ vfree(dtb);
+ return ret;
+}
diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c
new file mode 100644
index 000000000..29569284f
--- /dev/null
+++ b/arch/arm64/kernel/module-plts.c
@@ -0,0 +1,378 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014-2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ */
+
+#include <linux/elf.h>
+#include <linux/ftrace.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleloader.h>
+#include <linux/sort.h>
+
+static struct plt_entry __get_adrp_add_pair(u64 dst, u64 pc,
+ enum aarch64_insn_register reg)
+{
+ u32 adrp, add;
+
+ adrp = aarch64_insn_gen_adr(pc, dst, reg, AARCH64_INSN_ADR_TYPE_ADRP);
+ add = aarch64_insn_gen_add_sub_imm(reg, reg, dst % SZ_4K,
+ AARCH64_INSN_VARIANT_64BIT,
+ AARCH64_INSN_ADSB_ADD);
+
+ return (struct plt_entry){ cpu_to_le32(adrp), cpu_to_le32(add) };
+}
+
+struct plt_entry get_plt_entry(u64 dst, void *pc)
+{
+ struct plt_entry plt;
+ static u32 br;
+
+ if (!br)
+ br = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_16,
+ AARCH64_INSN_BRANCH_NOLINK);
+
+ plt = __get_adrp_add_pair(dst, (u64)pc, AARCH64_INSN_REG_16);
+ plt.br = cpu_to_le32(br);
+
+ return plt;
+}
+
+bool plt_entries_equal(const struct plt_entry *a, const struct plt_entry *b)
+{
+ u64 p, q;
+
+ /*
+ * Check whether both entries refer to the same target:
+ * do the cheapest checks first.
+ * If the 'add' or 'br' opcodes are different, then the target
+ * cannot be the same.
+ */
+ if (a->add != b->add || a->br != b->br)
+ return false;
+
+ p = ALIGN_DOWN((u64)a, SZ_4K);
+ q = ALIGN_DOWN((u64)b, SZ_4K);
+
+ /*
+ * If the 'adrp' opcodes are the same then we just need to check
+ * that they refer to the same 4k region.
+ */
+ if (a->adrp == b->adrp && p == q)
+ return true;
+
+ return (p + aarch64_insn_adrp_get_offset(le32_to_cpu(a->adrp))) ==
+ (q + aarch64_insn_adrp_get_offset(le32_to_cpu(b->adrp)));
+}
+
+static bool in_init(const struct module *mod, void *loc)
+{
+ return (u64)loc - (u64)mod->init_layout.base < mod->init_layout.size;
+}
+
+u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs,
+ void *loc, const Elf64_Rela *rela,
+ Elf64_Sym *sym)
+{
+ struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
+ &mod->arch.init;
+ struct plt_entry *plt = (struct plt_entry *)sechdrs[pltsec->plt_shndx].sh_addr;
+ int i = pltsec->plt_num_entries;
+ int j = i - 1;
+ u64 val = sym->st_value + rela->r_addend;
+
+ if (is_forbidden_offset_for_adrp(&plt[i].adrp))
+ i++;
+
+ plt[i] = get_plt_entry(val, &plt[i]);
+
+ /*
+ * Check if the entry we just created is a duplicate. Given that the
+ * relocations are sorted, this will be the last entry we allocated
+ * (if one exists).
+ */
+ if (j >= 0 && plt_entries_equal(plt + i, plt + j))
+ return (u64)&plt[j];
+
+ pltsec->plt_num_entries += i - j;
+ if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries))
+ return 0;
+
+ return (u64)&plt[i];
+}
+
+#ifdef CONFIG_ARM64_ERRATUM_843419
+u64 module_emit_veneer_for_adrp(struct module *mod, Elf64_Shdr *sechdrs,
+ void *loc, u64 val)
+{
+ struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
+ &mod->arch.init;
+ struct plt_entry *plt = (struct plt_entry *)sechdrs[pltsec->plt_shndx].sh_addr;
+ int i = pltsec->plt_num_entries++;
+ u32 br;
+ int rd;
+
+ if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries))
+ return 0;
+
+ if (is_forbidden_offset_for_adrp(&plt[i].adrp))
+ i = pltsec->plt_num_entries++;
+
+ /* get the destination register of the ADRP instruction */
+ rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD,
+ le32_to_cpup((__le32 *)loc));
+
+ br = aarch64_insn_gen_branch_imm((u64)&plt[i].br, (u64)loc + 4,
+ AARCH64_INSN_BRANCH_NOLINK);
+
+ plt[i] = __get_adrp_add_pair(val, (u64)&plt[i], rd);
+ plt[i].br = cpu_to_le32(br);
+
+ return (u64)&plt[i];
+}
+#endif
+
+#define cmp_3way(a,b) ((a) < (b) ? -1 : (a) > (b))
+
+static int cmp_rela(const void *a, const void *b)
+{
+ const Elf64_Rela *x = a, *y = b;
+ int i;
+
+ /* sort by type, symbol index and addend */
+ i = cmp_3way(ELF64_R_TYPE(x->r_info), ELF64_R_TYPE(y->r_info));
+ if (i == 0)
+ i = cmp_3way(ELF64_R_SYM(x->r_info), ELF64_R_SYM(y->r_info));
+ if (i == 0)
+ i = cmp_3way(x->r_addend, y->r_addend);
+ return i;
+}
+
+static bool duplicate_rel(const Elf64_Rela *rela, int num)
+{
+ /*
+ * Entries are sorted by type, symbol index and addend. That means
+ * that, if a duplicate entry exists, it must be in the preceding
+ * slot.
+ */
+ return num > 0 && cmp_rela(rela + num, rela + num - 1) == 0;
+}
+
+static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
+ Elf64_Word dstidx, Elf_Shdr *dstsec)
+{
+ unsigned int ret = 0;
+ Elf64_Sym *s;
+ int i;
+
+ for (i = 0; i < num; i++) {
+ u64 min_align;
+
+ switch (ELF64_R_TYPE(rela[i].r_info)) {
+ case R_AARCH64_JUMP26:
+ case R_AARCH64_CALL26:
+ if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+ break;
+
+ /*
+ * We only have to consider branch targets that resolve
+ * to symbols that are defined in a different section.
+ * This is not simply a heuristic; it is a fundamental
+ * limitation, since there is no guaranteed way to emit
+ * PLT entries sufficiently close to the branch if the
+ * section size exceeds the range of a branch
+ * instruction. So ignore relocations against defined
+ * symbols if they live in the same section as the
+ * relocation target.
+ */
+ s = syms + ELF64_R_SYM(rela[i].r_info);
+ if (s->st_shndx == dstidx)
+ break;
+
+ /*
+ * Jump relocations with non-zero addends against
+ * undefined symbols are supported by the ELF spec, but
+ * do not occur in practice (e.g., 'jump n bytes past
+ * the entry point of undefined function symbol f').
+ * So we need to support them, but there is no need to
+ * take them into consideration when trying to optimize
+ * this code. So let's only check for duplicates when
+ * the addend is zero: this allows us to record the PLT
+ * entry address in the symbol table itself, rather than
+ * having to search the list for duplicates each time we
+ * emit one.
+ */
+ if (rela[i].r_addend != 0 || !duplicate_rel(rela, i))
+ ret++;
+ break;
+ case R_AARCH64_ADR_PREL_PG_HI21_NC:
+ case R_AARCH64_ADR_PREL_PG_HI21:
+ if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) ||
+ !cpus_have_const_cap(ARM64_WORKAROUND_843419))
+ break;
+
+ /*
+ * Determine the minimal safe alignment for this ADRP
+ * instruction: the section alignment at which it is
+ * guaranteed not to appear at a vulnerable offset.
+ *
+ * This comes down to finding the least significant zero
+ * bit in bits [11:3] of the section offset, and
+ * increasing the section's alignment so that the
+ * resulting address of this instruction is guaranteed
+ * to equal the offset in that particular bit (as well
+ * as all less significant bits). This ensures that the
+ * address modulo 4 KB != 0xff8 or 0xffc (which would
+ * have all ones in bits [11:3]).
+ */
+ min_align = 2ULL << ffz(rela[i].r_offset | 0x7);
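+
+ /*
+ * For illustration: an ADRP at section offset 0xff8 has all ones in
+ * bits [11:3], so ffz(0xff8 | 0x7) == 12 and min_align == 8k; no
+ * section alignment can guarantee a safe placement, so a veneer
+ * slot is reserved instead. At offset 0x7f8, ffz(0x7f8 | 0x7) == 11
+ * gives min_align == 4k, and raising sh_addralign to 4k pins the
+ * address modulo 4k to 0x7f8, clear of the vulnerable 0xff8/0xffc
+ * offsets.
+ */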
+
+ /*
+ * Allocate veneer space for each ADRP that may appear
+ * at a vulnerable offset nonetheless. At relocation
+ * time, some of these will remain unused since some
+ * ADRP instructions can be patched to ADR instructions
+ * instead.
+ */
+ if (min_align > SZ_4K)
+ ret++;
+ else
+ dstsec->sh_addralign = max(dstsec->sh_addralign,
+ min_align);
+ break;
+ }
+ }
+
+ if (IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) &&
+ cpus_have_const_cap(ARM64_WORKAROUND_843419))
+ /*
+ * Add some slack so we can skip PLT slots that may trigger
+ * the erratum due to the placement of the ADRP instruction.
+ */
+ ret += DIV_ROUND_UP(ret, (SZ_4K / sizeof(struct plt_entry)));
+
+ return ret;
+}
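+
+/*
+ * For illustration: a plt_entry here is three instructions (12 bytes), so a
+ * 4k page holds 341 slots and the slack added above amounts to roughly one
+ * extra slot per 341 counted entries.
+ */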
+
+static bool branch_rela_needs_plt(Elf64_Sym *syms, Elf64_Rela *rela,
+ Elf64_Word dstidx)
+{
+
+ Elf64_Sym *s = syms + ELF64_R_SYM(rela->r_info);
+
+ if (s->st_shndx == dstidx)
+ return false;
+
+ return ELF64_R_TYPE(rela->r_info) == R_AARCH64_JUMP26 ||
+ ELF64_R_TYPE(rela->r_info) == R_AARCH64_CALL26;
+}
+
+/* Group branch PLT relas at the front end of the array. */
+static int partition_branch_plt_relas(Elf64_Sym *syms, Elf64_Rela *rela,
+ int numrels, Elf64_Word dstidx)
+{
+ int i = 0, j = numrels - 1;
+
+ if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+ return 0;
+
+ while (i < j) {
+ if (branch_rela_needs_plt(syms, &rela[i], dstidx))
+ i++;
+ else if (branch_rela_needs_plt(syms, &rela[j], dstidx))
+ swap(rela[i], rela[j]);
+ else
+ j--;
+ }
+
+ return i;
+}
+
+int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
+ char *secstrings, struct module *mod)
+{
+ unsigned long core_plts = 0;
+ unsigned long init_plts = 0;
+ Elf64_Sym *syms = NULL;
+ Elf_Shdr *pltsec, *tramp = NULL;
+ int i;
+
+ /*
+ * Find the empty .plt section so we can expand it to store the PLT
+ * entries. Record the symtab address as well.
+ */
+ for (i = 0; i < ehdr->e_shnum; i++) {
+ if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt"))
+ mod->arch.core.plt_shndx = i;
+ else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt"))
+ mod->arch.init.plt_shndx = i;
+ else if (!strcmp(secstrings + sechdrs[i].sh_name,
+ ".text.ftrace_trampoline"))
+ tramp = sechdrs + i;
+ else if (sechdrs[i].sh_type == SHT_SYMTAB)
+ syms = (Elf64_Sym *)sechdrs[i].sh_addr;
+ }
+
+ if (!mod->arch.core.plt_shndx || !mod->arch.init.plt_shndx) {
+ pr_err("%s: module PLT section(s) missing\n", mod->name);
+ return -ENOEXEC;
+ }
+ if (!syms) {
+ pr_err("%s: module symtab section missing\n", mod->name);
+ return -ENOEXEC;
+ }
+
+ for (i = 0; i < ehdr->e_shnum; i++) {
+ Elf64_Rela *rels = (void *)ehdr + sechdrs[i].sh_offset;
+ int nents, numrels = sechdrs[i].sh_size / sizeof(Elf64_Rela);
+ Elf64_Shdr *dstsec = sechdrs + sechdrs[i].sh_info;
+
+ if (sechdrs[i].sh_type != SHT_RELA)
+ continue;
+
+ /* ignore relocations that operate on non-exec sections */
+ if (!(dstsec->sh_flags & SHF_EXECINSTR))
+ continue;
+
+ /*
+ * sort branch relocations requiring a PLT by type, symbol index
+ * and addend
+ */
+ nents = partition_branch_plt_relas(syms, rels, numrels,
+ sechdrs[i].sh_info);
+ if (nents)
+ sort(rels, nents, sizeof(Elf64_Rela), cmp_rela, NULL);
+
+ if (!module_init_layout_section(secstrings + dstsec->sh_name))
+ core_plts += count_plts(syms, rels, numrels,
+ sechdrs[i].sh_info, dstsec);
+ else
+ init_plts += count_plts(syms, rels, numrels,
+ sechdrs[i].sh_info, dstsec);
+ }
+
+ pltsec = sechdrs + mod->arch.core.plt_shndx;
+ pltsec->sh_type = SHT_NOBITS;
+ pltsec->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+ pltsec->sh_addralign = L1_CACHE_BYTES;
+ pltsec->sh_size = (core_plts + 1) * sizeof(struct plt_entry);
+ mod->arch.core.plt_num_entries = 0;
+ mod->arch.core.plt_max_entries = core_plts;
+
+ pltsec = sechdrs + mod->arch.init.plt_shndx;
+ pltsec->sh_type = SHT_NOBITS;
+ pltsec->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+ pltsec->sh_addralign = L1_CACHE_BYTES;
+ pltsec->sh_size = (init_plts + 1) * sizeof(struct plt_entry);
+ mod->arch.init.plt_num_entries = 0;
+ mod->arch.init.plt_max_entries = init_plts;
+
+ if (tramp) {
+ tramp->sh_type = SHT_NOBITS;
+ tramp->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+ tramp->sh_addralign = __alignof__(struct plt_entry);
+ tramp->sh_size = NR_FTRACE_PLTS * sizeof(struct plt_entry);
+ }
+
+ return 0;
+}
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
new file mode 100644
index 000000000..2a1ad95d9
--- /dev/null
+++ b/arch/arm64/kernel/module.c
@@ -0,0 +1,528 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AArch64 loadable module support.
+ *
+ * Copyright (C) 2012 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/elf.h>
+#include <linux/ftrace.h>
+#include <linux/gfp.h>
+#include <linux/kasan.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/moduleloader.h>
+#include <linux/vmalloc.h>
+#include <asm/alternative.h>
+#include <asm/insn.h>
+#include <asm/sections.h>
+
+void *module_alloc(unsigned long size)
+{
+ u64 module_alloc_end = module_alloc_base + MODULES_VSIZE;
+ gfp_t gfp_mask = GFP_KERNEL;
+ void *p;
+
+ /* Silence the initial allocation */
+ if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
+ gfp_mask |= __GFP_NOWARN;
+
+ if (IS_ENABLED(CONFIG_KASAN))
+ /* don't exceed the static module region - see below */
+ module_alloc_end = MODULES_END;
+
+ p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
+ module_alloc_end, gfp_mask, PAGE_KERNEL, 0,
+ NUMA_NO_NODE, __builtin_return_address(0));
+
+ if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
+ !IS_ENABLED(CONFIG_KASAN))
+ /*
+ * KASAN can only deal with module allocations being served
+ * from the reserved module region, since the remainder of
+ * the vmalloc region is already backed by zero shadow pages,
+ * and punching holes into it is non-trivial. Since the module
+ * region is not randomized when KASAN is enabled, it is even
+ * less likely that the module region gets exhausted, so we
+ * can simply omit this fallback in that case.
+ */
+ p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
+ module_alloc_base + SZ_2G, GFP_KERNEL,
+ PAGE_KERNEL, 0, NUMA_NO_NODE,
+ __builtin_return_address(0));
+
+ if (p && (kasan_module_alloc(p, size) < 0)) {
+ vfree(p);
+ return NULL;
+ }
+
+ return p;
+}
+
+enum aarch64_reloc_op {
+ RELOC_OP_NONE,
+ RELOC_OP_ABS,
+ RELOC_OP_PREL,
+ RELOC_OP_PAGE,
+};
+
+static u64 do_reloc(enum aarch64_reloc_op reloc_op, __le32 *place, u64 val)
+{
+ switch (reloc_op) {
+ case RELOC_OP_ABS:
+ return val;
+ case RELOC_OP_PREL:
+ return val - (u64)place;
+ case RELOC_OP_PAGE:
+ return (val & ~0xfff) - ((u64)place & ~0xfff);
+ case RELOC_OP_NONE:
+ return 0;
+ }
+
+ pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
+ return 0;
+}
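+
+/*
+ * For illustration (addresses are made up): with place == 0xffff000012345678
+ * and val == 0xffff00001234abcd, RELOC_OP_PAGE yields 0x5000, the 4k-page
+ * delta that an ADRP instruction encodes; the low 12 bits of val are applied
+ * separately by the matching :lo12: ADD/LDR/STR relocation.
+ */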
+
+static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
+{
+ s64 sval = do_reloc(op, place, val);
+
+ /*
+ * The ELF psABI for AArch64 documents the 16-bit and 32-bit place
+ * relative and absolute relocations as having a range of [-2^15, 2^16)
+ * or [-2^31, 2^32), respectively. However, in order to be able to
+ * detect overflows reliably, we have to choose whether we interpret
+ * such quantities as signed or as unsigned, and stick with it.
+ * The way we organize our address space requires a signed
+ * interpretation of 32-bit relative references, so let's use that
+ * for all R_AARCH64_PRELxx relocations. This means our upper
+ * bound for overflow detection should be Sxx_MAX rather than Uxx_MAX.
+ */
+
+ switch (len) {
+ case 16:
+ *(s16 *)place = sval;
+ switch (op) {
+ case RELOC_OP_ABS:
+ if (sval < 0 || sval > U16_MAX)
+ return -ERANGE;
+ break;
+ case RELOC_OP_PREL:
+ if (sval < S16_MIN || sval > S16_MAX)
+ return -ERANGE;
+ break;
+ default:
+ pr_err("Invalid 16-bit data relocation (%d)\n", op);
+ return 0;
+ }
+ break;
+ case 32:
+ *(s32 *)place = sval;
+ switch (op) {
+ case RELOC_OP_ABS:
+ if (sval < 0 || sval > U32_MAX)
+ return -ERANGE;
+ break;
+ case RELOC_OP_PREL:
+ if (sval < S32_MIN || sval > S32_MAX)
+ return -ERANGE;
+ break;
+ default:
+ pr_err("Invalid 32-bit data relocation (%d)\n", op);
+ return 0;
+ }
+ break;
+ case 64:
+ *(s64 *)place = sval;
+ break;
+ default:
+ pr_err("Invalid length (%d) for data relocation\n", len);
+ return 0;
+ }
+ return 0;
+}
+
+enum aarch64_insn_movw_imm_type {
+ AARCH64_INSN_IMM_MOVNZ,
+ AARCH64_INSN_IMM_MOVKZ,
+};
+
+static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
+ int lsb, enum aarch64_insn_movw_imm_type imm_type)
+{
+ u64 imm;
+ s64 sval;
+ u32 insn = le32_to_cpu(*place);
+
+ sval = do_reloc(op, place, val);
+ imm = sval >> lsb;
+
+ if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
+ /*
+ * For signed MOVW relocations, we have to manipulate the
+ * instruction encoding depending on whether or not the
+ * immediate is less than zero.
+ */
+ insn &= ~(3 << 29);
+ if (sval >= 0) {
+ /* >=0: Set the instruction to MOVZ (opcode 10b). */
+ insn |= 2 << 29;
+ } else {
+ /*
+ * <0: Set the instruction to MOVN (opcode 00b).
+ * Since we've masked the opcode already, we
+ * don't need to do anything other than
+ * inverting the new immediate field.
+ */
+ imm = ~imm;
+ }
+ }
+
+ /* Update the instruction with the new encoding. */
+ insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
+ *place = cpu_to_le32(insn);
+
+ if (imm > U16_MAX)
+ return -ERANGE;
+
+ return 0;
+}
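+
+/*
+ * For illustration: a signed R_AARCH64_MOVW_SABS_G0 with sval == -0x1234
+ * takes the MOVNZ path; the opc field is rewritten to MOVN (00b) and the
+ * immediate is inverted to 0x1233, so MOVN Xd, #0x1233 materialises
+ * ~0x1233 == -0x1234, and the inverted immediate still fits in 16 bits.
+ */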
+
+static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
+ int lsb, int len, enum aarch64_insn_imm_type imm_type)
+{
+ u64 imm, imm_mask;
+ s64 sval;
+ u32 insn = le32_to_cpu(*place);
+
+ /* Calculate the relocation value. */
+ sval = do_reloc(op, place, val);
+ sval >>= lsb;
+
+ /* Extract the value bits and shift them to bit 0. */
+ imm_mask = (BIT(lsb + len) - 1) >> lsb;
+ imm = sval & imm_mask;
+
+ /* Update the instruction's immediate field. */
+ insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
+ *place = cpu_to_le32(insn);
+
+ /*
+ * Extract the upper value bits (including the sign bit) and
+ * shift them to bit 0.
+ */
+ sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);
+
+ /*
+ * Overflow has occurred if the upper bits are not all equal to
+ * the sign bit of the value.
+ */
+ if ((u64)(sval + 1) >= 2)
+ return -ERANGE;
+
+ return 0;
+}
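+
+/*
+ * For illustration: with len == 19 (e.g. R_AARCH64_CONDBR19) the encodable
+ * range is [-0x40000, 0x3ffff]; for any in-range sval the upper bits reduce
+ * to 0 or -1 after the shift above, making (u64)(sval + 1) < 2, while an
+ * out-of-range value such as 0x40000 leaves 1 behind and returns -ERANGE.
+ */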
+
+static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs,
+ __le32 *place, u64 val)
+{
+ u32 insn;
+
+ if (!is_forbidden_offset_for_adrp(place))
+ return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21,
+ AARCH64_INSN_IMM_ADR);
+
+ /* patch ADRP to ADR if it is in range */
+ if (!reloc_insn_imm(RELOC_OP_PREL, place, val & ~0xfff, 0, 21,
+ AARCH64_INSN_IMM_ADR)) {
+ insn = le32_to_cpu(*place);
+ insn &= ~BIT(31);
+ } else {
+ /* out of range for ADR -> emit a veneer */
+ val = module_emit_veneer_for_adrp(mod, sechdrs, place, val & ~0xfff);
+ if (!val)
+ return -ENOEXEC;
+ insn = aarch64_insn_gen_branch_imm((u64)place, val,
+ AARCH64_INSN_BRANCH_NOLINK);
+ }
+
+ *place = cpu_to_le32(insn);
+ return 0;
+}
+
+int apply_relocate_add(Elf64_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex,
+ unsigned int relsec,
+ struct module *me)
+{
+ unsigned int i;
+ int ovf;
+ bool overflow_check;
+ Elf64_Sym *sym;
+ void *loc;
+ u64 val;
+ Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
+
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ /* loc corresponds to P in the AArch64 ELF document. */
+ loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ + rel[i].r_offset;
+
+ /* sym is the ELF symbol we're referring to. */
+ sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
+ + ELF64_R_SYM(rel[i].r_info);
+
+ /* val corresponds to (S + A) in the AArch64 ELF document. */
+ val = sym->st_value + rel[i].r_addend;
+
+ /* Check for overflow by default. */
+ overflow_check = true;
+
+ /* Perform the static relocation. */
+ switch (ELF64_R_TYPE(rel[i].r_info)) {
+ /* Null relocations. */
+ case R_ARM_NONE:
+ case R_AARCH64_NONE:
+ ovf = 0;
+ break;
+
+ /* Data relocations. */
+ case R_AARCH64_ABS64:
+ overflow_check = false;
+ ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
+ break;
+ case R_AARCH64_ABS32:
+ ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
+ break;
+ case R_AARCH64_ABS16:
+ ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
+ break;
+ case R_AARCH64_PREL64:
+ overflow_check = false;
+ ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
+ break;
+ case R_AARCH64_PREL32:
+ ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
+ break;
+ case R_AARCH64_PREL16:
+ ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
+ break;
+
+ /* MOVW instruction relocations. */
+ case R_AARCH64_MOVW_UABS_G0_NC:
+ overflow_check = false;
+ fallthrough;
+ case R_AARCH64_MOVW_UABS_G0:
+ ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
+ AARCH64_INSN_IMM_MOVKZ);
+ break;
+ case R_AARCH64_MOVW_UABS_G1_NC:
+ overflow_check = false;
+ fallthrough;
+ case R_AARCH64_MOVW_UABS_G1:
+ ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
+ AARCH64_INSN_IMM_MOVKZ);
+ break;
+ case R_AARCH64_MOVW_UABS_G2_NC:
+ overflow_check = false;
+ fallthrough;
+ case R_AARCH64_MOVW_UABS_G2:
+ ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
+ AARCH64_INSN_IMM_MOVKZ);
+ break;
+ case R_AARCH64_MOVW_UABS_G3:
+ /* We're using the top bits so we can't overflow. */
+ overflow_check = false;
+ ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
+ AARCH64_INSN_IMM_MOVKZ);
+ break;
+ case R_AARCH64_MOVW_SABS_G0:
+ ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
+ AARCH64_INSN_IMM_MOVNZ);
+ break;
+ case R_AARCH64_MOVW_SABS_G1:
+ ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
+ AARCH64_INSN_IMM_MOVNZ);
+ break;
+ case R_AARCH64_MOVW_SABS_G2:
+ ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
+ AARCH64_INSN_IMM_MOVNZ);
+ break;
+ case R_AARCH64_MOVW_PREL_G0_NC:
+ overflow_check = false;
+ ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
+ AARCH64_INSN_IMM_MOVKZ);
+ break;
+ case R_AARCH64_MOVW_PREL_G0:
+ ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
+ AARCH64_INSN_IMM_MOVNZ);
+ break;
+ case R_AARCH64_MOVW_PREL_G1_NC:
+ overflow_check = false;
+ ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
+ AARCH64_INSN_IMM_MOVKZ);
+ break;
+ case R_AARCH64_MOVW_PREL_G1:
+ ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
+ AARCH64_INSN_IMM_MOVNZ);
+ break;
+ case R_AARCH64_MOVW_PREL_G2_NC:
+ overflow_check = false;
+ ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
+ AARCH64_INSN_IMM_MOVKZ);
+ break;
+ case R_AARCH64_MOVW_PREL_G2:
+ ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
+ AARCH64_INSN_IMM_MOVNZ);
+ break;
+ case R_AARCH64_MOVW_PREL_G3:
+ /* We're using the top bits so we can't overflow. */
+ overflow_check = false;
+ ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
+ AARCH64_INSN_IMM_MOVNZ);
+ break;
+
+ /* Immediate instruction relocations. */
+ case R_AARCH64_LD_PREL_LO19:
+ ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
+ AARCH64_INSN_IMM_19);
+ break;
+ case R_AARCH64_ADR_PREL_LO21:
+ ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
+ AARCH64_INSN_IMM_ADR);
+ break;
+ case R_AARCH64_ADR_PREL_PG_HI21_NC:
+ overflow_check = false;
+ fallthrough;
+ case R_AARCH64_ADR_PREL_PG_HI21:
+ ovf = reloc_insn_adrp(me, sechdrs, loc, val);
+ if (ovf && ovf != -ERANGE)
+ return ovf;
+ break;
+ case R_AARCH64_ADD_ABS_LO12_NC:
+ case R_AARCH64_LDST8_ABS_LO12_NC:
+ overflow_check = false;
+ ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
+ AARCH64_INSN_IMM_12);
+ break;
+ case R_AARCH64_LDST16_ABS_LO12_NC:
+ overflow_check = false;
+ ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
+ AARCH64_INSN_IMM_12);
+ break;
+ case R_AARCH64_LDST32_ABS_LO12_NC:
+ overflow_check = false;
+ ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
+ AARCH64_INSN_IMM_12);
+ break;
+ case R_AARCH64_LDST64_ABS_LO12_NC:
+ overflow_check = false;
+ ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
+ AARCH64_INSN_IMM_12);
+ break;
+ case R_AARCH64_LDST128_ABS_LO12_NC:
+ overflow_check = false;
+ ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
+ AARCH64_INSN_IMM_12);
+ break;
+ case R_AARCH64_TSTBR14:
+ ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
+ AARCH64_INSN_IMM_14);
+ break;
+ case R_AARCH64_CONDBR19:
+ ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
+ AARCH64_INSN_IMM_19);
+ break;
+ case R_AARCH64_JUMP26:
+ case R_AARCH64_CALL26:
+ ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
+ AARCH64_INSN_IMM_26);
+
+ if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
+ ovf == -ERANGE) {
+ val = module_emit_plt_entry(me, sechdrs, loc, &rel[i], sym);
+ if (!val)
+ return -ENOEXEC;
+ ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
+ 26, AARCH64_INSN_IMM_26);
+ }
+ break;
+
+ default:
+ pr_err("module %s: unsupported RELA relocation: %llu\n",
+ me->name, ELF64_R_TYPE(rel[i].r_info));
+ return -ENOEXEC;
+ }
+
+ if (overflow_check && ovf == -ERANGE)
+ goto overflow;
+
+ }
+
+ return 0;
+
+overflow:
+ pr_err("module %s: overflow in relocation type %d val %Lx\n",
+ me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
+ return -ENOEXEC;
+}
+
+static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ const char *name)
+{
+ const Elf_Shdr *s, *se;
+ const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+ for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
+ if (strcmp(name, secstrs + s->sh_name) == 0)
+ return s;
+ }
+
+ return NULL;
+}
+
+static inline void __init_plt(struct plt_entry *plt, unsigned long addr)
+{
+ *plt = get_plt_entry(addr, plt);
+}
+
+static int module_init_ftrace_plt(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ struct module *mod)
+{
+#if defined(CONFIG_ARM64_MODULE_PLTS) && defined(CONFIG_DYNAMIC_FTRACE)
+ const Elf_Shdr *s;
+ struct plt_entry *plts;
+
+ s = find_section(hdr, sechdrs, ".text.ftrace_trampoline");
+ if (!s)
+ return -ENOEXEC;
+
+ plts = (void *)s->sh_addr;
+
+ __init_plt(&plts[FTRACE_PLT_IDX], FTRACE_ADDR);
+
+ if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
+ __init_plt(&plts[FTRACE_REGS_PLT_IDX], FTRACE_REGS_ADDR);
+
+ mod->arch.ftrace_trampolines = plts;
+#endif
+ return 0;
+}
+
+int module_finalize(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ struct module *me)
+{
+ const Elf_Shdr *s;
+ s = find_section(hdr, sechdrs, ".altinstructions");
+ if (s)
+ apply_alternatives_module((void *)s->sh_addr, s->sh_size);
+
+ return module_init_ftrace_plt(hdr, sechdrs, me);
+}
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
new file mode 100644
index 000000000..4a069f85b
--- /dev/null
+++ b/arch/arm64/kernel/mte.c
@@ -0,0 +1,341 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 ARM Ltd.
+ */
+
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/prctl.h>
+#include <linux/sched.h>
+#include <linux/sched/mm.h>
+#include <linux/string.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
+#include <linux/thread_info.h>
+#include <linux/uio.h>
+
+#include <asm/cpufeature.h>
+#include <asm/mte.h>
+#include <asm/ptrace.h>
+#include <asm/sysreg.h>
+
+static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
+{
+ pte_t old_pte = READ_ONCE(*ptep);
+
+ if (check_swap && is_swap_pte(old_pte)) {
+ swp_entry_t entry = pte_to_swp_entry(old_pte);
+
+ if (!non_swap_entry(entry) && mte_restore_tags(entry, page))
+ return;
+ }
+
+ mte_clear_page_tags(page_address(page));
+}
+
+void mte_sync_tags(pte_t *ptep, pte_t pte)
+{
+ struct page *page = pte_page(pte);
+ long i, nr_pages = compound_nr(page);
+ bool check_swap = nr_pages == 1;
+
+ /* if PG_mte_tagged is set, tags have already been initialised */
+ for (i = 0; i < nr_pages; i++, page++) {
+ if (!test_and_set_bit(PG_mte_tagged, &page->flags))
+ mte_sync_page_tags(page, ptep, check_swap);
+ }
+
+ /* ensure the tags are visible before the PTE is set */
+ smp_wmb();
+}
+
+int memcmp_pages(struct page *page1, struct page *page2)
+{
+ char *addr1, *addr2;
+ int ret;
+
+ addr1 = page_address(page1);
+ addr2 = page_address(page2);
+ ret = memcmp(addr1, addr2, PAGE_SIZE);
+
+ if (!system_supports_mte() || ret)
+ return ret;
+
+ /*
+ * If the page content is identical but at least one of the pages is
+ * tagged, return non-zero to avoid KSM merging. If only one of the
+ * pages is tagged, set_pte_at() may zero or change the tags of the
+ * other page via mte_sync_tags().
+ */
+ if (test_bit(PG_mte_tagged, &page1->flags) ||
+ test_bit(PG_mte_tagged, &page2->flags))
+ return addr1 != addr2;
+
+ return ret;
+}
+
+static void update_sctlr_el1_tcf0(u64 tcf0)
+{
+ /* ISB required for the kernel uaccess routines */
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF0_MASK, tcf0);
+ isb();
+}
+
+static void set_sctlr_el1_tcf0(u64 tcf0)
+{
+ /*
+ * mte_thread_switch() checks current->thread.sctlr_tcf0 as an
+ * optimisation. Disable preemption so that it does not see
+ * the variable update before the SCTLR_EL1.TCF0 one.
+ */
+ preempt_disable();
+ current->thread.sctlr_tcf0 = tcf0;
+ update_sctlr_el1_tcf0(tcf0);
+ preempt_enable();
+}
+
+static void update_gcr_el1_excl(u64 incl)
+{
+ u64 excl = ~incl & SYS_GCR_EL1_EXCL_MASK;
+
+ /*
+ * Note that 'incl' is an include mask (controlled by the user via
+ * prctl()) while GCR_EL1 accepts an exclude mask.
+ * No need for ISB since this only affects EL0 currently, implicit
+ * with ERET.
+ */
+ sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, excl);
+}
+
+static void set_gcr_el1_excl(u64 incl)
+{
+ current->thread.gcr_user_incl = incl;
+ update_gcr_el1_excl(incl);
+}
+
+void flush_mte_state(void)
+{
+ if (!system_supports_mte())
+ return;
+
+ /* clear any pending asynchronous tag fault */
+ dsb(ish);
+ write_sysreg_s(0, SYS_TFSRE0_EL1);
+ clear_thread_flag(TIF_MTE_ASYNC_FAULT);
+ /* disable tag checking */
+ set_sctlr_el1_tcf0(SCTLR_EL1_TCF0_NONE);
+ /* reset tag generation mask */
+ set_gcr_el1_excl(0);
+}
+
+void mte_thread_switch(struct task_struct *next)
+{
+ if (!system_supports_mte())
+ return;
+
+ /* avoid expensive SCTLR_EL1 accesses if no change */
+ if (current->thread.sctlr_tcf0 != next->thread.sctlr_tcf0)
+ update_sctlr_el1_tcf0(next->thread.sctlr_tcf0);
+ update_gcr_el1_excl(next->thread.gcr_user_incl);
+}
+
+void mte_suspend_exit(void)
+{
+ if (!system_supports_mte())
+ return;
+
+ update_gcr_el1_excl(current->thread.gcr_user_incl);
+}
+
+long set_mte_ctrl(struct task_struct *task, unsigned long arg)
+{
+ u64 tcf0;
+ u64 gcr_incl = (arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT;
+
+ if (!system_supports_mte())
+ return 0;
+
+ switch (arg & PR_MTE_TCF_MASK) {
+ case PR_MTE_TCF_NONE:
+ tcf0 = SCTLR_EL1_TCF0_NONE;
+ break;
+ case PR_MTE_TCF_SYNC:
+ tcf0 = SCTLR_EL1_TCF0_SYNC;
+ break;
+ case PR_MTE_TCF_ASYNC:
+ tcf0 = SCTLR_EL1_TCF0_ASYNC;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (task != current) {
+ task->thread.sctlr_tcf0 = tcf0;
+ task->thread.gcr_user_incl = gcr_incl;
+ } else {
+ set_sctlr_el1_tcf0(tcf0);
+ set_gcr_el1_excl(gcr_incl);
+ }
+
+ return 0;
+}
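+
+/*
+ * For illustration, userspace typically reaches set_mte_ctrl() through the
+ * tagged address ABI prctl(), e.g.:
+ *
+ * prctl(PR_SET_TAGGED_ADDR_CTRL,
+ * PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC | (0xfffe << PR_MTE_TAG_SHIFT),
+ * 0, 0, 0);
+ *
+ * selecting synchronous tag checking and an include mask of 0xfffe, i.e.
+ * IRG may generate any tag except 0; PR_TAGGED_ADDR_ENABLE itself is
+ * handled separately by the tagged address ABI code.
+ */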
+
+long get_mte_ctrl(struct task_struct *task)
+{
+ unsigned long ret;
+
+ if (!system_supports_mte())
+ return 0;
+
+ ret = task->thread.gcr_user_incl << PR_MTE_TAG_SHIFT;
+
+ switch (task->thread.sctlr_tcf0) {
+ case SCTLR_EL1_TCF0_NONE:
+ ret |= PR_MTE_TCF_NONE;
+ break;
+ case SCTLR_EL1_TCF0_SYNC:
+ ret |= PR_MTE_TCF_SYNC;
+ break;
+ case SCTLR_EL1_TCF0_ASYNC:
+ ret |= PR_MTE_TCF_ASYNC;
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Access MTE tags in another process' address space as given in mm. Update
+ * the number of tags copied. Return 0 if any tags copied, error otherwise.
+ * Inspired by __access_remote_vm().
+ */
+static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
+ struct iovec *kiov, unsigned int gup_flags)
+{
+ struct vm_area_struct *vma;
+ void __user *buf = kiov->iov_base;
+ size_t len = kiov->iov_len;
+ int ret;
+ int write = gup_flags & FOLL_WRITE;
+
+ if (!access_ok(buf, len))
+ return -EFAULT;
+
+ if (mmap_read_lock_killable(mm))
+ return -EIO;
+
+ while (len) {
+ unsigned long tags, offset;
+ void *maddr;
+ struct page *page = NULL;
+
+ ret = get_user_pages_remote(mm, addr, 1, gup_flags, &page,
+ &vma, NULL);
+ if (ret <= 0)
+ break;
+
+ /*
+ * Only copy tags if the page has been mapped as PROT_MTE
+ * (PG_mte_tagged set). Otherwise the tags are not valid and
+ * not accessible to user. Moreover, an mprotect(PROT_MTE)
+ * would cause the existing tags to be cleared if the page
+ * was never mapped with PROT_MTE.
+ */
+ if (!(vma->vm_flags & VM_MTE)) {
+ ret = -EOPNOTSUPP;
+ put_page(page);
+ break;
+ }
+ WARN_ON_ONCE(!test_bit(PG_mte_tagged, &page->flags));
+
+ /* limit access to the end of the page */
+ offset = offset_in_page(addr);
+ tags = min(len, (PAGE_SIZE - offset) / MTE_GRANULE_SIZE);
+
+ maddr = page_address(page);
+ if (write) {
+ tags = mte_copy_tags_from_user(maddr + offset, buf, tags);
+ set_page_dirty_lock(page);
+ } else {
+ tags = mte_copy_tags_to_user(buf, maddr + offset, tags);
+ }
+ put_page(page);
+
+ /* error accessing the tracer's buffer */
+ if (!tags)
+ break;
+
+ len -= tags;
+ buf += tags;
+ addr += tags * MTE_GRANULE_SIZE;
+ }
+ mmap_read_unlock(mm);
+
+ /* return an error if no tags copied */
+ kiov->iov_len = buf - kiov->iov_base;
+ if (!kiov->iov_len) {
+ /* check for error accessing the tracee's address space */
+ if (ret <= 0)
+ return -EIO;
+ else
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/*
+ * Copy MTE tags in another process' address space at 'addr' to/from tracer's
+ * iovec buffer. Return 0 on success. Inspired by ptrace_access_vm().
+ */
+static int access_remote_tags(struct task_struct *tsk, unsigned long addr,
+ struct iovec *kiov, unsigned int gup_flags)
+{
+ struct mm_struct *mm;
+ int ret;
+
+ mm = get_task_mm(tsk);
+ if (!mm)
+ return -EPERM;
+
+ if (!tsk->ptrace || (current != tsk->parent) ||
+ ((get_dumpable(mm) != SUID_DUMP_USER) &&
+ !ptracer_capable(tsk, mm->user_ns))) {
+ mmput(mm);
+ return -EPERM;
+ }
+
+ ret = __access_remote_tags(mm, addr, kiov, gup_flags);
+ mmput(mm);
+
+ return ret;
+}
+
+int mte_ptrace_copy_tags(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
+{
+ int ret;
+ struct iovec kiov;
+ struct iovec __user *uiov = (void __user *)data;
+ unsigned int gup_flags = FOLL_FORCE;
+
+ if (!system_supports_mte())
+ return -EIO;
+
+ if (get_user(kiov.iov_base, &uiov->iov_base) ||
+ get_user(kiov.iov_len, &uiov->iov_len))
+ return -EFAULT;
+
+ if (request == PTRACE_POKEMTETAGS)
+ gup_flags |= FOLL_WRITE;
+
+ /* align addr to the MTE tag granule */
+ addr &= MTE_GRANULE_MASK;
+
+ ret = access_remote_tags(child, addr, &kiov, gup_flags);
+ if (!ret)
+ ret = put_user(kiov.iov_len, &uiov->iov_len);
+
+ return ret;
+}
diff --git a/arch/arm64/kernel/paravirt.c b/arch/arm64/kernel/paravirt.c
new file mode 100644
index 000000000..69ec670bc
--- /dev/null
+++ b/arch/arm64/kernel/paravirt.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2013 Citrix Systems
+ *
+ * Author: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
+ */
+
+#define pr_fmt(fmt) "arm-pv: " fmt
+
+#include <linux/arm-smccc.h>
+#include <linux/cpuhotplug.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/jump_label.h>
+#include <linux/printk.h>
+#include <linux/psci.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <asm/paravirt.h>
+#include <asm/pvclock-abi.h>
+#include <asm/smp_plat.h>
+
+struct static_key paravirt_steal_enabled;
+struct static_key paravirt_steal_rq_enabled;
+
+struct paravirt_patch_template pv_ops;
+EXPORT_SYMBOL_GPL(pv_ops);
+
+struct pv_time_stolen_time_region {
+ struct pvclock_vcpu_stolen_time __rcu *kaddr;
+};
+
+static DEFINE_PER_CPU(struct pv_time_stolen_time_region, stolen_time_region);
+
+static bool steal_acc = true;
+static int __init parse_no_stealacc(char *arg)
+{
+ steal_acc = false;
+ return 0;
+}
+
+early_param("no-steal-acc", parse_no_stealacc);
+
+/* return stolen time in ns by asking the hypervisor */
+static u64 pv_steal_clock(int cpu)
+{
+ struct pvclock_vcpu_stolen_time *kaddr = NULL;
+ struct pv_time_stolen_time_region *reg;
+ u64 ret = 0;
+
+ reg = per_cpu_ptr(&stolen_time_region, cpu);
+
+ /*
+ * paravirt_steal_clock() may be called before the CPU
+ * online notification callback runs. Until the callback
+ * has run we just return zero.
+ */
+ rcu_read_lock();
+ kaddr = rcu_dereference(reg->kaddr);
+ if (!kaddr) {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ ret = le64_to_cpu(READ_ONCE(kaddr->stolen_time));
+ rcu_read_unlock();
+ return ret;
+}
+
+static int stolen_time_cpu_down_prepare(unsigned int cpu)
+{
+ struct pvclock_vcpu_stolen_time *kaddr = NULL;
+ struct pv_time_stolen_time_region *reg;
+
+ reg = this_cpu_ptr(&stolen_time_region);
+ if (!reg->kaddr)
+ return 0;
+
+ kaddr = rcu_replace_pointer(reg->kaddr, NULL, true);
+ synchronize_rcu();
+ memunmap(kaddr);
+
+ return 0;
+}
+
+static int stolen_time_cpu_online(unsigned int cpu)
+{
+ struct pvclock_vcpu_stolen_time *kaddr = NULL;
+ struct pv_time_stolen_time_region *reg;
+ struct arm_smccc_res res;
+
+ reg = this_cpu_ptr(&stolen_time_region);
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_HV_PV_TIME_ST, &res);
+
+ if (res.a0 == SMCCC_RET_NOT_SUPPORTED)
+ return -EINVAL;
+
+ kaddr = memremap(res.a0,
+ sizeof(struct pvclock_vcpu_stolen_time),
+ MEMREMAP_WB);
+
+ rcu_assign_pointer(reg->kaddr, kaddr);
+
+ if (!reg->kaddr) {
+ pr_warn("Failed to map stolen time data structure\n");
+ return -ENOMEM;
+ }
+
+ if (le32_to_cpu(kaddr->revision) != 0 ||
+ le32_to_cpu(kaddr->attributes) != 0) {
+ pr_warn_once("Unexpected revision or attributes in stolen time data\n");
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int __init pv_time_init_stolen_time(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "hypervisor/arm/pvtime:online",
+ stolen_time_cpu_online,
+ stolen_time_cpu_down_prepare);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static bool __init has_pv_steal_clock(void)
+{
+ struct arm_smccc_res res;
+
+ /* To detect the presence of PV time support we require SMCCC 1.1+ */
+ if (arm_smccc_1_1_get_conduit() == SMCCC_CONDUIT_NONE)
+ return false;
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_HV_PV_TIME_FEATURES, &res);
+
+ if (res.a0 != SMCCC_RET_SUCCESS)
+ return false;
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_HV_PV_TIME_FEATURES,
+ ARM_SMCCC_HV_PV_TIME_ST, &res);
+
+ return (res.a0 == SMCCC_RET_SUCCESS);
+}
+
+int __init pv_time_init(void)
+{
+ int ret;
+
+ if (!has_pv_steal_clock())
+ return 0;
+
+ ret = pv_time_init_stolen_time();
+ if (ret)
+ return ret;
+
+ pv_ops.time.steal_clock = pv_steal_clock;
+
+ static_key_slow_inc(&paravirt_steal_enabled);
+ if (steal_acc)
+ static_key_slow_inc(&paravirt_steal_rq_enabled);
+
+ pr_info("using stolen time PV\n");
+
+ return 0;
+}
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
new file mode 100644
index 000000000..1006ed2d7
--- /dev/null
+++ b/arch/arm64/kernel/pci.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Code borrowed from powerpc/kernel/pci-common.c
+ *
+ * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
+ * Copyright (C) 2014 ARM Ltd.
+ */
+
+#include <linux/acpi.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
+#include <linux/slab.h>
+
+#ifdef CONFIG_ACPI
+/*
+ * Try to assign the IRQ number when probing a new device
+ */
+int pcibios_alloc_irq(struct pci_dev *dev)
+{
+ if (!acpi_disabled)
+ acpi_pci_irq_enable(dev);
+
+ return 0;
+}
+#endif
+
+/*
+ * raw_pci_read/write - Platform-specific PCI config space access.
+ */
+int raw_pci_read(unsigned int domain, unsigned int bus,
+ unsigned int devfn, int reg, int len, u32 *val)
+{
+ struct pci_bus *b = pci_find_bus(domain, bus);
+
+ if (!b)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ return b->ops->read(b, devfn, reg, len, val);
+}
+
+int raw_pci_write(unsigned int domain, unsigned int bus,
+ unsigned int devfn, int reg, int len, u32 val)
+{
+ struct pci_bus *b = pci_find_bus(domain, bus);
+
+ if (!b)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ return b->ops->write(b, devfn, reg, len, val);
+}
+
+#ifdef CONFIG_NUMA
+
+int pcibus_to_node(struct pci_bus *bus)
+{
+ return dev_to_node(&bus->dev);
+}
+EXPORT_SYMBOL(pcibus_to_node);
+
+#endif
+
+#ifdef CONFIG_ACPI
+
+struct acpi_pci_generic_root_info {
+ struct acpi_pci_root_info common;
+ struct pci_config_window *cfg; /* config space mapping */
+};
+
+int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ struct acpi_device *adev = to_acpi_device(cfg->parent);
+ struct acpi_pci_root *root = acpi_driver_data(adev);
+
+ return root->segment;
+}
+
+int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
+{
+ if (!acpi_disabled) {
+ struct pci_config_window *cfg = bridge->bus->sysdata;
+ struct acpi_device *adev = to_acpi_device(cfg->parent);
+ struct device *bus_dev = &bridge->bus->dev;
+
+ ACPI_COMPANION_SET(&bridge->dev, adev);
+ set_dev_node(bus_dev, acpi_get_node(acpi_device_handle(adev)));
+ }
+
+ return 0;
+}
+
+static int pci_acpi_root_prepare_resources(struct acpi_pci_root_info *ci)
+{
+ struct resource_entry *entry, *tmp;
+ int status;
+
+ status = acpi_pci_probe_root_resources(ci);
+ resource_list_for_each_entry_safe(entry, tmp, &ci->resources) {
+ if (!(entry->res->flags & IORESOURCE_WINDOW))
+ resource_list_destroy_entry(entry);
+ }
+ return status;
+}
+
+/*
+ * Lookup the bus range for the domain in MCFG, and set up config space
+ * mapping.
+ */
+static struct pci_config_window *
+pci_acpi_setup_ecam_mapping(struct acpi_pci_root *root)
+{
+ struct device *dev = &root->device->dev;
+ struct resource *bus_res = &root->secondary;
+ u16 seg = root->segment;
+ const struct pci_ecam_ops *ecam_ops;
+ struct resource cfgres;
+ struct acpi_device *adev;
+ struct pci_config_window *cfg;
+ int ret;
+
+ ret = pci_mcfg_lookup(root, &cfgres, &ecam_ops);
+ if (ret) {
+ dev_err(dev, "%04x:%pR ECAM region not found\n", seg, bus_res);
+ return NULL;
+ }
+
+ adev = acpi_resource_consumer(&cfgres);
+ if (adev)
+ dev_info(dev, "ECAM area %pR reserved by %s\n", &cfgres,
+ dev_name(&adev->dev));
+ else
+ dev_warn(dev, FW_BUG "ECAM area %pR not reserved in ACPI namespace\n",
+ &cfgres);
+
+ cfg = pci_ecam_create(dev, &cfgres, bus_res, ecam_ops);
+ if (IS_ERR(cfg)) {
+ dev_err(dev, "%04x:%pR error %ld mapping ECAM\n", seg, bus_res,
+ PTR_ERR(cfg));
+ return NULL;
+ }
+
+ return cfg;
+}
+
+/* release_info: free resources allocated by init_info */
+static void pci_acpi_generic_release_info(struct acpi_pci_root_info *ci)
+{
+ struct acpi_pci_generic_root_info *ri;
+
+ ri = container_of(ci, struct acpi_pci_generic_root_info, common);
+ pci_ecam_free(ri->cfg);
+ kfree(ci->ops);
+ kfree(ri);
+}
+
+/* Interface called from ACPI code to set up a PCI host controller */
+struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
+{
+ struct acpi_pci_generic_root_info *ri;
+ struct pci_bus *bus, *child;
+ struct acpi_pci_root_ops *root_ops;
+ struct pci_host_bridge *host;
+
+ ri = kzalloc(sizeof(*ri), GFP_KERNEL);
+ if (!ri)
+ return NULL;
+
+ root_ops = kzalloc(sizeof(*root_ops), GFP_KERNEL);
+ if (!root_ops) {
+ kfree(ri);
+ return NULL;
+ }
+
+ ri->cfg = pci_acpi_setup_ecam_mapping(root);
+ if (!ri->cfg) {
+ kfree(ri);
+ kfree(root_ops);
+ return NULL;
+ }
+
+ root_ops->release_info = pci_acpi_generic_release_info;
+ root_ops->prepare_resources = pci_acpi_root_prepare_resources;
+ root_ops->pci_ops = (struct pci_ops *)&ri->cfg->ops->pci_ops;
+ bus = acpi_pci_root_create(root, root_ops, &ri->common, ri->cfg);
+ if (!bus)
+ return NULL;
+
+ /* If we must preserve the resource configuration, claim now */
+ host = pci_find_host_bridge(bus);
+ if (host->preserve_config)
+ pci_bus_claim_resources(bus);
+
+ /*
+ * Assign whatever was left unassigned. If we didn't claim above,
+ * this will reassign everything.
+ */
+ pci_assign_unassigned_root_bus_resources(bus);
+
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
+
+ return bus;
+}
+
+void pcibios_add_bus(struct pci_bus *bus)
+{
+ acpi_pci_add_bus(bus);
+}
+
+void pcibios_remove_bus(struct pci_bus *bus)
+{
+ acpi_pci_remove_bus(bus);
+}
+
+#endif
diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
new file mode 100644
index 000000000..58ae55d78
--- /dev/null
+++ b/arch/arm64/kernel/perf_callchain.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * arm64 callchain support
+ *
+ * Copyright (C) 2015 ARM Limited
+ */
+#include <linux/perf_event.h>
+#include <linux/uaccess.h>
+
+#include <asm/pointer_auth.h>
+#include <asm/stacktrace.h>
+
+struct frame_tail {
+ struct frame_tail __user *fp;
+ unsigned long lr;
+} __attribute__((packed));
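+
+/*
+ * This layout mirrors the AAPCS64 frame record: the frame pointer (x29)
+ * points at a saved {previous fp, lr} pair, so following fp links walks
+ * back through the user call chain.
+ */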
+
+/*
+ * Get the return address for a single stackframe and return a pointer to the
+ * next frame tail.
+ */
+static struct frame_tail __user *
+user_backtrace(struct frame_tail __user *tail,
+ struct perf_callchain_entry_ctx *entry)
+{
+ struct frame_tail buftail;
+ unsigned long err;
+ unsigned long lr;
+
+ /* Also check accessibility of one struct frame_tail beyond */
+ if (!access_ok(tail, sizeof(buftail)))
+ return NULL;
+
+ pagefault_disable();
+ err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
+ pagefault_enable();
+
+ if (err)
+ return NULL;
+
+ lr = ptrauth_strip_insn_pac(buftail.lr);
+
+ perf_callchain_store(entry, lr);
+
+ /*
+ * Frame pointers should strictly progress back up the stack
+ * (towards higher addresses).
+ */
+ if (tail >= buftail.fp)
+ return NULL;
+
+ return buftail.fp;
+}
+
+#ifdef CONFIG_COMPAT
+/*
+ * The registers we're interested in are at the end of the variable
+ * length saved register structure. The fp points at the end of this
+ * structure so the address of this struct is:
+ * (struct compat_frame_tail *)(xxx->fp)-1
+ *
+ * This code has been adapted from the ARM OProfile support.
+ */
+struct compat_frame_tail {
+ compat_uptr_t fp; /* a (struct compat_frame_tail *) in compat mode */
+ u32 sp;
+ u32 lr;
+} __attribute__((packed));
+
+static struct compat_frame_tail __user *
+compat_user_backtrace(struct compat_frame_tail __user *tail,
+ struct perf_callchain_entry_ctx *entry)
+{
+ struct compat_frame_tail buftail;
+ unsigned long err;
+
+ /* Also check accessibility of one struct frame_tail beyond */
+ if (!access_ok(tail, sizeof(buftail)))
+ return NULL;
+
+ pagefault_disable();
+ err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
+ pagefault_enable();
+
+ if (err)
+ return NULL;
+
+ perf_callchain_store(entry, buftail.lr);
+
+ /*
+ * Frame pointers should strictly progress back up the stack
+ * (towards higher addresses).
+ */
+ if (tail + 1 >= (struct compat_frame_tail __user *)
+ compat_ptr(buftail.fp))
+ return NULL;
+
+ return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
+}
+#endif /* CONFIG_COMPAT */
+
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs)
+{
+ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+
+ if (guest_cbs && guest_cbs->is_in_guest()) {
+ /* We don't support guest OS callchains yet */
+ return;
+ }
+
+ perf_callchain_store(entry, regs->pc);
+
+ if (!compat_user_mode(regs)) {
+ /* AARCH64 mode */
+ struct frame_tail __user *tail;
+
+ tail = (struct frame_tail __user *)regs->regs[29];
+
+ while (entry->nr < entry->max_stack &&
+ tail && !((unsigned long)tail & 0xf))
+ tail = user_backtrace(tail, entry);
+ } else {
+#ifdef CONFIG_COMPAT
+ /* AARCH32 compat mode */
+ struct compat_frame_tail __user *tail;
+
+ tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
+
+ while ((entry->nr < entry->max_stack) &&
+ tail && !((unsigned long)tail & 0x3))
+ tail = compat_user_backtrace(tail, entry);
+#endif
+ }
+}
+
+/*
+ * Gets called by walk_stackframe() for every stackframe. This will be called
+ * whilst unwinding the stackframe and is like a subroutine return so we use
+ * the PC.
+ */
+static bool callchain_trace(void *data, unsigned long pc)
+{
+ struct perf_callchain_entry_ctx *entry = data;
+ perf_callchain_store(entry, pc);
+ return true;
+}
+
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs)
+{
+ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+ struct stackframe frame;
+
+ if (guest_cbs && guest_cbs->is_in_guest()) {
+ /* We don't support guest OS callchains yet */
+ return;
+ }
+
+ start_backtrace(&frame, regs->regs[29], regs->pc);
+ walk_stackframe(current, &frame, callchain_trace, entry);
+}
+
+unsigned long perf_instruction_pointer(struct pt_regs *regs)
+{
+ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+
+ if (guest_cbs && guest_cbs->is_in_guest())
+ return guest_cbs->get_guest_ip();
+
+ return instruction_pointer(regs);
+}
+
+unsigned long perf_misc_flags(struct pt_regs *regs)
+{
+ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+ int misc = 0;
+
+ if (guest_cbs && guest_cbs->is_in_guest()) {
+ if (guest_cbs->is_user_mode())
+ misc |= PERF_RECORD_MISC_GUEST_USER;
+ else
+ misc |= PERF_RECORD_MISC_GUEST_KERNEL;
+ } else {
+ if (user_mode(regs))
+ misc |= PERF_RECORD_MISC_USER;
+ else
+ misc |= PERF_RECORD_MISC_KERNEL;
+ }
+
+ return misc;
+}
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
new file mode 100644
index 000000000..cdb3d4549
--- /dev/null
+++ b/arch/arm64/kernel/perf_event.c
@@ -0,0 +1,1311 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ARMv8 PMUv3 Performance Events handling code.
+ *
+ * Copyright (C) 2012 ARM Limited
+ * Author: Will Deacon <will.deacon@arm.com>
+ *
+ * This code is based heavily on the ARMv7 perf event code.
+ */
+
+#include <asm/irq_regs.h>
+#include <asm/perf_event.h>
+#include <asm/sysreg.h>
+#include <asm/virt.h>
+
+#include <clocksource/arm_arch_timer.h>
+
+#include <linux/acpi.h>
+#include <linux/clocksource.h>
+#include <linux/kvm_host.h>
+#include <linux/of.h>
+#include <linux/perf/arm_pmu.h>
+#include <linux/platform_device.h>
+#include <linux/sched_clock.h>
+#include <linux/smp.h>
+
+/* ARMv8 Cortex-A53 specific event types. */
+#define ARMV8_A53_PERFCTR_PREF_LINEFILL 0xC2
+
+/* ARMv8 Cavium ThunderX specific event types. */
+#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST 0xE9
+#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS 0xEA
+#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS 0xEB
+#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS 0xEC
+#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS 0xED
+
+/*
+ * ARMv8 Architectural defined events, not all of these may
+ * be supported on any given implementation. Unsupported events will
+ * be disabled at run-time based on the PMCEID registers.
+ */
+static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
+ PERF_MAP_ALL_UNSUPPORTED,
+ [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CPU_CYCLES,
+ [PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INST_RETIRED,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
+ [PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED,
+ [PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
+ [PERF_COUNT_HW_BUS_CYCLES] = ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV8_PMUV3_PERFCTR_STALL_FRONTEND,
+ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV8_PMUV3_PERFCTR_STALL_BACKEND,
+};
+
+static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ PERF_CACHE_MAP_ALL_UNSUPPORTED,
+
+ [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
+ [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
+
+ [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE,
+ [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL,
+
+ [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL,
+ [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_TLB,
+
+ [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL,
+ [C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB,
+
+ [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD,
+ [C(LL)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_LL_CACHE_RD,
+
+ [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_BR_PRED,
+ [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
+};
+
+static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ PERF_CACHE_MAP_ALL_UNSUPPORTED,
+
+ [C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_A53_PERFCTR_PREF_LINEFILL,
+
+ [C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
+ [C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
+};
+
+static const unsigned armv8_a57_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ PERF_CACHE_MAP_ALL_UNSUPPORTED,
+
+ [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
+ [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
+ [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
+ [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,
+
+ [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
+ [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,
+
+ [C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
+ [C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
+};
+
+static const unsigned armv8_a73_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ PERF_CACHE_MAP_ALL_UNSUPPORTED,
+
+ [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
+ [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
+};
+
+static const unsigned armv8_thunder_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ PERF_CACHE_MAP_ALL_UNSUPPORTED,
+
+ [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
+ [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
+ [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
+ [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST,
+ [C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS,
+ [C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS,
+
+ [C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS,
+ [C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS,
+
+ [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
+ [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
+ [C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
+ [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,
+};
+
+static const unsigned armv8_vulcan_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ PERF_CACHE_MAP_ALL_UNSUPPORTED,
+
+ [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
+ [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
+ [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
+ [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,
+
+ [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
+ [C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
+ [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
+ [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,
+
+ [C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
+ [C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
+};
+
+static ssize_t
+armv8pmu_events_sysfs_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ struct perf_pmu_events_attr *pmu_attr;
+
+ pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+
+ return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
+}
+
+#define ARMV8_EVENT_ATTR(name, config) \
+ (&((struct perf_pmu_events_attr) { \
+ .attr = __ATTR(name, 0444, armv8pmu_events_sysfs_show, NULL), \
+ .id = config, \
+ }).attr.attr)
+
+static struct attribute *armv8_pmuv3_event_attrs[] = {
+ ARMV8_EVENT_ATTR(sw_incr, ARMV8_PMUV3_PERFCTR_SW_INCR),
+ ARMV8_EVENT_ATTR(l1i_cache_refill, ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL),
+ ARMV8_EVENT_ATTR(l1i_tlb_refill, ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL),
+ ARMV8_EVENT_ATTR(l1d_cache_refill, ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL),
+ ARMV8_EVENT_ATTR(l1d_cache, ARMV8_PMUV3_PERFCTR_L1D_CACHE),
+ ARMV8_EVENT_ATTR(l1d_tlb_refill, ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL),
+ ARMV8_EVENT_ATTR(ld_retired, ARMV8_PMUV3_PERFCTR_LD_RETIRED),
+ ARMV8_EVENT_ATTR(st_retired, ARMV8_PMUV3_PERFCTR_ST_RETIRED),
+ ARMV8_EVENT_ATTR(inst_retired, ARMV8_PMUV3_PERFCTR_INST_RETIRED),
+ ARMV8_EVENT_ATTR(exc_taken, ARMV8_PMUV3_PERFCTR_EXC_TAKEN),
+ ARMV8_EVENT_ATTR(exc_return, ARMV8_PMUV3_PERFCTR_EXC_RETURN),
+ ARMV8_EVENT_ATTR(cid_write_retired, ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED),
+ ARMV8_EVENT_ATTR(pc_write_retired, ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED),
+ ARMV8_EVENT_ATTR(br_immed_retired, ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED),
+ ARMV8_EVENT_ATTR(br_return_retired, ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED),
+ ARMV8_EVENT_ATTR(unaligned_ldst_retired, ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED),
+ ARMV8_EVENT_ATTR(br_mis_pred, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED),
+ ARMV8_EVENT_ATTR(cpu_cycles, ARMV8_PMUV3_PERFCTR_CPU_CYCLES),
+ ARMV8_EVENT_ATTR(br_pred, ARMV8_PMUV3_PERFCTR_BR_PRED),
+ ARMV8_EVENT_ATTR(mem_access, ARMV8_PMUV3_PERFCTR_MEM_ACCESS),
+ ARMV8_EVENT_ATTR(l1i_cache, ARMV8_PMUV3_PERFCTR_L1I_CACHE),
+ ARMV8_EVENT_ATTR(l1d_cache_wb, ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB),
+ ARMV8_EVENT_ATTR(l2d_cache, ARMV8_PMUV3_PERFCTR_L2D_CACHE),
+ ARMV8_EVENT_ATTR(l2d_cache_refill, ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL),
+ ARMV8_EVENT_ATTR(l2d_cache_wb, ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB),
+ ARMV8_EVENT_ATTR(bus_access, ARMV8_PMUV3_PERFCTR_BUS_ACCESS),
+ ARMV8_EVENT_ATTR(memory_error, ARMV8_PMUV3_PERFCTR_MEMORY_ERROR),
+ ARMV8_EVENT_ATTR(inst_spec, ARMV8_PMUV3_PERFCTR_INST_SPEC),
+ ARMV8_EVENT_ATTR(ttbr_write_retired, ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED),
+ ARMV8_EVENT_ATTR(bus_cycles, ARMV8_PMUV3_PERFCTR_BUS_CYCLES),
+ /* Don't expose the chain event in /sys, since it's useless in isolation */
+ ARMV8_EVENT_ATTR(l1d_cache_allocate, ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE),
+ ARMV8_EVENT_ATTR(l2d_cache_allocate, ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE),
+ ARMV8_EVENT_ATTR(br_retired, ARMV8_PMUV3_PERFCTR_BR_RETIRED),
+ ARMV8_EVENT_ATTR(br_mis_pred_retired, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED),
+ ARMV8_EVENT_ATTR(stall_frontend, ARMV8_PMUV3_PERFCTR_STALL_FRONTEND),
+ ARMV8_EVENT_ATTR(stall_backend, ARMV8_PMUV3_PERFCTR_STALL_BACKEND),
+ ARMV8_EVENT_ATTR(l1d_tlb, ARMV8_PMUV3_PERFCTR_L1D_TLB),
+ ARMV8_EVENT_ATTR(l1i_tlb, ARMV8_PMUV3_PERFCTR_L1I_TLB),
+ ARMV8_EVENT_ATTR(l2i_cache, ARMV8_PMUV3_PERFCTR_L2I_CACHE),
+ ARMV8_EVENT_ATTR(l2i_cache_refill, ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL),
+ ARMV8_EVENT_ATTR(l3d_cache_allocate, ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE),
+ ARMV8_EVENT_ATTR(l3d_cache_refill, ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL),
+ ARMV8_EVENT_ATTR(l3d_cache, ARMV8_PMUV3_PERFCTR_L3D_CACHE),
+ ARMV8_EVENT_ATTR(l3d_cache_wb, ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB),
+ ARMV8_EVENT_ATTR(l2d_tlb_refill, ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL),
+ ARMV8_EVENT_ATTR(l2i_tlb_refill, ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL),
+ ARMV8_EVENT_ATTR(l2d_tlb, ARMV8_PMUV3_PERFCTR_L2D_TLB),
+ ARMV8_EVENT_ATTR(l2i_tlb, ARMV8_PMUV3_PERFCTR_L2I_TLB),
+ ARMV8_EVENT_ATTR(remote_access, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS),
+ ARMV8_EVENT_ATTR(ll_cache, ARMV8_PMUV3_PERFCTR_LL_CACHE),
+ ARMV8_EVENT_ATTR(ll_cache_miss, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS),
+ ARMV8_EVENT_ATTR(dtlb_walk, ARMV8_PMUV3_PERFCTR_DTLB_WALK),
+ ARMV8_EVENT_ATTR(itlb_walk, ARMV8_PMUV3_PERFCTR_ITLB_WALK),
+ ARMV8_EVENT_ATTR(ll_cache_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_RD),
+ ARMV8_EVENT_ATTR(ll_cache_miss_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD),
+ ARMV8_EVENT_ATTR(remote_access_rd, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS_RD),
+ ARMV8_EVENT_ATTR(l1d_cache_lmiss_rd, ARMV8_PMUV3_PERFCTR_L1D_CACHE_LMISS_RD),
+ ARMV8_EVENT_ATTR(op_retired, ARMV8_PMUV3_PERFCTR_OP_RETIRED),
+ ARMV8_EVENT_ATTR(op_spec, ARMV8_PMUV3_PERFCTR_OP_SPEC),
+ ARMV8_EVENT_ATTR(stall, ARMV8_PMUV3_PERFCTR_STALL),
+ ARMV8_EVENT_ATTR(stall_slot_backend, ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND),
+ ARMV8_EVENT_ATTR(stall_slot_frontend, ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND),
+ ARMV8_EVENT_ATTR(stall_slot, ARMV8_PMUV3_PERFCTR_STALL_SLOT),
+ ARMV8_EVENT_ATTR(sample_pop, ARMV8_SPE_PERFCTR_SAMPLE_POP),
+ ARMV8_EVENT_ATTR(sample_feed, ARMV8_SPE_PERFCTR_SAMPLE_FEED),
+ ARMV8_EVENT_ATTR(sample_filtrate, ARMV8_SPE_PERFCTR_SAMPLE_FILTRATE),
+ ARMV8_EVENT_ATTR(sample_collision, ARMV8_SPE_PERFCTR_SAMPLE_COLLISION),
+ ARMV8_EVENT_ATTR(cnt_cycles, ARMV8_AMU_PERFCTR_CNT_CYCLES),
+ ARMV8_EVENT_ATTR(stall_backend_mem, ARMV8_AMU_PERFCTR_STALL_BACKEND_MEM),
+ ARMV8_EVENT_ATTR(l1i_cache_lmiss, ARMV8_PMUV3_PERFCTR_L1I_CACHE_LMISS),
+ ARMV8_EVENT_ATTR(l2d_cache_lmiss_rd, ARMV8_PMUV3_PERFCTR_L2D_CACHE_LMISS_RD),
+ ARMV8_EVENT_ATTR(l2i_cache_lmiss, ARMV8_PMUV3_PERFCTR_L2I_CACHE_LMISS),
+ ARMV8_EVENT_ATTR(l3d_cache_lmiss_rd, ARMV8_PMUV3_PERFCTR_L3D_CACHE_LMISS_RD),
+ ARMV8_EVENT_ATTR(ldst_align_lat, ARMV8_PMUV3_PERFCTR_LDST_ALIGN_LAT),
+ ARMV8_EVENT_ATTR(ld_align_lat, ARMV8_PMUV3_PERFCTR_LD_ALIGN_LAT),
+ ARMV8_EVENT_ATTR(st_align_lat, ARMV8_PMUV3_PERFCTR_ST_ALIGN_LAT),
+ ARMV8_EVENT_ATTR(mem_access_checked, ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED),
+ ARMV8_EVENT_ATTR(mem_access_checked_rd, ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_RD),
+ ARMV8_EVENT_ATTR(mem_access_checked_wr, ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_WR),
+ NULL,
+};
+
+static umode_t
+armv8pmu_event_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int unused)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct pmu *pmu = dev_get_drvdata(dev);
+ struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
+ struct perf_pmu_events_attr *pmu_attr;
+
+ pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);
+
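+	/*
+	 * Only expose an event if this PMU advertises it in PMCEID, either in
+	 * the common range or in the extended range based at
+	 * ARMV8_PMUV3_EXT_COMMON_EVENT_BASE.
+	 */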
+ if (pmu_attr->id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
+ test_bit(pmu_attr->id, cpu_pmu->pmceid_bitmap))
+ return attr->mode;
+
+ if (pmu_attr->id >= ARMV8_PMUV3_EXT_COMMON_EVENT_BASE) {
+ u64 id = pmu_attr->id - ARMV8_PMUV3_EXT_COMMON_EVENT_BASE;
+
+ if (id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
+ test_bit(id, cpu_pmu->pmceid_ext_bitmap))
+ return attr->mode;
+ }
+
+ return 0;
+}
+
+static struct attribute_group armv8_pmuv3_events_attr_group = {
+ .name = "events",
+ .attrs = armv8_pmuv3_event_attrs,
+ .is_visible = armv8pmu_event_attr_is_visible,
+};
+
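+/*
+ * The event number is taken from config bits 0-15; bit 0 of config1
+ * requests a 64-bit ("long") event counter (see armv8pmu_event_is_64bit()).
+ */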
+PMU_FORMAT_ATTR(event, "config:0-15");
+PMU_FORMAT_ATTR(long, "config1:0");
+
+static inline bool armv8pmu_event_is_64bit(struct perf_event *event)
+{
+ return event->attr.config1 & 0x1;
+}
+
+static struct attribute *armv8_pmuv3_format_attrs[] = {
+ &format_attr_event.attr,
+ &format_attr_long.attr,
+ NULL,
+};
+
+static struct attribute_group armv8_pmuv3_format_attr_group = {
+ .name = "format",
+ .attrs = armv8_pmuv3_format_attrs,
+};
+
+static ssize_t slots_show(struct device *dev, struct device_attribute *attr,
+ char *page)
+{
+ struct pmu *pmu = dev_get_drvdata(dev);
+ struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
+ u32 slots = cpu_pmu->reg_pmmir & ARMV8_PMU_SLOTS_MASK;
+
+ return sysfs_emit(page, "0x%08x\n", slots);
+}
+
+static DEVICE_ATTR_RO(slots);
+
+static struct attribute *armv8_pmuv3_caps_attrs[] = {
+ &dev_attr_slots.attr,
+ NULL,
+};
+
+static struct attribute_group armv8_pmuv3_caps_attr_group = {
+ .name = "caps",
+ .attrs = armv8_pmuv3_caps_attrs,
+};
+
+/*
+ * Perf Events' indices
+ */
+#define ARMV8_IDX_CYCLE_COUNTER 0
+#define ARMV8_IDX_COUNTER0 1
+
+/*
+ * We unconditionally enable ARMv8.5-PMU long event counter support
+ * (64-bit events) where supported. Indicate if this arm_pmu has long
+ * event counter support.
+ */
+static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu)
+{
+ return (cpu_pmu->pmuver >= ID_AA64DFR0_PMUVER_8_5);
+}
+
+/*
+ * We must chain two programmable counters for 64 bit events,
+ * except when we have allocated the 64bit cycle counter (for CPU
+ * cycles event). This must be called only when the event has
+ * a counter allocated.
+ */
+static inline bool armv8pmu_event_is_chained(struct perf_event *event)
+{
+ int idx = event->hw.idx;
+ struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+
+ return !WARN_ON(idx < 0) &&
+ armv8pmu_event_is_64bit(event) &&
+ !armv8pmu_has_long_event(cpu_pmu) &&
+ (idx != ARMV8_IDX_CYCLE_COUNTER);
+}
+
+/*
+ * ARMv8 low level PMU access
+ */
+
+/*
+ * Perf event index to low-level counter mapping: the cycle counter is
+ * exposed at perf idx 0, so hardware event counter n sits at perf idx n + 1.
+ */
+#define ARMV8_IDX_TO_COUNTER(x) \
+ (((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)
+
+/*
+ * The PMEVCNTRn_EL0/PMEVTYPERn_EL0 register names must be compile-time
+ * constants for read_sysreg()/write_sysreg(), so expand a switch over all
+ * 31 possible counter indices instead of computing the name at run time.
+ */
+
+#define PMEVN_CASE(n, case_macro) \
+ case n: case_macro(n); break
+
+#define PMEVN_SWITCH(x, case_macro) \
+ do { \
+ switch (x) { \
+ PMEVN_CASE(0, case_macro); \
+ PMEVN_CASE(1, case_macro); \
+ PMEVN_CASE(2, case_macro); \
+ PMEVN_CASE(3, case_macro); \
+ PMEVN_CASE(4, case_macro); \
+ PMEVN_CASE(5, case_macro); \
+ PMEVN_CASE(6, case_macro); \
+ PMEVN_CASE(7, case_macro); \
+ PMEVN_CASE(8, case_macro); \
+ PMEVN_CASE(9, case_macro); \
+ PMEVN_CASE(10, case_macro); \
+ PMEVN_CASE(11, case_macro); \
+ PMEVN_CASE(12, case_macro); \
+ PMEVN_CASE(13, case_macro); \
+ PMEVN_CASE(14, case_macro); \
+ PMEVN_CASE(15, case_macro); \
+ PMEVN_CASE(16, case_macro); \
+ PMEVN_CASE(17, case_macro); \
+ PMEVN_CASE(18, case_macro); \
+ PMEVN_CASE(19, case_macro); \
+ PMEVN_CASE(20, case_macro); \
+ PMEVN_CASE(21, case_macro); \
+ PMEVN_CASE(22, case_macro); \
+ PMEVN_CASE(23, case_macro); \
+ PMEVN_CASE(24, case_macro); \
+ PMEVN_CASE(25, case_macro); \
+ PMEVN_CASE(26, case_macro); \
+ PMEVN_CASE(27, case_macro); \
+ PMEVN_CASE(28, case_macro); \
+ PMEVN_CASE(29, case_macro); \
+ PMEVN_CASE(30, case_macro); \
+ default: WARN(1, "Invalid PMEV* index\n"); \
+ } \
+ } while (0)
+
+#define RETURN_READ_PMEVCNTRN(n) \
+ return read_sysreg(pmevcntr##n##_el0)
+static unsigned long read_pmevcntrn(int n)
+{
+ PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
+ return 0;
+}
+
+#define WRITE_PMEVCNTRN(n) \
+ write_sysreg(val, pmevcntr##n##_el0)
+static void write_pmevcntrn(int n, unsigned long val)
+{
+ PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
+}
+
+#define WRITE_PMEVTYPERN(n) \
+ write_sysreg(val, pmevtyper##n##_el0)
+static void write_pmevtypern(int n, unsigned long val)
+{
+ PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
+}
+
+static inline u32 armv8pmu_pmcr_read(void)
+{
+ return read_sysreg(pmcr_el0);
+}
+
+static inline void armv8pmu_pmcr_write(u32 val)
+{
+ val &= ARMV8_PMU_PMCR_MASK;
+ isb();
+ write_sysreg(val, pmcr_el0);
+}
+
+static inline int armv8pmu_has_overflowed(u32 pmovsr)
+{
+ return pmovsr & ARMV8_PMU_OVERFLOWED_MASK;
+}
+
+static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
+{
+ return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
+}
+
+static inline u64 armv8pmu_read_evcntr(int idx)
+{
+ u32 counter = ARMV8_IDX_TO_COUNTER(idx);
+
+ return read_pmevcntrn(counter);
+}
+
+static inline u64 armv8pmu_read_hw_counter(struct perf_event *event)
+{
+ int idx = event->hw.idx;
+ u64 val = 0;
+
+ val = armv8pmu_read_evcntr(idx);
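+	/*
+	 * For a chained event, idx is the high (odd) counter and idx - 1
+	 * holds the low 32 bits.
+	 */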
+ if (armv8pmu_event_is_chained(event))
+ val = (val << 32) | armv8pmu_read_evcntr(idx - 1);
+ return val;
+}
+
+/*
+ * The cycle counter is always a 64-bit counter. When ARMV8_PMU_PMCR_LP
+ * is set the event counters also become 64-bit counters. Unless the
+ * user has requested a long counter (attr.config1) then we want to
+ * interrupt upon 32-bit overflow - we achieve this by applying a bias.
+ */
+static bool armv8pmu_event_needs_bias(struct perf_event *event)
+{
+ struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ if (armv8pmu_event_is_64bit(event))
+ return false;
+
+ if (armv8pmu_has_long_event(cpu_pmu) ||
+ idx == ARMV8_IDX_CYCLE_COUNTER)
+ return true;
+
+ return false;
+}
+
+static u64 armv8pmu_bias_long_counter(struct perf_event *event, u64 value)
+{
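+	/*
+	 * Setting the upper 32 bits means an overflow of the low 32 bits
+	 * overflows the whole 64-bit counter and raises the interrupt.
+	 */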
+ if (armv8pmu_event_needs_bias(event))
+ value |= GENMASK(63, 32);
+
+ return value;
+}
+
+static u64 armv8pmu_unbias_long_counter(struct perf_event *event, u64 value)
+{
+ if (armv8pmu_event_needs_bias(event))
+ value &= ~GENMASK(63, 32);
+
+ return value;
+}
+
+static u64 armv8pmu_read_counter(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+ u64 value = 0;
+
+ if (idx == ARMV8_IDX_CYCLE_COUNTER)
+ value = read_sysreg(pmccntr_el0);
+ else
+ value = armv8pmu_read_hw_counter(event);
+
+ return armv8pmu_unbias_long_counter(event, value);
+}
+
+static inline void armv8pmu_write_evcntr(int idx, u64 value)
+{
+ u32 counter = ARMV8_IDX_TO_COUNTER(idx);
+
+ write_pmevcntrn(counter, value);
+}
+
+static inline void armv8pmu_write_hw_counter(struct perf_event *event,
+ u64 value)
+{
+ int idx = event->hw.idx;
+
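+	/* For a chained event, the high half goes to the odd counter and the low half to the even counter below it */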
+ if (armv8pmu_event_is_chained(event)) {
+ armv8pmu_write_evcntr(idx, upper_32_bits(value));
+ armv8pmu_write_evcntr(idx - 1, lower_32_bits(value));
+ } else {
+ armv8pmu_write_evcntr(idx, value);
+ }
+}
+
+static void armv8pmu_write_counter(struct perf_event *event, u64 value)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ value = armv8pmu_bias_long_counter(event, value);
+
+ if (idx == ARMV8_IDX_CYCLE_COUNTER)
+ write_sysreg(value, pmccntr_el0);
+ else
+ armv8pmu_write_hw_counter(event, value);
+}
+
+static inline void armv8pmu_write_evtype(int idx, u32 val)
+{
+ u32 counter = ARMV8_IDX_TO_COUNTER(idx);
+
+ val &= ARMV8_PMU_EVTYPE_MASK;
+ write_pmevtypern(counter, val);
+}
+
+static inline void armv8pmu_write_event_type(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ /*
+ * For chained events, the low counter is programmed to count
+ * the event of interest and the high counter is programmed
+ * with CHAIN event code with filters set to count at all ELs.
+ */
+ if (armv8pmu_event_is_chained(event)) {
+ u32 chain_evt = ARMV8_PMUV3_PERFCTR_CHAIN |
+ ARMV8_PMU_INCLUDE_EL2;
+
+ armv8pmu_write_evtype(idx - 1, hwc->config_base);
+ armv8pmu_write_evtype(idx, chain_evt);
+ } else {
+ if (idx == ARMV8_IDX_CYCLE_COUNTER)
+ write_sysreg(hwc->config_base, pmccfiltr_el0);
+ else
+ armv8pmu_write_evtype(idx, hwc->config_base);
+ }
+}
+
+static u32 armv8pmu_event_cnten_mask(struct perf_event *event)
+{
+ int counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
+ u32 mask = BIT(counter);
+
+ if (armv8pmu_event_is_chained(event))
+ mask |= BIT(counter - 1);
+ return mask;
+}
+
+static inline void armv8pmu_enable_counter(u32 mask)
+{
+ /*
+ * Make sure event configuration register writes are visible before we
+ * enable the counter.
+	 */
+ isb();
+ write_sysreg(mask, pmcntenset_el0);
+}
+
+static inline void armv8pmu_enable_event_counter(struct perf_event *event)
+{
+ struct perf_event_attr *attr = &event->attr;
+ u32 mask = armv8pmu_event_cnten_mask(event);
+
+ kvm_set_pmu_events(mask, attr);
+
+ /* We rely on the hypervisor switch code to enable guest counters */
+ if (!kvm_pmu_counter_deferred(attr))
+ armv8pmu_enable_counter(mask);
+}
+
+static inline void armv8pmu_disable_counter(u32 mask)
+{
+ write_sysreg(mask, pmcntenclr_el0);
+ /*
+ * Make sure the effects of disabling the counter are visible before we
+ * start configuring the event.
+ */
+ isb();
+}
+
+static inline void armv8pmu_disable_event_counter(struct perf_event *event)
+{
+ struct perf_event_attr *attr = &event->attr;
+ u32 mask = armv8pmu_event_cnten_mask(event);
+
+ kvm_clr_pmu_events(mask);
+
+ /* We rely on the hypervisor switch code to disable guest counters */
+ if (!kvm_pmu_counter_deferred(attr))
+ armv8pmu_disable_counter(mask);
+}
+
+static inline void armv8pmu_enable_intens(u32 mask)
+{
+ write_sysreg(mask, pmintenset_el1);
+}
+
+static inline void armv8pmu_enable_event_irq(struct perf_event *event)
+{
+ u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
+ armv8pmu_enable_intens(BIT(counter));
+}
+
+static inline void armv8pmu_disable_intens(u32 mask)
+{
+ write_sysreg(mask, pmintenclr_el1);
+ isb();
+ /* Clear the overflow flag in case an interrupt is pending. */
+ write_sysreg(mask, pmovsclr_el0);
+ isb();
+}
+
+static inline void armv8pmu_disable_event_irq(struct perf_event *event)
+{
+ u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
+ armv8pmu_disable_intens(BIT(counter));
+}
+
+static inline u32 armv8pmu_getreset_flags(void)
+{
+ u32 value;
+
+ /* Read */
+ value = read_sysreg(pmovsclr_el0);
+
+ /* Write to clear flags */
+ value &= ARMV8_PMU_OVSR_MASK;
+ write_sysreg(value, pmovsclr_el0);
+
+ return value;
+}
+
+static void armv8pmu_enable_event(struct perf_event *event)
+{
+ /*
+ * Enable counter and interrupt, and set the counter to count
+ * the event that we're interested in.
+ */
+
+	/* Disable counter */
+	armv8pmu_disable_event_counter(event);
+
+	/* Set event */
+	armv8pmu_write_event_type(event);
+
+	/* Enable interrupt for this counter */
+	armv8pmu_enable_event_irq(event);
+
+	/* Enable counter */
+	armv8pmu_enable_event_counter(event);
+}
+
+static void armv8pmu_disable_event(struct perf_event *event)
+{
+	/* Disable counter */
+	armv8pmu_disable_event_counter(event);
+
+	/* Disable interrupt for this counter */
+	armv8pmu_disable_event_irq(event);
+}
+
+static void armv8pmu_start(struct arm_pmu *cpu_pmu)
+{
+ /* Enable all counters */
+ armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
+}
+
+static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
+{
+ /* Disable all counters */
+ armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
+}
+
+static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
+{
+ u32 pmovsr;
+ struct perf_sample_data data;
+ struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
+ struct pt_regs *regs;
+ int idx;
+
+ /*
+ * Get and reset the IRQ flags
+ */
+ pmovsr = armv8pmu_getreset_flags();
+
+ /*
+ * Did an overflow occur?
+ */
+ if (!armv8pmu_has_overflowed(pmovsr))
+ return IRQ_NONE;
+
+ /*
+ * Handle the counter(s) overflow(s)
+ */
+ regs = get_irq_regs();
+
+ /*
+ * Stop the PMU while processing the counter overflows
+ * to prevent skews in group events.
+ */
+ armv8pmu_stop(cpu_pmu);
+ for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
+ struct perf_event *event = cpuc->events[idx];
+ struct hw_perf_event *hwc;
+
+ /* Ignore if we don't have an event. */
+ if (!event)
+ continue;
+
+ /*
+ * We have a single interrupt for all counters. Check that
+ * each counter has overflowed before we process it.
+ */
+ if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
+ continue;
+
+ hwc = &event->hw;
+ armpmu_event_update(event);
+ perf_sample_data_init(&data, 0, hwc->last_period);
+ if (!armpmu_event_set_period(event))
+ continue;
+
+ /*
+ * Perf event overflow will queue the processing of the event as
+ * an irq_work which will be taken care of in the handling of
+ * IPI_IRQ_WORK.
+ */
+ if (perf_event_overflow(event, &data, regs))
+ cpu_pmu->disable(event);
+ }
+ armv8pmu_start(cpu_pmu);
+
+ return IRQ_HANDLED;
+}
+
+static int armv8pmu_get_single_idx(struct pmu_hw_events *cpuc,
+ struct arm_pmu *cpu_pmu)
+{
+ int idx;
+
+	for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; idx++) {
+ if (!test_and_set_bit(idx, cpuc->used_mask))
+ return idx;
+ }
+ return -EAGAIN;
+}
+
+static int armv8pmu_get_chain_idx(struct pmu_hw_events *cpuc,
+ struct arm_pmu *cpu_pmu)
+{
+ int idx;
+
+ /*
+ * Chaining requires two consecutive event counters, where
+ * the lower idx must be even.
+ */
+ for (idx = ARMV8_IDX_COUNTER0 + 1; idx < cpu_pmu->num_events; idx += 2) {
+ if (!test_and_set_bit(idx, cpuc->used_mask)) {
+ /* Check if the preceding even counter is available */
+ if (!test_and_set_bit(idx - 1, cpuc->used_mask))
+ return idx;
+			/* Release the odd counter */
+ clear_bit(idx, cpuc->used_mask);
+ }
+ }
+ return -EAGAIN;
+}
+
+static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT;
+
+ /* Always prefer to place a cycle counter into the cycle counter. */
+ if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) {
+ if (!test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
+ return ARMV8_IDX_CYCLE_COUNTER;
+ }
+
+ /*
+	 * Otherwise use the event counters
+ */
+ if (armv8pmu_event_is_64bit(event) &&
+ !armv8pmu_has_long_event(cpu_pmu))
+ return armv8pmu_get_chain_idx(cpuc, cpu_pmu);
+ else
+ return armv8pmu_get_single_idx(cpuc, cpu_pmu);
+}
+
+static void armv8pmu_clear_event_idx(struct pmu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ int idx = event->hw.idx;
+
+ clear_bit(idx, cpuc->used_mask);
+ if (armv8pmu_event_is_chained(event))
+ clear_bit(idx - 1, cpuc->used_mask);
+}
+
+/*
+ * Add an event filter to a given event.
+ */
+static int armv8pmu_set_event_filter(struct hw_perf_event *event,
+ struct perf_event_attr *attr)
+{
+ unsigned long config_base = 0;
+
+ if (attr->exclude_idle)
+ return -EPERM;
+
+ /*
+ * If we're running in hyp mode, then we *are* the hypervisor.
+ * Therefore we ignore exclude_hv in this configuration, since
+ * there's no hypervisor to sample anyway. This is consistent
+ * with other architectures (x86 and Power).
+ */
+ if (is_kernel_in_hyp_mode()) {
+ if (!attr->exclude_kernel && !attr->exclude_host)
+ config_base |= ARMV8_PMU_INCLUDE_EL2;
+ if (attr->exclude_guest)
+ config_base |= ARMV8_PMU_EXCLUDE_EL1;
+ if (attr->exclude_host)
+ config_base |= ARMV8_PMU_EXCLUDE_EL0;
+ } else {
+ if (!attr->exclude_hv && !attr->exclude_host)
+ config_base |= ARMV8_PMU_INCLUDE_EL2;
+ }
+
+ /*
+ * Filter out !VHE kernels and guest kernels
+ */
+ if (attr->exclude_kernel)
+ config_base |= ARMV8_PMU_EXCLUDE_EL1;
+
+ if (attr->exclude_user)
+ config_base |= ARMV8_PMU_EXCLUDE_EL0;
+
+ /*
+ * Install the filter into config_base as this is used to
+ * construct the event type.
+ */
+ event->config_base = config_base;
+
+ return 0;
+}
+
+static int armv8pmu_filter_match(struct perf_event *event)
+{
+ unsigned long evtype = event->hw.config_base & ARMV8_PMU_EVTYPE_EVENT;
+ return evtype != ARMV8_PMUV3_PERFCTR_CHAIN;
+}
+
+static void armv8pmu_reset(void *info)
+{
+ struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
+ u32 pmcr;
+
+ /* The counter and interrupt enable registers are unknown at reset. */
+ armv8pmu_disable_counter(U32_MAX);
+ armv8pmu_disable_intens(U32_MAX);
+
+ /* Clear the counters we flip at guest entry/exit */
+ kvm_clr_pmu_events(U32_MAX);
+
+ /*
+ * Initialize & Reset PMNC. Request overflow interrupt for
+ * 64 bit cycle counter but cheat in armv8pmu_write_counter().
+ */
+ pmcr = ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_LC;
+
+ /* Enable long event counter support where available */
+ if (armv8pmu_has_long_event(cpu_pmu))
+ pmcr |= ARMV8_PMU_PMCR_LP;
+
+ armv8pmu_pmcr_write(pmcr);
+}
+
+static int __armv8_pmuv3_map_event(struct perf_event *event,
+ const unsigned (*extra_event_map)
+ [PERF_COUNT_HW_MAX],
+ const unsigned (*extra_cache_map)
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX])
+{
+ int hw_event_id;
+ struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+
+ hw_event_id = armpmu_map_event(event, &armv8_pmuv3_perf_map,
+ &armv8_pmuv3_perf_cache_map,
+ ARMV8_PMU_EVTYPE_EVENT);
+
+ if (armv8pmu_event_is_64bit(event))
+ event->hw.flags |= ARMPMU_EVT_64BIT;
+
+ /* Only expose micro/arch events supported by this PMU */
+ if ((hw_event_id > 0) && (hw_event_id < ARMV8_PMUV3_MAX_COMMON_EVENTS)
+ && test_bit(hw_event_id, armpmu->pmceid_bitmap)) {
+ return hw_event_id;
+ }
+
+ return armpmu_map_event(event, extra_event_map, extra_cache_map,
+ ARMV8_PMU_EVTYPE_EVENT);
+}
+
+static int armv8_pmuv3_map_event(struct perf_event *event)
+{
+ return __armv8_pmuv3_map_event(event, NULL, NULL);
+}
+
+static int armv8_a53_map_event(struct perf_event *event)
+{
+ return __armv8_pmuv3_map_event(event, NULL, &armv8_a53_perf_cache_map);
+}
+
+static int armv8_a57_map_event(struct perf_event *event)
+{
+ return __armv8_pmuv3_map_event(event, NULL, &armv8_a57_perf_cache_map);
+}
+
+static int armv8_a73_map_event(struct perf_event *event)
+{
+ return __armv8_pmuv3_map_event(event, NULL, &armv8_a73_perf_cache_map);
+}
+
+static int armv8_thunder_map_event(struct perf_event *event)
+{
+ return __armv8_pmuv3_map_event(event, NULL,
+ &armv8_thunder_perf_cache_map);
+}
+
+static int armv8_vulcan_map_event(struct perf_event *event)
+{
+ return __armv8_pmuv3_map_event(event, NULL,
+ &armv8_vulcan_perf_cache_map);
+}
+
+struct armv8pmu_probe_info {
+ struct arm_pmu *pmu;
+ bool present;
+};
+
+static void __armv8pmu_probe_pmu(void *info)
+{
+ struct armv8pmu_probe_info *probe = info;
+ struct arm_pmu *cpu_pmu = probe->pmu;
+ u64 dfr0;
+ u64 pmceid_raw[2];
+ u32 pmceid[2];
+ int pmuver;
+
+ dfr0 = read_sysreg(id_aa64dfr0_el1);
+ pmuver = cpuid_feature_extract_unsigned_field(dfr0,
+ ID_AA64DFR0_PMUVER_SHIFT);
+ if (pmuver == 0xf || pmuver == 0)
+ return;
+
+ cpu_pmu->pmuver = pmuver;
+ probe->present = true;
+
+	/* Read the number of CNTx counters supported from PMNC */
+ cpu_pmu->num_events = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT)
+ & ARMV8_PMU_PMCR_N_MASK;
+
+ /* Add the CPU cycles counter */
+ cpu_pmu->num_events += 1;
+
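+	/*
+	 * The low 32 bits of PMCEID0/1 describe the common events; the high
+	 * 32 bits describe the extended common events based at
+	 * ARMV8_PMUV3_EXT_COMMON_EVENT_BASE.
+	 */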
+ pmceid[0] = pmceid_raw[0] = read_sysreg(pmceid0_el0);
+ pmceid[1] = pmceid_raw[1] = read_sysreg(pmceid1_el0);
+
+ bitmap_from_arr32(cpu_pmu->pmceid_bitmap,
+ pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
+
+ pmceid[0] = pmceid_raw[0] >> 32;
+ pmceid[1] = pmceid_raw[1] >> 32;
+
+ bitmap_from_arr32(cpu_pmu->pmceid_ext_bitmap,
+ pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
+
+ /* store PMMIR_EL1 register for sysfs */
+ if (pmuver >= ID_AA64DFR0_PMUVER_8_4 && (pmceid_raw[1] & BIT(31)))
+ cpu_pmu->reg_pmmir = read_cpuid(PMMIR_EL1);
+ else
+ cpu_pmu->reg_pmmir = 0;
+}
+
+static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
+{
+ struct armv8pmu_probe_info probe = {
+ .pmu = cpu_pmu,
+ .present = false,
+ };
+ int ret;
+
+ ret = smp_call_function_any(&cpu_pmu->supported_cpus,
+ __armv8pmu_probe_pmu,
+ &probe, 1);
+ if (ret)
+ return ret;
+
+ return probe.present ? 0 : -ENODEV;
+}
+
+static int armv8_pmu_init(struct arm_pmu *cpu_pmu, char *name,
+ int (*map_event)(struct perf_event *event),
+ const struct attribute_group *events,
+ const struct attribute_group *format,
+ const struct attribute_group *caps)
+{
+ int ret = armv8pmu_probe_pmu(cpu_pmu);
+ if (ret)
+ return ret;
+
+ cpu_pmu->handle_irq = armv8pmu_handle_irq;
+ cpu_pmu->enable = armv8pmu_enable_event;
+ cpu_pmu->disable = armv8pmu_disable_event;
+ cpu_pmu->read_counter = armv8pmu_read_counter;
+ cpu_pmu->write_counter = armv8pmu_write_counter;
+ cpu_pmu->get_event_idx = armv8pmu_get_event_idx;
+ cpu_pmu->clear_event_idx = armv8pmu_clear_event_idx;
+ cpu_pmu->start = armv8pmu_start;
+ cpu_pmu->stop = armv8pmu_stop;
+ cpu_pmu->reset = armv8pmu_reset;
+ cpu_pmu->set_event_filter = armv8pmu_set_event_filter;
+ cpu_pmu->filter_match = armv8pmu_filter_match;
+
+ cpu_pmu->name = name;
+ cpu_pmu->map_event = map_event;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = events ?
+ events : &armv8_pmuv3_events_attr_group;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = format ?
+ format : &armv8_pmuv3_format_attr_group;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_CAPS] = caps ?
+ caps : &armv8_pmuv3_caps_attr_group;
+
+ return 0;
+}
+
+static int armv8_pmu_init_nogroups(struct arm_pmu *cpu_pmu, char *name,
+ int (*map_event)(struct perf_event *event))
+{
+ return armv8_pmu_init(cpu_pmu, name, map_event, NULL, NULL, NULL);
+}
+
+static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu)
+{
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_pmuv3",
+ armv8_pmuv3_map_event);
+}
+
+static int armv8_a34_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a34",
+ armv8_pmuv3_map_event);
+}
+
+static int armv8_a35_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a35",
+ armv8_a53_map_event);
+}
+
+static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a53",
+ armv8_a53_map_event);
+}
+
+static int armv8_a55_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a55",
+ armv8_pmuv3_map_event);
+}
+
+static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a57",
+ armv8_a57_map_event);
+}
+
+static int armv8_a65_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a65",
+ armv8_pmuv3_map_event);
+}
+
+static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a72",
+ armv8_a57_map_event);
+}
+
+static int armv8_a73_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a73",
+ armv8_a73_map_event);
+}
+
+static int armv8_a75_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a75",
+ armv8_pmuv3_map_event);
+}
+
+static int armv8_a76_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a76",
+ armv8_pmuv3_map_event);
+}
+
+static int armv8_a77_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a77",
+ armv8_pmuv3_map_event);
+}
+
+static int armv8_e1_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_neoverse_e1",
+ armv8_pmuv3_map_event);
+}
+
+static int armv8_n1_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_neoverse_n1",
+ armv8_pmuv3_map_event);
+}
+
+static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cavium_thunder",
+ armv8_thunder_map_event);
+}
+
+static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_brcm_vulcan",
+ armv8_vulcan_map_event);
+}
+
+static const struct of_device_id armv8_pmu_of_device_ids[] = {
+ {.compatible = "arm,armv8-pmuv3", .data = armv8_pmuv3_init},
+ {.compatible = "arm,cortex-a34-pmu", .data = armv8_a34_pmu_init},
+ {.compatible = "arm,cortex-a35-pmu", .data = armv8_a35_pmu_init},
+ {.compatible = "arm,cortex-a53-pmu", .data = armv8_a53_pmu_init},
+ {.compatible = "arm,cortex-a55-pmu", .data = armv8_a55_pmu_init},
+ {.compatible = "arm,cortex-a57-pmu", .data = armv8_a57_pmu_init},
+ {.compatible = "arm,cortex-a65-pmu", .data = armv8_a65_pmu_init},
+ {.compatible = "arm,cortex-a72-pmu", .data = armv8_a72_pmu_init},
+ {.compatible = "arm,cortex-a73-pmu", .data = armv8_a73_pmu_init},
+ {.compatible = "arm,cortex-a75-pmu", .data = armv8_a75_pmu_init},
+ {.compatible = "arm,cortex-a76-pmu", .data = armv8_a76_pmu_init},
+ {.compatible = "arm,cortex-a77-pmu", .data = armv8_a77_pmu_init},
+ {.compatible = "arm,neoverse-e1-pmu", .data = armv8_e1_pmu_init},
+ {.compatible = "arm,neoverse-n1-pmu", .data = armv8_n1_pmu_init},
+ {.compatible = "cavium,thunder-pmu", .data = armv8_thunder_pmu_init},
+ {.compatible = "brcm,vulcan-pmu", .data = armv8_vulcan_pmu_init},
+ {},
+};
+
+static int armv8_pmu_device_probe(struct platform_device *pdev)
+{
+ return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL);
+}
+
+static struct platform_driver armv8_pmu_driver = {
+ .driver = {
+ .name = ARMV8_PMU_PDEV_NAME,
+ .of_match_table = armv8_pmu_of_device_ids,
+ .suppress_bind_attrs = true,
+ },
+ .probe = armv8_pmu_device_probe,
+};
+
+static int __init armv8_pmu_driver_init(void)
+{
+ if (acpi_disabled)
+ return platform_driver_register(&armv8_pmu_driver);
+ else
+ return arm_pmu_acpi_probe(armv8_pmuv3_init);
+}
+device_initcall(armv8_pmu_driver_init)
+
+void arch_perf_update_userpage(struct perf_event *event,
+ struct perf_event_mmap_page *userpg, u64 now)
+{
+ struct clock_read_data *rd;
+ unsigned int seq;
+ u64 ns;
+
+ userpg->cap_user_time = 0;
+ userpg->cap_user_time_zero = 0;
+ userpg->cap_user_time_short = 0;
+
+ do {
+ rd = sched_clock_read_begin(&seq);
+
+ if (rd->read_sched_clock != arch_timer_read_counter)
+ return;
+
+ userpg->time_mult = rd->mult;
+ userpg->time_shift = rd->shift;
+ userpg->time_zero = rd->epoch_ns;
+ userpg->time_cycles = rd->epoch_cyc;
+ userpg->time_mask = rd->sched_clock_mask;
+
+ /*
+ * Subtract the cycle base, such that software that
+ * doesn't know about cap_user_time_short still 'works'
+ * assuming no wraps.
+ */
+ ns = mul_u64_u32_shr(rd->epoch_cyc, rd->mult, rd->shift);
+ userpg->time_zero -= ns;
+
+ } while (sched_clock_read_retry(seq));
+
+ userpg->time_offset = userpg->time_zero - now;
+
+ /*
+ * time_shift is not expected to be greater than 31 due to
+ * the original published conversion algorithm shifting a
+	 * 32-bit value (now specifies a 64-bit value) - see the
+	 * perf_event_mmap_page documentation in perf_event.h.
+ */
+ if (userpg->time_shift == 32) {
+ userpg->time_shift = 31;
+ userpg->time_mult >>= 1;
+ }
+
+ /*
+ * Internal timekeeping for enabled/running/stopped times
+ * is always computed with the sched_clock.
+ */
+ userpg->cap_user_time = 1;
+ userpg->cap_user_time_zero = 1;
+ userpg->cap_user_time_short = 1;
+}
diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c
new file mode 100644
index 000000000..f6f58e626
--- /dev/null
+++ b/arch/arm64/kernel/perf_regs.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/compat.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/perf_event.h>
+#include <linux/bug.h>
+#include <linux/sched/task_stack.h>
+
+#include <asm/perf_regs.h>
+#include <asm/ptrace.h>
+
+u64 perf_reg_value(struct pt_regs *regs, int idx)
+{
+ if (WARN_ON_ONCE((u32)idx >= PERF_REG_ARM64_MAX))
+ return 0;
+
+ /*
+ * Our handling of compat tasks (PERF_SAMPLE_REGS_ABI_32) is weird, but
+ * we're stuck with it for ABI compatibility reasons.
+ *
+ * For a 32-bit consumer inspecting a 32-bit task, then it will look at
+ * the first 16 registers (see arch/arm/include/uapi/asm/perf_regs.h).
+ * These correspond directly to a prefix of the registers saved in our
+ * 'struct pt_regs', with the exception of the PC, so we copy that down
+ * (x15 corresponds to SP_hyp in the architecture).
+ *
+ * So far, so good.
+ *
+ * The oddity arises when a 64-bit consumer looks at a 32-bit task and
+ * asks for registers beyond PERF_REG_ARM_MAX. In this case, we return
+ * SP_usr, LR_usr and PC in the positions where the AArch64 SP, LR and
+ * PC registers would normally live. The initial idea was to allow a
+ * 64-bit unwinder to unwind a 32-bit task and, although it's not clear
+ * how well that works in practice, somebody might be relying on it.
+ *
+ * At the time we make a sample, we don't know whether the consumer is
+ * 32-bit or 64-bit, so we have to cater for both possibilities.
+ */
+ if (compat_user_mode(regs)) {
+ if ((u32)idx == PERF_REG_ARM64_SP)
+ return regs->compat_sp;
+ if ((u32)idx == PERF_REG_ARM64_LR)
+ return regs->compat_lr;
+ if (idx == 15)
+ return regs->pc;
+ }
+
+ if ((u32)idx == PERF_REG_ARM64_SP)
+ return regs->sp;
+
+ if ((u32)idx == PERF_REG_ARM64_PC)
+ return regs->pc;
+
+ return regs->regs[idx];
+}
+
+#define REG_RESERVED (~((1ULL << PERF_REG_ARM64_MAX) - 1))
+
+int perf_reg_validate(u64 mask)
+{
+ if (!mask || mask & REG_RESERVED)
+ return -EINVAL;
+
+ return 0;
+}
+
+u64 perf_reg_abi(struct task_struct *task)
+{
+ if (is_compat_thread(task_thread_info(task)))
+ return PERF_SAMPLE_REGS_ABI_32;
+ else
+ return PERF_SAMPLE_REGS_ABI_64;
+}
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+ struct pt_regs *regs)
+{
+ regs_user->regs = task_pt_regs(current);
+ regs_user->abi = perf_reg_abi(current);
+}
diff --git a/arch/arm64/kernel/pointer_auth.c b/arch/arm64/kernel/pointer_auth.c
new file mode 100644
index 000000000..adb955fd9
--- /dev/null
+++ b/arch/arm64/kernel/pointer_auth.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/compat.h>
+#include <linux/errno.h>
+#include <linux/prctl.h>
+#include <linux/random.h>
+#include <linux/sched.h>
+#include <asm/cpufeature.h>
+#include <asm/pointer_auth.h>
+
+int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg)
+{
+ struct ptrauth_keys_user *keys = &tsk->thread.keys_user;
+ unsigned long addr_key_mask = PR_PAC_APIAKEY | PR_PAC_APIBKEY |
+ PR_PAC_APDAKEY | PR_PAC_APDBKEY;
+ unsigned long key_mask = addr_key_mask | PR_PAC_APGAKEY;
+
+ if (!system_supports_address_auth() && !system_supports_generic_auth())
+ return -EINVAL;
+
+ if (is_compat_thread(task_thread_info(tsk)))
+ return -EINVAL;
+
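+	/* A zero argument (re)initialises all of the keys */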
+ if (!arg) {
+ ptrauth_keys_init_user(keys);
+ return 0;
+ }
+
+ if (arg & ~key_mask)
+ return -EINVAL;
+
+ if (((arg & addr_key_mask) && !system_supports_address_auth()) ||
+ ((arg & PR_PAC_APGAKEY) && !system_supports_generic_auth()))
+ return -EINVAL;
+
+ if (arg & PR_PAC_APIAKEY)
+ get_random_bytes(&keys->apia, sizeof(keys->apia));
+ if (arg & PR_PAC_APIBKEY)
+ get_random_bytes(&keys->apib, sizeof(keys->apib));
+ if (arg & PR_PAC_APDAKEY)
+ get_random_bytes(&keys->apda, sizeof(keys->apda));
+ if (arg & PR_PAC_APDBKEY)
+ get_random_bytes(&keys->apdb, sizeof(keys->apdb));
+ if (arg & PR_PAC_APGAKEY)
+ get_random_bytes(&keys->apga, sizeof(keys->apga));
+
+ return 0;
+}
diff --git a/arch/arm64/kernel/probes/Makefile b/arch/arm64/kernel/probes/Makefile
new file mode 100644
index 000000000..8e4be92e2
--- /dev/null
+++ b/arch/arm64/kernel/probes/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_KPROBES) += kprobes.o decode-insn.o \
+ kprobes_trampoline.o \
+ simulate-insn.o
+obj-$(CONFIG_UPROBES) += uprobes.o decode-insn.o \
+ simulate-insn.o
diff --git a/arch/arm64/kernel/probes/decode-insn.c b/arch/arm64/kernel/probes/decode-insn.c
new file mode 100644
index 000000000..104101f63
--- /dev/null
+++ b/arch/arm64/kernel/probes/decode-insn.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * arch/arm64/kernel/probes/decode-insn.c
+ *
+ * Copyright (C) 2013 Linaro Limited.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <asm/insn.h>
+#include <asm/sections.h>
+
+#include "decode-insn.h"
+#include "simulate-insn.h"
+
+static bool __kprobes aarch64_insn_is_steppable(u32 insn)
+{
+ /*
+ * Branch instructions will write a new value into the PC which is
+ * likely to be relative to the XOL address and therefore invalid.
+ * Deliberate generation of an exception during stepping is also not
+ * currently safe. Lastly, MSR instructions can do any number of nasty
+ * things we can't handle during single-stepping.
+ */
+ if (aarch64_get_insn_class(insn) == AARCH64_INSN_CLS_BR_SYS) {
+ if (aarch64_insn_is_branch(insn) ||
+ aarch64_insn_is_msr_imm(insn) ||
+ aarch64_insn_is_msr_reg(insn) ||
+ aarch64_insn_is_exception(insn) ||
+ aarch64_insn_is_eret(insn) ||
+ aarch64_insn_is_eret_auth(insn))
+ return false;
+
+ /*
+ * The MRS instruction may not return a correct value when
+ * executing in the single-stepping environment. We do make one
+ * exception, for reading the DAIF bits.
+ */
+ if (aarch64_insn_is_mrs(insn))
+ return aarch64_insn_extract_system_reg(insn)
+ != AARCH64_INSN_SPCLREG_DAIF;
+
+ /*
+		 * A HINT instruction is steppable only if it is on the whitelist;
+		 * all other HINT instructions are blocked from single-stepping, as
+		 * they may cause an exception or other unintended behaviour.
+ */
+ if (aarch64_insn_is_hint(insn))
+ return aarch64_insn_is_steppable_hint(insn);
+
+ return true;
+ }
+
+ /*
+ * Instructions which load PC relative literals are not going to work
+ * when executed from an XOL slot. Instructions doing an exclusive
+ * load/store are not going to complete successfully when single-step
+ * exception handling happens in the middle of the sequence.
+ */
+ if (aarch64_insn_uses_literal(insn) ||
+ aarch64_insn_is_exclusive(insn))
+ return false;
+
+ return true;
+}
+
+/*
+ * Return:
+ *   INSN_REJECTED     If the instruction is not allowed to be probed,
+ *   INSN_GOOD         If the instruction is supported and uses an instruction slot,
+ *   INSN_GOOD_NO_SLOT If the instruction is supported but doesn't use its slot.
+ */
+enum probe_insn __kprobes
+arm_probe_decode_insn(probe_opcode_t insn, struct arch_probe_insn *api)
+{
+ /*
+ * Instructions reading or modifying the PC won't work from the XOL
+ * slot.
+ */
+ if (aarch64_insn_is_steppable(insn))
+ return INSN_GOOD;
+
+ if (aarch64_insn_is_bcond(insn)) {
+ api->handler = simulate_b_cond;
+ } else if (aarch64_insn_is_cbz(insn) ||
+ aarch64_insn_is_cbnz(insn)) {
+ api->handler = simulate_cbz_cbnz;
+ } else if (aarch64_insn_is_tbz(insn) ||
+ aarch64_insn_is_tbnz(insn)) {
+ api->handler = simulate_tbz_tbnz;
+ } else if (aarch64_insn_is_adr_adrp(insn)) {
+ api->handler = simulate_adr_adrp;
+ } else if (aarch64_insn_is_b(insn) ||
+ aarch64_insn_is_bl(insn)) {
+ api->handler = simulate_b_bl;
+ } else if (aarch64_insn_is_br(insn) ||
+ aarch64_insn_is_blr(insn) ||
+ aarch64_insn_is_ret(insn)) {
+ api->handler = simulate_br_blr_ret;
+ } else if (aarch64_insn_is_ldr_lit(insn)) {
+ api->handler = simulate_ldr_literal;
+ } else if (aarch64_insn_is_ldrsw_lit(insn)) {
+ api->handler = simulate_ldrsw_literal;
+ } else {
+ /*
+ * Instruction cannot be stepped out-of-line and we don't
+ * (yet) simulate it.
+ */
+ return INSN_REJECTED;
+ }
+
+ return INSN_GOOD_NO_SLOT;
+}
+
+#ifdef CONFIG_KPROBES
+static bool __kprobes
+is_probed_address_atomic(kprobe_opcode_t *scan_start, kprobe_opcode_t *scan_end)
+{
+ while (scan_start >= scan_end) {
+ /*
+ * atomic region starts from exclusive load and ends with
+ * exclusive store.
+ */
+ if (aarch64_insn_is_store_ex(le32_to_cpu(*scan_start)))
+ return false;
+ else if (aarch64_insn_is_load_ex(le32_to_cpu(*scan_start)))
+ return true;
+ scan_start--;
+ }
+
+ return false;
+}
+
+enum probe_insn __kprobes
+arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
+{
+ enum probe_insn decoded;
+ probe_opcode_t insn = le32_to_cpu(*addr);
+ probe_opcode_t *scan_end = NULL;
+ unsigned long size = 0, offset = 0;
+
+	/*
+	 * If there's a symbol defined in front of and near enough to
+	 * the probe address, assume it is the entry point to this
+	 * code and use it to further limit how far back we search
+	 * when determining if we're in an atomic sequence. If we could
+	 * not find any symbol, skip the atomic test altogether as we
+	 * could otherwise end up searching irrelevant text/literals.
+	 * KPROBES depends on KALLSYMS so this last case should never
+	 * happen.
+	 */
+ if (kallsyms_lookup_size_offset((unsigned long) addr, &size, &offset)) {
+ if (offset < (MAX_ATOMIC_CONTEXT_SIZE*sizeof(kprobe_opcode_t)))
+ scan_end = addr - (offset / sizeof(kprobe_opcode_t));
+ else
+ scan_end = addr - MAX_ATOMIC_CONTEXT_SIZE;
+ }
+ decoded = arm_probe_decode_insn(insn, &asi->api);
+
+ if (decoded != INSN_REJECTED && scan_end)
+ if (is_probed_address_atomic(addr - 1, scan_end))
+ return INSN_REJECTED;
+
+ return decoded;
+}
+#endif
diff --git a/arch/arm64/kernel/probes/decode-insn.h b/arch/arm64/kernel/probes/decode-insn.h
new file mode 100644
index 000000000..8b758c5a2
--- /dev/null
+++ b/arch/arm64/kernel/probes/decode-insn.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm64/kernel/probes/decode-insn.h
+ *
+ * Copyright (C) 2013 Linaro Limited.
+ */
+
+#ifndef _ARM_KERNEL_KPROBES_ARM64_H
+#define _ARM_KERNEL_KPROBES_ARM64_H
+
+#include <asm/kprobes.h>
+
+/*
+ * ARM strongly recommends a limit of 128 bytes between LoadExcl and
+ * StoreExcl instructions in a single thread of execution. So keep the
+ * max atomic context size as 32.
+ */
+#define MAX_ATOMIC_CONTEXT_SIZE (128 / sizeof(kprobe_opcode_t))
+
+enum probe_insn {
+ INSN_REJECTED,
+ INSN_GOOD_NO_SLOT,
+ INSN_GOOD,
+};
+
+#ifdef CONFIG_KPROBES
+enum probe_insn __kprobes
+arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi);
+#endif
+enum probe_insn __kprobes
+arm_probe_decode_insn(probe_opcode_t insn, struct arch_probe_insn *asi);
+
+#endif /* _ARM_KERNEL_KPROBES_ARM64_H */
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
new file mode 100644
index 000000000..798c3e78b
--- /dev/null
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -0,0 +1,471 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * arch/arm64/kernel/probes/kprobes.c
+ *
+ * Kprobes support for ARM64
+ *
+ * Copyright (C) 2013 Linaro Limited.
+ * Author: Sandeepa Prabhu <sandeepa.prabhu@linaro.org>
+ */
+#include <linux/kasan.h>
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+#include <linux/extable.h>
+#include <linux/slab.h>
+#include <linux/stop_machine.h>
+#include <linux/sched/debug.h>
+#include <linux/set_memory.h>
+#include <linux/stringify.h>
+#include <linux/vmalloc.h>
+#include <asm/traps.h>
+#include <asm/ptrace.h>
+#include <asm/cacheflush.h>
+#include <asm/debug-monitors.h>
+#include <asm/daifflags.h>
+#include <asm/system_misc.h>
+#include <asm/insn.h>
+#include <linux/uaccess.h>
+#include <asm/irq.h>
+#include <asm/sections.h>
+
+#include "decode-insn.h"
+
+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+static void __kprobes
+post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
+
+static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
+{
+ kprobe_opcode_t *addr = p->ainsn.api.insn;
+ void *addrs[] = {addr, addr + 1};
+ u32 insns[] = {p->opcode, BRK64_OPCODE_KPROBES_SS};
+
+	/* Prepare the insn slot: the original instruction followed by a BRK that traps back into the single-step handler */
+ aarch64_insn_patch_text(addrs, insns, 2);
+
+ flush_icache_range((uintptr_t)addr, (uintptr_t)(addr + MAX_INSN_SIZE));
+
+	/*
+	 * The PC must be restored to the instruction following the probe
+	 * after stepping the instruction out-of-line (XOL).
+	 */
+ p->ainsn.api.restore = (unsigned long) p->addr +
+ sizeof(kprobe_opcode_t);
+}
+
+static void __kprobes arch_prepare_simulate(struct kprobe *p)
+{
+	/* This instruction is not executed out-of-line (XOL), so there is no need to adjust the PC */
+ p->ainsn.api.restore = 0;
+}
+
+static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if (p->ainsn.api.handler)
+ p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs);
+
+ /* single step simulated, now go for post processing */
+ post_kprobe_handler(kcb, regs);
+}
+
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
+{
+ unsigned long probe_addr = (unsigned long)p->addr;
+
+ if (probe_addr & 0x3)
+ return -EINVAL;
+
+ /* copy instruction */
+ p->opcode = le32_to_cpu(*p->addr);
+
+ if (search_exception_tables(probe_addr))
+ return -EINVAL;
+
+ /* decode instruction */
+ switch (arm_kprobe_decode_insn(p->addr, &p->ainsn)) {
+ case INSN_REJECTED: /* insn not supported */
+ return -EINVAL;
+
+ case INSN_GOOD_NO_SLOT: /* insn need simulation */
+ p->ainsn.api.insn = NULL;
+ break;
+
+ case INSN_GOOD: /* instruction uses slot */
+ p->ainsn.api.insn = get_insn_slot();
+ if (!p->ainsn.api.insn)
+ return -ENOMEM;
+ break;
+ }
+
+ /* prepare the instruction */
+ if (p->ainsn.api.insn)
+ arch_prepare_ss_slot(p);
+ else
+ arch_prepare_simulate(p);
+
+ return 0;
+}
+
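+/*
+ * XOL slot pages are allocated read-only-executable; the slots are
+ * written via aarch64_insn_patch_text() in arch_prepare_ss_slot().
+ */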
+void *alloc_insn_page(void)
+{
+ return __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END,
+ GFP_KERNEL, PAGE_KERNEL_ROX, VM_FLUSH_RESET_PERMS,
+ NUMA_NO_NODE, __builtin_return_address(0));
+}
+
+/* arm kprobe: install breakpoint in text */
+void __kprobes arch_arm_kprobe(struct kprobe *p)
+{
+ void *addr = p->addr;
+ u32 insn = BRK64_OPCODE_KPROBES;
+
+ aarch64_insn_patch_text(&addr, &insn, 1);
+}
+
+/* disarm kprobe: remove breakpoint from text */
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
+{
+ void *addr = p->addr;
+
+ aarch64_insn_patch_text(&addr, &p->opcode, 1);
+}
+
+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+ if (p->ainsn.api.insn) {
+ free_insn_slot(p->ainsn.api.insn, 0);
+ p->ainsn.api.insn = NULL;
+ }
+}
+
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ kcb->prev_kprobe.kp = kprobe_running();
+ kcb->prev_kprobe.status = kcb->kprobe_status;
+}
+
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
+ kcb->kprobe_status = kcb->prev_kprobe.status;
+}
+
+static void __kprobes set_current_kprobe(struct kprobe *p)
+{
+ __this_cpu_write(current_kprobe, p);
+}
+
+/*
+ * Mask all of DAIF while executing the instruction out-of-line, to keep things
+ * simple and avoid nesting exceptions. Interrupts do have to be disabled since
+ * the kprobe state is per-CPU and doesn't get migrated.
+ */
+static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
+ struct pt_regs *regs)
+{
+ kcb->saved_irqflag = regs->pstate & DAIF_MASK;
+ regs->pstate |= DAIF_MASK;
+}
+
+static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
+ struct pt_regs *regs)
+{
+ regs->pstate &= ~DAIF_MASK;
+ regs->pstate |= kcb->saved_irqflag;
+}
+
+static void __kprobes
+set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr)
+{
+ kcb->ss_ctx.ss_pending = true;
+ kcb->ss_ctx.match_addr = addr + sizeof(kprobe_opcode_t);
+}
+
+static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb)
+{
+ kcb->ss_ctx.ss_pending = false;
+ kcb->ss_ctx.match_addr = 0;
+}
+
+static void __kprobes setup_singlestep(struct kprobe *p,
+ struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb, int reenter)
+{
+ unsigned long slot;
+
+ if (reenter) {
+ save_previous_kprobe(kcb);
+ set_current_kprobe(p);
+ kcb->kprobe_status = KPROBE_REENTER;
+ } else {
+ kcb->kprobe_status = KPROBE_HIT_SS;
+ }
+
+ if (p->ainsn.api.insn) {
+ /* prepare for single stepping */
+ slot = (unsigned long)p->ainsn.api.insn;
+
+ set_ss_context(kcb, slot); /* mark pending ss */
+ kprobes_save_local_irqflag(kcb, regs);
+ instruction_pointer_set(regs, slot);
+ } else {
+ /* insn simulation */
+ arch_simulate_insn(p, regs);
+ }
+}
+
+static int __kprobes reenter_kprobe(struct kprobe *p,
+ struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ switch (kcb->kprobe_status) {
+ case KPROBE_HIT_SSDONE:
+ case KPROBE_HIT_ACTIVE:
+ kprobes_inc_nmissed_count(p);
+ setup_singlestep(p, regs, kcb, 1);
+ break;
+ case KPROBE_HIT_SS:
+ case KPROBE_REENTER:
+ pr_warn("Unrecoverable kprobe detected.\n");
+ dump_kprobe(p);
+ BUG();
+ break;
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+
+ return 1;
+}
+
+static void __kprobes
+post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
+{
+ struct kprobe *cur = kprobe_running();
+
+ if (!cur)
+ return;
+
+ /* return addr restore if non-branching insn */
+ if (cur->ainsn.api.restore != 0)
+ instruction_pointer_set(regs, cur->ainsn.api.restore);
+
+ /* restore back original saved kprobe variables and continue */
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
+ return;
+ }
+ /* call post handler */
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ if (cur->post_handler)
+ cur->post_handler(cur, regs, 0);
+
+ reset_current_kprobe();
+}
+
+int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ switch (kcb->kprobe_status) {
+ case KPROBE_HIT_SS:
+ case KPROBE_REENTER:
+		/*
+		 * We are here because the instruction being single-stepped
+		 * caused a page fault. We reset the current kprobe, point the
+		 * ip back to the probe address and allow the page fault
+		 * handler to continue as for a normal page fault.
+		 */
+ instruction_pointer_set(regs, (unsigned long) cur->addr);
+ if (!instruction_pointer(regs))
+ BUG();
+
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
+ } else {
+ kprobes_restore_local_irqflag(kcb, regs);
+ reset_current_kprobe();
+ }
+
+ break;
+ case KPROBE_HIT_ACTIVE:
+ case KPROBE_HIT_SSDONE:
+		/*
+		 * We increment the nmissed count for accounting; the
+		 * npre/npostfault counts could also be used to account for
+		 * these specific fault cases.
+		 */
+ kprobes_inc_nmissed_count(cur);
+
+ /*
+ * We come here because instructions in the pre/post
+ * handler caused the page_fault, this could happen
+ * if handler tries to access user space by
+ * copy_from_user(), get_user() etc. Let the
+ * user-specified handler try to fix it first.
+ */
+ if (cur->fault_handler && cur->fault_handler(cur, regs, fsr))
+ return 1;
+
+ /*
+ * In case the user-specified fault handler returned
+ * zero, try to fix up.
+ */
+ if (fixup_exception(regs))
+ return 1;
+ }
+ return 0;
+}
+
+static void __kprobes kprobe_handler(struct pt_regs *regs)
+{
+ struct kprobe *p, *cur_kprobe;
+ struct kprobe_ctlblk *kcb;
+ unsigned long addr = instruction_pointer(regs);
+
+ kcb = get_kprobe_ctlblk();
+ cur_kprobe = kprobe_running();
+
+ p = get_kprobe((kprobe_opcode_t *) addr);
+
+ if (p) {
+ if (cur_kprobe) {
+ if (reenter_kprobe(p, regs, kcb))
+ return;
+ } else {
+ /* Probe hit */
+ set_current_kprobe(p);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+			/*
+			 * If we have no pre-handler or it returned 0, we continue
+			 * with normal processing. If we have a pre-handler and it
+			 * returned non-zero, it will modify the execution path and
+			 * there is no need to single-step; just reset the current
+			 * kprobe and exit.
+			 */
+ if (!p->pre_handler || !p->pre_handler(p, regs)) {
+ setup_singlestep(p, regs, kcb, 0);
+ } else
+ reset_current_kprobe();
+ }
+ }
+ /*
+ * The breakpoint instruction was removed right
+ * after we hit it. Another cpu has removed
+ * either a probepoint or a debugger breakpoint
+ * at this address. In either case, no further
+ * handling of this interrupt is appropriate.
+ * Return back to original instruction, and continue.
+ */
+}
+
+static int __kprobes
+kprobe_ss_hit(struct kprobe_ctlblk *kcb, unsigned long addr)
+{
+ if ((kcb->ss_ctx.ss_pending)
+ && (kcb->ss_ctx.match_addr == addr)) {
+ clear_ss_context(kcb); /* clear pending ss */
+ return DBG_HOOK_HANDLED;
+ }
+ /* not ours, kprobes should ignore it */
+ return DBG_HOOK_ERROR;
+}
+
+static int __kprobes
+kprobe_breakpoint_ss_handler(struct pt_regs *regs, unsigned int esr)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ int retval;
+
+ /* return error if this is not our step */
+ retval = kprobe_ss_hit(kcb, instruction_pointer(regs));
+
+ if (retval == DBG_HOOK_HANDLED) {
+ kprobes_restore_local_irqflag(kcb, regs);
+ post_kprobe_handler(kcb, regs);
+ }
+
+ return retval;
+}
+
+static struct break_hook kprobes_break_ss_hook = {
+ .imm = KPROBES_BRK_SS_IMM,
+ .fn = kprobe_breakpoint_ss_handler,
+};
+
+static int __kprobes
+kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr)
+{
+ kprobe_handler(regs);
+ return DBG_HOOK_HANDLED;
+}
+
+static struct break_hook kprobes_break_hook = {
+ .imm = KPROBES_BRK_IMM,
+ .fn = kprobe_breakpoint_handler,
+};
+
+/*
+ * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
+ * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
+ */
+int __init arch_populate_kprobe_blacklist(void)
+{
+ int ret;
+
+ ret = kprobe_add_area_blacklist((unsigned long)__entry_text_start,
+ (unsigned long)__entry_text_end);
+ if (ret)
+ return ret;
+ ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
+ (unsigned long)__irqentry_text_end);
+ if (ret)
+ return ret;
+ ret = kprobe_add_area_blacklist((unsigned long)__idmap_text_start,
+ (unsigned long)__idmap_text_end);
+ if (ret)
+ return ret;
+ ret = kprobe_add_area_blacklist((unsigned long)__hyp_text_start,
+ (unsigned long)__hyp_text_end);
+ if (ret || is_kernel_in_hyp_mode())
+ return ret;
+ ret = kprobe_add_area_blacklist((unsigned long)__hyp_idmap_text_start,
+ (unsigned long)__hyp_idmap_text_end);
+ return ret;
+}
+
+void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
+{
+ return (void *)kretprobe_trampoline_handler(regs, &kretprobe_trampoline,
+ (void *)kernel_stack_pointer(regs));
+}
+
+void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+{
+ ri->ret_addr = (kprobe_opcode_t *)regs->regs[30];
+ ri->fp = (void *)kernel_stack_pointer(regs);
+
+ /* replace return addr (x30) with trampoline */
+ regs->regs[30] = (long)&kretprobe_trampoline;
+}
+
+int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+{
+ return 0;
+}
+
+int __init arch_init_kprobes(void)
+{
+ register_kernel_break_hook(&kprobes_break_hook);
+ register_kernel_break_hook(&kprobes_break_ss_hook);
+
+ return 0;
+}
diff --git a/arch/arm64/kernel/probes/kprobes_trampoline.S b/arch/arm64/kernel/probes/kprobes_trampoline.S
new file mode 100644
index 000000000..890ca72c5
--- /dev/null
+++ b/arch/arm64/kernel/probes/kprobes_trampoline.S
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * trampoline entry and return code for kretprobes.
+ */
+
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/assembler.h>
+
+ .text
+
+ .macro save_all_base_regs
+ stp x0, x1, [sp, #S_X0]
+ stp x2, x3, [sp, #S_X2]
+ stp x4, x5, [sp, #S_X4]
+ stp x6, x7, [sp, #S_X6]
+ stp x8, x9, [sp, #S_X8]
+ stp x10, x11, [sp, #S_X10]
+ stp x12, x13, [sp, #S_X12]
+ stp x14, x15, [sp, #S_X14]
+ stp x16, x17, [sp, #S_X16]
+ stp x18, x19, [sp, #S_X18]
+ stp x20, x21, [sp, #S_X20]
+ stp x22, x23, [sp, #S_X22]
+ stp x24, x25, [sp, #S_X24]
+ stp x26, x27, [sp, #S_X26]
+ stp x28, x29, [sp, #S_X28]
+ add x0, sp, #S_FRAME_SIZE
+ stp lr, x0, [sp, #S_LR]
+ /*
+ * Construct a useful saved PSTATE
+ */
+ mrs x0, nzcv
+ mrs x1, daif
+ orr x0, x0, x1
+ mrs x1, CurrentEL
+ orr x0, x0, x1
+ mrs x1, SPSel
+ orr x0, x0, x1
+ stp xzr, x0, [sp, #S_PC]
+ .endm
+
+ .macro restore_all_base_regs
+ ldr x0, [sp, #S_PSTATE]
+ and x0, x0, #(PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT)
+ msr nzcv, x0
+ ldp x0, x1, [sp, #S_X0]
+ ldp x2, x3, [sp, #S_X2]
+ ldp x4, x5, [sp, #S_X4]
+ ldp x6, x7, [sp, #S_X6]
+ ldp x8, x9, [sp, #S_X8]
+ ldp x10, x11, [sp, #S_X10]
+ ldp x12, x13, [sp, #S_X12]
+ ldp x14, x15, [sp, #S_X14]
+ ldp x16, x17, [sp, #S_X16]
+ ldp x18, x19, [sp, #S_X18]
+ ldp x20, x21, [sp, #S_X20]
+ ldp x22, x23, [sp, #S_X22]
+ ldp x24, x25, [sp, #S_X24]
+ ldp x26, x27, [sp, #S_X26]
+ ldp x28, x29, [sp, #S_X28]
+ .endm
+
+SYM_CODE_START(kretprobe_trampoline)
+ sub sp, sp, #S_FRAME_SIZE
+
+ save_all_base_regs
+
+ mov x0, sp
+ bl trampoline_probe_handler
+ /*
+ * Replace trampoline address in lr with actual orig_ret_addr return
+ * address.
+ */
+ mov lr, x0
+
+ restore_all_base_regs
+
+ add sp, sp, #S_FRAME_SIZE
+ ret
+
+SYM_CODE_END(kretprobe_trampoline)
diff --git a/arch/arm64/kernel/probes/simulate-insn.c b/arch/arm64/kernel/probes/simulate-insn.c
new file mode 100644
index 000000000..25f67ec59
--- /dev/null
+++ b/arch/arm64/kernel/probes/simulate-insn.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * arch/arm64/kernel/probes/simulate-insn.c
+ *
+ * Copyright (C) 2013 Linaro Limited.
+ */
+
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+
+#include <asm/ptrace.h>
+
+#include "simulate-insn.h"
+
+#define bbl_displacement(insn) \
+ sign_extend32(((insn) & 0x3ffffff) << 2, 27)
+
+#define bcond_displacement(insn) \
+ sign_extend32(((insn >> 5) & 0x7ffff) << 2, 20)
+
+#define cbz_displacement(insn) \
+ sign_extend32(((insn >> 5) & 0x7ffff) << 2, 20)
+
+#define tbz_displacement(insn) \
+ sign_extend32(((insn >> 5) & 0x3fff) << 2, 15)
+
+#define ldr_displacement(insn) \
+ sign_extend32(((insn >> 5) & 0x7ffff) << 2, 20)
+
+static inline void set_x_reg(struct pt_regs *regs, int reg, u64 val)
+{
+ pt_regs_write_reg(regs, reg, val);
+}
+
+static inline void set_w_reg(struct pt_regs *regs, int reg, u64 val)
+{
+ pt_regs_write_reg(regs, reg, lower_32_bits(val));
+}
+
+static inline u64 get_x_reg(struct pt_regs *regs, int reg)
+{
+ return pt_regs_read_reg(regs, reg);
+}
+
+static inline u32 get_w_reg(struct pt_regs *regs, int reg)
+{
+ return lower_32_bits(pt_regs_read_reg(regs, reg));
+}
+
+static bool __kprobes check_cbz(u32 opcode, struct pt_regs *regs)
+{
+ int xn = opcode & 0x1f;
+
+ return (opcode & (1 << 31)) ?
+ (get_x_reg(regs, xn) == 0) : (get_w_reg(regs, xn) == 0);
+}
+
+static bool __kprobes check_cbnz(u32 opcode, struct pt_regs *regs)
+{
+ int xn = opcode & 0x1f;
+
+ return (opcode & (1 << 31)) ?
+ (get_x_reg(regs, xn) != 0) : (get_w_reg(regs, xn) != 0);
+}
+
+static bool __kprobes check_tbz(u32 opcode, struct pt_regs *regs)
+{
+ int xn = opcode & 0x1f;
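+	/* The 6-bit test position is split across bit 31 (b5) and bits 23:19 (b40) */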
+ int bit_pos = ((opcode & (1 << 31)) >> 26) | ((opcode >> 19) & 0x1f);
+
+ return ((get_x_reg(regs, xn) >> bit_pos) & 0x1) == 0;
+}
+
+static bool __kprobes check_tbnz(u32 opcode, struct pt_regs *regs)
+{
+ int xn = opcode & 0x1f;
+ int bit_pos = ((opcode & (1 << 31)) >> 26) | ((opcode >> 19) & 0x1f);
+
+ return ((get_x_reg(regs, xn) >> bit_pos) & 0x1) != 0;
+}
+
+/*
+ * instruction simulation functions
+ */
+void __kprobes
+simulate_adr_adrp(u32 opcode, long addr, struct pt_regs *regs)
+{
+ long imm, xn, val;
+
+ xn = opcode & 0x1f;
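+	/* Recombine immhi (bits 23:5) and immlo (bits 30:29) into a signed 21-bit immediate */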
+ imm = ((opcode >> 3) & 0x1ffffc) | ((opcode >> 29) & 0x3);
+ imm = sign_extend64(imm, 20);
+ if (opcode & 0x80000000)
+ val = (imm<<12) + (addr & 0xfffffffffffff000);
+ else
+ val = imm + addr;
+
+ set_x_reg(regs, xn, val);
+
+ instruction_pointer_set(regs, instruction_pointer(regs) + 4);
+}
+
+void __kprobes
+simulate_b_bl(u32 opcode, long addr, struct pt_regs *regs)
+{
+ int disp = bbl_displacement(opcode);
+
+ /* Link register is x30 */
+ if (opcode & (1 << 31))
+ set_x_reg(regs, 30, addr + 4);
+
+ instruction_pointer_set(regs, addr + disp);
+}
+
+void __kprobes
+simulate_b_cond(u32 opcode, long addr, struct pt_regs *regs)
+{
+ int disp = 4;
+
+ if (aarch32_opcode_cond_checks[opcode & 0xf](regs->pstate & 0xffffffff))
+ disp = bcond_displacement(opcode);
+
+ instruction_pointer_set(regs, addr + disp);
+}
+
+void __kprobes
+simulate_br_blr_ret(u32 opcode, long addr, struct pt_regs *regs)
+{
+ int xn = (opcode >> 5) & 0x1f;
+
+ /* update pc first in case we're doing a "blr lr" */
+ instruction_pointer_set(regs, get_x_reg(regs, xn));
+
+ /* Link register is x30 */
+ if (((opcode >> 21) & 0x3) == 1)
+ set_x_reg(regs, 30, addr + 4);
+}
+
+void __kprobes
+simulate_cbz_cbnz(u32 opcode, long addr, struct pt_regs *regs)
+{
+ int disp = 4;
+
+ if (opcode & (1 << 24)) {
+ if (check_cbnz(opcode, regs))
+ disp = cbz_displacement(opcode);
+ } else {
+ if (check_cbz(opcode, regs))
+ disp = cbz_displacement(opcode);
+ }
+ instruction_pointer_set(regs, addr + disp);
+}
+
+void __kprobes
+simulate_tbz_tbnz(u32 opcode, long addr, struct pt_regs *regs)
+{
+ int disp = 4;
+
+ if (opcode & (1 << 24)) {
+ if (check_tbnz(opcode, regs))
+ disp = tbz_displacement(opcode);
+ } else {
+ if (check_tbz(opcode, regs))
+ disp = tbz_displacement(opcode);
+ }
+ instruction_pointer_set(regs, addr + disp);
+}
+
+void __kprobes
+simulate_ldr_literal(u32 opcode, long addr, struct pt_regs *regs)
+{
+ u64 *load_addr;
+ int xn = opcode & 0x1f;
+ int disp;
+
+ disp = ldr_displacement(opcode);
+ load_addr = (u64 *) (addr + disp);
+
+ if (opcode & (1 << 30)) /* x0-x30 */
+ set_x_reg(regs, xn, *load_addr);
+ else /* w0-w30 */
+ set_w_reg(regs, xn, *load_addr);
+
+ instruction_pointer_set(regs, instruction_pointer(regs) + 4);
+}
+
+void __kprobes
+simulate_ldrsw_literal(u32 opcode, long addr, struct pt_regs *regs)
+{
+ s32 *load_addr;
+ int xn = opcode & 0x1f;
+ int disp;
+
+ disp = ldr_displacement(opcode);
+ load_addr = (s32 *) (addr + disp);
+
+ set_x_reg(regs, xn, *load_addr);
+
+ instruction_pointer_set(regs, instruction_pointer(regs) + 4);
+}
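As a worked example of the displacement macros above (illustrative only, not part of the patch), the snippet below runs two hand-assembled B opcodes through bbl_displacement(); the local sign_extend32() mirrors the kernel helper of the same name in include/linux/bitops.h.

#include <stdio.h>
#include <stdint.h>

/* Same semantics as the kernel's sign_extend32() */
static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

#define bbl_displacement(insn) \
	sign_extend32(((insn) & 0x3ffffff) << 2, 27)

int main(void)
{
	uint32_t b_fwd  = 0x14000004;	/* "b . + 16": imm26 = 4 */
	uint32_t b_back = 0x17ffffff;	/* "b . - 4":  imm26 = 0x3ffffff */

	printf("%d\n", bbl_displacement(b_fwd));	/* prints 16 */
	printf("%d\n", bbl_displacement(b_back));	/* prints -4 */
	return 0;
}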
diff --git a/arch/arm64/kernel/probes/simulate-insn.h b/arch/arm64/kernel/probes/simulate-insn.h
new file mode 100644
index 000000000..e065dc922
--- /dev/null
+++ b/arch/arm64/kernel/probes/simulate-insn.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm64/kernel/probes/simulate-insn.h
+ *
+ * Copyright (C) 2013 Linaro Limited
+ */
+
+#ifndef _ARM_KERNEL_KPROBES_SIMULATE_INSN_H
+#define _ARM_KERNEL_KPROBES_SIMULATE_INSN_H
+
+void simulate_adr_adrp(u32 opcode, long addr, struct pt_regs *regs);
+void simulate_b_bl(u32 opcode, long addr, struct pt_regs *regs);
+void simulate_b_cond(u32 opcode, long addr, struct pt_regs *regs);
+void simulate_br_blr_ret(u32 opcode, long addr, struct pt_regs *regs);
+void simulate_cbz_cbnz(u32 opcode, long addr, struct pt_regs *regs);
+void simulate_tbz_tbnz(u32 opcode, long addr, struct pt_regs *regs);
+void simulate_ldr_literal(u32 opcode, long addr, struct pt_regs *regs);
+void simulate_ldrsw_literal(u32 opcode, long addr, struct pt_regs *regs);
+
+#endif /* _ARM_KERNEL_KPROBES_SIMULATE_INSN_H */
diff --git a/arch/arm64/kernel/probes/uprobes.c b/arch/arm64/kernel/probes/uprobes.c
new file mode 100644
index 000000000..2c2476345
--- /dev/null
+++ b/arch/arm64/kernel/probes/uprobes.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014-2016 Pratyush Anand <panand@redhat.com>
+ */
+#include <linux/highmem.h>
+#include <linux/ptrace.h>
+#include <linux/uprobes.h>
+#include <asm/cacheflush.h>
+
+#include "decode-insn.h"
+
+#define UPROBE_INV_FAULT_CODE UINT_MAX
+
+void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
+ void *src, unsigned long len)
+{
+ void *xol_page_kaddr = kmap_atomic(page);
+ void *dst = xol_page_kaddr + (vaddr & ~PAGE_MASK);
+
+ /* Initialize the slot */
+ memcpy(dst, src, len);
+
+ /* flush caches (dcache/icache) */
+ sync_icache_aliases(dst, len);
+
+ kunmap_atomic(xol_page_kaddr);
+}
+
+unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
+{
+ return instruction_pointer(regs);
+}
+
+int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
+ unsigned long addr)
+{
+ probe_opcode_t insn;
+
+ /* TODO: Currently we do not support AARCH32 instruction probing */
+ if (mm->context.flags & MMCF_AARCH32)
+ return -EOPNOTSUPP;
+ else if (!IS_ALIGNED(addr, AARCH64_INSN_SIZE))
+ return -EINVAL;
+
+ insn = *(probe_opcode_t *)(&auprobe->insn[0]);
+
+ switch (arm_probe_decode_insn(insn, &auprobe->api)) {
+ case INSN_REJECTED:
+ return -EINVAL;
+
+ case INSN_GOOD_NO_SLOT:
+ auprobe->simulate = true;
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ /* Initialize with an invalid fault code to detect if ol insn trapped */
+ current->thread.fault_code = UPROBE_INV_FAULT_CODE;
+
+	/* Set the instruction pointer to the out-of-line (XOL) slot */
+ instruction_pointer_set(regs, utask->xol_vaddr);
+
+ user_enable_single_step(current);
+
+ return 0;
+}
+
+int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ WARN_ON_ONCE(current->thread.fault_code != UPROBE_INV_FAULT_CODE);
+
+	/* Set the instruction pointer to the instruction following the breakpoint */
+ instruction_pointer_set(regs, utask->vaddr + 4);
+
+ user_disable_single_step(current);
+
+ return 0;
+}
+bool arch_uprobe_xol_was_trapped(struct task_struct *t)
+{
+ /*
+	 * Between arch_uprobe_pre_xol() and arch_uprobe_post_xol(), if the
+	 * XOL insn itself traps, detect that case via the invalid fault
+	 * code set in arch_uprobe_pre_xol().
+ */
+ if (t->thread.fault_code != UPROBE_INV_FAULT_CODE)
+ return true;
+
+ return false;
+}
+
+bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ probe_opcode_t insn;
+ unsigned long addr;
+
+ if (!auprobe->simulate)
+ return false;
+
+ insn = *(probe_opcode_t *)(&auprobe->insn[0]);
+ addr = instruction_pointer(regs);
+
+ if (auprobe->api.handler)
+ auprobe->api.handler(insn, addr, regs);
+
+ return true;
+}
+
+void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ /*
+	 * Task has received a fatal signal, so reset back to the probed
+ * address.
+ */
+ instruction_pointer_set(regs, utask->vaddr);
+
+ user_disable_single_step(current);
+}
+
+bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
+ struct pt_regs *regs)
+{
+ /*
+	 * If a simple branch instruction (B) was used to reach the retprobed
+	 * assembly label, return true even when regs->sp and ret->stack are
+	 * the same. This ensures that cleanup and reporting of the return
+	 * instances corresponding to the callee label are done when
+	 * handle_trampoline() is executed for the called function.
+ */
+ if (ctx == RP_CHECK_CHAIN_CALL)
+ return regs->sp <= ret->stack;
+ else
+ return regs->sp < ret->stack;
+}
+
+unsigned long
+arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr,
+ struct pt_regs *regs)
+{
+ unsigned long orig_ret_vaddr;
+
+ orig_ret_vaddr = procedure_link_pointer(regs);
+ /* Replace the return addr with trampoline addr */
+ procedure_link_pointer_set(regs, trampoline_vaddr);
+
+ return orig_ret_vaddr;
+}
+
+int arch_uprobe_exception_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ return NOTIFY_DONE;
+}
+
+static int uprobe_breakpoint_handler(struct pt_regs *regs,
+ unsigned int esr)
+{
+ if (uprobe_pre_sstep_notifier(regs))
+ return DBG_HOOK_HANDLED;
+
+ return DBG_HOOK_ERROR;
+}
+
+static int uprobe_single_step_handler(struct pt_regs *regs,
+ unsigned int esr)
+{
+ struct uprobe_task *utask = current->utask;
+
+ WARN_ON(utask && (instruction_pointer(regs) != utask->xol_vaddr + 4));
+ if (uprobe_post_sstep_notifier(regs))
+ return DBG_HOOK_HANDLED;
+
+ return DBG_HOOK_ERROR;
+}
+
+/* uprobe breakpoint handler hook */
+static struct break_hook uprobes_break_hook = {
+ .imm = UPROBES_BRK_IMM,
+ .fn = uprobe_breakpoint_handler,
+};
+
+/* uprobe single step handler hook */
+static struct step_hook uprobes_step_hook = {
+ .fn = uprobe_single_step_handler,
+};
+
+static int __init arch_init_uprobes(void)
+{
+ register_user_break_hook(&uprobes_break_hook);
+ register_user_step_hook(&uprobes_step_hook);
+
+ return 0;
+}
+
+device_initcall(arch_init_uprobes);
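The handlers above sit behind the generic uprobes core; one common way to reach them from userspace is the uprobe_events tracefs interface. A minimal sketch follows (illustrative only): the tracefs path may be /sys/kernel/debug/tracing on older setups, and the target binary and offset are placeholders that must be taken from the real ELF (e.g. via objdump).

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* "p[:EVENT] PATH:OFFSET" per Documentation/trace/uprobetracer.rst;
	 * the path and offset below are placeholders. */
	const char *cmd = "p:my_uprobe /bin/bash:0x4245c0\n";
	int fd = open("/sys/kernel/tracing/uprobe_events",
		      O_WRONLY | O_APPEND);

	if (fd < 0) {
		perror("open uprobe_events");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write uprobe_events");
	close(fd);
	return 0;
}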
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
new file mode 100644
index 000000000..3696dbcbf
--- /dev/null
+++ b/arch/arm64/kernel/process.c
@@ -0,0 +1,741 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Based on arch/arm/kernel/process.c
+ *
+ * Original Copyright (C) 1995 Linus Torvalds
+ * Copyright (C) 1996-2000 Russell King - Converted to ARM.
+ * Copyright (C) 2012 ARM Ltd.
+ */
+
+#include <stdarg.h>
+
+#include <linux/compat.h>
+#include <linux/efi.h>
+#include <linux/elf.h>
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/task.h>
+#include <linux/sched/task_stack.h>
+#include <linux/kernel.h>
+#include <linux/lockdep.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/nospec.h>
+#include <linux/stddef.h>
+#include <linux/sysctl.h>
+#include <linux/unistd.h>
+#include <linux/user.h>
+#include <linux/delay.h>
+#include <linux/reboot.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/elfcore.h>
+#include <linux/pm.h>
+#include <linux/tick.h>
+#include <linux/utsname.h>
+#include <linux/uaccess.h>
+#include <linux/random.h>
+#include <linux/hw_breakpoint.h>
+#include <linux/personality.h>
+#include <linux/notifier.h>
+#include <trace/events/power.h>
+#include <linux/percpu.h>
+#include <linux/thread_info.h>
+#include <linux/prctl.h>
+
+#include <asm/alternative.h>
+#include <asm/arch_gicv3.h>
+#include <asm/compat.h>
+#include <asm/cpufeature.h>
+#include <asm/cacheflush.h>
+#include <asm/exec.h>
+#include <asm/fpsimd.h>
+#include <asm/mmu_context.h>
+#include <asm/mte.h>
+#include <asm/processor.h>
+#include <asm/pointer_auth.h>
+#include <asm/stacktrace.h>
+
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
+#include <linux/stackprotector.h>
+unsigned long __stack_chk_guard __ro_after_init;
+EXPORT_SYMBOL(__stack_chk_guard);
+#endif
+
+/*
+ * Function pointers to optional machine specific functions
+ */
+void (*pm_power_off)(void);
+EXPORT_SYMBOL_GPL(pm_power_off);
+
+void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
+
+static void noinstr __cpu_do_idle(void)
+{
+ dsb(sy);
+ wfi();
+}
+
+static void noinstr __cpu_do_idle_irqprio(void)
+{
+ unsigned long pmr;
+ unsigned long daif_bits;
+
+ daif_bits = read_sysreg(daif);
+ write_sysreg(daif_bits | PSR_I_BIT, daif);
+
+ /*
+ * Unmask PMR before going idle to make sure interrupts can
+ * be raised.
+ */
+ pmr = gic_read_pmr();
+ gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
+
+ __cpu_do_idle();
+
+ gic_write_pmr(pmr);
+ write_sysreg(daif_bits, daif);
+}
+
+/*
+ * cpu_do_idle()
+ *
+ * Idle the processor (wait for interrupt).
+ *
+ * If the CPU supports priority masking we must do additional work to
+ * ensure that interrupts are not masked at the PMR (because the core will
+ * not wake up if we block the wake up signal in the interrupt controller).
+ */
+void noinstr cpu_do_idle(void)
+{
+ if (system_uses_irq_prio_masking())
+ __cpu_do_idle_irqprio();
+ else
+ __cpu_do_idle();
+}
+
+/*
+ * This is our default idle handler.
+ */
+void noinstr arch_cpu_idle(void)
+{
+ /*
+ * This should do all the clock switching and wait for interrupt
+ * tricks
+ */
+ cpu_do_idle();
+ raw_local_irq_enable();
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+void arch_cpu_idle_dead(void)
+{
+ cpu_die();
+}
+#endif
+
+/*
+ * Called by kexec, immediately prior to machine_kexec().
+ *
+ * This must completely disable all secondary CPUs; simply causing those CPUs
+ * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
+ * kexec'd kernel to use any and all RAM as it sees fit, without having to
+ * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
+ * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
+ * functionality embodied in smp_shutdown_nonboot_cpus() achieves this.
+ */
+void machine_shutdown(void)
+{
+ smp_shutdown_nonboot_cpus(reboot_cpu);
+}
+
+/*
+ * Halting simply requires that the secondary CPUs stop performing any
+ * activity (executing tasks, handling interrupts). smp_send_stop()
+ * achieves this.
+ */
+void machine_halt(void)
+{
+ local_irq_disable();
+ smp_send_stop();
+ while (1);
+}
+
+/*
+ * Power-off simply requires that the secondary CPUs stop performing any
+ * activity (executing tasks, handling interrupts). smp_send_stop()
+ * achieves this. When the system power is turned off, it will take all CPUs
+ * with it.
+ */
+void machine_power_off(void)
+{
+ local_irq_disable();
+ smp_send_stop();
+ if (pm_power_off)
+ pm_power_off();
+}
+
+/*
+ * Restart requires that the secondary CPUs stop performing any activity
+ * while the primary CPU resets the system. Systems with multiple CPUs must
+ * provide a HW restart implementation, to ensure that all CPUs reset at once.
+ * This is required so that any code running after reset on the primary CPU
+ * doesn't have to co-ordinate with other CPUs to ensure they aren't still
+ * executing pre-reset code, and using RAM that the primary CPU's code wishes
+ * to use. Implementing such co-ordination would be essentially impossible.
+ */
+void machine_restart(char *cmd)
+{
+ /* Disable interrupts first */
+ local_irq_disable();
+ smp_send_stop();
+
+ /*
+ * UpdateCapsule() depends on the system being reset via
+ * ResetSystem().
+ */
+ if (efi_enabled(EFI_RUNTIME_SERVICES))
+ efi_reboot(reboot_mode, NULL);
+
+ /* Now call the architecture specific reboot code. */
+ if (arm_pm_restart)
+ arm_pm_restart(reboot_mode, cmd);
+ else
+ do_kernel_restart(cmd);
+
+ /*
+ * Whoops - the architecture was unable to reboot.
+ */
+ printk("Reboot failed -- System halted\n");
+ while (1);
+}
+
+#define bstr(suffix, str) [PSR_BTYPE_ ## suffix >> PSR_BTYPE_SHIFT] = str
+static const char *const btypes[] = {
+ bstr(NONE, "--"),
+ bstr( JC, "jc"),
+ bstr( C, "-c"),
+ bstr( J , "j-")
+};
+#undef bstr
+
+static void print_pstate(struct pt_regs *regs)
+{
+ u64 pstate = regs->pstate;
+
+ if (compat_user_mode(regs)) {
+ printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c)\n",
+ pstate,
+ pstate & PSR_AA32_N_BIT ? 'N' : 'n',
+ pstate & PSR_AA32_Z_BIT ? 'Z' : 'z',
+ pstate & PSR_AA32_C_BIT ? 'C' : 'c',
+ pstate & PSR_AA32_V_BIT ? 'V' : 'v',
+ pstate & PSR_AA32_Q_BIT ? 'Q' : 'q',
+ pstate & PSR_AA32_T_BIT ? "T32" : "A32",
+ pstate & PSR_AA32_E_BIT ? "BE" : "LE",
+ pstate & PSR_AA32_A_BIT ? 'A' : 'a',
+ pstate & PSR_AA32_I_BIT ? 'I' : 'i',
+ pstate & PSR_AA32_F_BIT ? 'F' : 'f');
+ } else {
+ const char *btype_str = btypes[(pstate & PSR_BTYPE_MASK) >>
+ PSR_BTYPE_SHIFT];
+
+ printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO %cTCO BTYPE=%s)\n",
+ pstate,
+ pstate & PSR_N_BIT ? 'N' : 'n',
+ pstate & PSR_Z_BIT ? 'Z' : 'z',
+ pstate & PSR_C_BIT ? 'C' : 'c',
+ pstate & PSR_V_BIT ? 'V' : 'v',
+ pstate & PSR_D_BIT ? 'D' : 'd',
+ pstate & PSR_A_BIT ? 'A' : 'a',
+ pstate & PSR_I_BIT ? 'I' : 'i',
+ pstate & PSR_F_BIT ? 'F' : 'f',
+ pstate & PSR_PAN_BIT ? '+' : '-',
+ pstate & PSR_UAO_BIT ? '+' : '-',
+ pstate & PSR_TCO_BIT ? '+' : '-',
+ btype_str);
+ }
+}
+
+void __show_regs(struct pt_regs *regs)
+{
+ int i, top_reg;
+ u64 lr, sp;
+
+ if (compat_user_mode(regs)) {
+ lr = regs->compat_lr;
+ sp = regs->compat_sp;
+ top_reg = 12;
+ } else {
+ lr = regs->regs[30];
+ sp = regs->sp;
+ top_reg = 29;
+ }
+
+ show_regs_print_info(KERN_DEFAULT);
+ print_pstate(regs);
+
+ if (!user_mode(regs)) {
+ printk("pc : %pS\n", (void *)regs->pc);
+ printk("lr : %pS\n", (void *)ptrauth_strip_insn_pac(lr));
+ } else {
+ printk("pc : %016llx\n", regs->pc);
+ printk("lr : %016llx\n", lr);
+ }
+
+ printk("sp : %016llx\n", sp);
+
+ if (system_uses_irq_prio_masking())
+ printk("pmr_save: %08llx\n", regs->pmr_save);
+
+ i = top_reg;
+
+ while (i >= 0) {
+ printk("x%-2d: %016llx ", i, regs->regs[i]);
+ i--;
+
+ if (i % 2 == 0) {
+ pr_cont("x%-2d: %016llx ", i, regs->regs[i]);
+ i--;
+ }
+
+ pr_cont("\n");
+ }
+}
+
+void show_regs(struct pt_regs * regs)
+{
+ __show_regs(regs);
+ dump_backtrace(regs, NULL, KERN_DEFAULT);
+}
+
+static void tls_thread_flush(void)
+{
+ write_sysreg(0, tpidr_el0);
+
+ if (is_compat_task()) {
+ current->thread.uw.tp_value = 0;
+
+ /*
+ * We need to ensure ordering between the shadow state and the
+ * hardware state, so that we don't corrupt the hardware state
+ * with a stale shadow state during context switch.
+ */
+ barrier();
+ write_sysreg(0, tpidrro_el0);
+ }
+}
+
+static void flush_tagged_addr_state(void)
+{
+ if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI))
+ clear_thread_flag(TIF_TAGGED_ADDR);
+}
+
+void flush_thread(void)
+{
+ fpsimd_flush_thread();
+ tls_thread_flush();
+ flush_ptrace_hw_breakpoint(current);
+ flush_tagged_addr_state();
+ flush_mte_state();
+}
+
+void release_thread(struct task_struct *dead_task)
+{
+}
+
+void arch_release_task_struct(struct task_struct *tsk)
+{
+ fpsimd_release_task(tsk);
+}
+
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+{
+ if (current->mm)
+ fpsimd_preserve_current_state();
+ *dst = *src;
+
+ /* We rely on the above assignment to initialize dst's thread_flags: */
+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_THREAD_INFO_IN_TASK));
+
+ /*
+ * Detach src's sve_state (if any) from dst so that it does not
+ * get erroneously used or freed prematurely. dst's sve_state
+ * will be allocated on demand later on if dst uses SVE.
+ * For consistency, also clear TIF_SVE here: this could be done
+ * later in copy_process(), but to avoid tripping up future
+ * maintainers it is best not to leave TIF_SVE and sve_state in
+ * an inconsistent state, even temporarily.
+ */
+ dst->thread.sve_state = NULL;
+ clear_tsk_thread_flag(dst, TIF_SVE);
+
+ /* clear any pending asynchronous tag fault raised by the parent */
+ clear_tsk_thread_flag(dst, TIF_MTE_ASYNC_FAULT);
+
+ return 0;
+}
+
+asmlinkage void ret_from_fork(void) asm("ret_from_fork");
+
+int copy_thread(unsigned long clone_flags, unsigned long stack_start,
+ unsigned long stk_sz, struct task_struct *p, unsigned long tls)
+{
+ struct pt_regs *childregs = task_pt_regs(p);
+
+ memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
+
+ /*
+ * In case p was allocated the same task_struct pointer as some
+ * other recently-exited task, make sure p is disassociated from
+ * any cpu that may have run that now-exited task recently.
+ * Otherwise we could erroneously skip reloading the FPSIMD
+ * registers for p.
+ */
+ fpsimd_flush_task_state(p);
+
+ ptrauth_thread_init_kernel(p);
+
+ if (likely(!(p->flags & (PF_KTHREAD | PF_IO_WORKER)))) {
+ *childregs = *current_pt_regs();
+ childregs->regs[0] = 0;
+
+ /*
+ * Read the current TLS pointer from tpidr_el0 as it may be
+ * out-of-sync with the saved value.
+ */
+ *task_user_tls(p) = read_sysreg(tpidr_el0);
+
+ if (stack_start) {
+ if (is_compat_thread(task_thread_info(p)))
+ childregs->compat_sp = stack_start;
+ else
+ childregs->sp = stack_start;
+ }
+
+ /*
+ * If a TLS pointer was passed to clone, use it for the new
+ * thread.
+ */
+ if (clone_flags & CLONE_SETTLS)
+ p->thread.uw.tp_value = tls;
+ } else {
+ memset(childregs, 0, sizeof(struct pt_regs));
+ childregs->pstate = PSR_MODE_EL1h;
+ if (IS_ENABLED(CONFIG_ARM64_UAO) &&
+ cpus_have_const_cap(ARM64_HAS_UAO))
+ childregs->pstate |= PSR_UAO_BIT;
+
+ spectre_v4_enable_task_mitigation(p);
+
+ if (system_uses_irq_prio_masking())
+ childregs->pmr_save = GIC_PRIO_IRQON;
+
+ p->thread.cpu_context.x19 = stack_start;
+ p->thread.cpu_context.x20 = stk_sz;
+ }
+ p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
+ p->thread.cpu_context.sp = (unsigned long)childregs;
+
+ ptrace_hw_copy_thread(p);
+
+ return 0;
+}
+
+void tls_preserve_current_state(void)
+{
+ *task_user_tls(current) = read_sysreg(tpidr_el0);
+}
+
+static void tls_thread_switch(struct task_struct *next)
+{
+ tls_preserve_current_state();
+
+ if (is_compat_thread(task_thread_info(next)))
+ write_sysreg(next->thread.uw.tp_value, tpidrro_el0);
+ else if (!arm64_kernel_unmapped_at_el0())
+ write_sysreg(0, tpidrro_el0);
+
+ write_sysreg(*task_user_tls(next), tpidr_el0);
+}
+
+/* Restore the UAO state depending on next's addr_limit */
+void uao_thread_switch(struct task_struct *next)
+{
+ if (IS_ENABLED(CONFIG_ARM64_UAO)) {
+ if (task_thread_info(next)->addr_limit == KERNEL_DS)
+ asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
+ else
+ asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO));
+ }
+}
+
+/*
+ * Force SSBS state on context-switch, since it may be lost after migrating
+ * from a CPU which treats the bit as RES0 in a heterogeneous system.
+ */
+static void ssbs_thread_switch(struct task_struct *next)
+{
+ /*
+ * Nothing to do for kernel threads, but 'regs' may be junk
+ * (e.g. idle task) so check the flags and bail early.
+ */
+ if (unlikely(next->flags & PF_KTHREAD))
+ return;
+
+ /*
+ * If all CPUs implement the SSBS extension, then we just need to
+ * context-switch the PSTATE field.
+ */
+ if (cpus_have_const_cap(ARM64_SSBS))
+ return;
+
+ spectre_v4_enable_task_mitigation(next);
+}
+
+/*
+ * We store our current task in sp_el0, which is clobbered by userspace. Keep a
+ * shadow copy so that we can restore this upon entry from userspace.
+ *
+ * This is *only* for exception entry from EL0, and is not valid until we
+ * __switch_to() a user task.
+ */
+DEFINE_PER_CPU(struct task_struct *, __entry_task);
+
+static void entry_task_switch(struct task_struct *next)
+{
+ __this_cpu_write(__entry_task, next);
+}
+
+/*
+ * ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT.
+ * Ensure access is disabled when switching to a 32bit task and
+ * enabled when switching to a 64bit task.
+ */
+static void erratum_1418040_thread_switch(struct task_struct *next)
+{
+ if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) ||
+ !this_cpu_has_cap(ARM64_WORKAROUND_1418040))
+ return;
+
+ if (is_compat_thread(task_thread_info(next)))
+ sysreg_clear_set(cntkctl_el1, ARCH_TIMER_USR_VCT_ACCESS_EN, 0);
+ else
+ sysreg_clear_set(cntkctl_el1, 0, ARCH_TIMER_USR_VCT_ACCESS_EN);
+}
+
+static void erratum_1418040_new_exec(void)
+{
+ preempt_disable();
+ erratum_1418040_thread_switch(current);
+ preempt_enable();
+}
+
+/*
+ * Thread switching.
+ */
+__notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
+ struct task_struct *next)
+{
+ struct task_struct *last;
+
+ fpsimd_thread_switch(next);
+ tls_thread_switch(next);
+ hw_breakpoint_thread_switch(next);
+ contextidr_thread_switch(next);
+ entry_task_switch(next);
+ uao_thread_switch(next);
+ ssbs_thread_switch(next);
+ erratum_1418040_thread_switch(next);
+
+ /*
+ * Complete any pending TLB or cache maintenance on this CPU in case
+ * the thread migrates to a different CPU.
+ * This full barrier is also required by the membarrier system
+ * call.
+ */
+ dsb(ish);
+
+ /*
+ * MTE thread switching must happen after the DSB above to ensure that
+ * any asynchronous tag check faults have been logged in the TFSR*_EL1
+ * registers.
+ */
+ mte_thread_switch(next);
+
+ /* the actual thread switch */
+ last = cpu_switch_to(prev, next);
+
+ return last;
+}
+
+unsigned long get_wchan(struct task_struct *p)
+{
+ struct stackframe frame;
+ unsigned long stack_page, ret = 0;
+ int count = 0;
+ if (!p || p == current || p->state == TASK_RUNNING)
+ return 0;
+
+ stack_page = (unsigned long)try_get_task_stack(p);
+ if (!stack_page)
+ return 0;
+
+ start_backtrace(&frame, thread_saved_fp(p), thread_saved_pc(p));
+
+ do {
+ if (unwind_frame(p, &frame))
+ goto out;
+ if (!in_sched_functions(frame.pc)) {
+ ret = frame.pc;
+ goto out;
+ }
+ } while (count ++ < 16);
+
+out:
+ put_task_stack(p);
+ return ret;
+}
+
+unsigned long arch_align_stack(unsigned long sp)
+{
+ if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+ sp -= get_random_int() & ~PAGE_MASK;
+ return sp & ~0xf;
+}
+
+/*
+ * Called from setup_new_exec() after (COMPAT_)SET_PERSONALITY.
+ */
+void arch_setup_new_exec(void)
+{
+ current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;
+
+ ptrauth_thread_init_user(current);
+ erratum_1418040_new_exec();
+
+ if (task_spec_ssb_noexec(current)) {
+ arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS,
+ PR_SPEC_ENABLE);
+ }
+}
+
+#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
+/*
+ * Control the relaxed ABI allowing tagged user addresses into the kernel.
+ */
+static unsigned int tagged_addr_disabled;
+
+long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg)
+{
+ unsigned long valid_mask = PR_TAGGED_ADDR_ENABLE;
+ struct thread_info *ti = task_thread_info(task);
+
+ if (is_compat_thread(ti))
+ return -EINVAL;
+
+ if (system_supports_mte())
+ valid_mask |= PR_MTE_TCF_MASK | PR_MTE_TAG_MASK;
+
+ if (arg & ~valid_mask)
+ return -EINVAL;
+
+ /*
+ * Do not allow the enabling of the tagged address ABI if globally
+ * disabled via sysctl abi.tagged_addr_disabled.
+ */
+ if (arg & PR_TAGGED_ADDR_ENABLE && tagged_addr_disabled)
+ return -EINVAL;
+
+ if (set_mte_ctrl(task, arg) != 0)
+ return -EINVAL;
+
+ update_ti_thread_flag(ti, TIF_TAGGED_ADDR, arg & PR_TAGGED_ADDR_ENABLE);
+
+ return 0;
+}
+
+long get_tagged_addr_ctrl(struct task_struct *task)
+{
+ long ret = 0;
+ struct thread_info *ti = task_thread_info(task);
+
+ if (is_compat_thread(ti))
+ return -EINVAL;
+
+ if (test_ti_thread_flag(ti, TIF_TAGGED_ADDR))
+ ret = PR_TAGGED_ADDR_ENABLE;
+
+ ret |= get_mte_ctrl(task);
+
+ return ret;
+}
+
+/*
+ * Global sysctl to disable the tagged user addresses support. This control
+ * only prevents the tagged address ABI enabling via prctl() and does not
+ * disable it for tasks that already opted in to the relaxed ABI.
+ */
+
+static struct ctl_table tagged_addr_sysctl_table[] = {
+ {
+ .procname = "tagged_addr_disabled",
+ .mode = 0644,
+ .data = &tagged_addr_disabled,
+ .maxlen = sizeof(int),
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+ { }
+};
+
+static int __init tagged_addr_init(void)
+{
+ if (!register_sysctl("abi", tagged_addr_sysctl_table))
+ return -EINVAL;
+ return 0;
+}
+
+core_initcall(tagged_addr_init);
+#endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */
+
+asmlinkage void __sched arm64_preempt_schedule_irq(void)
+{
+ lockdep_assert_irqs_disabled();
+
+ /*
+ * Preempting a task from an IRQ means we leave copies of PSTATE
+ * on the stack. cpufeature's enable calls may modify PSTATE, but
+ * resuming one of these preempted tasks would undo those changes.
+ *
+ * Only allow a task to be preempted once cpufeatures have been
+ * enabled.
+ */
+ if (system_capabilities_finalized())
+ preempt_schedule_irq();
+}
+
+#ifdef CONFIG_BINFMT_ELF
+int arch_elf_adjust_prot(int prot, const struct arch_elf_state *state,
+ bool has_interp, bool is_interp)
+{
+ /*
+ * For dynamically linked executables the interpreter is
+ * responsible for setting PROT_BTI on everything except
+ * itself.
+ */
+ if (is_interp != has_interp)
+ return prot;
+
+ if (!(state->flags & ARM64_ELF_BTI))
+ return prot;
+
+ if (prot & PROT_EXEC)
+ prot |= PROT_BTI;
+
+ return prot;
+}
+#endif
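The tagged address ABI handled by set_tagged_addr_ctrl()/get_tagged_addr_ctrl() above is opted into per task via prctl(). A minimal userspace sketch (illustrative only; the fallback defines cover older libc headers, and the set call fails with EINVAL when abi.tagged_addr_disabled is set):

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_TAGGED_ADDR_CTRL
#define PR_SET_TAGGED_ADDR_CTRL	55
#define PR_GET_TAGGED_ADDR_CTRL	56
#define PR_TAGGED_ADDR_ENABLE	(1UL << 0)
#endif

int main(void)
{
	/* Opt this task into the relaxed (tagged) address ABI */
	if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0))
		perror("PR_SET_TAGGED_ADDR_CTRL");

	printf("tagged addr ctrl: %d\n",
	       prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
	return 0;
}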
diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
new file mode 100644
index 000000000..9c0e9d9ee
--- /dev/null
+++ b/arch/arm64/kernel/proton-pack.c
@@ -0,0 +1,1131 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Handle detection, reporting and mitigation of Spectre v1, v2 and v4, as
+ * detailed at:
+ *
+ * https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability
+ *
+ * This code was originally written hastily under an awful lot of stress and so
+ * aspects of it are somewhat hacky. Unfortunately, changing anything in here
+ * instantly makes me feel ill. Thanks, Jann. Thann.
+ *
+ * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
+ * Copyright (C) 2020 Google LLC
+ *
+ * "If there's something strange in your neighbourhood, who you gonna call?"
+ *
+ * Authors: Will Deacon <will@kernel.org> and Marc Zyngier <maz@kernel.org>
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/bpf.h>
+#include <linux/cpu.h>
+#include <linux/device.h>
+#include <linux/nospec.h>
+#include <linux/prctl.h>
+#include <linux/sched/task_stack.h>
+
+#include <asm/insn.h>
+#include <asm/spectre.h>
+#include <asm/traps.h>
+#include <asm/vectors.h>
+#include <asm/virt.h>
+
+/*
+ * We try to ensure that the mitigation state can never change as the result of
+ * onlining a late CPU.
+ */
+static void update_mitigation_state(enum mitigation_state *oldp,
+ enum mitigation_state new)
+{
+ enum mitigation_state state;
+
+ do {
+ state = READ_ONCE(*oldp);
+ if (new <= state)
+ break;
+
+ /* Userspace almost certainly can't deal with this. */
+ if (WARN_ON(system_capabilities_finalized()))
+ break;
+ } while (cmpxchg_relaxed(oldp, state, new) != state);
+}
+
+/*
+ * Spectre v1.
+ *
+ * The kernel can't protect userspace for this one: it's each person for
+ * themselves. Advertise what we're doing and be done with it.
+ */
+ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+}
+
+/*
+ * Spectre v2.
+ *
+ * This one sucks. A CPU is either:
+ *
+ * - Mitigated in hardware and advertised by ID_AA64PFR0_EL1.CSV2.
+ * - Mitigated in hardware and listed in our "safe list".
+ * - Mitigated in software by firmware.
+ * - Mitigated in software by a CPU-specific dance in the kernel and a
+ * firmware call at EL2.
+ * - Vulnerable.
+ *
+ * It's not unlikely for different CPUs in a big.LITTLE system to fall into
+ * different camps.
+ */
+static enum mitigation_state spectre_v2_state;
+
+static bool __read_mostly __nospectre_v2;
+static int __init parse_spectre_v2_param(char *str)
+{
+ __nospectre_v2 = true;
+ return 0;
+}
+early_param("nospectre_v2", parse_spectre_v2_param);
+
+static bool spectre_v2_mitigations_off(void)
+{
+ bool ret = __nospectre_v2 || cpu_mitigations_off();
+
+ if (ret)
+ pr_info_once("spectre-v2 mitigation disabled by command line option\n");
+
+ return ret;
+}
+
+static const char *get_bhb_affected_string(enum mitigation_state bhb_state)
+{
+ switch (bhb_state) {
+ case SPECTRE_UNAFFECTED:
+ return "";
+ default:
+ case SPECTRE_VULNERABLE:
+ return ", but not BHB";
+ case SPECTRE_MITIGATED:
+ return ", BHB";
+ }
+}
+
+static bool _unprivileged_ebpf_enabled(void)
+{
+#ifdef CONFIG_BPF_SYSCALL
+ return !sysctl_unprivileged_bpf_disabled;
+#else
+ return false;
+#endif
+}
+
+ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ enum mitigation_state bhb_state = arm64_get_spectre_bhb_state();
+ const char *bhb_str = get_bhb_affected_string(bhb_state);
+ const char *v2_str = "Branch predictor hardening";
+
+ switch (spectre_v2_state) {
+ case SPECTRE_UNAFFECTED:
+ if (bhb_state == SPECTRE_UNAFFECTED)
+ return sprintf(buf, "Not affected\n");
+
+ /*
+ * Platforms affected by Spectre-BHB can't report
+ * "Not affected" for Spectre-v2.
+ */
+ v2_str = "CSV2";
+ fallthrough;
+ case SPECTRE_MITIGATED:
+ if (bhb_state == SPECTRE_MITIGATED && _unprivileged_ebpf_enabled())
+ return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n");
+
+ return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str);
+ case SPECTRE_VULNERABLE:
+ fallthrough;
+ default:
+ return sprintf(buf, "Vulnerable\n");
+ }
+}
+
+static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
+{
+ u64 pfr0;
+ static const struct midr_range spectre_v2_safe_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+ MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
+ MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
+ MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
+ MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
+ MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
+ { /* sentinel */ }
+ };
+
+ /* If the CPU has CSV2 set, we're safe */
+ pfr0 = read_cpuid(ID_AA64PFR0_EL1);
+ if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
+ return SPECTRE_UNAFFECTED;
+
+ /* Alternatively, we have a list of unaffected CPUs */
+ if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
+ return SPECTRE_UNAFFECTED;
+
+ return SPECTRE_VULNERABLE;
+}
+
+static enum mitigation_state spectre_v2_get_cpu_fw_mitigation_state(void)
+{
+ int ret;
+ struct arm_smccc_res res;
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+
+ ret = res.a0;
+ switch (ret) {
+ case SMCCC_RET_SUCCESS:
+ return SPECTRE_MITIGATED;
+ case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
+ return SPECTRE_UNAFFECTED;
+ default:
+ fallthrough;
+ case SMCCC_RET_NOT_SUPPORTED:
+ return SPECTRE_VULNERABLE;
+ }
+}
+
+bool has_spectre_v2(const struct arm64_cpu_capabilities *entry, int scope)
+{
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+
+ if (spectre_v2_get_cpu_hw_mitigation_state() == SPECTRE_UNAFFECTED)
+ return false;
+
+ if (spectre_v2_get_cpu_fw_mitigation_state() == SPECTRE_UNAFFECTED)
+ return false;
+
+ return true;
+}
+
+DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
+
+enum mitigation_state arm64_get_spectre_v2_state(void)
+{
+ return spectre_v2_state;
+}
+
+#ifdef CONFIG_KVM
+#include <asm/cacheflush.h>
+#include <asm/kvm_asm.h>
+
+atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
+
+static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
+ const char *hyp_vecs_end)
+{
+ void *dst = lm_alias(__bp_harden_hyp_vecs + slot * SZ_2K);
+ int i;
+
+ for (i = 0; i < SZ_2K; i += 0x80)
+ memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);
+
+ __flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
+}
+
+static DEFINE_RAW_SPINLOCK(bp_lock);
+static void install_bp_hardening_cb(bp_hardening_cb_t fn)
+{
+ int cpu, slot = -1;
+ const char *hyp_vecs_start = __smccc_workaround_1_smc;
+ const char *hyp_vecs_end = __smccc_workaround_1_smc +
+ __SMCCC_WORKAROUND_1_SMC_SZ;
+
+ /*
+ * Vinz Clortho takes the hyp_vecs start/end "keys" at
+ * the door when we're a guest. Skip the hyp-vectors work.
+ */
+ if (!is_hyp_mode_available()) {
+ __this_cpu_write(bp_hardening_data.fn, fn);
+ return;
+ }
+
+ raw_spin_lock(&bp_lock);
+ for_each_possible_cpu(cpu) {
+ if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
+ slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
+ break;
+ }
+ }
+
+ if (slot == -1) {
+ slot = atomic_inc_return(&arm64_el2_vector_last_slot);
+ BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
+ __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
+ }
+
+ __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
+ __this_cpu_write(bp_hardening_data.fn, fn);
+ __this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start);
+ raw_spin_unlock(&bp_lock);
+}
+#else
+static void install_bp_hardening_cb(bp_hardening_cb_t fn)
+{
+ __this_cpu_write(bp_hardening_data.fn, fn);
+}
+#endif /* CONFIG_KVM */
+
+static void call_smc_arch_workaround_1(void)
+{
+ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
+}
+
+static void call_hvc_arch_workaround_1(void)
+{
+ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
+}
+
+static void qcom_link_stack_sanitisation(void)
+{
+ u64 tmp;
+
+ asm volatile("mov %0, x30 \n"
+ ".rept 16 \n"
+ "bl . + 4 \n"
+ ".endr \n"
+ "mov x30, %0 \n"
+ : "=&r" (tmp));
+}
+
+static bp_hardening_cb_t spectre_v2_get_sw_mitigation_cb(void)
+{
+ u32 midr = read_cpuid_id();
+ if (((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR) &&
+ ((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR_V1))
+ return NULL;
+
+ return qcom_link_stack_sanitisation;
+}
+
+static enum mitigation_state spectre_v2_enable_fw_mitigation(void)
+{
+ bp_hardening_cb_t cb;
+ enum mitigation_state state;
+
+ state = spectre_v2_get_cpu_fw_mitigation_state();
+ if (state != SPECTRE_MITIGATED)
+ return state;
+
+ if (spectre_v2_mitigations_off())
+ return SPECTRE_VULNERABLE;
+
+ switch (arm_smccc_1_1_get_conduit()) {
+ case SMCCC_CONDUIT_HVC:
+ cb = call_hvc_arch_workaround_1;
+ break;
+
+ case SMCCC_CONDUIT_SMC:
+ cb = call_smc_arch_workaround_1;
+ break;
+
+ default:
+ return SPECTRE_VULNERABLE;
+ }
+
+ /*
+ * Prefer a CPU-specific workaround if it exists. Note that we
+ * still rely on firmware for the mitigation at EL2.
+ */
+ cb = spectre_v2_get_sw_mitigation_cb() ?: cb;
+ install_bp_hardening_cb(cb);
+ return SPECTRE_MITIGATED;
+}
+
+void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
+{
+ enum mitigation_state state;
+
+ WARN_ON(preemptible());
+
+ state = spectre_v2_get_cpu_hw_mitigation_state();
+ if (state == SPECTRE_VULNERABLE)
+ state = spectre_v2_enable_fw_mitigation();
+
+ update_mitigation_state(&spectre_v2_state, state);
+}
+
+/*
+ * Spectre v4.
+ *
+ * If you thought Spectre v2 was nasty, wait until you see this mess. A CPU is
+ * either:
+ *
+ * - Mitigated in hardware and listed in our "safe list".
+ * - Mitigated in hardware via PSTATE.SSBS.
+ * - Mitigated in software by firmware (sometimes referred to as SSBD).
+ *
+ * Wait, that doesn't sound so bad, does it? Keep reading...
+ *
+ * A major source of headaches is that the software mitigation can be enabled
+ * on a per-task basis, but can also be forced on for the kernel, necessitating
+ * both context-switch *and* entry/exit hooks. To make it even worse, some CPUs
+ * allow EL0 to toggle SSBS directly, which can end up with the prctl() state
+ * being stale when re-entering the kernel. The usual big.LITTLE caveats apply,
+ * so you can have systems that have both firmware and SSBS mitigations. This
+ * means we actually have to reject late onlining of CPUs with mitigations if
+ * all of the currently onlined CPUs are safelisted, as the mitigation tends to
+ * be opt-in for userspace. Yes, really, the cure is worse than the disease.
+ *
+ * The only good part is that if the firmware mitigation is present, then it is
+ * present for all CPUs, meaning we don't have to worry about late onlining of a
+ * vulnerable CPU if one of the boot CPUs is using the firmware mitigation.
+ *
+ * Give me a VAX-11/780 any day of the week...
+ */
+static enum mitigation_state spectre_v4_state;
+
+/* This is the per-cpu state tracking whether we need to talk to firmware */
+DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
+
+enum spectre_v4_policy {
+ SPECTRE_V4_POLICY_MITIGATION_DYNAMIC,
+ SPECTRE_V4_POLICY_MITIGATION_ENABLED,
+ SPECTRE_V4_POLICY_MITIGATION_DISABLED,
+};
+
+static enum spectre_v4_policy __read_mostly __spectre_v4_policy;
+
+static const struct spectre_v4_param {
+ const char *str;
+ enum spectre_v4_policy policy;
+} spectre_v4_params[] = {
+ { "force-on", SPECTRE_V4_POLICY_MITIGATION_ENABLED, },
+ { "force-off", SPECTRE_V4_POLICY_MITIGATION_DISABLED, },
+ { "kernel", SPECTRE_V4_POLICY_MITIGATION_DYNAMIC, },
+};
+static int __init parse_spectre_v4_param(char *str)
+{
+ int i;
+
+ if (!str || !str[0])
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(spectre_v4_params); i++) {
+ const struct spectre_v4_param *param = &spectre_v4_params[i];
+
+ if (strncmp(str, param->str, strlen(param->str)))
+ continue;
+
+ __spectre_v4_policy = param->policy;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+early_param("ssbd", parse_spectre_v4_param);
+
+/*
+ * Because this was all written in a rush by people working in different silos,
+ * we've ended up with multiple command line options to control the same thing.
+ * Wrap these up in some helpers, which prefer disabling the mitigation if faced
+ * with contradictory parameters. The mitigation is always either "off",
+ * "dynamic" or "on".
+ */
+static bool spectre_v4_mitigations_off(void)
+{
+ bool ret = cpu_mitigations_off() ||
+ __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DISABLED;
+
+ if (ret)
+ pr_info_once("spectre-v4 mitigation disabled by command-line option\n");
+
+ return ret;
+}
+
+/* Do we need to toggle the mitigation state on entry to/exit from the kernel? */
+static bool spectre_v4_mitigations_dynamic(void)
+{
+ return !spectre_v4_mitigations_off() &&
+ __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DYNAMIC;
+}
+
+static bool spectre_v4_mitigations_on(void)
+{
+ return !spectre_v4_mitigations_off() &&
+ __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_ENABLED;
+}
+
+ssize_t cpu_show_spec_store_bypass(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ switch (spectre_v4_state) {
+ case SPECTRE_UNAFFECTED:
+ return sprintf(buf, "Not affected\n");
+ case SPECTRE_MITIGATED:
+ return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
+ case SPECTRE_VULNERABLE:
+ fallthrough;
+ default:
+ return sprintf(buf, "Vulnerable\n");
+ }
+}
+
+enum mitigation_state arm64_get_spectre_v4_state(void)
+{
+ return spectre_v4_state;
+}
+
+static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void)
+{
+ static const struct midr_range spectre_v4_safe_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+ MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
+ MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
+ MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
+ { /* sentinel */ },
+ };
+
+ if (is_midr_in_range_list(read_cpuid_id(), spectre_v4_safe_list))
+ return SPECTRE_UNAFFECTED;
+
+ /* CPU features are detected first */
+ if (this_cpu_has_cap(ARM64_SSBS))
+ return SPECTRE_MITIGATED;
+
+ return SPECTRE_VULNERABLE;
+}
+
+static enum mitigation_state spectre_v4_get_cpu_fw_mitigation_state(void)
+{
+ int ret;
+ struct arm_smccc_res res;
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_WORKAROUND_2, &res);
+
+ ret = res.a0;
+ switch (ret) {
+ case SMCCC_RET_SUCCESS:
+ return SPECTRE_MITIGATED;
+ case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
+ fallthrough;
+ case SMCCC_RET_NOT_REQUIRED:
+ return SPECTRE_UNAFFECTED;
+ default:
+ fallthrough;
+ case SMCCC_RET_NOT_SUPPORTED:
+ return SPECTRE_VULNERABLE;
+ }
+}
+
+bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope)
+{
+ enum mitigation_state state;
+
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+
+ state = spectre_v4_get_cpu_hw_mitigation_state();
+ if (state == SPECTRE_VULNERABLE)
+ state = spectre_v4_get_cpu_fw_mitigation_state();
+
+ return state != SPECTRE_UNAFFECTED;
+}
+
+bool try_emulate_el1_ssbs(struct pt_regs *regs, u32 instr)
+{
+ const u32 instr_mask = ~(1U << PSTATE_Imm_shift);
+ const u32 instr_val = 0xd500401f | PSTATE_SSBS;
+
+ if ((instr & instr_mask) != instr_val)
+ return false;
+
+ if (instr & BIT(PSTATE_Imm_shift))
+ regs->pstate |= PSR_SSBS_BIT;
+ else
+ regs->pstate &= ~PSR_SSBS_BIT;
+
+ arm64_skip_faulting_instruction(regs, 4);
+ return true;
+}
+
+static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
+{
+ enum mitigation_state state;
+
+ /*
+ * If the system is mitigated but this CPU doesn't have SSBS, then
+ * we must be on the safelist and there's nothing more to do.
+ */
+ state = spectre_v4_get_cpu_hw_mitigation_state();
+ if (state != SPECTRE_MITIGATED || !this_cpu_has_cap(ARM64_SSBS))
+ return state;
+
+ if (spectre_v4_mitigations_off()) {
+ sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
+ asm volatile(SET_PSTATE_SSBS(1));
+ return SPECTRE_VULNERABLE;
+ }
+
+ /* SCTLR_EL1.DSSBS was initialised to 0 during boot */
+ asm volatile(SET_PSTATE_SSBS(0));
+ return SPECTRE_MITIGATED;
+}
+
+/*
+ * Patch a branch over the Spectre-v4 mitigation code with a NOP so that
+ * we fall through and check whether firmware needs to be called on this CPU.
+ */
+void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
+ __le32 *origptr,
+ __le32 *updptr, int nr_inst)
+{
+ BUG_ON(nr_inst != 1); /* Branch -> NOP */
+
+ if (spectre_v4_mitigations_off())
+ return;
+
+ if (cpus_have_final_cap(ARM64_SSBS))
+ return;
+
+ if (spectre_v4_mitigations_dynamic())
+ *updptr = cpu_to_le32(aarch64_insn_gen_nop());
+}
+
+/*
+ * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction
+ * to call into firmware to adjust the mitigation state.
+ */
+void __init smccc_patch_fw_mitigation_conduit(struct alt_instr *alt,
+ __le32 *origptr,
+ __le32 *updptr, int nr_inst)
+{
+ u32 insn;
+
+ BUG_ON(nr_inst != 1); /* NOP -> HVC/SMC */
+
+ switch (arm_smccc_1_1_get_conduit()) {
+ case SMCCC_CONDUIT_HVC:
+ insn = aarch64_insn_get_hvc_value();
+ break;
+ case SMCCC_CONDUIT_SMC:
+ insn = aarch64_insn_get_smc_value();
+ break;
+ default:
+ return;
+ }
+
+ *updptr = cpu_to_le32(insn);
+}
+
+static enum mitigation_state spectre_v4_enable_fw_mitigation(void)
+{
+ enum mitigation_state state;
+
+ state = spectre_v4_get_cpu_fw_mitigation_state();
+ if (state != SPECTRE_MITIGATED)
+ return state;
+
+ if (spectre_v4_mitigations_off()) {
+ arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, false, NULL);
+ return SPECTRE_VULNERABLE;
+ }
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, true, NULL);
+
+ if (spectre_v4_mitigations_dynamic())
+ __this_cpu_write(arm64_ssbd_callback_required, 1);
+
+ return SPECTRE_MITIGATED;
+}
+
+void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
+{
+ enum mitigation_state state;
+
+ WARN_ON(preemptible());
+
+ state = spectre_v4_enable_hw_mitigation();
+ if (state == SPECTRE_VULNERABLE)
+ state = spectre_v4_enable_fw_mitigation();
+
+ update_mitigation_state(&spectre_v4_state, state);
+}
+
+static void __update_pstate_ssbs(struct pt_regs *regs, bool state)
+{
+ u64 bit = compat_user_mode(regs) ? PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
+
+ if (state)
+ regs->pstate |= bit;
+ else
+ regs->pstate &= ~bit;
+}
+
+void spectre_v4_enable_task_mitigation(struct task_struct *tsk)
+{
+ struct pt_regs *regs = task_pt_regs(tsk);
+ bool ssbs = false, kthread = tsk->flags & PF_KTHREAD;
+
+ if (spectre_v4_mitigations_off())
+ ssbs = true;
+ else if (spectre_v4_mitigations_dynamic() && !kthread)
+ ssbs = !test_tsk_thread_flag(tsk, TIF_SSBD);
+
+ __update_pstate_ssbs(regs, ssbs);
+}
+
+/*
+ * The Spectre-v4 mitigation can be controlled via a prctl() from userspace.
+ * This is interesting because the "speculation disabled" behaviour can be
+ * configured so that it is preserved across exec(), which means that the
+ * prctl() may be necessary even when PSTATE.SSBS can be toggled directly
+ * from userspace.
+ */
+static void ssbd_prctl_enable_mitigation(struct task_struct *task)
+{
+ task_clear_spec_ssb_noexec(task);
+ task_set_spec_ssb_disable(task);
+ set_tsk_thread_flag(task, TIF_SSBD);
+}
+
+static void ssbd_prctl_disable_mitigation(struct task_struct *task)
+{
+ task_clear_spec_ssb_noexec(task);
+ task_clear_spec_ssb_disable(task);
+ clear_tsk_thread_flag(task, TIF_SSBD);
+}
+
+static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
+{
+ switch (ctrl) {
+ case PR_SPEC_ENABLE:
+ /* Enable speculation: disable mitigation */
+ /*
+ * Force disabled speculation prevents it from being
+ * re-enabled.
+ */
+ if (task_spec_ssb_force_disable(task))
+ return -EPERM;
+
+ /*
+ * If the mitigation is forced on, then speculation is forced
+ * off and we again prevent it from being re-enabled.
+ */
+ if (spectre_v4_mitigations_on())
+ return -EPERM;
+
+ ssbd_prctl_disable_mitigation(task);
+ break;
+ case PR_SPEC_FORCE_DISABLE:
+ /* Force disable speculation: force enable mitigation */
+ /*
+ * If the mitigation is forced off, then speculation is forced
+ * on and we prevent it from being disabled.
+ */
+ if (spectre_v4_mitigations_off())
+ return -EPERM;
+
+ task_set_spec_ssb_force_disable(task);
+ fallthrough;
+ case PR_SPEC_DISABLE:
+ /* Disable speculation: enable mitigation */
+ /* Same as PR_SPEC_FORCE_DISABLE */
+ if (spectre_v4_mitigations_off())
+ return -EPERM;
+
+ ssbd_prctl_enable_mitigation(task);
+ break;
+ case PR_SPEC_DISABLE_NOEXEC:
+ /* Disable speculation until execve(): enable mitigation */
+ /*
+ * If the mitigation state is forced one way or the other, then
+ * we must fail now before we try to toggle it on execve().
+ */
+ if (task_spec_ssb_force_disable(task) ||
+ spectre_v4_mitigations_off() ||
+ spectre_v4_mitigations_on()) {
+ return -EPERM;
+ }
+
+ ssbd_prctl_enable_mitigation(task);
+ task_set_spec_ssb_noexec(task);
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ spectre_v4_enable_task_mitigation(task);
+ return 0;
+}
+
+int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
+ unsigned long ctrl)
+{
+ switch (which) {
+ case PR_SPEC_STORE_BYPASS:
+ return ssbd_prctl_set(task, ctrl);
+ default:
+ return -ENODEV;
+ }
+}
+
+static int ssbd_prctl_get(struct task_struct *task)
+{
+ switch (spectre_v4_state) {
+ case SPECTRE_UNAFFECTED:
+ return PR_SPEC_NOT_AFFECTED;
+ case SPECTRE_MITIGATED:
+ if (spectre_v4_mitigations_on())
+ return PR_SPEC_NOT_AFFECTED;
+
+ if (spectre_v4_mitigations_dynamic())
+ break;
+
+ /* Mitigations are disabled, so we're vulnerable. */
+ fallthrough;
+ case SPECTRE_VULNERABLE:
+ fallthrough;
+ default:
+ return PR_SPEC_ENABLE;
+ }
+
+ /* Check the mitigation state for this task */
+ if (task_spec_ssb_force_disable(task))
+ return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
+
+ if (task_spec_ssb_noexec(task))
+ return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
+
+ if (task_spec_ssb_disable(task))
+ return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
+
+ return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
+}
+
+int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
+{
+ switch (which) {
+ case PR_SPEC_STORE_BYPASS:
+ return ssbd_prctl_get(task);
+ default:
+ return -ENODEV;
+ }
+}
+
+/*
+ * Spectre BHB.
+ *
+ * A CPU is either:
+ * - Mitigated by a branchy loop a CPU specific number of times, and listed
+ * in our "loop mitigated list".
+ * - Mitigated in software by the firmware Spectre v2 call.
+ * - Has the ClearBHB instruction to perform the mitigation.
+ * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no
+ * software mitigation in the vectors is needed.
+ * - Has CSV2.3, so is unaffected.
+ */
+static enum mitigation_state spectre_bhb_state;
+
+enum mitigation_state arm64_get_spectre_bhb_state(void)
+{
+ return spectre_bhb_state;
+}
+
+/*
+ * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
+ * SCOPE_SYSTEM call will give the right answer.
+ */
+u8 spectre_bhb_loop_affected(int scope)
+{
+ u8 k = 0;
+ static u8 max_bhb_k;
+
+ if (scope == SCOPE_LOCAL_CPU) {
+ static const struct midr_range spectre_bhb_k32_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
+ {},
+ };
+ static const struct midr_range spectre_bhb_k24_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
+ {},
+ };
+ static const struct midr_range spectre_bhb_k11_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_AMPERE1),
+ {},
+ };
+ static const struct midr_range spectre_bhb_k8_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+ {},
+ };
+
+ if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
+ k = 32;
+ else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
+ k = 24;
+ else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list))
+ k = 11;
+ else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
+ k = 8;
+
+ max_bhb_k = max(max_bhb_k, k);
+ } else {
+ k = max_bhb_k;
+ }
+
+ return k;
+}
+
+static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
+{
+ int ret;
+ struct arm_smccc_res res;
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_WORKAROUND_3, &res);
+
+ ret = res.a0;
+ switch (ret) {
+ case SMCCC_RET_SUCCESS:
+ return SPECTRE_MITIGATED;
+ case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
+ return SPECTRE_UNAFFECTED;
+ default:
+ fallthrough;
+ case SMCCC_RET_NOT_SUPPORTED:
+ return SPECTRE_VULNERABLE;
+ }
+}
+
+static bool is_spectre_bhb_fw_affected(int scope)
+{
+ static bool system_affected;
+ enum mitigation_state fw_state;
+ bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE;
+ static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
+ {},
+ };
+ bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
+ spectre_bhb_firmware_mitigated_list);
+
+ if (scope != SCOPE_LOCAL_CPU)
+ return system_affected;
+
+ fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
+ if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
+ system_affected = true;
+ return true;
+ }
+
+ return false;
+}
+
+static bool supports_ecbhb(int scope)
+{
+ u64 mmfr1;
+
+ if (scope == SCOPE_LOCAL_CPU)
+ mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1);
+ else
+ mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
+
+ return cpuid_feature_extract_unsigned_field(mmfr1,
+ ID_AA64MMFR1_ECBHB_SHIFT);
+}
+
+bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
+ int scope)
+{
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+
+ if (supports_csv2p3(scope))
+ return false;
+
+ if (supports_clearbhb(scope))
+ return true;
+
+ if (spectre_bhb_loop_affected(scope))
+ return true;
+
+ if (is_spectre_bhb_fw_affected(scope))
+ return true;
+
+ return false;
+}
+
+static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
+{
+ const char *v = arm64_get_bp_hardening_vector(slot);
+
+ if (slot < 0)
+ return;
+
+ __this_cpu_write(this_cpu_vector, v);
+
+ /*
+ * When KPTI is in use, the vectors are switched when exiting to
+ * user-space.
+ */
+ if (arm64_kernel_unmapped_at_el0())
+ return;
+
+ write_sysreg(v, vbar_el1);
+ isb();
+}
+
+#ifdef CONFIG_KVM
+static int kvm_bhb_get_vecs_size(const char *start)
+{
+ if (start == __smccc_workaround_3_smc)
+ return __SMCCC_WORKAROUND_3_SMC_SZ;
+ else if (start == __spectre_bhb_loop_k8 ||
+ start == __spectre_bhb_loop_k24 ||
+ start == __spectre_bhb_loop_k32)
+ return __SPECTRE_BHB_LOOP_SZ;
+ else if (start == __spectre_bhb_clearbhb)
+ return __SPECTRE_BHB_CLEARBHB_SZ;
+
+ return 0;
+}
+
+static void kvm_setup_bhb_slot(const char *hyp_vecs_start)
+{
+ int cpu, slot = -1, size;
+ const char *hyp_vecs_end;
+
+ if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available())
+ return;
+
+ size = kvm_bhb_get_vecs_size(hyp_vecs_start);
+ if (WARN_ON_ONCE(!hyp_vecs_start || !size))
+ return;
+ hyp_vecs_end = hyp_vecs_start + size;
+
+ raw_spin_lock(&bp_lock);
+ for_each_possible_cpu(cpu) {
+ if (per_cpu(bp_hardening_data.template_start, cpu) == hyp_vecs_start) {
+ slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
+ break;
+ }
+ }
+
+ if (slot == -1) {
+ slot = atomic_inc_return(&arm64_el2_vector_last_slot);
+ BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
+ __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
+ }
+
+ __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
+ __this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start);
+ raw_spin_unlock(&bp_lock);
+}
+#else
+#define __smccc_workaround_3_smc NULL
+#define __spectre_bhb_loop_k8 NULL
+#define __spectre_bhb_loop_k24 NULL
+#define __spectre_bhb_loop_k32 NULL
+#define __spectre_bhb_clearbhb NULL
+
+static void kvm_setup_bhb_slot(const char *hyp_vecs_start) { }
+#endif /* CONFIG_KVM */
+
+void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
+{
+ enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;
+
+ if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
+ return;
+
+ if (arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE) {
+ /* No point mitigating Spectre-BHB alone. */
+ } else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
+ pr_info_once("spectre-bhb mitigation disabled by compile time option\n");
+ } else if (cpu_mitigations_off()) {
+ pr_info_once("spectre-bhb mitigation disabled by command line option\n");
+ } else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
+ state = SPECTRE_MITIGATED;
+ } else if (supports_clearbhb(SCOPE_LOCAL_CPU)) {
+ kvm_setup_bhb_slot(__spectre_bhb_clearbhb);
+ this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);
+
+ state = SPECTRE_MITIGATED;
+ } else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
+ switch (spectre_bhb_loop_affected(SCOPE_SYSTEM)) {
+ case 8:
+ kvm_setup_bhb_slot(__spectre_bhb_loop_k8);
+ break;
+ case 24:
+ kvm_setup_bhb_slot(__spectre_bhb_loop_k24);
+ break;
+ case 32:
+ kvm_setup_bhb_slot(__spectre_bhb_loop_k32);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+ this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
+
+ state = SPECTRE_MITIGATED;
+ } else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
+ fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
+ if (fw_state == SPECTRE_MITIGATED) {
+ kvm_setup_bhb_slot(__smccc_workaround_3_smc);
+ this_cpu_set_vectors(EL1_VECTOR_BHB_FW);
+
+ state = SPECTRE_MITIGATED;
+ }
+ }
+
+ update_mitigation_state(&spectre_bhb_state, state);
+}
+
+/* Patched to correct the immediate */
+void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
+ __le32 *origptr, __le32 *updptr, int nr_inst)
+{
+ u8 rd;
+ u32 insn;
+ u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);
+
+ BUG_ON(nr_inst != 1); /* MOV -> MOV */
+
+ if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY))
+ return;
+
+ insn = le32_to_cpu(*origptr);
+ rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
+ insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
+ AARCH64_INSN_VARIANT_64BIT,
+ AARCH64_INSN_MOVEWIDE_ZERO);
+ *updptr++ = cpu_to_le32(insn);
+}
+
+#ifdef CONFIG_BPF_SYSCALL
+#define EBPF_WARN "Unprivileged eBPF is enabled, data leaks possible via Spectre v2 BHB attacks!\n"
+void unpriv_ebpf_notify(int new_state)
+{
+ if (spectre_v2_state == SPECTRE_VULNERABLE ||
+ spectre_bhb_state != SPECTRE_MITIGATED)
+ return;
+
+ if (!new_state)
+ pr_err("WARNING: %s", EBPF_WARN);
+}
+#endif
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
new file mode 100644
index 000000000..62d2bda7a
--- /dev/null
+++ b/arch/arm64/kernel/psci.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2013 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#define pr_fmt(fmt) "psci: " fmt
+
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/smp.h>
+#include <linux/delay.h>
+#include <linux/psci.h>
+#include <linux/mm.h>
+
+#include <uapi/linux/psci.h>
+
+#include <asm/cpu_ops.h>
+#include <asm/errno.h>
+#include <asm/smp_plat.h>
+
+static int __init cpu_psci_cpu_init(unsigned int cpu)
+{
+ return 0;
+}
+
+static int __init cpu_psci_cpu_prepare(unsigned int cpu)
+{
+ if (!psci_ops.cpu_on) {
+ pr_err("no cpu_on method, not booting CPU%d\n", cpu);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int cpu_psci_cpu_boot(unsigned int cpu)
+{
+ int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa_symbol(secondary_entry));
+ if (err)
+ pr_err("failed to boot CPU%d (%d)\n", cpu, err);
+
+ return err;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static bool cpu_psci_cpu_can_disable(unsigned int cpu)
+{
+ return !psci_tos_resident_on(cpu);
+}
+
+static int cpu_psci_cpu_disable(unsigned int cpu)
+{
+ /* Fail early if we don't have CPU_OFF support */
+ if (!psci_ops.cpu_off)
+ return -EOPNOTSUPP;
+
+ /* Trusted OS will deny CPU_OFF */
+ if (psci_tos_resident_on(cpu))
+ return -EPERM;
+
+ return 0;
+}
+
+static void cpu_psci_cpu_die(unsigned int cpu)
+{
+ /*
+ * There are no known implementations of PSCI actually using the
+	 * power state field; pass a sensible default for now.
+ */
+ u32 state = PSCI_POWER_STATE_TYPE_POWER_DOWN <<
+ PSCI_0_2_POWER_STATE_TYPE_SHIFT;
+
+ psci_ops.cpu_off(state);
+}
+
+static int cpu_psci_cpu_kill(unsigned int cpu)
+{
+ int err;
+ unsigned long start, end;
+
+ if (!psci_ops.affinity_info)
+ return 0;
+ /*
+ * cpu_kill could race with cpu_die and we can
+ * potentially end up declaring this cpu undead
+ * while it is dying. So, try again a few times.
+ */
+
+ start = jiffies;
+ end = start + msecs_to_jiffies(100);
+ do {
+ err = psci_ops.affinity_info(cpu_logical_map(cpu), 0);
+ if (err == PSCI_0_2_AFFINITY_LEVEL_OFF) {
+ pr_info("CPU%d killed (polled %d ms)\n", cpu,
+ jiffies_to_msecs(jiffies - start));
+ return 0;
+ }
+
+ usleep_range(100, 1000);
+ } while (time_before(jiffies, end));
+
+ pr_warn("CPU%d may not have shut down cleanly (AFFINITY_INFO reports %d)\n",
+ cpu, err);
+ return -ETIMEDOUT;
+}
+#endif
+
+const struct cpu_operations cpu_psci_ops = {
+ .name = "psci",
+ .cpu_init = cpu_psci_cpu_init,
+ .cpu_prepare = cpu_psci_cpu_prepare,
+ .cpu_boot = cpu_psci_cpu_boot,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_can_disable = cpu_psci_cpu_can_disable,
+ .cpu_disable = cpu_psci_cpu_disable,
+ .cpu_die = cpu_psci_cpu_die,
+ .cpu_kill = cpu_psci_cpu_kill,
+#endif
+};
+
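For context, a minimal sketch of how the SMP bring-up code consumes these callbacks. The helper name get_cpu_ops() and the exact call sequence are assumptions drawn from the surrounding kernel sources of this era, not part of this patch; error handling is trimmed.

    /* Simplified secondary-CPU boot path (illustrative only). */
    static int bringup_cpu_sketch(unsigned int cpu)
    {
            const struct cpu_operations *ops = get_cpu_ops(cpu); /* "psci" via DT/ACPI */
            int err;

            err = ops->cpu_prepare(cpu);  /* cpu_psci_cpu_prepare(): fails without CPU_ON */
            if (err)
                    return err;

            return ops->cpu_boot(cpu);    /* cpu_psci_cpu_boot(): PSCI CPU_ON to secondary_entry */
    }
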
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
new file mode 100644
index 000000000..2817e3988
--- /dev/null
+++ b/arch/arm64/kernel/ptrace.c
@@ -0,0 +1,1913 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Based on arch/arm/kernel/ptrace.c
+ *
+ * By Ross Biro 1/23/92
+ * edited by Linus Torvalds
+ * ARM modifications Copyright (C) 2000 Russell King
+ * Copyright (C) 2012 ARM Ltd.
+ */
+
+#include <linux/audit.h>
+#include <linux/compat.h>
+#include <linux/kernel.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/task_stack.h>
+#include <linux/mm.h>
+#include <linux/nospec.h>
+#include <linux/smp.h>
+#include <linux/ptrace.h>
+#include <linux/user.h>
+#include <linux/seccomp.h>
+#include <linux/security.h>
+#include <linux/init.h>
+#include <linux/signal.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/perf_event.h>
+#include <linux/hw_breakpoint.h>
+#include <linux/regset.h>
+#include <linux/tracehook.h>
+#include <linux/elf.h>
+
+#include <asm/compat.h>
+#include <asm/cpufeature.h>
+#include <asm/debug-monitors.h>
+#include <asm/fpsimd.h>
+#include <asm/mte.h>
+#include <asm/pointer_auth.h>
+#include <asm/stacktrace.h>
+#include <asm/syscall.h>
+#include <asm/traps.h>
+#include <asm/system_misc.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
+struct pt_regs_offset {
+ const char *name;
+ int offset;
+};
+
+#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
+#define REG_OFFSET_END {.name = NULL, .offset = 0}
+#define GPR_OFFSET_NAME(r) \
+ {.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}
+
+static const struct pt_regs_offset regoffset_table[] = {
+ GPR_OFFSET_NAME(0),
+ GPR_OFFSET_NAME(1),
+ GPR_OFFSET_NAME(2),
+ GPR_OFFSET_NAME(3),
+ GPR_OFFSET_NAME(4),
+ GPR_OFFSET_NAME(5),
+ GPR_OFFSET_NAME(6),
+ GPR_OFFSET_NAME(7),
+ GPR_OFFSET_NAME(8),
+ GPR_OFFSET_NAME(9),
+ GPR_OFFSET_NAME(10),
+ GPR_OFFSET_NAME(11),
+ GPR_OFFSET_NAME(12),
+ GPR_OFFSET_NAME(13),
+ GPR_OFFSET_NAME(14),
+ GPR_OFFSET_NAME(15),
+ GPR_OFFSET_NAME(16),
+ GPR_OFFSET_NAME(17),
+ GPR_OFFSET_NAME(18),
+ GPR_OFFSET_NAME(19),
+ GPR_OFFSET_NAME(20),
+ GPR_OFFSET_NAME(21),
+ GPR_OFFSET_NAME(22),
+ GPR_OFFSET_NAME(23),
+ GPR_OFFSET_NAME(24),
+ GPR_OFFSET_NAME(25),
+ GPR_OFFSET_NAME(26),
+ GPR_OFFSET_NAME(27),
+ GPR_OFFSET_NAME(28),
+ GPR_OFFSET_NAME(29),
+ GPR_OFFSET_NAME(30),
+ {.name = "lr", .offset = offsetof(struct pt_regs, regs[30])},
+ REG_OFFSET_NAME(sp),
+ REG_OFFSET_NAME(pc),
+ REG_OFFSET_NAME(pstate),
+ REG_OFFSET_END,
+};
+
+/**
+ * regs_query_register_offset() - query register offset from its name
+ * @name: the name of a register
+ *
+ * regs_query_register_offset() returns the offset of a register in struct
+ * pt_regs from its name. If the name is invalid, this returns -EINVAL.
+ */
+int regs_query_register_offset(const char *name)
+{
+ const struct pt_regs_offset *roff;
+
+ for (roff = regoffset_table; roff->name != NULL; roff++)
+ if (!strcmp(roff->name, name))
+ return roff->offset;
+ return -EINVAL;
+}
+
+/**
+ * regs_within_kernel_stack() - check the address in the stack
+ * @regs: pt_regs which contains kernel stack pointer.
+ * @addr: address which is checked.
+ *
+ * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
+ * If @addr is within the kernel stack, it returns true. If not, returns false.
+ */
+static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
+{
+ return ((addr & ~(THREAD_SIZE - 1)) ==
+ (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
+ on_irq_stack(addr, NULL);
+}
+
+/**
+ * regs_get_kernel_stack_nth() - get Nth entry of the stack
+ * @regs: pt_regs which contains kernel stack pointer.
+ * @n: stack entry number.
+ *
+ * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
+ * is specified by @regs. If the @n th entry is NOT in the kernel stack,
+ * this returns 0.
+ */
+unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
+{
+ unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
+
+ addr += n;
+ if (regs_within_kernel_stack(regs, (unsigned long)addr))
+ return *addr;
+ else
+ return 0;
+}
+
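These two accessors, together with regs_query_register_offset() above, back the kernel's generic register and stack fetch interface (used, for instance, by kprobe event arguments). A hedged sketch of a consumer follows; regs_get_register() is assumed to be the inline helper from asm/ptrace.h, and the function itself is hypothetical.

    /* Fetch the third GPR argument and the second 64-bit kernel stack slot. */
    static unsigned long probe_args_sketch(struct pt_regs *regs)
    {
            int off = regs_query_register_offset("x2");
            unsigned long x2 = regs_get_register(regs, off);

            /* regs_get_kernel_stack_nth() returns 0 for an out-of-stack slot. */
            unsigned long slot1 = regs_get_kernel_stack_nth(regs, 1);

            return x2 + slot1;
    }
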
+/*
+ * TODO: does not yet catch signals sent when the child dies
+ * in exit.c or in signal.c.
+ */
+
+/*
+ * Called by kernel/ptrace.c when detaching..
+ */
+void ptrace_disable(struct task_struct *child)
+{
+ /*
+ * This would be better off in core code, but PTRACE_DETACH has
+	 * grown its fair share of arch-specific warts and changing it
+ * is likely to cause regressions on obscure architectures.
+ */
+ user_disable_single_step(child);
+}
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+/*
+ * Handle hitting a HW-breakpoint.
+ */
+static void ptrace_hbptriggered(struct perf_event *bp,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
+{
+ struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
+ const char *desc = "Hardware breakpoint trap (ptrace)";
+
+#ifdef CONFIG_COMPAT
+ if (is_compat_task()) {
+ int si_errno = 0;
+ int i;
+
+ for (i = 0; i < ARM_MAX_BRP; ++i) {
+ if (current->thread.debug.hbp_break[i] == bp) {
+ si_errno = (i << 1) + 1;
+ break;
+ }
+ }
+
+ for (i = 0; i < ARM_MAX_WRP; ++i) {
+ if (current->thread.debug.hbp_watch[i] == bp) {
+ si_errno = -((i << 1) + 1);
+ break;
+ }
+ }
+ arm64_force_sig_ptrace_errno_trap(si_errno,
+ (void __user *)bkpt->trigger,
+ desc);
+ }
+#endif
+ arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT,
+ (void __user *)(bkpt->trigger),
+ desc);
+}
+
+/*
+ * Unregister breakpoints from this task and reset the pointers in
+ * the thread_struct.
+ */
+void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
+{
+ int i;
+ struct thread_struct *t = &tsk->thread;
+
+ for (i = 0; i < ARM_MAX_BRP; i++) {
+ if (t->debug.hbp_break[i]) {
+ unregister_hw_breakpoint(t->debug.hbp_break[i]);
+ t->debug.hbp_break[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < ARM_MAX_WRP; i++) {
+ if (t->debug.hbp_watch[i]) {
+ unregister_hw_breakpoint(t->debug.hbp_watch[i]);
+ t->debug.hbp_watch[i] = NULL;
+ }
+ }
+}
+
+void ptrace_hw_copy_thread(struct task_struct *tsk)
+{
+ memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
+}
+
+static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
+ struct task_struct *tsk,
+ unsigned long idx)
+{
+ struct perf_event *bp = ERR_PTR(-EINVAL);
+
+ switch (note_type) {
+ case NT_ARM_HW_BREAK:
+ if (idx >= ARM_MAX_BRP)
+ goto out;
+ idx = array_index_nospec(idx, ARM_MAX_BRP);
+ bp = tsk->thread.debug.hbp_break[idx];
+ break;
+ case NT_ARM_HW_WATCH:
+ if (idx >= ARM_MAX_WRP)
+ goto out;
+ idx = array_index_nospec(idx, ARM_MAX_WRP);
+ bp = tsk->thread.debug.hbp_watch[idx];
+ break;
+ }
+
+out:
+ return bp;
+}
+
+static int ptrace_hbp_set_event(unsigned int note_type,
+ struct task_struct *tsk,
+ unsigned long idx,
+ struct perf_event *bp)
+{
+ int err = -EINVAL;
+
+ switch (note_type) {
+ case NT_ARM_HW_BREAK:
+ if (idx >= ARM_MAX_BRP)
+ goto out;
+ idx = array_index_nospec(idx, ARM_MAX_BRP);
+ tsk->thread.debug.hbp_break[idx] = bp;
+ err = 0;
+ break;
+ case NT_ARM_HW_WATCH:
+ if (idx >= ARM_MAX_WRP)
+ goto out;
+ idx = array_index_nospec(idx, ARM_MAX_WRP);
+ tsk->thread.debug.hbp_watch[idx] = bp;
+ err = 0;
+ break;
+ }
+
+out:
+ return err;
+}
+
+static struct perf_event *ptrace_hbp_create(unsigned int note_type,
+ struct task_struct *tsk,
+ unsigned long idx)
+{
+ struct perf_event *bp;
+ struct perf_event_attr attr;
+ int err, type;
+
+ switch (note_type) {
+ case NT_ARM_HW_BREAK:
+ type = HW_BREAKPOINT_X;
+ break;
+ case NT_ARM_HW_WATCH:
+ type = HW_BREAKPOINT_RW;
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ ptrace_breakpoint_init(&attr);
+
+ /*
+ * Initialise fields to sane defaults
+ * (i.e. values that will pass validation).
+ */
+ attr.bp_addr = 0;
+ attr.bp_len = HW_BREAKPOINT_LEN_4;
+ attr.bp_type = type;
+ attr.disabled = 1;
+
+ bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
+ if (IS_ERR(bp))
+ return bp;
+
+ err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
+ if (err)
+ return ERR_PTR(err);
+
+ return bp;
+}
+
+static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
+ struct arch_hw_breakpoint_ctrl ctrl,
+ struct perf_event_attr *attr)
+{
+ int err, len, type, offset, disabled = !ctrl.enabled;
+
+ attr->disabled = disabled;
+ if (disabled)
+ return 0;
+
+ err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
+ if (err)
+ return err;
+
+ switch (note_type) {
+ case NT_ARM_HW_BREAK:
+ if ((type & HW_BREAKPOINT_X) != type)
+ return -EINVAL;
+ break;
+ case NT_ARM_HW_WATCH:
+ if ((type & HW_BREAKPOINT_RW) != type)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ attr->bp_len = len;
+ attr->bp_type = type;
+ attr->bp_addr += offset;
+
+ return 0;
+}
+
+static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
+{
+ u8 num;
+ u32 reg = 0;
+
+ switch (note_type) {
+ case NT_ARM_HW_BREAK:
+ num = hw_breakpoint_slots(TYPE_INST);
+ break;
+ case NT_ARM_HW_WATCH:
+ num = hw_breakpoint_slots(TYPE_DATA);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ reg |= debug_monitors_arch();
+ reg <<= 8;
+ reg |= num;
+
+ *info = reg;
+ return 0;
+}
+
+static int ptrace_hbp_get_ctrl(unsigned int note_type,
+ struct task_struct *tsk,
+ unsigned long idx,
+ u32 *ctrl)
+{
+ struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
+
+ if (IS_ERR(bp))
+ return PTR_ERR(bp);
+
+ *ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
+ return 0;
+}
+
+static int ptrace_hbp_get_addr(unsigned int note_type,
+ struct task_struct *tsk,
+ unsigned long idx,
+ u64 *addr)
+{
+ struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
+
+ if (IS_ERR(bp))
+ return PTR_ERR(bp);
+
+ *addr = bp ? counter_arch_bp(bp)->address : 0;
+ return 0;
+}
+
+static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
+ struct task_struct *tsk,
+ unsigned long idx)
+{
+ struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
+
+ if (!bp)
+ bp = ptrace_hbp_create(note_type, tsk, idx);
+
+ return bp;
+}
+
+static int ptrace_hbp_set_ctrl(unsigned int note_type,
+ struct task_struct *tsk,
+ unsigned long idx,
+ u32 uctrl)
+{
+ int err;
+ struct perf_event *bp;
+ struct perf_event_attr attr;
+ struct arch_hw_breakpoint_ctrl ctrl;
+
+ bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
+ if (IS_ERR(bp)) {
+ err = PTR_ERR(bp);
+ return err;
+ }
+
+ attr = bp->attr;
+ decode_ctrl_reg(uctrl, &ctrl);
+ err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
+ if (err)
+ return err;
+
+ return modify_user_hw_breakpoint(bp, &attr);
+}
+
+static int ptrace_hbp_set_addr(unsigned int note_type,
+ struct task_struct *tsk,
+ unsigned long idx,
+ u64 addr)
+{
+ int err;
+ struct perf_event *bp;
+ struct perf_event_attr attr;
+
+ bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
+ if (IS_ERR(bp)) {
+ err = PTR_ERR(bp);
+ return err;
+ }
+
+ attr = bp->attr;
+ attr.bp_addr = addr;
+ err = modify_user_hw_breakpoint(bp, &attr);
+ return err;
+}
+
+#define PTRACE_HBP_ADDR_SZ sizeof(u64)
+#define PTRACE_HBP_CTRL_SZ sizeof(u32)
+#define PTRACE_HBP_PAD_SZ sizeof(u32)
+
+static int hw_break_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ unsigned int note_type = regset->core_note_type;
+ int ret, idx = 0;
+ u32 info, ctrl;
+ u64 addr;
+
+ /* Resource info */
+ ret = ptrace_hbp_get_resource_info(note_type, &info);
+ if (ret)
+ return ret;
+
+ membuf_write(&to, &info, sizeof(info));
+ membuf_zero(&to, sizeof(u32));
+ /* (address, ctrl) registers */
+ while (to.left) {
+ ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
+ if (ret)
+ return ret;
+ ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
+ if (ret)
+ return ret;
+ membuf_store(&to, addr);
+ membuf_store(&to, ctrl);
+ membuf_zero(&to, sizeof(u32));
+ idx++;
+ }
+ return 0;
+}
+
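From user space, the layout emitted by hw_break_get() maps onto struct user_hwdebug_state (the same UAPI struct referenced by hw_break_set() below). A hedged sketch of reading it; header names follow the arm64 UAPI, the tracee is assumed to be ptrace-stopped, and error handling is trimmed.

    #include <elf.h>                /* NT_ARM_HW_BREAK */
    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>
    #include <asm/ptrace.h>         /* struct user_hwdebug_state */

    static void dump_hw_break(pid_t pid)
    {
            struct user_hwdebug_state dbg;
            struct iovec iov = { .iov_base = &dbg, .iov_len = sizeof(dbg) };

            if (ptrace(PTRACE_GETREGSET, pid, NT_ARM_HW_BREAK, &iov))
                    return;

            /* dbg_info: debug architecture in bits [15:8], slot count in bits [7:0]. */
            printf("%u breakpoint slots, bp0 addr=%#llx ctrl=%#x\n",
                   dbg.dbg_info & 0xff,
                   (unsigned long long)dbg.dbg_regs[0].addr,
                   dbg.dbg_regs[0].ctrl);
    }
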
+static int hw_break_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ unsigned int note_type = regset->core_note_type;
+ int ret, idx = 0, offset, limit;
+ u32 ctrl;
+ u64 addr;
+
+ /* Resource info and pad */
+ offset = offsetof(struct user_hwdebug_state, dbg_regs);
+ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
+ if (ret)
+ return ret;
+
+ /* (address, ctrl) registers */
+ limit = regset->n * regset->size;
+ while (count && offset < limit) {
+ if (count < PTRACE_HBP_ADDR_SZ)
+ return -EINVAL;
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
+ offset, offset + PTRACE_HBP_ADDR_SZ);
+ if (ret)
+ return ret;
+ ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
+ if (ret)
+ return ret;
+ offset += PTRACE_HBP_ADDR_SZ;
+
+ if (!count)
+ break;
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
+ offset, offset + PTRACE_HBP_CTRL_SZ);
+ if (ret)
+ return ret;
+ ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
+ if (ret)
+ return ret;
+ offset += PTRACE_HBP_CTRL_SZ;
+
+ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ offset,
+ offset + PTRACE_HBP_PAD_SZ);
+ if (ret)
+ return ret;
+ offset += PTRACE_HBP_PAD_SZ;
+ idx++;
+ }
+
+ return 0;
+}
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+
+static int gpr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;
+ return membuf_write(&to, uregs, sizeof(*uregs));
+}
+
+static int gpr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+ struct user_pt_regs newregs = task_pt_regs(target)->user_regs;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
+ if (ret)
+ return ret;
+
+ if (!valid_user_regs(&newregs, target))
+ return -EINVAL;
+
+ task_pt_regs(target)->user_regs = newregs;
+ return 0;
+}
+
+static int fpr_active(struct task_struct *target, const struct user_regset *regset)
+{
+ if (!system_supports_fpsimd())
+ return -ENODEV;
+ return regset->n;
+}
+
+/*
+ * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
+ */
+static int __fpr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ struct user_fpsimd_state *uregs;
+
+ sve_sync_to_fpsimd(target);
+
+ uregs = &target->thread.uw.fpsimd_state;
+
+ return membuf_write(&to, uregs, sizeof(*uregs));
+}
+
+static int fpr_get(struct task_struct *target, const struct user_regset *regset,
+ struct membuf to)
+{
+ if (!system_supports_fpsimd())
+ return -EINVAL;
+
+ if (target == current)
+ fpsimd_preserve_current_state();
+
+ return __fpr_get(target, regset, to);
+}
+
+static int __fpr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf,
+ unsigned int start_pos)
+{
+ int ret;
+ struct user_fpsimd_state newstate;
+
+ /*
+ * Ensure target->thread.uw.fpsimd_state is up to date, so that a
+ * short copyin can't resurrect stale data.
+ */
+ sve_sync_to_fpsimd(target);
+
+ newstate = target->thread.uw.fpsimd_state;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate,
+ start_pos, start_pos + sizeof(newstate));
+ if (ret)
+ return ret;
+
+ target->thread.uw.fpsimd_state = newstate;
+
+ return ret;
+}
+
+static int fpr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+
+ if (!system_supports_fpsimd())
+ return -EINVAL;
+
+ ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
+ if (ret)
+ return ret;
+
+ sve_sync_from_fpsimd_zeropad(target);
+ fpsimd_flush_task_state(target);
+
+ return ret;
+}
+
+static int tls_get(struct task_struct *target, const struct user_regset *regset,
+ struct membuf to)
+{
+ if (target == current)
+ tls_preserve_current_state();
+
+ return membuf_store(&to, target->thread.uw.tp_value);
+}
+
+static int tls_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+ unsigned long tls = target->thread.uw.tp_value;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
+ if (ret)
+ return ret;
+
+ target->thread.uw.tp_value = tls;
+ return ret;
+}
+
+static int system_call_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ return membuf_store(&to, task_pt_regs(target)->syscallno);
+}
+
+static int system_call_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int syscallno = task_pt_regs(target)->syscallno;
+ int ret;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
+ if (ret)
+ return ret;
+
+ task_pt_regs(target)->syscallno = syscallno;
+ return ret;
+}
+
+#ifdef CONFIG_ARM64_SVE
+
+static void sve_init_header_from_task(struct user_sve_header *header,
+ struct task_struct *target)
+{
+ unsigned int vq;
+
+ memset(header, 0, sizeof(*header));
+
+ header->flags = test_tsk_thread_flag(target, TIF_SVE) ?
+ SVE_PT_REGS_SVE : SVE_PT_REGS_FPSIMD;
+ if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
+ header->flags |= SVE_PT_VL_INHERIT;
+
+ header->vl = target->thread.sve_vl;
+ vq = sve_vq_from_vl(header->vl);
+
+ header->max_vl = sve_max_vl;
+ header->size = SVE_PT_SIZE(vq, header->flags);
+ header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
+ SVE_PT_REGS_SVE);
+}
+
+static unsigned int sve_size_from_header(struct user_sve_header const *header)
+{
+ return ALIGN(header->size, SVE_VQ_BYTES);
+}
+
+static int sve_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ struct user_sve_header header;
+ unsigned int vq;
+ unsigned long start, end;
+
+ if (!system_supports_sve())
+ return -EINVAL;
+
+ /* Header */
+ sve_init_header_from_task(&header, target);
+ vq = sve_vq_from_vl(header.vl);
+
+ membuf_write(&to, &header, sizeof(header));
+
+ if (target == current)
+ fpsimd_preserve_current_state();
+
+ /* Registers: FPSIMD-only case */
+
+ BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
+ if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD)
+ return __fpr_get(target, regset, to);
+
+ /* Otherwise: full SVE case */
+
+ BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
+ start = SVE_PT_SVE_OFFSET;
+ end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
+ membuf_write(&to, target->thread.sve_state, end - start);
+
+ start = end;
+ end = SVE_PT_SVE_FPSR_OFFSET(vq);
+ membuf_zero(&to, end - start);
+
+ /*
+ * Copy fpsr, and fpcr which must follow contiguously in
+ * struct fpsimd_state:
+ */
+ start = end;
+ end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
+ membuf_write(&to, &target->thread.uw.fpsimd_state.fpsr, end - start);
+
+ start = end;
+ end = sve_size_from_header(&header);
+ return membuf_zero(&to, end - start);
+}
+
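Similarly, the SVE dump produced by sve_get() begins with struct user_sve_header. A minimal user-space sketch of inspecting that header; the struct and SVE_PT_* names come from the arm64 ptrace UAPI, and the tracee is assumed to be stopped.

    #include <elf.h>                /* NT_ARM_SVE */
    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>
    #include <asm/ptrace.h>         /* struct user_sve_header, SVE_PT_* */

    static void show_sve_header(pid_t pid)
    {
            struct user_sve_header hdr;
            struct iovec iov = { .iov_base = &hdr, .iov_len = sizeof(hdr) };

            if (ptrace(PTRACE_GETREGSET, pid, NT_ARM_SVE, &iov))
                    return;

            printf("vl=%u max_vl=%u payload=%s\n", hdr.vl, hdr.max_vl,
                   (hdr.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE ?
                   "SVE" : "FPSIMD");
    }
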
+static int sve_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+ struct user_sve_header header;
+ unsigned int vq;
+ unsigned long start, end;
+
+ if (!system_supports_sve())
+ return -EINVAL;
+
+ /* Header */
+ if (count < sizeof(header))
+ return -EINVAL;
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
+ 0, sizeof(header));
+ if (ret)
+ goto out;
+
+ /*
+ * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by
+ * sve_set_vector_length(), which will also validate them for us:
+ */
+ ret = sve_set_vector_length(target, header.vl,
+ ((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);
+ if (ret)
+ goto out;
+
+ /* Actual VL set may be less than the user asked for: */
+ vq = sve_vq_from_vl(target->thread.sve_vl);
+
+ /* Registers: FPSIMD-only case */
+
+ BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
+ if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) {
+ ret = __fpr_set(target, regset, pos, count, kbuf, ubuf,
+ SVE_PT_FPSIMD_OFFSET);
+ clear_tsk_thread_flag(target, TIF_SVE);
+ goto out;
+ }
+
+ /* Otherwise: full SVE case */
+
+ /*
+ * If setting a different VL from the requested VL and there is
+ * register data, the data layout will be wrong: don't even
+ * try to set the registers in this case.
+ */
+ if (count && vq != sve_vq_from_vl(header.vl)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ sve_alloc(target);
+
+ /*
+ * Ensure target->thread.sve_state is up to date with target's
+ * FPSIMD regs, so that a short copyin leaves trailing registers
+ * unmodified.
+ */
+ fpsimd_sync_to_sve(target);
+ set_tsk_thread_flag(target, TIF_SVE);
+
+ BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
+ start = SVE_PT_SVE_OFFSET;
+ end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ target->thread.sve_state,
+ start, end);
+ if (ret)
+ goto out;
+
+ start = end;
+ end = SVE_PT_SVE_FPSR_OFFSET(vq);
+ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ start, end);
+ if (ret)
+ goto out;
+
+ /*
+ * Copy fpsr, and fpcr which must follow contiguously in
+ * struct fpsimd_state:
+ */
+ start = end;
+ end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.uw.fpsimd_state.fpsr,
+ start, end);
+
+out:
+ fpsimd_flush_task_state(target);
+ return ret;
+}
+
+#endif /* CONFIG_ARM64_SVE */
+
+#ifdef CONFIG_ARM64_PTR_AUTH
+static int pac_mask_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ /*
+ * The PAC bits can differ across data and instruction pointers
+ * depending on TCR_EL1.TBID*, which we may make use of in future, so
+ * we expose separate masks.
+ */
+ unsigned long mask = ptrauth_user_pac_mask();
+ struct user_pac_mask uregs = {
+ .data_mask = mask,
+ .insn_mask = mask,
+ };
+
+ if (!system_supports_address_auth())
+ return -EINVAL;
+
+ return membuf_write(&to, &uregs, sizeof(uregs));
+}
+
+#ifdef CONFIG_CHECKPOINT_RESTORE
+static __uint128_t pac_key_to_user(const struct ptrauth_key *key)
+{
+ return (__uint128_t)key->hi << 64 | key->lo;
+}
+
+static struct ptrauth_key pac_key_from_user(__uint128_t ukey)
+{
+ struct ptrauth_key key = {
+ .lo = (unsigned long)ukey,
+ .hi = (unsigned long)(ukey >> 64),
+ };
+
+ return key;
+}
+
+static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys,
+ const struct ptrauth_keys_user *keys)
+{
+ ukeys->apiakey = pac_key_to_user(&keys->apia);
+ ukeys->apibkey = pac_key_to_user(&keys->apib);
+ ukeys->apdakey = pac_key_to_user(&keys->apda);
+ ukeys->apdbkey = pac_key_to_user(&keys->apdb);
+}
+
+static void pac_address_keys_from_user(struct ptrauth_keys_user *keys,
+ const struct user_pac_address_keys *ukeys)
+{
+ keys->apia = pac_key_from_user(ukeys->apiakey);
+ keys->apib = pac_key_from_user(ukeys->apibkey);
+ keys->apda = pac_key_from_user(ukeys->apdakey);
+ keys->apdb = pac_key_from_user(ukeys->apdbkey);
+}
+
+static int pac_address_keys_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ struct ptrauth_keys_user *keys = &target->thread.keys_user;
+ struct user_pac_address_keys user_keys;
+
+ if (!system_supports_address_auth())
+ return -EINVAL;
+
+ pac_address_keys_to_user(&user_keys, keys);
+
+ return membuf_write(&to, &user_keys, sizeof(user_keys));
+}
+
+static int pac_address_keys_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ struct ptrauth_keys_user *keys = &target->thread.keys_user;
+ struct user_pac_address_keys user_keys;
+ int ret;
+
+ if (!system_supports_address_auth())
+ return -EINVAL;
+
+ pac_address_keys_to_user(&user_keys, keys);
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &user_keys, 0, -1);
+ if (ret)
+ return ret;
+ pac_address_keys_from_user(keys, &user_keys);
+
+ return 0;
+}
+
+static void pac_generic_keys_to_user(struct user_pac_generic_keys *ukeys,
+ const struct ptrauth_keys_user *keys)
+{
+ ukeys->apgakey = pac_key_to_user(&keys->apga);
+}
+
+static void pac_generic_keys_from_user(struct ptrauth_keys_user *keys,
+ const struct user_pac_generic_keys *ukeys)
+{
+ keys->apga = pac_key_from_user(ukeys->apgakey);
+}
+
+static int pac_generic_keys_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ struct ptrauth_keys_user *keys = &target->thread.keys_user;
+ struct user_pac_generic_keys user_keys;
+
+ if (!system_supports_generic_auth())
+ return -EINVAL;
+
+ pac_generic_keys_to_user(&user_keys, keys);
+
+ return membuf_write(&to, &user_keys, sizeof(user_keys));
+}
+
+static int pac_generic_keys_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ struct ptrauth_keys_user *keys = &target->thread.keys_user;
+ struct user_pac_generic_keys user_keys;
+ int ret;
+
+ if (!system_supports_generic_auth())
+ return -EINVAL;
+
+ pac_generic_keys_to_user(&user_keys, keys);
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &user_keys, 0, -1);
+ if (ret)
+ return ret;
+ pac_generic_keys_from_user(keys, &user_keys);
+
+ return 0;
+}
+#endif /* CONFIG_CHECKPOINT_RESTORE */
+#endif /* CONFIG_ARM64_PTR_AUTH */
+
+#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
+static int tagged_addr_ctrl_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ long ctrl = get_tagged_addr_ctrl(target);
+
+ if (IS_ERR_VALUE(ctrl))
+ return ctrl;
+
+ return membuf_write(&to, &ctrl, sizeof(ctrl));
+}
+
+static int tagged_addr_ctrl_set(struct task_struct *target, const struct
+ user_regset *regset, unsigned int pos,
+ unsigned int count, const void *kbuf, const
+ void __user *ubuf)
+{
+ int ret;
+ long ctrl;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1);
+ if (ret)
+ return ret;
+
+ return set_tagged_addr_ctrl(target, ctrl);
+}
+#endif
+
+enum aarch64_regset {
+ REGSET_GPR,
+ REGSET_FPR,
+ REGSET_TLS,
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ REGSET_HW_BREAK,
+ REGSET_HW_WATCH,
+#endif
+ REGSET_SYSTEM_CALL,
+#ifdef CONFIG_ARM64_SVE
+ REGSET_SVE,
+#endif
+#ifdef CONFIG_ARM64_PTR_AUTH
+ REGSET_PAC_MASK,
+#ifdef CONFIG_CHECKPOINT_RESTORE
+ REGSET_PACA_KEYS,
+ REGSET_PACG_KEYS,
+#endif
+#endif
+#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
+ REGSET_TAGGED_ADDR_CTRL,
+#endif
+};
+
+static const struct user_regset aarch64_regsets[] = {
+ [REGSET_GPR] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = sizeof(struct user_pt_regs) / sizeof(u64),
+ .size = sizeof(u64),
+ .align = sizeof(u64),
+ .regset_get = gpr_get,
+ .set = gpr_set
+ },
+ [REGSET_FPR] = {
+ .core_note_type = NT_PRFPREG,
+ .n = sizeof(struct user_fpsimd_state) / sizeof(u32),
+ /*
+ * We pretend we have 32-bit registers because the fpsr and
+		 * fpcr are 32 bits wide.
+ */
+ .size = sizeof(u32),
+ .align = sizeof(u32),
+ .active = fpr_active,
+ .regset_get = fpr_get,
+ .set = fpr_set
+ },
+ [REGSET_TLS] = {
+ .core_note_type = NT_ARM_TLS,
+ .n = 1,
+ .size = sizeof(void *),
+ .align = sizeof(void *),
+ .regset_get = tls_get,
+ .set = tls_set,
+ },
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ [REGSET_HW_BREAK] = {
+ .core_note_type = NT_ARM_HW_BREAK,
+ .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
+ .size = sizeof(u32),
+ .align = sizeof(u32),
+ .regset_get = hw_break_get,
+ .set = hw_break_set,
+ },
+ [REGSET_HW_WATCH] = {
+ .core_note_type = NT_ARM_HW_WATCH,
+ .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
+ .size = sizeof(u32),
+ .align = sizeof(u32),
+ .regset_get = hw_break_get,
+ .set = hw_break_set,
+ },
+#endif
+ [REGSET_SYSTEM_CALL] = {
+ .core_note_type = NT_ARM_SYSTEM_CALL,
+ .n = 1,
+ .size = sizeof(int),
+ .align = sizeof(int),
+ .regset_get = system_call_get,
+ .set = system_call_set,
+ },
+#ifdef CONFIG_ARM64_SVE
+ [REGSET_SVE] = { /* Scalable Vector Extension */
+ .core_note_type = NT_ARM_SVE,
+ .n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE),
+ SVE_VQ_BYTES),
+ .size = SVE_VQ_BYTES,
+ .align = SVE_VQ_BYTES,
+ .regset_get = sve_get,
+ .set = sve_set,
+ },
+#endif
+#ifdef CONFIG_ARM64_PTR_AUTH
+ [REGSET_PAC_MASK] = {
+ .core_note_type = NT_ARM_PAC_MASK,
+ .n = sizeof(struct user_pac_mask) / sizeof(u64),
+ .size = sizeof(u64),
+ .align = sizeof(u64),
+ .regset_get = pac_mask_get,
+ /* this cannot be set dynamically */
+ },
+#ifdef CONFIG_CHECKPOINT_RESTORE
+ [REGSET_PACA_KEYS] = {
+ .core_note_type = NT_ARM_PACA_KEYS,
+ .n = sizeof(struct user_pac_address_keys) / sizeof(__uint128_t),
+ .size = sizeof(__uint128_t),
+ .align = sizeof(__uint128_t),
+ .regset_get = pac_address_keys_get,
+ .set = pac_address_keys_set,
+ },
+ [REGSET_PACG_KEYS] = {
+ .core_note_type = NT_ARM_PACG_KEYS,
+ .n = sizeof(struct user_pac_generic_keys) / sizeof(__uint128_t),
+ .size = sizeof(__uint128_t),
+ .align = sizeof(__uint128_t),
+ .regset_get = pac_generic_keys_get,
+ .set = pac_generic_keys_set,
+ },
+#endif
+#endif
+#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
+ [REGSET_TAGGED_ADDR_CTRL] = {
+ .core_note_type = NT_ARM_TAGGED_ADDR_CTRL,
+ .n = 1,
+ .size = sizeof(long),
+ .align = sizeof(long),
+ .regset_get = tagged_addr_ctrl_get,
+ .set = tagged_addr_ctrl_set,
+ },
+#endif
+};
+
+static const struct user_regset_view user_aarch64_view = {
+ .name = "aarch64", .e_machine = EM_AARCH64,
+ .regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
+};
+
+#ifdef CONFIG_COMPAT
+enum compat_regset {
+ REGSET_COMPAT_GPR,
+ REGSET_COMPAT_VFP,
+};
+
+static inline compat_ulong_t compat_get_user_reg(struct task_struct *task, int idx)
+{
+ struct pt_regs *regs = task_pt_regs(task);
+
+ switch (idx) {
+ case 15:
+ return regs->pc;
+ case 16:
+ return pstate_to_compat_psr(regs->pstate);
+ case 17:
+ return regs->orig_x0;
+ default:
+ return regs->regs[idx];
+ }
+}
+
+static int compat_gpr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ int i = 0;
+
+ while (to.left)
+ membuf_store(&to, compat_get_user_reg(target, i++));
+ return 0;
+}
+
+static int compat_gpr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ struct pt_regs newregs;
+ int ret = 0;
+ unsigned int i, start, num_regs;
+
+ /* Calculate the number of AArch32 registers contained in count */
+ num_regs = count / regset->size;
+
+	/* Convert pos into a register number */
+ start = pos / regset->size;
+
+ if (start + num_regs > regset->n)
+ return -EIO;
+
+ newregs = *task_pt_regs(target);
+
+ for (i = 0; i < num_regs; ++i) {
+ unsigned int idx = start + i;
+ compat_ulong_t reg;
+
+ if (kbuf) {
+ memcpy(&reg, kbuf, sizeof(reg));
+ kbuf += sizeof(reg);
+ } else {
+ ret = copy_from_user(&reg, ubuf, sizeof(reg));
+ if (ret) {
+ ret = -EFAULT;
+ break;
+ }
+
+ ubuf += sizeof(reg);
+ }
+
+ switch (idx) {
+ case 15:
+ newregs.pc = reg;
+ break;
+ case 16:
+ reg = compat_psr_to_pstate(reg);
+ newregs.pstate = reg;
+ break;
+ case 17:
+ newregs.orig_x0 = reg;
+ break;
+ default:
+ newregs.regs[idx] = reg;
+ }
+
+ }
+
+ if (valid_user_regs(&newregs.user_regs, target))
+ *task_pt_regs(target) = newregs;
+ else
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static int compat_vfp_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ struct user_fpsimd_state *uregs;
+ compat_ulong_t fpscr;
+
+ if (!system_supports_fpsimd())
+ return -EINVAL;
+
+ uregs = &target->thread.uw.fpsimd_state;
+
+ if (target == current)
+ fpsimd_preserve_current_state();
+
+ /*
+ * The VFP registers are packed into the fpsimd_state, so they all sit
+ * nicely together for us. We just need to create the fpscr separately.
+ */
+ membuf_write(&to, uregs, VFP_STATE_SIZE - sizeof(compat_ulong_t));
+ fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
+ (uregs->fpcr & VFP_FPSCR_CTRL_MASK);
+ return membuf_store(&to, fpscr);
+}
+
+static int compat_vfp_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ struct user_fpsimd_state *uregs;
+ compat_ulong_t fpscr;
+ int ret, vregs_end_pos;
+
+ if (!system_supports_fpsimd())
+ return -EINVAL;
+
+ uregs = &target->thread.uw.fpsimd_state;
+
+ vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
+ vregs_end_pos);
+
+ if (count && !ret) {
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr,
+ vregs_end_pos, VFP_STATE_SIZE);
+ if (!ret) {
+ uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
+ uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
+ }
+ }
+
+ fpsimd_flush_task_state(target);
+ return ret;
+}
+
+static int compat_tls_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ return membuf_store(&to, (compat_ulong_t)target->thread.uw.tp_value);
+}
+
+static int compat_tls_set(struct task_struct *target,
+ const struct user_regset *regset, unsigned int pos,
+ unsigned int count, const void *kbuf,
+ const void __user *ubuf)
+{
+ int ret;
+ compat_ulong_t tls = target->thread.uw.tp_value;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
+ if (ret)
+ return ret;
+
+ target->thread.uw.tp_value = tls;
+ return ret;
+}
+
+static const struct user_regset aarch32_regsets[] = {
+ [REGSET_COMPAT_GPR] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = COMPAT_ELF_NGREG,
+ .size = sizeof(compat_elf_greg_t),
+ .align = sizeof(compat_elf_greg_t),
+ .regset_get = compat_gpr_get,
+ .set = compat_gpr_set
+ },
+ [REGSET_COMPAT_VFP] = {
+ .core_note_type = NT_ARM_VFP,
+ .n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
+ .size = sizeof(compat_ulong_t),
+ .align = sizeof(compat_ulong_t),
+ .active = fpr_active,
+ .regset_get = compat_vfp_get,
+ .set = compat_vfp_set
+ },
+};
+
+static const struct user_regset_view user_aarch32_view = {
+ .name = "aarch32", .e_machine = EM_ARM,
+ .regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
+};
+
+static const struct user_regset aarch32_ptrace_regsets[] = {
+ [REGSET_GPR] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = COMPAT_ELF_NGREG,
+ .size = sizeof(compat_elf_greg_t),
+ .align = sizeof(compat_elf_greg_t),
+ .regset_get = compat_gpr_get,
+ .set = compat_gpr_set
+ },
+ [REGSET_FPR] = {
+ .core_note_type = NT_ARM_VFP,
+ .n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
+ .size = sizeof(compat_ulong_t),
+ .align = sizeof(compat_ulong_t),
+ .regset_get = compat_vfp_get,
+ .set = compat_vfp_set
+ },
+ [REGSET_TLS] = {
+ .core_note_type = NT_ARM_TLS,
+ .n = 1,
+ .size = sizeof(compat_ulong_t),
+ .align = sizeof(compat_ulong_t),
+ .regset_get = compat_tls_get,
+ .set = compat_tls_set,
+ },
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ [REGSET_HW_BREAK] = {
+ .core_note_type = NT_ARM_HW_BREAK,
+ .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
+ .size = sizeof(u32),
+ .align = sizeof(u32),
+ .regset_get = hw_break_get,
+ .set = hw_break_set,
+ },
+ [REGSET_HW_WATCH] = {
+ .core_note_type = NT_ARM_HW_WATCH,
+ .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
+ .size = sizeof(u32),
+ .align = sizeof(u32),
+ .regset_get = hw_break_get,
+ .set = hw_break_set,
+ },
+#endif
+ [REGSET_SYSTEM_CALL] = {
+ .core_note_type = NT_ARM_SYSTEM_CALL,
+ .n = 1,
+ .size = sizeof(int),
+ .align = sizeof(int),
+ .regset_get = system_call_get,
+ .set = system_call_set,
+ },
+};
+
+static const struct user_regset_view user_aarch32_ptrace_view = {
+ .name = "aarch32", .e_machine = EM_ARM,
+ .regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
+};
+
+static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
+ compat_ulong_t __user *ret)
+{
+ compat_ulong_t tmp;
+
+ if (off & 3)
+ return -EIO;
+
+ if (off == COMPAT_PT_TEXT_ADDR)
+ tmp = tsk->mm->start_code;
+ else if (off == COMPAT_PT_DATA_ADDR)
+ tmp = tsk->mm->start_data;
+ else if (off == COMPAT_PT_TEXT_END_ADDR)
+ tmp = tsk->mm->end_code;
+ else if (off < sizeof(compat_elf_gregset_t))
+ tmp = compat_get_user_reg(tsk, off >> 2);
+ else if (off >= COMPAT_USER_SZ)
+ return -EIO;
+ else
+ tmp = 0;
+
+ return put_user(tmp, ret);
+}
+
+static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
+ compat_ulong_t val)
+{
+ struct pt_regs newregs = *task_pt_regs(tsk);
+ unsigned int idx = off / 4;
+
+ if (off & 3 || off >= COMPAT_USER_SZ)
+ return -EIO;
+
+ if (off >= sizeof(compat_elf_gregset_t))
+ return 0;
+
+ switch (idx) {
+ case 15:
+ newregs.pc = val;
+ break;
+ case 16:
+ newregs.pstate = compat_psr_to_pstate(val);
+ break;
+ case 17:
+ newregs.orig_x0 = val;
+ break;
+ default:
+ newregs.regs[idx] = val;
+ }
+
+ if (!valid_user_regs(&newregs.user_regs, tsk))
+ return -EINVAL;
+
+ *task_pt_regs(tsk) = newregs;
+ return 0;
+}
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+
+/*
+ * Convert a virtual register number into an index for a thread_info
+ * breakpoint array. Breakpoints are identified using positive numbers
+ * whilst watchpoints are negative. The registers are laid out as pairs
+ * of (address, control), each pair mapping to a unique hw_breakpoint struct.
+ * Register 0 is reserved for describing resource information.
+ */
+static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
+{
+ return (abs(num) - 1) >> 1;
+}
+
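A worked example of the numbering that compat_ptrace_hbp_num_to_idx() and the get/set helpers below decode (illustrative only):

    /*
     * num =  0  ->  resource info register (handled in gethbpregs/sethbpregs)
     * num =  1  ->  breakpoint 0 address,   num =  2  ->  breakpoint 0 control
     * num =  3  ->  breakpoint 1 address,   num =  4  ->  breakpoint 1 control
     * num = -1  ->  watchpoint 0 address,   num = -2  ->  watchpoint 0 control
     */
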
+static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
+{
+ u8 num_brps, num_wrps, debug_arch, wp_len;
+ u32 reg = 0;
+
+ num_brps = hw_breakpoint_slots(TYPE_INST);
+ num_wrps = hw_breakpoint_slots(TYPE_DATA);
+
+ debug_arch = debug_monitors_arch();
+ wp_len = 8;
+ reg |= debug_arch;
+ reg <<= 8;
+ reg |= wp_len;
+ reg <<= 8;
+ reg |= num_wrps;
+ reg <<= 8;
+ reg |= num_brps;
+
+ *kdata = reg;
+ return 0;
+}
+
+static int compat_ptrace_hbp_get(unsigned int note_type,
+ struct task_struct *tsk,
+ compat_long_t num,
+ u32 *kdata)
+{
+ u64 addr = 0;
+ u32 ctrl = 0;
+
+ int err, idx = compat_ptrace_hbp_num_to_idx(num);
+
+ if (num & 1) {
+ err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
+ *kdata = (u32)addr;
+ } else {
+ err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
+ *kdata = ctrl;
+ }
+
+ return err;
+}
+
+static int compat_ptrace_hbp_set(unsigned int note_type,
+ struct task_struct *tsk,
+ compat_long_t num,
+ u32 *kdata)
+{
+ u64 addr;
+ u32 ctrl;
+
+ int err, idx = compat_ptrace_hbp_num_to_idx(num);
+
+ if (num & 1) {
+ addr = *kdata;
+ err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
+ } else {
+ ctrl = *kdata;
+ err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
+ }
+
+ return err;
+}
+
+static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
+ compat_ulong_t __user *data)
+{
+ int ret;
+ u32 kdata;
+
+ /* Watchpoint */
+ if (num < 0) {
+ ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
+ /* Resource info */
+ } else if (num == 0) {
+ ret = compat_ptrace_hbp_get_resource_info(&kdata);
+ /* Breakpoint */
+ } else {
+ ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
+ }
+
+ if (!ret)
+ ret = put_user(kdata, data);
+
+ return ret;
+}
+
+static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
+ compat_ulong_t __user *data)
+{
+ int ret;
+ u32 kdata = 0;
+
+ if (num == 0)
+ return 0;
+
+ ret = get_user(kdata, data);
+ if (ret)
+ return ret;
+
+ if (num < 0)
+ ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
+ else
+ ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);
+
+ return ret;
+}
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+
+long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+ compat_ulong_t caddr, compat_ulong_t cdata)
+{
+ unsigned long addr = caddr;
+ unsigned long data = cdata;
+ void __user *datap = compat_ptr(data);
+ int ret;
+
+ switch (request) {
+ case PTRACE_PEEKUSR:
+ ret = compat_ptrace_read_user(child, addr, datap);
+ break;
+
+ case PTRACE_POKEUSR:
+ ret = compat_ptrace_write_user(child, addr, data);
+ break;
+
+ case COMPAT_PTRACE_GETREGS:
+ ret = copy_regset_to_user(child,
+ &user_aarch32_view,
+ REGSET_COMPAT_GPR,
+ 0, sizeof(compat_elf_gregset_t),
+ datap);
+ break;
+
+ case COMPAT_PTRACE_SETREGS:
+ ret = copy_regset_from_user(child,
+ &user_aarch32_view,
+ REGSET_COMPAT_GPR,
+ 0, sizeof(compat_elf_gregset_t),
+ datap);
+ break;
+
+ case COMPAT_PTRACE_GET_THREAD_AREA:
+ ret = put_user((compat_ulong_t)child->thread.uw.tp_value,
+ (compat_ulong_t __user *)datap);
+ break;
+
+ case COMPAT_PTRACE_SET_SYSCALL:
+ task_pt_regs(child)->syscallno = data;
+ ret = 0;
+ break;
+
+ case COMPAT_PTRACE_GETVFPREGS:
+ ret = copy_regset_to_user(child,
+ &user_aarch32_view,
+ REGSET_COMPAT_VFP,
+ 0, VFP_STATE_SIZE,
+ datap);
+ break;
+
+ case COMPAT_PTRACE_SETVFPREGS:
+ ret = copy_regset_from_user(child,
+ &user_aarch32_view,
+ REGSET_COMPAT_VFP,
+ 0, VFP_STATE_SIZE,
+ datap);
+ break;
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ case COMPAT_PTRACE_GETHBPREGS:
+ ret = compat_ptrace_gethbpregs(child, addr, datap);
+ break;
+
+ case COMPAT_PTRACE_SETHBPREGS:
+ ret = compat_ptrace_sethbpregs(child, addr, datap);
+ break;
+#endif
+
+ default:
+ ret = compat_ptrace_request(child, request, addr,
+ data);
+ break;
+ }
+
+ return ret;
+}
+#endif /* CONFIG_COMPAT */
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+#ifdef CONFIG_COMPAT
+ /*
+ * Core dumping of 32-bit tasks or compat ptrace requests must use the
+ * user_aarch32_view compatible with arm32. Native ptrace requests on
+ * 32-bit children use an extended user_aarch32_ptrace_view to allow
+ * access to the TLS register.
+ */
+ if (is_compat_task())
+ return &user_aarch32_view;
+ else if (is_compat_thread(task_thread_info(task)))
+ return &user_aarch32_ptrace_view;
+#endif
+ return &user_aarch64_view;
+}
+
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
+{
+ switch (request) {
+ case PTRACE_PEEKMTETAGS:
+ case PTRACE_POKEMTETAGS:
+ return mte_ptrace_copy_tags(child, request, addr, data);
+ }
+
+ return ptrace_request(child, request, addr, data);
+}
+
+enum ptrace_syscall_dir {
+ PTRACE_SYSCALL_ENTER = 0,
+ PTRACE_SYSCALL_EXIT,
+};
+
+static void tracehook_report_syscall(struct pt_regs *regs,
+ enum ptrace_syscall_dir dir)
+{
+ int regno;
+ unsigned long saved_reg;
+
+ /*
+ * We have some ABI weirdness here in the way that we handle syscall
+ * exit stops because we indicate whether or not the stop has been
+ * signalled from syscall entry or syscall exit by clobbering a general
+ * purpose register (ip/r12 for AArch32, x7 for AArch64) in the tracee
+ * and restoring its old value after the stop. This means that:
+ *
+ * - Any writes by the tracer to this register during the stop are
+ * ignored/discarded.
+ *
+ * - The actual value of the register is not available during the stop,
+ * so the tracer cannot save it and restore it later.
+ *
+ * - Syscall stops behave differently to seccomp and pseudo-step traps
+ * (the latter do not nobble any registers).
+ */
+ regno = (is_compat_task() ? 12 : 7);
+ saved_reg = regs->regs[regno];
+ regs->regs[regno] = dir;
+
+ if (dir == PTRACE_SYSCALL_ENTER) {
+ if (tracehook_report_syscall_entry(regs))
+ forget_syscall(regs);
+ regs->regs[regno] = saved_reg;
+ } else if (!test_thread_flag(TIF_SINGLESTEP)) {
+ tracehook_report_syscall_exit(regs, 0);
+ regs->regs[regno] = saved_reg;
+ } else {
+ regs->regs[regno] = saved_reg;
+
+ /*
+ * Signal a pseudo-step exception since we are stepping but
+ * tracer modifications to the registers may have rewound the
+ * state machine.
+ */
+ tracehook_report_syscall_exit(regs, 1);
+ }
+}
+
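A hedged user-space sketch of the convention described above: a 64-bit tracer can distinguish a syscall-entry stop from a syscall-exit stop by reading x7 (an AArch32 tracee uses r12 instead). The tid is assumed to be in a syscall stop, error handling is trimmed, and the function itself is hypothetical.

    #include <elf.h>                /* NT_PRSTATUS */
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>
    #include <asm/ptrace.h>         /* struct user_pt_regs */

    static int syscall_stop_is_exit(pid_t tid)
    {
            struct user_pt_regs regs;
            struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };

            if (ptrace(PTRACE_GETREGSET, tid, NT_PRSTATUS, &iov))
                    return -1;

            /* PTRACE_SYSCALL_ENTER is written as 0, PTRACE_SYSCALL_EXIT as 1. */
            return regs.regs[7] == 1;
    }
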
+int syscall_trace_enter(struct pt_regs *regs)
+{
+ unsigned long flags = READ_ONCE(current_thread_info()->flags);
+
+ if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) {
+ tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
+ if (flags & _TIF_SYSCALL_EMU)
+ return NO_SYSCALL;
+ }
+
+ /* Do the secure computing after ptrace; failures should be fast. */
+ if (secure_computing() == -1)
+ return NO_SYSCALL;
+
+ if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+ trace_sys_enter(regs, regs->syscallno);
+
+ audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
+ regs->regs[2], regs->regs[3]);
+
+ return regs->syscallno;
+}
+
+void syscall_trace_exit(struct pt_regs *regs)
+{
+ unsigned long flags = READ_ONCE(current_thread_info()->flags);
+
+ audit_syscall_exit(regs);
+
+ if (flags & _TIF_SYSCALL_TRACEPOINT)
+ trace_sys_exit(regs, syscall_get_return_value(current, regs));
+
+ if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP))
+ tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
+
+ rseq_syscall(regs);
+}
+
+/*
+ * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
+ * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is
+ * not described in ARM DDI 0487D.a.
+ * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may
+ * be allocated an EL0 meaning in future.
+ * Userspace cannot use these until they have an architectural meaning.
+ * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
+ * We also reserve IL for the kernel; SS is handled dynamically.
+ */
+#define SPSR_EL1_AARCH64_RES0_BITS \
+ (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 26) | GENMASK_ULL(23, 22) | \
+ GENMASK_ULL(20, 13) | GENMASK_ULL(5, 5))
+#define SPSR_EL1_AARCH32_RES0_BITS \
+ (GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))
+
+static int valid_compat_regs(struct user_pt_regs *regs)
+{
+ regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;
+
+ if (!system_supports_mixed_endian_el0()) {
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ regs->pstate |= PSR_AA32_E_BIT;
+ else
+ regs->pstate &= ~PSR_AA32_E_BIT;
+ }
+
+ if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
+ (regs->pstate & PSR_AA32_A_BIT) == 0 &&
+ (regs->pstate & PSR_AA32_I_BIT) == 0 &&
+ (regs->pstate & PSR_AA32_F_BIT) == 0) {
+ return 1;
+ }
+
+ /*
+ * Force PSR to a valid 32-bit EL0t, preserving the same bits as
+ * arch/arm.
+ */
+ regs->pstate &= PSR_AA32_N_BIT | PSR_AA32_Z_BIT |
+ PSR_AA32_C_BIT | PSR_AA32_V_BIT |
+ PSR_AA32_Q_BIT | PSR_AA32_IT_MASK |
+ PSR_AA32_GE_MASK | PSR_AA32_E_BIT |
+ PSR_AA32_T_BIT;
+ regs->pstate |= PSR_MODE32_BIT;
+
+ return 0;
+}
+
+static int valid_native_regs(struct user_pt_regs *regs)
+{
+ regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;
+
+ if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
+ (regs->pstate & PSR_D_BIT) == 0 &&
+ (regs->pstate & PSR_A_BIT) == 0 &&
+ (regs->pstate & PSR_I_BIT) == 0 &&
+ (regs->pstate & PSR_F_BIT) == 0) {
+ return 1;
+ }
+
+ /* Force PSR to a valid 64-bit EL0t */
+ regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;
+
+ return 0;
+}
+
+/*
+ * Are the current registers suitable for user mode? (used to maintain
+ * security in signal handlers)
+ */
+int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
+{
+ /* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */
+ user_regs_reset_single_step(regs, task);
+
+ if (is_compat_thread(task_thread_info(task)))
+ return valid_compat_regs(regs);
+ else
+ return valid_native_regs(regs);
+}
diff --git a/arch/arm64/kernel/reloc_test_core.c b/arch/arm64/kernel/reloc_test_core.c
new file mode 100644
index 000000000..e87a2b7f2
--- /dev/null
+++ b/arch/arm64/kernel/reloc_test_core.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017 Linaro, Ltd. <ard.biesheuvel@linaro.org>
+ */
+
+#include <linux/module.h>
+
+int sym64_rel;
+
+#define SYM64_ABS_VAL 0xffff880000cccccc
+#define SYM32_ABS_VAL 0xf800cccc
+#define SYM16_ABS_VAL 0xf8cc
+
+#define __SET_ABS(name, val) asm(".globl " #name "; .set "#name ", " #val)
+#define SET_ABS(name, val) __SET_ABS(name, val)
+
+SET_ABS(sym64_abs, SYM64_ABS_VAL);
+SET_ABS(sym32_abs, SYM32_ABS_VAL);
+SET_ABS(sym16_abs, SYM16_ABS_VAL);
+
+asmlinkage u64 absolute_data64(void);
+asmlinkage u64 absolute_data32(void);
+asmlinkage u64 absolute_data16(void);
+asmlinkage u64 signed_movw(void);
+asmlinkage u64 unsigned_movw(void);
+asmlinkage u64 relative_adrp(void);
+asmlinkage u64 relative_adrp_far(void);
+asmlinkage u64 relative_adr(void);
+asmlinkage u64 relative_data64(void);
+asmlinkage u64 relative_data32(void);
+asmlinkage u64 relative_data16(void);
+
+static struct {
+ char name[32];
+ u64 (*f)(void);
+ u64 expect;
+} const funcs[] = {
+ { "R_AARCH64_ABS64", absolute_data64, UL(SYM64_ABS_VAL) },
+ { "R_AARCH64_ABS32", absolute_data32, UL(SYM32_ABS_VAL) },
+ { "R_AARCH64_ABS16", absolute_data16, UL(SYM16_ABS_VAL) },
+ { "R_AARCH64_MOVW_SABS_Gn", signed_movw, UL(SYM64_ABS_VAL) },
+ { "R_AARCH64_MOVW_UABS_Gn", unsigned_movw, UL(SYM64_ABS_VAL) },
+ { "R_AARCH64_ADR_PREL_PG_HI21", relative_adrp, (u64)&sym64_rel },
+ { "R_AARCH64_ADR_PREL_PG_HI21", relative_adrp_far, (u64)&memstart_addr },
+ { "R_AARCH64_ADR_PREL_LO21", relative_adr, (u64)&sym64_rel },
+ { "R_AARCH64_PREL64", relative_data64, (u64)&sym64_rel },
+ { "R_AARCH64_PREL32", relative_data32, (u64)&sym64_rel },
+ { "R_AARCH64_PREL16", relative_data16, (u64)&sym64_rel },
+};
+
+static int reloc_test_init(void)
+{
+ int i;
+
+ pr_info("Relocation test:\n");
+ pr_info("-------------------------------------------------------\n");
+
+ for (i = 0; i < ARRAY_SIZE(funcs); i++) {
+ u64 ret = funcs[i].f();
+
+ pr_info("%-31s 0x%016llx %s\n", funcs[i].name, ret,
+ ret == funcs[i].expect ? "pass" : "fail");
+ if (ret != funcs[i].expect)
+ pr_err("Relocation failed, expected 0x%016llx, not 0x%016llx\n",
+ funcs[i].expect, ret);
+ }
+ return 0;
+}
+
+static void reloc_test_exit(void)
+{
+}
+
+module_init(reloc_test_init);
+module_exit(reloc_test_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/arch/arm64/kernel/reloc_test_syms.S b/arch/arm64/kernel/reloc_test_syms.S
new file mode 100644
index 000000000..c50f45fa2
--- /dev/null
+++ b/arch/arm64/kernel/reloc_test_syms.S
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2017 Linaro, Ltd. <ard.biesheuvel@linaro.org>
+ */
+
+#include <linux/linkage.h>
+
+SYM_FUNC_START(absolute_data64)
+ ldr x0, 0f
+ ret
+0: .quad sym64_abs
+SYM_FUNC_END(absolute_data64)
+
+SYM_FUNC_START(absolute_data32)
+ ldr w0, 0f
+ ret
+0: .long sym32_abs
+SYM_FUNC_END(absolute_data32)
+
+SYM_FUNC_START(absolute_data16)
+ adr x0, 0f
+ ldrh w0, [x0]
+ ret
+0: .short sym16_abs, 0
+SYM_FUNC_END(absolute_data16)
+
+SYM_FUNC_START(signed_movw)
+ movz x0, #:abs_g2_s:sym64_abs
+ movk x0, #:abs_g1_nc:sym64_abs
+ movk x0, #:abs_g0_nc:sym64_abs
+ ret
+SYM_FUNC_END(signed_movw)
+
+SYM_FUNC_START(unsigned_movw)
+ movz x0, #:abs_g3:sym64_abs
+ movk x0, #:abs_g2_nc:sym64_abs
+ movk x0, #:abs_g1_nc:sym64_abs
+ movk x0, #:abs_g0_nc:sym64_abs
+ ret
+SYM_FUNC_END(unsigned_movw)
+
+ .align 12
+ .space 0xff8
+SYM_FUNC_START(relative_adrp)
+ adrp x0, sym64_rel
+ add x0, x0, #:lo12:sym64_rel
+ ret
+SYM_FUNC_END(relative_adrp)
+
+ .align 12
+ .space 0xffc
+SYM_FUNC_START(relative_adrp_far)
+ adrp x0, memstart_addr
+ add x0, x0, #:lo12:memstart_addr
+ ret
+SYM_FUNC_END(relative_adrp_far)
+
+SYM_FUNC_START(relative_adr)
+ adr x0, sym64_rel
+ ret
+SYM_FUNC_END(relative_adr)
+
+SYM_FUNC_START(relative_data64)
+ adr x1, 0f
+ ldr x0, [x1]
+ add x0, x0, x1
+ ret
+0: .quad sym64_rel - .
+SYM_FUNC_END(relative_data64)
+
+SYM_FUNC_START(relative_data32)
+ adr x1, 0f
+ ldr w0, [x1]
+ add x0, x0, x1
+ ret
+0: .long sym64_rel - .
+SYM_FUNC_END(relative_data32)
+
+SYM_FUNC_START(relative_data16)
+ adr x1, 0f
+ ldrsh w0, [x1]
+ add x0, x0, x1
+ ret
+0: .short sym64_rel - ., 0
+SYM_FUNC_END(relative_data16)
diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
new file mode 100644
index 000000000..84eec95ec
--- /dev/null
+++ b/arch/arm64/kernel/relocate_kernel.S
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * kexec for arm64
+ *
+ * Copyright (C) Linaro.
+ * Copyright (C) Huawei Futurewei Technologies.
+ */
+
+#include <linux/kexec.h>
+#include <linux/linkage.h>
+
+#include <asm/assembler.h>
+#include <asm/kexec.h>
+#include <asm/page.h>
+#include <asm/sysreg.h>
+
+/*
+ * arm64_relocate_new_kernel - Put a 2nd stage image in place and boot it.
+ *
+ * The memory that the old kernel occupies may be overwritten when copying the
+ * new image to its final location. To ensure that the
+ * arm64_relocate_new_kernel routine which does that copy is not overwritten,
+ * all code and data needed by arm64_relocate_new_kernel must be between the
+ * symbols arm64_relocate_new_kernel and arm64_relocate_new_kernel_end. The
+ * machine_kexec() routine will copy arm64_relocate_new_kernel to the kexec
+ * control_code_page, a special page which has been set up to be preserved
+ * during the copy operation.
+ */
+SYM_CODE_START(arm64_relocate_new_kernel)
+
+ /* Setup the list loop variables. */
+ mov x18, x2 /* x18 = dtb address */
+ mov x17, x1 /* x17 = kimage_start */
+ mov x16, x0 /* x16 = kimage_head */
+ raw_dcache_line_size x15, x0 /* x15 = dcache line size */
+ mov x14, xzr /* x14 = entry ptr */
+ mov x13, xzr /* x13 = copy dest */
+
+ /* Check if the new image needs relocation. */
+ tbnz x16, IND_DONE_BIT, .Ldone
+
+.Lloop:
+ and x12, x16, PAGE_MASK /* x12 = addr */
+
+ /* Test the entry flags. */
+.Ltest_source:
+ tbz x16, IND_SOURCE_BIT, .Ltest_indirection
+
+ /* Invalidate dest page to PoC. */
+ mov x0, x13
+ add x20, x0, #PAGE_SIZE
+ sub x1, x15, #1
+ bic x0, x0, x1
+2: dc ivac, x0
+ add x0, x0, x15
+ cmp x0, x20
+ b.lo 2b
+ dsb sy
+
+ mov x20, x13
+ mov x21, x12
+ copy_page x20, x21, x0, x1, x2, x3, x4, x5, x6, x7
+
+ /* dest += PAGE_SIZE */
+ add x13, x13, PAGE_SIZE
+ b .Lnext
+
+.Ltest_indirection:
+ tbz x16, IND_INDIRECTION_BIT, .Ltest_destination
+
+ /* ptr = addr */
+ mov x14, x12
+ b .Lnext
+
+.Ltest_destination:
+ tbz x16, IND_DESTINATION_BIT, .Lnext
+
+ /* dest = addr */
+ mov x13, x12
+
+.Lnext:
+ /* entry = *ptr++ */
+ ldr x16, [x14], #8
+
+ /* while (!(entry & DONE)) */
+ tbz x16, IND_DONE_BIT, .Lloop
+
+.Ldone:
+ /* wait for writes from copy_page to finish */
+ dsb nsh
+ ic iallu
+ dsb nsh
+ isb
+
+ /* Start new image. */
+ mov x0, x18
+ mov x1, xzr
+ mov x2, xzr
+ mov x3, xzr
+ br x17
+
+SYM_CODE_END(arm64_relocate_new_kernel)
+
+.align 3 /* To keep the 64-bit values below naturally aligned. */
+
+.Lcopy_end:
+.org KEXEC_CONTROL_PAGE_SIZE
+
+/*
+ * arm64_relocate_new_kernel_size - Number of bytes to copy to the
+ * control_code_page.
+ */
+.globl arm64_relocate_new_kernel_size
+arm64_relocate_new_kernel_size:
+ .quad .Lcopy_end - arm64_relocate_new_kernel
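For orientation, the indirection-list walk that arm64_relocate_new_kernel implements can be sketched in C roughly as follows. This sketch is purely illustrative and is not part of the patch: the real routine has to run from the identity-mapped control page and do its own cache maintenance, which is why it is written in assembly. relocate_pages() is a made-up name; IND_DONE, IND_SOURCE, IND_INDIRECTION and IND_DESTINATION are the flag forms of the *_BIT constants tested above (from <linux/kexec.h>).

/* Hypothetical C rendering of the loop in arm64_relocate_new_kernel. */
static void relocate_pages(unsigned long head)
{
	unsigned long entry = head;
	unsigned long *ptr = NULL;
	char *dest = NULL;

	while (!(entry & IND_DONE)) {
		char *addr = (char *)(entry & PAGE_MASK);

		if (entry & IND_SOURCE) {
			/* the assembly also invalidates dest to PoC first */
			memcpy(dest, addr, PAGE_SIZE);
			dest += PAGE_SIZE;
		} else if (entry & IND_INDIRECTION) {
			/* switch to a new page of list entries */
			ptr = (unsigned long *)addr;
		} else if (entry & IND_DESTINATION) {
			/* record where the next source page is copied to */
			dest = addr;
		}

		entry = *ptr++;		/* entry = *ptr++ */
	}
}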
diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
new file mode 100644
index 000000000..a6d187556
--- /dev/null
+++ b/arch/arm64/kernel/return_address.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * arch/arm64/kernel/return_address.c
+ *
+ * Copyright (C) 2013 Linaro Limited
+ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
+ */
+
+#include <linux/export.h>
+#include <linux/ftrace.h>
+#include <linux/kprobes.h>
+
+#include <asm/stack_pointer.h>
+#include <asm/stacktrace.h>
+
+struct return_address_data {
+ unsigned int level;
+ void *addr;
+};
+
+static bool save_return_addr(void *d, unsigned long pc)
+{
+ struct return_address_data *data = d;
+
+ if (!data->level) {
+ data->addr = (void *)pc;
+ return false;
+ } else {
+ --data->level;
+ return true;
+ }
+}
+NOKPROBE_SYMBOL(save_return_addr);
+
+void *return_address(unsigned int level)
+{
+ struct return_address_data data;
+ struct stackframe frame;
+
+ data.level = level + 2;
+ data.addr = NULL;
+
+ start_backtrace(&frame,
+ (unsigned long)__builtin_frame_address(0),
+ (unsigned long)return_address);
+ walk_stackframe(current, &frame, save_return_addr, &data);
+
+ if (!data.level)
+ return data.addr;
+ else
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(return_address);
+NOKPROBE_SYMBOL(return_address);
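A hedged usage sketch for return_address(): report_caller() below is invented for illustration and is not part of this patch; in-tree users typically reach this function indirectly through the ftrace CALLER_ADDRx helpers.

/* Illustrative only: report which function called us. */
static void report_caller(void)
{
	/* level 0 is our caller, level 1 is our caller's caller, ... */
	void *pc = return_address(0);

	pr_info("called by %pS\n", pc);
}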
diff --git a/arch/arm64/kernel/scs.c b/arch/arm64/kernel/scs.c
new file mode 100644
index 000000000..e8f7ff45d
--- /dev/null
+++ b/arch/arm64/kernel/scs.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Shadow Call Stack support.
+ *
+ * Copyright (C) 2019 Google LLC
+ */
+
+#include <linux/percpu.h>
+#include <linux/scs.h>
+
+DEFINE_SCS(irq_shadow_call_stack);
+
+#ifdef CONFIG_ARM_SDE_INTERFACE
+DEFINE_SCS(sdei_shadow_call_stack_normal);
+DEFINE_SCS(sdei_shadow_call_stack_critical);
+#endif
diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c
new file mode 100644
index 000000000..0083f5afa
--- /dev/null
+++ b/arch/arm64/kernel/sdei.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2017 Arm Ltd.
+#define pr_fmt(fmt) "sdei: " fmt
+
+#include <linux/arm-smccc.h>
+#include <linux/arm_sdei.h>
+#include <linux/hardirq.h>
+#include <linux/irqflags.h>
+#include <linux/sched/task_stack.h>
+#include <linux/uaccess.h>
+
+#include <asm/alternative.h>
+#include <asm/exception.h>
+#include <asm/kprobes.h>
+#include <asm/mmu.h>
+#include <asm/ptrace.h>
+#include <asm/sections.h>
+#include <asm/stacktrace.h>
+#include <asm/sysreg.h>
+#include <asm/vmap_stack.h>
+
+unsigned long sdei_exit_mode;
+
+/*
+ * Kernels with VMAP'd stacks check for stack overflow on exception entry using
+ * sp as a scratch register, meaning SDEI has to switch to its own stack. We
+ * need two stacks, as a critical event may interrupt a normal event that has
+ * just taken a synchronous exception and is using sp as a scratch register.
+ * For a critical event interrupting a normal event, we can't reliably tell if
+ * we were on the SDEI stack.
+ * For now, we allocate stacks when the driver is probed.
+ */
+DECLARE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
+DECLARE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);
+
+#ifdef CONFIG_VMAP_STACK
+DEFINE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
+DEFINE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);
+#endif
+
+DEFINE_PER_CPU(struct sdei_registered_event *, sdei_active_normal_event);
+DEFINE_PER_CPU(struct sdei_registered_event *, sdei_active_critical_event);
+
+static void _free_sdei_stack(unsigned long * __percpu *ptr, int cpu)
+{
+ unsigned long *p;
+
+ p = per_cpu(*ptr, cpu);
+ if (p) {
+ per_cpu(*ptr, cpu) = NULL;
+ vfree(p);
+ }
+}
+
+static void free_sdei_stacks(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ _free_sdei_stack(&sdei_stack_normal_ptr, cpu);
+ _free_sdei_stack(&sdei_stack_critical_ptr, cpu);
+ }
+}
+
+static int _init_sdei_stack(unsigned long * __percpu *ptr, int cpu)
+{
+ unsigned long *p;
+
+ p = arch_alloc_vmap_stack(SDEI_STACK_SIZE, cpu_to_node(cpu));
+ if (!p)
+ return -ENOMEM;
+ per_cpu(*ptr, cpu) = p;
+
+ return 0;
+}
+
+static int init_sdei_stacks(void)
+{
+ int cpu;
+ int err = 0;
+
+ for_each_possible_cpu(cpu) {
+ err = _init_sdei_stack(&sdei_stack_normal_ptr, cpu);
+ if (err)
+ break;
+ err = _init_sdei_stack(&sdei_stack_critical_ptr, cpu);
+ if (err)
+ break;
+ }
+
+ if (err)
+ free_sdei_stacks();
+
+ return err;
+}
+
+static bool on_sdei_normal_stack(unsigned long sp, struct stack_info *info)
+{
+ unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
+ unsigned long high = low + SDEI_STACK_SIZE;
+
+ return on_stack(sp, low, high, STACK_TYPE_SDEI_NORMAL, info);
+}
+
+static bool on_sdei_critical_stack(unsigned long sp, struct stack_info *info)
+{
+ unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
+ unsigned long high = low + SDEI_STACK_SIZE;
+
+ return on_stack(sp, low, high, STACK_TYPE_SDEI_CRITICAL, info);
+}
+
+bool _on_sdei_stack(unsigned long sp, struct stack_info *info)
+{
+ if (!IS_ENABLED(CONFIG_VMAP_STACK))
+ return false;
+
+ if (on_sdei_critical_stack(sp, info))
+ return true;
+
+ if (on_sdei_normal_stack(sp, info))
+ return true;
+
+ return false;
+}
+
+unsigned long sdei_arch_get_entry_point(int conduit)
+{
+ /*
+ * SDEI works between adjacent exception levels. If we booted at EL1 we
+ * assume a hypervisor is marshalling events. If we booted at EL2 and
+ * dropped to EL1 because we don't support VHE, then we can't support
+ * SDEI.
+ */
+ if (is_hyp_mode_available() && !is_kernel_in_hyp_mode()) {
+ pr_err("Not supported on this hardware/boot configuration\n");
+ return 0;
+ }
+
+ if (IS_ENABLED(CONFIG_VMAP_STACK)) {
+ if (init_sdei_stacks())
+ return 0;
+ }
+
+ sdei_exit_mode = (conduit == SMCCC_CONDUIT_HVC) ? SDEI_EXIT_HVC : SDEI_EXIT_SMC;
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ if (arm64_kernel_unmapped_at_el0()) {
+ unsigned long offset;
+
+ offset = (unsigned long)__sdei_asm_entry_trampoline -
+ (unsigned long)__entry_tramp_text_start;
+ return TRAMP_VALIAS + offset;
+ } else
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+ return (unsigned long)__sdei_asm_handler;
+
+}
+
+/*
+ * __sdei_handler() returns one of:
+ * SDEI_EV_HANDLED - success, return to the interrupted context.
+ * SDEI_EV_FAILED - failure, return this error code to firmware.
+ * virtual-address - success, return to this address.
+ */
+static __kprobes unsigned long _sdei_handler(struct pt_regs *regs,
+ struct sdei_registered_event *arg)
+{
+ u32 mode;
+ int i, err = 0;
+ int clobbered_registers = 4;
+ u64 elr = read_sysreg(elr_el1);
+ u32 kernel_mode = read_sysreg(CurrentEL) | 1; /* +SPSel */
+ unsigned long vbar = read_sysreg(vbar_el1);
+
+ if (arm64_kernel_unmapped_at_el0())
+ clobbered_registers++;
+
+ /* Retrieve the missing register values */
+ for (i = 0; i < clobbered_registers; i++) {
+ /* from within the handler, this call always succeeds */
+ sdei_api_event_context(i, &regs->regs[i]);
+ }
+
+ /*
+ * We didn't take an exception to get here, so set PAN. UAO will be cleared
+ * by sdei_event_handler()'s force_uaccess_begin() call.
+ */
+ __uaccess_enable_hw_pan();
+
+ err = sdei_event_handler(regs, arg);
+ if (err)
+ return SDEI_EV_FAILED;
+
+ if (elr != read_sysreg(elr_el1)) {
+ /*
+ * We took a synchronous exception from the SDEI handler.
+ * This could deadlock, and if you interrupt KVM it will
+ * hyp-panic instead.
+ */
+ pr_warn("unsafe: exception during handler\n");
+ }
+
+ mode = regs->pstate & (PSR_MODE32_BIT | PSR_MODE_MASK);
+
+ /*
+ * If we interrupted the kernel with interrupts masked, we always go
+ * back to wherever we came from.
+ */
+ if (mode == kernel_mode && !interrupts_enabled(regs))
+ return SDEI_EV_HANDLED;
+
+ /*
+ * Otherwise, we pretend this was an IRQ. This lets user space tasks
+ * receive signals before we return to them, and lets KVM invoke its
+ * world switch to do the same.
+ *
+ * See DDI0487B.a Table D1-7 'Vector offsets from vector table base
+ * address'.
+ */
+ if (mode == kernel_mode)
+ return vbar + 0x280;
+ else if (mode & PSR_MODE32_BIT)
+ return vbar + 0x680;
+
+ return vbar + 0x480;
+}
+
+
+asmlinkage noinstr unsigned long
+__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
+{
+ unsigned long ret;
+
+ arm64_enter_nmi(regs);
+
+ ret = _sdei_handler(regs, arg);
+
+ arm64_exit_nmi(regs);
+
+ return ret;
+}
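For reference, the three vbar_el1 offsets returned by _sdei_handler() are the IRQ entries of the AArch64 vector table (each vector is 0x80 bytes, each block of four vectors is 0x200 bytes). The named constants below are added purely for illustration and do not exist in this patch:

/* IRQ vector offsets used when faking an interrupt return above. */
#define VEC_IRQ_CURRENT_EL_SPX	0x280	/* kernel interrupted (current EL, SP_ELx) */
#define VEC_IRQ_LOWER_EL_A64	0x480	/* 64-bit user space interrupted */
#define VEC_IRQ_LOWER_EL_A32	0x680	/* 32-bit user space interrupted */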
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
new file mode 100644
index 000000000..eb4b24652
--- /dev/null
+++ b/arch/arm64/kernel/setup.c
@@ -0,0 +1,440 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Based on arch/arm/kernel/setup.c
+ *
+ * Copyright (C) 1995-2001 Russell King
+ * Copyright (C) 2012 ARM Ltd.
+ */
+
+#include <linux/acpi.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/initrd.h>
+#include <linux/console.h>
+#include <linux/cache.h>
+#include <linux/screen_info.h>
+#include <linux/init.h>
+#include <linux/kexec.h>
+#include <linux/root_dev.h>
+#include <linux/cpu.h>
+#include <linux/interrupt.h>
+#include <linux/smp.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/memblock.h>
+#include <linux/of_fdt.h>
+#include <linux/efi.h>
+#include <linux/psci.h>
+#include <linux/sched/task.h>
+#include <linux/mm.h>
+
+#include <asm/acpi.h>
+#include <asm/fixmap.h>
+#include <asm/cpu.h>
+#include <asm/cputype.h>
+#include <asm/daifflags.h>
+#include <asm/elf.h>
+#include <asm/cpufeature.h>
+#include <asm/cpu_ops.h>
+#include <asm/kasan.h>
+#include <asm/numa.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/smp_plat.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/traps.h>
+#include <asm/efi.h>
+#include <asm/xen/hypervisor.h>
+#include <asm/mmu_context.h>
+
+static int num_standard_resources;
+static struct resource *standard_resources;
+
+phys_addr_t __fdt_pointer __initdata;
+
+/*
+ * Standard memory resources
+ */
+static struct resource mem_res[] = {
+ {
+ .name = "Kernel code",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_SYSTEM_RAM
+ },
+ {
+ .name = "Kernel data",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_SYSTEM_RAM
+ }
+};
+
+#define kernel_code mem_res[0]
+#define kernel_data mem_res[1]
+
+/*
+ * The recorded values of x0 .. x3 upon kernel entry.
+ */
+u64 __cacheline_aligned boot_args[4];
+
+void __init smp_setup_processor_id(void)
+{
+ u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
+ set_cpu_logical_map(0, mpidr);
+
+ /*
+ * Clear __my_cpu_offset on the boot CPU to avoid a hang caused by
+ * using percpu variables too early; for example, lockdep will
+ * access a percpu variable inside lock_release().
+ */
+ set_my_cpu_offset(0);
+ pr_info("Booting Linux on physical CPU 0x%010lx [0x%08x]\n",
+ (unsigned long)mpidr, read_cpuid_id());
+}
+
+bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
+{
+ return phys_id == cpu_logical_map(cpu);
+}
+
+struct mpidr_hash mpidr_hash;
+/**
+ * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
+ * level in order to build a linear index from an
+ * MPIDR value. The resulting algorithm is a collision-free
+ * hash carried out through shifting and ORing.
+ */
+static void __init smp_build_mpidr_hash(void)
+{
+ u32 i, affinity, fs[4], bits[4], ls;
+ u64 mask = 0;
+ /*
+ * Pre-scan the list of MPIDRs and filter out bits that do
+ * not contribute to affinity levels, i.e. they never toggle.
+ */
+ for_each_possible_cpu(i)
+ mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
+ pr_debug("mask of set bits %#llx\n", mask);
+ /*
+ * Find and stash the last and first bit set at all affinity levels to
+ * check how many bits are required to represent them.
+ */
+ for (i = 0; i < 4; i++) {
+ affinity = MPIDR_AFFINITY_LEVEL(mask, i);
+ /*
+ * Find the MSB bit and LSB bits position
+ * to determine how many bits are required
+ * to express the affinity level.
+ */
+ ls = fls(affinity);
+ fs[i] = affinity ? ffs(affinity) - 1 : 0;
+ bits[i] = ls - fs[i];
+ }
+ /*
+ * An index can be created from the MPIDR_EL1 by isolating the
+ * significant bits at each affinity level and by shifting
+ * them in order to compress the 32-bit value space into a
+ * smaller, dense set of values. This is equivalent to hashing
+ * the MPIDR_EL1 through shifting and ORing. It is a collision-free
+ * hash, though not minimal, since some levels might contain a number
+ * of CPUs that is not an exact power of 2 and their bit
+ * representation might contain holes, e.g. MPIDR_EL1[7:0] = {0x2, 0x80}.
+ */
+ mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
+ mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
+ mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
+ (bits[1] + bits[0]);
+ mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
+ fs[3] - (bits[2] + bits[1] + bits[0]);
+ mpidr_hash.mask = mask;
+ mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
+ pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
+ mpidr_hash.shift_aff[0],
+ mpidr_hash.shift_aff[1],
+ mpidr_hash.shift_aff[2],
+ mpidr_hash.shift_aff[3],
+ mpidr_hash.mask,
+ mpidr_hash.bits);
+ /*
+ * 4x is an arbitrary value used to warn on a hash table much bigger
+ * than expected on most systems.
+ */
+ if (mpidr_hash_size() > 4 * num_possible_cpus())
+ pr_warn("Large number of MPIDR hash buckets detected\n");
+}
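The consumer of mpidr_hash sits on the CPU resume path and is written in assembly; as a minimal C sketch (illustrative only, mpidr_hash_index() is not a function in this patch), the precomputed shifts collapse an MPIDR into a linear index like this:

/* Hash an MPIDR_EL1 value using the fields set up by smp_build_mpidr_hash(). */
static u32 mpidr_hash_index(u64 mpidr)
{
	u64 masked = mpidr & mpidr_hash.mask;
	u32 index = 0;
	int level;

	/* Shift each 8-bit affinity field down by its precomputed amount. */
	for (level = 0; level < 4; level++)
		index |= (masked & (0xffULL << MPIDR_LEVEL_SHIFT(level))) >>
			 mpidr_hash.shift_aff[level];

	return index;	/* 0 <= index < mpidr_hash_size() */
}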
+
+static void __init setup_machine_fdt(phys_addr_t dt_phys)
+{
+ int size;
+ void *dt_virt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
+ const char *name;
+
+ if (dt_virt)
+ memblock_reserve(dt_phys, size);
+
+ if (!dt_virt || !early_init_dt_scan(dt_virt)) {
+ pr_crit("\n"
+ "Error: invalid device tree blob at physical address %pa (virtual address 0x%p)\n"
+ "The dtb must be 8-byte aligned and must not exceed 2 MB in size\n"
+ "\nPlease check your bootloader.",
+ &dt_phys, dt_virt);
+
+ while (true)
+ cpu_relax();
+ }
+
+ /* Early fixups are done, map the FDT as read-only now */
+ fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);
+
+ name = of_flat_dt_get_machine_name();
+ if (!name)
+ return;
+
+ pr_info("Machine model: %s\n", name);
+ dump_stack_set_arch_desc("%s (DT)", name);
+}
+
+static void __init request_standard_resources(void)
+{
+ struct memblock_region *region;
+ struct resource *res;
+ unsigned long i = 0;
+ size_t res_size;
+
+ kernel_code.start = __pa_symbol(_text);
+ kernel_code.end = __pa_symbol(__init_begin - 1);
+ kernel_data.start = __pa_symbol(_sdata);
+ kernel_data.end = __pa_symbol(_end - 1);
+
+ num_standard_resources = memblock.memory.cnt;
+ res_size = num_standard_resources * sizeof(*standard_resources);
+ standard_resources = memblock_alloc(res_size, SMP_CACHE_BYTES);
+ if (!standard_resources)
+ panic("%s: Failed to allocate %zu bytes\n", __func__, res_size);
+
+ for_each_mem_region(region) {
+ res = &standard_resources[i++];
+ if (memblock_is_nomap(region)) {
+ res->name = "reserved";
+ res->flags = IORESOURCE_MEM;
+ } else {
+ res->name = "System RAM";
+ res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+ }
+ res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
+ res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
+
+ request_resource(&iomem_resource, res);
+
+ if (kernel_code.start >= res->start &&
+ kernel_code.end <= res->end)
+ request_resource(res, &kernel_code);
+ if (kernel_data.start >= res->start &&
+ kernel_data.end <= res->end)
+ request_resource(res, &kernel_data);
+#ifdef CONFIG_KEXEC_CORE
+ /* Userspace will find "Crash kernel" region in /proc/iomem. */
+ if (crashk_res.end && crashk_res.start >= res->start &&
+ crashk_res.end <= res->end)
+ request_resource(res, &crashk_res);
+#endif
+ }
+}
+
+static int __init reserve_memblock_reserved_regions(void)
+{
+ u64 i, j;
+
+ for (i = 0; i < num_standard_resources; ++i) {
+ struct resource *mem = &standard_resources[i];
+ phys_addr_t r_start, r_end, mem_size = resource_size(mem);
+
+ if (!memblock_is_region_reserved(mem->start, mem_size))
+ continue;
+
+ for_each_reserved_mem_range(j, &r_start, &r_end) {
+ resource_size_t start, end;
+
+ start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start);
+ end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end);
+
+ if (start > mem->end || end < mem->start)
+ continue;
+
+ reserve_region_with_split(mem, start, end, "reserved");
+ }
+ }
+
+ return 0;
+}
+arch_initcall(reserve_memblock_reserved_regions);
+
+u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
+
+u64 cpu_logical_map(int cpu)
+{
+ return __cpu_logical_map[cpu];
+}
+
+void __init __no_sanitize_address setup_arch(char **cmdline_p)
+{
+ init_mm.start_code = (unsigned long) _text;
+ init_mm.end_code = (unsigned long) _etext;
+ init_mm.end_data = (unsigned long) _edata;
+ init_mm.brk = (unsigned long) _end;
+
+ *cmdline_p = boot_command_line;
+
+ /*
+ * If we know now that we are going to need KPTI then use non-global
+ * mappings from the start, avoiding the cost of rewriting
+ * everything later.
+ */
+ arm64_use_ng_mappings = kaslr_requires_kpti();
+
+ early_fixmap_init();
+ early_ioremap_init();
+
+ setup_machine_fdt(__fdt_pointer);
+
+ /*
+ * Initialise the static keys early as they may be enabled by the
+ * cpufeature code and early parameters.
+ */
+ jump_label_init();
+ parse_early_param();
+
+ /*
+ * Unmask asynchronous aborts and FIQ after bringing up a possible
+ * earlycon. (Report possible System Errors once we can report that
+ * they occurred.)
+ */
+ local_daif_restore(DAIF_PROCCTX_NOIRQ);
+
+ /*
+ * TTBR0 is only used for the identity mapping at this stage. Make it
+ * point to zero page to avoid speculatively fetching new entries.
+ */
+ cpu_uninstall_idmap();
+
+ xen_early_init();
+ efi_init();
+
+ if (!efi_enabled(EFI_BOOT) && ((u64)_text % MIN_KIMG_ALIGN) != 0)
+ pr_warn(FW_BUG "Kernel image misaligned at boot, please fix your bootloader!");
+
+ arm64_memblock_init();
+
+ paging_init();
+
+ acpi_table_upgrade();
+
+ /* Parse the ACPI tables for possible boot-time configuration */
+ acpi_boot_table_init();
+
+ if (acpi_disabled)
+ unflatten_device_tree();
+
+ bootmem_init();
+
+ kasan_init();
+
+ request_standard_resources();
+
+ early_ioremap_reset();
+
+ if (acpi_disabled)
+ psci_dt_init();
+ else
+ psci_acpi_init();
+
+ init_bootcpu_ops();
+ smp_init_cpus();
+ smp_build_mpidr_hash();
+
+ /* Init percpu seeds for random tags after cpus are set up. */
+ kasan_init_tags();
+
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+ /*
+ * Make sure init_thread_info.ttbr0 always generates translation
+ * faults in case uaccess_enable() is inadvertently called by the init
+ * thread.
+ */
+ init_task.thread_info.ttbr0 = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
+#endif
+
+ if (boot_args[1] || boot_args[2] || boot_args[3]) {
+ pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n"
+ "\tx1: %016llx\n\tx2: %016llx\n\tx3: %016llx\n"
+ "This indicates a broken bootloader or old kernel\n",
+ boot_args[1], boot_args[2], boot_args[3]);
+ }
+}
+
+static inline bool cpu_can_disable(unsigned int cpu)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+ const struct cpu_operations *ops = get_cpu_ops(cpu);
+
+ if (ops && ops->cpu_can_disable)
+ return ops->cpu_can_disable(cpu);
+#endif
+ return false;
+}
+
+static int __init topology_init(void)
+{
+ int i;
+
+ for_each_online_node(i)
+ register_one_node(i);
+
+ for_each_possible_cpu(i) {
+ struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
+ cpu->hotpluggable = cpu_can_disable(i);
+ register_cpu(cpu, i);
+ }
+
+ return 0;
+}
+subsys_initcall(topology_init);
+
+static void dump_kernel_offset(void)
+{
+ const unsigned long offset = kaslr_offset();
+
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && offset > 0) {
+ pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
+ offset, KIMAGE_VADDR);
+ pr_emerg("PHYS_OFFSET: 0x%llx\n", PHYS_OFFSET);
+ } else {
+ pr_emerg("Kernel Offset: disabled\n");
+ }
+}
+
+static int arm64_panic_block_dump(struct notifier_block *self,
+ unsigned long v, void *p)
+{
+ dump_kernel_offset();
+ dump_cpu_features();
+ dump_mem_limit();
+ return 0;
+}
+
+static struct notifier_block arm64_panic_block = {
+ .notifier_call = arm64_panic_block_dump
+};
+
+static int __init register_arm64_panic_block(void)
+{
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &arm64_panic_block);
+ return 0;
+}
+device_initcall(register_arm64_panic_block);
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
new file mode 100644
index 000000000..b6fbbd527
--- /dev/null
+++ b/arch/arm64/kernel/signal.c
@@ -0,0 +1,981 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Based on arch/arm/kernel/signal.c
+ *
+ * Copyright (C) 1995-2009 Russell King
+ * Copyright (C) 2012 ARM Ltd.
+ */
+
+#include <linux/cache.h>
+#include <linux/compat.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/personality.h>
+#include <linux/freezer.h>
+#include <linux/stddef.h>
+#include <linux/uaccess.h>
+#include <linux/sizes.h>
+#include <linux/string.h>
+#include <linux/tracehook.h>
+#include <linux/ratelimit.h>
+#include <linux/syscalls.h>
+
+#include <asm/daifflags.h>
+#include <asm/debug-monitors.h>
+#include <asm/elf.h>
+#include <asm/cacheflush.h>
+#include <asm/ucontext.h>
+#include <asm/unistd.h>
+#include <asm/fpsimd.h>
+#include <asm/ptrace.h>
+#include <asm/syscall.h>
+#include <asm/signal32.h>
+#include <asm/traps.h>
+#include <asm/vdso.h>
+
+/*
+ * Do a signal return; undo the signal stack. These are aligned to 128-bit.
+ */
+struct rt_sigframe {
+ struct siginfo info;
+ struct ucontext uc;
+};
+
+struct frame_record {
+ u64 fp;
+ u64 lr;
+};
+
+struct rt_sigframe_user_layout {
+ struct rt_sigframe __user *sigframe;
+ struct frame_record __user *next_frame;
+
+ unsigned long size; /* size of allocated sigframe data */
+ unsigned long limit; /* largest allowed size */
+
+ unsigned long fpsimd_offset;
+ unsigned long esr_offset;
+ unsigned long sve_offset;
+ unsigned long extra_offset;
+ unsigned long end_offset;
+};
+
+#define BASE_SIGFRAME_SIZE round_up(sizeof(struct rt_sigframe), 16)
+#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
+#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)
+
+static void init_user_layout(struct rt_sigframe_user_layout *user)
+{
+ const size_t reserved_size =
+ sizeof(user->sigframe->uc.uc_mcontext.__reserved);
+
+ memset(user, 0, sizeof(*user));
+ user->size = offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved);
+
+ user->limit = user->size + reserved_size;
+
+ user->limit -= TERMINATOR_SIZE;
+ user->limit -= EXTRA_CONTEXT_SIZE;
+ /* Reserve space for extension and terminator ^ */
+}
+
+static size_t sigframe_size(struct rt_sigframe_user_layout const *user)
+{
+ return round_up(max(user->size, sizeof(struct rt_sigframe)), 16);
+}
+
+/*
+ * Sanity limit on the approximate maximum size of signal frame we'll
+ * try to generate. Stack alignment padding and the frame record are
+ * not taken into account. This limit is not a guarantee and is
+ * NOT ABI.
+ */
+#define SIGFRAME_MAXSZ SZ_64K
+
+static int __sigframe_alloc(struct rt_sigframe_user_layout *user,
+ unsigned long *offset, size_t size, bool extend)
+{
+ size_t padded_size = round_up(size, 16);
+
+ if (padded_size > user->limit - user->size &&
+ !user->extra_offset &&
+ extend) {
+ int ret;
+
+ user->limit += EXTRA_CONTEXT_SIZE;
+ ret = __sigframe_alloc(user, &user->extra_offset,
+ sizeof(struct extra_context), false);
+ if (ret) {
+ user->limit -= EXTRA_CONTEXT_SIZE;
+ return ret;
+ }
+
+ /* Reserve space for the __reserved[] terminator */
+ user->size += TERMINATOR_SIZE;
+
+ /*
+ * Allow expansion up to SIGFRAME_MAXSZ, ensuring space for
+ * the terminator:
+ */
+ user->limit = SIGFRAME_MAXSZ - TERMINATOR_SIZE;
+ }
+
+ /* Still not enough space? Bad luck! */
+ if (padded_size > user->limit - user->size)
+ return -ENOMEM;
+
+ *offset = user->size;
+ user->size += padded_size;
+
+ return 0;
+}
+
+/*
+ * Allocate space for an optional record of <size> bytes in the user
+ * signal frame. The offset from the signal frame base address to the
+ * allocated block is assigned to *offset.
+ */
+static int sigframe_alloc(struct rt_sigframe_user_layout *user,
+ unsigned long *offset, size_t size)
+{
+ return __sigframe_alloc(user, offset, size, true);
+}
+
+/* Allocate the null terminator record and prevent further allocations */
+static int sigframe_alloc_end(struct rt_sigframe_user_layout *user)
+{
+ int ret;
+
+ /* Un-reserve the space reserved for the terminator: */
+ user->limit += TERMINATOR_SIZE;
+
+ ret = sigframe_alloc(user, &user->end_offset,
+ sizeof(struct _aarch64_ctx));
+ if (ret)
+ return ret;
+
+ /* Prevent further allocation: */
+ user->limit = user->size;
+ return 0;
+}
+
+static void __user *apply_user_offset(
+ struct rt_sigframe_user_layout const *user, unsigned long offset)
+{
+ char __user *base = (char __user *)user->sigframe;
+
+ return base + offset;
+}
+
+static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
+{
+ struct user_fpsimd_state const *fpsimd =
+ &current->thread.uw.fpsimd_state;
+ int err;
+
+ /* copy the FP and status/control registers */
+ err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs));
+ __put_user_error(fpsimd->fpsr, &ctx->fpsr, err);
+ __put_user_error(fpsimd->fpcr, &ctx->fpcr, err);
+
+ /* copy the magic/size information */
+ __put_user_error(FPSIMD_MAGIC, &ctx->head.magic, err);
+ __put_user_error(sizeof(struct fpsimd_context), &ctx->head.size, err);
+
+ return err ? -EFAULT : 0;
+}
+
+static int restore_fpsimd_context(struct fpsimd_context __user *ctx)
+{
+ struct user_fpsimd_state fpsimd;
+ __u32 magic, size;
+ int err = 0;
+
+ /* check the magic/size information */
+ __get_user_error(magic, &ctx->head.magic, err);
+ __get_user_error(size, &ctx->head.size, err);
+ if (err)
+ return -EFAULT;
+ if (magic != FPSIMD_MAGIC || size != sizeof(struct fpsimd_context))
+ return -EINVAL;
+
+ /* copy the FP and status/control registers */
+ err = __copy_from_user(fpsimd.vregs, ctx->vregs,
+ sizeof(fpsimd.vregs));
+ __get_user_error(fpsimd.fpsr, &ctx->fpsr, err);
+ __get_user_error(fpsimd.fpcr, &ctx->fpcr, err);
+
+ clear_thread_flag(TIF_SVE);
+
+ /* load the hardware registers from the fpsimd_state structure */
+ if (!err)
+ fpsimd_update_current_state(&fpsimd);
+
+ return err ? -EFAULT : 0;
+}
+
+
+struct user_ctxs {
+ struct fpsimd_context __user *fpsimd;
+ struct sve_context __user *sve;
+};
+
+#ifdef CONFIG_ARM64_SVE
+
+static int preserve_sve_context(struct sve_context __user *ctx)
+{
+ int err = 0;
+ u16 reserved[ARRAY_SIZE(ctx->__reserved)];
+ unsigned int vl = current->thread.sve_vl;
+ unsigned int vq = 0;
+
+ if (test_thread_flag(TIF_SVE))
+ vq = sve_vq_from_vl(vl);
+
+ memset(reserved, 0, sizeof(reserved));
+
+ __put_user_error(SVE_MAGIC, &ctx->head.magic, err);
+ __put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16),
+ &ctx->head.size, err);
+ __put_user_error(vl, &ctx->vl, err);
+ BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
+ err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));
+
+ if (vq) {
+ /*
+ * This assumes that the SVE state has already been saved to
+ * the task struct by calling the function
+ * fpsimd_signal_preserve_current_state().
+ */
+ err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET,
+ current->thread.sve_state,
+ SVE_SIG_REGS_SIZE(vq));
+ }
+
+ return err ? -EFAULT : 0;
+}
+
+static int restore_sve_fpsimd_context(struct user_ctxs *user)
+{
+ int err;
+ unsigned int vq;
+ struct user_fpsimd_state fpsimd;
+ struct sve_context sve;
+
+ if (__copy_from_user(&sve, user->sve, sizeof(sve)))
+ return -EFAULT;
+
+ if (sve.vl != current->thread.sve_vl)
+ return -EINVAL;
+
+ if (sve.head.size <= sizeof(*user->sve)) {
+ clear_thread_flag(TIF_SVE);
+ goto fpsimd_only;
+ }
+
+ vq = sve_vq_from_vl(sve.vl);
+
+ if (sve.head.size < SVE_SIG_CONTEXT_SIZE(vq))
+ return -EINVAL;
+
+ /*
+ * Careful: we are about to __copy_from_user() directly into
+ * thread.sve_state with preemption enabled, so protection is
+ * needed to prevent a racing context switch from writing stale
+ * registers back over the new data.
+ */
+
+ fpsimd_flush_task_state(current);
+ /* From now, fpsimd_thread_switch() won't touch thread.sve_state */
+
+ sve_alloc(current);
+ err = __copy_from_user(current->thread.sve_state,
+ (char __user const *)user->sve +
+ SVE_SIG_REGS_OFFSET,
+ SVE_SIG_REGS_SIZE(vq));
+ if (err)
+ return -EFAULT;
+
+ set_thread_flag(TIF_SVE);
+
+fpsimd_only:
+ /* copy the FP and status/control registers */
+ /* restore_sigframe() already checked that user->fpsimd != NULL. */
+ err = __copy_from_user(fpsimd.vregs, user->fpsimd->vregs,
+ sizeof(fpsimd.vregs));
+ __get_user_error(fpsimd.fpsr, &user->fpsimd->fpsr, err);
+ __get_user_error(fpsimd.fpcr, &user->fpsimd->fpcr, err);
+
+ /* load the hardware registers from the fpsimd_state structure */
+ if (!err)
+ fpsimd_update_current_state(&fpsimd);
+
+ return err ? -EFAULT : 0;
+}
+
+#else /* ! CONFIG_ARM64_SVE */
+
+/* Turn any non-optimised out attempts to use these into a link error: */
+extern int preserve_sve_context(void __user *ctx);
+extern int restore_sve_fpsimd_context(struct user_ctxs *user);
+
+#endif /* ! CONFIG_ARM64_SVE */
+
+
+static int parse_user_sigframe(struct user_ctxs *user,
+ struct rt_sigframe __user *sf)
+{
+ struct sigcontext __user *const sc = &sf->uc.uc_mcontext;
+ struct _aarch64_ctx __user *head;
+ char __user *base = (char __user *)&sc->__reserved;
+ size_t offset = 0;
+ size_t limit = sizeof(sc->__reserved);
+ bool have_extra_context = false;
+ char const __user *const sfp = (char const __user *)sf;
+
+ user->fpsimd = NULL;
+ user->sve = NULL;
+
+ if (!IS_ALIGNED((unsigned long)base, 16))
+ goto invalid;
+
+ while (1) {
+ int err = 0;
+ u32 magic, size;
+ char const __user *userp;
+ struct extra_context const __user *extra;
+ u64 extra_datap;
+ u32 extra_size;
+ struct _aarch64_ctx const __user *end;
+ u32 end_magic, end_size;
+
+ if (limit - offset < sizeof(*head))
+ goto invalid;
+
+ if (!IS_ALIGNED(offset, 16))
+ goto invalid;
+
+ head = (struct _aarch64_ctx __user *)(base + offset);
+ __get_user_error(magic, &head->magic, err);
+ __get_user_error(size, &head->size, err);
+ if (err)
+ return err;
+
+ if (limit - offset < size)
+ goto invalid;
+
+ switch (magic) {
+ case 0:
+ if (size)
+ goto invalid;
+
+ goto done;
+
+ case FPSIMD_MAGIC:
+ if (!system_supports_fpsimd())
+ goto invalid;
+ if (user->fpsimd)
+ goto invalid;
+
+ if (size < sizeof(*user->fpsimd))
+ goto invalid;
+
+ user->fpsimd = (struct fpsimd_context __user *)head;
+ break;
+
+ case ESR_MAGIC:
+ /* ignore */
+ break;
+
+ case SVE_MAGIC:
+ if (!system_supports_sve())
+ goto invalid;
+
+ if (user->sve)
+ goto invalid;
+
+ if (size < sizeof(*user->sve))
+ goto invalid;
+
+ user->sve = (struct sve_context __user *)head;
+ break;
+
+ case EXTRA_MAGIC:
+ if (have_extra_context)
+ goto invalid;
+
+ if (size < sizeof(*extra))
+ goto invalid;
+
+ userp = (char const __user *)head;
+
+ extra = (struct extra_context const __user *)userp;
+ userp += size;
+
+ __get_user_error(extra_datap, &extra->datap, err);
+ __get_user_error(extra_size, &extra->size, err);
+ if (err)
+ return err;
+
+ /* Check for the dummy terminator in __reserved[]: */
+
+ if (limit - offset - size < TERMINATOR_SIZE)
+ goto invalid;
+
+ end = (struct _aarch64_ctx const __user *)userp;
+ userp += TERMINATOR_SIZE;
+
+ __get_user_error(end_magic, &end->magic, err);
+ __get_user_error(end_size, &end->size, err);
+ if (err)
+ return err;
+
+ if (end_magic || end_size)
+ goto invalid;
+
+ /* Prevent looping/repeated parsing of extra_context */
+ have_extra_context = true;
+
+ base = (__force void __user *)extra_datap;
+ if (!IS_ALIGNED((unsigned long)base, 16))
+ goto invalid;
+
+ if (!IS_ALIGNED(extra_size, 16))
+ goto invalid;
+
+ if (base != userp)
+ goto invalid;
+
+ /* Reject "unreasonably large" frames: */
+ if (extra_size > sfp + SIGFRAME_MAXSZ - userp)
+ goto invalid;
+
+ /*
+ * Ignore trailing terminator in __reserved[]
+ * and start parsing extra data:
+ */
+ offset = 0;
+ limit = extra_size;
+
+ if (!access_ok(base, limit))
+ goto invalid;
+
+ continue;
+
+ default:
+ goto invalid;
+ }
+
+ if (size < sizeof(*head))
+ goto invalid;
+
+ if (limit - offset < size)
+ goto invalid;
+
+ offset += size;
+ }
+
+done:
+ return 0;
+
+invalid:
+ return -EINVAL;
+}
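The record stream parsed above is the same one user space sees in a delivered signal frame. As a hedged sketch of the other side of this ABI (find_fpsimd_record() is invented here, takes the sigcontext embedded in the handler's ucontext_t, relies only on the uapi definitions in <asm/sigcontext.h>, and ignores EXTRA_MAGIC indirection for brevity):

#include <stddef.h>
#include <asm/sigcontext.h>	/* struct sigcontext, struct _aarch64_ctx, FPSIMD_MAGIC */

/* Walk __reserved[] until the {0, 0} terminator, looking for the FPSIMD record. */
static struct fpsimd_context *find_fpsimd_record(struct sigcontext *sc)
{
	unsigned char *p = sc->__reserved;

	for (;;) {
		struct _aarch64_ctx *head = (struct _aarch64_ctx *)p;

		if (!head->magic)
			return NULL;		/* terminator reached */
		if (head->magic == FPSIMD_MAGIC)
			return (struct fpsimd_context *)head;
		p += head->size;
	}
}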
+
+static int restore_sigframe(struct pt_regs *regs,
+ struct rt_sigframe __user *sf)
+{
+ sigset_t set;
+ int i, err;
+ struct user_ctxs user;
+
+ err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
+ if (err == 0)
+ set_current_blocked(&set);
+
+ for (i = 0; i < 31; i++)
+ __get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
+ err);
+ __get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
+ __get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
+ __get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);
+
+ /*
+ * Avoid sys_rt_sigreturn() restarting.
+ */
+ forget_syscall(regs);
+
+ err |= !valid_user_regs(&regs->user_regs, current);
+ if (err == 0)
+ err = parse_user_sigframe(&user, sf);
+
+ if (err == 0 && system_supports_fpsimd()) {
+ if (!user.fpsimd)
+ return -EINVAL;
+
+ if (user.sve) {
+ if (!system_supports_sve())
+ return -EINVAL;
+
+ err = restore_sve_fpsimd_context(&user);
+ } else {
+ err = restore_fpsimd_context(user.fpsimd);
+ }
+ }
+
+ return err;
+}
+
+SYSCALL_DEFINE0(rt_sigreturn)
+{
+ struct pt_regs *regs = current_pt_regs();
+ struct rt_sigframe __user *frame;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current->restart_block.fn = do_no_restart_syscall;
+
+ /*
+ * Since we stacked the signal on a 128-bit boundary, 'sp' should
+ * be word aligned here.
+ */
+ if (regs->sp & 15)
+ goto badframe;
+
+ frame = (struct rt_sigframe __user *)regs->sp;
+
+ if (!access_ok(frame, sizeof (*frame)))
+ goto badframe;
+
+ if (restore_sigframe(regs, frame))
+ goto badframe;
+
+ if (restore_altstack(&frame->uc.uc_stack))
+ goto badframe;
+
+ return regs->regs[0];
+
+badframe:
+ arm64_notify_segfault(regs->sp);
+ return 0;
+}
+
+/*
+ * Determine the layout of optional records in the signal frame
+ *
+ * add_all: if true, lays out the biggest possible signal frame for
+ * this task; otherwise, generates a layout for the current state
+ * of the task.
+ */
+static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
+ bool add_all)
+{
+ int err;
+
+ if (system_supports_fpsimd()) {
+ err = sigframe_alloc(user, &user->fpsimd_offset,
+ sizeof(struct fpsimd_context));
+ if (err)
+ return err;
+ }
+
+ /* fault information, if valid */
+ if (add_all || current->thread.fault_code) {
+ err = sigframe_alloc(user, &user->esr_offset,
+ sizeof(struct esr_context));
+ if (err)
+ return err;
+ }
+
+ if (system_supports_sve()) {
+ unsigned int vq = 0;
+
+ if (add_all || test_thread_flag(TIF_SVE)) {
+ int vl = sve_max_vl;
+
+ if (!add_all)
+ vl = current->thread.sve_vl;
+
+ vq = sve_vq_from_vl(vl);
+ }
+
+ err = sigframe_alloc(user, &user->sve_offset,
+ SVE_SIG_CONTEXT_SIZE(vq));
+ if (err)
+ return err;
+ }
+
+ return sigframe_alloc_end(user);
+}
+
+static int setup_sigframe(struct rt_sigframe_user_layout *user,
+ struct pt_regs *regs, sigset_t *set)
+{
+ int i, err = 0;
+ struct rt_sigframe __user *sf = user->sigframe;
+
+ /* set up the stack frame for unwinding */
+ __put_user_error(regs->regs[29], &user->next_frame->fp, err);
+ __put_user_error(regs->regs[30], &user->next_frame->lr, err);
+
+ for (i = 0; i < 31; i++)
+ __put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
+ err);
+ __put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
+ __put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
+ __put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);
+
+ __put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);
+
+ err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
+
+ if (err == 0 && system_supports_fpsimd()) {
+ struct fpsimd_context __user *fpsimd_ctx =
+ apply_user_offset(user, user->fpsimd_offset);
+ err |= preserve_fpsimd_context(fpsimd_ctx);
+ }
+
+ /* fault information, if valid */
+ if (err == 0 && user->esr_offset) {
+ struct esr_context __user *esr_ctx =
+ apply_user_offset(user, user->esr_offset);
+
+ __put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err);
+ __put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err);
+ __put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
+ }
+
+ /* Scalable Vector Extension state, if present */
+ if (system_supports_sve() && err == 0 && user->sve_offset) {
+ struct sve_context __user *sve_ctx =
+ apply_user_offset(user, user->sve_offset);
+ err |= preserve_sve_context(sve_ctx);
+ }
+
+ if (err == 0 && user->extra_offset) {
+ char __user *sfp = (char __user *)user->sigframe;
+ char __user *userp =
+ apply_user_offset(user, user->extra_offset);
+
+ struct extra_context __user *extra;
+ struct _aarch64_ctx __user *end;
+ u64 extra_datap;
+ u32 extra_size;
+
+ extra = (struct extra_context __user *)userp;
+ userp += EXTRA_CONTEXT_SIZE;
+
+ end = (struct _aarch64_ctx __user *)userp;
+ userp += TERMINATOR_SIZE;
+
+ /*
+ * extra_datap is just written to the signal frame.
+ * The value gets cast back to a void __user *
+ * during sigreturn.
+ */
+ extra_datap = (__force u64)userp;
+ extra_size = sfp + round_up(user->size, 16) - userp;
+
+ __put_user_error(EXTRA_MAGIC, &extra->head.magic, err);
+ __put_user_error(EXTRA_CONTEXT_SIZE, &extra->head.size, err);
+ __put_user_error(extra_datap, &extra->datap, err);
+ __put_user_error(extra_size, &extra->size, err);
+
+ /* Add the terminator */
+ __put_user_error(0, &end->magic, err);
+ __put_user_error(0, &end->size, err);
+ }
+
+ /* set the "end" magic */
+ if (err == 0) {
+ struct _aarch64_ctx __user *end =
+ apply_user_offset(user, user->end_offset);
+
+ __put_user_error(0, &end->magic, err);
+ __put_user_error(0, &end->size, err);
+ }
+
+ return err;
+}
+
+static int get_sigframe(struct rt_sigframe_user_layout *user,
+ struct ksignal *ksig, struct pt_regs *regs)
+{
+ unsigned long sp, sp_top;
+ int err;
+
+ init_user_layout(user);
+ err = setup_sigframe_layout(user, false);
+ if (err)
+ return err;
+
+ sp = sp_top = sigsp(regs->sp, ksig);
+
+ sp = round_down(sp - sizeof(struct frame_record), 16);
+ user->next_frame = (struct frame_record __user *)sp;
+
+ sp = round_down(sp, 16) - sigframe_size(user);
+ user->sigframe = (struct rt_sigframe __user *)sp;
+
+ /*
+ * Check that we can actually write to the signal frame.
+ */
+ if (!access_ok(user->sigframe, sp_top - sp))
+ return -EFAULT;
+
+ return 0;
+}
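To make the arithmetic above concrete, the resulting user stack layout (addresses decreasing downwards) is roughly the following; this diagram is added for illustration and is not part of the patch:

/*
 *  sp_top -> original SP (or sigaltstack top)
 *            [padding down to 16-byte alignment]
 *            struct frame_record { fp; lr; }     <- user->next_frame
 *            [padding]
 *            struct rt_sigframe, including the
 *            __reserved[] records laid out above  <- user->sigframe == new SP
 */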
+
+static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
+ struct rt_sigframe_user_layout *user, int usig)
+{
+ __sigrestore_t sigtramp;
+
+ regs->regs[0] = usig;
+ regs->sp = (unsigned long)user->sigframe;
+ regs->regs[29] = (unsigned long)&user->next_frame->fp;
+ regs->pc = (unsigned long)ka->sa.sa_handler;
+
+ /*
+ * Signal delivery is a (wacky) indirect function call in
+ * userspace, so simulate the same setting of BTYPE as a BLR
+ * <register containing the signal handler entry point>.
+ * Signal delivery to a location in a PROT_BTI guarded page
+ * that is not a function entry point will now trigger a
+ * SIGILL in userspace.
+ *
+ * If the signal handler entry point is not in a PROT_BTI
+ * guarded page, this is harmless.
+ */
+ if (system_supports_bti()) {
+ regs->pstate &= ~PSR_BTYPE_MASK;
+ regs->pstate |= PSR_BTYPE_C;
+ }
+
+ /* TCO (Tag Check Override) always cleared for signal handlers */
+ regs->pstate &= ~PSR_TCO_BIT;
+
+ if (ka->sa.sa_flags & SA_RESTORER)
+ sigtramp = ka->sa.sa_restorer;
+ else
+ sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);
+
+ regs->regs[30] = (unsigned long)sigtramp;
+}
+
+static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
+ struct pt_regs *regs)
+{
+ struct rt_sigframe_user_layout user;
+ struct rt_sigframe __user *frame;
+ int err = 0;
+
+ fpsimd_signal_preserve_current_state();
+
+ if (get_sigframe(&user, ksig, regs))
+ return 1;
+
+ frame = user.sigframe;
+
+ __put_user_error(0, &frame->uc.uc_flags, err);
+ __put_user_error(NULL, &frame->uc.uc_link, err);
+
+ err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
+ err |= setup_sigframe(&user, regs, set);
+ if (err == 0) {
+ setup_return(regs, &ksig->ka, &user, usig);
+ if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
+ err |= copy_siginfo_to_user(&frame->info, &ksig->info);
+ regs->regs[1] = (unsigned long)&frame->info;
+ regs->regs[2] = (unsigned long)&frame->uc;
+ }
+ }
+
+ return err;
+}
+
+static void setup_restart_syscall(struct pt_regs *regs)
+{
+ if (is_compat_task())
+ compat_setup_restart_syscall(regs);
+ else
+ regs->regs[8] = __NR_restart_syscall;
+}
+
+/*
+ * OK, we're invoking a handler
+ */
+static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
+{
+ sigset_t *oldset = sigmask_to_save();
+ int usig = ksig->sig;
+ int ret;
+
+ rseq_signal_deliver(ksig, regs);
+
+ /*
+ * Set up the stack frame
+ */
+ if (is_compat_task()) {
+ if (ksig->ka.sa.sa_flags & SA_SIGINFO)
+ ret = compat_setup_rt_frame(usig, ksig, oldset, regs);
+ else
+ ret = compat_setup_frame(usig, ksig, oldset, regs);
+ } else {
+ ret = setup_rt_frame(usig, ksig, oldset, regs);
+ }
+
+ /*
+ * Check that the resulting registers are actually sane.
+ */
+ ret |= !valid_user_regs(&regs->user_regs, current);
+
+ /* Step into the signal handler if we are stepping */
+ signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init with a SIGKILL, even by
+ * mistake.
+ *
+ * Note that we go through the signals twice: once to check the signals that
+ * the kernel can handle, and then we build all the user-level signal handling
+ * stack-frames in one go after that.
+ */
+static void do_signal(struct pt_regs *regs)
+{
+ unsigned long continue_addr = 0, restart_addr = 0;
+ int retval = 0;
+ struct ksignal ksig;
+ bool syscall = in_syscall(regs);
+
+ /*
+ * If we were from a system call, check for system call restarting...
+ */
+ if (syscall) {
+ continue_addr = regs->pc;
+ restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
+ retval = regs->regs[0];
+
+ /*
+ * Avoid additional syscall restarting via ret_to_user.
+ */
+ forget_syscall(regs);
+
+ /*
+ * Prepare for system call restart. We do this here so that a
+ * debugger will see the already changed PC.
+ */
+ switch (retval) {
+ case -ERESTARTNOHAND:
+ case -ERESTARTSYS:
+ case -ERESTARTNOINTR:
+ case -ERESTART_RESTARTBLOCK:
+ regs->regs[0] = regs->orig_x0;
+ regs->pc = restart_addr;
+ break;
+ }
+ }
+
+ /*
+ * Get the signal to deliver. When running under ptrace, at this point
+ * the debugger may change all of our registers.
+ */
+ if (get_signal(&ksig)) {
+ /*
+ * Depending on the signal settings, we may need to revert the
+ * decision to restart the system call, but skip this if a
+ * debugger has chosen to restart at a different PC.
+ */
+ if (regs->pc == restart_addr &&
+ (retval == -ERESTARTNOHAND ||
+ retval == -ERESTART_RESTARTBLOCK ||
+ (retval == -ERESTARTSYS &&
+ !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
+ syscall_set_return_value(current, regs, -EINTR, 0);
+ regs->pc = continue_addr;
+ }
+
+ handle_signal(&ksig, regs);
+ return;
+ }
+
+ /*
+ * Handle restarting a different system call. As above, if a debugger
+ * has chosen to restart at a different PC, ignore the restart.
+ */
+ if (syscall && regs->pc == restart_addr) {
+ if (retval == -ERESTART_RESTARTBLOCK)
+ setup_restart_syscall(regs);
+ user_rewind_single_step(current);
+ }
+
+ restore_saved_sigmask();
+}
+
+asmlinkage void do_notify_resume(struct pt_regs *regs,
+ unsigned long thread_flags)
+{
+ do {
+ /* Check valid user FS if needed */
+ addr_limit_user_check();
+
+ if (thread_flags & _TIF_NEED_RESCHED) {
+ /* Unmask Debug and SError for the next task */
+ local_daif_restore(DAIF_PROCCTX_NOIRQ);
+
+ schedule();
+ } else {
+ local_daif_restore(DAIF_PROCCTX);
+
+ if (thread_flags & _TIF_UPROBE)
+ uprobe_notify_resume(regs);
+
+ if (thread_flags & _TIF_MTE_ASYNC_FAULT) {
+ clear_thread_flag(TIF_MTE_ASYNC_FAULT);
+ send_sig_fault(SIGSEGV, SEGV_MTEAERR,
+ (void __user *)NULL, current);
+ }
+
+ if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
+ do_signal(regs);
+
+ if (thread_flags & _TIF_NOTIFY_RESUME) {
+ tracehook_notify_resume(regs);
+ rseq_handle_notify_resume(NULL, regs);
+ }
+
+ if (thread_flags & _TIF_FOREIGN_FPSTATE)
+ fpsimd_restore_current_state();
+ }
+
+ local_daif_mask();
+ thread_flags = READ_ONCE(current_thread_info()->flags);
+ } while (thread_flags & _TIF_WORK_MASK);
+}
+
+unsigned long __ro_after_init signal_minsigstksz;
+
+/*
+ * Determine the stack space required for guaranteed signal delivery.
+ * This function is used to populate AT_MINSIGSTKSZ at process startup.
+ * cpufeatures setup is assumed to be complete.
+ */
+void __init minsigstksz_setup(void)
+{
+ struct rt_sigframe_user_layout user;
+
+ init_user_layout(&user);
+
+ /*
+ * If this fails, SIGFRAME_MAXSZ needs to be enlarged. It won't
+ * be big enough, but it's our best guess:
+ */
+ if (WARN_ON(setup_sigframe_layout(&user, true)))
+ return;
+
+ signal_minsigstksz = sigframe_size(&user) +
+ round_up(sizeof(struct frame_record), 16) +
+ 16; /* max alignment padding */
+}
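minsigstksz_setup() is what ultimately backs AT_MINSIGSTKSZ. A hedged user-space sketch of consuming that value with plain libc APIs (setup_alt_stack() is invented here and assumes headers that define AT_MINSIGSTKSZ):

#include <signal.h>
#include <stdlib.h>
#include <sys/auxv.h>

/* Size an alternate signal stack using the kernel-provided minimum. */
static int setup_alt_stack(void)
{
	unsigned long min = getauxval(AT_MINSIGSTKSZ);	/* 0 if not provided */
	stack_t ss = { .ss_flags = 0 };

	ss.ss_size = (min > SIGSTKSZ) ? min : SIGSTKSZ;
	ss.ss_sp = malloc(ss.ss_size);
	if (!ss.ss_sp)
		return -1;

	return sigaltstack(&ss, NULL);
}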
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
new file mode 100644
index 000000000..2f507f565
--- /dev/null
+++ b/arch/arm64/kernel/signal32.c
@@ -0,0 +1,459 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Based on arch/arm/kernel/signal.c
+ *
+ * Copyright (C) 1995-2009 Russell King
+ * Copyright (C) 2012 ARM Ltd.
+ * Modified by Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/compat.h>
+#include <linux/signal.h>
+#include <linux/syscalls.h>
+#include <linux/ratelimit.h>
+
+#include <asm/esr.h>
+#include <asm/fpsimd.h>
+#include <asm/signal32.h>
+#include <asm/traps.h>
+#include <linux/uaccess.h>
+#include <asm/unistd.h>
+#include <asm/vdso.h>
+
+struct compat_vfp_sigframe {
+ compat_ulong_t magic;
+ compat_ulong_t size;
+ struct compat_user_vfp {
+ compat_u64 fpregs[32];
+ compat_ulong_t fpscr;
+ } ufp;
+ struct compat_user_vfp_exc {
+ compat_ulong_t fpexc;
+ compat_ulong_t fpinst;
+ compat_ulong_t fpinst2;
+ } ufp_exc;
+} __attribute__((__aligned__(8)));
+
+#define VFP_MAGIC 0x56465001
+#define VFP_STORAGE_SIZE sizeof(struct compat_vfp_sigframe)
+
+#define FSR_WRITE_SHIFT (11)
+
+struct compat_aux_sigframe {
+ struct compat_vfp_sigframe vfp;
+
+ /* Something that isn't a valid magic number for any coprocessor. */
+ unsigned long end_magic;
+} __attribute__((__aligned__(8)));
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
+{
+ compat_sigset_t cset;
+
+ cset.sig[0] = set->sig[0] & 0xffffffffull;
+ cset.sig[1] = set->sig[0] >> 32;
+
+ return copy_to_user(uset, &cset, sizeof(*uset));
+}
+
+static inline int get_sigset_t(sigset_t *set,
+ const compat_sigset_t __user *uset)
+{
+ compat_sigset_t s32;
+
+ if (copy_from_user(&s32, uset, sizeof(*uset)))
+ return -EFAULT;
+
+ set->sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
+ return 0;
+}
+
+/*
+ * VFP save/restore code.
+ *
+ * We have to be careful with endianness, since the fpsimd context-switch
+ * code operates on 128-bit (Q) register values whereas the compat ABI
+ * uses an array of 64-bit (D) registers. Consequently, we need to swap
+ * the two halves of each Q register when running on a big-endian CPU.
+ */
+union __fpsimd_vreg {
+ __uint128_t raw;
+ struct {
+#ifdef __AARCH64EB__
+ u64 hi;
+ u64 lo;
+#else
+ u64 lo;
+ u64 hi;
+#endif
+ };
+};
+
+static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
+{
+ struct user_fpsimd_state const *fpsimd =
+ &current->thread.uw.fpsimd_state;
+ compat_ulong_t magic = VFP_MAGIC;
+ compat_ulong_t size = VFP_STORAGE_SIZE;
+ compat_ulong_t fpscr, fpexc;
+ int i, err = 0;
+
+ /*
+ * Save the hardware registers to the fpsimd_state structure.
+ * Note that this also saves V16-31, which aren't visible
+ * in AArch32.
+ */
+ fpsimd_signal_preserve_current_state();
+
+ /* Place structure header on the stack */
+ __put_user_error(magic, &frame->magic, err);
+ __put_user_error(size, &frame->size, err);
+
+ /*
+ * Now copy the FP registers. Since the registers are packed,
+ * we can copy the prefix we want (V0-V15) as it is.
+ */
+ for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) {
+ union __fpsimd_vreg vreg = {
+ .raw = fpsimd->vregs[i >> 1],
+ };
+
+ __put_user_error(vreg.lo, &frame->ufp.fpregs[i], err);
+ __put_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err);
+ }
+
+ /* Create an AArch32 fpscr from the fpsr and the fpcr. */
+ fpscr = (fpsimd->fpsr & VFP_FPSCR_STAT_MASK) |
+ (fpsimd->fpcr & VFP_FPSCR_CTRL_MASK);
+ __put_user_error(fpscr, &frame->ufp.fpscr, err);
+
+ /*
+ * The exception registers aren't available, so we fake up a
+ * basic FPEXC and zero everything else.
+ */
+ fpexc = (1 << 30);
+ __put_user_error(fpexc, &frame->ufp_exc.fpexc, err);
+ __put_user_error(0, &frame->ufp_exc.fpinst, err);
+ __put_user_error(0, &frame->ufp_exc.fpinst2, err);
+
+ return err ? -EFAULT : 0;
+}
+
+static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame)
+{
+ struct user_fpsimd_state fpsimd;
+ compat_ulong_t magic = VFP_MAGIC;
+ compat_ulong_t size = VFP_STORAGE_SIZE;
+ compat_ulong_t fpscr;
+ int i, err = 0;
+
+ __get_user_error(magic, &frame->magic, err);
+ __get_user_error(size, &frame->size, err);
+
+ if (err)
+ return -EFAULT;
+ if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
+ return -EINVAL;
+
+ /* Copy the FP registers into the start of the fpsimd_state. */
+ for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) {
+ union __fpsimd_vreg vreg;
+
+ __get_user_error(vreg.lo, &frame->ufp.fpregs[i], err);
+ __get_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err);
+ fpsimd.vregs[i >> 1] = vreg.raw;
+ }
+
+ /* Extract the fpsr and the fpcr from the fpscr */
+ __get_user_error(fpscr, &frame->ufp.fpscr, err);
+ fpsimd.fpsr = fpscr & VFP_FPSCR_STAT_MASK;
+ fpsimd.fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
+
+ /*
+ * We don't need to touch the exception registers, so
+ * reload the hardware state.
+ */
+ if (!err)
+ fpsimd_update_current_state(&fpsimd);
+
+ return err ? -EFAULT : 0;
+}
+
+static int compat_restore_sigframe(struct pt_regs *regs,
+ struct compat_sigframe __user *sf)
+{
+ int err;
+ sigset_t set;
+ struct compat_aux_sigframe __user *aux;
+ unsigned long psr;
+
+ err = get_sigset_t(&set, &sf->uc.uc_sigmask);
+ if (err == 0) {
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ set_current_blocked(&set);
+ }
+
+ __get_user_error(regs->regs[0], &sf->uc.uc_mcontext.arm_r0, err);
+ __get_user_error(regs->regs[1], &sf->uc.uc_mcontext.arm_r1, err);
+ __get_user_error(regs->regs[2], &sf->uc.uc_mcontext.arm_r2, err);
+ __get_user_error(regs->regs[3], &sf->uc.uc_mcontext.arm_r3, err);
+ __get_user_error(regs->regs[4], &sf->uc.uc_mcontext.arm_r4, err);
+ __get_user_error(regs->regs[5], &sf->uc.uc_mcontext.arm_r5, err);
+ __get_user_error(regs->regs[6], &sf->uc.uc_mcontext.arm_r6, err);
+ __get_user_error(regs->regs[7], &sf->uc.uc_mcontext.arm_r7, err);
+ __get_user_error(regs->regs[8], &sf->uc.uc_mcontext.arm_r8, err);
+ __get_user_error(regs->regs[9], &sf->uc.uc_mcontext.arm_r9, err);
+ __get_user_error(regs->regs[10], &sf->uc.uc_mcontext.arm_r10, err);
+ __get_user_error(regs->regs[11], &sf->uc.uc_mcontext.arm_fp, err);
+ __get_user_error(regs->regs[12], &sf->uc.uc_mcontext.arm_ip, err);
+ __get_user_error(regs->compat_sp, &sf->uc.uc_mcontext.arm_sp, err);
+ __get_user_error(regs->compat_lr, &sf->uc.uc_mcontext.arm_lr, err);
+ __get_user_error(regs->pc, &sf->uc.uc_mcontext.arm_pc, err);
+ __get_user_error(psr, &sf->uc.uc_mcontext.arm_cpsr, err);
+
+ regs->pstate = compat_psr_to_pstate(psr);
+
+ /*
+ * Avoid compat_sys_sigreturn() restarting.
+ */
+ forget_syscall(regs);
+
+ err |= !valid_user_regs(&regs->user_regs, current);
+
+ aux = (struct compat_aux_sigframe __user *) sf->uc.uc_regspace;
+ if (err == 0 && system_supports_fpsimd())
+ err |= compat_restore_vfp_context(&aux->vfp);
+
+ return err;
+}
+
+COMPAT_SYSCALL_DEFINE0(sigreturn)
+{
+ struct pt_regs *regs = current_pt_regs();
+ struct compat_sigframe __user *frame;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current->restart_block.fn = do_no_restart_syscall;
+
+ /*
+ * Since we stacked the signal on a 64-bit boundary,
+ * 'sp' should be word aligned here. If it's
+ * not, then the user is trying to mess with us.
+ */
+ if (regs->compat_sp & 7)
+ goto badframe;
+
+ frame = (struct compat_sigframe __user *)regs->compat_sp;
+
+ if (!access_ok(frame, sizeof (*frame)))
+ goto badframe;
+
+ if (compat_restore_sigframe(regs, frame))
+ goto badframe;
+
+ return regs->regs[0];
+
+badframe:
+ arm64_notify_segfault(regs->compat_sp);
+ return 0;
+}
+
+COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
+{
+ struct pt_regs *regs = current_pt_regs();
+ struct compat_rt_sigframe __user *frame;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current->restart_block.fn = do_no_restart_syscall;
+
+ /*
+ * Since we stacked the signal on a 64-bit boundary,
+ * 'sp' should be word aligned here. If it's
+ * not, then the user is trying to mess with us.
+ */
+ if (regs->compat_sp & 7)
+ goto badframe;
+
+ frame = (struct compat_rt_sigframe __user *)regs->compat_sp;
+
+ if (!access_ok(frame, sizeof (*frame)))
+ goto badframe;
+
+ if (compat_restore_sigframe(regs, &frame->sig))
+ goto badframe;
+
+ if (compat_restore_altstack(&frame->sig.uc.uc_stack))
+ goto badframe;
+
+ return regs->regs[0];
+
+badframe:
+ arm64_notify_segfault(regs->compat_sp);
+ return 0;
+}
+
+static void __user *compat_get_sigframe(struct ksignal *ksig,
+ struct pt_regs *regs,
+ int framesize)
+{
+ compat_ulong_t sp = sigsp(regs->compat_sp, ksig);
+ void __user *frame;
+
+ /*
+ * ATPCS B01 mandates 8-byte alignment
+ */
+ frame = compat_ptr((compat_uptr_t)((sp - framesize) & ~7));
+
+ /*
+ * Check that we can actually write to the signal frame.
+ */
+ if (!access_ok(frame, framesize))
+ frame = NULL;
+
+ return frame;
+}
+
+static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka,
+ compat_ulong_t __user *rc, void __user *frame,
+ int usig)
+{
+ compat_ulong_t handler = ptr_to_compat(ka->sa.sa_handler);
+ compat_ulong_t retcode;
+ compat_ulong_t spsr = regs->pstate & ~(PSR_f | PSR_AA32_E_BIT);
+ int thumb;
+
+ /* Check if the handler is written for ARM or Thumb */
+ thumb = handler & 1;
+
+ if (thumb)
+ spsr |= PSR_AA32_T_BIT;
+ else
+ spsr &= ~PSR_AA32_T_BIT;
+
+ /* The IT state must be cleared for both ARM and Thumb-2 */
+ spsr &= ~PSR_AA32_IT_MASK;
+
+ /* Restore the original endianness */
+ spsr |= PSR_AA32_ENDSTATE;
+
+ if (ka->sa.sa_flags & SA_RESTORER) {
+ retcode = ptr_to_compat(ka->sa.sa_restorer);
+ } else {
+ /* Set up sigreturn pointer */
+ unsigned int idx = thumb << 1;
+
+ if (ka->sa.sa_flags & SA_SIGINFO)
+ idx += 3;
+
+ retcode = (unsigned long)current->mm->context.sigpage +
+ (idx << 2) + thumb;
+ }
+
+ regs->regs[0] = usig;
+ regs->compat_sp = ptr_to_compat(frame);
+ regs->compat_lr = retcode;
+ regs->pc = handler;
+ regs->pstate = spsr;
+}
+
+static int compat_setup_sigframe(struct compat_sigframe __user *sf,
+ struct pt_regs *regs, sigset_t *set)
+{
+ struct compat_aux_sigframe __user *aux;
+ unsigned long psr = pstate_to_compat_psr(regs->pstate);
+ int err = 0;
+
+ __put_user_error(regs->regs[0], &sf->uc.uc_mcontext.arm_r0, err);
+ __put_user_error(regs->regs[1], &sf->uc.uc_mcontext.arm_r1, err);
+ __put_user_error(regs->regs[2], &sf->uc.uc_mcontext.arm_r2, err);
+ __put_user_error(regs->regs[3], &sf->uc.uc_mcontext.arm_r3, err);
+ __put_user_error(regs->regs[4], &sf->uc.uc_mcontext.arm_r4, err);
+ __put_user_error(regs->regs[5], &sf->uc.uc_mcontext.arm_r5, err);
+ __put_user_error(regs->regs[6], &sf->uc.uc_mcontext.arm_r6, err);
+ __put_user_error(regs->regs[7], &sf->uc.uc_mcontext.arm_r7, err);
+ __put_user_error(regs->regs[8], &sf->uc.uc_mcontext.arm_r8, err);
+ __put_user_error(regs->regs[9], &sf->uc.uc_mcontext.arm_r9, err);
+ __put_user_error(regs->regs[10], &sf->uc.uc_mcontext.arm_r10, err);
+ __put_user_error(regs->regs[11], &sf->uc.uc_mcontext.arm_fp, err);
+ __put_user_error(regs->regs[12], &sf->uc.uc_mcontext.arm_ip, err);
+ __put_user_error(regs->compat_sp, &sf->uc.uc_mcontext.arm_sp, err);
+ __put_user_error(regs->compat_lr, &sf->uc.uc_mcontext.arm_lr, err);
+ __put_user_error(regs->pc, &sf->uc.uc_mcontext.arm_pc, err);
+ __put_user_error(psr, &sf->uc.uc_mcontext.arm_cpsr, err);
+
+ __put_user_error((compat_ulong_t)0, &sf->uc.uc_mcontext.trap_no, err);
+ /* set the compat FSR WnR */
+ __put_user_error(!!(current->thread.fault_code & ESR_ELx_WNR) <<
+ FSR_WRITE_SHIFT, &sf->uc.uc_mcontext.error_code, err);
+ __put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);
+ __put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);
+
+ err |= put_sigset_t(&sf->uc.uc_sigmask, set);
+
+ aux = (struct compat_aux_sigframe __user *) sf->uc.uc_regspace;
+
+ if (err == 0 && system_supports_fpsimd())
+ err |= compat_preserve_vfp_context(&aux->vfp);
+ __put_user_error(0, &aux->end_magic, err);
+
+ return err;
+}
+
+/*
+ * 32-bit signal handling routines called from signal.c
+ */
+int compat_setup_rt_frame(int usig, struct ksignal *ksig,
+ sigset_t *set, struct pt_regs *regs)
+{
+ struct compat_rt_sigframe __user *frame;
+ int err = 0;
+
+ frame = compat_get_sigframe(ksig, regs, sizeof(*frame));
+
+ if (!frame)
+ return 1;
+
+ err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
+
+ __put_user_error(0, &frame->sig.uc.uc_flags, err);
+ __put_user_error(0, &frame->sig.uc.uc_link, err);
+
+ err |= __compat_save_altstack(&frame->sig.uc.uc_stack, regs->compat_sp);
+
+ err |= compat_setup_sigframe(&frame->sig, regs, set);
+
+ if (err == 0) {
+ compat_setup_return(regs, &ksig->ka, frame->sig.retcode, frame, usig);
+ regs->regs[1] = (compat_ulong_t)(unsigned long)&frame->info;
+ regs->regs[2] = (compat_ulong_t)(unsigned long)&frame->sig.uc;
+ }
+
+ return err;
+}
+
+int compat_setup_frame(int usig, struct ksignal *ksig, sigset_t *set,
+ struct pt_regs *regs)
+{
+ struct compat_sigframe __user *frame;
+ int err = 0;
+
+ frame = compat_get_sigframe(ksig, regs, sizeof(*frame));
+
+ if (!frame)
+ return 1;
+
+ __put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);
+
+ err |= compat_setup_sigframe(frame, regs, set);
+ if (err == 0)
+ compat_setup_return(regs, &ksig->ka, frame->retcode, frame, usig);
+
+ return err;
+}
+
+void compat_setup_restart_syscall(struct pt_regs *regs)
+{
+ regs->regs[7] = __NR_compat_restart_syscall;
+}
diff --git a/arch/arm64/kernel/sigreturn32.S b/arch/arm64/kernel/sigreturn32.S
new file mode 100644
index 000000000..475d30d47
--- /dev/null
+++ b/arch/arm64/kernel/sigreturn32.S
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * AArch32 sigreturn code.
+ * Based on the kuser helpers in arch/arm/kernel/entry-armv.S.
+ *
+ * Copyright (C) 2005-2011 Nicolas Pitre <nico@fluxnic.net>
+ * Copyright (C) 2012-2018 ARM Ltd.
+ *
+ * For ARM syscalls, the syscall number has to be loaded into r7.
+ * We do not support an OABI userspace.
+ *
+ * For Thumb syscalls, we also pass the syscall number via r7. We therefore
+ * need two 16-bit instructions.
+ */
+
+#include <asm/unistd.h>
+
+ .globl __aarch32_sigret_code_start
+__aarch32_sigret_code_start:
+
+ /*
+ * ARM Code
+ */
+ .byte __NR_compat_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_sigreturn
+ .byte __NR_compat_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_sigreturn
+
+ /*
+ * Thumb code
+ */
+ .byte __NR_compat_sigreturn, 0x27 // mov r7, #__NR_compat_sigreturn
+ .byte __NR_compat_sigreturn, 0xdf // svc #__NR_compat_sigreturn
+
+ /*
+ * ARM code
+ */
+ .byte __NR_compat_rt_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_rt_sigreturn
+ .byte __NR_compat_rt_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_rt_sigreturn
+
+ /*
+ * Thumb code
+ */
+ .byte __NR_compat_rt_sigreturn, 0x27 // mov r7, #__NR_compat_rt_sigreturn
+ .byte __NR_compat_rt_sigreturn, 0xdf // svc #__NR_compat_rt_sigreturn
+
+ .globl __aarch32_sigret_code_end
+__aarch32_sigret_code_end:
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
new file mode 100644
index 000000000..ba40d5775
--- /dev/null
+++ b/arch/arm64/kernel/sleep.S
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/assembler.h>
+#include <asm/smp.h>
+
+ .text
+/*
+ * Implementation of MPIDR_EL1 hash algorithm through shifting
+ * and OR'ing.
+ *
+ * @dst: register containing hash result
+ * @rs0: register containing affinity level 0 bit shift
+ * @rs1: register containing affinity level 1 bit shift
+ * @rs2: register containing affinity level 2 bit shift
+ * @rs3: register containing affinity level 3 bit shift
+ * @mpidr: register containing MPIDR_EL1 value
+ * @mask: register containing MPIDR mask
+ *
+ * Pseudo C-code:
+ *
+ *u32 dst;
+ *
+ *compute_mpidr_hash(u32 rs0, u32 rs1, u32 rs2, u32 rs3, u64 mpidr, u64 mask) {
+ * u32 aff0, aff1, aff2, aff3;
+ * u64 mpidr_masked = mpidr & mask;
+ * aff0 = mpidr_masked & 0xff;
+ * aff1 = mpidr_masked & 0xff00;
+ * aff2 = mpidr_masked & 0xff0000;
+ * aff3 = mpidr_masked & 0xff00000000;
+ * dst = (aff0 >> rs0 | aff1 >> rs1 | aff2 >> rs2 | aff3 >> rs3);
+ *}
+ * Input registers: rs0, rs1, rs2, rs3, mpidr, mask
+ * Output register: dst
+ * Note: input and output registers must be disjoint register sets
+ * (e.g. a macro instance with mpidr = x1 and dst = x1 is invalid)
+ */
+ .macro compute_mpidr_hash dst, rs0, rs1, rs2, rs3, mpidr, mask
+ and \mpidr, \mpidr, \mask // mask out MPIDR bits
+ and \dst, \mpidr, #0xff // dst = aff0
+ lsr \dst ,\dst, \rs0 // dst=aff0>>rs0
+ and \mask, \mpidr, #0xff00 // mask = aff1
+ lsr \mask ,\mask, \rs1
+ orr \dst, \dst, \mask // dst|=(aff1>>rs1)
+ and \mask, \mpidr, #0xff0000 // mask = aff2
+ lsr \mask ,\mask, \rs2
+ orr \dst, \dst, \mask // dst|=(aff2>>rs2)
+ and \mask, \mpidr, #0xff00000000 // mask = aff3
+ lsr \mask ,\mask, \rs3
+ orr \dst, \dst, \mask // dst|=(aff3>>rs3)
+ .endm
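+/*
+ * Worked example with assumed, illustrative parameters: if aff0 uses two
+ * bits and aff1 sits directly above it in the hash, the inputs might be
+ * mask = 0xffff, rs0 = 0, rs1 = 6, rs2 = 16, rs3 = 32. A CPU with
+ * MPIDR_EL1 = 0x0201 (aff1 = 2, aff0 = 1) then hashes to
+ * dst = (0x01 >> 0) | (0x0200 >> 6) = 0x1 | 0x8 = 0x9.
+ */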
+/*
+ * Save CPU state in the provided sleep_stack_data area, and publish its
+ * location for cpu_resume()'s use in sleep_save_stash.
+ *
+ * cpu_resume() will restore this saved state, and return. Because the
+ * link-register is saved and restored, it will appear to return from this
+ * function. So that the caller can tell the suspend/resume paths apart,
+ * __cpu_suspend_enter() will always return a non-zero value, whereas the
+ * path through cpu_resume() will return 0.
+ *
+ * x0 = struct sleep_stack_data area
+ */
+SYM_FUNC_START(__cpu_suspend_enter)
+ stp x29, lr, [x0, #SLEEP_STACK_DATA_CALLEE_REGS]
+ stp x19, x20, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+16]
+ stp x21, x22, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+32]
+ stp x23, x24, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+48]
+ stp x25, x26, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+64]
+ stp x27, x28, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+80]
+
+ /* save the sp in cpu_suspend_ctx */
+ mov x2, sp
+ str x2, [x0, #SLEEP_STACK_DATA_SYSTEM_REGS + CPU_CTX_SP]
+
+ /* find the mpidr_hash */
+ ldr_l x1, sleep_save_stash
+ mrs x7, mpidr_el1
+ adr_l x9, mpidr_hash
+ ldr x10, [x9, #MPIDR_HASH_MASK]
+ /*
+ * The following code relies on the sizes of the
+ * struct mpidr_hash members.
+ */
+ ldp w3, w4, [x9, #MPIDR_HASH_SHIFTS]
+ ldp w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)]
+ compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10
+ add x1, x1, x8, lsl #3
+
+ str x0, [x1]
+ add x0, x0, #SLEEP_STACK_DATA_SYSTEM_REGS
+ stp x29, lr, [sp, #-16]!
+ bl cpu_do_suspend
+ ldp x29, lr, [sp], #16
+ mov x0, #1
+ ret
+SYM_FUNC_END(__cpu_suspend_enter)
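+
+/*
+ * Typical caller pattern, sketched from cpu_suspend() in suspend.c:
+ *
+ *	if (__cpu_suspend_enter(&state))
+ *		ret = fn(arg);		// suspend path
+ *	else
+ *		__cpu_suspend_exit();	// resume path, reached via cpu_resume()
+ */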
+
+ .pushsection ".idmap.text", "awx"
+SYM_CODE_START(cpu_resume)
+ bl el2_setup // if in EL2 drop to EL1 cleanly
+ bl __cpu_setup
+ /* enable the MMU early - so we can access sleep_save_stash by va */
+ adrp x1, swapper_pg_dir
+ bl __enable_mmu
+ ldr x8, =_cpu_resume
+ br x8
+SYM_CODE_END(cpu_resume)
+ .ltorg
+ .popsection
+
+SYM_FUNC_START(_cpu_resume)
+ mrs x1, mpidr_el1
+ adr_l x8, mpidr_hash // x8 = struct mpidr_hash virt address
+
+ /* retrieve mpidr_hash members to compute the hash */
+ ldr x2, [x8, #MPIDR_HASH_MASK]
+ ldp w3, w4, [x8, #MPIDR_HASH_SHIFTS]
+ ldp w5, w6, [x8, #(MPIDR_HASH_SHIFTS + 8)]
+ compute_mpidr_hash x7, x3, x4, x5, x6, x1, x2
+
+ /* x7 contains hash index, let's use it to grab context pointer */
+ ldr_l x0, sleep_save_stash
+ ldr x0, [x0, x7, lsl #3]
+ add x29, x0, #SLEEP_STACK_DATA_CALLEE_REGS
+ add x0, x0, #SLEEP_STACK_DATA_SYSTEM_REGS
+ /* load sp from context */
+ ldr x2, [x0, #CPU_CTX_SP]
+ mov sp, x2
+ /*
+ * cpu_do_resume expects x0 to contain context address pointer
+ */
+ bl cpu_do_resume
+
+#ifdef CONFIG_KASAN
+ mov x0, sp
+ bl kasan_unpoison_task_stack_below
+#endif
+
+ ldp x19, x20, [x29, #16]
+ ldp x21, x22, [x29, #32]
+ ldp x23, x24, [x29, #48]
+ ldp x25, x26, [x29, #64]
+ ldp x27, x28, [x29, #80]
+ ldp x29, lr, [x29]
+ mov x0, #0
+ ret
+SYM_FUNC_END(_cpu_resume)
diff --git a/arch/arm64/kernel/smccc-call.S b/arch/arm64/kernel/smccc-call.S
new file mode 100644
index 000000000..d62447964
--- /dev/null
+++ b/arch/arm64/kernel/smccc-call.S
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015, Linaro Limited
+ */
+#include <linux/linkage.h>
+#include <linux/arm-smccc.h>
+
+#include <asm/asm-offsets.h>
+#include <asm/assembler.h>
+
+ .macro SMCCC instr
+ \instr #0
+ ldr x4, [sp]
+ stp x0, x1, [x4, #ARM_SMCCC_RES_X0_OFFS]
+ stp x2, x3, [x4, #ARM_SMCCC_RES_X2_OFFS]
+ ldr x4, [sp, #8]
+ cbz x4, 1f /* no quirk structure */
+ ldr x9, [x4, #ARM_SMCCC_QUIRK_ID_OFFS]
+ cmp x9, #ARM_SMCCC_QUIRK_QCOM_A6
+ b.ne 1f
+ str x6, [x4, ARM_SMCCC_QUIRK_STATE_OFFS]
+1: ret
+ .endm
+
+/*
+ * void arm_smccc_smc(unsigned long a0, unsigned long a1, unsigned long a2,
+ * unsigned long a3, unsigned long a4, unsigned long a5,
+ * unsigned long a6, unsigned long a7, struct arm_smccc_res *res,
+ * struct arm_smccc_quirk *quirk)
+ */
+SYM_FUNC_START(__arm_smccc_smc)
+ SMCCC smc
+SYM_FUNC_END(__arm_smccc_smc)
+EXPORT_SYMBOL(__arm_smccc_smc)
+
+/*
+ * void arm_smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2,
+ * unsigned long a3, unsigned long a4, unsigned long a5,
+ * unsigned long a6, unsigned long a7, struct arm_smccc_res *res,
+ * struct arm_smccc_quirk *quirk)
+ */
+SYM_FUNC_START(__arm_smccc_hvc)
+ SMCCC hvc
+SYM_FUNC_END(__arm_smccc_hvc)
+EXPORT_SYMBOL(__arm_smccc_hvc)
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
new file mode 100644
index 000000000..ae0977b63
--- /dev/null
+++ b/arch/arm64/kernel/smp.c
@@ -0,0 +1,1131 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SMP initialisation and IPI support
+ * Based on arch/arm/kernel/smp.c
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ */
+
+#include <linux/acpi.h>
+#include <linux/arm_sdei.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/hotplug.h>
+#include <linux/sched/task_stack.h>
+#include <linux/interrupt.h>
+#include <linux/cache.h>
+#include <linux/profile.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/err.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <linux/seq_file.h>
+#include <linux/irq.h>
+#include <linux/irqchip/arm-gic-v3.h>
+#include <linux/percpu.h>
+#include <linux/clockchips.h>
+#include <linux/completion.h>
+#include <linux/of.h>
+#include <linux/irq_work.h>
+#include <linux/kernel_stat.h>
+#include <linux/kexec.h>
+#include <linux/kvm_host.h>
+
+#include <asm/alternative.h>
+#include <asm/atomic.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu.h>
+#include <asm/cputype.h>
+#include <asm/cpu_ops.h>
+#include <asm/daifflags.h>
+#include <asm/kvm_mmu.h>
+#include <asm/mmu_context.h>
+#include <asm/numa.h>
+#include <asm/processor.h>
+#include <asm/smp_plat.h>
+#include <asm/sections.h>
+#include <asm/tlbflush.h>
+#include <asm/ptrace.h>
+#include <asm/virt.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/ipi.h>
+
+DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
+EXPORT_PER_CPU_SYMBOL(cpu_number);
+
+/*
+ * as from 2.5, kernels no longer have an init_tasks structure
+ * so we need some other way of telling a new secondary core
+ * where to place its SVC stack
+ */
+struct secondary_data secondary_data;
+/* Number of CPUs which aren't online, but looping in kernel text. */
+static int cpus_stuck_in_kernel;
+
+enum ipi_msg_type {
+ IPI_RESCHEDULE,
+ IPI_CALL_FUNC,
+ IPI_CPU_STOP,
+ IPI_CPU_CRASH_STOP,
+ IPI_TIMER,
+ IPI_IRQ_WORK,
+ IPI_WAKEUP,
+ NR_IPI
+};
+
+static int ipi_irq_base __read_mostly;
+static int nr_ipi __read_mostly = NR_IPI;
+static struct irq_desc *ipi_desc[NR_IPI] __read_mostly;
+
+static void ipi_setup(int cpu);
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void ipi_teardown(int cpu);
+static int op_cpu_kill(unsigned int cpu);
+#else
+static inline int op_cpu_kill(unsigned int cpu)
+{
+ return -ENOSYS;
+}
+#endif
+
+
+/*
+ * Boot a secondary CPU, and assign it the specified idle task.
+ * This also gives us the initial stack to use for this CPU.
+ */
+static int boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+ const struct cpu_operations *ops = get_cpu_ops(cpu);
+
+ if (ops->cpu_boot)
+ return ops->cpu_boot(cpu);
+
+ return -EOPNOTSUPP;
+}
+
+static DECLARE_COMPLETION(cpu_running);
+
+int __cpu_up(unsigned int cpu, struct task_struct *idle)
+{
+ int ret;
+ long status;
+
+ /*
+ * We need to tell the secondary core where to find its stack and the
+ * page tables.
+ */
+ secondary_data.task = idle;
+ secondary_data.stack = task_stack_page(idle) + THREAD_SIZE;
+ update_cpu_boot_status(CPU_MMU_OFF);
+ __flush_dcache_area(&secondary_data, sizeof(secondary_data));
+
+ /* Now bring the CPU into our world */
+ ret = boot_secondary(cpu, idle);
+ if (ret) {
+ pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
+ return ret;
+ }
+
+ /*
+ * CPU was successfully started, wait for it to come online or
+ * time out.
+ */
+ wait_for_completion_timeout(&cpu_running,
+ msecs_to_jiffies(5000));
+ if (cpu_online(cpu))
+ return 0;
+
+ pr_crit("CPU%u: failed to come online\n", cpu);
+ secondary_data.task = NULL;
+ secondary_data.stack = NULL;
+ __flush_dcache_area(&secondary_data, sizeof(secondary_data));
+ status = READ_ONCE(secondary_data.status);
+ if (status == CPU_MMU_OFF)
+ status = READ_ONCE(__early_cpu_boot_status);
+
+ switch (status & CPU_BOOT_STATUS_MASK) {
+ default:
+ pr_err("CPU%u: failed in unknown state : 0x%lx\n",
+ cpu, status);
+ cpus_stuck_in_kernel++;
+ break;
+ case CPU_KILL_ME:
+ if (!op_cpu_kill(cpu)) {
+ pr_crit("CPU%u: died during early boot\n", cpu);
+ break;
+ }
+ pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
+ fallthrough;
+ case CPU_STUCK_IN_KERNEL:
+ pr_crit("CPU%u: is stuck in kernel\n", cpu);
+ if (status & CPU_STUCK_REASON_52_BIT_VA)
+ pr_crit("CPU%u: does not support 52-bit VAs\n", cpu);
+ if (status & CPU_STUCK_REASON_NO_GRAN) {
+ pr_crit("CPU%u: does not support %luK granule\n",
+ cpu, PAGE_SIZE / SZ_1K);
+ }
+ cpus_stuck_in_kernel++;
+ break;
+ case CPU_PANIC_KERNEL:
+ panic("CPU%u detected unsupported configuration\n", cpu);
+ }
+
+ return -EIO;
+}
+
+static void init_gic_priority_masking(void)
+{
+ u32 cpuflags;
+
+ if (WARN_ON(!gic_enable_sre()))
+ return;
+
+ cpuflags = read_sysreg(daif);
+
+ WARN_ON(!(cpuflags & PSR_I_BIT));
+
+ gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
+}
+
+/*
+ * This is the secondary CPU boot entry. We're using this CPU's
+ * idle thread stack, but a set of temporary page tables.
+ */
+asmlinkage notrace void secondary_start_kernel(void)
+{
+ u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
+ struct mm_struct *mm = &init_mm;
+ const struct cpu_operations *ops;
+ unsigned int cpu;
+
+ cpu = task_cpu(current);
+ set_my_cpu_offset(per_cpu_offset(cpu));
+
+ /*
+ * All kernel threads share the same mm context; grab a
+ * reference and switch to it.
+ */
+ mmgrab(mm);
+ current->active_mm = mm;
+
+ /*
+ * TTBR0 is only used for the identity mapping at this stage. Make it
+ * point to zero page to avoid speculatively fetching new entries.
+ */
+ cpu_uninstall_idmap();
+
+ if (system_uses_irq_prio_masking())
+ init_gic_priority_masking();
+
+ rcu_cpu_starting(cpu);
+ trace_hardirqs_off();
+
+ /*
+ * If the system has established the capabilities, make sure
+ * this CPU ticks all of those. If it doesn't, the CPU will
+ * fail to come online.
+ */
+ check_local_cpu_capabilities();
+
+ ops = get_cpu_ops(cpu);
+ if (ops->cpu_postboot)
+ ops->cpu_postboot();
+
+ /*
+ * Log the CPU info before it is marked online and might get read.
+ */
+ cpuinfo_store_cpu();
+
+ /*
+ * Enable GIC and timers.
+ */
+ notify_cpu_starting(cpu);
+
+ ipi_setup(cpu);
+
+ store_cpu_topology(cpu);
+ numa_add_cpu(cpu);
+
+ /*
+ * OK, now it's safe to let the boot CPU continue. Wait for
+ * the CPU migration code to notice that the CPU is online
+ * before we continue.
+ */
+ pr_info("CPU%u: Booted secondary processor 0x%010lx [0x%08x]\n",
+ cpu, (unsigned long)mpidr,
+ read_cpuid_id());
+ update_cpu_boot_status(CPU_BOOT_SUCCESS);
+ set_cpu_online(cpu, true);
+ complete(&cpu_running);
+
+ local_daif_restore(DAIF_PROCCTX);
+
+ /*
+ * OK, it's off to the idle thread for us
+ */
+ cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int op_cpu_disable(unsigned int cpu)
+{
+ const struct cpu_operations *ops = get_cpu_ops(cpu);
+
+ /*
+ * If we don't have a cpu_die method, abort before we reach the point
+ * of no return. CPU0 may not have a cpu_ops, so test for it.
+ */
+ if (!ops || !ops->cpu_die)
+ return -EOPNOTSUPP;
+
+ /*
+ * We may need to abort a hot unplug for some other mechanism-specific
+ * reason.
+ */
+ if (ops->cpu_disable)
+ return ops->cpu_disable(cpu);
+
+ return 0;
+}
+
+/*
+ * __cpu_disable runs on the processor to be shut down.
+ */
+int __cpu_disable(void)
+{
+ unsigned int cpu = smp_processor_id();
+ int ret;
+
+ ret = op_cpu_disable(cpu);
+ if (ret)
+ return ret;
+
+ remove_cpu_topology(cpu);
+ numa_remove_cpu(cpu);
+
+ /*
+ * Take this CPU offline. Once we clear this, we can't return,
+ * and we must not schedule until we're ready to give up the cpu.
+ */
+ set_cpu_online(cpu, false);
+ ipi_teardown(cpu);
+
+ /*
+ * OK - migrate IRQs away from this CPU
+ */
+ irq_migrate_all_off_this_cpu();
+
+ return 0;
+}
+
+static int op_cpu_kill(unsigned int cpu)
+{
+ const struct cpu_operations *ops = get_cpu_ops(cpu);
+
+ /*
+ * If we have no means of synchronising with the dying CPU, then assume
+ * that it is really dead. We can only wait for an arbitrary length of
+ * time and hope that it's dead, so let's skip the wait and just hope.
+ */
+ if (!ops->cpu_kill)
+ return 0;
+
+ return ops->cpu_kill(cpu);
+}
+
+/*
+ * Called on the thread which is asking for a CPU to be shut down -
+ * waits until shutdown has completed, or it times out.
+ */
+void __cpu_die(unsigned int cpu)
+{
+ int err;
+
+ if (!cpu_wait_death(cpu, 5)) {
+ pr_crit("CPU%u: cpu didn't die\n", cpu);
+ return;
+ }
+ pr_notice("CPU%u: shutdown\n", cpu);
+
+ /*
+ * Now that the dying CPU is beyond the point of no return w.r.t.
+ * in-kernel synchronisation, try to get the firmware to help us to
+ * verify that it has really left the kernel before we consider
+ * clobbering anything it might still be using.
+ */
+ err = op_cpu_kill(cpu);
+ if (err)
+ pr_warn("CPU%d may not have shut down cleanly: %d\n", cpu, err);
+}
+
+/*
+ * Called from the idle thread for the CPU which has been shut down.
+ */
+void cpu_die(void)
+{
+ unsigned int cpu = smp_processor_id();
+ const struct cpu_operations *ops = get_cpu_ops(cpu);
+
+ idle_task_exit();
+
+ local_daif_mask();
+
+ /* Tell __cpu_die() that this CPU is now safe to dispose of */
+ (void)cpu_report_death();
+
+ /*
+ * Actually shutdown the CPU. This must never fail. The specific hotplug
+ * mechanism must perform all required cache maintenance to ensure that
+ * no dirty lines are lost in the process of shutting down the CPU.
+ */
+ ops->cpu_die(cpu);
+
+ BUG();
+}
+#endif
+
+static void __cpu_try_die(int cpu)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+ const struct cpu_operations *ops = get_cpu_ops(cpu);
+
+ if (ops && ops->cpu_die)
+ ops->cpu_die(cpu);
+#endif
+}
+
+/*
+ * Kill the calling secondary CPU, early in bringup before it is turned
+ * online.
+ */
+void cpu_die_early(void)
+{
+ int cpu = smp_processor_id();
+
+ pr_crit("CPU%d: will not boot\n", cpu);
+
+ /* Mark this CPU absent */
+ set_cpu_present(cpu, 0);
+ rcu_report_dead(cpu);
+
+ if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
+ update_cpu_boot_status(CPU_KILL_ME);
+ __cpu_try_die(cpu);
+ }
+
+ update_cpu_boot_status(CPU_STUCK_IN_KERNEL);
+
+ cpu_park_loop();
+}
+
+static void __init hyp_mode_check(void)
+{
+ if (is_hyp_mode_available())
+ pr_info("CPU: All CPU(s) started at EL2\n");
+ else if (is_hyp_mode_mismatched())
+ WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
+ "CPU: CPUs started in inconsistent modes");
+ else
+ pr_info("CPU: All CPU(s) started at EL1\n");
+ if (IS_ENABLED(CONFIG_KVM))
+ kvm_compute_layout();
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+ pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
+ setup_cpu_features();
+ hyp_mode_check();
+ apply_alternatives_all();
+ mark_linear_text_alias_ro();
+}
+
+void __init smp_prepare_boot_cpu(void)
+{
+ set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
+ cpuinfo_store_boot_cpu();
+
+ /*
+ * We now know enough about the boot CPU to apply the
+ * alternatives that cannot wait until interrupt handling
+ * and/or scheduling is enabled.
+ */
+ apply_boot_alternatives();
+
+ /* Conditionally switch to GIC PMR for interrupt masking */
+ if (system_uses_irq_prio_masking())
+ init_gic_priority_masking();
+}
+
+static u64 __init of_get_cpu_mpidr(struct device_node *dn)
+{
+ const __be32 *cell;
+ u64 hwid;
+
+ /*
+ * A cpu node with missing "reg" property is
+ * considered invalid to build a cpu_logical_map
+ * entry.
+ */
+ cell = of_get_property(dn, "reg", NULL);
+ if (!cell) {
+ pr_err("%pOF: missing reg property\n", dn);
+ return INVALID_HWID;
+ }
+
+ hwid = of_read_number(cell, of_n_addr_cells(dn));
+ /*
+ * Non affinity bits must be set to 0 in the DT
+ */
+ if (hwid & ~MPIDR_HWID_BITMASK) {
+ pr_err("%pOF: invalid reg property\n", dn);
+ return INVALID_HWID;
+ }
+ return hwid;
+}
+
+/*
+ * Duplicate MPIDRs are a recipe for disaster. Scan all initialized
+ * entries and check for duplicates. If any is found just ignore the
+ * cpu. cpu_logical_map was initialized to INVALID_HWID to avoid
+ * matching valid MPIDR values.
+ */
+static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid)
+{
+ unsigned int i;
+
+ for (i = 1; (i < cpu) && (i < NR_CPUS); i++)
+ if (cpu_logical_map(i) == hwid)
+ return true;
+ return false;
+}
+
+/*
+ * Initialize cpu operations for a logical cpu and
+ * set it in the possible mask on success
+ */
+static int __init smp_cpu_setup(int cpu)
+{
+ const struct cpu_operations *ops;
+
+ if (init_cpu_ops(cpu))
+ return -ENODEV;
+
+ ops = get_cpu_ops(cpu);
+ if (ops->cpu_init(cpu))
+ return -ENODEV;
+
+ set_cpu_possible(cpu, true);
+
+ return 0;
+}
+
+static bool bootcpu_valid __initdata;
+static unsigned int cpu_count = 1;
+
+#ifdef CONFIG_ACPI
+static struct acpi_madt_generic_interrupt cpu_madt_gicc[NR_CPUS];
+
+struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu)
+{
+ return &cpu_madt_gicc[cpu];
+}
+
+/*
+ * acpi_map_gic_cpu_interface - parse processor MADT entry
+ *
+ * Carry out sanity checks on MADT processor entry and initialize
+ * cpu_logical_map on success
+ */
+static void __init
+acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
+{
+ u64 hwid = processor->arm_mpidr;
+
+ if (!(processor->flags & ACPI_MADT_ENABLED)) {
+ pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
+ return;
+ }
+
+ if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) {
+ pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid);
+ return;
+ }
+
+ if (is_mpidr_duplicate(cpu_count, hwid)) {
+ pr_err("duplicate CPU MPIDR 0x%llx in MADT\n", hwid);
+ return;
+ }
+
+ /* Check if GICC structure of boot CPU is available in the MADT */
+ if (cpu_logical_map(0) == hwid) {
+ if (bootcpu_valid) {
+ pr_err("duplicate boot CPU MPIDR: 0x%llx in MADT\n",
+ hwid);
+ return;
+ }
+ bootcpu_valid = true;
+ cpu_madt_gicc[0] = *processor;
+ return;
+ }
+
+ if (cpu_count >= NR_CPUS)
+ return;
+
+ /* map the logical cpu id to cpu MPIDR */
+ set_cpu_logical_map(cpu_count, hwid);
+
+ cpu_madt_gicc[cpu_count] = *processor;
+
+ /*
+ * Set-up the ACPI parking protocol cpu entries
+ * while initializing the cpu_logical_map to
+ * avoid parsing MADT entries multiple times for
+ * nothing (ie a valid cpu_logical_map entry should
+ * contain a valid parking protocol data set to
+ * initialize the cpu if the parking protocol is
+ * the only available enable method).
+ */
+ acpi_set_mailbox_entry(cpu_count, processor);
+
+ cpu_count++;
+}
+
+static int __init
+acpi_parse_gic_cpu_interface(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_madt_generic_interrupt *processor;
+
+ processor = (struct acpi_madt_generic_interrupt *)header;
+ if (BAD_MADT_GICC_ENTRY(processor, end))
+ return -EINVAL;
+
+ acpi_table_print_madt_entry(&header->common);
+
+ acpi_map_gic_cpu_interface(processor);
+
+ return 0;
+}
+
+static void __init acpi_parse_and_init_cpus(void)
+{
+ int i;
+
+ /*
+ * do a walk of MADT to determine how many CPUs
+ * we have including disabled CPUs, and get information
+ * we need for SMP init.
+ */
+ acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
+ acpi_parse_gic_cpu_interface, 0);
+
+ /*
+ * In ACPI, SMP and CPU NUMA information is provided in separate
+ * static tables, namely the MADT and the SRAT.
+ *
+ * Thus, it is simpler to first create the cpu logical map through
+ * an MADT walk and then map the logical cpus to their node ids
+ * as separate steps.
+ */
+ acpi_map_cpus_to_nodes();
+
+ for (i = 0; i < nr_cpu_ids; i++)
+ early_map_cpu_to_node(i, acpi_numa_get_nid(i));
+}
+#else
+#define acpi_parse_and_init_cpus(...) do { } while (0)
+#endif
+
+/*
+ * Enumerate the possible CPU set from the device tree and build the
+ * cpu logical map array containing MPIDR values related to logical
+ * cpus. Assumes that cpu_logical_map(0) has already been initialized.
+ */
+static void __init of_parse_and_init_cpus(void)
+{
+ struct device_node *dn;
+
+ for_each_of_cpu_node(dn) {
+ u64 hwid = of_get_cpu_mpidr(dn);
+
+ if (hwid == INVALID_HWID)
+ goto next;
+
+ if (is_mpidr_duplicate(cpu_count, hwid)) {
+ pr_err("%pOF: duplicate cpu reg properties in the DT\n",
+ dn);
+ goto next;
+ }
+
+ /*
+ * The numbering scheme requires that the boot CPU
+ * must be assigned logical id 0. Record it so that
+ * the logical map built from DT is validated and can
+ * be used.
+ */
+ if (hwid == cpu_logical_map(0)) {
+ if (bootcpu_valid) {
+ pr_err("%pOF: duplicate boot cpu reg property in DT\n",
+ dn);
+ goto next;
+ }
+
+ bootcpu_valid = true;
+ early_map_cpu_to_node(0, of_node_to_nid(dn));
+
+ /*
+ * cpu_logical_map has already been
+ * initialized and the boot cpu doesn't need
+ * the enable-method so continue without
+ * incrementing cpu.
+ */
+ continue;
+ }
+
+ if (cpu_count >= NR_CPUS)
+ goto next;
+
+ pr_debug("cpu logical map 0x%llx\n", hwid);
+ set_cpu_logical_map(cpu_count, hwid);
+
+ early_map_cpu_to_node(cpu_count, of_node_to_nid(dn));
+next:
+ cpu_count++;
+ }
+}
+
+/*
+ * Enumerate the possible CPU set from the device tree or ACPI and build the
+ * cpu logical map array containing MPIDR values related to logical
+ * cpus. Assumes that cpu_logical_map(0) has already been initialized.
+ */
+void __init smp_init_cpus(void)
+{
+ int i;
+
+ if (acpi_disabled)
+ of_parse_and_init_cpus();
+ else
+ acpi_parse_and_init_cpus();
+
+ if (cpu_count > nr_cpu_ids)
+ pr_warn("Number of cores (%d) exceeds configured maximum of %u - clipping\n",
+ cpu_count, nr_cpu_ids);
+
+ if (!bootcpu_valid) {
+ pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
+ return;
+ }
+
+ /*
+ * We need to set the cpu_logical_map entries before enabling
+ * the cpus so that cpu processor description entries (DT cpu nodes
+ * and ACPI MADT entries) can be retrieved by matching the cpu hwid
+ * with entries in cpu_logical_map while initializing the cpus.
+ * If the cpu set-up fails, invalidate the cpu_logical_map entry.
+ */
+ for (i = 1; i < nr_cpu_ids; i++) {
+ if (cpu_logical_map(i) != INVALID_HWID) {
+ if (smp_cpu_setup(i))
+ set_cpu_logical_map(i, INVALID_HWID);
+ }
+ }
+}
+
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+ const struct cpu_operations *ops;
+ int err;
+ unsigned int cpu;
+ unsigned int this_cpu;
+
+ init_cpu_topology();
+
+ this_cpu = smp_processor_id();
+ store_cpu_topology(this_cpu);
+ numa_store_cpu_info(this_cpu);
+ numa_add_cpu(this_cpu);
+
+ /*
+ * If UP is mandated by "nosmp" (which implies "maxcpus=0"), don't set
+ * secondary CPUs present.
+ */
+ if (max_cpus == 0)
+ return;
+
+ /*
+ * Initialise the present map (which describes the set of CPUs
+ * actually populated at the present time) and release the
+ * secondaries from the bootloader.
+ */
+ for_each_possible_cpu(cpu) {
+
+ per_cpu(cpu_number, cpu) = cpu;
+
+ if (cpu == smp_processor_id())
+ continue;
+
+ ops = get_cpu_ops(cpu);
+ if (!ops)
+ continue;
+
+ err = ops->cpu_prepare(cpu);
+ if (err)
+ continue;
+
+ set_cpu_present(cpu, true);
+ numa_store_cpu_info(cpu);
+ }
+}
+
+static const char *ipi_types[NR_IPI] __tracepoint_string = {
+#define S(x,s) [x] = s
+ S(IPI_RESCHEDULE, "Rescheduling interrupts"),
+ S(IPI_CALL_FUNC, "Function call interrupts"),
+ S(IPI_CPU_STOP, "CPU stop interrupts"),
+ S(IPI_CPU_CRASH_STOP, "CPU stop (for crash dump) interrupts"),
+ S(IPI_TIMER, "Timer broadcast interrupts"),
+ S(IPI_IRQ_WORK, "IRQ work interrupts"),
+ S(IPI_WAKEUP, "CPU wake-up interrupts"),
+};
+
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);
+
+unsigned long irq_err_count;
+
+int arch_show_interrupts(struct seq_file *p, int prec)
+{
+ unsigned int cpu, i;
+
+ for (i = 0; i < NR_IPI; i++) {
+ unsigned int irq = irq_desc_get_irq(ipi_desc[i]);
+ seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
+ prec >= 4 ? " " : "");
+ for_each_online_cpu(cpu)
+ seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
+ seq_printf(p, " %s\n", ipi_types[i]);
+ }
+
+ seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
+ return 0;
+}
+
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+ smp_cross_call(mask, IPI_CALL_FUNC);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+ smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
+}
+
+#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
+void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
+{
+ smp_cross_call(mask, IPI_WAKEUP);
+}
+#endif
+
+#ifdef CONFIG_IRQ_WORK
+void arch_irq_work_raise(void)
+{
+ smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
+}
+#endif
+
+static void local_cpu_stop(void)
+{
+ set_cpu_online(smp_processor_id(), false);
+
+ local_daif_mask();
+ sdei_mask_local_cpu();
+ cpu_park_loop();
+}
+
+/*
+ * We need to implement panic_smp_self_stop() for parallel panic() calls, so
+ * that cpu_online_mask gets correctly updated and smp_send_stop() can skip
+ * CPUs that have already stopped themselves.
+ */
+void panic_smp_self_stop(void)
+{
+ local_cpu_stop();
+}
+
+#ifdef CONFIG_KEXEC_CORE
+static atomic_t waiting_for_crash_ipi = ATOMIC_INIT(0);
+#endif
+
+static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
+{
+#ifdef CONFIG_KEXEC_CORE
+ crash_save_cpu(regs, cpu);
+
+ atomic_dec(&waiting_for_crash_ipi);
+
+ local_irq_disable();
+ sdei_mask_local_cpu();
+
+ if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
+ __cpu_try_die(cpu);
+
+ /* just in case */
+ cpu_park_loop();
+#endif
+}
+
+/*
+ * Main handler for inter-processor interrupts
+ */
+static void do_handle_IPI(int ipinr)
+{
+ unsigned int cpu = smp_processor_id();
+
+ if ((unsigned)ipinr < NR_IPI)
+ trace_ipi_entry_rcuidle(ipi_types[ipinr]);
+
+ switch (ipinr) {
+ case IPI_RESCHEDULE:
+ scheduler_ipi();
+ break;
+
+ case IPI_CALL_FUNC:
+ generic_smp_call_function_interrupt();
+ break;
+
+ case IPI_CPU_STOP:
+ local_cpu_stop();
+ break;
+
+ case IPI_CPU_CRASH_STOP:
+ if (IS_ENABLED(CONFIG_KEXEC_CORE)) {
+ ipi_cpu_crash_stop(cpu, get_irq_regs());
+
+ unreachable();
+ }
+ break;
+
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+ case IPI_TIMER:
+ tick_receive_broadcast();
+ break;
+#endif
+
+#ifdef CONFIG_IRQ_WORK
+ case IPI_IRQ_WORK:
+ irq_work_run();
+ break;
+#endif
+
+#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
+ case IPI_WAKEUP:
+ WARN_ONCE(!acpi_parking_protocol_valid(cpu),
+ "CPU%u: Wake-up IPI outside the ACPI parking protocol\n",
+ cpu);
+ break;
+#endif
+
+ default:
+ pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
+ break;
+ }
+
+ if ((unsigned)ipinr < NR_IPI)
+ trace_ipi_exit_rcuidle(ipi_types[ipinr]);
+}
+
+static irqreturn_t ipi_handler(int irq, void *data)
+{
+ do_handle_IPI(irq - ipi_irq_base);
+ return IRQ_HANDLED;
+}
+
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+ trace_ipi_raise(target, ipi_types[ipinr]);
+ __ipi_send_mask(ipi_desc[ipinr], target);
+}
+
+static void ipi_setup(int cpu)
+{
+ int i;
+
+ if (WARN_ON_ONCE(!ipi_irq_base))
+ return;
+
+ for (i = 0; i < nr_ipi; i++)
+ enable_percpu_irq(ipi_irq_base + i, 0);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void ipi_teardown(int cpu)
+{
+ int i;
+
+ if (WARN_ON_ONCE(!ipi_irq_base))
+ return;
+
+ for (i = 0; i < nr_ipi; i++)
+ disable_percpu_irq(ipi_irq_base + i);
+}
+#endif
+
+void __init set_smp_ipi_range(int ipi_base, int n)
+{
+ int i;
+
+ WARN_ON(n < NR_IPI);
+ nr_ipi = min(n, NR_IPI);
+
+ for (i = 0; i < nr_ipi; i++) {
+ int err;
+
+ err = request_percpu_irq(ipi_base + i, ipi_handler,
+ "IPI", &cpu_number);
+ WARN_ON(err);
+
+ ipi_desc[i] = irq_to_desc(ipi_base + i);
+ irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
+ }
+
+ ipi_irq_base = ipi_base;
+
+ /* Setup the boot CPU immediately */
+ ipi_setup(smp_processor_id());
+}
+
+void smp_send_reschedule(int cpu)
+{
+ smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
+}
+
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+void tick_broadcast(const struct cpumask *mask)
+{
+ smp_cross_call(mask, IPI_TIMER);
+}
+#endif
+
+/*
+ * The number of CPUs online, not counting this CPU (which may not be
+ * fully online and so not counted in num_online_cpus()).
+ */
+static inline unsigned int num_other_online_cpus(void)
+{
+ unsigned int this_cpu_online = cpu_online(smp_processor_id());
+
+ return num_online_cpus() - this_cpu_online;
+}
+
+void smp_send_stop(void)
+{
+ unsigned long timeout;
+
+ if (num_other_online_cpus()) {
+ cpumask_t mask;
+
+ cpumask_copy(&mask, cpu_online_mask);
+ cpumask_clear_cpu(smp_processor_id(), &mask);
+
+ if (system_state <= SYSTEM_RUNNING)
+ pr_crit("SMP: stopping secondary CPUs\n");
+ smp_cross_call(&mask, IPI_CPU_STOP);
+ }
+
+ /* Wait up to one second for other CPUs to stop */
+ timeout = USEC_PER_SEC;
+ while (num_other_online_cpus() && timeout--)
+ udelay(1);
+
+ if (num_other_online_cpus())
+ pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
+ cpumask_pr_args(cpu_online_mask));
+
+ sdei_mask_local_cpu();
+}
+
+#ifdef CONFIG_KEXEC_CORE
+void crash_smp_send_stop(void)
+{
+ static int cpus_stopped;
+ cpumask_t mask;
+ unsigned long timeout;
+
+ /*
+ * This function can be called twice in panic path, but obviously
+ * we execute this only once.
+ */
+ if (cpus_stopped)
+ return;
+
+ cpus_stopped = 1;
+
+ /*
+ * If this cpu is the only one alive at this point in time, online or
+ * not, there are no stop messages to be sent around, so just back out.
+ */
+ if (num_other_online_cpus() == 0)
+ goto skip_ipi;
+
+ cpumask_copy(&mask, cpu_online_mask);
+ cpumask_clear_cpu(smp_processor_id(), &mask);
+
+ atomic_set(&waiting_for_crash_ipi, num_other_online_cpus());
+
+ pr_crit("SMP: stopping secondary CPUs\n");
+ smp_cross_call(&mask, IPI_CPU_CRASH_STOP);
+
+ /* Wait up to one second for other CPUs to stop */
+ timeout = USEC_PER_SEC;
+ while ((atomic_read(&waiting_for_crash_ipi) > 0) && timeout--)
+ udelay(1);
+
+ if (atomic_read(&waiting_for_crash_ipi) > 0)
+ pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
+ cpumask_pr_args(&mask));
+
+skip_ipi:
+ sdei_mask_local_cpu();
+ sdei_handler_abort();
+}
+
+bool smp_crash_stop_failed(void)
+{
+ return (atomic_read(&waiting_for_crash_ipi) > 0);
+}
+#endif
+
+/*
+ * not supported here
+ */
+int setup_profiling_timer(unsigned int multiplier)
+{
+ return -EINVAL;
+}
+
+static bool have_cpu_die(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+ int any_cpu = raw_smp_processor_id();
+ const struct cpu_operations *ops = get_cpu_ops(any_cpu);
+
+ if (ops && ops->cpu_die)
+ return true;
+#endif
+ return false;
+}
+
+bool cpus_are_stuck_in_kernel(void)
+{
+ bool smp_spin_tables = (num_possible_cpus() > 1 && !have_cpu_die());
+
+ return !!cpus_stuck_in_kernel || smp_spin_tables;
+}
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
new file mode 100644
index 000000000..056772c26
--- /dev/null
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Spin Table SMP initialisation
+ *
+ * Copyright (C) 2013 ARM Ltd.
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/smp.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+
+#include <asm/cacheflush.h>
+#include <asm/cpu_ops.h>
+#include <asm/cputype.h>
+#include <asm/io.h>
+#include <asm/smp_plat.h>
+
+extern void secondary_holding_pen(void);
+volatile unsigned long __section(".mmuoff.data.read")
+secondary_holding_pen_release = INVALID_HWID;
+
+static phys_addr_t cpu_release_addr[NR_CPUS];
+
+/*
+ * Write secondary_holding_pen_release in a way that is guaranteed to be
+ * visible to all observers, irrespective of whether they're taking part
+ * in coherency or not. This is necessary for the hotplug code to work
+ * reliably.
+ */
+static void write_pen_release(u64 val)
+{
+ void *start = (void *)&secondary_holding_pen_release;
+ unsigned long size = sizeof(secondary_holding_pen_release);
+
+ secondary_holding_pen_release = val;
+ __flush_dcache_area(start, size);
+}
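+
+/*
+ * Conceptually, the consumer of this value (secondary_holding_pen,
+ * implemented in head.S) spins along these lines (sketch only):
+ *
+ *	while (secondary_holding_pen_release != my_mpidr)
+ *		wfe();
+ *	// then branch to the secondary entry point
+ *
+ * which is why the boot path issues an sev() after each update.
+ */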
+
+
+static int smp_spin_table_cpu_init(unsigned int cpu)
+{
+ struct device_node *dn;
+ int ret;
+
+ dn = of_get_cpu_node(cpu, NULL);
+ if (!dn)
+ return -ENODEV;
+
+ /*
+ * Determine the address from which the CPU is polling.
+ */
+ ret = of_property_read_u64(dn, "cpu-release-addr",
+ &cpu_release_addr[cpu]);
+ if (ret)
+ pr_err("CPU %d: missing or invalid cpu-release-addr property\n",
+ cpu);
+
+ of_node_put(dn);
+
+ return ret;
+}
+
+static int smp_spin_table_cpu_prepare(unsigned int cpu)
+{
+ __le64 __iomem *release_addr;
+
+ if (!cpu_release_addr[cpu])
+ return -ENODEV;
+
+ /*
+ * The cpu-release-addr may or may not be inside the linear mapping.
+ * As ioremap_cache will either give us a new mapping or reuse the
+ * existing linear mapping, we can use it to cover both cases. In
+ * either case the memory will be MT_NORMAL.
+ */
+ release_addr = ioremap_cache(cpu_release_addr[cpu],
+ sizeof(*release_addr));
+ if (!release_addr)
+ return -ENOMEM;
+
+ /*
+ * We write the release address as LE regardless of the native
+ * endianness of the kernel. Therefore, any boot-loaders that
+ * read this address need to convert this address to the
+ * boot-loader's endianness before jumping. This is mandated by
+ * the boot protocol.
+ */
+ writeq_relaxed(__pa_symbol(secondary_holding_pen), release_addr);
+ __flush_dcache_area((__force void *)release_addr,
+ sizeof(*release_addr));
+
+ /*
+ * Send an event to wake up the secondary CPU.
+ */
+ sev();
+
+ iounmap(release_addr);
+
+ return 0;
+}
+
+static int smp_spin_table_cpu_boot(unsigned int cpu)
+{
+ /*
+ * Update the pen release flag.
+ */
+ write_pen_release(cpu_logical_map(cpu));
+
+ /*
+ * Send an event, causing the secondaries to read pen_release.
+ */
+ sev();
+
+ return 0;
+}
+
+const struct cpu_operations smp_spin_table_ops = {
+ .name = "spin-table",
+ .cpu_init = smp_spin_table_cpu_init,
+ .cpu_prepare = smp_spin_table_cpu_prepare,
+ .cpu_boot = smp_spin_table_cpu_boot,
+};
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
new file mode 100644
index 000000000..c445828ec
--- /dev/null
+++ b/arch/arm64/kernel/stacktrace.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Stack tracing support
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ */
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/ftrace.h>
+#include <linux/kprobes.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/task_stack.h>
+#include <linux/stacktrace.h>
+
+#include <asm/irq.h>
+#include <asm/pointer_auth.h>
+#include <asm/stack_pointer.h>
+#include <asm/stacktrace.h>
+
+/*
+ * AArch64 PCS assigns the frame pointer to x29.
+ *
+ * A simple function prologue looks like this:
+ * sub sp, sp, #0x10
+ * stp x29, x30, [sp]
+ * mov x29, sp
+ *
+ * A simple function epilogue looks like this:
+ * mov sp, x29
+ * ldp x29, x30, [sp]
+ * add sp, sp, #0x10
+ */
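+
+/*
+ * Each frame record is thus a pair of 64-bit values at a 16-byte aligned
+ * address. A rough sketch of the layout that unwind_frame() reads:
+ *
+ *	struct frame_record {
+ *		u64 fp;		// the caller's frame record
+ *		u64 lr;		// saved return address (pc)
+ *	};
+ */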
+
+/*
+ * Unwind from one frame record (A) to the next frame record (B).
+ *
+ * We terminate early if the location of B indicates a malformed chain of frame
+ * records (e.g. a cycle), determined based on the location and fp value of A
+ * and the location (but not the fp value) of B.
+ */
+int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
+{
+ unsigned long fp = frame->fp;
+ struct stack_info info;
+
+ if (fp & 0xf)
+ return -EINVAL;
+
+ if (!tsk)
+ tsk = current;
+
+ if (!on_accessible_stack(tsk, fp, &info))
+ return -EINVAL;
+
+ if (test_bit(info.type, frame->stacks_done))
+ return -EINVAL;
+
+ /*
+ * As stacks grow downward, any valid record on the same stack must be
+ * at a strictly higher address than the prior record.
+ *
+ * Stacks can nest in several valid orders, e.g.
+ *
+ * TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL
+ * TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW
+ *
+ * ... but the nesting itself is strict. Once we transition from one
+ * stack to another, it's never valid to unwind back to that first
+ * stack.
+ */
+ if (info.type == frame->prev_type) {
+ if (fp <= frame->prev_fp)
+ return -EINVAL;
+ } else {
+ set_bit(frame->prev_type, frame->stacks_done);
+ }
+
+ /*
+ * Record this frame record's values and location. The prev_fp and
+ * prev_type are only meaningful to the next unwind_frame() invocation.
+ */
+ frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
+ frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
+ frame->prev_fp = fp;
+ frame->prev_type = info.type;
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ if (tsk->ret_stack &&
+ (ptrauth_strip_insn_pac(frame->pc) == (unsigned long)return_to_handler)) {
+ struct ftrace_ret_stack *ret_stack;
+ /*
+ * This is a case where the function graph tracer has
+ * modified a return address (LR) in a stack frame
+ * to hook a function return.
+ * So replace it with the original value.
+ */
+ ret_stack = ftrace_graph_get_ret_stack(tsk, frame->graph++);
+ if (WARN_ON_ONCE(!ret_stack))
+ return -EINVAL;
+ frame->pc = ret_stack->ret;
+ }
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+ frame->pc = ptrauth_strip_insn_pac(frame->pc);
+
+ /*
+ * Frames created upon entry from EL0 have NULL FP and PC values, so
+ * don't bother reporting these. Frames created by __noreturn functions
+ * might have a valid FP even if PC is bogus, so only terminate where
+ * both are NULL.
+ */
+ if (!frame->fp && !frame->pc)
+ return -EINVAL;
+
+ return 0;
+}
+NOKPROBE_SYMBOL(unwind_frame);
+
+void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
+ bool (*fn)(void *, unsigned long), void *data)
+{
+ while (1) {
+ int ret;
+
+ if (!fn(data, frame->pc))
+ break;
+ ret = unwind_frame(tsk, frame);
+ if (ret < 0)
+ break;
+ }
+}
+NOKPROBE_SYMBOL(walk_stackframe);
+
+static void dump_backtrace_entry(unsigned long where, const char *loglvl)
+{
+ printk("%s %pS\n", loglvl, (void *)where);
+}
+
+void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
+ const char *loglvl)
+{
+ struct stackframe frame;
+ int skip = 0;
+
+ pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
+
+ if (regs) {
+ if (user_mode(regs))
+ return;
+ skip = 1;
+ }
+
+ if (!tsk)
+ tsk = current;
+
+ if (!try_get_task_stack(tsk))
+ return;
+
+ if (tsk == current) {
+ start_backtrace(&frame,
+ (unsigned long)__builtin_frame_address(0),
+ (unsigned long)dump_backtrace);
+ } else {
+ /*
+ * task blocked in __switch_to
+ */
+ start_backtrace(&frame,
+ thread_saved_fp(tsk),
+ thread_saved_pc(tsk));
+ }
+
+ printk("%sCall trace:\n", loglvl);
+ do {
+ /* skip until specified stack frame */
+ if (!skip) {
+ dump_backtrace_entry(frame.pc, loglvl);
+ } else if (frame.fp == regs->regs[29]) {
+ skip = 0;
+ /*
+ * Mostly, this is the case where this function is
+ * called in panic/abort. As the exception handler's
+ * stack frame does not contain the corresponding pc
+ * at which an exception has taken place, use regs->pc
+ * instead.
+ */
+ dump_backtrace_entry(regs->pc, loglvl);
+ }
+ } while (!unwind_frame(tsk, &frame));
+
+ put_task_stack(tsk);
+}
+
+void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
+{
+ dump_backtrace(NULL, tsk, loglvl);
+ barrier();
+}
+
+#ifdef CONFIG_STACKTRACE
+
+noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
+ void *cookie, struct task_struct *task,
+ struct pt_regs *regs)
+{
+ struct stackframe frame;
+
+ if (regs)
+ start_backtrace(&frame, regs->regs[29], regs->pc);
+ else if (task == current)
+ start_backtrace(&frame,
+ (unsigned long)__builtin_frame_address(1),
+ (unsigned long)__builtin_return_address(0));
+ else
+ start_backtrace(&frame, thread_saved_fp(task),
+ thread_saved_pc(task));
+
+ walk_stackframe(task, &frame, consume_entry, cookie);
+}
+
+#endif
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
new file mode 100644
index 000000000..9f8cdeccd
--- /dev/null
+++ b/arch/arm64/kernel/suspend.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/ftrace.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/pgtable.h>
+#include <asm/alternative.h>
+#include <asm/cacheflush.h>
+#include <asm/cpufeature.h>
+#include <asm/daifflags.h>
+#include <asm/debug-monitors.h>
+#include <asm/exec.h>
+#include <asm/mte.h>
+#include <asm/memory.h>
+#include <asm/mmu_context.h>
+#include <asm/smp_plat.h>
+#include <asm/suspend.h>
+
+/*
+ * This is allocated by cpu_suspend_init(), and used to store a pointer to
+ * the 'struct sleep_stack_data' that contains a particular CPU's state.
+ */
+unsigned long *sleep_save_stash;
+
+/*
+ * This hook is provided so that cpu_suspend code can restore HW
+ * breakpoints as early as possible in the resume path, before reenabling
+ * debug exceptions. Code cannot be run from a CPU PM notifier since by the
+ * time the notifier runs debug exceptions might have been enabled already,
+ * with the HW breakpoint registers' contents still in an unknown state.
+ */
+static int (*hw_breakpoint_restore)(unsigned int);
+void __init cpu_suspend_set_dbg_restorer(int (*hw_bp_restore)(unsigned int))
+{
+ /* Prevent multiple restore hook initializations */
+ if (WARN_ON(hw_breakpoint_restore))
+ return;
+ hw_breakpoint_restore = hw_bp_restore;
+}
+
+void notrace __cpu_suspend_exit(void)
+{
+ unsigned int cpu = smp_processor_id();
+
+ /*
+ * We are resuming from reset with the idmap active in TTBR0_EL1.
+ * We must uninstall the idmap and restore the expected MMU
+ * state before we can possibly return to userspace.
+ */
+ cpu_uninstall_idmap();
+
+ /* Restore CnP bit in TTBR1_EL1 */
+ if (system_supports_cnp())
+ cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
+
+ /*
+ * PSTATE was not saved over suspend/resume, re-enable any detected
+ * features that might not have been set correctly.
+ */
+ __uaccess_enable_hw_pan();
+ uao_thread_switch(current);
+
+ /*
+ * Restore HW breakpoint registers to sane values
+ * before debug exceptions are possibly reenabled
+ * by cpu_suspend()s local_daif_restore() call.
+ */
+ if (hw_breakpoint_restore)
+ hw_breakpoint_restore(cpu);
+
+ /*
+ * On resume, firmware implementing dynamic mitigation will
+ * have turned the mitigation on. If the user has forcefully
+ * disabled it, make sure their wishes are obeyed.
+ */
+ spectre_v4_enable_mitigation(NULL);
+
+ /* Restore additional MTE-specific configuration */
+ mte_suspend_exit();
+}
+
+/*
+ * cpu_suspend
+ *
+ * arg: argument to pass to the finisher function
+ * fn: finisher function pointer
+ *
+ */
+int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
+{
+ int ret = 0;
+ unsigned long flags;
+ struct sleep_stack_data state;
+
+ /*
+ * From this point debug exceptions are disabled to prevent
+ * updates to mdscr register (saved and restored along with
+ * general purpose registers) from kernel debuggers.
+ */
+ flags = local_daif_save();
+
+ /*
+ * Function graph tracer state gets inconsistent when the kernel
+ * calls functions that never return (aka suspend finishers), hence
+ * disable graph tracing during their execution.
+ */
+ pause_graph_tracing();
+
+ if (__cpu_suspend_enter(&state)) {
+ /* Call the suspend finisher */
+ ret = fn(arg);
+
+ /*
+ * Never gets here, unless the suspend finisher fails.
+ * Successful cpu_suspend() should return from cpu_resume(),
+ * so returning through this code path is considered an error.
+ * If the return value is set to 0, force ret = -EOPNOTSUPP
+ * to make sure a proper error condition is propagated.
+ */
+ if (!ret)
+ ret = -EOPNOTSUPP;
+ } else {
+ RCU_NONIDLE(__cpu_suspend_exit());
+ }
+
+ unpause_graph_tracing();
+
+ /*
+ * Restore pstate flags. OS lock and mdscr have already been
+ * restored, so from this point onwards, debugging is fully
+ * re-enabled if it was enabled when the core started shutdown.
+ */
+ local_daif_restore(flags);
+
+ return ret;
+}
+
+static int __init cpu_suspend_init(void)
+{
+ /* ctx_ptr is an array of physical addresses */
+ sleep_save_stash = kcalloc(mpidr_hash_size(), sizeof(*sleep_save_stash),
+ GFP_KERNEL);
+
+ if (WARN_ON(!sleep_save_stash))
+ return -ENOMEM;
+
+ return 0;
+}
+early_initcall(cpu_suspend_init);
diff --git a/arch/arm64/kernel/sys.c b/arch/arm64/kernel/sys.c
new file mode 100644
index 000000000..d5ffaaab3
--- /dev/null
+++ b/arch/arm64/kernel/sys.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AArch64-specific system calls implementation
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Catalin Marinas <catalin.marinas@arm.com>
+ */
+
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+
+#include <asm/cpufeature.h>
+#include <asm/syscall.h>
+
+SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
+ unsigned long, prot, unsigned long, flags,
+ unsigned long, fd, unsigned long, off)
+{
+ if (offset_in_page(off) != 0)
+ return -EINVAL;
+
+ return ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
+}
+
+SYSCALL_DEFINE1(arm64_personality, unsigned int, personality)
+{
+ if (personality(personality) == PER_LINUX32 &&
+ !system_supports_32bit_el0())
+ return -EINVAL;
+ return ksys_personality(personality);
+}
+
+asmlinkage long sys_ni_syscall(void);
+
+asmlinkage long __arm64_sys_ni_syscall(const struct pt_regs *__unused)
+{
+ return sys_ni_syscall();
+}
+
+/*
+ * Wrappers to pass the pt_regs argument.
+ */
+#define __arm64_sys_personality __arm64_sys_arm64_personality
+
+#undef __SYSCALL
+#define __SYSCALL(nr, sym) asmlinkage long __arm64_##sym(const struct pt_regs *);
+#include <asm/unistd.h>
+
+#undef __SYSCALL
+#define __SYSCALL(nr, sym) [nr] = __arm64_##sym,
+
+const syscall_fn_t sys_call_table[__NR_syscalls] = {
+ [0 ... __NR_syscalls - 1] = __arm64_sys_ni_syscall,
+#include <asm/unistd.h>
+};
diff --git a/arch/arm64/kernel/sys32.c b/arch/arm64/kernel/sys32.c
new file mode 100644
index 000000000..fc40386af
--- /dev/null
+++ b/arch/arm64/kernel/sys32.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * arch/arm64/kernel/sys32.c
+ *
+ * Copyright (C) 2015 ARM Ltd.
+ */
+
+/*
+ * Needed to avoid conflicting __NR_* macros between uapi/asm/unistd.h and
+ * asm/unistd32.h.
+ */
+#define __COMPAT_SYSCALL_NR
+
+#include <linux/compat.h>
+#include <linux/compiler.h>
+#include <linux/syscalls.h>
+
+#include <asm/syscall.h>
+
+asmlinkage long compat_sys_sigreturn(void);
+asmlinkage long compat_sys_rt_sigreturn(void);
+
+COMPAT_SYSCALL_DEFINE3(aarch32_statfs64, const char __user *, pathname,
+ compat_size_t, sz, struct compat_statfs64 __user *, buf)
+{
+ /*
+ * 32-bit ARM applies an OABI compatibility fixup to statfs64 and
+ * fstatfs64 regardless of whether OABI is in use, and therefore
+ * arbitrary binaries may rely upon it, so we must do the same.
+ * For more details, see commit:
+ *
+ * 713c481519f19df9 ("[ARM] 3108/2: old ABI compat: statfs64 and
+ * fstatfs64")
+ */
+ if (sz == 88)
+ sz = 84;
+
+ return kcompat_sys_statfs64(pathname, sz, buf);
+}
+
+COMPAT_SYSCALL_DEFINE3(aarch32_fstatfs64, unsigned int, fd, compat_size_t, sz,
+ struct compat_statfs64 __user *, buf)
+{
+ /* see aarch32_statfs64 */
+ if (sz == 88)
+ sz = 84;
+
+ return kcompat_sys_fstatfs64(fd, sz, buf);
+}
+
+/*
+ * Note: off_4k is always in units of 4K. If we can't do the
+ * requested offset because it is not page-aligned, we return -EINVAL.
+ */
+COMPAT_SYSCALL_DEFINE6(aarch32_mmap2, unsigned long, addr, unsigned long, len,
+ unsigned long, prot, unsigned long, flags,
+ unsigned long, fd, unsigned long, off_4k)
+{
+ if (off_4k & (~PAGE_MASK >> 12))
+ return -EINVAL;
+
+ off_4k >>= (PAGE_SHIFT - 12);
+
+ return ksys_mmap_pgoff(addr, len, prot, flags, fd, off_4k);
+}
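+
+/*
+ * Example, assuming 64K pages: ~PAGE_MASK >> 12 == 0xf, so off_4k must be
+ * a multiple of 16 (i.e. 64K aligned in bytes), and
+ * off_4k >> (PAGE_SHIFT - 12) == off_4k >> 4 converts it into 64K page
+ * units for ksys_mmap_pgoff().
+ */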
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define arg_u32p(name) u32, name##_hi, u32, name##_lo
+#else
+#define arg_u32p(name) u32, name##_lo, u32, name##_hi
+#endif
+
+#define arg_u64(name) (((u64)name##_hi << 32) | name##_lo)
+
+COMPAT_SYSCALL_DEFINE6(aarch32_pread64, unsigned int, fd, char __user *, buf,
+ size_t, count, u32, __pad, arg_u32p(pos))
+{
+ return ksys_pread64(fd, buf, count, arg_u64(pos));
+}
+
+COMPAT_SYSCALL_DEFINE6(aarch32_pwrite64, unsigned int, fd,
+ const char __user *, buf, size_t, count, u32, __pad,
+ arg_u32p(pos))
+{
+ return ksys_pwrite64(fd, buf, count, arg_u64(pos));
+}
+
+COMPAT_SYSCALL_DEFINE4(aarch32_truncate64, const char __user *, pathname,
+ u32, __pad, arg_u32p(length))
+{
+ return ksys_truncate(pathname, arg_u64(length));
+}
+
+COMPAT_SYSCALL_DEFINE4(aarch32_ftruncate64, unsigned int, fd, u32, __pad,
+ arg_u32p(length))
+{
+ return ksys_ftruncate(fd, arg_u64(length));
+}
+
+COMPAT_SYSCALL_DEFINE5(aarch32_readahead, int, fd, u32, __pad,
+ arg_u32p(offset), size_t, count)
+{
+ return ksys_readahead(fd, arg_u64(offset), count);
+}
+
+COMPAT_SYSCALL_DEFINE6(aarch32_fadvise64_64, int, fd, int, advice,
+ arg_u32p(offset), arg_u32p(len))
+{
+ return ksys_fadvise64_64(fd, arg_u64(offset), arg_u64(len), advice);
+}
+
+COMPAT_SYSCALL_DEFINE6(aarch32_sync_file_range2, int, fd, unsigned int, flags,
+ arg_u32p(offset), arg_u32p(nbytes))
+{
+ return ksys_sync_file_range(fd, arg_u64(offset), arg_u64(nbytes),
+ flags);
+}
+
+COMPAT_SYSCALL_DEFINE6(aarch32_fallocate, int, fd, int, mode,
+ arg_u32p(offset), arg_u32p(len))
+{
+ return ksys_fallocate(fd, mode, arg_u64(offset), arg_u64(len));
+}
+
+#undef __SYSCALL
+#define __SYSCALL(nr, sym) asmlinkage long __arm64_##sym(const struct pt_regs *);
+#include <asm/unistd32.h>
+
+#undef __SYSCALL
+#define __SYSCALL(nr, sym) [nr] = __arm64_##sym,
+
+const syscall_fn_t compat_sys_call_table[__NR_compat_syscalls] = {
+ [0 ... __NR_compat_syscalls - 1] = __arm64_sys_ni_syscall,
+#include <asm/unistd32.h>
+};
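
The arg_u32p()/arg_u64() pair above expresses the AArch32 convention of passing a 64-bit argument in two 32-bit registers, ordered by endianness, while aarch32_mmap2 converts a 4K-unit offset to the kernel's page granularity. As a rough illustration only (plain userspace C with a made-up 64K page size, not code taken from this patch), the reassembly and the offset check look like this:

#include <stdint.h>
#include <stdio.h>

/* Stitch two 32-bit halves back into a 64-bit value (cf. arg_u64()). */
static uint64_t reassemble_u64(uint32_t lo, uint32_t hi)
{
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	const unsigned int page_shift = 16;	/* assumed 64K kernel pages */
	unsigned long off_4k = 0x23;		/* offset in 4K units */

	printf("pos = %#llx\n",
	       (unsigned long long)reassemble_u64(0x89abcdefU, 0x01234567U));

	/* Reject offsets that are not multiples of the real page size. */
	if (off_4k & ((1UL << (page_shift - 12)) - 1))
		printf("off_4k %#lx rejected (-EINVAL)\n", off_4k);

	off_4k = 0x20;				/* 0x20 * 4K = two 64K pages */
	printf("pgoff = %#lx\n", off_4k >> (page_shift - 12));
	return 0;
}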
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
new file mode 100644
index 000000000..51274bab2
--- /dev/null
+++ b/arch/arm64/kernel/sys_compat.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Based on arch/arm/kernel/sys_arm.c
+ *
+ * Copyright (C) People who wrote linux/arch/i386/kernel/sys_i386.c
+ * Copyright (C) 1995, 1996 Russell King.
+ * Copyright (C) 2012 ARM Ltd.
+ */
+
+#include <linux/compat.h>
+#include <linux/cpufeature.h>
+#include <linux/personality.h>
+#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+
+#include <asm/cacheflush.h>
+#include <asm/system_misc.h>
+#include <asm/tlbflush.h>
+#include <asm/unistd.h>
+
+static long
+__do_compat_cache_op(unsigned long start, unsigned long end)
+{
+ long ret;
+
+ do {
+ unsigned long chunk = min(PAGE_SIZE, end - start);
+
+ if (fatal_signal_pending(current))
+ return 0;
+
+ if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
+ /*
+ * The workaround requires an inner-shareable tlbi.
+ * We pick the reserved-ASID to minimise the impact.
+ */
+ __tlbi(aside1is, __TLBI_VADDR(0, 0));
+ dsb(ish);
+ }
+
+ ret = __flush_cache_user_range(start, start + chunk);
+ if (ret)
+ return ret;
+
+ cond_resched();
+ start += chunk;
+ } while (start < end);
+
+ return 0;
+}
+
+static inline long
+do_compat_cache_op(unsigned long start, unsigned long end, int flags)
+{
+ if (end < start || flags)
+ return -EINVAL;
+
+ if (!access_ok((const void __user *)start, end - start))
+ return -EFAULT;
+
+ return __do_compat_cache_op(start, end);
+}
+/*
+ * Handle all unrecognised system calls.
+ */
+long compat_arm_syscall(struct pt_regs *regs, int scno)
+{
+ void __user *addr;
+
+ switch (scno) {
+ /*
+ * Flush a region from virtual address 'r0' to virtual address 'r1'
+ * _exclusive_. There is no alignment requirement on either address;
+ * user space does not need to know the hardware cache layout.
+ *
+ * r2 contains flags. It should ALWAYS be passed as ZERO until it
+ * is defined to be something else. For now we ignore it, but may
+ * the fires of hell burn in your belly if you break this rule. ;)
+ *
+ * (at a later date, we may want to allow this call to not flush
+ * various aspects of the cache. Passing '0' will guarantee that
+ * everything necessary gets flushed to maintain consistency in
+ * the specified region).
+ */
+ case __ARM_NR_compat_cacheflush:
+ return do_compat_cache_op(regs->regs[0], regs->regs[1], regs->regs[2]);
+
+ case __ARM_NR_compat_set_tls:
+ current->thread.uw.tp_value = regs->regs[0];
+
+ /*
+ * Protect against register corruption from context switch.
+ * See comment in tls_thread_flush.
+ */
+ barrier();
+ write_sysreg(regs->regs[0], tpidrro_el0);
+ return 0;
+
+ default:
+ /*
+ * Calls 0xf0xxx..0xf07ff are defined to return -ENOSYS
+ * if not implemented, rather than raising SIGILL. This
+ * way the calling program can gracefully determine whether
+ * a feature is supported.
+ */
+ if (scno < __ARM_NR_COMPAT_END)
+ return -ENOSYS;
+ break;
+ }
+
+ addr = (void __user *)instruction_pointer(regs) -
+ (compat_thumb_mode(regs) ? 2 : 4);
+
+ arm64_notify_die("Oops - bad compat syscall(2)", regs,
+ SIGILL, ILL_ILLTRP, addr, 0);
+ return 0;
+}
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
new file mode 100644
index 000000000..befde0eaa
--- /dev/null
+++ b/arch/arm64/kernel/syscall.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/compiler.h>
+#include <linux/context_tracking.h>
+#include <linux/errno.h>
+#include <linux/nospec.h>
+#include <linux/ptrace.h>
+#include <linux/syscalls.h>
+
+#include <asm/daifflags.h>
+#include <asm/debug-monitors.h>
+#include <asm/fpsimd.h>
+#include <asm/syscall.h>
+#include <asm/thread_info.h>
+#include <asm/unistd.h>
+
+long compat_arm_syscall(struct pt_regs *regs, int scno);
+long sys_ni_syscall(void);
+
+static long do_ni_syscall(struct pt_regs *regs, int scno)
+{
+#ifdef CONFIG_COMPAT
+ long ret;
+ if (is_compat_task()) {
+ ret = compat_arm_syscall(regs, scno);
+ if (ret != -ENOSYS)
+ return ret;
+ }
+#endif
+
+ return sys_ni_syscall();
+}
+
+static long __invoke_syscall(struct pt_regs *regs, syscall_fn_t syscall_fn)
+{
+ return syscall_fn(regs);
+}
+
+static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
+ unsigned int sc_nr,
+ const syscall_fn_t syscall_table[])
+{
+ long ret;
+
+ if (scno < sc_nr) {
+ syscall_fn_t syscall_fn;
+ syscall_fn = syscall_table[array_index_nospec(scno, sc_nr)];
+ ret = __invoke_syscall(regs, syscall_fn);
+ } else {
+ ret = do_ni_syscall(regs, scno);
+ }
+
+ syscall_set_return_value(current, regs, 0, ret);
+}
+
+static inline bool has_syscall_work(unsigned long flags)
+{
+ return unlikely(flags & _TIF_SYSCALL_WORK);
+}
+
+int syscall_trace_enter(struct pt_regs *regs);
+void syscall_trace_exit(struct pt_regs *regs);
+
+#ifdef CONFIG_ARM64_ERRATUM_1463225
+DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
+
+static void cortex_a76_erratum_1463225_svc_handler(void)
+{
+ u32 reg, val;
+
+ if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
+ return;
+
+ if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
+ return;
+
+ __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
+ reg = read_sysreg(mdscr_el1);
+ val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
+ write_sysreg(val, mdscr_el1);
+ asm volatile("msr daifclr, #8");
+ isb();
+
+ /* We will have taken a single-step exception by this point */
+
+ write_sysreg(reg, mdscr_el1);
+ __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
+}
+#else
+static void cortex_a76_erratum_1463225_svc_handler(void) { }
+#endif /* CONFIG_ARM64_ERRATUM_1463225 */
+
+static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
+ const syscall_fn_t syscall_table[])
+{
+ unsigned long flags = current_thread_info()->flags;
+
+ regs->orig_x0 = regs->regs[0];
+ regs->syscallno = scno;
+
+ /*
+ * BTI note:
+ * The architecture does not guarantee that SPSR.BTYPE is zero
+ * on taking an SVC, so we could return to userspace with a
+ * non-zero BTYPE after the syscall.
+ *
+ * This shouldn't matter except when userspace is explicitly
+ * doing something stupid, such as setting PROT_BTI on a page
+ * that lacks conforming BTI/PACIxSP instructions, falling
+ * through from one executable page to another with differing
+ * PROT_BTI, or messing with BTYPE via ptrace: in such cases,
+ * userspace should not be surprised if a SIGILL occurs on
+ * syscall return.
+ *
+ * So, don't touch regs->pstate & PSR_BTYPE_MASK here.
+ * (Similarly for HVC and SMC elsewhere.)
+ */
+
+ cortex_a76_erratum_1463225_svc_handler();
+ local_daif_restore(DAIF_PROCCTX);
+
+ if (system_supports_mte() && (flags & _TIF_MTE_ASYNC_FAULT)) {
+ /*
+ * Process the asynchronous tag check fault before the actual
+ * syscall. do_notify_resume() will send a signal to userspace
+ * before the syscall is restarted.
+ */
+ syscall_set_return_value(current, regs, -ERESTARTNOINTR, 0);
+ return;
+ }
+
+ if (has_syscall_work(flags)) {
+ /*
+ * The de-facto standard way to skip a system call using ptrace
+ * is to set the system call to -1 (NO_SYSCALL) and set x0 to a
+ * suitable error code for consumption by userspace. However,
+ * this cannot be distinguished from a user-issued syscall(-1)
+ * and so we must set x0 to -ENOSYS here in case the tracer doesn't
+ * issue the skip and we fall into trace_exit with x0 preserved.
+ *
+ * This is slightly odd because it also means that if a tracer
+ * sets the system call number to -1 but does not initialise x0,
+ * then x0 will be preserved for all system calls apart from a
+ * user-issued syscall(-1). However, requesting a skip and not
+ * setting the return value is unlikely to do anything sensible
+ * anyway.
+ */
+ if (scno == NO_SYSCALL)
+ syscall_set_return_value(current, regs, -ENOSYS, 0);
+ scno = syscall_trace_enter(regs);
+ if (scno == NO_SYSCALL)
+ goto trace_exit;
+ }
+
+ invoke_syscall(regs, scno, sc_nr, syscall_table);
+
+ /*
+ * The tracing status may have changed under our feet, so we have to
+ * check again. However, if we were tracing entry, then we always trace
+ * exit regardless, as the old entry assembly did.
+ */
+ if (!has_syscall_work(flags) && !IS_ENABLED(CONFIG_DEBUG_RSEQ)) {
+ local_daif_mask();
+ flags = current_thread_info()->flags;
+ if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP))
+ return;
+ local_daif_restore(DAIF_PROCCTX);
+ }
+
+trace_exit:
+ syscall_trace_exit(regs);
+}
+
+static inline void sve_user_discard(void)
+{
+ if (!system_supports_sve())
+ return;
+
+ clear_thread_flag(TIF_SVE);
+
+ /*
+ * task_fpsimd_load() won't be called to update CPACR_EL1 in
+ * ret_to_user unless TIF_FOREIGN_FPSTATE is still set, which only
+ * happens if a context switch or kernel_neon_begin() or context
+ * modification (sigreturn, ptrace) intervenes.
+ * So, ensure that CPACR_EL1 is already correct for the fast-path case.
+ */
+ sve_user_disable();
+}
+
+void do_el0_svc(struct pt_regs *regs)
+{
+ sve_user_discard();
+ el0_svc_common(regs, regs->regs[8], __NR_syscalls, sys_call_table);
+}
+
+#ifdef CONFIG_COMPAT
+void do_el0_svc_compat(struct pt_regs *regs)
+{
+ el0_svc_common(regs, regs->regs[7], __NR_compat_syscalls,
+ compat_sys_call_table);
+}
+#endif
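
sys_call_table and compat_sys_call_table above are pre-filled with __arm64_sys_ni_syscall using a GNU C range designator and then overridden by the generated __SYSCALL entries, and invoke_syscall() bounds-checks the number before indexing the table. A minimal stand-alone sketch of that pattern (illustrative names only, builds with GCC or Clang, not the kernel's dispatcher):

#include <stdio.h>

#define NR_CALLS 8

static long do_nothing(void) { return -38; /* -ENOSYS on Linux */ }
static long do_hello(void)   { puts("hello"); return 0; }

typedef long (*call_fn_t)(void);

static const call_fn_t call_table[NR_CALLS] = {
	[0 ... NR_CALLS - 1] = do_nothing,	/* default: not implemented */
	[3] = do_hello,				/* one known entry */
};

static long invoke(unsigned int nr)
{
	if (nr >= NR_CALLS)			/* out-of-range numbers never index the table */
		return -38;
	return call_table[nr]();
}

int main(void)
{
	printf("%ld %ld %ld\n", invoke(3), invoke(5), invoke(100));
	return 0;
}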
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
new file mode 100644
index 000000000..eebbc8d71
--- /dev/null
+++ b/arch/arm64/kernel/time.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Based on arch/arm/kernel/time.c
+ *
+ * Copyright (C) 1991, 1992, 1995 Linus Torvalds
+ * Modifications for ARM (C) 1994-2001 Russell King
+ * Copyright (C) 2012 ARM Ltd.
+ */
+
+#include <linux/clockchips.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/timex.h>
+#include <linux/errno.h>
+#include <linux/profile.h>
+#include <linux/syscore_ops.h>
+#include <linux/timer.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/clocksource.h>
+#include <linux/of_clk.h>
+#include <linux/acpi.h>
+
+#include <clocksource/arm_arch_timer.h>
+
+#include <asm/thread_info.h>
+#include <asm/stacktrace.h>
+#include <asm/paravirt.h>
+
+unsigned long profile_pc(struct pt_regs *regs)
+{
+ struct stackframe frame;
+
+ if (!in_lock_functions(regs->pc))
+ return regs->pc;
+
+ start_backtrace(&frame, regs->regs[29], regs->pc);
+
+ do {
+ int ret = unwind_frame(NULL, &frame);
+ if (ret < 0)
+ return 0;
+ } while (in_lock_functions(frame.pc));
+
+ return frame.pc;
+}
+EXPORT_SYMBOL(profile_pc);
+
+void __init time_init(void)
+{
+ u32 arch_timer_rate;
+
+ of_clk_init(NULL);
+ timer_probe();
+
+ tick_setup_hrtimer_broadcast();
+
+ arch_timer_rate = arch_timer_get_rate();
+ if (!arch_timer_rate)
+ panic("Unable to initialise architected timer.\n");
+
+ /* Calibrate the delay loop directly */
+ lpj_fine = arch_timer_rate / HZ;
+
+ pv_time_init();
+}
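
time_init() above calibrates the delay loop directly from the architected timer instead of measuring it: lpj_fine is simply the timer rate divided by HZ. With made-up example numbers (a 50 MHz timer and HZ=250, neither taken from this patch):

#include <stdio.h>

int main(void)
{
	unsigned long arch_timer_rate = 50000000UL;	/* assumed 50 MHz timer */
	unsigned long hz = 250;				/* assumed CONFIG_HZ */

	/* 50000000 / 250 = 200000 timer ticks per jiffy. */
	printf("lpj_fine = %lu\n", arch_timer_rate / hz);
	return 0;
}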
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
new file mode 100644
index 000000000..f35af19b7
--- /dev/null
+++ b/arch/arm64/kernel/topology.c
@@ -0,0 +1,272 @@
+/*
+ * arch/arm64/kernel/topology.c
+ *
+ * Copyright (C) 2011,2013,2014 Linaro Limited.
+ *
+ * Based on the arm32 version written by Vincent Guittot in turn based on
+ * arch/sh/kernel/topology.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/acpi.h>
+#include <linux/arch_topology.h>
+#include <linux/cacheinfo.h>
+#include <linux/cpufreq.h>
+#include <linux/init.h>
+#include <linux/percpu.h>
+
+#include <asm/cpu.h>
+#include <asm/cputype.h>
+#include <asm/topology.h>
+
+#ifdef CONFIG_ACPI
+static bool __init acpi_cpu_is_threaded(int cpu)
+{
+ int is_threaded = acpi_pptt_cpu_is_thread(cpu);
+
+ /*
+ * if the PPTT doesn't have thread information, assume a homogeneous
+ * machine and return the current CPU's thread state.
+ */
+ if (is_threaded < 0)
+ is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;
+
+ return !!is_threaded;
+}
+
+/*
+ * Propagate the topology information of the processor_topology_node tree to the
+ * cpu_topology array.
+ */
+int __init parse_acpi_topology(void)
+{
+ int cpu, topology_id;
+
+ if (acpi_disabled)
+ return 0;
+
+ for_each_possible_cpu(cpu) {
+ int i, cache_id;
+
+ topology_id = find_acpi_cpu_topology(cpu, 0);
+ if (topology_id < 0)
+ return topology_id;
+
+ if (acpi_cpu_is_threaded(cpu)) {
+ cpu_topology[cpu].thread_id = topology_id;
+ topology_id = find_acpi_cpu_topology(cpu, 1);
+ cpu_topology[cpu].core_id = topology_id;
+ } else {
+ cpu_topology[cpu].thread_id = -1;
+ cpu_topology[cpu].core_id = topology_id;
+ }
+ topology_id = find_acpi_cpu_topology_package(cpu);
+ cpu_topology[cpu].package_id = topology_id;
+
+ i = acpi_find_last_cache_level(cpu);
+
+ if (i > 0) {
+ /*
+ * this is the only part of cpu_topology that has
+ * a direct relationship with the cache topology
+ */
+ cache_id = find_acpi_cpu_cache_topology(cpu, i);
+ if (cache_id > 0)
+ cpu_topology[cpu].llc_id = cache_id;
+ }
+ }
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_ARM64_AMU_EXTN
+
+#undef pr_fmt
+#define pr_fmt(fmt) "AMU: " fmt
+
+static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, arch_max_freq_scale);
+static DEFINE_PER_CPU(u64, arch_const_cycles_prev);
+static DEFINE_PER_CPU(u64, arch_core_cycles_prev);
+static cpumask_var_t amu_fie_cpus;
+
+/* Initialize counter reference per-cpu variables for the current CPU */
+void init_cpu_freq_invariance_counters(void)
+{
+ this_cpu_write(arch_core_cycles_prev,
+ read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0));
+ this_cpu_write(arch_const_cycles_prev,
+ read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0));
+}
+
+static int validate_cpu_freq_invariance_counters(int cpu)
+{
+ u64 max_freq_hz, ratio;
+
+ if (!cpu_has_amu_feat(cpu)) {
+ pr_debug("CPU%d: counters are not supported.\n", cpu);
+ return -EINVAL;
+ }
+
+ if (unlikely(!per_cpu(arch_const_cycles_prev, cpu) ||
+ !per_cpu(arch_core_cycles_prev, cpu))) {
+ pr_debug("CPU%d: cycle counters are not enabled.\n", cpu);
+ return -EINVAL;
+ }
+
+ /* Convert maximum frequency from KHz to Hz and validate */
+ max_freq_hz = cpufreq_get_hw_max_freq(cpu) * 1000ULL;
+ if (unlikely(!max_freq_hz)) {
+ pr_debug("CPU%d: invalid maximum frequency.\n", cpu);
+ return -EINVAL;
+ }
+
+ /*
+ * Pre-compute the fixed ratio between the frequency of the constant
+ * counter and the maximum frequency of the CPU.
+ *
+ * const_freq
+ * arch_max_freq_scale = ---------------- * SCHED_CAPACITY_SCALE²
+ * cpuinfo_max_freq
+ *
+ * We use a factor of 2 * SCHED_CAPACITY_SHIFT -> SCHED_CAPACITY_SCALE²
+ * in order to ensure a good resolution for arch_max_freq_scale for
+ * very low arch timer frequencies (down to the KHz range which should
+ * be unlikely).
+ */
+ ratio = (u64)arch_timer_get_rate() << (2 * SCHED_CAPACITY_SHIFT);
+ ratio = div64_u64(ratio, max_freq_hz);
+ if (!ratio) {
+ WARN_ONCE(1, "System timer frequency too low.\n");
+ return -EINVAL;
+ }
+
+ per_cpu(arch_max_freq_scale, cpu) = (unsigned long)ratio;
+
+ return 0;
+}
+
+static inline bool
+enable_policy_freq_counters(int cpu, cpumask_var_t valid_cpus)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+
+ if (!policy) {
+ pr_debug("CPU%d: No cpufreq policy found.\n", cpu);
+ return false;
+ }
+
+ if (cpumask_subset(policy->related_cpus, valid_cpus))
+ cpumask_or(amu_fie_cpus, policy->related_cpus,
+ amu_fie_cpus);
+
+ cpufreq_cpu_put(policy);
+
+ return true;
+}
+
+static DEFINE_STATIC_KEY_FALSE(amu_fie_key);
+#define amu_freq_invariant() static_branch_unlikely(&amu_fie_key)
+
+static int __init init_amu_fie(void)
+{
+ cpumask_var_t valid_cpus;
+ bool have_policy = false;
+ int ret = 0;
+ int cpu;
+
+ if (!zalloc_cpumask_var(&valid_cpus, GFP_KERNEL))
+ return -ENOMEM;
+
+ if (!zalloc_cpumask_var(&amu_fie_cpus, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto free_valid_mask;
+ }
+
+ for_each_present_cpu(cpu) {
+ if (validate_cpu_freq_invariance_counters(cpu))
+ continue;
+ cpumask_set_cpu(cpu, valid_cpus);
+ have_policy |= enable_policy_freq_counters(cpu, valid_cpus);
+ }
+
+ /*
+ * If we are not restricted by cpufreq policies, we only enable
+ * the use of the AMU feature for FIE if all CPUs support AMU.
+ * Otherwise, enable_policy_freq_counters has already enabled
+ * policy cpus.
+ */
+ if (!have_policy && cpumask_equal(valid_cpus, cpu_present_mask))
+ cpumask_or(amu_fie_cpus, amu_fie_cpus, valid_cpus);
+
+ if (!cpumask_empty(amu_fie_cpus)) {
+ pr_info("CPUs[%*pbl]: counters will be used for FIE.",
+ cpumask_pr_args(amu_fie_cpus));
+ static_branch_enable(&amu_fie_key);
+ }
+
+ /*
+ * If the system is not fully invariant after AMU init, disable
+ * partial use of counters for frequency invariance.
+ */
+ if (!topology_scale_freq_invariant())
+ static_branch_disable(&amu_fie_key);
+
+free_valid_mask:
+ free_cpumask_var(valid_cpus);
+
+ return ret;
+}
+late_initcall_sync(init_amu_fie);
+
+bool arch_freq_counters_available(const struct cpumask *cpus)
+{
+ return amu_freq_invariant() &&
+ cpumask_subset(cpus, amu_fie_cpus);
+}
+
+void topology_scale_freq_tick(void)
+{
+ u64 prev_core_cnt, prev_const_cnt;
+ u64 core_cnt, const_cnt, scale;
+ int cpu = smp_processor_id();
+
+ if (!amu_freq_invariant())
+ return;
+
+ if (!cpumask_test_cpu(cpu, amu_fie_cpus))
+ return;
+
+ const_cnt = read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0);
+ core_cnt = read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0);
+ prev_const_cnt = this_cpu_read(arch_const_cycles_prev);
+ prev_core_cnt = this_cpu_read(arch_core_cycles_prev);
+
+ if (unlikely(core_cnt <= prev_core_cnt ||
+ const_cnt <= prev_const_cnt))
+ goto store_and_exit;
+
+ /*
+ * /\core arch_max_freq_scale
+ * scale = ------- * --------------------
+ * /\const SCHED_CAPACITY_SCALE
+ *
+ * See validate_cpu_freq_invariance_counters() for details on
+ * arch_max_freq_scale and the use of SCHED_CAPACITY_SHIFT.
+ */
+ scale = core_cnt - prev_core_cnt;
+ scale *= this_cpu_read(arch_max_freq_scale);
+ scale = div64_u64(scale >> SCHED_CAPACITY_SHIFT,
+ const_cnt - prev_const_cnt);
+
+ scale = min_t(unsigned long, scale, SCHED_CAPACITY_SCALE);
+ this_cpu_write(freq_scale, (unsigned long)scale);
+
+store_and_exit:
+ this_cpu_write(arch_core_cycles_prev, core_cnt);
+ this_cpu_write(arch_const_cycles_prev, const_cnt);
+}
+#endif /* CONFIG_ARM64_AMU_EXTN */
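
To make the two formulas in the comments above concrete, here is a stand-alone arithmetic sketch of the frequency-invariance scale: arch_max_freq_scale is precomputed from the constant-counter rate and the maximum CPU frequency, and on each tick the ratio of the counter deltas is folded into a capacity-relative scale. The 50 MHz counter and the 2 GHz / 1.5 GHz frequencies below are made-up illustrative values, not taken from this patch:

#include <stdint.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1UL << SCHED_CAPACITY_SHIFT)

int main(void)
{
	uint64_t const_rate  = 50000000ULL;	/* constant counter: assumed 50 MHz */
	uint64_t max_freq_hz = 2000000000ULL;	/* cpuinfo_max_freq: assumed 2 GHz */

	/* arch_max_freq_scale = const_rate * SCHED_CAPACITY_SCALE^2 / max_freq */
	uint64_t max_freq_scale =
		(const_rate << (2 * SCHED_CAPACITY_SHIFT)) / max_freq_hz;

	/* One tick's worth of deltas: 10 ms with the core running at ~1.5 GHz. */
	uint64_t d_const = 500000;		/* 10 ms of the 50 MHz counter */
	uint64_t d_core  = 15000000;		/* 10 ms at 1.5 GHz */

	uint64_t scale = (d_core * max_freq_scale >> SCHED_CAPACITY_SHIFT) / d_const;
	if (scale > SCHED_CAPACITY_SCALE)
		scale = SCHED_CAPACITY_SCALE;

	/* Expect ~768, i.e. 1.5 GHz / 2 GHz of SCHED_CAPACITY_SCALE (1024). */
	printf("scale = %llu\n", (unsigned long long)scale);
	return 0;
}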
diff --git a/arch/arm64/kernel/trace-events-emulation.h b/arch/arm64/kernel/trace-events-emulation.h
new file mode 100644
index 000000000..6c40f58b8
--- /dev/null
+++ b/arch/arm64/kernel/trace-events-emulation.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM emulation
+
+#if !defined(_TRACE_EMULATION_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_EMULATION_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(instruction_emulation,
+
+ TP_PROTO(const char *instr, u64 addr),
+ TP_ARGS(instr, addr),
+
+ TP_STRUCT__entry(
+ __string(instr, instr)
+ __field(u64, addr)
+ ),
+
+ TP_fast_assign(
+ __assign_str(instr, instr);
+ __entry->addr = addr;
+ ),
+
+ TP_printk("instr=\"%s\" addr=0x%llx", __get_str(instr), __entry->addr)
+);
+
+#endif /* _TRACE_EMULATION_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+
+#define TRACE_INCLUDE_FILE trace-events-emulation
+#include <trace/define_trace.h>
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
new file mode 100644
index 000000000..10a58017d
--- /dev/null
+++ b/arch/arm64/kernel/traps.c
@@ -0,0 +1,997 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Based on arch/arm/kernel/traps.c
+ *
+ * Copyright (C) 1995-2009 Russell King
+ * Copyright (C) 2012 ARM Ltd.
+ */
+
+#include <linux/bug.h>
+#include <linux/context_tracking.h>
+#include <linux/signal.h>
+#include <linux/personality.h>
+#include <linux/kallsyms.h>
+#include <linux/kprobes.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/hardirq.h>
+#include <linux/kdebug.h>
+#include <linux/module.h>
+#include <linux/kexec.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/task_stack.h>
+#include <linux/sizes.h>
+#include <linux/syscalls.h>
+#include <linux/mm_types.h>
+#include <linux/kasan.h>
+
+#include <asm/atomic.h>
+#include <asm/bug.h>
+#include <asm/cpufeature.h>
+#include <asm/daifflags.h>
+#include <asm/debug-monitors.h>
+#include <asm/esr.h>
+#include <asm/exception.h>
+#include <asm/extable.h>
+#include <asm/insn.h>
+#include <asm/kprobes.h>
+#include <asm/traps.h>
+#include <asm/smp.h>
+#include <asm/stack_pointer.h>
+#include <asm/stacktrace.h>
+#include <asm/exception.h>
+#include <asm/system_misc.h>
+#include <asm/sysreg.h>
+
+static const char *handler[] = {
+ "Synchronous Abort",
+ "IRQ",
+ "FIQ",
+ "Error"
+};
+
+int show_unhandled_signals = 0;
+
+static void dump_kernel_instr(const char *lvl, struct pt_regs *regs)
+{
+ unsigned long addr = instruction_pointer(regs);
+ char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
+ int i;
+
+ if (user_mode(regs))
+ return;
+
+ for (i = -4; i < 1; i++) {
+ unsigned int val, bad;
+
+ bad = aarch64_insn_read(&((u32 *)addr)[i], &val);
+
+ if (!bad)
+ p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
+ else {
+ p += sprintf(p, "bad PC value");
+ break;
+ }
+ }
+
+ printk("%sCode: %s\n", lvl, str);
+}
+
+#ifdef CONFIG_PREEMPT
+#define S_PREEMPT " PREEMPT"
+#elif defined(CONFIG_PREEMPT_RT)
+#define S_PREEMPT " PREEMPT_RT"
+#else
+#define S_PREEMPT ""
+#endif
+
+#define S_SMP " SMP"
+
+static int __die(const char *str, long err, struct pt_regs *regs)
+{
+ static int die_counter;
+ int ret;
+
+ pr_emerg("Internal error: %s: %016lx [#%d]" S_PREEMPT S_SMP "\n",
+ str, err, ++die_counter);
+
+ /* trap and error numbers are mostly meaningless on ARM */
+ ret = notify_die(DIE_OOPS, str, regs, err, 0, SIGSEGV);
+ if (ret == NOTIFY_STOP)
+ return ret;
+
+ print_modules();
+ show_regs(regs);
+
+ dump_kernel_instr(KERN_EMERG, regs);
+
+ return ret;
+}
+
+static DEFINE_RAW_SPINLOCK(die_lock);
+
+/*
+ * This function is protected against re-entrancy.
+ */
+void die(const char *str, struct pt_regs *regs, long err)
+{
+ int ret;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&die_lock, flags);
+
+ oops_enter();
+
+ console_verbose();
+ bust_spinlocks(1);
+ ret = __die(str, err, regs);
+
+ if (regs && kexec_should_crash(current))
+ crash_kexec(regs);
+
+ bust_spinlocks(0);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
+ oops_exit();
+
+ if (in_interrupt())
+ panic("%s: Fatal exception in interrupt", str);
+ if (panic_on_oops)
+ panic("%s: Fatal exception", str);
+
+ raw_spin_unlock_irqrestore(&die_lock, flags);
+
+ if (ret != NOTIFY_STOP)
+ make_task_dead(SIGSEGV);
+}
+
+static void arm64_show_signal(int signo, const char *str)
+{
+ static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+ struct task_struct *tsk = current;
+ unsigned int esr = tsk->thread.fault_code;
+ struct pt_regs *regs = task_pt_regs(tsk);
+
+ /* Leave if the signal won't be shown */
+ if (!show_unhandled_signals ||
+ !unhandled_signal(tsk, signo) ||
+ !__ratelimit(&rs))
+ return;
+
+ pr_info("%s[%d]: unhandled exception: ", tsk->comm, task_pid_nr(tsk));
+ if (esr)
+ pr_cont("%s, ESR 0x%08x, ", esr_get_class_string(esr), esr);
+
+ pr_cont("%s", str);
+ print_vma_addr(KERN_CONT " in ", regs->pc);
+ pr_cont("\n");
+ __show_regs(regs);
+}
+
+void arm64_force_sig_fault(int signo, int code, void __user *addr,
+ const char *str)
+{
+ arm64_show_signal(signo, str);
+ if (signo == SIGKILL)
+ force_sig(SIGKILL);
+ else
+ force_sig_fault(signo, code, addr);
+}
+
+void arm64_force_sig_mceerr(int code, void __user *addr, short lsb,
+ const char *str)
+{
+ arm64_show_signal(SIGBUS, str);
+ force_sig_mceerr(code, addr, lsb);
+}
+
+void arm64_force_sig_ptrace_errno_trap(int errno, void __user *addr,
+ const char *str)
+{
+ arm64_show_signal(SIGTRAP, str);
+ force_sig_ptrace_errno_trap(errno, addr);
+}
+
+void arm64_notify_die(const char *str, struct pt_regs *regs,
+ int signo, int sicode, void __user *addr,
+ int err)
+{
+ if (user_mode(regs)) {
+ WARN_ON(regs != current_pt_regs());
+ current->thread.fault_address = 0;
+ current->thread.fault_code = err;
+
+ arm64_force_sig_fault(signo, sicode, addr, str);
+ } else {
+ die(str, regs, err);
+ }
+}
+
+#ifdef CONFIG_COMPAT
+#define PSTATE_IT_1_0_SHIFT 25
+#define PSTATE_IT_1_0_MASK (0x3 << PSTATE_IT_1_0_SHIFT)
+#define PSTATE_IT_7_2_SHIFT 10
+#define PSTATE_IT_7_2_MASK (0x3f << PSTATE_IT_7_2_SHIFT)
+
+static u32 compat_get_it_state(struct pt_regs *regs)
+{
+ u32 it, pstate = regs->pstate;
+
+ it = (pstate & PSTATE_IT_1_0_MASK) >> PSTATE_IT_1_0_SHIFT;
+ it |= ((pstate & PSTATE_IT_7_2_MASK) >> PSTATE_IT_7_2_SHIFT) << 2;
+
+ return it;
+}
+
+static void compat_set_it_state(struct pt_regs *regs, u32 it)
+{
+ u32 pstate_it;
+
+ pstate_it = (it << PSTATE_IT_1_0_SHIFT) & PSTATE_IT_1_0_MASK;
+ pstate_it |= ((it >> 2) << PSTATE_IT_7_2_SHIFT) & PSTATE_IT_7_2_MASK;
+
+ regs->pstate &= ~PSR_AA32_IT_MASK;
+ regs->pstate |= pstate_it;
+}
+
+static void advance_itstate(struct pt_regs *regs)
+{
+ u32 it;
+
+ /* ARM mode */
+ if (!(regs->pstate & PSR_AA32_T_BIT) ||
+ !(regs->pstate & PSR_AA32_IT_MASK))
+ return;
+
+ it = compat_get_it_state(regs);
+
+ /*
+ * If this is the last instruction of the block, wipe the IT
+ * state. Otherwise advance it.
+ */
+ if (!(it & 7))
+ it = 0;
+ else
+ it = (it & 0xe0) | ((it << 1) & 0x1f);
+
+ compat_set_it_state(regs, it);
+}
+#else
+static void advance_itstate(struct pt_regs *regs)
+{
+}
+#endif
+
+void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size)
+{
+ regs->pc += size;
+
+ /*
+ * If we were single stepping, we want to get the step exception after
+ * we return from the trap.
+ */
+ if (user_mode(regs))
+ user_fastforward_single_step(current);
+
+ if (compat_user_mode(regs))
+ advance_itstate(regs);
+ else
+ regs->pstate &= ~PSR_BTYPE_MASK;
+}
+
+static int user_insn_read(struct pt_regs *regs, u32 *insnp)
+{
+ u32 instr;
+ void __user *pc = (void __user *)instruction_pointer(regs);
+
+ if (compat_thumb_mode(regs)) {
+ /* 16-bit Thumb instruction */
+ __le16 instr_le;
+ if (get_user(instr_le, (__le16 __user *)pc))
+ return -EFAULT;
+ instr = le16_to_cpu(instr_le);
+ if (aarch32_insn_is_wide(instr)) {
+ u32 instr2;
+
+ if (get_user(instr_le, (__le16 __user *)(pc + 2)))
+ return -EFAULT;
+ instr2 = le16_to_cpu(instr_le);
+ instr = (instr << 16) | instr2;
+ }
+ } else {
+ /* 32-bit ARM instruction */
+ __le32 instr_le;
+ if (get_user(instr_le, (__le32 __user *)pc))
+ return -EFAULT;
+ instr = le32_to_cpu(instr_le);
+ }
+
+ *insnp = instr;
+ return 0;
+}
+
+void force_signal_inject(int signal, int code, unsigned long address, unsigned int err)
+{
+ const char *desc;
+ struct pt_regs *regs = current_pt_regs();
+
+ if (WARN_ON(!user_mode(regs)))
+ return;
+
+ switch (signal) {
+ case SIGILL:
+ desc = "undefined instruction";
+ break;
+ case SIGSEGV:
+ desc = "illegal memory access";
+ break;
+ default:
+ desc = "unknown or unrecoverable error";
+ break;
+ }
+
+ /* Force signals we don't understand to SIGKILL */
+ if (WARN_ON(signal != SIGKILL &&
+ siginfo_layout(signal, code) != SIL_FAULT)) {
+ signal = SIGKILL;
+ }
+
+ arm64_notify_die(desc, regs, signal, code, (void __user *)address, err);
+}
+
+/*
+ * Set up process info to signal segmentation fault - called on access error.
+ */
+void arm64_notify_segfault(unsigned long addr)
+{
+ int code;
+
+ mmap_read_lock(current->mm);
+ if (find_vma(current->mm, addr) == NULL)
+ code = SEGV_MAPERR;
+ else
+ code = SEGV_ACCERR;
+ mmap_read_unlock(current->mm);
+
+ force_signal_inject(SIGSEGV, code, addr, 0);
+}
+
+void do_el0_undef(struct pt_regs *regs, unsigned long esr)
+{
+ u32 insn;
+
+ /* check for AArch32 breakpoint instructions */
+ if (!aarch32_break_handler(regs))
+ return;
+
+ if (user_insn_read(regs, &insn))
+ goto out_err;
+
+ if (try_emulate_mrs(regs, insn))
+ return;
+
+ if (try_emulate_armv8_deprecated(regs, insn))
+ return;
+
+out_err:
+ force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
+}
+
+void do_el1_undef(struct pt_regs *regs, unsigned long esr)
+{
+ u32 insn;
+
+ if (aarch64_insn_read((void *)regs->pc, &insn))
+ goto out_err;
+
+ if (try_emulate_el1_ssbs(regs, insn))
+ return;
+
+out_err:
+ die("Oops - Undefined instruction", regs, esr);
+}
+
+void do_el0_bti(struct pt_regs *regs)
+{
+ force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
+}
+
+void do_el1_bti(struct pt_regs *regs, unsigned long esr)
+{
+ die("Oops - BTI", regs, esr);
+}
+
+void do_el0_fpac(struct pt_regs *regs, unsigned long esr)
+{
+ force_signal_inject(SIGILL, ILL_ILLOPN, regs->pc, esr);
+}
+
+void do_el1_fpac(struct pt_regs *regs, unsigned long esr)
+{
+ /*
+ * Unexpected FPAC exception in the kernel: kill the task before it
+ * does any more harm.
+ */
+ die("Oops - FPAC", regs, esr);
+}
+
+#define __user_cache_maint(insn, address, res) \
+ if (address >= user_addr_max()) { \
+ res = -EFAULT; \
+ } else { \
+ uaccess_ttbr0_enable(); \
+ asm volatile ( \
+ "1: " insn ", %1\n" \
+ " mov %w0, #0\n" \
+ "2:\n" \
+ " .pushsection .fixup,\"ax\"\n" \
+ " .align 2\n" \
+ "3: mov %w0, %w2\n" \
+ " b 2b\n" \
+ " .popsection\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "=r" (res) \
+ : "r" (address), "i" (-EFAULT)); \
+ uaccess_ttbr0_disable(); \
+ }
+
+static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
+{
+ unsigned long address;
+ int rt = ESR_ELx_SYS64_ISS_RT(esr);
+ int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
+ int ret = 0;
+
+ address = untagged_addr(pt_regs_read_reg(regs, rt));
+
+ switch (crm) {
+ case ESR_ELx_SYS64_ISS_CRM_DC_CVAU: /* DC CVAU, gets promoted */
+ __user_cache_maint("dc civac", address, ret);
+ break;
+ case ESR_ELx_SYS64_ISS_CRM_DC_CVAC: /* DC CVAC, gets promoted */
+ __user_cache_maint("dc civac", address, ret);
+ break;
+ case ESR_ELx_SYS64_ISS_CRM_DC_CVADP: /* DC CVADP */
+ __user_cache_maint("sys 3, c7, c13, 1", address, ret);
+ break;
+ case ESR_ELx_SYS64_ISS_CRM_DC_CVAP: /* DC CVAP */
+ __user_cache_maint("sys 3, c7, c12, 1", address, ret);
+ break;
+ case ESR_ELx_SYS64_ISS_CRM_DC_CIVAC: /* DC CIVAC */
+ __user_cache_maint("dc civac", address, ret);
+ break;
+ case ESR_ELx_SYS64_ISS_CRM_IC_IVAU: /* IC IVAU */
+ __user_cache_maint("ic ivau", address, ret);
+ break;
+ default:
+ force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
+ return;
+ }
+
+ if (ret)
+ arm64_notify_segfault(address);
+ else
+ arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
+}
+
+static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
+{
+ int rt = ESR_ELx_SYS64_ISS_RT(esr);
+ unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);
+
+ if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
+ /* Hide DIC so that we can trap the unnecessary maintenance...*/
+ val &= ~BIT(CTR_DIC_SHIFT);
+
+ /* ... and fake IminLine to reduce the number of traps. */
+ val &= ~CTR_IMINLINE_MASK;
+ val |= (PAGE_SHIFT - 2) & CTR_IMINLINE_MASK;
+ }
+
+ pt_regs_write_reg(regs, rt, val);
+
+ arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
+}
+
+static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
+{
+ int rt = ESR_ELx_SYS64_ISS_RT(esr);
+
+ pt_regs_write_reg(regs, rt, arch_timer_read_counter());
+ arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
+}
+
+static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
+{
+ int rt = ESR_ELx_SYS64_ISS_RT(esr);
+
+ pt_regs_write_reg(regs, rt, arch_timer_get_rate());
+ arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
+}
+
+static void mrs_handler(unsigned int esr, struct pt_regs *regs)
+{
+ u32 sysreg, rt;
+
+ rt = ESR_ELx_SYS64_ISS_RT(esr);
+ sysreg = esr_sys64_to_sysreg(esr);
+
+ if (do_emulate_mrs(regs, sysreg, rt) != 0)
+ force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
+}
+
+static void wfi_handler(unsigned int esr, struct pt_regs *regs)
+{
+ arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
+}
+
+struct sys64_hook {
+ unsigned int esr_mask;
+ unsigned int esr_val;
+ void (*handler)(unsigned int esr, struct pt_regs *regs);
+};
+
+static const struct sys64_hook sys64_hooks[] = {
+ {
+ .esr_mask = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK,
+ .esr_val = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL,
+ .handler = user_cache_maint_handler,
+ },
+ {
+ /* Trap read access to CTR_EL0 */
+ .esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
+ .esr_val = ESR_ELx_SYS64_ISS_SYS_CTR_READ,
+ .handler = ctr_read_handler,
+ },
+ {
+ /* Trap read access to CNTVCT_EL0 */
+ .esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
+ .esr_val = ESR_ELx_SYS64_ISS_SYS_CNTVCT,
+ .handler = cntvct_read_handler,
+ },
+ {
+ /* Trap read access to CNTFRQ_EL0 */
+ .esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
+ .esr_val = ESR_ELx_SYS64_ISS_SYS_CNTFRQ,
+ .handler = cntfrq_read_handler,
+ },
+ {
+ /* Trap read access to CPUID registers */
+ .esr_mask = ESR_ELx_SYS64_ISS_SYS_MRS_OP_MASK,
+ .esr_val = ESR_ELx_SYS64_ISS_SYS_MRS_OP_VAL,
+ .handler = mrs_handler,
+ },
+ {
+ /* Trap WFI instructions executed in userspace */
+ .esr_mask = ESR_ELx_WFx_MASK,
+ .esr_val = ESR_ELx_WFx_WFI_VAL,
+ .handler = wfi_handler,
+ },
+ {},
+};
+
+#ifdef CONFIG_COMPAT
+static bool cp15_cond_valid(unsigned int esr, struct pt_regs *regs)
+{
+ int cond;
+
+ /* Only a T32 instruction can trap without CV being set */
+ if (!(esr & ESR_ELx_CV)) {
+ u32 it;
+
+ it = compat_get_it_state(regs);
+ if (!it)
+ return true;
+
+ cond = it >> 4;
+ } else {
+ cond = (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
+ }
+
+ return aarch32_opcode_cond_checks[cond](regs->pstate);
+}
+
+static void compat_cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
+{
+ int reg = (esr & ESR_ELx_CP15_32_ISS_RT_MASK) >> ESR_ELx_CP15_32_ISS_RT_SHIFT;
+
+ pt_regs_write_reg(regs, reg, arch_timer_get_rate());
+ arm64_skip_faulting_instruction(regs, 4);
+}
+
+static const struct sys64_hook cp15_32_hooks[] = {
+ {
+ .esr_mask = ESR_ELx_CP15_32_ISS_SYS_MASK,
+ .esr_val = ESR_ELx_CP15_32_ISS_SYS_CNTFRQ,
+ .handler = compat_cntfrq_read_handler,
+ },
+ {},
+};
+
+static void compat_cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
+{
+ int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT;
+ int rt2 = (esr & ESR_ELx_CP15_64_ISS_RT2_MASK) >> ESR_ELx_CP15_64_ISS_RT2_SHIFT;
+ u64 val = arch_timer_read_counter();
+
+ pt_regs_write_reg(regs, rt, lower_32_bits(val));
+ pt_regs_write_reg(regs, rt2, upper_32_bits(val));
+ arm64_skip_faulting_instruction(regs, 4);
+}
+
+static const struct sys64_hook cp15_64_hooks[] = {
+ {
+ .esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK,
+ .esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCT,
+ .handler = compat_cntvct_read_handler,
+ },
+ {},
+};
+
+void do_el0_cp15(unsigned long esr, struct pt_regs *regs)
+{
+ const struct sys64_hook *hook, *hook_base;
+
+ if (!cp15_cond_valid(esr, regs)) {
+ /*
+ * There is no T16 variant of a CP access, so we
+ * always advance PC by 4 bytes.
+ */
+ arm64_skip_faulting_instruction(regs, 4);
+ return;
+ }
+
+ switch (ESR_ELx_EC(esr)) {
+ case ESR_ELx_EC_CP15_32:
+ hook_base = cp15_32_hooks;
+ break;
+ case ESR_ELx_EC_CP15_64:
+ hook_base = cp15_64_hooks;
+ break;
+ default:
+ do_el0_undef(regs, esr);
+ return;
+ }
+
+ for (hook = hook_base; hook->handler; hook++)
+ if ((hook->esr_mask & esr) == hook->esr_val) {
+ hook->handler(esr, regs);
+ return;
+ }
+
+ /*
+ * New cp15 instructions may previously have been undefined at
+ * EL0. Fall back to our usual undefined instruction handler
+ * so that we handle these consistently.
+ */
+ do_el0_undef(regs, esr);
+}
+#endif
+
+void do_el0_sys(unsigned long esr, struct pt_regs *regs)
+{
+ const struct sys64_hook *hook;
+
+ for (hook = sys64_hooks; hook->handler; hook++)
+ if ((hook->esr_mask & esr) == hook->esr_val) {
+ hook->handler(esr, regs);
+ return;
+ }
+
+ /*
+ * New SYS instructions may previously have been undefined at EL0. Fall
+ * back to our usual undefined instruction handler so that we handle
+ * these consistently.
+ */
+ do_el0_undef(regs, esr);
+}
+
+static const char *esr_class_str[] = {
+ [0 ... ESR_ELx_EC_MAX] = "UNRECOGNIZED EC",
+ [ESR_ELx_EC_UNKNOWN] = "Unknown/Uncategorized",
+ [ESR_ELx_EC_WFx] = "WFI/WFE",
+ [ESR_ELx_EC_CP15_32] = "CP15 MCR/MRC",
+ [ESR_ELx_EC_CP15_64] = "CP15 MCRR/MRRC",
+ [ESR_ELx_EC_CP14_MR] = "CP14 MCR/MRC",
+ [ESR_ELx_EC_CP14_LS] = "CP14 LDC/STC",
+ [ESR_ELx_EC_FP_ASIMD] = "ASIMD",
+ [ESR_ELx_EC_CP10_ID] = "CP10 MRC/VMRS",
+ [ESR_ELx_EC_PAC] = "PAC",
+ [ESR_ELx_EC_CP14_64] = "CP14 MCRR/MRRC",
+ [ESR_ELx_EC_BTI] = "BTI",
+ [ESR_ELx_EC_ILL] = "PSTATE.IL",
+ [ESR_ELx_EC_SVC32] = "SVC (AArch32)",
+ [ESR_ELx_EC_HVC32] = "HVC (AArch32)",
+ [ESR_ELx_EC_SMC32] = "SMC (AArch32)",
+ [ESR_ELx_EC_SVC64] = "SVC (AArch64)",
+ [ESR_ELx_EC_HVC64] = "HVC (AArch64)",
+ [ESR_ELx_EC_SMC64] = "SMC (AArch64)",
+ [ESR_ELx_EC_SYS64] = "MSR/MRS (AArch64)",
+ [ESR_ELx_EC_SVE] = "SVE",
+ [ESR_ELx_EC_ERET] = "ERET/ERETAA/ERETAB",
+ [ESR_ELx_EC_FPAC] = "FPAC",
+ [ESR_ELx_EC_IMP_DEF] = "EL3 IMP DEF",
+ [ESR_ELx_EC_IABT_LOW] = "IABT (lower EL)",
+ [ESR_ELx_EC_IABT_CUR] = "IABT (current EL)",
+ [ESR_ELx_EC_PC_ALIGN] = "PC Alignment",
+ [ESR_ELx_EC_DABT_LOW] = "DABT (lower EL)",
+ [ESR_ELx_EC_DABT_CUR] = "DABT (current EL)",
+ [ESR_ELx_EC_SP_ALIGN] = "SP Alignment",
+ [ESR_ELx_EC_FP_EXC32] = "FP (AArch32)",
+ [ESR_ELx_EC_FP_EXC64] = "FP (AArch64)",
+ [ESR_ELx_EC_SERROR] = "SError",
+ [ESR_ELx_EC_BREAKPT_LOW] = "Breakpoint (lower EL)",
+ [ESR_ELx_EC_BREAKPT_CUR] = "Breakpoint (current EL)",
+ [ESR_ELx_EC_SOFTSTP_LOW] = "Software Step (lower EL)",
+ [ESR_ELx_EC_SOFTSTP_CUR] = "Software Step (current EL)",
+ [ESR_ELx_EC_WATCHPT_LOW] = "Watchpoint (lower EL)",
+ [ESR_ELx_EC_WATCHPT_CUR] = "Watchpoint (current EL)",
+ [ESR_ELx_EC_BKPT32] = "BKPT (AArch32)",
+ [ESR_ELx_EC_VECTOR32] = "Vector catch (AArch32)",
+ [ESR_ELx_EC_BRK64] = "BRK (AArch64)",
+};
+
+const char *esr_get_class_string(u32 esr)
+{
+ return esr_class_str[ESR_ELx_EC(esr)];
+}
+
+/*
+ * bad_mode handles the impossible case in the exception vector. This is always
+ * fatal.
+ */
+asmlinkage void notrace bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
+{
+ arm64_enter_nmi(regs);
+
+ console_verbose();
+
+ pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
+ handler[reason], smp_processor_id(), esr,
+ esr_get_class_string(esr));
+
+ __show_regs(regs);
+ local_daif_mask();
+ panic("bad mode");
+}
+
+/*
+ * bad_el0_sync handles unexpected, but potentially recoverable synchronous
+ * exceptions taken from EL0. Unlike bad_mode, this returns.
+ */
+void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
+{
+ void __user *pc = (void __user *)instruction_pointer(regs);
+
+ current->thread.fault_address = 0;
+ current->thread.fault_code = esr;
+
+ arm64_force_sig_fault(SIGILL, ILL_ILLOPC, pc,
+ "Bad EL0 synchronous exception");
+}
+
+#ifdef CONFIG_VMAP_STACK
+
+DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
+ __aligned(16);
+
+asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs)
+{
+ unsigned long tsk_stk = (unsigned long)current->stack;
+ unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
+ unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
+ unsigned int esr = read_sysreg(esr_el1);
+ unsigned long far = read_sysreg(far_el1);
+
+ arm64_enter_nmi(regs);
+
+ console_verbose();
+ pr_emerg("Insufficient stack space to handle exception!");
+
+ pr_emerg("ESR: 0x%08x -- %s\n", esr, esr_get_class_string(esr));
+ pr_emerg("FAR: 0x%016lx\n", far);
+
+ pr_emerg("Task stack: [0x%016lx..0x%016lx]\n",
+ tsk_stk, tsk_stk + THREAD_SIZE);
+ pr_emerg("IRQ stack: [0x%016lx..0x%016lx]\n",
+ irq_stk, irq_stk + IRQ_STACK_SIZE);
+ pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
+ ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);
+
+ __show_regs(regs);
+
+ /*
+	 * We use nmi_panic to limit the potential for recursive overflows,
+	 * and to get a better stack trace.
+ */
+ nmi_panic(NULL, "kernel stack overflow");
+ cpu_park_loop();
+}
+#endif
+
+void __noreturn arm64_serror_panic(struct pt_regs *regs, u32 esr)
+{
+ console_verbose();
+
+ pr_crit("SError Interrupt on CPU%d, code 0x%08x -- %s\n",
+ smp_processor_id(), esr, esr_get_class_string(esr));
+ if (regs)
+ __show_regs(regs);
+
+ nmi_panic(regs, "Asynchronous SError Interrupt");
+
+ cpu_park_loop();
+ unreachable();
+}
+
+bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr)
+{
+ u32 aet = arm64_ras_serror_get_severity(esr);
+
+ switch (aet) {
+ case ESR_ELx_AET_CE: /* corrected error */
+ case ESR_ELx_AET_UEO: /* restartable, not yet consumed */
+ /*
+ * The CPU can make progress. We may take UEO again as
+ * a more severe error.
+ */
+ return false;
+
+ case ESR_ELx_AET_UEU: /* Uncorrected Unrecoverable */
+ case ESR_ELx_AET_UER: /* Uncorrected Recoverable */
+ /*
+ * The CPU can't make progress. The exception may have
+ * been imprecise.
+ *
+ * Neoverse-N1 #1349291 means a non-KVM SError reported as
+ * Unrecoverable should be treated as Uncontainable. We
+ * call arm64_serror_panic() in both cases.
+ */
+ return true;
+
+ case ESR_ELx_AET_UC: /* Uncontainable or Uncategorized error */
+ default:
+ /* Error has been silently propagated */
+ arm64_serror_panic(regs, esr);
+ }
+}
+
+asmlinkage void noinstr do_serror(struct pt_regs *regs, unsigned int esr)
+{
+ arm64_enter_nmi(regs);
+
+ /* non-RAS errors are not containable */
+ if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
+ arm64_serror_panic(regs, esr);
+
+ arm64_exit_nmi(regs);
+}
+
+/* GENERIC_BUG traps */
+
+int is_valid_bugaddr(unsigned long addr)
+{
+ /*
+	 * bug_handler() is only called for BRK #BUG_BRK_IMM.
+ * So the answer is trivial -- any spurious instances with no
+ * bug table entry will be rejected by report_bug() and passed
+ * back to the debug-monitors code and handled as a fatal
+ * unexpected debug exception.
+ */
+ return 1;
+}
+
+static int bug_handler(struct pt_regs *regs, unsigned int esr)
+{
+ switch (report_bug(regs->pc, regs)) {
+ case BUG_TRAP_TYPE_BUG:
+ die("Oops - BUG", regs, esr);
+ break;
+
+ case BUG_TRAP_TYPE_WARN:
+ break;
+
+ default:
+ /* unknown/unrecognised bug trap type */
+ return DBG_HOOK_ERROR;
+ }
+
+ /* If thread survives, skip over the BUG instruction and continue: */
+ arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
+ return DBG_HOOK_HANDLED;
+}
+
+static struct break_hook bug_break_hook = {
+ .fn = bug_handler,
+ .imm = BUG_BRK_IMM,
+};
+
+static int reserved_fault_handler(struct pt_regs *regs, unsigned int esr)
+{
+ pr_err("%s generated an invalid instruction at %pS!\n",
+ in_bpf_jit(regs) ? "BPF JIT" : "Kernel text patching",
+ (void *)instruction_pointer(regs));
+
+ /* We cannot handle this */
+ return DBG_HOOK_ERROR;
+}
+
+static struct break_hook fault_break_hook = {
+ .fn = reserved_fault_handler,
+ .imm = FAULT_BRK_IMM,
+};
+
+#ifdef CONFIG_KASAN_SW_TAGS
+
+#define KASAN_ESR_RECOVER 0x20
+#define KASAN_ESR_WRITE 0x10
+#define KASAN_ESR_SIZE_MASK 0x0f
+#define KASAN_ESR_SIZE(esr) (1 << ((esr) & KASAN_ESR_SIZE_MASK))
+
+static int kasan_handler(struct pt_regs *regs, unsigned int esr)
+{
+ bool recover = esr & KASAN_ESR_RECOVER;
+ bool write = esr & KASAN_ESR_WRITE;
+ size_t size = KASAN_ESR_SIZE(esr);
+ u64 addr = regs->regs[0];
+ u64 pc = regs->pc;
+
+ kasan_report(addr, size, write, pc);
+
+ /*
+	 * The instrumentation allows us to control whether we can proceed
+	 * after a crash is detected. This is done by passing the -recover
+	 * flag to the compiler. Disabling recovery allows more compact code
+	 * to be generated.
+ *
+ * Unfortunately disabling recovery doesn't work for the kernel right
+ * now. KASAN reporting is disabled in some contexts (for example when
+ * the allocator accesses slab object metadata; this is controlled by
+ * current->kasan_depth). All these accesses are detected by the tool,
+ * even though the reports for them are not printed.
+ *
+ * This is something that might be fixed at some point in the future.
+ */
+ if (!recover)
+ die("Oops - KASAN", regs, esr);
+
+ /* If thread survives, skip over the brk instruction and continue: */
+ arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
+ return DBG_HOOK_HANDLED;
+}
+
+static struct break_hook kasan_break_hook = {
+ .fn = kasan_handler,
+ .imm = KASAN_BRK_IMM,
+ .mask = KASAN_BRK_MASK,
+};
+#endif
+
+/*
+ * Initial handler for AArch64 BRK exceptions
+ * This handler is only used until debug_traps_init().
+ */
+int __init early_brk64(unsigned long addr, unsigned int esr,
+ struct pt_regs *regs)
+{
+#ifdef CONFIG_KASAN_SW_TAGS
+ unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;
+
+ if ((comment & ~KASAN_BRK_MASK) == KASAN_BRK_IMM)
+ return kasan_handler(regs, esr) != DBG_HOOK_HANDLED;
+#endif
+ return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
+}
+
+void __init trap_init(void)
+{
+ register_kernel_break_hook(&bug_break_hook);
+ register_kernel_break_hook(&fault_break_hook);
+#ifdef CONFIG_KASAN_SW_TAGS
+ register_kernel_break_hook(&kasan_break_hook);
+#endif
+ debug_traps_init();
+}
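
advance_itstate() above packs the AArch32 ITSTATE field back together and steps it with a small bit trick: the top three bits keep the base condition, the low five bits (the block mask) shift left once per retired instruction, and the whole state is wiped once the low three bits are all zero. A stand-alone sketch of just that bit manipulation (the starting value is an arbitrary example, not a decoded IT instruction):

#include <stdio.h>

/* Advance an 8-bit ITSTATE value the same way advance_itstate() does. */
static unsigned int it_advance(unsigned int it)
{
	if (!(it & 7))				/* last instruction of the IT block */
		return 0;
	return (it & 0xe0) | ((it << 1) & 0x1f);
}

int main(void)
{
	unsigned int it = 0x26;			/* arbitrary example value */

	while (it) {
		printf("ITSTATE = 0x%02x\n", it);
		it = it_advance(it);
	}
	printf("ITSTATE = 0x00 (IT block finished)\n");
	return 0;
}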
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
new file mode 100644
index 000000000..debb8995d
--- /dev/null
+++ b/arch/arm64/kernel/vdso.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * VDSO implementations.
+ *
+ * Copyright (C) 2012 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/cache.h>
+#include <linux/clocksource.h>
+#include <linux/elf.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/slab.h>
+#include <linux/time_namespace.h>
+#include <linux/timekeeper_internal.h>
+#include <linux/vmalloc.h>
+#include <vdso/datapage.h>
+#include <vdso/helpers.h>
+#include <vdso/vsyscall.h>
+
+#include <asm/cacheflush.h>
+#include <asm/signal32.h>
+#include <asm/vdso.h>
+
+extern char vdso_start[], vdso_end[];
+extern char vdso32_start[], vdso32_end[];
+
+enum vdso_abi {
+ VDSO_ABI_AA64,
+ VDSO_ABI_AA32,
+};
+
+enum vvar_pages {
+ VVAR_DATA_PAGE_OFFSET,
+ VVAR_TIMENS_PAGE_OFFSET,
+ VVAR_NR_PAGES,
+};
+
+struct vdso_abi_info {
+ const char *name;
+ const char *vdso_code_start;
+ const char *vdso_code_end;
+ unsigned long vdso_pages;
+ /* Data Mapping */
+ struct vm_special_mapping *dm;
+ /* Code Mapping */
+ struct vm_special_mapping *cm;
+};
+
+static struct vdso_abi_info vdso_info[] __ro_after_init = {
+ [VDSO_ABI_AA64] = {
+ .name = "vdso",
+ .vdso_code_start = vdso_start,
+ .vdso_code_end = vdso_end,
+ },
+#ifdef CONFIG_COMPAT_VDSO
+ [VDSO_ABI_AA32] = {
+ .name = "vdso32",
+ .vdso_code_start = vdso32_start,
+ .vdso_code_end = vdso32_end,
+ },
+#endif /* CONFIG_COMPAT_VDSO */
+};
+
+/*
+ * The vDSO data page.
+ */
+static union {
+ struct vdso_data data[CS_BASES];
+ u8 page[PAGE_SIZE];
+} vdso_data_store __page_aligned_data;
+struct vdso_data *vdso_data = vdso_data_store.data;
+
+static int __vdso_remap(enum vdso_abi abi,
+ const struct vm_special_mapping *sm,
+ struct vm_area_struct *new_vma)
+{
+ unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
+ unsigned long vdso_size = vdso_info[abi].vdso_code_end -
+ vdso_info[abi].vdso_code_start;
+
+ if (vdso_size != new_size)
+ return -EINVAL;
+
+ current->mm->context.vdso = (void *)new_vma->vm_start;
+
+ return 0;
+}
+
+static int __vdso_init(enum vdso_abi abi)
+{
+ int i;
+ struct page **vdso_pagelist;
+ unsigned long pfn;
+
+ if (memcmp(vdso_info[abi].vdso_code_start, "\177ELF", 4)) {
+ pr_err("vDSO is not a valid ELF object!\n");
+ return -EINVAL;
+ }
+
+ vdso_info[abi].vdso_pages = (
+ vdso_info[abi].vdso_code_end -
+ vdso_info[abi].vdso_code_start) >>
+ PAGE_SHIFT;
+
+ vdso_pagelist = kcalloc(vdso_info[abi].vdso_pages,
+ sizeof(struct page *),
+ GFP_KERNEL);
+ if (vdso_pagelist == NULL)
+ return -ENOMEM;
+
+ /* Grab the vDSO code pages. */
+ pfn = sym_to_pfn(vdso_info[abi].vdso_code_start);
+
+ for (i = 0; i < vdso_info[abi].vdso_pages; i++)
+ vdso_pagelist[i] = pfn_to_page(pfn + i);
+
+ vdso_info[abi].cm->pages = vdso_pagelist;
+
+ return 0;
+}
+
+#ifdef CONFIG_TIME_NS
+struct vdso_data *arch_get_vdso_data(void *vvar_page)
+{
+ return (struct vdso_data *)(vvar_page);
+}
+
+/*
+ * The vvar mapping contains data for a specific time namespace, so when a task
+ * changes namespace we must unmap its vvar data for the old namespace.
+ * Subsequent faults will map in data for the new namespace.
+ *
+ * For more details see timens_setup_vdso_data().
+ */
+int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
+{
+ struct mm_struct *mm = task->mm;
+ struct vm_area_struct *vma;
+
+ mmap_read_lock(mm);
+
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ unsigned long size = vma->vm_end - vma->vm_start;
+
+ if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA64].dm))
+ zap_page_range(vma, vma->vm_start, size);
+#ifdef CONFIG_COMPAT_VDSO
+ if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA32].dm))
+ zap_page_range(vma, vma->vm_start, size);
+#endif
+ }
+
+ mmap_read_unlock(mm);
+ return 0;
+}
+
+static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
+{
+ if (likely(vma->vm_mm == current->mm))
+ return current->nsproxy->time_ns->vvar_page;
+
+ /*
+	 * VM_PFNMAP | VM_IO protect the .fault() handler from being called
+	 * through interfaces like /proc/$pid/mem or
+	 * process_vm_{readv,writev}() as long as there's no .access()
+	 * in special_mapping_vmops.
+	 * For more details, see check_vma_flags() and __access_remote_vm().
+ */
+ WARN(1, "vvar_page accessed remotely");
+
+ return NULL;
+}
+#else
+static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
+{
+ return NULL;
+}
+#endif
+
+static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
+ struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct page *timens_page = find_timens_vvar_page(vma);
+ unsigned long pfn;
+
+ switch (vmf->pgoff) {
+ case VVAR_DATA_PAGE_OFFSET:
+ if (timens_page)
+ pfn = page_to_pfn(timens_page);
+ else
+ pfn = sym_to_pfn(vdso_data);
+ break;
+#ifdef CONFIG_TIME_NS
+ case VVAR_TIMENS_PAGE_OFFSET:
+ /*
+ * If a task belongs to a time namespace then a namespace
+ * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
+ * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
+ * offset.
+ * See also the comment near timens_setup_vdso_data().
+ */
+ if (!timens_page)
+ return VM_FAULT_SIGBUS;
+ pfn = sym_to_pfn(vdso_data);
+ break;
+#endif /* CONFIG_TIME_NS */
+ default:
+ return VM_FAULT_SIGBUS;
+ }
+
+ return vmf_insert_pfn(vma, vmf->address, pfn);
+}
+
+static int vvar_mremap(const struct vm_special_mapping *sm,
+ struct vm_area_struct *new_vma)
+{
+ unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
+
+ if (new_size != VVAR_NR_PAGES * PAGE_SIZE)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __setup_additional_pages(enum vdso_abi abi,
+ struct mm_struct *mm,
+ struct linux_binprm *bprm,
+ int uses_interp)
+{
+ unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
+ unsigned long gp_flags = 0;
+ void *ret;
+
+ BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);
+
+ vdso_text_len = vdso_info[abi].vdso_pages << PAGE_SHIFT;
+ /* Be sure to map the data page */
+ vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE;
+
+ vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
+ if (IS_ERR_VALUE(vdso_base)) {
+ ret = ERR_PTR(vdso_base);
+ goto up_fail;
+ }
+
+ ret = _install_special_mapping(mm, vdso_base, VVAR_NR_PAGES * PAGE_SIZE,
+ VM_READ|VM_MAYREAD|VM_PFNMAP,
+ vdso_info[abi].dm);
+ if (IS_ERR(ret))
+ goto up_fail;
+
+ if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && system_supports_bti())
+ gp_flags = VM_ARM64_BTI;
+
+ vdso_base += VVAR_NR_PAGES * PAGE_SIZE;
+ mm->context.vdso = (void *)vdso_base;
+ ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
+ VM_READ|VM_EXEC|gp_flags|
+ VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+ vdso_info[abi].cm);
+ if (IS_ERR(ret))
+ goto up_fail;
+
+ return 0;
+
+up_fail:
+ mm->context.vdso = NULL;
+ return PTR_ERR(ret);
+}
+
+#ifdef CONFIG_COMPAT
+/*
+ * Create and map the vectors page for AArch32 tasks.
+ */
+static int aarch32_vdso_mremap(const struct vm_special_mapping *sm,
+ struct vm_area_struct *new_vma)
+{
+ return __vdso_remap(VDSO_ABI_AA32, sm, new_vma);
+}
+
+enum aarch32_map {
+ AA32_MAP_VECTORS, /* kuser helpers */
+ AA32_MAP_SIGPAGE,
+ AA32_MAP_VVAR,
+ AA32_MAP_VDSO,
+};
+
+static struct page *aarch32_vectors_page __ro_after_init;
+static struct page *aarch32_sig_page __ro_after_init;
+
+static struct vm_special_mapping aarch32_vdso_maps[] = {
+ [AA32_MAP_VECTORS] = {
+ .name = "[vectors]", /* ABI */
+ .pages = &aarch32_vectors_page,
+ },
+ [AA32_MAP_SIGPAGE] = {
+ .name = "[sigpage]", /* ABI */
+ .pages = &aarch32_sig_page,
+ },
+ [AA32_MAP_VVAR] = {
+ .name = "[vvar]",
+ .fault = vvar_fault,
+ .mremap = vvar_mremap,
+ },
+ [AA32_MAP_VDSO] = {
+ .name = "[vdso]",
+ .mremap = aarch32_vdso_mremap,
+ },
+};
+
+static int aarch32_alloc_kuser_vdso_page(void)
+{
+ extern char __kuser_helper_start[], __kuser_helper_end[];
+ int kuser_sz = __kuser_helper_end - __kuser_helper_start;
+ unsigned long vdso_page;
+
+ if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
+ return 0;
+
+ vdso_page = get_zeroed_page(GFP_ATOMIC);
+ if (!vdso_page)
+ return -ENOMEM;
+
+ memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
+ kuser_sz);
+ aarch32_vectors_page = virt_to_page(vdso_page);
+ flush_dcache_page(aarch32_vectors_page);
+ return 0;
+}
+
+static int aarch32_alloc_sigpage(void)
+{
+ extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
+ int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
+ unsigned long sigpage;
+
+ sigpage = get_zeroed_page(GFP_ATOMIC);
+ if (!sigpage)
+ return -ENOMEM;
+
+ memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz);
+ aarch32_sig_page = virt_to_page(sigpage);
+ flush_dcache_page(aarch32_sig_page);
+ return 0;
+}
+
+static int __aarch32_alloc_vdso_pages(void)
+{
+
+ if (!IS_ENABLED(CONFIG_COMPAT_VDSO))
+ return 0;
+
+ vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
+ vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];
+
+ return __vdso_init(VDSO_ABI_AA32);
+}
+
+static int __init aarch32_alloc_vdso_pages(void)
+{
+ int ret;
+
+ ret = __aarch32_alloc_vdso_pages();
+ if (ret)
+ return ret;
+
+ ret = aarch32_alloc_sigpage();
+ if (ret)
+ return ret;
+
+ return aarch32_alloc_kuser_vdso_page();
+}
+arch_initcall(aarch32_alloc_vdso_pages);
+
+static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
+{
+ void *ret;
+
+ if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
+ return 0;
+
+ /*
+ * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's
+ * not safe to CoW the page containing the CPU exception vectors.
+ */
+ ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
+ VM_READ | VM_EXEC |
+ VM_MAYREAD | VM_MAYEXEC,
+ &aarch32_vdso_maps[AA32_MAP_VECTORS]);
+
+ return PTR_ERR_OR_ZERO(ret);
+}
+
+static int aarch32_sigreturn_setup(struct mm_struct *mm)
+{
+ unsigned long addr;
+ void *ret;
+
+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
+ if (IS_ERR_VALUE(addr)) {
+ ret = ERR_PTR(addr);
+ goto out;
+ }
+
+ /*
+	 * VM_MAYWRITE is required to allow gdb to copy-on-write the
+	 * page and set breakpoints.
+ */
+ ret = _install_special_mapping(mm, addr, PAGE_SIZE,
+ VM_READ | VM_EXEC | VM_MAYREAD |
+ VM_MAYWRITE | VM_MAYEXEC,
+ &aarch32_vdso_maps[AA32_MAP_SIGPAGE]);
+ if (IS_ERR(ret))
+ goto out;
+
+ mm->context.sigpage = (void *)addr;
+
+out:
+ return PTR_ERR_OR_ZERO(ret);
+}
+
+int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+ struct mm_struct *mm = current->mm;
+ int ret;
+
+ if (mmap_write_lock_killable(mm))
+ return -EINTR;
+
+ ret = aarch32_kuser_helpers_setup(mm);
+ if (ret)
+ goto out;
+
+ if (IS_ENABLED(CONFIG_COMPAT_VDSO)) {
+ ret = __setup_additional_pages(VDSO_ABI_AA32, mm, bprm,
+ uses_interp);
+ if (ret)
+ goto out;
+ }
+
+ ret = aarch32_sigreturn_setup(mm);
+out:
+ mmap_write_unlock(mm);
+ return ret;
+}
+#endif /* CONFIG_COMPAT */
+
+static int vdso_mremap(const struct vm_special_mapping *sm,
+ struct vm_area_struct *new_vma)
+{
+ return __vdso_remap(VDSO_ABI_AA64, sm, new_vma);
+}
+
+enum aarch64_map {
+ AA64_MAP_VVAR,
+ AA64_MAP_VDSO,
+};
+
+static struct vm_special_mapping aarch64_vdso_maps[] __ro_after_init = {
+ [AA64_MAP_VVAR] = {
+ .name = "[vvar]",
+ .fault = vvar_fault,
+ .mremap = vvar_mremap,
+ },
+ [AA64_MAP_VDSO] = {
+ .name = "[vdso]",
+ .mremap = vdso_mremap,
+ },
+};
+
+static int __init vdso_init(void)
+{
+ vdso_info[VDSO_ABI_AA64].dm = &aarch64_vdso_maps[AA64_MAP_VVAR];
+ vdso_info[VDSO_ABI_AA64].cm = &aarch64_vdso_maps[AA64_MAP_VDSO];
+
+ return __vdso_init(VDSO_ABI_AA64);
+}
+arch_initcall(vdso_init);
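The .name strings in these vm_special_mapping tables are exactly what appears in /proc/<pid>/maps, which makes the mappings easy to inspect from userspace. A small, self-contained check (a native 64-bit process shows [vvar] and [vdso]; a compat process additionally gets [sigpage] and, when kuser helpers are enabled, [vectors]):

#include <stdio.h>
#include <string.h>

int main(void)
{
        char line[512];
        FILE *f = fopen("/proc/self/maps", "r");

        if (!f)
                return 1;

        while (fgets(line, sizeof(line), f))
                if (strstr(line, "[vdso]") || strstr(line, "[vvar]") ||
                    strstr(line, "[sigpage]") || strstr(line, "[vectors]"))
                        fputs(line, stdout);

        fclose(f);
        return 0;
}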
+
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+ struct mm_struct *mm = current->mm;
+ int ret;
+
+ if (mmap_write_lock_killable(mm))
+ return -EINTR;
+
+ ret = __setup_additional_pages(VDSO_ABI_AA64, mm, bprm, uses_interp);
+ mmap_write_unlock(mm);
+
+ return ret;
+}
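Once __setup_additional_pages() has installed the mapping, the ELF loader advertises the vDSO base to each new process through the AT_SYSINFO_EHDR auxiliary vector entry. A sketch of how userspace (normally the C library) picks it up, using the standard getauxval() interface:

#include <stdio.h>
#include <sys/auxv.h>
#include <elf.h>

int main(void)
{
        unsigned long base = getauxval(AT_SYSINFO_EHDR);
        const Elf64_Ehdr *ehdr = (const Elf64_Ehdr *)base;

        if (!base)
                return 1;

        printf("vDSO mapped at %#lx, %u program headers\n",
               base, ehdr->e_phnum);
        return 0;
}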
diff --git a/arch/arm64/kernel/vdso/.gitignore b/arch/arm64/kernel/vdso/.gitignore
new file mode 100644
index 000000000..652e31d82
--- /dev/null
+++ b/arch/arm64/kernel/vdso/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+vdso.lds
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
new file mode 100644
index 000000000..d65f52264
--- /dev/null
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -0,0 +1,84 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Building a vDSO image for AArch64.
+#
+# Author: Will Deacon <will.deacon@arm.com>
+# Heavily based on the vDSO Makefiles for other archs.
+#
+
+# Absolute relocation type $(ARCH_REL_TYPE_ABS) needs to be defined before
+# the inclusion of generic Makefile.
+ARCH_REL_TYPE_ABS := R_AARCH64_JUMP_SLOT|R_AARCH64_GLOB_DAT|R_AARCH64_ABS64
+include $(srctree)/lib/vdso/Makefile
+
+obj-vdso := vgettimeofday.o note.o sigreturn.o
+
+# Build rules
+targets := $(obj-vdso) vdso.so vdso.so.dbg
+obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
+
+btildflags-$(CONFIG_ARM64_BTI_KERNEL) += -z force-bti
+
+# -Bsymbolic has been added for consistency with arm, the compat vDSO and
+# potential future proofing if we end up with internal calls to the exported
+# routines, as x86 does (see 6f121e548f83 ("x86, vdso: Reimplement vdso.so
+# preparation in build-time C")).
+ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv \
+ -Bsymbolic $(call ld-option, --no-eh-frame-hdr) --build-id=sha1 -n \
+ $(btildflags-y) -T
+
+ccflags-y := -fno-common -fno-builtin -fno-stack-protector -ffixed-x18
+ccflags-y += -DDISABLE_BRANCH_PROFILING
+
+CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS) $(GCC_PLUGINS_CFLAGS)
+KASAN_SANITIZE := n
+UBSAN_SANITIZE := n
+OBJECT_FILES_NON_STANDARD := y
+KCOV_INSTRUMENT := n
+
+CFLAGS_vgettimeofday.o = -O2 -mcmodel=tiny -fasynchronous-unwind-tables
+
+ifneq ($(c-gettimeofday-y),)
+ CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y)
+endif
+
+# Disable gcov profiling for VDSO code
+GCOV_PROFILE := n
+
+obj-y += vdso.o
+targets += vdso.lds
+CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
+
+# Force dependency (incbin is bad)
+$(obj)/vdso.o : $(obj)/vdso.so
+
+# Link rule for the .so file, .lds has to be first
+$(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE
+ $(call if_changed,vdsold_and_vdso_check)
+
+# Strip rule for the .so file
+$(obj)/%.so: OBJCOPYFLAGS := -S
+$(obj)/%.so: $(obj)/%.so.dbg FORCE
+ $(call if_changed,objcopy)
+
+# Generate VDSO offsets using helper script
+gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
+quiet_cmd_vdsosym = VDSOSYM $@
+ cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
+
+include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
+ $(call if_changed,vdsosym)
+
+# Actual build commands
+quiet_cmd_vdsold_and_vdso_check = LD $@
+ cmd_vdsold_and_vdso_check = $(cmd_ld); $(cmd_vdso_check)
+
+# Install commands for the unstripped file
+quiet_cmd_vdso_install = INSTALL $@
+ cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
+
+vdso.so: $(obj)/vdso.so.dbg
+ @mkdir -p $(MODLIB)/vdso
+ $(call cmd,vdso_install)
+
+vdso_install: vdso.so
diff --git a/arch/arm64/kernel/vdso/gen_vdso_offsets.sh b/arch/arm64/kernel/vdso/gen_vdso_offsets.sh
new file mode 100755
index 000000000..8b806eacd
--- /dev/null
+++ b/arch/arm64/kernel/vdso/gen_vdso_offsets.sh
@@ -0,0 +1,16 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+#
+# Match symbols in the DSO that look like VDSO_*; produce a header file
+# of constant offsets into the shared object.
+#
+# Doing this inside the Makefile will break the $(filter-out) function,
+# causing Kbuild to rebuild the vdso-offsets header file every time.
+#
+# Author: Will Deacon <will.deacon@arm.com>
+#
+
+LC_ALL=C
+sed -n -e 's/^00*/0/' -e \
+'s/^\([0-9a-fA-F]*\) . VDSO_\([a-zA-Z0-9_]*\)$/\#define vdso_offset_\2\t0x\1/p'
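Illustratively, a VDSO_* symbol reported by nm is rewritten into a vdso_offset_* define, which the kernel later adds back onto the runtime base of the mapping. The sample address below is invented, and the VDSO_SYMBOL() helper is a sketch of the idea rather than a quote of asm/vdso.h:

/*
 * Given an nm line like (address invented):
 *
 *   00000000000005a0 T VDSO_sigtramp
 *
 * the script emits, into include/generated/vdso-offsets.h:
 *
 *   #define vdso_offset_sigtramp 0x05a0
 */
#define vdso_offset_sigtramp    0x05a0  /* sample generated value */
#define VDSO_SYMBOL(base, name) \
        ((void *)((unsigned long)(base) + vdso_offset_##name))

void *sigtramp_addr(void *vdso_base)
{
        return VDSO_SYMBOL(vdso_base, sigtramp);
}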
diff --git a/arch/arm64/kernel/vdso/note.S b/arch/arm64/kernel/vdso/note.S
new file mode 100644
index 000000000..3d4e82290
--- /dev/null
+++ b/arch/arm64/kernel/vdso/note.S
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ *
+ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
+ * Here we can supply some information useful to userland.
+ */
+
+#include <linux/uts.h>
+#include <linux/version.h>
+#include <linux/elfnote.h>
+#include <linux/build-salt.h>
+#include <asm/assembler.h>
+
+ELFNOTE_START(Linux, 0, "a")
+ .long LINUX_VERSION_CODE
+ELFNOTE_END
+
+BUILD_SALT
+
+emit_aarch64_feature_1_and
diff --git a/arch/arm64/kernel/vdso/sigreturn.S b/arch/arm64/kernel/vdso/sigreturn.S
new file mode 100644
index 000000000..0e18729ab
--- /dev/null
+++ b/arch/arm64/kernel/vdso/sigreturn.S
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Sigreturn trampoline for returning from a signal when the SA_RESTORER
+ * flag is not set. It serves primarily as a hall of shame for crappy
+ * unwinders and features an exciting but mysterious NOP instruction.
+ *
+ * It's also fragile as hell, so please think twice before changing anything
+ * in here.
+ *
+ * Copyright (C) 2012 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/unistd.h>
+
+ .text
+
+/*
+ * NOTE!!! You may notice that all of the .cfi directives in this file have
+ * been commented out. This is because they have been shown to trigger segfaults
+ * in libgcc when unwinding out of a SIGCANCEL handler to invoke pthread
+ * cleanup handlers during the thread cancellation dance. By omitting the
+ * directives, we trigger an arm64-specific fallback path in the unwinder which
+ * recognises the signal frame and restores many of the registers directly from
+ * the sigcontext. Re-enabling the cfi directives here therefore needs to be
+ * much more comprehensive to reduce the risk of further regressions.
+ */
+
+/* Ensure that the mysterious NOP can be associated with a function. */
+// .cfi_startproc
+
+/*
+ * .cfi_signal_frame causes the corresponding Frame Description Entry (FDE) in
+ * the .eh_frame section to be annotated as a signal frame. This allows DWARF
+ * unwinders (e.g. libstdc++) to implement _Unwind_GetIPInfo() and identify
+ * the next frame using the unmodified return address instead of subtracting 1,
+ * which may yield the wrong FDE.
+ */
+// .cfi_signal_frame
+
+/*
+ * Tell the unwinder where to locate the frame record linking back to the
+ * interrupted context. We don't provide unwind info for registers other than
+ * the frame pointer and the link register here; in practice, this is likely to
+ * be insufficient for unwinding in C/C++ based runtimes, especially without a
+ * means to restore the stack pointer. Thankfully, unwinders and debuggers
+ * already have baked-in strategies for attempting to unwind out of signals.
+ */
+// .cfi_def_cfa x29, 0
+// .cfi_offset x29, 0 * 8
+// .cfi_offset x30, 1 * 8
+
+/*
+ * This mysterious NOP is required for some unwinders (e.g. libc++) that
+ * unconditionally subtract one from the result of _Unwind_GetIP() in order to
+ * identify the calling function.
+ * Hack borrowed from arch/powerpc/kernel/vdso64/sigtramp.S.
+ */
+ nop // Mysterious NOP
+
+/*
+ * GDB, libgcc and libunwind rely on being able to identify the sigreturn
+ * instruction sequence to unwind from signal handlers. We cannot, therefore,
+ * use SYM_FUNC_START() here, as it will emit a BTI C instruction and break the
+ * unwinder. Thankfully, this function is only ever called from a RET and so
+ * omitting the landing pad is perfectly fine.
+ */
+SYM_CODE_START(__kernel_rt_sigreturn)
+// PLEASE DO NOT MODIFY
+ mov x8, #__NR_rt_sigreturn
+// PLEASE DO NOT MODIFY
+ svc #0
+// PLEASE DO NOT MODIFY
+// .cfi_endproc
+SYM_CODE_END(__kernel_rt_sigreturn)
+
+emit_aarch64_feature_1_and
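The fallback path referred to above works by matching the exact opcode pair emitted by this trampoline at the reported return address. A sketch of that check (the constants are the A64 encodings of the two instructions; the helper itself is illustrative, not libgcc's actual code):

#include <stdint.h>
#include <stdbool.h>

static bool is_sigreturn_trampoline(const uint32_t *pc)
{
        return pc[0] == 0xd2801168 &&   /* mov x8, #139 (__NR_rt_sigreturn) */
               pc[1] == 0xd4000001;     /* svc #0 */
}

int main(void)
{
        static const uint32_t tramp[2] = { 0xd2801168, 0xd4000001 };

        return is_sigreturn_trampoline(tramp) ? 0 : 1;
}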
diff --git a/arch/arm64/kernel/vdso/vdso.S b/arch/arm64/kernel/vdso/vdso.S
new file mode 100644
index 000000000..c4b1990bf
--- /dev/null
+++ b/arch/arm64/kernel/vdso/vdso.S
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <linux/const.h>
+#include <asm/assembler.h>
+#include <asm/page.h>
+
+ .globl vdso_start, vdso_end
+ .section .rodata
+ .balign PAGE_SIZE
+vdso_start:
+ .incbin "arch/arm64/kernel/vdso/vdso.so"
+ .balign PAGE_SIZE
+vdso_end:
+
+ .previous
+
+emit_aarch64_feature_1_and
diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S
new file mode 100644
index 000000000..b840ab1b7
--- /dev/null
+++ b/arch/arm64/kernel/vdso/vdso.lds.S
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * GNU linker script for the VDSO library.
+ *
+ * Copyright (C) 2012 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ * Heavily based on the vDSO linker scripts for other archs.
+ */
+
+#include <linux/const.h>
+#include <asm/page.h>
+#include <asm/vdso.h>
+
+OUTPUT_FORMAT("elf64-littleaarch64", "elf64-bigaarch64", "elf64-littleaarch64")
+OUTPUT_ARCH(aarch64)
+
+SECTIONS
+{
+ PROVIDE(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE);
+#ifdef CONFIG_TIME_NS
+ PROVIDE(_timens_data = _vdso_data + PAGE_SIZE);
+#endif
+ . = VDSO_LBASE + SIZEOF_HEADERS;
+
+ .hash : { *(.hash) } :text
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+
+ /*
+ * Discard .note.gnu.property sections which are unused and have
+ * different alignment requirement from vDSO note sections.
+ */
+ /DISCARD/ : {
+ *(.note.GNU-stack .note.gnu.property)
+ }
+ .note : { *(.note.*) } :text :note
+
+ . = ALIGN(16);
+
+ .text : { *(.text*) } :text =0xd503201f
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+
+ .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
+ .eh_frame : { KEEP (*(.eh_frame)) } :text
+
+ .dynamic : { *(.dynamic) } :text :dynamic
+
+ .rodata : { *(.rodata*) } :text
+
+ _end = .;
+ PROVIDE(end = .);
+
+ /DISCARD/ : {
+ *(.data .data.* .gnu.linkonce.d.* .sdata*)
+ *(.bss .sbss .dynbss .dynsbss)
+ }
+}
+
+/*
+ * We must supply the ELF program headers explicitly to get just one
+ * PT_LOAD segment, and set the flags explicitly to make segments read-only.
+ */
+PHDRS
+{
+ text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
+ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
+ note PT_NOTE FLAGS(4); /* PF_R */
+ eh_frame_hdr PT_GNU_EH_FRAME;
+}
+
+/*
+ * This controls what symbols we export from the DSO.
+ */
+VERSION
+{
+ LINUX_2.6.39 {
+ global:
+ __kernel_rt_sigreturn;
+ __kernel_gettimeofday;
+ __kernel_clock_gettime;
+ __kernel_clock_getres;
+ local: *;
+ };
+}
+
+/*
+ * Make the sigreturn code visible to the kernel.
+ */
+VDSO_sigtramp = __kernel_rt_sigreturn;
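The VERSION block is what makes the __kernel_* entry points visible to the dynamic linker under the LINUX_2.6.39 version node. On glibc the already-mapped vDSO can usually be located via its soname and a symbol resolved explicitly; an illustrative sketch (may need -ldl on older glibc, and the dlopen() is expected to fail gracefully where this is unsupported):

#define _GNU_SOURCE
#include <dlfcn.h>
#include <stdio.h>
#include <time.h>

typedef int (*vdso_clock_gettime_t)(clockid_t, struct timespec *);

int main(void)
{
        void *vdso = dlopen("linux-vdso.so.1", RTLD_NOW | RTLD_NOLOAD);
        vdso_clock_gettime_t fn;
        struct timespec ts;

        if (!vdso)
                return 1;

        fn = (vdso_clock_gettime_t)dlvsym(vdso, "__kernel_clock_gettime",
                                          "LINUX_2.6.39");
        if (fn && fn(CLOCK_MONOTONIC, &ts) == 0)
                printf("monotonic: %lld.%09ld\n",
                       (long long)ts.tv_sec, ts.tv_nsec);
        return 0;
}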
diff --git a/arch/arm64/kernel/vdso/vgettimeofday.c b/arch/arm64/kernel/vdso/vgettimeofday.c
new file mode 100644
index 000000000..4236cf34d
--- /dev/null
+++ b/arch/arm64/kernel/vdso/vgettimeofday.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ARM64 userspace implementations of gettimeofday() and similar.
+ *
+ * Copyright (C) 2018 ARM Limited
+ *
+ */
+
+int __kernel_clock_gettime(clockid_t clock,
+ struct __kernel_timespec *ts)
+{
+ return __cvdso_clock_gettime(clock, ts);
+}
+
+int __kernel_gettimeofday(struct __kernel_old_timeval *tv,
+ struct timezone *tz)
+{
+ return __cvdso_gettimeofday(tv, tz);
+}
+
+int __kernel_clock_getres(clockid_t clock_id,
+ struct __kernel_timespec *res)
+{
+ return __cvdso_clock_getres(clock_id, res);
+}
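These functions back the ordinary libc time calls, so on a vDSO-enabled system a plain clock_gettime() normally completes without entering the kernel (easily confirmed by running the snippet below under strace and noting the absence of a clock_gettime syscall):

#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec ts;

        if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0)
                return 1;
        printf("CLOCK_MONOTONIC: %lld.%09ld\n",
               (long long)ts.tv_sec, ts.tv_nsec);
        return 0;
}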
diff --git a/arch/arm64/kernel/vdso32/.gitignore b/arch/arm64/kernel/vdso32/.gitignore
new file mode 100644
index 000000000..3542fa24e
--- /dev/null
+++ b/arch/arm64/kernel/vdso32/.gitignore
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+vdso.lds
+vdso.so.raw
diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
new file mode 100644
index 000000000..abad38c57
--- /dev/null
+++ b/arch/arm64/kernel/vdso32/Makefile
@@ -0,0 +1,205 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for vdso32
+#
+
+# Absolute relocation type $(ARCH_REL_TYPE_ABS) needs to be defined before
+# the inclusion of generic Makefile.
+ARCH_REL_TYPE_ABS := R_ARM_JUMP_SLOT|R_ARM_GLOB_DAT|R_ARM_ABS32
+include $(srctree)/lib/vdso/Makefile
+
+# Same as cc-*option, but using CC_COMPAT instead of CC
+ifeq ($(CONFIG_CC_IS_CLANG), y)
+CC_COMPAT ?= $(CC)
+CC_COMPAT += --target=arm-linux-gnueabi
+else
+CC_COMPAT ?= $(CROSS_COMPILE_COMPAT)gcc
+endif
+
+ifeq ($(CONFIG_LD_IS_LLD), y)
+LD_COMPAT ?= $(LD)
+else
+LD_COMPAT ?= $(CROSS_COMPILE_COMPAT)ld
+endif
+
+cc32-option = $(call try-run,\
+ $(CC_COMPAT) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
+cc32-disable-warning = $(call try-run,\
+ $(CC_COMPAT) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
+cc32-as-instr = $(call try-run,\
+ printf "%b\n" "$(1)" | $(CC_COMPAT) $(VDSO_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3))
+
+# We cannot use the global flags to compile the vDSO files, the main reason
+# being that the 32-bit compiler may be older than the main (64-bit) compiler
+# and therefore may not understand flags set using $(cc-option ...). Besides,
+# arch-specific options should be taken from the arm Makefile instead of the
+# arm64 one.
+# As a result we set our own flags here.
+
+# KBUILD_CPPFLAGS and NOSTDINC_FLAGS from top-level Makefile
+VDSO_CPPFLAGS := -D__KERNEL__ -nostdinc
+VDSO_CPPFLAGS += -isystem $(shell $(CC_COMPAT) -print-file-name=include 2>/dev/null)
+VDSO_CPPFLAGS += $(LINUXINCLUDE)
+
+# Common C and assembly flags
+# From top-level Makefile
+VDSO_CAFLAGS := $(VDSO_CPPFLAGS)
+VDSO_CAFLAGS += $(call cc32-option,-fno-PIE)
+ifdef CONFIG_DEBUG_INFO
+VDSO_CAFLAGS += -g
+endif
+
+# From arm Makefile
+VDSO_CAFLAGS += $(call cc32-option,-fno-dwarf2-cfi-asm)
+VDSO_CAFLAGS += -mabi=aapcs-linux -mfloat-abi=soft
+ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
+VDSO_CAFLAGS += -mbig-endian
+else
+VDSO_CAFLAGS += -mlittle-endian
+endif
+
+# From arm vDSO Makefile
+VDSO_CAFLAGS += -fPIC -fno-builtin -fno-stack-protector
+VDSO_CAFLAGS += -DDISABLE_BRANCH_PROFILING
+
+# Try to compile for ARMv8. If the compiler is too old and doesn't support it,
+# fall back to v7. There is no easy way to check which architecture the code
+# is being compiled for, so define a macro specifying it (see arch/arm/Makefile).
+VDSO_CAFLAGS += $(call cc32-option,-march=armv8-a -D__LINUX_ARM_ARCH__=8,\
+ -march=armv7-a -D__LINUX_ARM_ARCH__=7)
+
+VDSO_CFLAGS := $(VDSO_CAFLAGS)
+VDSO_CFLAGS += -DENABLE_COMPAT_VDSO=1
+# KBUILD_CFLAGS from top-level Makefile
+VDSO_CFLAGS += -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
+ -fno-strict-aliasing -fno-common \
+ -Werror-implicit-function-declaration \
+ -Wno-format-security \
+ -std=gnu89
+VDSO_CFLAGS += -O2
+# Some useful compiler-dependent flags from top-level Makefile
+VDSO_CFLAGS += $(call cc32-option,-Wdeclaration-after-statement,)
+VDSO_CFLAGS += $(call cc32-option,-Wno-pointer-sign)
+VDSO_CFLAGS += -fno-strict-overflow
+VDSO_CFLAGS += $(call cc32-option,-Werror=strict-prototypes)
+VDSO_CFLAGS += -Werror=date-time
+VDSO_CFLAGS += $(call cc32-option,-Werror=incompatible-pointer-types)
+
+# The 32-bit compiler does not provide 128-bit integers, which are used in
+# some headers that are indirectly included from the vDSO code.
+# This hack makes the compiler happy and should trigger a warning/error if
+# variables of such type are referenced.
+VDSO_CFLAGS += -D__uint128_t='void*'
+# Silence some warnings coming from headers that operate on long values
+# (on GCC 4.8 or older, there is unfortunately no way to silence this warning)
+VDSO_CFLAGS += $(call cc32-disable-warning,shift-count-overflow)
+VDSO_CFLAGS += -Wno-int-to-pointer-cast
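The effect of the __uint128_t hack above is easy to see in a few lines of C: declarations keep compiling because the type collapses to a pointer, while any genuine 128-bit arithmetic now fails to build. Standalone illustration, not taken from the kernel headers:

#define __uint128_t void *      /* what -D__uint128_t='void*' effectively does */

__uint128_t opaque_value;       /* declarations in shared headers still parse */

int main(void)
{
        /* opaque_value *= 2;   error: invalid operands to binary * */
        return 0;
}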
+
+# Compile as THUMB2 or ARM. Unwinding via frame-pointers in THUMB2 is
+# unreliable.
+ifeq ($(CONFIG_THUMB2_COMPAT_VDSO), y)
+VDSO_CFLAGS += -mthumb -fomit-frame-pointer
+else
+VDSO_CFLAGS += -marm
+endif
+
+VDSO_AFLAGS := $(VDSO_CAFLAGS)
+VDSO_AFLAGS += -D__ASSEMBLY__
+
+# Check for binutils support for dmb ishld
+dmbinstr := $(call cc32-as-instr,dmb ishld,-DCONFIG_AS_DMB_ISHLD=1)
+
+VDSO_CFLAGS += $(dmbinstr)
+VDSO_AFLAGS += $(dmbinstr)
+
+# From arm vDSO Makefile
+VDSO_LDFLAGS += -Bsymbolic --no-undefined -soname=linux-vdso.so.1
+VDSO_LDFLAGS += -z max-page-size=4096 -z common-page-size=4096
+VDSO_LDFLAGS += -nostdlib -shared --hash-style=sysv --build-id=sha1
+
+# Borrow vdsomunge.c from the arm vDSO
+# We have to use a relative path because scripts/Makefile.host prefixes
+# $(hostprogs) with $(obj)
+munge := ../../../arm/vdso/vdsomunge
+hostprogs := $(munge)
+
+c-obj-vdso := note.o
+c-obj-vdso-gettimeofday := vgettimeofday.o
+
+ifneq ($(c-gettimeofday-y),)
+VDSO_CFLAGS_gettimeofday_o += -include $(c-gettimeofday-y)
+endif
+
+VDSO_CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os
+
+# Build rules
+targets := $(c-obj-vdso) $(c-obj-vdso-gettimeofday) $(asm-obj-vdso) vdso.so vdso.so.dbg vdso.so.raw
+c-obj-vdso := $(addprefix $(obj)/, $(c-obj-vdso))
+c-obj-vdso-gettimeofday := $(addprefix $(obj)/, $(c-obj-vdso-gettimeofday))
+asm-obj-vdso := $(addprefix $(obj)/, $(asm-obj-vdso))
+obj-vdso := $(c-obj-vdso) $(c-obj-vdso-gettimeofday) $(asm-obj-vdso)
+
+obj-y += vdso.o
+targets += vdso.lds
+CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
+
+# Force dependency (vdso.s includes vdso.so through incbin)
+$(obj)/vdso.o: $(obj)/vdso.so
+
+include/generated/vdso32-offsets.h: $(obj)/vdso.so.dbg FORCE
+ $(call if_changed,vdsosym)
+
+# Strip rule for vdso.so
+$(obj)/vdso.so: OBJCOPYFLAGS := -S
+$(obj)/vdso.so: $(obj)/vdso.so.dbg FORCE
+ $(call if_changed,objcopy)
+
+$(obj)/vdso.so.dbg: $(obj)/vdso.so.raw $(obj)/$(munge) FORCE
+ $(call if_changed,vdsomunge)
+
+# Link rule for the .so file, .lds has to be first
+$(obj)/vdso.so.raw: $(src)/vdso.lds $(obj-vdso) FORCE
+ $(call if_changed,vdsold_and_vdso_check)
+
+# Compilation rules for the vDSO sources
+$(c-obj-vdso): %.o: %.c FORCE
+ $(call if_changed_dep,vdsocc)
+$(c-obj-vdso-gettimeofday): %.o: %.c FORCE
+ $(call if_changed_dep,vdsocc_gettimeofday)
+$(asm-obj-vdso): %.o: %.S FORCE
+ $(call if_changed_dep,vdsoas)
+
+# Actual build commands
+quiet_cmd_vdsold_and_vdso_check = LD32 $@
+ cmd_vdsold_and_vdso_check = $(cmd_vdsold); $(cmd_vdso_check)
+
+quiet_cmd_vdsold = LD32 $@
+ cmd_vdsold = $(LD_COMPAT) $(VDSO_LDFLAGS) \
+ -T $(filter %.lds,$^) $(filter %.o,$^) -o $@
+quiet_cmd_vdsocc = CC32 $@
+ cmd_vdsocc = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) -c -o $@ $<
+quiet_cmd_vdsocc_gettimeofday = CC32 $@
+ cmd_vdsocc_gettimeofday = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) $(VDSO_CFLAGS_gettimeofday_o) -c -o $@ $<
+quiet_cmd_vdsoas = AS32 $@
+ cmd_vdsoas = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_AFLAGS) -c -o $@ $<
+
+quiet_cmd_vdsomunge = MUNGE $@
+ cmd_vdsomunge = $(obj)/$(munge) $< $@
+
+# Generate vDSO offsets using helper script (borrowed from the 64-bit vDSO)
+gen-vdsosym := $(srctree)/$(src)/../vdso/gen_vdso_offsets.sh
+quiet_cmd_vdsosym = VDSOSYM $@
+# The AArch64 nm should be able to read an AArch32 binary
+ cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
+
+# Install commands for the unstripped file
+quiet_cmd_vdso_install = INSTALL32 $@
+ cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/vdso32.so
+
+vdso.so: $(obj)/vdso.so.dbg
+ @mkdir -p $(MODLIB)/vdso
+ $(call cmd,vdso_install)
+
+vdso_install: vdso.so
diff --git a/arch/arm64/kernel/vdso32/note.c b/arch/arm64/kernel/vdso32/note.c
new file mode 100644
index 000000000..eff5bf9ef
--- /dev/null
+++ b/arch/arm64/kernel/vdso32/note.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2012-2018 ARM Limited
+ *
+ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
+ * Here we can supply some information useful to userland.
+ */
+
+#include <linux/uts.h>
+#include <linux/version.h>
+#include <linux/elfnote.h>
+#include <linux/build-salt.h>
+
+ELFNOTE32("Linux", 0, LINUX_VERSION_CODE);
+BUILD_SALT;
diff --git a/arch/arm64/kernel/vdso32/vdso.S b/arch/arm64/kernel/vdso32/vdso.S
new file mode 100644
index 000000000..e72ac7bc4
--- /dev/null
+++ b/arch/arm64/kernel/vdso32/vdso.S
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2012 ARM Limited
+ */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <linux/const.h>
+#include <asm/page.h>
+
+ .globl vdso32_start, vdso32_end
+ .section .rodata
+ .balign PAGE_SIZE
+vdso32_start:
+ .incbin "arch/arm64/kernel/vdso32/vdso.so"
+ .balign PAGE_SIZE
+vdso32_end:
+
+ .previous
diff --git a/arch/arm64/kernel/vdso32/vdso.lds.S b/arch/arm64/kernel/vdso32/vdso.lds.S
new file mode 100644
index 000000000..3348ce5ea
--- /dev/null
+++ b/arch/arm64/kernel/vdso32/vdso.lds.S
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Adapted from arm64 version.
+ *
+ * GNU linker script for the VDSO library.
+ * Heavily based on the vDSO linker scripts for other archs.
+ *
+ * Copyright (C) 2012-2018 ARM Limited
+ */
+
+#include <linux/const.h>
+#include <asm/page.h>
+#include <asm/vdso.h>
+
+OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm", "elf32-littlearm")
+OUTPUT_ARCH(arm)
+
+SECTIONS
+{
+ PROVIDE_HIDDEN(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE);
+#ifdef CONFIG_TIME_NS
+ PROVIDE_HIDDEN(_timens_data = _vdso_data + PAGE_SIZE);
+#endif
+ . = VDSO_LBASE + SIZEOF_HEADERS;
+
+ .hash : { *(.hash) } :text
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+
+ .note : { *(.note.*) } :text :note
+
+ .dynamic : { *(.dynamic) } :text :dynamic
+
+ .rodata : { *(.rodata*) } :text
+
+ .text : { *(.text*) } :text =0xe7f001f2
+
+ .got : { *(.got) }
+ .rel.plt : { *(.rel.plt) }
+
+ /DISCARD/ : {
+ *(.note.GNU-stack)
+ *(.data .data.* .gnu.linkonce.d.* .sdata*)
+ *(.bss .sbss .dynbss .dynsbss)
+ }
+}
+
+/*
+ * We must supply the ELF program headers explicitly to get just one
+ * PT_LOAD segment, and set the flags explicitly to make segments read-only.
+ */
+PHDRS
+{
+ text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
+ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
+ note PT_NOTE FLAGS(4); /* PF_R */
+}
+
+VERSION
+{
+ LINUX_2.6 {
+ global:
+ __vdso_clock_gettime;
+ __vdso_gettimeofday;
+ __vdso_clock_getres;
+ __vdso_clock_gettime64;
+ local: *;
+ };
+}
diff --git a/arch/arm64/kernel/vdso32/vgettimeofday.c b/arch/arm64/kernel/vdso32/vgettimeofday.c
new file mode 100644
index 000000000..5acff29c5
--- /dev/null
+++ b/arch/arm64/kernel/vdso32/vgettimeofday.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ARM64 compat userspace implementations of gettimeofday() and similar.
+ *
+ * Copyright (C) 2018 ARM Limited
+ *
+ */
+
+int __vdso_clock_gettime(clockid_t clock,
+ struct old_timespec32 *ts)
+{
+ return __cvdso_clock_gettime32(clock, ts);
+}
+
+int __vdso_clock_gettime64(clockid_t clock,
+ struct __kernel_timespec *ts)
+{
+ return __cvdso_clock_gettime(clock, ts);
+}
+
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
+ struct timezone *tz)
+{
+ return __cvdso_gettimeofday(tv, tz);
+}
+
+int __vdso_clock_getres(clockid_t clock_id,
+ struct old_timespec32 *res)
+{
+ return __cvdso_clock_getres_time32(clock_id, res);
+}
+
+/* Avoid unresolved references emitted by GCC */
+
+void __aeabi_unwind_cpp_pr0(void)
+{
+}
+
+void __aeabi_unwind_cpp_pr1(void)
+{
+}
+
+void __aeabi_unwind_cpp_pr2(void)
+{
+}
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
new file mode 100644
index 000000000..71f4b5f24
--- /dev/null
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -0,0 +1,308 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ld script to make ARM Linux kernel
+ * taken from the i386 version by Russell King
+ * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
+ */
+
+#define RO_EXCEPTION_TABLE_ALIGN 8
+#define RUNTIME_DISCARD_EXIT
+
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/cache.h>
+#include <asm/hyp_image.h>
+#include <asm/kernel-pgtable.h>
+#include <asm/memory.h>
+#include <asm/page.h>
+
+#include "image.h"
+
+OUTPUT_ARCH(aarch64)
+ENTRY(_text)
+
+jiffies = jiffies_64;
+
+
+#ifdef CONFIG_KVM
+#define HYPERVISOR_EXTABLE \
+ . = ALIGN(SZ_8); \
+ __start___kvm_ex_table = .; \
+ *(__kvm_ex_table) \
+ __stop___kvm_ex_table = .;
+
+#define HYPERVISOR_PERCPU_SECTION \
+ . = ALIGN(PAGE_SIZE); \
+ HYP_SECTION_NAME(.data..percpu) : { \
+ *(HYP_SECTION_NAME(.data..percpu)) \
+ }
+#else /* CONFIG_KVM */
+#define HYPERVISOR_EXTABLE
+#define HYPERVISOR_PERCPU_SECTION
+#endif
+
+#define HYPERVISOR_TEXT \
+ /* \
+ * Align to 4 KB so that \
+ * a) the HYP vector table is at its minimum \
+ * alignment of 2048 bytes \
+ * b) the HYP init code will not cross a page \
+ * boundary if its size does not exceed \
+ * 4 KB (see related ASSERT() below) \
+ */ \
+ . = ALIGN(SZ_4K); \
+ __hyp_idmap_text_start = .; \
+ *(.hyp.idmap.text) \
+ __hyp_idmap_text_end = .; \
+ __hyp_text_start = .; \
+ *(.hyp.text) \
+ HYPERVISOR_EXTABLE \
+ __hyp_text_end = .;
+
+#define IDMAP_TEXT \
+ . = ALIGN(SZ_4K); \
+ __idmap_text_start = .; \
+ *(.idmap.text) \
+ __idmap_text_end = .;
+
+#ifdef CONFIG_HIBERNATION
+#define HIBERNATE_TEXT \
+ . = ALIGN(SZ_4K); \
+ __hibernate_exit_text_start = .; \
+ *(.hibernate_exit.text) \
+ __hibernate_exit_text_end = .;
+#else
+#define HIBERNATE_TEXT
+#endif
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+#define TRAMP_TEXT \
+ . = ALIGN(PAGE_SIZE); \
+ __entry_tramp_text_start = .; \
+ *(.entry.tramp.text) \
+ . = ALIGN(PAGE_SIZE); \
+ __entry_tramp_text_end = .;
+#else
+#define TRAMP_TEXT
+#endif
+
+/*
+ * The size of the PE/COFF section that covers the kernel image, which
+ * runs from _stext to _edata, must be a round multiple of the PE/COFF
+ * FileAlignment, which we set to its minimum value of 0x200. '_stext'
+ * itself is 4 KB aligned, so padding out _edata to a 0x200 aligned
+ * boundary should be sufficient.
+ */
+PECOFF_FILE_ALIGNMENT = 0x200;
+
+#ifdef CONFIG_EFI
+#define PECOFF_EDATA_PADDING \
+ .pecoff_edata_padding : { BYTE(0); . = ALIGN(PECOFF_FILE_ALIGNMENT); }
+#else
+#define PECOFF_EDATA_PADDING
+#endif
+
+SECTIONS
+{
+ /*
+ * XXX: The linker does not define how output sections are
+ * assigned to input sections when there are multiple statements
+ * matching the same input section name. There is no documented
+ * order of matching.
+ */
+ DISCARDS
+ /DISCARD/ : {
+ *(.interp .dynamic)
+ *(.dynsym .dynstr .hash .gnu.hash)
+ }
+
+ . = KIMAGE_VADDR;
+
+ .head.text : {
+ _text = .;
+ HEAD_TEXT
+ }
+ .text : { /* Real text segment */
+ _stext = .; /* Text and read-only data */
+ IRQENTRY_TEXT
+ SOFTIRQENTRY_TEXT
+ ENTRY_TEXT
+ TEXT_TEXT
+ SCHED_TEXT
+ CPUIDLE_TEXT
+ LOCK_TEXT
+ KPROBES_TEXT
+ HYPERVISOR_TEXT
+ IDMAP_TEXT
+ HIBERNATE_TEXT
+ TRAMP_TEXT
+ *(.fixup)
+ *(.gnu.warning)
+ . = ALIGN(16);
+ *(.got) /* Global offset table */
+ }
+
+ /*
+ * Make sure that the .got.plt is either completely empty or it
+ * contains only the lazy dispatch entries.
+ */
+ .got.plt : { *(.got.plt) }
+ ASSERT(SIZEOF(.got.plt) == 0 || SIZEOF(.got.plt) == 0x18,
+ "Unexpected GOT/PLT entries detected!")
+
+ . = ALIGN(SEGMENT_ALIGN);
+ _etext = .; /* End of text section */
+
+ /* everything from this point to __init_begin will be marked RO NX */
+ RO_DATA(PAGE_SIZE)
+
+ idmap_pg_dir = .;
+ . += IDMAP_DIR_SIZE;
+ idmap_pg_end = .;
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ tramp_pg_dir = .;
+ . += PAGE_SIZE;
+#endif
+
+ reserved_pg_dir = .;
+ . += PAGE_SIZE;
+
+ swapper_pg_dir = .;
+ . += PAGE_SIZE;
+
+ . = ALIGN(SEGMENT_ALIGN);
+ __init_begin = .;
+ __inittext_begin = .;
+
+ INIT_TEXT_SECTION(8)
+
+ __exittext_begin = .;
+ .exit.text : {
+ EXIT_TEXT
+ }
+ __exittext_end = .;
+
+ . = ALIGN(4);
+ .altinstructions : {
+ __alt_instructions = .;
+ *(.altinstructions)
+ __alt_instructions_end = .;
+ }
+
+ . = ALIGN(SEGMENT_ALIGN);
+ __inittext_end = .;
+ __initdata_begin = .;
+
+ .init.data : {
+ INIT_DATA
+ INIT_SETUP(16)
+ INIT_CALLS
+ CON_INITCALL
+ INIT_RAM_FS
+ *(.init.rodata.* .init.bss) /* from the EFI stub */
+ }
+ .exit.data : {
+ EXIT_DATA
+ }
+
+ PERCPU_SECTION(L1_CACHE_BYTES)
+ HYPERVISOR_PERCPU_SECTION
+
+ .rela.dyn : ALIGN(8) {
+ *(.rela .rela*)
+ }
+
+ __rela_offset = ABSOLUTE(ADDR(.rela.dyn) - KIMAGE_VADDR);
+ __rela_size = SIZEOF(.rela.dyn);
+
+#ifdef CONFIG_RELR
+ .relr.dyn : ALIGN(8) {
+ *(.relr.dyn)
+ }
+
+ __relr_offset = ABSOLUTE(ADDR(.relr.dyn) - KIMAGE_VADDR);
+ __relr_size = SIZEOF(.relr.dyn);
+#endif
+
+ . = ALIGN(SEGMENT_ALIGN);
+ __initdata_end = .;
+ __init_end = .;
+
+ _data = .;
+ _sdata = .;
+ RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN)
+
+ /*
+ * Data written with the MMU off but read with the MMU on requires
+ * cache lines to be invalidated, discarding up to a Cache Writeback
+ * Granule (CWG) of data from the cache. Keep the section that
+ * requires this type of maintenance to be in its own Cache Writeback
+ * Granule (CWG) area so the cache maintenance operations don't
+ * interfere with adjacent data.
+ */
+ .mmuoff.data.write : ALIGN(SZ_2K) {
+ __mmuoff_data_start = .;
+ *(.mmuoff.data.write)
+ }
+ . = ALIGN(SZ_2K);
+ .mmuoff.data.read : {
+ *(.mmuoff.data.read)
+ __mmuoff_data_end = .;
+ }
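For reference, a C-level symbol lands in one of these sections through the compiler's section attribute (the kernel wraps it as __section(); the variable name below is hypothetical):

/* Hypothetical example: data written with the MMU off but read with the
 * MMU on, placed in the write-side area reserved above.
 */
unsigned long boot_release_addr
        __attribute__((__section__(".mmuoff.data.write")));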
+
+ PECOFF_EDATA_PADDING
+ __pecoff_data_rawsize = ABSOLUTE(. - __initdata_begin);
+ _edata = .;
+
+ BSS_SECTION(0, 0, 0)
+
+ . = ALIGN(PAGE_SIZE);
+ init_pg_dir = .;
+ . += INIT_DIR_SIZE;
+ init_pg_end = .;
+
+ . = ALIGN(SEGMENT_ALIGN);
+ __pecoff_data_size = ABSOLUTE(. - __initdata_begin);
+ _end = .;
+
+ STABS_DEBUG
+ DWARF_DEBUG
+ ELF_DETAILS
+
+ HEAD_SYMBOLS
+
+ /*
+ * Sections that should stay zero sized, which is safer to
+ * explicitly check instead of blindly discarding.
+ */
+ .plt : {
+ *(.plt) *(.plt.*) *(.iplt) *(.igot .igot.plt)
+ }
+ ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!")
+
+ .data.rel.ro : { *(.data.rel.ro) }
+ ASSERT(SIZEOF(.data.rel.ro) == 0, "Unexpected RELRO detected!")
+}
+
+#include "image-vars.h"
+
+/*
+ * The HYP init code and ID map text can't be longer than a page each,
+ * and should not cross a page boundary.
+ */
+ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
+ "HYP init code too big or misaligned")
+ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
+ "ID map text too big or misaligned")
+#ifdef CONFIG_HIBERNATION
+ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1))
+ <= SZ_4K, "Hibernate exit text too big or misaligned")
+#endif
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) <= 3*PAGE_SIZE,
+ "Entry trampoline text too big")
+#endif
+/*
+ * If padding is applied before .head.text, virt<->phys conversions will fail.
+ */
+ASSERT(_text == KIMAGE_VADDR, "HEAD is misaligned")