From ace9429bb58fd418f0c81d4c2835699bddf6bde6 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Thu, 11 Apr 2024 10:27:49 +0200 Subject: Adding upstream version 6.6.15. Signed-off-by: Daniel Baumann --- arch/arm64/kernel/.gitignore | 2 + arch/arm64/kernel/Makefile | 91 + arch/arm64/kernel/acpi.c | 519 ++++ arch/arm64/kernel/acpi_numa.c | 120 + arch/arm64/kernel/acpi_parking_protocol.c | 132 + arch/arm64/kernel/alternative.c | 300 +++ arch/arm64/kernel/armv8_deprecated.c | 627 +++++ arch/arm64/kernel/asm-offsets.c | 221 ++ arch/arm64/kernel/cacheinfo.c | 117 + arch/arm64/kernel/compat_alignment.c | 383 +++ arch/arm64/kernel/cpu-reset.S | 53 + arch/arm64/kernel/cpu_errata.c | 763 ++++++ arch/arm64/kernel/cpu_ops.c | 118 + arch/arm64/kernel/cpufeature.c | 3542 +++++++++++++++++++++++++ arch/arm64/kernel/cpuidle.c | 74 + arch/arm64/kernel/cpuinfo.c | 480 ++++ arch/arm64/kernel/crash_core.c | 40 + arch/arm64/kernel/crash_dump.c | 49 + arch/arm64/kernel/debug-monitors.c | 460 ++++ arch/arm64/kernel/efi-header.S | 181 ++ arch/arm64/kernel/efi-rt-wrapper.S | 87 + arch/arm64/kernel/efi.c | 223 ++ arch/arm64/kernel/elfcore.c | 138 + arch/arm64/kernel/entry-common.c | 932 +++++++ arch/arm64/kernel/entry-fpsimd.S | 134 + arch/arm64/kernel/entry-ftrace.S | 353 +++ arch/arm64/kernel/entry.S | 1099 ++++++++ arch/arm64/kernel/fpsimd.c | 2136 +++++++++++++++ arch/arm64/kernel/ftrace.c | 517 ++++ arch/arm64/kernel/head.S | 902 +++++++ arch/arm64/kernel/hibernate-asm.S | 95 + arch/arm64/kernel/hibernate.c | 477 ++++ arch/arm64/kernel/hw_breakpoint.c | 1023 +++++++ arch/arm64/kernel/hyp-stub.S | 258 ++ arch/arm64/kernel/idle.c | 45 + arch/arm64/kernel/idreg-override.c | 338 +++ arch/arm64/kernel/image-vars.h | 115 + arch/arm64/kernel/image.h | 67 + arch/arm64/kernel/io.c | 97 + arch/arm64/kernel/irq.c | 136 + arch/arm64/kernel/jump_label.c | 28 + arch/arm64/kernel/kaslr.c | 38 + arch/arm64/kernel/kexec_image.c | 138 + arch/arm64/kernel/kgdb.c | 358 +++ arch/arm64/kernel/kuser32.S | 65 + arch/arm64/kernel/machine_kexec.c | 344 +++ arch/arm64/kernel/machine_kexec_file.c | 191 ++ arch/arm64/kernel/module-plts.c | 368 +++ arch/arm64/kernel/module.c | 602 +++++ arch/arm64/kernel/mte.c | 606 +++++ arch/arm64/kernel/paravirt.c | 176 ++ arch/arm64/kernel/patch-scs.c | 262 ++ arch/arm64/kernel/patching.c | 167 ++ arch/arm64/kernel/pci.c | 233 ++ arch/arm64/kernel/perf_callchain.c | 178 ++ arch/arm64/kernel/perf_regs.c | 106 + arch/arm64/kernel/pi/Makefile | 34 + arch/arm64/kernel/pi/kaslr_early.c | 110 + arch/arm64/kernel/pointer_auth.c | 113 + arch/arm64/kernel/probes/Makefile | 6 + arch/arm64/kernel/probes/decode-insn.c | 168 ++ arch/arm64/kernel/probes/decode-insn.h | 33 + arch/arm64/kernel/probes/kprobes.c | 425 +++ arch/arm64/kernel/probes/kprobes_trampoline.S | 86 + arch/arm64/kernel/probes/simulate-insn.c | 202 ++ arch/arm64/kernel/probes/simulate-insn.h | 20 + arch/arm64/kernel/probes/uprobes.c | 208 ++ arch/arm64/kernel/process.c | 760 ++++++ arch/arm64/kernel/proton-pack.c | 1156 ++++++++ arch/arm64/kernel/psci.c | 124 + arch/arm64/kernel/ptrace.c | 2309 ++++++++++++++++ arch/arm64/kernel/reloc_test_core.c | 77 + arch/arm64/kernel/reloc_test_syms.S | 85 + arch/arm64/kernel/relocate_kernel.S | 100 + arch/arm64/kernel/return_address.c | 50 + arch/arm64/kernel/sdei.c | 267 ++ arch/arm64/kernel/setup.c | 459 ++++ arch/arm64/kernel/signal.c | 1379 ++++++++++ arch/arm64/kernel/signal32.c | 495 ++++ arch/arm64/kernel/sigreturn32.S | 47 + arch/arm64/kernel/sleep.S | 158 ++ arch/arm64/kernel/smccc-call.S | 131 + 
arch/arm64/kernel/smp.c | 1096 ++++++++ arch/arm64/kernel/smp_spin_table.c | 127 + arch/arm64/kernel/stacktrace.c | 242 ++ arch/arm64/kernel/suspend.c | 175 ++ arch/arm64/kernel/sys.c | 61 + arch/arm64/kernel/sys32.c | 135 + arch/arm64/kernel/sys_compat.c | 118 + arch/arm64/kernel/syscall.c | 164 ++ arch/arm64/kernel/time.c | 72 + arch/arm64/kernel/topology.c | 348 +++ arch/arm64/kernel/trace-events-emulation.h | 36 + arch/arm64/kernel/traps.c | 1192 +++++++++ arch/arm64/kernel/vdso-wrap.S | 24 + arch/arm64/kernel/vdso.c | 451 ++++ arch/arm64/kernel/vdso/.gitignore | 2 + arch/arm64/kernel/vdso/Makefile | 90 + arch/arm64/kernel/vdso/gen_vdso_offsets.sh | 16 + arch/arm64/kernel/vdso/note.S | 23 + arch/arm64/kernel/vdso/sigreturn.S | 80 + arch/arm64/kernel/vdso/vdso.lds.S | 112 + arch/arm64/kernel/vdso/vgettimeofday.c | 29 + arch/arm64/kernel/vdso32-wrap.S | 19 + arch/arm64/kernel/vdso32/.gitignore | 3 + arch/arm64/kernel/vdso32/Makefile | 184 ++ arch/arm64/kernel/vdso32/note.c | 15 + arch/arm64/kernel/vdso32/vdso.lds.S | 92 + arch/arm64/kernel/vdso32/vgettimeofday.c | 45 + arch/arm64/kernel/vmlinux.lds.S | 388 +++ arch/arm64/kernel/watchdog_hld.c | 36 + 111 files changed, 35681 insertions(+) create mode 100644 arch/arm64/kernel/.gitignore create mode 100644 arch/arm64/kernel/Makefile create mode 100644 arch/arm64/kernel/acpi.c create mode 100644 arch/arm64/kernel/acpi_numa.c create mode 100644 arch/arm64/kernel/acpi_parking_protocol.c create mode 100644 arch/arm64/kernel/alternative.c create mode 100644 arch/arm64/kernel/armv8_deprecated.c create mode 100644 arch/arm64/kernel/asm-offsets.c create mode 100644 arch/arm64/kernel/cacheinfo.c create mode 100644 arch/arm64/kernel/compat_alignment.c create mode 100644 arch/arm64/kernel/cpu-reset.S create mode 100644 arch/arm64/kernel/cpu_errata.c create mode 100644 arch/arm64/kernel/cpu_ops.c create mode 100644 arch/arm64/kernel/cpufeature.c create mode 100644 arch/arm64/kernel/cpuidle.c create mode 100644 arch/arm64/kernel/cpuinfo.c create mode 100644 arch/arm64/kernel/crash_core.c create mode 100644 arch/arm64/kernel/crash_dump.c create mode 100644 arch/arm64/kernel/debug-monitors.c create mode 100644 arch/arm64/kernel/efi-header.S create mode 100644 arch/arm64/kernel/efi-rt-wrapper.S create mode 100644 arch/arm64/kernel/efi.c create mode 100644 arch/arm64/kernel/elfcore.c create mode 100644 arch/arm64/kernel/entry-common.c create mode 100644 arch/arm64/kernel/entry-fpsimd.S create mode 100644 arch/arm64/kernel/entry-ftrace.S create mode 100644 arch/arm64/kernel/entry.S create mode 100644 arch/arm64/kernel/fpsimd.c create mode 100644 arch/arm64/kernel/ftrace.c create mode 100644 arch/arm64/kernel/head.S create mode 100644 arch/arm64/kernel/hibernate-asm.S create mode 100644 arch/arm64/kernel/hibernate.c create mode 100644 arch/arm64/kernel/hw_breakpoint.c create mode 100644 arch/arm64/kernel/hyp-stub.S create mode 100644 arch/arm64/kernel/idle.c create mode 100644 arch/arm64/kernel/idreg-override.c create mode 100644 arch/arm64/kernel/image-vars.h create mode 100644 arch/arm64/kernel/image.h create mode 100644 arch/arm64/kernel/io.c create mode 100644 arch/arm64/kernel/irq.c create mode 100644 arch/arm64/kernel/jump_label.c create mode 100644 arch/arm64/kernel/kaslr.c create mode 100644 arch/arm64/kernel/kexec_image.c create mode 100644 arch/arm64/kernel/kgdb.c create mode 100644 arch/arm64/kernel/kuser32.S create mode 100644 arch/arm64/kernel/machine_kexec.c create mode 100644 arch/arm64/kernel/machine_kexec_file.c create mode 100644 
arch/arm64/kernel/module-plts.c create mode 100644 arch/arm64/kernel/module.c create mode 100644 arch/arm64/kernel/mte.c create mode 100644 arch/arm64/kernel/paravirt.c create mode 100644 arch/arm64/kernel/patch-scs.c create mode 100644 arch/arm64/kernel/patching.c create mode 100644 arch/arm64/kernel/pci.c create mode 100644 arch/arm64/kernel/perf_callchain.c create mode 100644 arch/arm64/kernel/perf_regs.c create mode 100644 arch/arm64/kernel/pi/Makefile create mode 100644 arch/arm64/kernel/pi/kaslr_early.c create mode 100644 arch/arm64/kernel/pointer_auth.c create mode 100644 arch/arm64/kernel/probes/Makefile create mode 100644 arch/arm64/kernel/probes/decode-insn.c create mode 100644 arch/arm64/kernel/probes/decode-insn.h create mode 100644 arch/arm64/kernel/probes/kprobes.c create mode 100644 arch/arm64/kernel/probes/kprobes_trampoline.S create mode 100644 arch/arm64/kernel/probes/simulate-insn.c create mode 100644 arch/arm64/kernel/probes/simulate-insn.h create mode 100644 arch/arm64/kernel/probes/uprobes.c create mode 100644 arch/arm64/kernel/process.c create mode 100644 arch/arm64/kernel/proton-pack.c create mode 100644 arch/arm64/kernel/psci.c create mode 100644 arch/arm64/kernel/ptrace.c create mode 100644 arch/arm64/kernel/reloc_test_core.c create mode 100644 arch/arm64/kernel/reloc_test_syms.S create mode 100644 arch/arm64/kernel/relocate_kernel.S create mode 100644 arch/arm64/kernel/return_address.c create mode 100644 arch/arm64/kernel/sdei.c create mode 100644 arch/arm64/kernel/setup.c create mode 100644 arch/arm64/kernel/signal.c create mode 100644 arch/arm64/kernel/signal32.c create mode 100644 arch/arm64/kernel/sigreturn32.S create mode 100644 arch/arm64/kernel/sleep.S create mode 100644 arch/arm64/kernel/smccc-call.S create mode 100644 arch/arm64/kernel/smp.c create mode 100644 arch/arm64/kernel/smp_spin_table.c create mode 100644 arch/arm64/kernel/stacktrace.c create mode 100644 arch/arm64/kernel/suspend.c create mode 100644 arch/arm64/kernel/sys.c create mode 100644 arch/arm64/kernel/sys32.c create mode 100644 arch/arm64/kernel/sys_compat.c create mode 100644 arch/arm64/kernel/syscall.c create mode 100644 arch/arm64/kernel/time.c create mode 100644 arch/arm64/kernel/topology.c create mode 100644 arch/arm64/kernel/trace-events-emulation.h create mode 100644 arch/arm64/kernel/traps.c create mode 100644 arch/arm64/kernel/vdso-wrap.S create mode 100644 arch/arm64/kernel/vdso.c create mode 100644 arch/arm64/kernel/vdso/.gitignore create mode 100644 arch/arm64/kernel/vdso/Makefile create mode 100755 arch/arm64/kernel/vdso/gen_vdso_offsets.sh create mode 100644 arch/arm64/kernel/vdso/note.S create mode 100644 arch/arm64/kernel/vdso/sigreturn.S create mode 100644 arch/arm64/kernel/vdso/vdso.lds.S create mode 100644 arch/arm64/kernel/vdso/vgettimeofday.c create mode 100644 arch/arm64/kernel/vdso32-wrap.S create mode 100644 arch/arm64/kernel/vdso32/.gitignore create mode 100644 arch/arm64/kernel/vdso32/Makefile create mode 100644 arch/arm64/kernel/vdso32/note.c create mode 100644 arch/arm64/kernel/vdso32/vdso.lds.S create mode 100644 arch/arm64/kernel/vdso32/vgettimeofday.c create mode 100644 arch/arm64/kernel/vmlinux.lds.S create mode 100644 arch/arm64/kernel/watchdog_hld.c (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/.gitignore b/arch/arm64/kernel/.gitignore new file mode 100644 index 0000000000..bbb90f92d0 --- /dev/null +++ b/arch/arm64/kernel/.gitignore @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +vmlinux.lds diff --git 
a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
new file mode 100644
index 0000000000..d95b3d6b47
--- /dev/null
+++ b/arch/arm64/kernel/Makefile
@@ -0,0 +1,91 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the linux kernel.
+#
+
+CFLAGS_armv8_deprecated.o := -I$(src)
+
+CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_insn.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_return_address.o = $(CC_FLAGS_FTRACE)
+
+# Remove stack protector to avoid triggering unneeded stack canary
+# checks due to randomize_kstack_offset.
+CFLAGS_REMOVE_syscall.o = -fstack-protector -fstack-protector-strong
+CFLAGS_syscall.o += -fno-stack-protector
+
+# When KASAN is enabled, a stack trace is recorded for every alloc/free, which
+# can significantly impact performance. Avoid instrumenting the stack trace
+# collection code to minimize this impact.
+KASAN_SANITIZE_stacktrace.o := n
+
+# It's not safe to invoke KCOV when portions of the kernel environment aren't
+# available or are out-of-sync with HW state. Since `noinstr` doesn't always
+# inhibit KCOV instrumentation, disable it for the entire compilation unit.
+KCOV_INSTRUMENT_entry-common.o := n
+KCOV_INSTRUMENT_idle.o := n
+
+# Object file lists.
+obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
+	entry-common.o entry-fpsimd.o process.o ptrace.o \
+	setup.o signal.o sys.o stacktrace.o time.o traps.o \
+	io.o vdso.o hyp-stub.o psci.o cpu_ops.o \
+	return_address.o cpuinfo.o cpu_errata.o \
+	cpufeature.o alternative.o cacheinfo.o \
+	smp.o smp_spin_table.o topology.o smccc-call.o \
+	syscall.o proton-pack.o idreg-override.o idle.o \
+	patching.o
+
+obj-$(CONFIG_COMPAT) += sys32.o signal32.o \
+	sys_compat.o
+obj-$(CONFIG_COMPAT) += sigreturn32.o
+obj-$(CONFIG_COMPAT_ALIGNMENT_FIXUPS) += compat_alignment.o
+obj-$(CONFIG_KUSER_HELPERS) += kuser32.o
+obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
+obj-$(CONFIG_MODULES) += module.o module-plts.o
+obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
+obj-$(CONFIG_HARDLOCKUP_DETECTOR_PERF) += watchdog_hld.o
+obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
+obj-$(CONFIG_CPU_PM) += sleep.o suspend.o
+obj-$(CONFIG_CPU_IDLE) += cpuidle.o
+obj-$(CONFIG_JUMP_LABEL) += jump_label.o
+obj-$(CONFIG_KGDB) += kgdb.o
+obj-$(CONFIG_EFI) += efi.o efi-rt-wrapper.o
+obj-$(CONFIG_PCI) += pci.o
+obj-$(CONFIG_ARMV8_DEPRECATED) += armv8_deprecated.o
+obj-$(CONFIG_ACPI) += acpi.o
+obj-$(CONFIG_ACPI_NUMA) += acpi_numa.o
+obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL) += acpi_parking_protocol.o
+obj-$(CONFIG_PARAVIRT) += paravirt.o
+obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o pi/
+obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate-asm.o
+obj-$(CONFIG_ELF_CORE) += elfcore.o
+obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o \
+	cpu-reset.o
+obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_image.o
+obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o
+arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
+obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+obj-$(CONFIG_CRASH_CORE) += crash_core.o
+obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o
+obj-$(CONFIG_ARM64_PTR_AUTH) += pointer_auth.o
+obj-$(CONFIG_ARM64_MTE) += mte.o
+obj-y += vdso-wrap.o
+obj-$(CONFIG_COMPAT_VDSO) += vdso32-wrap.o
+obj-$(CONFIG_UNWIND_PATCH_PAC_INTO_SCS) += patch-scs.o
+CFLAGS_patch-scs.o += -mbranch-protection=none
+
+# Force dependency (vdso*-wrap.S includes vdso.so through incbin)
+$(obj)/vdso-wrap.o: $(obj)/vdso/vdso.so
+$(obj)/vdso32-wrap.o: $(obj)/vdso32/vdso.so
+
+obj-y += probes/
+obj-y += head.o
+extra-y +=
vmlinux.lds + +ifeq ($(CONFIG_DEBUG_EFI),y) +AFLAGS_head.o += -DVMLINUX_PATH="\"$(realpath $(objtree)/vmlinux)\"" +endif + +# for cleaning +subdir- += vdso vdso32 diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c new file mode 100644 index 0000000000..dba8fcec7f --- /dev/null +++ b/arch/arm64/kernel/acpi.c @@ -0,0 +1,519 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * ARM64 Specific Low-Level ACPI Boot Support + * + * Copyright (C) 2013-2014, Linaro Ltd. + * Author: Al Stone + * Author: Graeme Gregory + * Author: Hanjun Guo + * Author: Tomasz Nowicki + * Author: Naresh Bhat + */ + +#define pr_fmt(fmt) "ACPI: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +int acpi_noirq = 1; /* skip ACPI IRQ initialization */ +int acpi_disabled = 1; +EXPORT_SYMBOL(acpi_disabled); + +int acpi_pci_disabled = 1; /* skip ACPI PCI scan and IRQ initialization */ +EXPORT_SYMBOL(acpi_pci_disabled); + +static bool param_acpi_off __initdata; +static bool param_acpi_on __initdata; +static bool param_acpi_force __initdata; + +static int __init parse_acpi(char *arg) +{ + if (!arg) + return -EINVAL; + + /* "acpi=off" disables both ACPI table parsing and interpreter */ + if (strcmp(arg, "off") == 0) + param_acpi_off = true; + else if (strcmp(arg, "on") == 0) /* prefer ACPI over DT */ + param_acpi_on = true; + else if (strcmp(arg, "force") == 0) /* force ACPI to be enabled */ + param_acpi_force = true; + else + return -EINVAL; /* Core will print when we return error */ + + return 0; +} +early_param("acpi", parse_acpi); + +static bool __init dt_is_stub(void) +{ + int node; + + fdt_for_each_subnode(node, initial_boot_params, 0) { + const char *name = fdt_get_name(initial_boot_params, node, NULL); + if (strcmp(name, "chosen") == 0) + continue; + if (strcmp(name, "hypervisor") == 0 && + of_flat_dt_is_compatible(node, "xen,xen")) + continue; + + return false; + } + + return true; +} + +/* + * __acpi_map_table() will be called before page_init(), so early_ioremap() + * or early_memremap() should be called here to for ACPI table mapping. 
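+ *
+ * The two helpers below are used in matched pairs during early table
+ * parsing, roughly like this (sketch only; error handling and the actual
+ * parsing are omitted):
+ *
+ *	hdr = __acpi_map_table(phys, sizeof(struct acpi_table_header));
+ *	... parse the mapped table ...
+ *	__acpi_unmap_table(hdr, sizeof(struct acpi_table_header));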
+ */ +void __init __iomem *__acpi_map_table(unsigned long phys, unsigned long size) +{ + if (!size) + return NULL; + + return early_memremap(phys, size); +} + +void __init __acpi_unmap_table(void __iomem *map, unsigned long size) +{ + if (!map || !size) + return; + + early_memunmap(map, size); +} + +bool __init acpi_psci_present(void) +{ + return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_COMPLIANT; +} + +/* Whether HVC must be used instead of SMC as the PSCI conduit */ +bool acpi_psci_use_hvc(void) +{ + return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_USE_HVC; +} + +/* + * acpi_fadt_sanity_check() - Check FADT presence and carry out sanity + * checks on it + * + * Return 0 on success, <0 on failure + */ +static int __init acpi_fadt_sanity_check(void) +{ + struct acpi_table_header *table; + struct acpi_table_fadt *fadt; + acpi_status status; + int ret = 0; + + /* + * FADT is required on arm64; retrieve it to check its presence + * and carry out revision and ACPI HW reduced compliancy tests + */ + status = acpi_get_table(ACPI_SIG_FADT, 0, &table); + if (ACPI_FAILURE(status)) { + const char *msg = acpi_format_exception(status); + + pr_err("Failed to get FADT table, %s\n", msg); + return -ENODEV; + } + + fadt = (struct acpi_table_fadt *)table; + + /* + * Revision in table header is the FADT Major revision, and there + * is a minor revision of FADT which was introduced by ACPI 5.1, + * we only deal with ACPI 5.1 or newer revision to get GIC and SMP + * boot protocol configuration data. + */ + if (table->revision < 5 || + (table->revision == 5 && fadt->minor_revision < 1)) { + pr_err(FW_BUG "Unsupported FADT revision %d.%d, should be 5.1+\n", + table->revision, fadt->minor_revision); + + if (!fadt->arm_boot_flags) { + ret = -EINVAL; + goto out; + } + pr_err("FADT has ARM boot flags set, assuming 5.1\n"); + } + + if (!(fadt->flags & ACPI_FADT_HW_REDUCED)) { + pr_err("FADT not ACPI hardware reduced compliant\n"); + ret = -EINVAL; + } + +out: + /* + * acpi_get_table() creates FADT table mapping that + * should be released after parsing and before resuming boot + */ + acpi_put_table(table); + return ret; +} + +/* + * acpi_boot_table_init() called from setup_arch(), always. + * 1. find RSDP and get its address, and then find XSDT + * 2. extract all tables and checksums them all + * 3. check ACPI FADT revision + * 4. check ACPI FADT HW reduced flag + * + * We can parse ACPI boot-time tables such as MADT after + * this function is called. + * + * On return ACPI is enabled if either: + * + * - ACPI tables are initialized and sanity checks passed + * - acpi=force was passed in the command line and ACPI was not disabled + * explicitly through acpi=off command line parameter + * + * ACPI is disabled on function return otherwise + */ +void __init acpi_boot_table_init(void) +{ + /* + * Enable ACPI instead of device tree unless + * - ACPI has been disabled explicitly (acpi=off), or + * - the device tree is not empty (it has more than just a /chosen node, + * and a /hypervisor node when running on Xen) + * and ACPI has not been [force] enabled (acpi=on|force) + */ + if (param_acpi_off || + (!param_acpi_on && !param_acpi_force && !dt_is_stub())) + goto done; + + /* + * ACPI is disabled at this point. Enable it in order to parse + * the ACPI tables and carry out sanity checks + */ + enable_acpi(); + + /* + * If ACPI tables are initialized and FADT sanity checks passed, + * leave ACPI enabled and carry on booting; otherwise disable ACPI + * on initialization error. 
+ * If acpi=force was passed on the command line it forces ACPI + * to be enabled even if its initialization failed. + */ + if (acpi_table_init() || acpi_fadt_sanity_check()) { + pr_err("Failed to init ACPI tables\n"); + if (!param_acpi_force) + disable_acpi(); + } + +done: + if (acpi_disabled) { + if (earlycon_acpi_spcr_enable) + early_init_dt_scan_chosen_stdout(); + } else { + acpi_parse_spcr(earlycon_acpi_spcr_enable, true); + if (IS_ENABLED(CONFIG_ACPI_BGRT)) + acpi_table_parse(ACPI_SIG_BGRT, acpi_parse_bgrt); + } +} + +static pgprot_t __acpi_get_writethrough_mem_attribute(void) +{ + /* + * Although UEFI specifies the use of Normal Write-through for + * EFI_MEMORY_WT, it is seldom used in practice and not implemented + * by most (all?) CPUs. Rather than allocate a MAIR just for this + * purpose, emit a warning and use Normal Non-cacheable instead. + */ + pr_warn_once("No MAIR allocation for EFI_MEMORY_WT; treating as Normal Non-cacheable\n"); + return __pgprot(PROT_NORMAL_NC); +} + +pgprot_t __acpi_get_mem_attribute(phys_addr_t addr) +{ + /* + * According to "Table 8 Map: EFI memory types to AArch64 memory + * types" of UEFI 2.5 section 2.3.6.1, each EFI memory type is + * mapped to a corresponding MAIR attribute encoding. + * The EFI memory attribute advises all possible capabilities + * of a memory region. + */ + + u64 attr; + + attr = efi_mem_attributes(addr); + if (attr & EFI_MEMORY_WB) + return PAGE_KERNEL; + if (attr & EFI_MEMORY_WC) + return __pgprot(PROT_NORMAL_NC); + if (attr & EFI_MEMORY_WT) + return __acpi_get_writethrough_mem_attribute(); + return __pgprot(PROT_DEVICE_nGnRnE); +} + +void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size) +{ + efi_memory_desc_t *md, *region = NULL; + pgprot_t prot; + + if (WARN_ON_ONCE(!efi_enabled(EFI_MEMMAP))) + return NULL; + + for_each_efi_memory_desc(md) { + u64 end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT); + + if (phys < md->phys_addr || phys >= end) + continue; + + if (phys + size > end) { + pr_warn(FW_BUG "requested region covers multiple EFI memory regions\n"); + return NULL; + } + region = md; + break; + } + + /* + * It is fine for AML to remap regions that are not represented in the + * EFI memory map at all, as it only describes normal memory, and MMIO + * regions that require a virtual mapping to make them accessible to + * the EFI runtime services. + */ + prot = __pgprot(PROT_DEVICE_nGnRnE); + if (region) { + switch (region->type) { + case EFI_LOADER_CODE: + case EFI_LOADER_DATA: + case EFI_BOOT_SERVICES_CODE: + case EFI_BOOT_SERVICES_DATA: + case EFI_CONVENTIONAL_MEMORY: + case EFI_PERSISTENT_MEMORY: + if (memblock_is_map_memory(phys) || + !memblock_is_region_memory(phys, size)) { + pr_warn(FW_BUG "requested region covers kernel memory @ %pa\n", &phys); + return NULL; + } + /* + * Mapping kernel memory is permitted if the region in + * question is covered by a single memblock with the + * NOMAP attribute set: this enables the use of ACPI + * table overrides passed via initramfs, which are + * reserved in memory using arch_reserve_mem_area() + * below. As this particular use case only requires + * read access, fall through to the R/O mapping case. + */ + fallthrough; + + case EFI_RUNTIME_SERVICES_CODE: + /* + * This would be unusual, but not problematic per se, + * as long as we take care not to create a writable + * mapping for executable code. 
+ */ + prot = PAGE_KERNEL_RO; + break; + + case EFI_ACPI_RECLAIM_MEMORY: + /* + * ACPI reclaim memory is used to pass firmware tables + * and other data that is intended for consumption by + * the OS only, which may decide it wants to reclaim + * that memory and use it for something else. We never + * do that, but we usually add it to the linear map + * anyway, in which case we should use the existing + * mapping. + */ + if (memblock_is_map_memory(phys)) + return (void __iomem *)__phys_to_virt(phys); + fallthrough; + + default: + if (region->attribute & EFI_MEMORY_WB) + prot = PAGE_KERNEL; + else if (region->attribute & EFI_MEMORY_WC) + prot = __pgprot(PROT_NORMAL_NC); + else if (region->attribute & EFI_MEMORY_WT) + prot = __acpi_get_writethrough_mem_attribute(); + } + } + return ioremap_prot(phys, size, pgprot_val(prot)); +} + +/* + * Claim Synchronous External Aborts as a firmware first notification. + * + * Used by KVM and the arch do_sea handler. + * @regs may be NULL when called from process context. + */ +int apei_claim_sea(struct pt_regs *regs) +{ + int err = -ENOENT; + bool return_to_irqs_enabled; + unsigned long current_flags; + + if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES)) + return err; + + current_flags = local_daif_save_flags(); + + /* current_flags isn't useful here as daif doesn't tell us about pNMI */ + return_to_irqs_enabled = !irqs_disabled_flags(arch_local_save_flags()); + + if (regs) + return_to_irqs_enabled = interrupts_enabled(regs); + + /* + * SEA can interrupt SError, mask it and describe this as an NMI so + * that APEI defers the handling. + */ + local_daif_restore(DAIF_ERRCTX); + nmi_enter(); + err = ghes_notify_sea(); + nmi_exit(); + + /* + * APEI NMI-like notifications are deferred to irq_work. Unless + * we interrupted irqs-masked code, we can do that now. 
+ */ + if (!err) { + if (return_to_irqs_enabled) { + local_daif_restore(DAIF_PROCCTX_NOIRQ); + __irq_enter(); + irq_work_run(); + __irq_exit(); + } else { + pr_warn_ratelimited("APEI work queued but not completed"); + err = -EINPROGRESS; + } + } + + local_daif_restore(current_flags); + + return err; +} + +void arch_reserve_mem_area(acpi_physical_address addr, size_t size) +{ + memblock_mark_nomap(addr, size); +} + +#ifdef CONFIG_ACPI_FFH +/* + * Implements ARM64 specific callbacks to support ACPI FFH Operation Region as + * specified in https://developer.arm.com/docs/den0048/latest + */ +struct acpi_ffh_data { + struct acpi_ffh_info info; + void (*invoke_ffh_fn)(unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3, + unsigned long a4, unsigned long a5, + unsigned long a6, unsigned long a7, + struct arm_smccc_res *args, + struct arm_smccc_quirk *res); + void (*invoke_ffh64_fn)(const struct arm_smccc_1_2_regs *args, + struct arm_smccc_1_2_regs *res); +}; + +int acpi_ffh_address_space_arch_setup(void *handler_ctxt, void **region_ctxt) +{ + enum arm_smccc_conduit conduit; + struct acpi_ffh_data *ffh_ctxt; + + if (arm_smccc_get_version() < ARM_SMCCC_VERSION_1_2) + return -EOPNOTSUPP; + + conduit = arm_smccc_1_1_get_conduit(); + if (conduit == SMCCC_CONDUIT_NONE) { + pr_err("%s: invalid SMCCC conduit\n", __func__); + return -EOPNOTSUPP; + } + + ffh_ctxt = kzalloc(sizeof(*ffh_ctxt), GFP_KERNEL); + if (!ffh_ctxt) + return -ENOMEM; + + if (conduit == SMCCC_CONDUIT_SMC) { + ffh_ctxt->invoke_ffh_fn = __arm_smccc_smc; + ffh_ctxt->invoke_ffh64_fn = arm_smccc_1_2_smc; + } else { + ffh_ctxt->invoke_ffh_fn = __arm_smccc_hvc; + ffh_ctxt->invoke_ffh64_fn = arm_smccc_1_2_hvc; + } + + memcpy(ffh_ctxt, handler_ctxt, sizeof(ffh_ctxt->info)); + + *region_ctxt = ffh_ctxt; + return AE_OK; +} + +static bool acpi_ffh_smccc_owner_allowed(u32 fid) +{ + int owner = ARM_SMCCC_OWNER_NUM(fid); + + if (owner == ARM_SMCCC_OWNER_STANDARD || + owner == ARM_SMCCC_OWNER_SIP || owner == ARM_SMCCC_OWNER_OEM) + return true; + + return false; +} + +int acpi_ffh_address_space_arch_handler(acpi_integer *value, void *region_context) +{ + int ret = 0; + struct acpi_ffh_data *ffh_ctxt = region_context; + + if (ffh_ctxt->info.offset == 0) { + /* SMC/HVC 32bit call */ + struct arm_smccc_res res; + u32 a[8] = { 0 }, *ptr = (u32 *)value; + + if (!ARM_SMCCC_IS_FAST_CALL(*ptr) || ARM_SMCCC_IS_64(*ptr) || + !acpi_ffh_smccc_owner_allowed(*ptr) || + ffh_ctxt->info.length > 32) { + ret = AE_ERROR; + } else { + int idx, len = ffh_ctxt->info.length >> 2; + + for (idx = 0; idx < len; idx++) + a[idx] = *(ptr + idx); + + ffh_ctxt->invoke_ffh_fn(a[0], a[1], a[2], a[3], a[4], + a[5], a[6], a[7], &res, NULL); + memcpy(value, &res, sizeof(res)); + } + + } else if (ffh_ctxt->info.offset == 1) { + /* SMC/HVC 64bit call */ + struct arm_smccc_1_2_regs *r = (struct arm_smccc_1_2_regs *)value; + + if (!ARM_SMCCC_IS_FAST_CALL(r->a0) || !ARM_SMCCC_IS_64(r->a0) || + !acpi_ffh_smccc_owner_allowed(r->a0) || + ffh_ctxt->info.length > sizeof(*r)) { + ret = AE_ERROR; + } else { + ffh_ctxt->invoke_ffh64_fn(r, r); + memcpy(value, r, ffh_ctxt->info.length); + } + } else { + ret = AE_ERROR; + } + + return ret; +} +#endif /* CONFIG_ACPI_FFH */ diff --git a/arch/arm64/kernel/acpi_numa.c b/arch/arm64/kernel/acpi_numa.c new file mode 100644 index 0000000000..e51535a5f9 --- /dev/null +++ b/arch/arm64/kernel/acpi_numa.c @@ -0,0 +1,120 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ACPI 5.1 based NUMA setup for ARM64 + * Lots of code was borrowed from 
arch/x86/mm/srat.c + * + * Copyright 2004 Andi Kleen, SuSE Labs. + * Copyright (C) 2013-2016, Linaro Ltd. + * Author: Hanjun Guo + * + * Reads the ACPI SRAT table to figure out what memory belongs to which CPUs. + * + * Called from acpi_numa_init while reading the SRAT and SLIT tables. + * Assumes all memory regions belonging to a single proximity domain + * are in one chunk. Holes between them will be included in the node. + */ + +#define pr_fmt(fmt) "ACPI: NUMA: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +static int acpi_early_node_map[NR_CPUS] __initdata = { NUMA_NO_NODE }; + +int __init acpi_numa_get_nid(unsigned int cpu) +{ + return acpi_early_node_map[cpu]; +} + +static inline int get_cpu_for_acpi_id(u32 uid) +{ + int cpu; + + for (cpu = 0; cpu < nr_cpu_ids; cpu++) + if (uid == get_acpi_id_for_cpu(cpu)) + return cpu; + + return -EINVAL; +} + +static int __init acpi_parse_gicc_pxm(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_srat_gicc_affinity *pa; + int cpu, pxm, node; + + if (srat_disabled()) + return -EINVAL; + + pa = (struct acpi_srat_gicc_affinity *)header; + if (!pa) + return -EINVAL; + + if (!(pa->flags & ACPI_SRAT_GICC_ENABLED)) + return 0; + + pxm = pa->proximity_domain; + node = pxm_to_node(pxm); + + /* + * If we can't map the UID to a logical cpu this + * means that the UID is not part of possible cpus + * so we do not need a NUMA mapping for it, skip + * the SRAT entry and keep parsing. + */ + cpu = get_cpu_for_acpi_id(pa->acpi_processor_uid); + if (cpu < 0) + return 0; + + acpi_early_node_map[cpu] = node; + pr_info("SRAT: PXM %d -> MPIDR 0x%llx -> Node %d\n", pxm, + cpu_logical_map(cpu), node); + + return 0; +} + +void __init acpi_map_cpus_to_nodes(void) +{ + acpi_table_parse_entries(ACPI_SIG_SRAT, sizeof(struct acpi_table_srat), + ACPI_SRAT_TYPE_GICC_AFFINITY, + acpi_parse_gicc_pxm, 0); +} + +/* Callback for Proximity Domain -> ACPI processor UID mapping */ +void __init acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa) +{ + int pxm, node; + + if (srat_disabled()) + return; + + if (pa->header.length < sizeof(struct acpi_srat_gicc_affinity)) { + pr_err("SRAT: Invalid SRAT header length: %d\n", + pa->header.length); + bad_srat(); + return; + } + + if (!(pa->flags & ACPI_SRAT_GICC_ENABLED)) + return; + + pxm = pa->proximity_domain; + node = acpi_map_pxm_to_node(pxm); + + if (node == NUMA_NO_NODE) { + pr_err("SRAT: Too many proximity domains %d\n", pxm); + bad_srat(); + return; + } + + node_set(node, numa_nodes_parsed); +} + diff --git a/arch/arm64/kernel/acpi_parking_protocol.c b/arch/arm64/kernel/acpi_parking_protocol.c new file mode 100644 index 0000000000..b1990e38ae --- /dev/null +++ b/arch/arm64/kernel/acpi_parking_protocol.c @@ -0,0 +1,132 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * ARM64 ACPI Parking Protocol implementation + * + * Authors: Lorenzo Pieralisi + * Mark Salter + */ +#include +#include +#include + +#include + +struct parking_protocol_mailbox { + __le32 cpu_id; + __le32 reserved; + __le64 entry_point; +}; + +struct cpu_mailbox_entry { + struct parking_protocol_mailbox __iomem *mailbox; + phys_addr_t mailbox_addr; + u8 version; + u8 gic_cpu_id; +}; + +static struct cpu_mailbox_entry cpu_mailbox_entries[NR_CPUS]; + +void __init acpi_set_mailbox_entry(int cpu, + struct acpi_madt_generic_interrupt *p) +{ + struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu]; + + cpu_entry->mailbox_addr = p->parked_address; + cpu_entry->version = 
p->parking_version; + cpu_entry->gic_cpu_id = p->cpu_interface_number; +} + +bool acpi_parking_protocol_valid(int cpu) +{ + struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu]; + + return cpu_entry->mailbox_addr && cpu_entry->version; +} + +static int acpi_parking_protocol_cpu_init(unsigned int cpu) +{ + pr_debug("%s: ACPI parked addr=%llx\n", __func__, + cpu_mailbox_entries[cpu].mailbox_addr); + + return 0; +} + +static int acpi_parking_protocol_cpu_prepare(unsigned int cpu) +{ + return 0; +} + +static int acpi_parking_protocol_cpu_boot(unsigned int cpu) +{ + struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu]; + struct parking_protocol_mailbox __iomem *mailbox; + u32 cpu_id; + + /* + * Map mailbox memory with attribute device nGnRE (ie ioremap - + * this deviates from the parking protocol specifications since + * the mailboxes are required to be mapped nGnRnE; the attribute + * discrepancy is harmless insofar as the protocol specification + * is concerned). + * If the mailbox is mistakenly allocated in the linear mapping + * by FW ioremap will fail since the mapping will be prevented + * by the kernel (it clashes with the linear mapping attributes + * specifications). + */ + mailbox = ioremap(cpu_entry->mailbox_addr, sizeof(*mailbox)); + if (!mailbox) + return -EIO; + + cpu_id = readl_relaxed(&mailbox->cpu_id); + /* + * Check if firmware has set-up the mailbox entry properly + * before kickstarting the respective cpu. + */ + if (cpu_id != ~0U) { + iounmap(mailbox); + return -ENXIO; + } + + /* + * stash the mailbox address mapping to use it for further FW + * checks in the postboot method + */ + cpu_entry->mailbox = mailbox; + + /* + * We write the entry point and cpu id as LE regardless of the + * native endianness of the kernel. Therefore, any boot-loaders + * that read this address need to convert this address to the + * Boot-Loader's endianness before jumping. + */ + writeq_relaxed(__pa_symbol(secondary_entry), + &mailbox->entry_point); + writel_relaxed(cpu_entry->gic_cpu_id, &mailbox->cpu_id); + + arch_send_wakeup_ipi_mask(cpumask_of(cpu)); + + return 0; +} + +static void acpi_parking_protocol_cpu_postboot(void) +{ + int cpu = smp_processor_id(); + struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu]; + struct parking_protocol_mailbox __iomem *mailbox = cpu_entry->mailbox; + u64 entry_point; + + entry_point = readq_relaxed(&mailbox->entry_point); + /* + * Check if firmware has cleared the entry_point as expected + * by the protocol specification. + */ + WARN_ON(entry_point); +} + +const struct cpu_operations acpi_parking_protocol_ops = { + .name = "parking-protocol", + .cpu_init = acpi_parking_protocol_cpu_init, + .cpu_prepare = acpi_parking_protocol_cpu_prepare, + .cpu_boot = acpi_parking_protocol_cpu_boot, + .cpu_postboot = acpi_parking_protocol_cpu_postboot +}; diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c new file mode 100644 index 0000000000..8ff6610af4 --- /dev/null +++ b/arch/arm64/kernel/alternative.c @@ -0,0 +1,300 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * alternative runtime patching + * inspired by the x86 version + * + * Copyright (C) 2014 ARM Ltd. 
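+ *
+ * An alternative is declared at build time, roughly like this (the
+ * capability name here is illustrative only):
+ *
+ *	asm(ALTERNATIVE("nop", "dmb ishld", ARM64_HAS_SOME_FEATURE));
+ *
+ * which places the default "nop" in the kernel image and records a
+ * replacement in the .altinstructions section; the code below rewrites
+ * the "nop" with the replacement once the capability is known to be
+ * present.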
+ */ + +#define pr_fmt(fmt) "alternatives: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define __ALT_PTR(a, f) ((void *)&(a)->f + (a)->f) +#define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset) +#define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset) + +#define ALT_CAP(a) ((a)->cpucap & ~ARM64_CB_BIT) +#define ALT_HAS_CB(a) ((a)->cpucap & ARM64_CB_BIT) + +/* Volatile, as we may be patching the guts of READ_ONCE() */ +static volatile int all_alternatives_applied; + +static DECLARE_BITMAP(applied_alternatives, ARM64_NCAPS); + +struct alt_region { + struct alt_instr *begin; + struct alt_instr *end; +}; + +bool alternative_is_applied(u16 cpucap) +{ + if (WARN_ON(cpucap >= ARM64_NCAPS)) + return false; + + return test_bit(cpucap, applied_alternatives); +} + +/* + * Check if the target PC is within an alternative block. + */ +static __always_inline bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc) +{ + unsigned long replptr = (unsigned long)ALT_REPL_PTR(alt); + return !(pc >= replptr && pc <= (replptr + alt->alt_len)); +} + +#define align_down(x, a) ((unsigned long)(x) & ~(((unsigned long)(a)) - 1)) + +static __always_inline u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnptr) +{ + u32 insn; + + insn = le32_to_cpu(*altinsnptr); + + if (aarch64_insn_is_branch_imm(insn)) { + s32 offset = aarch64_get_branch_offset(insn); + unsigned long target; + + target = (unsigned long)altinsnptr + offset; + + /* + * If we're branching inside the alternate sequence, + * do not rewrite the instruction, as it is already + * correct. Otherwise, generate the new instruction. + */ + if (branch_insn_requires_update(alt, target)) { + offset = target - (unsigned long)insnptr; + insn = aarch64_set_branch_offset(insn, offset); + } + } else if (aarch64_insn_is_adrp(insn)) { + s32 orig_offset, new_offset; + unsigned long target; + + /* + * If we're replacing an adrp instruction, which uses PC-relative + * immediate addressing, adjust the offset to reflect the new + * PC. adrp operates on 4K aligned addresses. + */ + orig_offset = aarch64_insn_adrp_get_offset(insn); + target = align_down(altinsnptr, SZ_4K) + orig_offset; + new_offset = target - align_down(insnptr, SZ_4K); + insn = aarch64_insn_adrp_set_offset(insn, new_offset); + } else if (aarch64_insn_uses_literal(insn)) { + /* + * Disallow patching unhandled instructions using PC relative + * literal addresses + */ + BUG(); + } + + return insn; +} + +static noinstr void patch_alternative(struct alt_instr *alt, + __le32 *origptr, __le32 *updptr, int nr_inst) +{ + __le32 *replptr; + int i; + + replptr = ALT_REPL_PTR(alt); + for (i = 0; i < nr_inst; i++) { + u32 insn; + + insn = get_alt_insn(alt, origptr + i, replptr + i); + updptr[i] = cpu_to_le32(insn); + } +} + +/* + * We provide our own, private D-cache cleaning function so that we don't + * accidentally call into the cache.S code, which is patched by us at + * runtime. 
+ */ +static noinstr void clean_dcache_range_nopatch(u64 start, u64 end) +{ + u64 cur, d_size, ctr_el0; + + ctr_el0 = arm64_ftr_reg_ctrel0.sys_val; + d_size = 4 << cpuid_feature_extract_unsigned_field(ctr_el0, + CTR_EL0_DminLine_SHIFT); + cur = start & ~(d_size - 1); + do { + /* + * We must clean+invalidate to the PoC in order to avoid + * Cortex-A53 errata 826319, 827319, 824069 and 819472 + * (this corresponds to ARM64_WORKAROUND_CLEAN_CACHE) + */ + asm volatile("dc civac, %0" : : "r" (cur) : "memory"); + } while (cur += d_size, cur < end); +} + +static void __apply_alternatives(const struct alt_region *region, + bool is_module, + unsigned long *cpucap_mask) +{ + struct alt_instr *alt; + __le32 *origptr, *updptr; + alternative_cb_t alt_cb; + + for (alt = region->begin; alt < region->end; alt++) { + int nr_inst; + int cap = ALT_CAP(alt); + + if (!test_bit(cap, cpucap_mask)) + continue; + + if (!cpus_have_cap(cap)) + continue; + + if (ALT_HAS_CB(alt)) + BUG_ON(alt->alt_len != 0); + else + BUG_ON(alt->alt_len != alt->orig_len); + + origptr = ALT_ORIG_PTR(alt); + updptr = is_module ? origptr : lm_alias(origptr); + nr_inst = alt->orig_len / AARCH64_INSN_SIZE; + + if (ALT_HAS_CB(alt)) + alt_cb = ALT_REPL_PTR(alt); + else + alt_cb = patch_alternative; + + alt_cb(alt, origptr, updptr, nr_inst); + + if (!is_module) { + clean_dcache_range_nopatch((u64)origptr, + (u64)(origptr + nr_inst)); + } + } + + /* + * The core module code takes care of cache maintenance in + * flush_module_icache(). + */ + if (!is_module) { + dsb(ish); + icache_inval_all_pou(); + isb(); + + bitmap_or(applied_alternatives, applied_alternatives, + cpucap_mask, ARM64_NCAPS); + bitmap_and(applied_alternatives, applied_alternatives, + system_cpucaps, ARM64_NCAPS); + } +} + +static void __init apply_alternatives_vdso(void) +{ + struct alt_region region; + const struct elf64_hdr *hdr; + const struct elf64_shdr *shdr; + const struct elf64_shdr *alt; + DECLARE_BITMAP(all_capabilities, ARM64_NCAPS); + + bitmap_fill(all_capabilities, ARM64_NCAPS); + + hdr = (struct elf64_hdr *)vdso_start; + shdr = (void *)hdr + hdr->e_shoff; + alt = find_section(hdr, shdr, ".altinstructions"); + if (!alt) + return; + + region = (struct alt_region){ + .begin = (void *)hdr + alt->sh_offset, + .end = (void *)hdr + alt->sh_offset + alt->sh_size, + }; + + __apply_alternatives(®ion, false, &all_capabilities[0]); +} + +static const struct alt_region kernel_alternatives __initconst = { + .begin = (struct alt_instr *)__alt_instructions, + .end = (struct alt_instr *)__alt_instructions_end, +}; + +/* + * We might be patching the stop_machine state machine, so implement a + * really simple polling protocol here. 
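+ *
+ * Summarised (this merely restates the code below): CPU 0 patches the
+ * remaining alternatives and then sets all_alternatives_applied; every
+ * other CPU spins with cpu_relax() until the flag is set and issues an
+ * isb() before executing any possibly-patched code.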
+ */
+static int __init __apply_alternatives_multi_stop(void *unused)
+{
+	/* We always have a CPU 0 at this point (__init) */
+	if (smp_processor_id()) {
+		while (!all_alternatives_applied)
+			cpu_relax();
+		isb();
+	} else {
+		DECLARE_BITMAP(remaining_capabilities, ARM64_NCAPS);
+
+		bitmap_complement(remaining_capabilities, boot_cpucaps,
+				  ARM64_NCAPS);
+
+		BUG_ON(all_alternatives_applied);
+		__apply_alternatives(&kernel_alternatives, false,
+				     remaining_capabilities);
+		/* Barriers provided by the cache flushing */
+		all_alternatives_applied = 1;
+	}
+
+	return 0;
+}
+
+void __init apply_alternatives_all(void)
+{
+	pr_info("applying system-wide alternatives\n");
+
+	apply_alternatives_vdso();
+	/* better not try code patching on a live SMP system */
+	stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask);
+}
+
+/*
+ * This is called very early in the boot process (directly after we run
+ * a feature detect on the boot CPU). No need to worry about other CPUs
+ * here.
+ */
+void __init apply_boot_alternatives(void)
+{
+	/* If called on non-boot cpu things could go wrong */
+	WARN_ON(smp_processor_id() != 0);
+
+	pr_info("applying boot alternatives\n");
+
+	__apply_alternatives(&kernel_alternatives, false,
+			     &boot_cpucaps[0]);
+}
+
+#ifdef CONFIG_MODULES
+void apply_alternatives_module(void *start, size_t length)
+{
+	struct alt_region region = {
+		.begin	= start,
+		.end	= start + length,
+	};
+	DECLARE_BITMAP(all_capabilities, ARM64_NCAPS);
+
+	bitmap_fill(all_capabilities, ARM64_NCAPS);
+
+	__apply_alternatives(&region, true, &all_capabilities[0]);
+}
+#endif
+
+noinstr void alt_cb_patch_nops(struct alt_instr *alt, __le32 *origptr,
+			       __le32 *updptr, int nr_inst)
+{
+	for (int i = 0; i < nr_inst; i++)
+		updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
+}
+EXPORT_SYMBOL(alt_cb_patch_nops);
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
new file mode 100644
index 0000000000..e459cfd337
--- /dev/null
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -0,0 +1,627 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 ARM Limited
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+
+#define CREATE_TRACE_POINTS
+#include "trace-events-emulation.h"
+
+/*
+ * The runtime support for deprecated instruction support can be in one of
+ * following three states -
+ *
+ * 0 = undef
+ * 1 = emulate (software emulation)
+ * 2 = hw (supported in hardware)
+ */
+enum insn_emulation_mode {
+	INSN_UNDEF,
+	INSN_EMULATE,
+	INSN_HW,
+};
+
+enum legacy_insn_status {
+	INSN_DEPRECATED,
+	INSN_OBSOLETE,
+	INSN_UNAVAILABLE,
+};
+
+struct insn_emulation {
+	const char *name;
+	enum legacy_insn_status status;
+	bool (*try_emulate)(struct pt_regs *regs,
+			    u32 insn);
+	int (*set_hw_mode)(bool enable);
+
+	int current_mode;
+	int min;
+	int max;
+
+	/*
+	 * sysctl for this emulation + a sentinel entry.
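+	 *
+	 * Each registered entry appears as /proc/sys/abi/<name> (for
+	 * example /proc/sys/abi/swp), accepting 0 (undef), 1 (emulate)
+	 * and, where supported, 2 (hw); see register_insn_emulation()
+	 * and emulation_proc_handler() below.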
+ */ + struct ctl_table sysctl[2]; +}; + +#define ARM_OPCODE_CONDTEST_FAIL 0 +#define ARM_OPCODE_CONDTEST_PASS 1 +#define ARM_OPCODE_CONDTEST_UNCOND 2 + +#define ARM_OPCODE_CONDITION_UNCOND 0xf + +static unsigned int __maybe_unused aarch32_check_condition(u32 opcode, u32 psr) +{ + u32 cc_bits = opcode >> 28; + + if (cc_bits != ARM_OPCODE_CONDITION_UNCOND) { + if ((*aarch32_opcode_cond_checks[cc_bits])(psr)) + return ARM_OPCODE_CONDTEST_PASS; + else + return ARM_OPCODE_CONDTEST_FAIL; + } + return ARM_OPCODE_CONDTEST_UNCOND; +} + +#ifdef CONFIG_SWP_EMULATION +/* + * Implement emulation of the SWP/SWPB instructions using load-exclusive and + * store-exclusive. + * + * Syntax of SWP{B} instruction: SWP{B} , , [] + * Where: Rt = destination + * Rt2 = source + * Rn = address + */ + +/* + * Error-checking SWP macros implemented using ldxr{b}/stxr{b} + */ + +/* Arbitrary constant to ensure forward-progress of the LL/SC loop */ +#define __SWP_LL_SC_LOOPS 4 + +#define __user_swpX_asm(data, addr, res, temp, temp2, B) \ +do { \ + uaccess_enable_privileged(); \ + __asm__ __volatile__( \ + " mov %w3, %w6\n" \ + "0: ldxr"B" %w2, [%4]\n" \ + "1: stxr"B" %w0, %w1, [%4]\n" \ + " cbz %w0, 2f\n" \ + " sub %w3, %w3, #1\n" \ + " cbnz %w3, 0b\n" \ + " mov %w0, %w5\n" \ + " b 3f\n" \ + "2:\n" \ + " mov %w1, %w2\n" \ + "3:\n" \ + _ASM_EXTABLE_UACCESS_ERR(0b, 3b, %w0) \ + _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %w0) \ + : "=&r" (res), "+r" (data), "=&r" (temp), "=&r" (temp2) \ + : "r" ((unsigned long)addr), "i" (-EAGAIN), \ + "i" (__SWP_LL_SC_LOOPS) \ + : "memory"); \ + uaccess_disable_privileged(); \ +} while (0) + +#define __user_swp_asm(data, addr, res, temp, temp2) \ + __user_swpX_asm(data, addr, res, temp, temp2, "") +#define __user_swpb_asm(data, addr, res, temp, temp2) \ + __user_swpX_asm(data, addr, res, temp, temp2, "b") + +/* + * Bit 22 of the instruction encoding distinguishes between + * the SWP and SWPB variants (bit set means SWPB). 
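+ *
+ * For reference: SWP R2, R1, [R0] atomically loads the word at [R0]
+ * into R2 and stores R1 to [R0]; SWPB does the same for a single byte.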
+ */ +#define TYPE_SWPB (1 << 22) + +static int emulate_swpX(unsigned int address, unsigned int *data, + unsigned int type) +{ + unsigned int res = 0; + + if ((type != TYPE_SWPB) && (address & 0x3)) { + /* SWP to unaligned address not permitted */ + pr_debug("SWP instruction on unaligned pointer!\n"); + return -EFAULT; + } + + while (1) { + unsigned long temp, temp2; + + if (type == TYPE_SWPB) + __user_swpb_asm(*data, address, res, temp, temp2); + else + __user_swp_asm(*data, address, res, temp, temp2); + + if (likely(res != -EAGAIN) || signal_pending(current)) + break; + + cond_resched(); + } + + return res; +} + +/* + * swp_handler logs the id of calling process, dissects the instruction, sanity + * checks the memory location, calls emulate_swpX for the actual operation and + * deals with fixup/error handling before returning + */ +static int swp_handler(struct pt_regs *regs, u32 instr) +{ + u32 destreg, data, type, address = 0; + const void __user *user_ptr; + int rn, rt2, res = 0; + + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc); + + type = instr & TYPE_SWPB; + + switch (aarch32_check_condition(instr, regs->pstate)) { + case ARM_OPCODE_CONDTEST_PASS: + break; + case ARM_OPCODE_CONDTEST_FAIL: + /* Condition failed - return to next instruction */ + goto ret; + case ARM_OPCODE_CONDTEST_UNCOND: + /* If unconditional encoding - not a SWP, undef */ + return -EFAULT; + default: + return -EINVAL; + } + + rn = aarch32_insn_extract_reg_num(instr, A32_RN_OFFSET); + rt2 = aarch32_insn_extract_reg_num(instr, A32_RT2_OFFSET); + + address = (u32)regs->user_regs.regs[rn]; + data = (u32)regs->user_regs.regs[rt2]; + destreg = aarch32_insn_extract_reg_num(instr, A32_RT_OFFSET); + + pr_debug("addr in r%d->0x%08x, dest is r%d, source in r%d->0x%08x)\n", + rn, address, destreg, + aarch32_insn_extract_reg_num(instr, A32_RT2_OFFSET), data); + + /* Check access in reasonable access range for both SWP and SWPB */ + user_ptr = (const void __user *)(unsigned long)(address & ~3); + if (!access_ok(user_ptr, 4)) { + pr_debug("SWP{B} emulation: access to 0x%08x not allowed!\n", + address); + goto fault; + } + + res = emulate_swpX(address, &data, type); + if (res == -EFAULT) + goto fault; + else if (res == 0) + regs->user_regs.regs[destreg] = data; + +ret: + if (type == TYPE_SWPB) + trace_instruction_emulation("swpb", regs->pc); + else + trace_instruction_emulation("swp", regs->pc); + + pr_warn_ratelimited("\"%s\" (%ld) uses obsolete SWP{B} instruction at 0x%llx\n", + current->comm, (unsigned long)current->pid, regs->pc); + + arm64_skip_faulting_instruction(regs, 4); + return 0; + +fault: + pr_debug("SWP{B} emulation: access caused memory abort!\n"); + arm64_notify_segfault(address); + + return 0; +} + +static bool try_emulate_swp(struct pt_regs *regs, u32 insn) +{ + /* SWP{B} only exists in ARM state and does not exist in Thumb */ + if (!compat_user_mode(regs) || compat_thumb_mode(regs)) + return false; + + if ((insn & 0x0fb00ff0) != 0x01000090) + return false; + + return swp_handler(regs, insn) == 0; +} + +static struct insn_emulation insn_swp = { + .name = "swp", + .status = INSN_OBSOLETE, + .try_emulate = try_emulate_swp, + .set_hw_mode = NULL, +}; +#endif /* CONFIG_SWP_EMULATION */ + +#ifdef CONFIG_CP15_BARRIER_EMULATION +static int cp15barrier_handler(struct pt_regs *regs, u32 instr) +{ + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc); + + switch (aarch32_check_condition(instr, regs->pstate)) { + case ARM_OPCODE_CONDTEST_PASS: + break; + case ARM_OPCODE_CONDTEST_FAIL: + /* 
Condition failed - return to next instruction */ + goto ret; + case ARM_OPCODE_CONDTEST_UNCOND: + /* If unconditional encoding - not a barrier instruction */ + return -EFAULT; + default: + return -EINVAL; + } + + switch (aarch32_insn_mcr_extract_crm(instr)) { + case 10: + /* + * dmb - mcr p15, 0, Rt, c7, c10, 5 + * dsb - mcr p15, 0, Rt, c7, c10, 4 + */ + if (aarch32_insn_mcr_extract_opc2(instr) == 5) { + dmb(sy); + trace_instruction_emulation( + "mcr p15, 0, Rt, c7, c10, 5 ; dmb", regs->pc); + } else { + dsb(sy); + trace_instruction_emulation( + "mcr p15, 0, Rt, c7, c10, 4 ; dsb", regs->pc); + } + break; + case 5: + /* + * isb - mcr p15, 0, Rt, c7, c5, 4 + * + * Taking an exception or returning from one acts as an + * instruction barrier. So no explicit barrier needed here. + */ + trace_instruction_emulation( + "mcr p15, 0, Rt, c7, c5, 4 ; isb", regs->pc); + break; + } + +ret: + pr_warn_ratelimited("\"%s\" (%ld) uses deprecated CP15 Barrier instruction at 0x%llx\n", + current->comm, (unsigned long)current->pid, regs->pc); + + arm64_skip_faulting_instruction(regs, 4); + return 0; +} + +static int cp15_barrier_set_hw_mode(bool enable) +{ + if (enable) + sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_CP15BEN); + else + sysreg_clear_set(sctlr_el1, SCTLR_EL1_CP15BEN, 0); + return 0; +} + +static bool try_emulate_cp15_barrier(struct pt_regs *regs, u32 insn) +{ + if (!compat_user_mode(regs) || compat_thumb_mode(regs)) + return false; + + if ((insn & 0x0fff0fdf) == 0x0e070f9a) + return cp15barrier_handler(regs, insn) == 0; + + if ((insn & 0x0fff0fff) == 0x0e070f95) + return cp15barrier_handler(regs, insn) == 0; + + return false; +} + +static struct insn_emulation insn_cp15_barrier = { + .name = "cp15_barrier", + .status = INSN_DEPRECATED, + .try_emulate = try_emulate_cp15_barrier, + .set_hw_mode = cp15_barrier_set_hw_mode, +}; +#endif /* CONFIG_CP15_BARRIER_EMULATION */ + +#ifdef CONFIG_SETEND_EMULATION +static int setend_set_hw_mode(bool enable) +{ + if (!cpu_supports_mixed_endian_el0()) + return -EINVAL; + + if (enable) + sysreg_clear_set(sctlr_el1, SCTLR_EL1_SED, 0); + else + sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_SED); + return 0; +} + +static int compat_setend_handler(struct pt_regs *regs, u32 big_endian) +{ + char *insn; + + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc); + + if (big_endian) { + insn = "setend be"; + regs->pstate |= PSR_AA32_E_BIT; + } else { + insn = "setend le"; + regs->pstate &= ~PSR_AA32_E_BIT; + } + + trace_instruction_emulation(insn, regs->pc); + pr_warn_ratelimited("\"%s\" (%ld) uses deprecated setend instruction at 0x%llx\n", + current->comm, (unsigned long)current->pid, regs->pc); + + return 0; +} + +static int a32_setend_handler(struct pt_regs *regs, u32 instr) +{ + int rc = compat_setend_handler(regs, (instr >> 9) & 1); + arm64_skip_faulting_instruction(regs, 4); + return rc; +} + +static int t16_setend_handler(struct pt_regs *regs, u32 instr) +{ + int rc = compat_setend_handler(regs, (instr >> 3) & 1); + arm64_skip_faulting_instruction(regs, 2); + return rc; +} + +static bool try_emulate_setend(struct pt_regs *regs, u32 insn) +{ + if (compat_thumb_mode(regs) && + (insn & 0xfffffff7) == 0x0000b650) + return t16_setend_handler(regs, insn) == 0; + + if (compat_user_mode(regs) && + (insn & 0xfffffdff) == 0xf1010000) + return a32_setend_handler(regs, insn) == 0; + + return false; +} + +static struct insn_emulation insn_setend = { + .name = "setend", + .status = INSN_DEPRECATED, + .try_emulate = try_emulate_setend, + .set_hw_mode = setend_set_hw_mode, +}; 
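+
+#if 0
+/*
+ * Purely illustrative, not kernel code: a minimal sketch of how a further
+ * emulation hook would plug into the framework above. The name and handler
+ * are hypothetical; a real hook would also be added to the insn_emulations[]
+ * array below.
+ */
+static bool try_emulate_example(struct pt_regs *regs, u32 insn)
+{
+	/* decode insn, perform the emulation, then skip the instruction */
+	return false;
+}
+
+static struct insn_emulation insn_example = {
+	.name = "example",
+	.status = INSN_DEPRECATED,
+	.try_emulate = try_emulate_example,
+	.set_hw_mode = NULL,
+};
+#endif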
+#endif /* CONFIG_SETEND_EMULATION */ + +static struct insn_emulation *insn_emulations[] = { +#ifdef CONFIG_SWP_EMULATION + &insn_swp, +#endif +#ifdef CONFIG_CP15_BARRIER_EMULATION + &insn_cp15_barrier, +#endif +#ifdef CONFIG_SETEND_EMULATION + &insn_setend, +#endif +}; + +static DEFINE_MUTEX(insn_emulation_mutex); + +static void enable_insn_hw_mode(void *data) +{ + struct insn_emulation *insn = data; + if (insn->set_hw_mode) + insn->set_hw_mode(true); +} + +static void disable_insn_hw_mode(void *data) +{ + struct insn_emulation *insn = data; + if (insn->set_hw_mode) + insn->set_hw_mode(false); +} + +/* Run set_hw_mode(mode) on all active CPUs */ +static int run_all_cpu_set_hw_mode(struct insn_emulation *insn, bool enable) +{ + if (!insn->set_hw_mode) + return -EINVAL; + if (enable) + on_each_cpu(enable_insn_hw_mode, (void *)insn, true); + else + on_each_cpu(disable_insn_hw_mode, (void *)insn, true); + return 0; +} + +/* + * Run set_hw_mode for all insns on a starting CPU. + * Returns: + * 0 - If all the hooks ran successfully. + * -EINVAL - At least one hook is not supported by the CPU. + */ +static int run_all_insn_set_hw_mode(unsigned int cpu) +{ + int rc = 0; + unsigned long flags; + + /* + * Disable IRQs to serialize against an IPI from + * run_all_cpu_set_hw_mode(), ensuring the HW is programmed to the most + * recent enablement state if the two race with one another. + */ + local_irq_save(flags); + for (int i = 0; i < ARRAY_SIZE(insn_emulations); i++) { + struct insn_emulation *insn = insn_emulations[i]; + bool enable = READ_ONCE(insn->current_mode) == INSN_HW; + if (insn->set_hw_mode && insn->set_hw_mode(enable)) { + pr_warn("CPU[%u] cannot support the emulation of %s", + cpu, insn->name); + rc = -EINVAL; + } + } + local_irq_restore(flags); + + return rc; +} + +static int update_insn_emulation_mode(struct insn_emulation *insn, + enum insn_emulation_mode prev) +{ + int ret = 0; + + switch (prev) { + case INSN_UNDEF: /* Nothing to be done */ + break; + case INSN_EMULATE: + break; + case INSN_HW: + if (!run_all_cpu_set_hw_mode(insn, false)) + pr_notice("Disabled %s support\n", insn->name); + break; + } + + switch (insn->current_mode) { + case INSN_UNDEF: + break; + case INSN_EMULATE: + break; + case INSN_HW: + ret = run_all_cpu_set_hw_mode(insn, true); + if (!ret) + pr_notice("Enabled %s support\n", insn->name); + break; + } + + return ret; +} + +static int emulation_proc_handler(struct ctl_table *table, int write, + void *buffer, size_t *lenp, + loff_t *ppos) +{ + int ret = 0; + struct insn_emulation *insn = container_of(table->data, struct insn_emulation, current_mode); + enum insn_emulation_mode prev_mode = insn->current_mode; + + mutex_lock(&insn_emulation_mutex); + ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); + + if (ret || !write || prev_mode == insn->current_mode) + goto ret; + + ret = update_insn_emulation_mode(insn, prev_mode); + if (ret) { + /* Mode change failed, revert to previous mode. 
*/ + WRITE_ONCE(insn->current_mode, prev_mode); + update_insn_emulation_mode(insn, INSN_UNDEF); + } +ret: + mutex_unlock(&insn_emulation_mutex); + return ret; +} + +static void __init register_insn_emulation(struct insn_emulation *insn) +{ + struct ctl_table *sysctl; + + insn->min = INSN_UNDEF; + + switch (insn->status) { + case INSN_DEPRECATED: + insn->current_mode = INSN_EMULATE; + /* Disable the HW mode if it was turned on at early boot time */ + run_all_cpu_set_hw_mode(insn, false); + insn->max = INSN_HW; + break; + case INSN_OBSOLETE: + insn->current_mode = INSN_UNDEF; + insn->max = INSN_EMULATE; + break; + case INSN_UNAVAILABLE: + insn->current_mode = INSN_UNDEF; + insn->max = INSN_UNDEF; + break; + } + + /* Program the HW if required */ + update_insn_emulation_mode(insn, INSN_UNDEF); + + if (insn->status != INSN_UNAVAILABLE) { + sysctl = &insn->sysctl[0]; + + sysctl->mode = 0644; + sysctl->maxlen = sizeof(int); + + sysctl->procname = insn->name; + sysctl->data = &insn->current_mode; + sysctl->extra1 = &insn->min; + sysctl->extra2 = &insn->max; + sysctl->proc_handler = emulation_proc_handler; + + register_sysctl_sz("abi", sysctl, 1); + } +} + +bool try_emulate_armv8_deprecated(struct pt_regs *regs, u32 insn) +{ + for (int i = 0; i < ARRAY_SIZE(insn_emulations); i++) { + struct insn_emulation *ie = insn_emulations[i]; + + if (ie->status == INSN_UNAVAILABLE) + continue; + + /* + * A trap may race with the mode being changed + * INSN_EMULATE<->INSN_HW. Try to emulate the instruction to + * avoid a spurious UNDEF. + */ + if (READ_ONCE(ie->current_mode) == INSN_UNDEF) + continue; + + if (ie->try_emulate(regs, insn)) + return true; + } + + return false; +} + +/* + * Invoked as core_initcall, which guarantees that the instruction + * emulation is ready for userspace. + */ +static int __init armv8_deprecated_init(void) +{ +#ifdef CONFIG_SETEND_EMULATION + if (!system_supports_mixed_endian_el0()) { + insn_setend.status = INSN_UNAVAILABLE; + pr_info("setend instruction emulation is not supported on this system\n"); + } + +#endif + for (int i = 0; i < ARRAY_SIZE(insn_emulations); i++) { + struct insn_emulation *ie = insn_emulations[i]; + + if (ie->status == INSN_UNAVAILABLE) + continue; + + register_insn_emulation(ie); + } + + cpuhp_setup_state_nocalls(CPUHP_AP_ARM64_ISNDEP_STARTING, + "arm64/isndep:starting", + run_all_insn_set_hw_mode, NULL); + return 0; +} + +core_initcall(armv8_deprecated_init); diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c new file mode 100644 index 0000000000..5ff1942b04 --- /dev/null +++ b/arch/arm64/kernel/asm-offsets.c @@ -0,0 +1,221 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Based on arch/arm/kernel/asm-offsets.c + * + * Copyright (C) 1995-2003 Russell King + * 2001-2002 Keith Owens + * Copyright (C) 2012 ARM Ltd. 
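+ *
+ * Each DEFINE() below becomes a constant in the generated asm-offsets.h,
+ * letting assembly refer to structure offsets symbolically, e.g.
+ * (illustrative):
+ *
+ *	ldr	x1, [tsk, #TSK_TI_FLAGS]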
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +int main(void) +{ + DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm)); + BLANK(); + DEFINE(TSK_TI_CPU, offsetof(struct task_struct, thread_info.cpu)); + DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags)); + DEFINE(TSK_TI_PREEMPT, offsetof(struct task_struct, thread_info.preempt_count)); +#ifdef CONFIG_ARM64_SW_TTBR0_PAN + DEFINE(TSK_TI_TTBR0, offsetof(struct task_struct, thread_info.ttbr0)); +#endif +#ifdef CONFIG_SHADOW_CALL_STACK + DEFINE(TSK_TI_SCS_BASE, offsetof(struct task_struct, thread_info.scs_base)); + DEFINE(TSK_TI_SCS_SP, offsetof(struct task_struct, thread_info.scs_sp)); +#endif + DEFINE(TSK_STACK, offsetof(struct task_struct, stack)); +#ifdef CONFIG_STACKPROTECTOR + DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary)); +#endif + BLANK(); + DEFINE(THREAD_CPU_CONTEXT, offsetof(struct task_struct, thread.cpu_context)); + DEFINE(THREAD_SCTLR_USER, offsetof(struct task_struct, thread.sctlr_user)); +#ifdef CONFIG_ARM64_PTR_AUTH + DEFINE(THREAD_KEYS_USER, offsetof(struct task_struct, thread.keys_user)); +#endif +#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL + DEFINE(THREAD_KEYS_KERNEL, offsetof(struct task_struct, thread.keys_kernel)); +#endif +#ifdef CONFIG_ARM64_MTE + DEFINE(THREAD_MTE_CTRL, offsetof(struct task_struct, thread.mte_ctrl)); +#endif + BLANK(); + DEFINE(S_X0, offsetof(struct pt_regs, regs[0])); + DEFINE(S_X2, offsetof(struct pt_regs, regs[2])); + DEFINE(S_X4, offsetof(struct pt_regs, regs[4])); + DEFINE(S_X6, offsetof(struct pt_regs, regs[6])); + DEFINE(S_X8, offsetof(struct pt_regs, regs[8])); + DEFINE(S_X10, offsetof(struct pt_regs, regs[10])); + DEFINE(S_X12, offsetof(struct pt_regs, regs[12])); + DEFINE(S_X14, offsetof(struct pt_regs, regs[14])); + DEFINE(S_X16, offsetof(struct pt_regs, regs[16])); + DEFINE(S_X18, offsetof(struct pt_regs, regs[18])); + DEFINE(S_X20, offsetof(struct pt_regs, regs[20])); + DEFINE(S_X22, offsetof(struct pt_regs, regs[22])); + DEFINE(S_X24, offsetof(struct pt_regs, regs[24])); + DEFINE(S_X26, offsetof(struct pt_regs, regs[26])); + DEFINE(S_X28, offsetof(struct pt_regs, regs[28])); + DEFINE(S_FP, offsetof(struct pt_regs, regs[29])); + DEFINE(S_LR, offsetof(struct pt_regs, regs[30])); + DEFINE(S_SP, offsetof(struct pt_regs, sp)); + DEFINE(S_PSTATE, offsetof(struct pt_regs, pstate)); + DEFINE(S_PC, offsetof(struct pt_regs, pc)); + DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno)); + DEFINE(S_SDEI_TTBR1, offsetof(struct pt_regs, sdei_ttbr1)); + DEFINE(S_PMR_SAVE, offsetof(struct pt_regs, pmr_save)); + DEFINE(S_STACKFRAME, offsetof(struct pt_regs, stackframe)); + DEFINE(PT_REGS_SIZE, sizeof(struct pt_regs)); + BLANK(); +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS + DEFINE(FREGS_X0, offsetof(struct ftrace_regs, regs[0])); + DEFINE(FREGS_X2, offsetof(struct ftrace_regs, regs[2])); + DEFINE(FREGS_X4, offsetof(struct ftrace_regs, regs[4])); + DEFINE(FREGS_X6, offsetof(struct ftrace_regs, regs[6])); + DEFINE(FREGS_X8, offsetof(struct ftrace_regs, regs[8])); + DEFINE(FREGS_FP, offsetof(struct ftrace_regs, fp)); + DEFINE(FREGS_LR, offsetof(struct ftrace_regs, lr)); + DEFINE(FREGS_SP, offsetof(struct ftrace_regs, sp)); + DEFINE(FREGS_PC, offsetof(struct ftrace_regs, pc)); +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS + DEFINE(FREGS_DIRECT_TRAMP, offsetof(struct ftrace_regs, direct_tramp)); +#endif + DEFINE(FREGS_SIZE, 
sizeof(struct ftrace_regs)); + BLANK(); +#endif +#ifdef CONFIG_COMPAT + DEFINE(COMPAT_SIGFRAME_REGS_OFFSET, offsetof(struct compat_sigframe, uc.uc_mcontext.arm_r0)); + DEFINE(COMPAT_RT_SIGFRAME_REGS_OFFSET, offsetof(struct compat_rt_sigframe, sig.uc.uc_mcontext.arm_r0)); + BLANK(); +#endif + DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id.counter)); + BLANK(); + DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm)); + DEFINE(VMA_VM_FLAGS, offsetof(struct vm_area_struct, vm_flags)); + BLANK(); + DEFINE(VM_EXEC, VM_EXEC); + BLANK(); + DEFINE(PAGE_SZ, PAGE_SIZE); + BLANK(); + DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE); + DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE); + BLANK(); + DEFINE(PREEMPT_DISABLE_OFFSET, PREEMPT_DISABLE_OFFSET); + DEFINE(SOFTIRQ_SHIFT, SOFTIRQ_SHIFT); + DEFINE(IRQ_CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending)); + BLANK(); + DEFINE(CPU_BOOT_TASK, offsetof(struct secondary_data, task)); + BLANK(); + DEFINE(FTR_OVR_VAL_OFFSET, offsetof(struct arm64_ftr_override, val)); + DEFINE(FTR_OVR_MASK_OFFSET, offsetof(struct arm64_ftr_override, mask)); + BLANK(); +#ifdef CONFIG_KVM + DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt)); + DEFINE(VCPU_FAULT_DISR, offsetof(struct kvm_vcpu, arch.fault.disr_el1)); + DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2)); + DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_cpu_context, regs)); + DEFINE(CPU_RGSR_EL1, offsetof(struct kvm_cpu_context, sys_regs[RGSR_EL1])); + DEFINE(CPU_GCR_EL1, offsetof(struct kvm_cpu_context, sys_regs[GCR_EL1])); + DEFINE(CPU_APIAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APIAKEYLO_EL1])); + DEFINE(CPU_APIBKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APIBKEYLO_EL1])); + DEFINE(CPU_APDAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APDAKEYLO_EL1])); + DEFINE(CPU_APDBKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APDBKEYLO_EL1])); + DEFINE(CPU_APGAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APGAKEYLO_EL1])); + DEFINE(HOST_CONTEXT_VCPU, offsetof(struct kvm_cpu_context, __hyp_running_vcpu)); + DEFINE(HOST_DATA_CONTEXT, offsetof(struct kvm_host_data, host_ctxt)); + DEFINE(NVHE_INIT_MAIR_EL2, offsetof(struct kvm_nvhe_init_params, mair_el2)); + DEFINE(NVHE_INIT_TCR_EL2, offsetof(struct kvm_nvhe_init_params, tcr_el2)); + DEFINE(NVHE_INIT_TPIDR_EL2, offsetof(struct kvm_nvhe_init_params, tpidr_el2)); + DEFINE(NVHE_INIT_STACK_HYP_VA, offsetof(struct kvm_nvhe_init_params, stack_hyp_va)); + DEFINE(NVHE_INIT_PGD_PA, offsetof(struct kvm_nvhe_init_params, pgd_pa)); + DEFINE(NVHE_INIT_HCR_EL2, offsetof(struct kvm_nvhe_init_params, hcr_el2)); + DEFINE(NVHE_INIT_VTTBR, offsetof(struct kvm_nvhe_init_params, vttbr)); + DEFINE(NVHE_INIT_VTCR, offsetof(struct kvm_nvhe_init_params, vtcr)); +#endif +#ifdef CONFIG_CPU_PM + DEFINE(CPU_CTX_SP, offsetof(struct cpu_suspend_ctx, sp)); + DEFINE(MPIDR_HASH_MASK, offsetof(struct mpidr_hash, mask)); + DEFINE(MPIDR_HASH_SHIFTS, offsetof(struct mpidr_hash, shift_aff)); + DEFINE(SLEEP_STACK_DATA_SYSTEM_REGS, offsetof(struct sleep_stack_data, system_regs)); + DEFINE(SLEEP_STACK_DATA_CALLEE_REGS, offsetof(struct sleep_stack_data, callee_saved_regs)); +#endif + DEFINE(ARM_SMCCC_RES_X0_OFFS, offsetof(struct arm_smccc_res, a0)); + DEFINE(ARM_SMCCC_RES_X2_OFFS, offsetof(struct arm_smccc_res, a2)); + DEFINE(ARM_SMCCC_QUIRK_ID_OFFS, offsetof(struct arm_smccc_quirk, id)); + DEFINE(ARM_SMCCC_QUIRK_STATE_OFFS, offsetof(struct arm_smccc_quirk, state)); + DEFINE(ARM_SMCCC_1_2_REGS_X0_OFFS, offsetof(struct arm_smccc_1_2_regs, a0)); + 
DEFINE(ARM_SMCCC_1_2_REGS_X2_OFFS, offsetof(struct arm_smccc_1_2_regs, a2)); + DEFINE(ARM_SMCCC_1_2_REGS_X4_OFFS, offsetof(struct arm_smccc_1_2_regs, a4)); + DEFINE(ARM_SMCCC_1_2_REGS_X6_OFFS, offsetof(struct arm_smccc_1_2_regs, a6)); + DEFINE(ARM_SMCCC_1_2_REGS_X8_OFFS, offsetof(struct arm_smccc_1_2_regs, a8)); + DEFINE(ARM_SMCCC_1_2_REGS_X10_OFFS, offsetof(struct arm_smccc_1_2_regs, a10)); + DEFINE(ARM_SMCCC_1_2_REGS_X12_OFFS, offsetof(struct arm_smccc_1_2_regs, a12)); + DEFINE(ARM_SMCCC_1_2_REGS_X14_OFFS, offsetof(struct arm_smccc_1_2_regs, a14)); + DEFINE(ARM_SMCCC_1_2_REGS_X16_OFFS, offsetof(struct arm_smccc_1_2_regs, a16)); + BLANK(); + DEFINE(HIBERN_PBE_ORIG, offsetof(struct pbe, orig_address)); + DEFINE(HIBERN_PBE_ADDR, offsetof(struct pbe, address)); + DEFINE(HIBERN_PBE_NEXT, offsetof(struct pbe, next)); + DEFINE(ARM64_FTR_SYSVAL, offsetof(struct arm64_ftr_reg, sys_val)); + BLANK(); +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 + DEFINE(TRAMP_VALIAS, TRAMP_VALIAS); +#endif +#ifdef CONFIG_ARM_SDE_INTERFACE + DEFINE(SDEI_EVENT_INTREGS, offsetof(struct sdei_registered_event, interrupted_regs)); + DEFINE(SDEI_EVENT_PRIORITY, offsetof(struct sdei_registered_event, priority)); +#endif +#ifdef CONFIG_ARM64_PTR_AUTH + DEFINE(PTRAUTH_USER_KEY_APIA, offsetof(struct ptrauth_keys_user, apia)); +#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL + DEFINE(PTRAUTH_KERNEL_KEY_APIA, offsetof(struct ptrauth_keys_kernel, apia)); +#endif + BLANK(); +#endif +#ifdef CONFIG_KEXEC_CORE + DEFINE(KIMAGE_ARCH_DTB_MEM, offsetof(struct kimage, arch.dtb_mem)); + DEFINE(KIMAGE_ARCH_EL2_VECTORS, offsetof(struct kimage, arch.el2_vectors)); + DEFINE(KIMAGE_ARCH_ZERO_PAGE, offsetof(struct kimage, arch.zero_page)); + DEFINE(KIMAGE_ARCH_PHYS_OFFSET, offsetof(struct kimage, arch.phys_offset)); + DEFINE(KIMAGE_ARCH_TTBR1, offsetof(struct kimage, arch.ttbr1)); + DEFINE(KIMAGE_HEAD, offsetof(struct kimage, head)); + DEFINE(KIMAGE_START, offsetof(struct kimage, start)); + BLANK(); +#endif +#ifdef CONFIG_FUNCTION_TRACER + DEFINE(FTRACE_OPS_FUNC, offsetof(struct ftrace_ops, func)); +#endif + BLANK(); +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + DEFINE(FGRET_REGS_X0, offsetof(struct fgraph_ret_regs, regs[0])); + DEFINE(FGRET_REGS_X1, offsetof(struct fgraph_ret_regs, regs[1])); + DEFINE(FGRET_REGS_X2, offsetof(struct fgraph_ret_regs, regs[2])); + DEFINE(FGRET_REGS_X3, offsetof(struct fgraph_ret_regs, regs[3])); + DEFINE(FGRET_REGS_X4, offsetof(struct fgraph_ret_regs, regs[4])); + DEFINE(FGRET_REGS_X5, offsetof(struct fgraph_ret_regs, regs[5])); + DEFINE(FGRET_REGS_X6, offsetof(struct fgraph_ret_regs, regs[6])); + DEFINE(FGRET_REGS_X7, offsetof(struct fgraph_ret_regs, regs[7])); + DEFINE(FGRET_REGS_FP, offsetof(struct fgraph_ret_regs, fp)); + DEFINE(FGRET_REGS_SIZE, sizeof(struct fgraph_ret_regs)); +#endif +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS + DEFINE(FTRACE_OPS_DIRECT_CALL, offsetof(struct ftrace_ops, direct_call)); +#endif + return 0; +} diff --git a/arch/arm64/kernel/cacheinfo.c b/arch/arm64/kernel/cacheinfo.c new file mode 100644 index 0000000000..d9c9218fa1 --- /dev/null +++ b/arch/arm64/kernel/cacheinfo.c @@ -0,0 +1,117 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * ARM64 cacheinfo support + * + * Copyright (C) 2015 ARM Ltd. 
+ * All Rights Reserved + */ + +#include +#include +#include + +#define MAX_CACHE_LEVEL 7 /* Max 7 level supported */ + +int cache_line_size(void) +{ + if (coherency_max_size != 0) + return coherency_max_size; + + return cache_line_size_of_cpu(); +} +EXPORT_SYMBOL_GPL(cache_line_size); + +static inline enum cache_type get_cache_type(int level) +{ + u64 clidr; + + if (level > MAX_CACHE_LEVEL) + return CACHE_TYPE_NOCACHE; + clidr = read_sysreg(clidr_el1); + return CLIDR_CTYPE(clidr, level); +} + +static void ci_leaf_init(struct cacheinfo *this_leaf, + enum cache_type type, unsigned int level) +{ + this_leaf->level = level; + this_leaf->type = type; +} + +static void detect_cache_level(unsigned int *level_p, unsigned int *leaves_p) +{ + unsigned int ctype, level, leaves; + + for (level = 1, leaves = 0; level <= MAX_CACHE_LEVEL; level++) { + ctype = get_cache_type(level); + if (ctype == CACHE_TYPE_NOCACHE) { + level--; + break; + } + /* Separate instruction and data caches */ + leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1; + } + + *level_p = level; + *leaves_p = leaves; +} + +int early_cache_level(unsigned int cpu) +{ + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); + + detect_cache_level(&this_cpu_ci->num_levels, &this_cpu_ci->num_leaves); + + return 0; +} + +int init_cache_level(unsigned int cpu) +{ + unsigned int level, leaves; + int fw_level, ret; + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); + + detect_cache_level(&level, &leaves); + + if (acpi_disabled) { + fw_level = of_find_last_cache_level(cpu); + } else { + ret = acpi_get_cache_info(cpu, &fw_level, NULL); + if (ret < 0) + fw_level = 0; + } + + if (level < fw_level) { + /* + * some external caches not specified in CLIDR_EL1 + * the information may be available in the device tree + * only unified external caches are considered here + */ + leaves += (fw_level - level); + level = fw_level; + } + + this_cpu_ci->num_levels = level; + this_cpu_ci->num_leaves = leaves; + return 0; +} + +int populate_cache_leaves(unsigned int cpu) +{ + unsigned int level, idx; + enum cache_type type; + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); + struct cacheinfo *this_leaf = this_cpu_ci->info_list; + + for (idx = 0, level = 1; level <= this_cpu_ci->num_levels && + idx < this_cpu_ci->num_leaves; idx++, level++) { + type = get_cache_type(level); + if (type == CACHE_TYPE_SEPARATE) { + ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level); + ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level); + } else { + ci_leaf_init(this_leaf++, type, level); + } + } + return 0; +} diff --git a/arch/arm64/kernel/compat_alignment.c b/arch/arm64/kernel/compat_alignment.c new file mode 100644 index 0000000000..deff21bfa6 --- /dev/null +++ b/arch/arm64/kernel/compat_alignment.c @@ -0,0 +1,383 @@ +// SPDX-License-Identifier: GPL-2.0-only +// based on arch/arm/mm/alignment.c + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +/* + * 32-bit misaligned trap handler (c) 1998 San Mehat (CCC) -July 1998 + * + * Speed optimisations and better fault handling by Russell King. 
+ */ +#define CODING_BITS(i) (i & 0x0e000000) + +#define LDST_P_BIT(i) (i & (1 << 24)) /* Preindex */ +#define LDST_U_BIT(i) (i & (1 << 23)) /* Add offset */ +#define LDST_W_BIT(i) (i & (1 << 21)) /* Writeback */ +#define LDST_L_BIT(i) (i & (1 << 20)) /* Load */ + +#define LDST_P_EQ_U(i) ((((i) ^ ((i) >> 1)) & (1 << 23)) == 0) + +#define LDSTHD_I_BIT(i) (i & (1 << 22)) /* double/half-word immed */ + +#define RN_BITS(i) ((i >> 16) & 15) /* Rn */ +#define RD_BITS(i) ((i >> 12) & 15) /* Rd */ +#define RM_BITS(i) (i & 15) /* Rm */ + +#define REGMASK_BITS(i) (i & 0xffff) + +#define BAD_INSTR 0xdeadc0de + +/* Thumb-2 32 bit format per ARMv7 DDI0406A A6.3, either f800h,e800h,f800h */ +#define IS_T32(hi16) \ + (((hi16) & 0xe000) == 0xe000 && ((hi16) & 0x1800)) + +union offset_union { + unsigned long un; + signed long sn; +}; + +#define TYPE_ERROR 0 +#define TYPE_FAULT 1 +#define TYPE_LDST 2 +#define TYPE_DONE 3 + +static void +do_alignment_finish_ldst(unsigned long addr, u32 instr, struct pt_regs *regs, + union offset_union offset) +{ + if (!LDST_U_BIT(instr)) + offset.un = -offset.un; + + if (!LDST_P_BIT(instr)) + addr += offset.un; + + if (!LDST_P_BIT(instr) || LDST_W_BIT(instr)) + regs->regs[RN_BITS(instr)] = addr; +} + +static int +do_alignment_ldrdstrd(unsigned long addr, u32 instr, struct pt_regs *regs) +{ + unsigned int rd = RD_BITS(instr); + unsigned int rd2; + int load; + + if ((instr & 0xfe000000) == 0xe8000000) { + /* ARMv7 Thumb-2 32-bit LDRD/STRD */ + rd2 = (instr >> 8) & 0xf; + load = !!(LDST_L_BIT(instr)); + } else if (((rd & 1) == 1) || (rd == 14)) { + return TYPE_ERROR; + } else { + load = ((instr & 0xf0) == 0xd0); + rd2 = rd + 1; + } + + if (load) { + unsigned int val, val2; + + if (get_user(val, (u32 __user *)addr) || + get_user(val2, (u32 __user *)(addr + 4))) + return TYPE_FAULT; + regs->regs[rd] = val; + regs->regs[rd2] = val2; + } else { + if (put_user(regs->regs[rd], (u32 __user *)addr) || + put_user(regs->regs[rd2], (u32 __user *)(addr + 4))) + return TYPE_FAULT; + } + return TYPE_LDST; +} + +/* + * LDM/STM alignment handler. + * + * There are 4 variants of this instruction: + * + * B = rn pointer before instruction, A = rn pointer after instruction + * ------ increasing address -----> + * | | r0 | r1 | ... | rx | | + * PU = 01 B A + * PU = 11 B A + * PU = 00 A B + * PU = 10 A B + */ +static int +do_alignment_ldmstm(unsigned long addr, u32 instr, struct pt_regs *regs) +{ + unsigned int rd, rn, nr_regs, regbits; + unsigned long eaddr, newaddr; + unsigned int val; + + /* count the number of registers in the mask to be transferred */ + nr_regs = hweight16(REGMASK_BITS(instr)) * 4; + + rn = RN_BITS(instr); + newaddr = eaddr = regs->regs[rn]; + + if (!LDST_U_BIT(instr)) + nr_regs = -nr_regs; + newaddr += nr_regs; + if (!LDST_U_BIT(instr)) + eaddr = newaddr; + + if (LDST_P_EQ_U(instr)) /* U = P */ + eaddr += 4; + + for (regbits = REGMASK_BITS(instr), rd = 0; regbits; + regbits >>= 1, rd += 1) + if (regbits & 1) { + if (LDST_L_BIT(instr)) { + if (get_user(val, (u32 __user *)eaddr)) + return TYPE_FAULT; + if (rd < 15) + regs->regs[rd] = val; + else + regs->pc = val; + } else { + /* + * The PC register has a bias of +8 in ARM mode + * and +4 in Thumb mode. This means that a read + * of the value of PC should account for this. + * Since Thumb does not permit STM instructions + * to refer to PC, just add 8 here. + */ + val = (rd < 15) ? 
regs->regs[rd] : regs->pc + 8; + if (put_user(val, (u32 __user *)eaddr)) + return TYPE_FAULT; + } + eaddr += 4; + } + + if (LDST_W_BIT(instr)) + regs->regs[rn] = newaddr; + + return TYPE_DONE; +} + +/* + * Convert Thumb multi-word load/store instruction forms to equivalent ARM + * instructions so we can reuse ARM userland alignment fault fixups for Thumb. + * + * This implementation was initially based on the algorithm found in + * gdb/sim/arm/thumbemu.c. It is basically just a code reduction of same + * to convert only Thumb ld/st instruction forms to equivalent ARM forms. + * + * NOTES: + * 1. Comments below refer to ARM ARM DDI0100E Thumb Instruction sections. + * 2. If for some reason we're passed an non-ld/st Thumb instruction to + * decode, we return 0xdeadc0de. This should never happen under normal + * circumstances but if it does, we've got other problems to deal with + * elsewhere and we obviously can't fix those problems here. + */ + +static unsigned long thumb2arm(u16 tinstr) +{ + u32 L = (tinstr & (1<<11)) >> 11; + + switch ((tinstr & 0xf800) >> 11) { + /* 6.6.1 Format 1: */ + case 0xc000 >> 11: /* 7.1.51 STMIA */ + case 0xc800 >> 11: /* 7.1.25 LDMIA */ + { + u32 Rn = (tinstr & (7<<8)) >> 8; + u32 W = ((L<<Rn) != 0); /* PC was in list? */ + + return 0xe8800000 | W << 21 | Rn << 16 | (tinstr & 255); + } + + /* 6.6.1 Format 2: */ + case 0xb000 >> 11: /* 7.1.48 PUSH */ + case 0xb800 >> 11: /* 7.1.47 POP */ + if ((tinstr & (3 << 9)) == 0x0400) { + static const u32 subset[4] = { + 0xe92d0000, /* STMDB sp!,{registers} */ + 0xe92d4000, /* STMDB sp!,{registers,lr} */ + 0xe8bd0000, /* LDMIA sp!,{registers} */ + 0xe8bd8000 /* LDMIA sp!,{registers,pc} */ + }; + return subset[(L<<1) | ((tinstr & (1<<8)) >> 8)] | + (tinstr & 255); /* register_list */ + } + fallthrough; /* for illegal instruction case */ + + default: + return BAD_INSTR; + } +} + +/* + * Convert Thumb-2 32 bit LDM, STM, LDRD, STRD to equivalent instruction + * handlable by ARM alignment handler, also find the corresponding handler, + * so that we can reuse ARM userland alignment fault fixups for Thumb. + * + * @pinstr: original Thumb-2 instruction; returns new handlable instruction + * @regs: register context. + * @poffset: return offset from faulted addr for later writeback + * + * NOTES: + * 1. Comments below refer to ARMv7 DDI0406A Thumb Instruction sections. + * 2. Register name Rt from ARMv7 is same as Rd from ARMv6 (Rd is Rt) + */ +static void * +do_alignment_t32_to_handler(u32 *pinstr, struct pt_regs *regs, + union offset_union *poffset) +{ + u32 instr = *pinstr; + u16 tinst1 = (instr >> 16) & 0xffff; + u16 tinst2 = instr & 0xffff; + + switch (tinst1 & 0xffe0) { + /* A6.3.5 Load/Store multiple */ + case 0xe880: /* STM/STMIA/STMEA,LDM/LDMIA, PUSH/POP T2 */ + case 0xe8a0: /* ...above writeback version */ + case 0xe900: /* STMDB/STMFD, LDMDB/LDMEA */ + case 0xe920: /* ...above writeback version */ + /* no need offset decision since handler calculates it */ + return do_alignment_ldmstm; + + case 0xf840: /* POP/PUSH T3 (single register) */ + if (RN_BITS(instr) == 13 && (tinst2 & 0x09ff) == 0x0904) { + u32 L = !!(LDST_L_BIT(instr)); + const u32 subset[2] = { + 0xe92d0000, /* STMDB sp!,{registers} */ + 0xe8bd0000, /* LDMIA sp!,{registers} */ + }; + *pinstr = subset[L] | (1<<RD_BITS(tinst2)); + return do_alignment_ldmstm; + } + /* Else fall through for illegal instruction case */ + break; + + /* Load/Store double */ + case 0xe860: + case 0xe960: + case 0xe8e0: + case 0xe9e0: + poffset->un = (tinst2 & 0xff) << 2; + fallthrough; + + case 0xe940: + case 0xe9c0: + return do_alignment_ldrdstrd; + + /* + * No need to handle load/store instructions up to word size + * since ARMv6 and later CPUs can perform unaligned accesses.
+ */ + default: + break; + } + return NULL; +} + +static int alignment_get_arm(struct pt_regs *regs, __le32 __user *ip, u32 *inst) +{ + __le32 instr = 0; + int fault; + + fault = get_user(instr, ip); + if (fault) + return fault; + + *inst = __le32_to_cpu(instr); + return 0; +} + +static int alignment_get_thumb(struct pt_regs *regs, __le16 __user *ip, u16 *inst) +{ + __le16 instr = 0; + int fault; + + fault = get_user(instr, ip); + if (fault) + return fault; + + *inst = __le16_to_cpu(instr); + return 0; +} + +int do_compat_alignment_fixup(unsigned long addr, struct pt_regs *regs) +{ + union offset_union offset; + unsigned long instrptr; + int (*handler)(unsigned long addr, u32 instr, struct pt_regs *regs); + unsigned int type; + u32 instr = 0; + int isize = 4; + int thumb2_32b = 0; + + instrptr = instruction_pointer(regs); + + if (compat_thumb_mode(regs)) { + __le16 __user *ptr = (__le16 __user *)(instrptr & ~1); + u16 tinstr, tinst2; + + if (alignment_get_thumb(regs, ptr, &tinstr)) + return 1; + + if (IS_T32(tinstr)) { /* Thumb-2 32-bit */ + if (alignment_get_thumb(regs, ptr + 1, &tinst2)) + return 1; + instr = ((u32)tinstr << 16) | tinst2; + thumb2_32b = 1; + } else { + isize = 2; + instr = thumb2arm(tinstr); + } + } else { + if (alignment_get_arm(regs, (__le32 __user *)instrptr, &instr)) + return 1; + } + + switch (CODING_BITS(instr)) { + case 0x00000000: /* 3.13.4 load/store instruction extensions */ + if (LDSTHD_I_BIT(instr)) + offset.un = (instr & 0xf00) >> 4 | (instr & 15); + else + offset.un = regs->regs[RM_BITS(instr)]; + + if ((instr & 0x001000f0) == 0x000000d0 || /* LDRD */ + (instr & 0x001000f0) == 0x000000f0) /* STRD */ + handler = do_alignment_ldrdstrd; + else + return 1; + break; + + case 0x08000000: /* ldm or stm, or thumb-2 32bit instruction */ + if (thumb2_32b) { + offset.un = 0; + handler = do_alignment_t32_to_handler(&instr, regs, &offset); + } else { + offset.un = 0; + handler = do_alignment_ldmstm; + } + break; + + default: + return 1; + } + + type = handler(addr, instr, regs); + + if (type == TYPE_ERROR || type == TYPE_FAULT) + return 1; + + if (type == TYPE_LDST) + do_alignment_finish_ldst(addr, instr, regs, offset); + + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->pc); + arm64_skip_faulting_instruction(regs, isize); + + return 0; +} diff --git a/arch/arm64/kernel/cpu-reset.S b/arch/arm64/kernel/cpu-reset.S new file mode 100644 index 0000000000..c87445dde6 --- /dev/null +++ b/arch/arm64/kernel/cpu-reset.S @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * CPU reset routines + * + * Copyright (C) 2001 Deep Blue Solutions Ltd. + * Copyright (C) 2012 ARM Ltd. + * Copyright (C) 2015 Huawei Futurewei Technologies. + */ + +#include +#include +#include +#include +#include + +.text +.pushsection .idmap.text, "a" + +/* + * cpu_soft_restart(el2_switch, entry, arg0, arg1, arg2) + * + * @el2_switch: Flag to indicate a switch to EL2 is needed. + * @entry: Location to jump to for soft reset. + * arg0: First argument passed to @entry. (relocation list) + * arg1: Second argument passed to @entry.(physical kernel entry) + * arg2: Third argument passed to @entry. (physical dtb address) + * + * Put the CPU into the same state as it would be if it had been reset, and + * branch to what would be the reset vector. It must be executed with the + * flat identity mapping. 
+ */ +SYM_TYPED_FUNC_START(cpu_soft_restart) + mov_q x12, INIT_SCTLR_EL1_MMU_OFF + pre_disable_mmu_workaround + /* + * either disable EL1&0 translation regime or disable EL2&0 translation + * regime if HCR_EL2.E2H == 1 + */ + msr sctlr_el1, x12 + isb + + cbz x0, 1f // el2_switch? + mov x0, #HVC_SOFT_RESTART + hvc #0 // no return + +1: mov x8, x1 // entry + mov x0, x2 // arg0 + mov x1, x3 // arg1 + mov x2, x4 // arg2 + br x8 +SYM_FUNC_END(cpu_soft_restart) + +.popsection diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c new file mode 100644 index 0000000000..87787a012b --- /dev/null +++ b/arch/arm64/kernel/cpu_errata.c @@ -0,0 +1,763 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Contains CPU specific errata definitions + * + * Copyright (C) 2014 ARM Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +static bool __maybe_unused +is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope) +{ + const struct arm64_midr_revidr *fix; + u32 midr = read_cpuid_id(), revidr; + + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); + if (!is_midr_in_range(midr, &entry->midr_range)) + return false; + + midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK; + revidr = read_cpuid(REVIDR_EL1); + for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++) + if (midr == fix->midr_rv && (revidr & fix->revidr_mask)) + return false; + + return true; +} + +static bool __maybe_unused +is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry, + int scope) +{ + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); + return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list); +} + +static bool __maybe_unused +is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope) +{ + u32 model; + + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); + + model = read_cpuid_id(); + model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) | + MIDR_ARCHITECTURE_MASK; + + return model == entry->midr_range.model; +} + +static bool +has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry, + int scope) +{ + u64 mask = arm64_ftr_reg_ctrel0.strict_mask; + u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask; + u64 ctr_raw, ctr_real; + + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); + + /* + * We want to make sure that all the CPUs in the system expose + * a consistent CTR_EL0 to make sure that applications behaves + * correctly with migration. + * + * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0 : + * + * 1) It is safe if the system doesn't support IDC, as CPU anyway + * reports IDC = 0, consistent with the rest. + * + * 2) If the system has IDC, it is still safe as we trap CTR_EL0 + * access on this CPU via the ARM64_HAS_CACHE_IDC capability. + * + * So, we need to make sure either the raw CTR_EL0 or the effective + * CTR_EL0 matches the system's copy to allow a secondary CPU to boot. + */ + ctr_raw = read_cpuid_cachetype() & mask; + ctr_real = read_cpuid_effective_cachetype() & mask; + + return (ctr_real != sys) && (ctr_raw != sys); +} + +static void +cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap) +{ + u64 mask = arm64_ftr_reg_ctrel0.strict_mask; + bool enable_uct_trap = false; + + /* Trap CTR_EL0 access on this CPU, only if it has a mismatch */ + if ((read_cpuid_cachetype() & mask) != + (arm64_ftr_reg_ctrel0.sys_val & mask)) + enable_uct_trap = true; + + /* ... 
or if the system is affected by an erratum */ + if (cap->capability == ARM64_WORKAROUND_1542419) + enable_uct_trap = true; + + if (enable_uct_trap) + sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0); +} + +#ifdef CONFIG_ARM64_ERRATUM_1463225 +static bool +has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry, + int scope) +{ + return is_affected_midr_range_list(entry, scope) && is_kernel_in_hyp_mode(); +} +#endif + +static void __maybe_unused +cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused) +{ + sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0); +} + +static DEFINE_RAW_SPINLOCK(reg_user_mask_modification); +static void __maybe_unused +cpu_clear_bf16_from_user_emulation(const struct arm64_cpu_capabilities *__unused) +{ + struct arm64_ftr_reg *regp; + + regp = get_arm64_ftr_reg(SYS_ID_AA64ISAR1_EL1); + if (!regp) + return; + + raw_spin_lock(®_user_mask_modification); + if (regp->user_mask & ID_AA64ISAR1_EL1_BF16_MASK) + regp->user_mask &= ~ID_AA64ISAR1_EL1_BF16_MASK; + raw_spin_unlock(®_user_mask_modification); +} + +#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \ + .matches = is_affected_midr_range, \ + .midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max) + +#define CAP_MIDR_ALL_VERSIONS(model) \ + .matches = is_affected_midr_range, \ + .midr_range = MIDR_ALL_VERSIONS(model) + +#define MIDR_FIXED(rev, revidr_mask) \ + .fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}} + +#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \ + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \ + CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) + +#define CAP_MIDR_RANGE_LIST(list) \ + .matches = is_affected_midr_range_list, \ + .midr_range_list = list + +/* Errata affecting a range of revisions of given model variant */ +#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max) \ + ERRATA_MIDR_RANGE(m, var, r_min, var, r_max) + +/* Errata affecting a single variant/revision of a model */ +#define ERRATA_MIDR_REV(model, var, rev) \ + ERRATA_MIDR_RANGE(model, var, rev, var, rev) + +/* Errata affecting all variants/revisions of a given a model */ +#define ERRATA_MIDR_ALL_VERSIONS(model) \ + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \ + CAP_MIDR_ALL_VERSIONS(model) + +/* Errata affecting a list of midr ranges, with same work around */ +#define ERRATA_MIDR_RANGE_LIST(midr_list) \ + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \ + CAP_MIDR_RANGE_LIST(midr_list) + +static const __maybe_unused struct midr_range tx2_family_cpus[] = { + MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN), + MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2), + {}, +}; + +static bool __maybe_unused +needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry, + int scope) +{ + int i; + + if (!is_affected_midr_range_list(entry, scope) || + !is_hyp_mode_available()) + return false; + + for_each_possible_cpu(i) { + if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0) + return true; + } + + return false; +} + +static bool __maybe_unused +has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry, + int scope) +{ + u32 midr = read_cpuid_id(); + bool has_dic = read_cpuid_cachetype() & BIT(CTR_EL0_DIC_SHIFT); + const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1); + + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); + return is_midr_in_range(midr, &range) && has_dic; +} + +#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI +static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = { +#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009 + { + 
ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0) + }, + { + .midr_range.model = MIDR_QCOM_KRYO, + .matches = is_kryo_midr, + }, +#endif +#ifdef CONFIG_ARM64_ERRATUM_1286807 + { + ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0), + }, + { + /* Kryo4xx Gold (rcpe to rfpe) => (r0p0 to r3p0) */ + ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe), + }, +#endif +#ifdef CONFIG_ARM64_ERRATUM_2441007 + { + ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A55), + }, +#endif +#ifdef CONFIG_ARM64_ERRATUM_2441009 + { + /* Cortex-A510 r0p0 -> r1p1. Fixed in r1p2 */ + ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1), + }, +#endif + {}, +}; +#endif + +#ifdef CONFIG_CAVIUM_ERRATUM_23154 +static const struct midr_range cavium_erratum_23154_cpus[] = { + MIDR_ALL_VERSIONS(MIDR_THUNDERX), + MIDR_ALL_VERSIONS(MIDR_THUNDERX_81XX), + MIDR_ALL_VERSIONS(MIDR_THUNDERX_83XX), + MIDR_ALL_VERSIONS(MIDR_OCTX2_98XX), + MIDR_ALL_VERSIONS(MIDR_OCTX2_96XX), + MIDR_ALL_VERSIONS(MIDR_OCTX2_95XX), + MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXN), + MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXMM), + MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXO), + {}, +}; +#endif + +#ifdef CONFIG_CAVIUM_ERRATUM_27456 +const struct midr_range cavium_erratum_27456_cpus[] = { + /* Cavium ThunderX, T88 pass 1.x - 2.1 */ + MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1), + /* Cavium ThunderX, T81 pass 1.0 */ + MIDR_REV(MIDR_THUNDERX_81XX, 0, 0), + {}, +}; +#endif + +#ifdef CONFIG_CAVIUM_ERRATUM_30115 +static const struct midr_range cavium_erratum_30115_cpus[] = { + /* Cavium ThunderX, T88 pass 1.x - 2.2 */ + MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2), + /* Cavium ThunderX, T81 pass 1.0 - 1.2 */ + MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2), + /* Cavium ThunderX, T83 pass 1.0 */ + MIDR_REV(MIDR_THUNDERX_83XX, 0, 0), + {}, +}; +#endif + +#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003 +static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = { + { + ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0), + }, + { + .midr_range.model = MIDR_QCOM_KRYO, + .matches = is_kryo_midr, + }, + {}, +}; +#endif + +#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE +static const struct midr_range workaround_clean_cache[] = { +#if defined(CONFIG_ARM64_ERRATUM_826319) || \ + defined(CONFIG_ARM64_ERRATUM_827319) || \ + defined(CONFIG_ARM64_ERRATUM_824069) + /* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */ + MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2), +#endif +#ifdef CONFIG_ARM64_ERRATUM_819472 + /* Cortex-A53 r0p[01] : ARM errata 819472 */ + MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1), +#endif + {}, +}; +#endif + +#ifdef CONFIG_ARM64_ERRATUM_1418040 +/* + * - 1188873 affects r0p0 to r2p0 + * - 1418040 affects r0p0 to r3p1 + */ +static const struct midr_range erratum_1418040_list[] = { + /* Cortex-A76 r0p0 to r3p1 */ + MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1), + /* Neoverse-N1 r0p0 to r3p1 */ + MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1), + /* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */ + MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf), + {}, +}; +#endif + +#ifdef CONFIG_ARM64_ERRATUM_845719 +static const struct midr_range erratum_845719_list[] = { + /* Cortex-A53 r0p[01234] */ + MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4), + /* Brahma-B53 r0p[0] */ + MIDR_REV(MIDR_BRAHMA_B53, 0, 0), + /* Kryo2XX Silver rAp4 */ + MIDR_REV(MIDR_QCOM_KRYO_2XX_SILVER, 0xa, 0x4), + {}, +}; +#endif + +#ifdef CONFIG_ARM64_ERRATUM_843419 +static const struct arm64_cpu_capabilities erratum_843419_list[] = { + { + /* Cortex-A53 r0p[01234] */ + .matches = is_affected_midr_range, + ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4), + MIDR_FIXED(0x4, 
BIT(8)), + }, + { + /* Brahma-B53 r0p[0] */ + .matches = is_affected_midr_range, + ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0), + }, + {}, +}; +#endif + +#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT +static const struct midr_range erratum_speculative_at_list[] = { +#ifdef CONFIG_ARM64_ERRATUM_1165522 + /* Cortex A76 r0p0 to r2p0 */ + MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0), +#endif +#ifdef CONFIG_ARM64_ERRATUM_1319367 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), +#endif +#ifdef CONFIG_ARM64_ERRATUM_1530923 + /* Cortex A55 r0p0 to r2p0 */ + MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0), + /* Kryo4xx Silver (rdpe => r1p0) */ + MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe), +#endif + {}, +}; +#endif + +#ifdef CONFIG_ARM64_ERRATUM_1463225 +static const struct midr_range erratum_1463225[] = { + /* Cortex-A76 r0p0 - r3p1 */ + MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1), + /* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */ + MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf), + {}, +}; +#endif + +#ifdef CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE +static const struct midr_range trbe_overwrite_fill_mode_cpus[] = { +#ifdef CONFIG_ARM64_ERRATUM_2139208 + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2), +#endif +#ifdef CONFIG_ARM64_ERRATUM_2119858 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A710), + MIDR_RANGE(MIDR_CORTEX_X2, 0, 0, 2, 0), +#endif + {}, +}; +#endif /* CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE */ + +#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE +static const struct midr_range tsb_flush_fail_cpus[] = { +#ifdef CONFIG_ARM64_ERRATUM_2067961 + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2), +#endif +#ifdef CONFIG_ARM64_ERRATUM_2054223 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A710), +#endif + {}, +}; +#endif /* CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE */ + +#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE +static struct midr_range trbe_write_out_of_range_cpus[] = { +#ifdef CONFIG_ARM64_ERRATUM_2253138 + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2), +#endif +#ifdef CONFIG_ARM64_ERRATUM_2224489 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A710), + MIDR_RANGE(MIDR_CORTEX_X2, 0, 0, 2, 0), +#endif + {}, +}; +#endif /* CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE */ + +#ifdef CONFIG_ARM64_ERRATUM_1742098 +static struct midr_range broken_aarch32_aes[] = { + MIDR_RANGE(MIDR_CORTEX_A57, 0, 1, 0xf, 0xf), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), + {}, +}; +#endif /* CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE */ + +#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD +static const struct midr_range erratum_spec_unpriv_load_list[] = { +#ifdef CONFIG_ARM64_ERRATUM_3117295 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A510), +#endif +#ifdef CONFIG_ARM64_ERRATUM_2966298 + /* Cortex-A520 r0p0 to r0p1 */ + MIDR_REV_RANGE(MIDR_CORTEX_A520, 0, 0, 1), +#endif + {}, +}; +#endif + +const struct arm64_cpu_capabilities arm64_errata[] = { +#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE + { + .desc = "ARM errata 826319, 827319, 824069, or 819472", + .capability = ARM64_WORKAROUND_CLEAN_CACHE, + ERRATA_MIDR_RANGE_LIST(workaround_clean_cache), + .cpu_enable = cpu_enable_cache_maint_trap, + }, +#endif +#ifdef CONFIG_ARM64_ERRATUM_832075 + { + /* Cortex-A57 r0p0 - r1p2 */ + .desc = "ARM erratum 832075", + .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE, + ERRATA_MIDR_RANGE(MIDR_CORTEX_A57, + 0, 0, + 1, 2), + }, +#endif +#ifdef CONFIG_ARM64_ERRATUM_834220 + { + /* Cortex-A57 r0p0 - r1p2 */ + .desc = "ARM erratum 834220", + .capability = ARM64_WORKAROUND_834220, + ERRATA_MIDR_RANGE(MIDR_CORTEX_A57, + 0, 0, + 1, 2), + }, +#endif +#ifdef 
CONFIG_ARM64_ERRATUM_843419 + { + .desc = "ARM erratum 843419", + .capability = ARM64_WORKAROUND_843419, + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, + .matches = cpucap_multi_entry_cap_matches, + .match_list = erratum_843419_list, + }, +#endif +#ifdef CONFIG_ARM64_ERRATUM_845719 + { + .desc = "ARM erratum 845719", + .capability = ARM64_WORKAROUND_845719, + ERRATA_MIDR_RANGE_LIST(erratum_845719_list), + }, +#endif +#ifdef CONFIG_CAVIUM_ERRATUM_23154 + { + .desc = "Cavium errata 23154 and 38545", + .capability = ARM64_WORKAROUND_CAVIUM_23154, + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, + ERRATA_MIDR_RANGE_LIST(cavium_erratum_23154_cpus), + }, +#endif +#ifdef CONFIG_CAVIUM_ERRATUM_27456 + { + .desc = "Cavium erratum 27456", + .capability = ARM64_WORKAROUND_CAVIUM_27456, + ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus), + }, +#endif +#ifdef CONFIG_CAVIUM_ERRATUM_30115 + { + .desc = "Cavium erratum 30115", + .capability = ARM64_WORKAROUND_CAVIUM_30115, + ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus), + }, +#endif + { + .desc = "Mismatched cache type (CTR_EL0)", + .capability = ARM64_MISMATCHED_CACHE_TYPE, + .matches = has_mismatched_cache_type, + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, + .cpu_enable = cpu_enable_trap_ctr_access, + }, +#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003 + { + .desc = "Qualcomm Technologies Falkor/Kryo erratum 1003", + .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003, + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, + .matches = cpucap_multi_entry_cap_matches, + .match_list = qcom_erratum_1003_list, + }, +#endif +#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI + { + .desc = "Qualcomm erratum 1009, or ARM erratum 1286807, 2441009", + .capability = ARM64_WORKAROUND_REPEAT_TLBI, + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, + .matches = cpucap_multi_entry_cap_matches, + .match_list = arm64_repeat_tlbi_list, + }, +#endif +#ifdef CONFIG_ARM64_ERRATUM_858921 + { + /* Cortex-A73 all versions */ + .desc = "ARM erratum 858921", + .capability = ARM64_WORKAROUND_858921, + ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), + }, +#endif + { + .desc = "Spectre-v2", + .capability = ARM64_SPECTRE_V2, + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, + .matches = has_spectre_v2, + .cpu_enable = spectre_v2_enable_mitigation, + }, +#ifdef CONFIG_RANDOMIZE_BASE + { + /* Must come after the Spectre-v2 entry */ + .desc = "Spectre-v3a", + .capability = ARM64_SPECTRE_V3A, + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, + .matches = has_spectre_v3a, + .cpu_enable = spectre_v3a_enable_mitigation, + }, +#endif + { + .desc = "Spectre-v4", + .capability = ARM64_SPECTRE_V4, + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, + .matches = has_spectre_v4, + .cpu_enable = spectre_v4_enable_mitigation, + }, + { + .desc = "Spectre-BHB", + .capability = ARM64_SPECTRE_BHB, + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, + .matches = is_spectre_bhb_affected, + .cpu_enable = spectre_bhb_enable_mitigation, + }, +#ifdef CONFIG_ARM64_ERRATUM_1418040 + { + .desc = "ARM erratum 1418040", + .capability = ARM64_WORKAROUND_1418040, + ERRATA_MIDR_RANGE_LIST(erratum_1418040_list), + /* + * We need to allow affected CPUs to come in late, but + * also need the non-affected CPUs to be able to come + * in at any point in time. Wonderful. 
+ */ + .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE, + }, +#endif +#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT + { + .desc = "ARM errata 1165522, 1319367, or 1530923", + .capability = ARM64_WORKAROUND_SPECULATIVE_AT, + ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_list), + }, +#endif +#ifdef CONFIG_ARM64_ERRATUM_1463225 + { + .desc = "ARM erratum 1463225", + .capability = ARM64_WORKAROUND_1463225, + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, + .matches = has_cortex_a76_erratum_1463225, + .midr_range_list = erratum_1463225, + }, +#endif +#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219 + { + .desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)", + .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM, + ERRATA_MIDR_RANGE_LIST(tx2_family_cpus), + .matches = needs_tx2_tvm_workaround, + }, + { + .desc = "Cavium ThunderX2 erratum 219 (PRFM removal)", + .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM, + ERRATA_MIDR_RANGE_LIST(tx2_family_cpus), + }, +#endif +#ifdef CONFIG_ARM64_ERRATUM_1542419 + { + /* we depend on the firmware portion for correctness */ + .desc = "ARM erratum 1542419 (kernel portion)", + .capability = ARM64_WORKAROUND_1542419, + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, + .matches = has_neoverse_n1_erratum_1542419, + .cpu_enable = cpu_enable_trap_ctr_access, + }, +#endif +#ifdef CONFIG_ARM64_ERRATUM_1508412 + { + /* we depend on the firmware portion for correctness */ + .desc = "ARM erratum 1508412 (kernel portion)", + .capability = ARM64_WORKAROUND_1508412, + ERRATA_MIDR_RANGE(MIDR_CORTEX_A77, + 0, 0, + 1, 0), + }, +#endif +#ifdef CONFIG_NVIDIA_CARMEL_CNP_ERRATUM + { + /* NVIDIA Carmel */ + .desc = "NVIDIA Carmel CNP erratum", + .capability = ARM64_WORKAROUND_NVIDIA_CARMEL_CNP, + ERRATA_MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL), + }, +#endif +#ifdef CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE + { + /* + * The erratum work around is handled within the TRBE + * driver and can be applied per-cpu. So, we can allow + * a late CPU to come online with this erratum. 
+ */ + .desc = "ARM erratum 2119858 or 2139208", + .capability = ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE, + .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE, + CAP_MIDR_RANGE_LIST(trbe_overwrite_fill_mode_cpus), + }, +#endif +#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE + { + .desc = "ARM erratum 2067961 or 2054223", + .capability = ARM64_WORKAROUND_TSB_FLUSH_FAILURE, + ERRATA_MIDR_RANGE_LIST(tsb_flush_fail_cpus), + }, +#endif +#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE + { + .desc = "ARM erratum 2253138 or 2224489", + .capability = ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE, + .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE, + CAP_MIDR_RANGE_LIST(trbe_write_out_of_range_cpus), + }, +#endif +#ifdef CONFIG_ARM64_ERRATUM_2645198 + { + .desc = "ARM erratum 2645198", + .capability = ARM64_WORKAROUND_2645198, + ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A715) + }, +#endif +#ifdef CONFIG_ARM64_ERRATUM_2077057 + { + .desc = "ARM erratum 2077057", + .capability = ARM64_WORKAROUND_2077057, + ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2), + }, +#endif +#ifdef CONFIG_ARM64_ERRATUM_2064142 + { + .desc = "ARM erratum 2064142", + .capability = ARM64_WORKAROUND_2064142, + + /* Cortex-A510 r0p0 - r0p2 */ + ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2) + }, +#endif +#ifdef CONFIG_ARM64_ERRATUM_2457168 + { + .desc = "ARM erratum 2457168", + .capability = ARM64_WORKAROUND_2457168, + .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE, + + /* Cortex-A510 r0p0-r1p1 */ + CAP_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1) + }, +#endif +#ifdef CONFIG_ARM64_ERRATUM_2038923 + { + .desc = "ARM erratum 2038923", + .capability = ARM64_WORKAROUND_2038923, + + /* Cortex-A510 r0p0 - r0p2 */ + ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2) + }, +#endif +#ifdef CONFIG_ARM64_ERRATUM_1902691 + { + .desc = "ARM erratum 1902691", + .capability = ARM64_WORKAROUND_1902691, + + /* Cortex-A510 r0p0 - r0p1 */ + ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 1) + }, +#endif +#ifdef CONFIG_ARM64_ERRATUM_1742098 + { + .desc = "ARM erratum 1742098", + .capability = ARM64_WORKAROUND_1742098, + CAP_MIDR_RANGE_LIST(broken_aarch32_aes), + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, + }, +#endif +#ifdef CONFIG_ARM64_ERRATUM_2658417 + { + .desc = "ARM erratum 2658417", + .capability = ARM64_WORKAROUND_2658417, + /* Cortex-A510 r0p0 - r1p1 */ + ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1), + MIDR_FIXED(MIDR_CPU_VAR_REV(1,1), BIT(25)), + .cpu_enable = cpu_clear_bf16_from_user_emulation, + }, +#endif +#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD + { + .desc = "ARM errata 2966298, 3117295", + .capability = ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD, + /* Cortex-A520 r0p0 - r0p1 */ + ERRATA_MIDR_RANGE_LIST(erratum_spec_unpriv_load_list), + }, +#endif +#ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38 + { + .desc = "AmpereOne erratum AC03_CPU_38", + .capability = ARM64_WORKAROUND_AMPERE_AC03_CPU_38, + ERRATA_MIDR_ALL_VERSIONS(MIDR_AMPERE1), + }, +#endif + { + } +}; diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c new file mode 100644 index 0000000000..e133011f64 --- /dev/null +++ b/arch/arm64/kernel/cpu_ops.c @@ -0,0 +1,118 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * CPU kernel entry/exit control + * + * Copyright (C) 2013 ARM Ltd. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +extern const struct cpu_operations smp_spin_table_ops; +#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL +extern const struct cpu_operations acpi_parking_protocol_ops; +#endif +extern const struct cpu_operations cpu_psci_ops; + +static const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init; + +static const struct cpu_operations *const dt_supported_cpu_ops[] __initconst = { + &smp_spin_table_ops, + &cpu_psci_ops, + NULL, +}; + +static const struct cpu_operations *const acpi_supported_cpu_ops[] __initconst = { +#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL + &acpi_parking_protocol_ops, +#endif + &cpu_psci_ops, + NULL, +}; + +static const struct cpu_operations * __init cpu_get_ops(const char *name) +{ + const struct cpu_operations *const *ops; + + ops = acpi_disabled ? dt_supported_cpu_ops : acpi_supported_cpu_ops; + + while (*ops) { + if (!strcmp(name, (*ops)->name)) + return *ops; + + ops++; + } + + return NULL; +} + +static const char *__init cpu_read_enable_method(int cpu) +{ + const char *enable_method; + + if (acpi_disabled) { + struct device_node *dn = of_get_cpu_node(cpu, NULL); + + if (!dn) { + if (!cpu) + pr_err("Failed to find device node for boot cpu\n"); + return NULL; + } + + enable_method = of_get_property(dn, "enable-method", NULL); + if (!enable_method) { + /* + * The boot CPU may not have an enable method (e.g. + * when spin-table is used for secondaries). + * Don't warn spuriously. + */ + if (cpu != 0) + pr_err("%pOF: missing enable-method property\n", + dn); + } + of_node_put(dn); + } else { + enable_method = acpi_get_enable_method(cpu); + if (!enable_method) { + /* + * In ACPI systems the boot CPU does not require + * checking the enable method since for some + * boot protocol (ie parking protocol) it need not + * be initialized. Don't warn spuriously. + */ + if (cpu != 0) + pr_err("Unsupported ACPI enable-method\n"); + } + } + + return enable_method; +} +/* + * Read a cpu's enable method and record it in cpu_ops. + */ +int __init init_cpu_ops(int cpu) +{ + const char *enable_method = cpu_read_enable_method(cpu); + + if (!enable_method) + return -ENODEV; + + cpu_ops[cpu] = cpu_get_ops(enable_method); + if (!cpu_ops[cpu]) { + pr_warn("Unsupported enable-method: %s\n", enable_method); + return -EOPNOTSUPP; + } + + return 0; +} + +const struct cpu_operations *get_cpu_ops(int cpu) +{ + return cpu_ops[cpu]; +} diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c new file mode 100644 index 0000000000..444a73c2e6 --- /dev/null +++ b/arch/arm64/kernel/cpufeature.c @@ -0,0 +1,3542 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Contains CPU feature definitions + * + * Copyright (C) 2015 ARM Ltd. + * + * A note for the weary kernel hacker: the code here is confusing and hard to + * follow! That's partly because it's solving a nasty problem, but also because + * there's a little bit of over-abstraction that tends to obscure what's going + * on behind a maze of helper functions and macros. + * + * The basic problem is that hardware folks have started gluing together CPUs + * with distinct architectural features; in some cases even creating SoCs where + * user-visible instructions are available only on a subset of the available + * cores. We try to address this by snapshotting the feature registers of the + * boot CPU and comparing these with the feature registers of each secondary + * CPU when bringing them up. 
If there is a mismatch, then we update the + * snapshot state to indicate the lowest-common denominator of the feature, + * known as the "safe" value. This snapshot state can be queried to view the + * "sanitised" value of a feature register. + * + * The sanitised register values are used to decide which capabilities we + * have in the system. These may be in the form of traditional "hwcaps" + * advertised to userspace or internal "cpucaps" which are used to configure + * things like alternative patching and static keys. While a feature mismatch + * may result in a TAINT_CPU_OUT_OF_SPEC kernel taint, a capability mismatch + * may prevent a CPU from being onlined at all. + * + * Some implementation details worth remembering: + * + * - Mismatched features are *always* sanitised to a "safe" value, which + * usually indicates that the feature is not supported. + * + * - A mismatched feature marked with FTR_STRICT will cause a "SANITY CHECK" + * warning when onlining an offending CPU and the kernel will be tainted + * with TAINT_CPU_OUT_OF_SPEC. + * + * - Features marked as FTR_VISIBLE have their sanitised value visible to + * userspace. FTR_VISIBLE features in registers that are only visible + * to EL0 by trapping *must* have a corresponding HWCAP so that late + * onlining of CPUs cannot lead to features disappearing at runtime. + * + * - A "feature" is typically a 4-bit register field. A "capability" is the + * high-level description derived from the sanitised field value. + * + * - Read the Arm ARM (DDI 0487F.a) section D13.1.3 ("Principles of the ID + * scheme for fields in ID registers") to understand when feature fields + * may be signed or unsigned (FTR_SIGNED and FTR_UNSIGNED accordingly). + * + * - KVM exposes its own view of the feature registers to guest operating + * systems regardless of FTR_VISIBLE. This is typically driven from the + * sanitised register values to allow virtual CPUs to be migrated between + * arbitrary physical CPUs, but some features not present on the host are + * also advertised and emulated. Look at sys_reg_descs[] for the gory + * details. + * + * - If the arm64_ftr_bits[] for a register has a missing field, then this + * field is treated as STRICT RES0, including for read_sanitised_ftr_reg(). + * This is stronger than FTR_HIDDEN and can be used to hide features from + * KVM guests. 
+ */ + +#define pr_fmt(fmt) "CPU features: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Kernel representation of AT_HWCAP and AT_HWCAP2 */ +static DECLARE_BITMAP(elf_hwcap, MAX_CPU_FEATURES) __read_mostly; + +#ifdef CONFIG_COMPAT +#define COMPAT_ELF_HWCAP_DEFAULT \ + (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\ + COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\ + COMPAT_HWCAP_TLS|COMPAT_HWCAP_IDIV|\ + COMPAT_HWCAP_LPAE) +unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT; +unsigned int compat_elf_hwcap2 __read_mostly; +#endif + +DECLARE_BITMAP(system_cpucaps, ARM64_NCAPS); +EXPORT_SYMBOL(system_cpucaps); +static struct arm64_cpu_capabilities const __ro_after_init *cpucap_ptrs[ARM64_NCAPS]; + +DECLARE_BITMAP(boot_cpucaps, ARM64_NCAPS); + +bool arm64_use_ng_mappings = false; +EXPORT_SYMBOL(arm64_use_ng_mappings); + +DEFINE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector) = vectors; + +/* + * Permit PER_LINUX32 and execve() of 32-bit binaries even if not all CPUs + * support it? + */ +static bool __read_mostly allow_mismatched_32bit_el0; + +/* + * Static branch enabled only if allow_mismatched_32bit_el0 is set and we have + * seen at least one CPU capable of 32-bit EL0. + */ +DEFINE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0); + +/* + * Mask of CPUs supporting 32-bit EL0. + * Only valid if arm64_mismatched_32bit_el0 is enabled. + */ +static cpumask_var_t cpu_32bit_el0_mask __cpumask_var_read_mostly; + +void dump_cpu_features(void) +{ + /* file-wide pr_fmt adds "CPU features: " prefix */ + pr_emerg("0x%*pb\n", ARM64_NCAPS, &system_cpucaps); +} + +#define ARM64_CPUID_FIELDS(reg, field, min_value) \ + .sys_reg = SYS_##reg, \ + .field_pos = reg##_##field##_SHIFT, \ + .field_width = reg##_##field##_WIDTH, \ + .sign = reg##_##field##_SIGNED, \ + .min_field_value = reg##_##field##_##min_value, + +#define __ARM64_FTR_BITS(SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \ + { \ + .sign = SIGNED, \ + .visible = VISIBLE, \ + .strict = STRICT, \ + .type = TYPE, \ + .shift = SHIFT, \ + .width = WIDTH, \ + .safe_val = SAFE_VAL, \ + } + +/* Define a feature with unsigned values */ +#define ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \ + __ARM64_FTR_BITS(FTR_UNSIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) + +/* Define a feature with a signed value */ +#define S_ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \ + __ARM64_FTR_BITS(FTR_SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) + +#define ARM64_FTR_END \ + { \ + .width = 0, \ + } + +static void cpu_enable_cnp(struct arm64_cpu_capabilities const *cap); + +static bool __system_matches_cap(unsigned int n); + +/* + * NOTE: Any changes to the visibility of features should be kept in + * sync with the documentation of the CPU feature register ABI. 
+ */ +static const struct arm64_ftr_bits ftr_id_aa64isar0[] = { + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_RNDR_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_TLB_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_TS_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_FHM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_DP_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SM4_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SM3_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SHA3_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_RDM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_ATOMIC_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_CRC32_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SHA2_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SHA1_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_AES_SHIFT, 4, 0), + ARM64_FTR_END, +}; + +static const struct arm64_ftr_bits ftr_id_aa64isar1[] = { + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_I8MM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_DGH_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_BF16_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_SPECRES_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_SB_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_FRINTTS_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH), + FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_GPI_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH), + FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_GPA_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_LRCPC_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_FCMA_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_JSCVT_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH), + FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_EL1_API_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH), + FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_EL1_APA_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, 0), + ARM64_FTR_END, +}; + +static const struct arm64_ftr_bits ftr_id_aa64isar2[] = { + ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_CSSC_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_RPRFM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_CLRBHB_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_BC_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_MOPS_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH), + FTR_STRICT, 
FTR_EXACT, ID_AA64ISAR2_EL1_APA3_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH), + FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_GPA3_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_RPRES_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_WFxT_SHIFT, 4, 0), + ARM64_FTR_END, +}; + +static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = { + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_CSV3_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_CSV2_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_DIT_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_AMU_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_MPAM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SEL2_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), + FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SVE_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_RAS_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_GIC_SHIFT, 4, 0), + S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_AdvSIMD_SHIFT, 4, ID_AA64PFR0_EL1_AdvSIMD_NI), + S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_FP_SHIFT, 4, ID_AA64PFR0_EL1_FP_NI), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL3_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL2_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL1_SHIFT, 4, ID_AA64PFR0_EL1_ELx_64BIT_ONLY), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL0_SHIFT, 4, ID_AA64PFR0_EL1_ELx_64BIT_ONLY), + ARM64_FTR_END, +}; + +static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = { + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), + FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_SME_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_MPAM_frac_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_RAS_frac_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_MTE), + FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_MTE_SHIFT, 4, ID_AA64PFR1_EL1_MTE_NI), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_SSBS_SHIFT, 4, ID_AA64PFR1_EL1_SSBS_NI), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_BTI), + FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_BT_SHIFT, 4, 0), + ARM64_FTR_END, +}; + +static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = { + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), + FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_F64MM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), + FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_F32MM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), + FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_I8MM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), + FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_SM4_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), + FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_SHA3_SHIFT, 4, 0), + 
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), + FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_BF16_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), + FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_BitPerm_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), + FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_AES_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), + FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_SVEver_SHIFT, 4, 0), + ARM64_FTR_END, +}; + +static const struct arm64_ftr_bits ftr_id_aa64smfr0[] = { + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), + FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_FA64_SHIFT, 1, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), + FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_SMEver_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), + FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_I16I64_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), + FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F64F64_SHIFT, 1, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), + FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_I16I32_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), + FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_B16B16_SHIFT, 1, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), + FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F16F16_SHIFT, 1, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), + FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_I8I32_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), + FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F16F32_SHIFT, 1, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), + FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_B16F32_SHIFT, 1, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), + FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_BI32I32_SHIFT, 1, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), + FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F32F32_SHIFT, 1, 0), + ARM64_FTR_END, +}; + +static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = { + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_ECV_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_FGT_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_EXS_SHIFT, 4, 0), + /* + * Page size not being supported at Stage-2 is not fatal. You + * just give up KVM if PAGE_SIZE isn't supported there. Go fix + * your favourite nesting hypervisor. + * + * There is a small corner case where the hypervisor explicitly + * advertises a given granule size at Stage-2 (value 2) on some + * vCPUs, and uses the fallback to Stage-1 (value 0) for other + * vCPUs. Although this is not forbidden by the architecture, it + * indicates that the hypervisor is being silly (or buggy). + * + * We make no effort to cope with this and pretend that if these + * fields are inconsistent across vCPUs, then it isn't worth + * trying to bring KVM up. 
+ */ + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_EL1_TGRAN4_2_SHIFT, 4, 1), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_EL1_TGRAN64_2_SHIFT, 4, 1), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_EL1_TGRAN16_2_SHIFT, 4, 1), + /* + * We already refuse to boot CPUs that don't support our configured + * page size, so we can only detect mismatches for a page size other + * than the one we're currently using. Unfortunately, SoCs like this + * exist in the wild so, even though we don't like it, we'll have to go + * along with it and treat them as non-strict. + */ + S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_TGRAN4_SHIFT, 4, ID_AA64MMFR0_EL1_TGRAN4_NI), + S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_TGRAN64_SHIFT, 4, ID_AA64MMFR0_EL1_TGRAN64_NI), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_TGRAN16_SHIFT, 4, ID_AA64MMFR0_EL1_TGRAN16_NI), + + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_BIGENDEL0_SHIFT, 4, 0), + /* Linux shouldn't care about secure memory */ + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_SNSMEM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_BIGEND_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_ASIDBITS_SHIFT, 4, 0), + /* + * Differing PARange is fine as long as all peripherals and memory are mapped + * within the minimum PARange of all CPUs + */ + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_PARANGE_SHIFT, 4, 0), + ARM64_FTR_END, +}; + +static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = { + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_TIDCP1_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_AFP_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_HCX_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_ETS_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_TWED_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_XNX_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64MMFR1_EL1_SpecSEI_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_PAN_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_LO_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_HPDS_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_VH_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_VMIDBits_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_HAFDBS_SHIFT, 4, 0), + ARM64_FTR_END, +}; + +static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = { + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_E0PD_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_EVT_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_BBM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_TTL_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_FWB_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, 
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_IDS_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_AT_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_ST_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_NV_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_CCIDX_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_VARange_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_IESB_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_LSM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_UAO_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_CnP_SHIFT, 4, 0), + ARM64_FTR_END, +}; + +static const struct arm64_ftr_bits ftr_id_aa64mmfr3[] = { + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR3_EL1_S1PIE_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR3_EL1_TCRX_SHIFT, 4, 0), + ARM64_FTR_END, +}; + +static const struct arm64_ftr_bits ftr_ctr[] = { + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */ + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_EL0_DIC_SHIFT, 1, 1), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_EL0_IDC_SHIFT, 1, 1), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_EL0_CWG_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_EL0_ERG_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_EL0_DminLine_SHIFT, 4, 1), + /* + * Linux can handle differing I-cache policies. Userspace JITs will + * make use of *minLine. + * If we have differing I-cache policies, report it as the weakest - VIPT. 
+ */ + ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, CTR_EL0_L1Ip_SHIFT, 2, CTR_EL0_L1Ip_VIPT), /* L1Ip */ + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_EL0_IminLine_SHIFT, 4, 0), + ARM64_FTR_END, +}; + +static struct arm64_ftr_override __ro_after_init no_override = { }; + +struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = { + .name = "SYS_CTR_EL0", + .ftr_bits = ftr_ctr, + .override = &no_override, +}; + +static const struct arm64_ftr_bits ftr_id_mmfr0[] = { + S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_InnerShr_SHIFT, 4, 0xf), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_FCSE_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_AuxReg_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_TCM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_ShareLvl_SHIFT, 4, 0), + S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_OuterShr_SHIFT, 4, 0xf), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_PMSA_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_VMSA_SHIFT, 4, 0), + ARM64_FTR_END, +}; + +static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = { + S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_DoubleLock_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_PMSVer_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_CTX_CMPs_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_WRPs_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_BRPs_SHIFT, 4, 0), + /* + * We can instantiate multiple PMU instances with different levels + * of support. 
+ */ + S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_EL1_PMUVer_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_EL1_DebugVer_SHIFT, 4, 0x6), + ARM64_FTR_END, +}; + +static const struct arm64_ftr_bits ftr_mvfr0[] = { + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPRound_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPShVec_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPSqrt_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPDivide_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPTrap_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPDP_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPSP_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_SIMDReg_SHIFT, 4, 0), + ARM64_FTR_END, +}; + +static const struct arm64_ftr_bits ftr_mvfr1[] = { + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDFMAC_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_FPHP_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDHP_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDSP_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDInt_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDLS_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_FPDNaN_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_FPFtZ_SHIFT, 4, 0), + ARM64_FTR_END, +}; + +static const struct arm64_ftr_bits ftr_mvfr2[] = { + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR2_EL1_FPMisc_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR2_EL1_SIMDMisc_SHIFT, 4, 0), + ARM64_FTR_END, +}; + +static const struct arm64_ftr_bits ftr_dczid[] = { + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, DCZID_EL0_DZP_SHIFT, 1, 1), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, DCZID_EL0_BS_SHIFT, 4, 0), + ARM64_FTR_END, +}; + +static const struct arm64_ftr_bits ftr_gmid[] = { + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, GMID_EL1_BS_SHIFT, 4, 0), + ARM64_FTR_END, +}; + +static const struct arm64_ftr_bits ftr_id_isar0[] = { + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_Divide_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_Debug_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_Coproc_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_CmpBranch_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_BitField_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_BitCount_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_Swap_SHIFT, 4, 0), + ARM64_FTR_END, +}; + +static const struct arm64_ftr_bits ftr_id_isar5[] = { + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_EL1_RDM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_EL1_CRC32_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_EL1_SHA2_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 
ID_ISAR5_EL1_SHA1_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_EL1_AES_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_EL1_SEVL_SHIFT, 4, 0), + ARM64_FTR_END, +}; + +static const struct arm64_ftr_bits ftr_id_mmfr4[] = { + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_EVT_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_CCIDX_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_LSM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_HPDS_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_CnP_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_XNX_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_AC2_SHIFT, 4, 0), + + /* + * SpecSEI = 1 indicates that the PE might generate an SError on an + * external abort on speculative read. It is safer to assume that an + * SError might be generated than that it will not be; hence the field + * is classified as FTR_HIGHER_SAFE. + */ + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_MMFR4_EL1_SpecSEI_SHIFT, 4, 0), + ARM64_FTR_END, +}; + +static const struct arm64_ftr_bits ftr_id_isar4[] = { + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_SWP_frac_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_PSR_M_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_SynchPrim_frac_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_Barrier_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_SMC_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_Writeback_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_WithShifts_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_Unpriv_SHIFT, 4, 0), + ARM64_FTR_END, +}; + +static const struct arm64_ftr_bits ftr_id_mmfr5[] = { + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR5_EL1_ETS_SHIFT, 4, 0), + ARM64_FTR_END, +}; + +static const struct arm64_ftr_bits ftr_id_isar6[] = { + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_I8MM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_BF16_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_SPECRES_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_SB_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_FHM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_DP_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_JSCVT_SHIFT, 4, 0), + ARM64_FTR_END, +}; + +static const struct arm64_ftr_bits ftr_id_pfr0[] = { + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_EL1_DIT_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR0_EL1_CSV2_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_EL1_State3_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_EL1_State2_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_EL1_State1_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_EL1_State0_SHIFT,
4, 0), + ARM64_FTR_END, +}; + +static const struct arm64_ftr_bits ftr_id_pfr1[] = { + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_GIC_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_Virt_frac_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_Sec_frac_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_GenTimer_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_Virtualization_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_MProgMod_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_Security_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_ProgMod_SHIFT, 4, 0), + ARM64_FTR_END, +}; + +static const struct arm64_ftr_bits ftr_id_pfr2[] = { + ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_EL1_SSBS_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_EL1_CSV3_SHIFT, 4, 0), + ARM64_FTR_END, +}; + +static const struct arm64_ftr_bits ftr_id_dfr0[] = { + /* [31:28] TraceFilt */ + S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_DFR0_EL1_PerfMon_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_EL1_MProfDbg_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_EL1_MMapTrc_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_EL1_CopTrc_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_EL1_MMapDbg_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_EL1_CopSDbg_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_EL1_CopDbg_SHIFT, 4, 0), + ARM64_FTR_END, +}; + +static const struct arm64_ftr_bits ftr_id_dfr1[] = { + S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR1_EL1_MTPMU_SHIFT, 4, 0), + ARM64_FTR_END, +}; + +static const struct arm64_ftr_bits ftr_zcr[] = { + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, + ZCR_ELx_LEN_SHIFT, ZCR_ELx_LEN_WIDTH, 0), /* LEN */ + ARM64_FTR_END, +}; + +static const struct arm64_ftr_bits ftr_smcr[] = { + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, + SMCR_ELx_LEN_SHIFT, SMCR_ELx_LEN_WIDTH, 0), /* LEN */ + ARM64_FTR_END, +}; + +/* + * Common ftr bits for a 32bit register with all hidden, strict + * attributes, with 4bit feature fields and a default safe value of + * 0. 
Covers the following 32bit registers: + * id_isar[1-3], id_mmfr[1-3] + */ +static const struct arm64_ftr_bits ftr_generic_32bits[] = { + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), + ARM64_FTR_END, +}; + +/* Table for a single 32bit feature value */ +static const struct arm64_ftr_bits ftr_single32[] = { + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 32, 0), + ARM64_FTR_END, +}; + +static const struct arm64_ftr_bits ftr_raz[] = { + ARM64_FTR_END, +}; + +#define __ARM64_FTR_REG_OVERRIDE(id_str, id, table, ovr) { \ + .sys_id = id, \ + .reg = &(struct arm64_ftr_reg){ \ + .name = id_str, \ + .override = (ovr), \ + .ftr_bits = &((table)[0]), \ + }} + +#define ARM64_FTR_REG_OVERRIDE(id, table, ovr) \ + __ARM64_FTR_REG_OVERRIDE(#id, id, table, ovr) + +#define ARM64_FTR_REG(id, table) \ + __ARM64_FTR_REG_OVERRIDE(#id, id, table, &no_override) + +struct arm64_ftr_override __ro_after_init id_aa64mmfr1_override; +struct arm64_ftr_override __ro_after_init id_aa64pfr0_override; +struct arm64_ftr_override __ro_after_init id_aa64pfr1_override; +struct arm64_ftr_override __ro_after_init id_aa64zfr0_override; +struct arm64_ftr_override __ro_after_init id_aa64smfr0_override; +struct arm64_ftr_override __ro_after_init id_aa64isar1_override; +struct arm64_ftr_override __ro_after_init id_aa64isar2_override; + +struct arm64_ftr_override arm64_sw_feature_override; + +static const struct __ftr_reg_entry { + u32 sys_id; + struct arm64_ftr_reg *reg; +} arm64_ftr_regs[] = { + + /* Op1 = 0, CRn = 0, CRm = 1 */ + ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0), + ARM64_FTR_REG(SYS_ID_PFR1_EL1, ftr_id_pfr1), + ARM64_FTR_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0), + ARM64_FTR_REG(SYS_ID_MMFR0_EL1, ftr_id_mmfr0), + ARM64_FTR_REG(SYS_ID_MMFR1_EL1, ftr_generic_32bits), + ARM64_FTR_REG(SYS_ID_MMFR2_EL1, ftr_generic_32bits), + ARM64_FTR_REG(SYS_ID_MMFR3_EL1, ftr_generic_32bits), + + /* Op1 = 0, CRn = 0, CRm = 2 */ + ARM64_FTR_REG(SYS_ID_ISAR0_EL1, ftr_id_isar0), + ARM64_FTR_REG(SYS_ID_ISAR1_EL1, ftr_generic_32bits), + ARM64_FTR_REG(SYS_ID_ISAR2_EL1, ftr_generic_32bits), + ARM64_FTR_REG(SYS_ID_ISAR3_EL1, ftr_generic_32bits), + ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_id_isar4), + ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5), + ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4), + ARM64_FTR_REG(SYS_ID_ISAR6_EL1, ftr_id_isar6), + + /* Op1 = 0, CRn = 0, CRm = 3 */ + ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_mvfr0), + ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_mvfr1), + ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2), + ARM64_FTR_REG(SYS_ID_PFR2_EL1, ftr_id_pfr2), + ARM64_FTR_REG(SYS_ID_DFR1_EL1, ftr_id_dfr1), + ARM64_FTR_REG(SYS_ID_MMFR5_EL1, ftr_id_mmfr5), + + /* Op1 = 0, CRn = 0, CRm = 4 */ + ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0, + &id_aa64pfr0_override), + ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1, + &id_aa64pfr1_override), + ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0, + &id_aa64zfr0_override), + ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64SMFR0_EL1, ftr_id_aa64smfr0, + &id_aa64smfr0_override), + + /* Op1 = 0, CRn = 0, CRm = 5 */ + 
ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0), + ARM64_FTR_REG(SYS_ID_AA64DFR1_EL1, ftr_raz), + + /* Op1 = 0, CRn = 0, CRm = 6 */ + ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0), + ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1, + &id_aa64isar1_override), + ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2, + &id_aa64isar2_override), + + /* Op1 = 0, CRn = 0, CRm = 7 */ + ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0), + ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1, + &id_aa64mmfr1_override), + ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2), + ARM64_FTR_REG(SYS_ID_AA64MMFR3_EL1, ftr_id_aa64mmfr3), + + /* Op1 = 0, CRn = 1, CRm = 2 */ + ARM64_FTR_REG(SYS_ZCR_EL1, ftr_zcr), + ARM64_FTR_REG(SYS_SMCR_EL1, ftr_smcr), + + /* Op1 = 1, CRn = 0, CRm = 0 */ + ARM64_FTR_REG(SYS_GMID_EL1, ftr_gmid), + + /* Op1 = 3, CRn = 0, CRm = 0 */ + { SYS_CTR_EL0, &arm64_ftr_reg_ctrel0 }, + ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid), + + /* Op1 = 3, CRn = 14, CRm = 0 */ + ARM64_FTR_REG(SYS_CNTFRQ_EL0, ftr_single32), +}; + +static int search_cmp_ftr_reg(const void *id, const void *regp) +{ + return (int)(unsigned long)id - (int)((const struct __ftr_reg_entry *)regp)->sys_id; +} + +/* + * get_arm64_ftr_reg_nowarn - Looks up a feature register entry using + * its sys_reg() encoding. With the array arm64_ftr_regs sorted in the + * ascending order of sys_id, we use binary search to find a matching + * entry. + * + * returns - Upon success, matching ftr_reg entry for id. + * - NULL on failure. It is up to the caller to decide + * the impact of a failure. + */ +static struct arm64_ftr_reg *get_arm64_ftr_reg_nowarn(u32 sys_id) +{ + const struct __ftr_reg_entry *ret; + + ret = bsearch((const void *)(unsigned long)sys_id, + arm64_ftr_regs, + ARRAY_SIZE(arm64_ftr_regs), + sizeof(arm64_ftr_regs[0]), + search_cmp_ftr_reg); + if (ret) + return ret->reg; + return NULL; +} + +/* + * get_arm64_ftr_reg - Looks up a feature register entry using + * its sys_reg() encoding. This calls get_arm64_ftr_reg_nowarn(). + * + * returns - Upon success, matching ftr_reg entry for id. + * - NULL on failure but with a WARN_ON(). + */ +struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id) +{ + struct arm64_ftr_reg *reg; + + reg = get_arm64_ftr_reg_nowarn(sys_id); + + /* + * Requesting a search for a non-existent register is an error. Warn + * and let the caller handle it. + */ + WARN_ON(!reg); + return reg; +} + +static u64 arm64_ftr_set_value(const struct arm64_ftr_bits *ftrp, s64 reg, + s64 ftr_val) +{ + u64 mask = arm64_ftr_mask(ftrp); + + reg &= ~mask; + reg |= (ftr_val << ftrp->shift) & mask; + return reg; +} + +s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new, + s64 cur) +{ + s64 ret = 0; + + switch (ftrp->type) { + case FTR_EXACT: + ret = ftrp->safe_val; + break; + case FTR_LOWER_SAFE: + ret = min(new, cur); + break; + case FTR_HIGHER_OR_ZERO_SAFE: + if (!cur || !new) + break; + fallthrough; + case FTR_HIGHER_SAFE: + ret = max(new, cur); + break; + default: + BUG(); + } + + return ret; +} + +static void __init sort_ftr_regs(void) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(arm64_ftr_regs); i++) { + const struct arm64_ftr_reg *ftr_reg = arm64_ftr_regs[i].reg; + const struct arm64_ftr_bits *ftr_bits = ftr_reg->ftr_bits; + unsigned int j = 0; + + /* + * Features here must be sorted in descending order with respect + * to their shift values and should not overlap with each other.
+ */ + for (; ftr_bits->width != 0; ftr_bits++, j++) { + unsigned int width = ftr_reg->ftr_bits[j].width; + unsigned int shift = ftr_reg->ftr_bits[j].shift; + unsigned int prev_shift; + + WARN((shift + width) > 64, + "%s has invalid feature at shift %d\n", + ftr_reg->name, shift); + + /* + * Skip the first feature. There is nothing to + * compare against for now. + */ + if (j == 0) + continue; + + prev_shift = ftr_reg->ftr_bits[j - 1].shift; + WARN((shift + width) > prev_shift, + "%s has feature overlap at shift %d\n", + ftr_reg->name, shift); + } + + /* + * Skip the first register. There is nothing to + * compare against for now. + */ + if (i == 0) + continue; + /* + * Registers here must be sorted in ascending order with respect + * to sys_id for subsequent binary search in get_arm64_ftr_reg() + * to work correctly. + */ + BUG_ON(arm64_ftr_regs[i].sys_id <= arm64_ftr_regs[i - 1].sys_id); + } +} + +/* + * Initialise the CPU feature register from Boot CPU values. + * Also initialises the strict_mask for the register. + * Any bits that are not covered by an arm64_ftr_bits entry are considered + * RES0 for the system-wide value, and must strictly match. + */ +static void init_cpu_ftr_reg(u32 sys_reg, u64 new) +{ + u64 val = 0; + u64 strict_mask = ~0x0ULL; + u64 user_mask = 0; + u64 valid_mask = 0; + + const struct arm64_ftr_bits *ftrp; + struct arm64_ftr_reg *reg = get_arm64_ftr_reg(sys_reg); + + if (!reg) + return; + + for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) { + u64 ftr_mask = arm64_ftr_mask(ftrp); + s64 ftr_new = arm64_ftr_value(ftrp, new); + s64 ftr_ovr = arm64_ftr_value(ftrp, reg->override->val); + + if ((ftr_mask & reg->override->mask) == ftr_mask) { + s64 tmp = arm64_ftr_safe_value(ftrp, ftr_ovr, ftr_new); + char *str = NULL; + + if (ftr_ovr != tmp) { + /* Unsafe, remove the override */ + reg->override->mask &= ~ftr_mask; + reg->override->val &= ~ftr_mask; + tmp = ftr_ovr; + str = "ignoring override"; + } else if (ftr_new != tmp) { + /* Override was valid */ + ftr_new = tmp; + str = "forced"; + } else if (ftr_ovr == tmp) { + /* Override was the safe value */ + str = "already set"; + } + + if (str) + pr_warn("%s[%d:%d]: %s to %llx\n", + reg->name, + ftrp->shift + ftrp->width - 1, + ftrp->shift, str, tmp); + } else if ((ftr_mask & reg->override->val) == ftr_mask) { + reg->override->val &= ~ftr_mask; + pr_warn("%s[%d:%d]: impossible override, ignored\n", + reg->name, + ftrp->shift + ftrp->width - 1, + ftrp->shift); + } + + val = arm64_ftr_set_value(ftrp, val, ftr_new); + + valid_mask |= ftr_mask; + if (!ftrp->strict) + strict_mask &= ~ftr_mask; + if (ftrp->visible) + user_mask |= ftr_mask; + else + reg->user_val = arm64_ftr_set_value(ftrp, + reg->user_val, + ftrp->safe_val); + } + + val &= valid_mask; + + reg->sys_val = val; + reg->strict_mask = strict_mask; + reg->user_mask = user_mask; +} + +extern const struct arm64_cpu_capabilities arm64_errata[]; +static const struct arm64_cpu_capabilities arm64_features[]; + +static void __init +init_cpucap_indirect_list_from_array(const struct arm64_cpu_capabilities *caps) +{ + for (; caps->matches; caps++) { + if (WARN(caps->capability >= ARM64_NCAPS, + "Invalid capability %d\n", caps->capability)) + continue; + if (WARN(cpucap_ptrs[caps->capability], + "Duplicate entry for capability %d\n", + caps->capability)) + continue; + cpucap_ptrs[caps->capability] = caps; + } +} + +static void __init init_cpucap_indirect_list(void) +{ + init_cpucap_indirect_list_from_array(arm64_features); + init_cpucap_indirect_list_from_array(arm64_errata); +} + 
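+/* + * A rough, illustrative sketch of how the sanitisation rules in this file + * combine per-CPU values (the example values are hypothetical, not taken + * from any particular CPU): for a 4-bit FTR_LOWER_SAFE field such as + * ID_AA64ISAR0_EL1.SHA2, one CPU reporting 2 and another reporting 1 + * sanitise to a system-wide value of 1, the lower (safer) of the two. For + * an FTR_EXACT field, any mismatch falls back to the field's safe_val, and + * a mismatch in a strict field additionally taints the kernel with + * TAINT_CPU_OUT_OF_SPEC. + */ +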
+static void __init setup_boot_cpu_capabilities(void); + +static void init_32bit_cpu_features(struct cpuinfo_32bit *info) +{ + init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0); + init_cpu_ftr_reg(SYS_ID_DFR1_EL1, info->reg_id_dfr1); + init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0); + init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1); + init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2); + init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3); + init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4); + init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5); + init_cpu_ftr_reg(SYS_ID_ISAR6_EL1, info->reg_id_isar6); + init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0); + init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1); + init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2); + init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3); + init_cpu_ftr_reg(SYS_ID_MMFR4_EL1, info->reg_id_mmfr4); + init_cpu_ftr_reg(SYS_ID_MMFR5_EL1, info->reg_id_mmfr5); + init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0); + init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1); + init_cpu_ftr_reg(SYS_ID_PFR2_EL1, info->reg_id_pfr2); + init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0); + init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1); + init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2); +} + +void __init init_cpu_features(struct cpuinfo_arm64 *info) +{ + /* Before we start using the tables, make sure it is sorted */ + sort_ftr_regs(); + + init_cpu_ftr_reg(SYS_CTR_EL0, info->reg_ctr); + init_cpu_ftr_reg(SYS_DCZID_EL0, info->reg_dczid); + init_cpu_ftr_reg(SYS_CNTFRQ_EL0, info->reg_cntfrq); + init_cpu_ftr_reg(SYS_ID_AA64DFR0_EL1, info->reg_id_aa64dfr0); + init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1); + init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0); + init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1); + init_cpu_ftr_reg(SYS_ID_AA64ISAR2_EL1, info->reg_id_aa64isar2); + init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0); + init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1); + init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2); + init_cpu_ftr_reg(SYS_ID_AA64MMFR3_EL1, info->reg_id_aa64mmfr3); + init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0); + init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1); + init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0); + init_cpu_ftr_reg(SYS_ID_AA64SMFR0_EL1, info->reg_id_aa64smfr0); + + if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) + init_32bit_cpu_features(&info->aarch32); + + if (IS_ENABLED(CONFIG_ARM64_SVE) && + id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1))) { + info->reg_zcr = read_zcr_features(); + init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr); + vec_init_vq_map(ARM64_VEC_SVE); + } + + if (IS_ENABLED(CONFIG_ARM64_SME) && + id_aa64pfr1_sme(read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1))) { + info->reg_smcr = read_smcr_features(); + /* + * We mask out SMPS since even if the hardware + * supports priorities the kernel does not at present + * and we block access to them. + */ + info->reg_smidr = read_cpuid(SMIDR_EL1) & ~SMIDR_EL1_SMPS; + init_cpu_ftr_reg(SYS_SMCR_EL1, info->reg_smcr); + vec_init_vq_map(ARM64_VEC_SME); + } + + if (id_aa64pfr1_mte(info->reg_id_aa64pfr1)) + init_cpu_ftr_reg(SYS_GMID_EL1, info->reg_gmid); + + /* + * Initialize the indirect array of CPU capabilities pointers before we + * handle the boot CPU below. 
+ */ + init_cpucap_indirect_list(); + + /* + * Detect and enable early CPU capabilities based on the boot CPU, + * after we have initialised the CPU feature infrastructure. + */ + setup_boot_cpu_capabilities(); +} + +static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new) +{ + const struct arm64_ftr_bits *ftrp; + + for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) { + s64 ftr_cur = arm64_ftr_value(ftrp, reg->sys_val); + s64 ftr_new = arm64_ftr_value(ftrp, new); + + if (ftr_cur == ftr_new) + continue; + /* Find a safe value */ + ftr_new = arm64_ftr_safe_value(ftrp, ftr_new, ftr_cur); + reg->sys_val = arm64_ftr_set_value(ftrp, reg->sys_val, ftr_new); + } + +} + +static int check_update_ftr_reg(u32 sys_id, int cpu, u64 val, u64 boot) +{ + struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id); + + if (!regp) + return 0; + + update_cpu_ftr_reg(regp, val); + if ((boot & regp->strict_mask) == (val & regp->strict_mask)) + return 0; + pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016llx, CPU%d: %#016llx\n", + regp->name, boot, cpu, val); + return 1; +} + +static void relax_cpu_ftr_reg(u32 sys_id, int field) +{ + const struct arm64_ftr_bits *ftrp; + struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id); + + if (!regp) + return; + + for (ftrp = regp->ftr_bits; ftrp->width; ftrp++) { + if (ftrp->shift == field) { + regp->strict_mask &= ~arm64_ftr_mask(ftrp); + break; + } + } + + /* Bogus field? */ + WARN_ON(!ftrp->width); +} + +static void lazy_init_32bit_cpu_features(struct cpuinfo_arm64 *info, + struct cpuinfo_arm64 *boot) +{ + static bool boot_cpu_32bit_regs_overridden = false; + + if (!allow_mismatched_32bit_el0 || boot_cpu_32bit_regs_overridden) + return; + + if (id_aa64pfr0_32bit_el0(boot->reg_id_aa64pfr0)) + return; + + boot->aarch32 = info->aarch32; + init_32bit_cpu_features(&boot->aarch32); + boot_cpu_32bit_regs_overridden = true; +} + +static int update_32bit_cpu_features(int cpu, struct cpuinfo_32bit *info, + struct cpuinfo_32bit *boot) +{ + int taint = 0; + u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); + + /* + * If we don't have AArch32 at EL1, then relax the strictness of + * EL1-dependent register fields to avoid spurious sanity check fails. 
+ */ + if (!id_aa64pfr0_32bit_el1(pfr0)) { + relax_cpu_ftr_reg(SYS_ID_ISAR4_EL1, ID_ISAR4_EL1_SMC_SHIFT); + relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_EL1_Virt_frac_SHIFT); + relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_EL1_Sec_frac_SHIFT); + relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_EL1_Virtualization_SHIFT); + relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_EL1_Security_SHIFT); + relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_EL1_ProgMod_SHIFT); + } + + taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu, + info->reg_id_dfr0, boot->reg_id_dfr0); + taint |= check_update_ftr_reg(SYS_ID_DFR1_EL1, cpu, + info->reg_id_dfr1, boot->reg_id_dfr1); + taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu, + info->reg_id_isar0, boot->reg_id_isar0); + taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu, + info->reg_id_isar1, boot->reg_id_isar1); + taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu, + info->reg_id_isar2, boot->reg_id_isar2); + taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu, + info->reg_id_isar3, boot->reg_id_isar3); + taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu, + info->reg_id_isar4, boot->reg_id_isar4); + taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu, + info->reg_id_isar5, boot->reg_id_isar5); + taint |= check_update_ftr_reg(SYS_ID_ISAR6_EL1, cpu, + info->reg_id_isar6, boot->reg_id_isar6); + + /* + * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and + * ACTLR formats could differ across CPUs and therefore would have to + * be trapped for virtualization anyway. + */ + taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu, + info->reg_id_mmfr0, boot->reg_id_mmfr0); + taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu, + info->reg_id_mmfr1, boot->reg_id_mmfr1); + taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu, + info->reg_id_mmfr2, boot->reg_id_mmfr2); + taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu, + info->reg_id_mmfr3, boot->reg_id_mmfr3); + taint |= check_update_ftr_reg(SYS_ID_MMFR4_EL1, cpu, + info->reg_id_mmfr4, boot->reg_id_mmfr4); + taint |= check_update_ftr_reg(SYS_ID_MMFR5_EL1, cpu, + info->reg_id_mmfr5, boot->reg_id_mmfr5); + taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu, + info->reg_id_pfr0, boot->reg_id_pfr0); + taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu, + info->reg_id_pfr1, boot->reg_id_pfr1); + taint |= check_update_ftr_reg(SYS_ID_PFR2_EL1, cpu, + info->reg_id_pfr2, boot->reg_id_pfr2); + taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu, + info->reg_mvfr0, boot->reg_mvfr0); + taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu, + info->reg_mvfr1, boot->reg_mvfr1); + taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu, + info->reg_mvfr2, boot->reg_mvfr2); + + return taint; +} + +/* + * Update system wide CPU feature registers with the values from a + * non-boot CPU. Also performs SANITY checks to make sure that there + * aren't any insane variations from that of the boot CPU. + */ +void update_cpu_features(int cpu, + struct cpuinfo_arm64 *info, + struct cpuinfo_arm64 *boot) +{ + int taint = 0; + + /* + * The kernel can handle differing I-cache policies, but otherwise + * caches should look identical. Userspace JITs will make use of + * *minLine. + */ + taint |= check_update_ftr_reg(SYS_CTR_EL0, cpu, + info->reg_ctr, boot->reg_ctr); + + /* + * Userspace may perform DC ZVA instructions. Mismatched block sizes + * could result in too much or too little memory being zeroed if a + * process is preempted and migrated between CPUs. 
+ */ + taint |= check_update_ftr_reg(SYS_DCZID_EL0, cpu, + info->reg_dczid, boot->reg_dczid); + + /* If different, timekeeping will be broken (especially with KVM) */ + taint |= check_update_ftr_reg(SYS_CNTFRQ_EL0, cpu, + info->reg_cntfrq, boot->reg_cntfrq); + + /* + * The kernel uses self-hosted debug features and expects CPUs to + * support identical debug features. We presently need CTX_CMPs, WRPs, + * and BRPs to be identical. + * ID_AA64DFR1 is currently RES0. + */ + taint |= check_update_ftr_reg(SYS_ID_AA64DFR0_EL1, cpu, + info->reg_id_aa64dfr0, boot->reg_id_aa64dfr0); + taint |= check_update_ftr_reg(SYS_ID_AA64DFR1_EL1, cpu, + info->reg_id_aa64dfr1, boot->reg_id_aa64dfr1); + /* + * Even in big.LITTLE, processors should be identical instruction-set + * wise. + */ + taint |= check_update_ftr_reg(SYS_ID_AA64ISAR0_EL1, cpu, + info->reg_id_aa64isar0, boot->reg_id_aa64isar0); + taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu, + info->reg_id_aa64isar1, boot->reg_id_aa64isar1); + taint |= check_update_ftr_reg(SYS_ID_AA64ISAR2_EL1, cpu, + info->reg_id_aa64isar2, boot->reg_id_aa64isar2); + + /* + * Differing PARange support is fine as long as all peripherals and + * memory are mapped within the minimum PARange of all CPUs. + * Linux should not care about secure memory. + */ + taint |= check_update_ftr_reg(SYS_ID_AA64MMFR0_EL1, cpu, + info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0); + taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu, + info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1); + taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu, + info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2); + taint |= check_update_ftr_reg(SYS_ID_AA64MMFR3_EL1, cpu, + info->reg_id_aa64mmfr3, boot->reg_id_aa64mmfr3); + + taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu, + info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0); + taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu, + info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1); + + taint |= check_update_ftr_reg(SYS_ID_AA64ZFR0_EL1, cpu, + info->reg_id_aa64zfr0, boot->reg_id_aa64zfr0); + + taint |= check_update_ftr_reg(SYS_ID_AA64SMFR0_EL1, cpu, + info->reg_id_aa64smfr0, boot->reg_id_aa64smfr0); + + if (IS_ENABLED(CONFIG_ARM64_SVE) && + id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1))) { + info->reg_zcr = read_zcr_features(); + taint |= check_update_ftr_reg(SYS_ZCR_EL1, cpu, + info->reg_zcr, boot->reg_zcr); + + /* Probe vector lengths */ + if (!system_capabilities_finalized()) + vec_update_vq_map(ARM64_VEC_SVE); + } + + if (IS_ENABLED(CONFIG_ARM64_SME) && + id_aa64pfr1_sme(read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1))) { + info->reg_smcr = read_smcr_features(); + /* + * We mask out SMPS since even if the hardware + * supports priorities the kernel does not at present + * and we block access to them. + */ + info->reg_smidr = read_cpuid(SMIDR_EL1) & ~SMIDR_EL1_SMPS; + taint |= check_update_ftr_reg(SYS_SMCR_EL1, cpu, + info->reg_smcr, boot->reg_smcr); + + /* Probe vector lengths */ + if (!system_capabilities_finalized()) + vec_update_vq_map(ARM64_VEC_SME); + } + + /* + * The kernel uses the LDGM/STGM instructions and the number of tags + * they read/write depends on the GMID_EL1.BS field. Check that the + * value is the same on all CPUs. 
+ */ + if (IS_ENABLED(CONFIG_ARM64_MTE) && + id_aa64pfr1_mte(info->reg_id_aa64pfr1)) { + taint |= check_update_ftr_reg(SYS_GMID_EL1, cpu, + info->reg_gmid, boot->reg_gmid); + } + + /* + * If we don't have AArch32 at all then skip the checks entirely + * as the register values may be UNKNOWN and we're not going to be + * using them for anything. + * + * This relies on a sanitised view of the AArch64 ID registers + * (e.g. SYS_ID_AA64PFR0_EL1), so we call it last. + */ + if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) { + lazy_init_32bit_cpu_features(info, boot); + taint |= update_32bit_cpu_features(cpu, &info->aarch32, + &boot->aarch32); + } + + /* + * Mismatched CPU features are a recipe for disaster. Don't even + * pretend to support them. + */ + if (taint) { + pr_warn_once("Unsupported CPU feature variation detected.\n"); + add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); + } +} + +u64 read_sanitised_ftr_reg(u32 id) +{ + struct arm64_ftr_reg *regp = get_arm64_ftr_reg(id); + + if (!regp) + return 0; + return regp->sys_val; +} +EXPORT_SYMBOL_GPL(read_sanitised_ftr_reg); + +#define read_sysreg_case(r) \ + case r: val = read_sysreg_s(r); break; + +/* + * __read_sysreg_by_encoding() - Used by a STARTING cpu before cpuinfo is populated. + * Read the system register on the current CPU + */ +u64 __read_sysreg_by_encoding(u32 sys_id) +{ + struct arm64_ftr_reg *regp; + u64 val; + + switch (sys_id) { + read_sysreg_case(SYS_ID_PFR0_EL1); + read_sysreg_case(SYS_ID_PFR1_EL1); + read_sysreg_case(SYS_ID_PFR2_EL1); + read_sysreg_case(SYS_ID_DFR0_EL1); + read_sysreg_case(SYS_ID_DFR1_EL1); + read_sysreg_case(SYS_ID_MMFR0_EL1); + read_sysreg_case(SYS_ID_MMFR1_EL1); + read_sysreg_case(SYS_ID_MMFR2_EL1); + read_sysreg_case(SYS_ID_MMFR3_EL1); + read_sysreg_case(SYS_ID_MMFR4_EL1); + read_sysreg_case(SYS_ID_MMFR5_EL1); + read_sysreg_case(SYS_ID_ISAR0_EL1); + read_sysreg_case(SYS_ID_ISAR1_EL1); + read_sysreg_case(SYS_ID_ISAR2_EL1); + read_sysreg_case(SYS_ID_ISAR3_EL1); + read_sysreg_case(SYS_ID_ISAR4_EL1); + read_sysreg_case(SYS_ID_ISAR5_EL1); + read_sysreg_case(SYS_ID_ISAR6_EL1); + read_sysreg_case(SYS_MVFR0_EL1); + read_sysreg_case(SYS_MVFR1_EL1); + read_sysreg_case(SYS_MVFR2_EL1); + + read_sysreg_case(SYS_ID_AA64PFR0_EL1); + read_sysreg_case(SYS_ID_AA64PFR1_EL1); + read_sysreg_case(SYS_ID_AA64ZFR0_EL1); + read_sysreg_case(SYS_ID_AA64SMFR0_EL1); + read_sysreg_case(SYS_ID_AA64DFR0_EL1); + read_sysreg_case(SYS_ID_AA64DFR1_EL1); + read_sysreg_case(SYS_ID_AA64MMFR0_EL1); + read_sysreg_case(SYS_ID_AA64MMFR1_EL1); + read_sysreg_case(SYS_ID_AA64MMFR2_EL1); + read_sysreg_case(SYS_ID_AA64MMFR3_EL1); + read_sysreg_case(SYS_ID_AA64ISAR0_EL1); + read_sysreg_case(SYS_ID_AA64ISAR1_EL1); + read_sysreg_case(SYS_ID_AA64ISAR2_EL1); + + read_sysreg_case(SYS_CNTFRQ_EL0); + read_sysreg_case(SYS_CTR_EL0); + read_sysreg_case(SYS_DCZID_EL0); + + default: + BUG(); + return 0; + } + + regp = get_arm64_ftr_reg(sys_id); + if (regp) { + val &= ~regp->override->mask; + val |= (regp->override->val & regp->override->mask); + } + + return val; +} + +#include + +static bool +has_always(const struct arm64_cpu_capabilities *entry, int scope) +{ + return true; +} + +static bool +feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry) +{ + int val = cpuid_feature_extract_field_width(reg, entry->field_pos, + entry->field_width, + entry->sign); + + return val >= entry->min_field_value; +} + +static u64 +read_scoped_sysreg(const struct arm64_cpu_capabilities *entry, int scope) +{ + WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible()); 
+ if (scope == SCOPE_SYSTEM) + return read_sanitised_ftr_reg(entry->sys_reg); + else + return __read_sysreg_by_encoding(entry->sys_reg); +} + +static bool +has_user_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope) +{ + int mask; + struct arm64_ftr_reg *regp; + u64 val = read_scoped_sysreg(entry, scope); + + regp = get_arm64_ftr_reg(entry->sys_reg); + if (!regp) + return false; + + mask = cpuid_feature_extract_unsigned_field_width(regp->user_mask, + entry->field_pos, + entry->field_width); + if (!mask) + return false; + + return feature_matches(val, entry); +} + +static bool +has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope) +{ + u64 val = read_scoped_sysreg(entry, scope); + return feature_matches(val, entry); +} + +const struct cpumask *system_32bit_el0_cpumask(void) +{ + if (!system_supports_32bit_el0()) + return cpu_none_mask; + + if (static_branch_unlikely(&arm64_mismatched_32bit_el0)) + return cpu_32bit_el0_mask; + + return cpu_possible_mask; +} + +static int __init parse_32bit_el0_param(char *str) +{ + allow_mismatched_32bit_el0 = true; + return 0; +} +early_param("allow_mismatched_32bit_el0", parse_32bit_el0_param); + +static ssize_t aarch32_el0_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + const struct cpumask *mask = system_32bit_el0_cpumask(); + + return sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(mask)); +} +static const DEVICE_ATTR_RO(aarch32_el0); + +static int __init aarch32_el0_sysfs_init(void) +{ + struct device *dev_root; + int ret = 0; + + if (!allow_mismatched_32bit_el0) + return 0; + + dev_root = bus_get_dev_root(&cpu_subsys); + if (dev_root) { + ret = device_create_file(dev_root, &dev_attr_aarch32_el0); + put_device(dev_root); + } + return ret; +} +device_initcall(aarch32_el0_sysfs_init); + +static bool has_32bit_el0(const struct arm64_cpu_capabilities *entry, int scope) +{ + if (!has_cpuid_feature(entry, scope)) + return allow_mismatched_32bit_el0; + + if (scope == SCOPE_SYSTEM) + pr_info("detected: 32-bit EL0 Support\n"); + + return true; +} + +static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry, int scope) +{ + bool has_sre; + + if (!has_cpuid_feature(entry, scope)) + return false; + + has_sre = gic_enable_sre(); + if (!has_sre) + pr_warn_once("%s present but disabled by higher exception level\n", + entry->desc); + + return has_sre; +} + +static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int __unused) +{ + u32 midr = read_cpuid_id(); + + /* Cavium ThunderX pass 1.x and 2.x */ + return midr_is_cpu_model_range(midr, MIDR_THUNDERX, + MIDR_CPU_VAR_REV(0, 0), + MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK)); +} + +static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unused) +{ + u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); + + return cpuid_feature_extract_signed_field(pfr0, + ID_AA64PFR0_EL1_FP_SHIFT) < 0; +} + +static bool has_cache_idc(const struct arm64_cpu_capabilities *entry, + int scope) +{ + u64 ctr; + + if (scope == SCOPE_SYSTEM) + ctr = arm64_ftr_reg_ctrel0.sys_val; + else + ctr = read_cpuid_effective_cachetype(); + + return ctr & BIT(CTR_EL0_IDC_SHIFT); +} + +static void cpu_emulate_effective_ctr(const struct arm64_cpu_capabilities *__unused) +{ + /* + * If the CPU exposes raw CTR_EL0.IDC = 0, while effectively + * CTR_EL0.IDC = 1 (from CLIDR values), we need to trap accesses + * to the CTR_EL0 on this CPU and emulate it with the real/safe + * value. 
+ */ + if (!(read_cpuid_cachetype() & BIT(CTR_EL0_IDC_SHIFT))) + sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0); +} + +static bool has_cache_dic(const struct arm64_cpu_capabilities *entry, + int scope) +{ + u64 ctr; + + if (scope == SCOPE_SYSTEM) + ctr = arm64_ftr_reg_ctrel0.sys_val; + else + ctr = read_cpuid_cachetype(); + + return ctr & BIT(CTR_EL0_DIC_SHIFT); +} + +static bool __maybe_unused +has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope) +{ + /* + * Kdump isn't guaranteed to power-off all secondary CPUs, CNP + * may share TLB entries with a CPU stuck in the crashed + * kernel. + */ + if (is_kdump_kernel()) + return false; + + if (cpus_have_const_cap(ARM64_WORKAROUND_NVIDIA_CARMEL_CNP)) + return false; + + return has_cpuid_feature(entry, scope); +} + +/* + * This check is triggered during the early boot before the cpufeature + * is initialised. Checking the status on the local CPU allows the boot + * CPU to detect the need for non-global mappings and thus avoid a + * pagetable re-write after all the CPUs are booted. This check will + * anyway be run on individual CPUs, allowing us to get a consistent + * state once the SMP CPUs are up and thus make the switch to non-global + * mappings if required. + */ +bool kaslr_requires_kpti(void) +{ + if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE)) + return false; + + /* + * E0PD does a similar job to KPTI so can be used instead + * where available. + */ + if (IS_ENABLED(CONFIG_ARM64_E0PD)) { + u64 mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1); + if (cpuid_feature_extract_unsigned_field(mmfr2, + ID_AA64MMFR2_EL1_E0PD_SHIFT)) + return false; + } + + /* + * Systems affected by Cavium erratum 27456 are incompatible + * with KPTI. + */ + if (IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) { + extern const struct midr_range cavium_erratum_27456_cpus[]; + + if (is_midr_in_range_list(read_cpuid_id(), + cavium_erratum_27456_cpus)) + return false; + } + + return kaslr_enabled(); +} + +static bool __meltdown_safe = true; +static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */ + +static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, + int scope) +{ + /* List of CPUs that are not vulnerable and don't need KPTI */ + static const struct midr_range kpti_safe_list[] = { + MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2), + MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN), + MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A35), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A53), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A55), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), + MIDR_ALL_VERSIONS(MIDR_HISI_TSV110), + MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL), + MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_GOLD), + MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER), + MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER), + MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER), + { /* sentinel */ } + }; + char const *str = "kpti command line option"; + bool meltdown_safe; + + meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list); + + /* Defer to CPU feature registers */ + if (has_cpuid_feature(entry, scope)) + meltdown_safe = true; + + if (!meltdown_safe) + __meltdown_safe = false; + + /* + * For reasons that aren't entirely clear, enabling KPTI on Cavium + * ThunderX leads to apparent I-cache corruption of kernel text, which + * ends as well as you might imagine. Don't even try.
We cannot rely + * on the cpus_have_*cap() helpers here to detect the CPU erratum + * because cpucap detection order may change. However, since we know + * affected CPUs are always in a homogeneous configuration, it is + * safe to rely on this_cpu_has_cap() here. + */ + if (this_cpu_has_cap(ARM64_WORKAROUND_CAVIUM_27456)) { + str = "ARM64_WORKAROUND_CAVIUM_27456"; + __kpti_forced = -1; + } + + /* Useful for KASLR robustness */ + if (kaslr_requires_kpti()) { + if (!__kpti_forced) { + str = "KASLR"; + __kpti_forced = 1; + } + } + + if (cpu_mitigations_off() && !__kpti_forced) { + str = "mitigations=off"; + __kpti_forced = -1; + } + + if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) { + pr_info_once("kernel page table isolation disabled by kernel configuration\n"); + return false; + } + + /* Forced? */ + if (__kpti_forced) { + pr_info_once("kernel page table isolation forced %s by %s\n", + __kpti_forced > 0 ? "ON" : "OFF", str); + return __kpti_forced > 0; + } + + return !meltdown_safe; +} + +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +#define KPTI_NG_TEMP_VA (-(1UL << PMD_SHIFT)) + +extern +void create_kpti_ng_temp_pgd(pgd_t *pgdir, phys_addr_t phys, unsigned long virt, + phys_addr_t size, pgprot_t prot, + phys_addr_t (*pgtable_alloc)(int), int flags); + +static phys_addr_t kpti_ng_temp_alloc; + +static phys_addr_t kpti_ng_pgd_alloc(int shift) +{ + kpti_ng_temp_alloc -= PAGE_SIZE; + return kpti_ng_temp_alloc; +} + +static void +kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused) +{ + typedef void (kpti_remap_fn)(int, int, phys_addr_t, unsigned long); + extern kpti_remap_fn idmap_kpti_install_ng_mappings; + kpti_remap_fn *remap_fn; + + int cpu = smp_processor_id(); + int levels = CONFIG_PGTABLE_LEVELS; + int order = order_base_2(levels); + u64 kpti_ng_temp_pgd_pa = 0; + pgd_t *kpti_ng_temp_pgd; + u64 alloc = 0; + + if (__this_cpu_read(this_cpu_vector) == vectors) { + const char *v = arm64_get_bp_hardening_vector(EL1_VECTOR_KPTI); + + __this_cpu_write(this_cpu_vector, v); + } + + /* + * We don't need to rewrite the page-tables if either we've done + * it already or we have KASLR enabled and therefore have not + * created any global mappings at all. + */ + if (arm64_use_ng_mappings) + return; + + remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings); + + if (!cpu) { + alloc = __get_free_pages(GFP_ATOMIC | __GFP_ZERO, order); + kpti_ng_temp_pgd = (pgd_t *)(alloc + (levels - 1) * PAGE_SIZE); + kpti_ng_temp_alloc = kpti_ng_temp_pgd_pa = __pa(kpti_ng_temp_pgd); + + // + // Create a minimal page table hierarchy that permits us to map + // the swapper page tables temporarily as we traverse them. + // + // The physical pages are laid out as follows: + // + // +--------+-/-------+-/------ +-\\--------+ + // : PTE[] : | PMD[] : | PUD[] : || PGD[] : + // +--------+-\-------+-\------ +-//--------+ + // ^ + // The first page is mapped into this hierarchy at a PMD_SHIFT + // aligned virtual address, so that we can manipulate the PTE + // level entries while the mapping is active. The first entry + // covers the PTE[] page itself, the remaining entries are free + // to be used as a ad-hoc fixmap. 
+ // + create_kpti_ng_temp_pgd(kpti_ng_temp_pgd, __pa(alloc), + KPTI_NG_TEMP_VA, PAGE_SIZE, PAGE_KERNEL, + kpti_ng_pgd_alloc, 0); + } + + cpu_install_idmap(); + remap_fn(cpu, num_online_cpus(), kpti_ng_temp_pgd_pa, KPTI_NG_TEMP_VA); + cpu_uninstall_idmap(); + + if (!cpu) { + free_pages(alloc, order); + arm64_use_ng_mappings = true; + } +} +#else +static void +kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused) +{ +} +#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ + +static int __init parse_kpti(char *str) +{ + bool enabled; + int ret = kstrtobool(str, &enabled); + + if (ret) + return ret; + + __kpti_forced = enabled ? 1 : -1; + return 0; +} +early_param("kpti", parse_kpti); + +#ifdef CONFIG_ARM64_HW_AFDBM +static inline void __cpu_enable_hw_dbm(void) +{ + u64 tcr = read_sysreg(tcr_el1) | TCR_HD; + + write_sysreg(tcr, tcr_el1); + isb(); + local_flush_tlb_all(); +} + +static bool cpu_has_broken_dbm(void) +{ + /* List of CPUs which have broken DBM support. */ + static const struct midr_range cpus[] = { +#ifdef CONFIG_ARM64_ERRATUM_1024718 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A55), + /* Kryo4xx Silver (rdpe => r1p0) */ + MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe), +#endif +#ifdef CONFIG_ARM64_ERRATUM_2051678 + MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2), +#endif + {}, + }; + + return is_midr_in_range_list(read_cpuid_id(), cpus); +} + +static bool cpu_can_use_dbm(const struct arm64_cpu_capabilities *cap) +{ + return has_cpuid_feature(cap, SCOPE_LOCAL_CPU) && + !cpu_has_broken_dbm(); +} + +static void cpu_enable_hw_dbm(struct arm64_cpu_capabilities const *cap) +{ + if (cpu_can_use_dbm(cap)) + __cpu_enable_hw_dbm(); +} + +static bool has_hw_dbm(const struct arm64_cpu_capabilities *cap, + int __unused) +{ + static bool detected = false; + /* + * DBM is a non-conflicting feature. i.e, the kernel can safely + * run a mix of CPUs with and without the feature. So, we + * unconditionally enable the capability to allow any late CPU + * to use the feature. We only enable the control bits on the + * CPU, if it actually supports. + * + * We have to make sure we print the "feature" detection only + * when at least one CPU actually uses it. So check if this CPU + * can actually use it and print the message exactly once. + * + * This is safe as all CPUs (including secondary CPUs - due to the + * LOCAL_CPU scope - and the hotplugged CPUs - via verification) + * goes through the "matches" check exactly once. Also if a CPU + * matches the criteria, it is guaranteed that the CPU will turn + * the DBM on, as the capability is unconditionally enabled. + */ + if (!detected && cpu_can_use_dbm(cap)) { + detected = true; + pr_info("detected: Hardware dirty bit management\n"); + } + + return true; +} + +#endif + +#ifdef CONFIG_ARM64_AMU_EXTN + +/* + * The "amu_cpus" cpumask only signals that the CPU implementation for the + * flagged CPUs supports the Activity Monitors Unit (AMU) but does not provide + * information regarding all the events that it supports. When a CPU bit is + * set in the cpumask, the user of this feature can only rely on the presence + * of the 4 fixed counters for that CPU. But this does not guarantee that the + * counters are enabled or access to these counters is enabled by code + * executed at higher exception levels (firmware). 
+ */ +static struct cpumask amu_cpus __read_mostly; + +bool cpu_has_amu_feat(int cpu) +{ + return cpumask_test_cpu(cpu, &amu_cpus); +} + +int get_cpu_with_amu_feat(void) +{ + return cpumask_any(&amu_cpus); +} + +static void cpu_amu_enable(struct arm64_cpu_capabilities const *cap) +{ + if (has_cpuid_feature(cap, SCOPE_LOCAL_CPU)) { + pr_info("detected CPU%d: Activity Monitors Unit (AMU)\n", + smp_processor_id()); + cpumask_set_cpu(smp_processor_id(), &amu_cpus); + + /* 0 reference values signal broken/disabled counters */ + if (!this_cpu_has_cap(ARM64_WORKAROUND_2457168)) + update_freq_counters_refs(); + } +} + +static bool has_amu(const struct arm64_cpu_capabilities *cap, + int __unused) +{ + /* + * The AMU extension is a non-conflicting feature: the kernel can + * safely run a mix of CPUs with and without support for the + * activity monitors extension. Therefore, unconditionally enable + * the capability to allow any late CPU to use the feature. + * + * With this feature unconditionally enabled, the cpu_enable + * function will be called for all CPUs that match the criteria, + * including secondary and hotplugged, marking this feature as + * present on that respective CPU. The enable function will also + * print a detection message. + */ + + return true; +} +#else +int get_cpu_with_amu_feat(void) +{ + return nr_cpu_ids; +} +#endif + +static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused) +{ + return is_kernel_in_hyp_mode(); +} + +static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused) +{ + /* + * Copy register values that aren't redirected by hardware. + * + * Before code patching, we only set tpidr_el1, all CPUs need to copy + * this value to tpidr_el2 before we patch the code. Once we've done + * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to + * do anything here. + */ + if (!alternative_is_applied(ARM64_HAS_VIRT_HOST_EXTN)) + write_sysreg(read_sysreg(tpidr_el1), tpidr_el2); +} + +static bool has_nested_virt_support(const struct arm64_cpu_capabilities *cap, + int scope) +{ + if (kvm_get_mode() != KVM_MODE_NV) + return false; + + if (!has_cpuid_feature(cap, scope)) { + pr_warn("unavailable: %s\n", cap->desc); + return false; + } + + return true; +} + +static bool hvhe_possible(const struct arm64_cpu_capabilities *entry, + int __unused) +{ + u64 val; + + val = read_sysreg(id_aa64mmfr1_el1); + if (!cpuid_feature_extract_unsigned_field(val, ID_AA64MMFR1_EL1_VH_SHIFT)) + return false; + + val = arm64_sw_feature_override.val & arm64_sw_feature_override.mask; + return cpuid_feature_extract_unsigned_field(val, ARM64_SW_FEATURE_OVERRIDE_HVHE); +} + +#ifdef CONFIG_ARM64_PAN +static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused) +{ + /* + * We modify PSTATE. This won't work from irq context as the PSTATE + * is discarded once we return from the exception. + */ + WARN_ON_ONCE(in_interrupt()); + + sysreg_clear_set(sctlr_el1, SCTLR_EL1_SPAN, 0); + set_pstate_pan(1); +} +#endif /* CONFIG_ARM64_PAN */ + +#ifdef CONFIG_ARM64_RAS_EXTN +static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused) +{ + /* Firmware may have left a deferred SError in this register. 
*/ + write_sysreg_s(0, SYS_DISR_EL1); +} +#endif /* CONFIG_ARM64_RAS_EXTN */ + +#ifdef CONFIG_ARM64_PTR_AUTH +static bool has_address_auth_cpucap(const struct arm64_cpu_capabilities *entry, int scope) +{ + int boot_val, sec_val; + + /* We don't expect to be called with SCOPE_SYSTEM */ + WARN_ON(scope == SCOPE_SYSTEM); + /* + * The ptr-auth feature levels are not intercompatible with lower + * levels. Hence we must match ptr-auth feature level of the secondary + * CPUs with that of the boot CPU. The level of boot cpu is fetched + * from the sanitised register whereas direct register read is done for + * the secondary CPUs. + * The sanitised feature state is guaranteed to match that of the + * boot CPU as a mismatched secondary CPU is parked before it gets + * a chance to update the state, with the capability. + */ + boot_val = cpuid_feature_extract_field(read_sanitised_ftr_reg(entry->sys_reg), + entry->field_pos, entry->sign); + if (scope & SCOPE_BOOT_CPU) + return boot_val >= entry->min_field_value; + /* Now check for the secondary CPUs with SCOPE_LOCAL_CPU scope */ + sec_val = cpuid_feature_extract_field(__read_sysreg_by_encoding(entry->sys_reg), + entry->field_pos, entry->sign); + return (sec_val >= entry->min_field_value) && (sec_val == boot_val); +} + +static bool has_address_auth_metacap(const struct arm64_cpu_capabilities *entry, + int scope) +{ + bool api = has_address_auth_cpucap(cpucap_ptrs[ARM64_HAS_ADDRESS_AUTH_IMP_DEF], scope); + bool apa = has_address_auth_cpucap(cpucap_ptrs[ARM64_HAS_ADDRESS_AUTH_ARCH_QARMA5], scope); + bool apa3 = has_address_auth_cpucap(cpucap_ptrs[ARM64_HAS_ADDRESS_AUTH_ARCH_QARMA3], scope); + + return apa || apa3 || api; +} + +static bool has_generic_auth(const struct arm64_cpu_capabilities *entry, + int __unused) +{ + bool gpi = __system_matches_cap(ARM64_HAS_GENERIC_AUTH_IMP_DEF); + bool gpa = __system_matches_cap(ARM64_HAS_GENERIC_AUTH_ARCH_QARMA5); + bool gpa3 = __system_matches_cap(ARM64_HAS_GENERIC_AUTH_ARCH_QARMA3); + + return gpa || gpa3 || gpi; +} +#endif /* CONFIG_ARM64_PTR_AUTH */ + +#ifdef CONFIG_ARM64_E0PD +static void cpu_enable_e0pd(struct arm64_cpu_capabilities const *cap) +{ + if (this_cpu_has_cap(ARM64_HAS_E0PD)) + sysreg_clear_set(tcr_el1, 0, TCR_E0PD1); +} +#endif /* CONFIG_ARM64_E0PD */ + +#ifdef CONFIG_ARM64_PSEUDO_NMI +static bool enable_pseudo_nmi; + +static int __init early_enable_pseudo_nmi(char *p) +{ + return kstrtobool(p, &enable_pseudo_nmi); +} +early_param("irqchip.gicv3_pseudo_nmi", early_enable_pseudo_nmi); + +static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry, + int scope) +{ + /* + * ARM64_HAS_GIC_CPUIF_SYSREGS has a lower index, and is a boot CPU + * feature, so will be detected earlier. + */ + BUILD_BUG_ON(ARM64_HAS_GIC_PRIO_MASKING <= ARM64_HAS_GIC_CPUIF_SYSREGS); + if (!cpus_have_cap(ARM64_HAS_GIC_CPUIF_SYSREGS)) + return false; + + return enable_pseudo_nmi; +} + +static bool has_gic_prio_relaxed_sync(const struct arm64_cpu_capabilities *entry, + int scope) +{ + /* + * If we're not using priority masking then we won't be poking PMR_EL1, + * and there's no need to relax synchronization of writes to it, and + * ICC_CTLR_EL1 might not be accessible and we must avoid reads from + * that. + * + * ARM64_HAS_GIC_PRIO_MASKING has a lower index, and is a boot CPU + * feature, so will be detected earlier. 
+ */ + BUILD_BUG_ON(ARM64_HAS_GIC_PRIO_RELAXED_SYNC <= ARM64_HAS_GIC_PRIO_MASKING); + if (!cpus_have_cap(ARM64_HAS_GIC_PRIO_MASKING)) + return false; + + /* + * When Priority Mask Hint Enable (PMHE) == 0b0, PMR is not used as a + * hint for interrupt distribution, a DSB is not necessary when + * unmasking IRQs via PMR, and we can relax the barrier to a NOP. + * + * Linux itself doesn't use 1:N distribution, so has no need to + * set PMHE. The only reason to have it set is if EL3 requires it + * (and we can't change it). + */ + return (gic_read_ctlr() & ICC_CTLR_EL1_PMHE_MASK) == 0; +} +#endif + +#ifdef CONFIG_ARM64_BTI +static void bti_enable(const struct arm64_cpu_capabilities *__unused) +{ + /* + * Use of X16/X17 for tail-calls and trampolines that jump to + * function entry points using BR is a requirement for + * marking binaries with GNU_PROPERTY_AARCH64_FEATURE_1_BTI. + * So, be strict and forbid other BRs using other registers to + * jump onto a PACIxSP instruction: + */ + sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_BT0 | SCTLR_EL1_BT1); + isb(); +} +#endif /* CONFIG_ARM64_BTI */ + +#ifdef CONFIG_ARM64_MTE +static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap) +{ + sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_ATA | SCTLR_EL1_ATA0); + + mte_cpu_setup(); + + /* + * Clear the tags in the zero page. This needs to be done via the + * linear map which has the Tagged attribute. + */ + if (try_page_mte_tagging(ZERO_PAGE(0))) { + mte_clear_page_tags(lm_alias(empty_zero_page)); + set_page_mte_tagged(ZERO_PAGE(0)); + } + + kasan_init_hw_tags_cpu(); +} +#endif /* CONFIG_ARM64_MTE */ + +static void elf_hwcap_fixup(void) +{ +#ifdef CONFIG_ARM64_ERRATUM_1742098 + if (cpus_have_const_cap(ARM64_WORKAROUND_1742098)) + compat_elf_hwcap2 &= ~COMPAT_HWCAP2_AES; +#endif /* ARM64_ERRATUM_1742098 */ +} + +#ifdef CONFIG_KVM +static bool is_kvm_protected_mode(const struct arm64_cpu_capabilities *entry, int __unused) +{ + return kvm_get_mode() == KVM_MODE_PROTECTED; +} +#endif /* CONFIG_KVM */ + +static void cpu_trap_el0_impdef(const struct arm64_cpu_capabilities *__unused) +{ + sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_TIDCP); +} + +static void cpu_enable_dit(const struct arm64_cpu_capabilities *__unused) +{ + set_pstate_dit(1); +} + +static void cpu_enable_mops(const struct arm64_cpu_capabilities *__unused) +{ + sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_MSCEn); +} + +/* Internal helper functions to match cpu capability type */ +static bool +cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap) +{ + return !!(cap->type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU); +} + +static bool +cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap) +{ + return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU); +} + +static bool +cpucap_panic_on_conflict(const struct arm64_cpu_capabilities *cap) +{ + return !!(cap->type & ARM64_CPUCAP_PANIC_ON_CONFLICT); +} + +static const struct arm64_cpu_capabilities arm64_features[] = { + { + .capability = ARM64_ALWAYS_BOOT, + .type = ARM64_CPUCAP_BOOT_CPU_FEATURE, + .matches = has_always, + }, + { + .capability = ARM64_ALWAYS_SYSTEM, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_always, + }, + { + .desc = "GIC system register CPU interface", + .capability = ARM64_HAS_GIC_CPUIF_SYSREGS, + .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, + .matches = has_useable_gicv3_cpuif, + ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, GIC, IMP) + }, + { + .desc = "Enhanced Counter Virtualization", + .capability = ARM64_HAS_ECV, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + 
.matches = has_cpuid_feature, + ARM64_CPUID_FIELDS(ID_AA64MMFR0_EL1, ECV, IMP) + }, + { + .desc = "Enhanced Counter Virtualization (CNTPOFF)", + .capability = ARM64_HAS_ECV_CNTPOFF, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_cpuid_feature, + ARM64_CPUID_FIELDS(ID_AA64MMFR0_EL1, ECV, CNTPOFF) + }, +#ifdef CONFIG_ARM64_PAN + { + .desc = "Privileged Access Never", + .capability = ARM64_HAS_PAN, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_cpuid_feature, + .cpu_enable = cpu_enable_pan, + ARM64_CPUID_FIELDS(ID_AA64MMFR1_EL1, PAN, IMP) + }, +#endif /* CONFIG_ARM64_PAN */ +#ifdef CONFIG_ARM64_EPAN + { + .desc = "Enhanced Privileged Access Never", + .capability = ARM64_HAS_EPAN, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_cpuid_feature, + ARM64_CPUID_FIELDS(ID_AA64MMFR1_EL1, PAN, PAN3) + }, +#endif /* CONFIG_ARM64_EPAN */ +#ifdef CONFIG_ARM64_LSE_ATOMICS + { + .desc = "LSE atomic instructions", + .capability = ARM64_HAS_LSE_ATOMICS, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_cpuid_feature, + ARM64_CPUID_FIELDS(ID_AA64ISAR0_EL1, ATOMIC, IMP) + }, +#endif /* CONFIG_ARM64_LSE_ATOMICS */ + { + .desc = "Software prefetching using PRFM", + .capability = ARM64_HAS_NO_HW_PREFETCH, + .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE, + .matches = has_no_hw_prefetch, + }, + { + .desc = "Virtualization Host Extensions", + .capability = ARM64_HAS_VIRT_HOST_EXTN, + .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, + .matches = runs_at_el2, + .cpu_enable = cpu_copy_el2regs, + }, + { + .desc = "Nested Virtualization Support", + .capability = ARM64_HAS_NESTED_VIRT, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_nested_virt_support, + ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, NV, IMP) + }, + { + .capability = ARM64_HAS_32BIT_EL0_DO_NOT_USE, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_32bit_el0, + ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, EL0, AARCH32) + }, +#ifdef CONFIG_KVM + { + .desc = "32-bit EL1 Support", + .capability = ARM64_HAS_32BIT_EL1, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_cpuid_feature, + ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, EL1, AARCH32) + }, + { + .desc = "Protected KVM", + .capability = ARM64_KVM_PROTECTED_MODE, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = is_kvm_protected_mode, + }, + { + .desc = "HCRX_EL2 register", + .capability = ARM64_HAS_HCX, + .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, + .matches = has_cpuid_feature, + ARM64_CPUID_FIELDS(ID_AA64MMFR1_EL1, HCX, IMP) + }, +#endif + { + .desc = "Kernel page table isolation (KPTI)", + .capability = ARM64_UNMAP_KERNEL_AT_EL0, + .type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE, + .cpu_enable = kpti_install_ng_mappings, + .matches = unmap_kernel_at_el0, + /* + * The ID feature fields below are used to indicate that + * the CPU doesn't need KPTI. See unmap_kernel_at_el0 for + * more details. 
+ */ + ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, CSV3, IMP) + }, + { + /* FP/SIMD is not implemented */ + .capability = ARM64_HAS_NO_FPSIMD, + .type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE, + .min_field_value = 0, + .matches = has_no_fpsimd, + }, +#ifdef CONFIG_ARM64_PMEM + { + .desc = "Data cache clean to Point of Persistence", + .capability = ARM64_HAS_DCPOP, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_cpuid_feature, + ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, DPB, IMP) + }, + { + .desc = "Data cache clean to Point of Deep Persistence", + .capability = ARM64_HAS_DCPODP, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_cpuid_feature, + ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, DPB, DPB2) + }, +#endif +#ifdef CONFIG_ARM64_SVE + { + .desc = "Scalable Vector Extension", + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .capability = ARM64_SVE, + .cpu_enable = sve_kernel_enable, + .matches = has_cpuid_feature, + ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, SVE, IMP) + }, +#endif /* CONFIG_ARM64_SVE */ +#ifdef CONFIG_ARM64_RAS_EXTN + { + .desc = "RAS Extension Support", + .capability = ARM64_HAS_RAS_EXTN, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_cpuid_feature, + .cpu_enable = cpu_clear_disr, + ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, RAS, IMP) + }, +#endif /* CONFIG_ARM64_RAS_EXTN */ +#ifdef CONFIG_ARM64_AMU_EXTN + { + /* + * The feature is enabled by default if CONFIG_ARM64_AMU_EXTN=y. + * Therefore, don't provide .desc as we don't want the detection + * message to be shown until at least one CPU is detected to + * support the feature. + */ + .capability = ARM64_HAS_AMU_EXTN, + .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE, + .matches = has_amu, + .cpu_enable = cpu_amu_enable, + ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, AMU, IMP) + }, +#endif /* CONFIG_ARM64_AMU_EXTN */ + { + .desc = "Data cache clean to the PoU not required for I/D coherence", + .capability = ARM64_HAS_CACHE_IDC, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_cache_idc, + .cpu_enable = cpu_emulate_effective_ctr, + }, + { + .desc = "Instruction cache invalidation not required for I/D coherence", + .capability = ARM64_HAS_CACHE_DIC, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_cache_dic, + }, + { + .desc = "Stage-2 Force Write-Back", + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .capability = ARM64_HAS_STAGE2_FWB, + .matches = has_cpuid_feature, + ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, FWB, IMP) + }, + { + .desc = "ARMv8.4 Translation Table Level", + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .capability = ARM64_HAS_ARMv8_4_TTL, + .matches = has_cpuid_feature, + ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, TTL, IMP) + }, + { + .desc = "TLB range maintenance instructions", + .capability = ARM64_HAS_TLB_RANGE, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_cpuid_feature, + ARM64_CPUID_FIELDS(ID_AA64ISAR0_EL1, TLB, RANGE) + }, +#ifdef CONFIG_ARM64_HW_AFDBM + { + /* + * Since we turn this on always, we don't want the user to + * think that the feature is available when it may not be. + * So hide the description. 
+ * + * .desc = "Hardware pagetable Dirty Bit Management", + * + */ + .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE, + .capability = ARM64_HW_DBM, + .matches = has_hw_dbm, + .cpu_enable = cpu_enable_hw_dbm, + ARM64_CPUID_FIELDS(ID_AA64MMFR1_EL1, HAFDBS, DBM) + }, +#endif + { + .desc = "CRC32 instructions", + .capability = ARM64_HAS_CRC32, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_cpuid_feature, + ARM64_CPUID_FIELDS(ID_AA64ISAR0_EL1, CRC32, IMP) + }, + { + .desc = "Speculative Store Bypassing Safe (SSBS)", + .capability = ARM64_SSBS, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_cpuid_feature, + ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, SSBS, IMP) + }, +#ifdef CONFIG_ARM64_CNP + { + .desc = "Common not Private translations", + .capability = ARM64_HAS_CNP, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_useable_cnp, + .cpu_enable = cpu_enable_cnp, + ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, CnP, IMP) + }, +#endif + { + .desc = "Speculation barrier (SB)", + .capability = ARM64_HAS_SB, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_cpuid_feature, + ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, SB, IMP) + }, +#ifdef CONFIG_ARM64_PTR_AUTH + { + .desc = "Address authentication (architected QARMA5 algorithm)", + .capability = ARM64_HAS_ADDRESS_AUTH_ARCH_QARMA5, + .type = ARM64_CPUCAP_BOOT_CPU_FEATURE, + .matches = has_address_auth_cpucap, + ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, APA, PAuth) + }, + { + .desc = "Address authentication (architected QARMA3 algorithm)", + .capability = ARM64_HAS_ADDRESS_AUTH_ARCH_QARMA3, + .type = ARM64_CPUCAP_BOOT_CPU_FEATURE, + .matches = has_address_auth_cpucap, + ARM64_CPUID_FIELDS(ID_AA64ISAR2_EL1, APA3, PAuth) + }, + { + .desc = "Address authentication (IMP DEF algorithm)", + .capability = ARM64_HAS_ADDRESS_AUTH_IMP_DEF, + .type = ARM64_CPUCAP_BOOT_CPU_FEATURE, + .matches = has_address_auth_cpucap, + ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, API, PAuth) + }, + { + .capability = ARM64_HAS_ADDRESS_AUTH, + .type = ARM64_CPUCAP_BOOT_CPU_FEATURE, + .matches = has_address_auth_metacap, + }, + { + .desc = "Generic authentication (architected QARMA5 algorithm)", + .capability = ARM64_HAS_GENERIC_AUTH_ARCH_QARMA5, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_cpuid_feature, + ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, GPA, IMP) + }, + { + .desc = "Generic authentication (architected QARMA3 algorithm)", + .capability = ARM64_HAS_GENERIC_AUTH_ARCH_QARMA3, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_cpuid_feature, + ARM64_CPUID_FIELDS(ID_AA64ISAR2_EL1, GPA3, IMP) + }, + { + .desc = "Generic authentication (IMP DEF algorithm)", + .capability = ARM64_HAS_GENERIC_AUTH_IMP_DEF, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_cpuid_feature, + ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, GPI, IMP) + }, + { + .capability = ARM64_HAS_GENERIC_AUTH, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_generic_auth, + }, +#endif /* CONFIG_ARM64_PTR_AUTH */ +#ifdef CONFIG_ARM64_PSEUDO_NMI + { + /* + * Depends on having GICv3 + */ + .desc = "IRQ priority masking", + .capability = ARM64_HAS_GIC_PRIO_MASKING, + .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, + .matches = can_use_gic_priorities, + }, + { + /* + * Depends on ARM64_HAS_GIC_PRIO_MASKING + */ + .capability = ARM64_HAS_GIC_PRIO_RELAXED_SYNC, + .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, + .matches = has_gic_prio_relaxed_sync, + }, +#endif +#ifdef CONFIG_ARM64_E0PD + { + .desc = "E0PD", + .capability = ARM64_HAS_E0PD, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .cpu_enable = cpu_enable_e0pd, + .matches 
= has_cpuid_feature, + ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, E0PD, IMP) + }, +#endif + { + .desc = "Random Number Generator", + .capability = ARM64_HAS_RNG, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_cpuid_feature, + ARM64_CPUID_FIELDS(ID_AA64ISAR0_EL1, RNDR, IMP) + }, +#ifdef CONFIG_ARM64_BTI + { + .desc = "Branch Target Identification", + .capability = ARM64_BTI, +#ifdef CONFIG_ARM64_BTI_KERNEL + .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, +#else + .type = ARM64_CPUCAP_SYSTEM_FEATURE, +#endif + .matches = has_cpuid_feature, + .cpu_enable = bti_enable, + ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, BT, IMP) + }, +#endif +#ifdef CONFIG_ARM64_MTE + { + .desc = "Memory Tagging Extension", + .capability = ARM64_MTE, + .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, + .matches = has_cpuid_feature, + .cpu_enable = cpu_enable_mte, + ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, MTE, MTE2) + }, + { + .desc = "Asymmetric MTE Tag Check Fault", + .capability = ARM64_MTE_ASYMM, + .type = ARM64_CPUCAP_BOOT_CPU_FEATURE, + .matches = has_cpuid_feature, + ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, MTE, MTE3) + }, +#endif /* CONFIG_ARM64_MTE */ + { + .desc = "RCpc load-acquire (LDAPR)", + .capability = ARM64_HAS_LDAPR, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_cpuid_feature, + ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, LRCPC, IMP) + }, + { + .desc = "Fine Grained Traps", + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .capability = ARM64_HAS_FGT, + .matches = has_cpuid_feature, + ARM64_CPUID_FIELDS(ID_AA64MMFR0_EL1, FGT, IMP) + }, +#ifdef CONFIG_ARM64_SME + { + .desc = "Scalable Matrix Extension", + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .capability = ARM64_SME, + .matches = has_cpuid_feature, + .cpu_enable = sme_kernel_enable, + ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, SME, IMP) + }, + /* FA64 should be sorted after the base SME capability */ + { + .desc = "FA64", + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .capability = ARM64_SME_FA64, + .matches = has_cpuid_feature, + .cpu_enable = fa64_kernel_enable, + ARM64_CPUID_FIELDS(ID_AA64SMFR0_EL1, FA64, IMP) + }, + { + .desc = "SME2", + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .capability = ARM64_SME2, + .matches = has_cpuid_feature, + .cpu_enable = sme2_kernel_enable, + ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, SME, SME2) + }, +#endif /* CONFIG_ARM64_SME */ + { + .desc = "WFx with timeout", + .capability = ARM64_HAS_WFXT, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_cpuid_feature, + ARM64_CPUID_FIELDS(ID_AA64ISAR2_EL1, WFxT, IMP) + }, + { + .desc = "Trap EL0 IMPLEMENTATION DEFINED functionality", + .capability = ARM64_HAS_TIDCP1, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_cpuid_feature, + .cpu_enable = cpu_trap_el0_impdef, + ARM64_CPUID_FIELDS(ID_AA64MMFR1_EL1, TIDCP1, IMP) + }, + { + .desc = "Data independent timing control (DIT)", + .capability = ARM64_HAS_DIT, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_cpuid_feature, + .cpu_enable = cpu_enable_dit, + ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, DIT, IMP) + }, + { + .desc = "Memory Copy and Memory Set instructions", + .capability = ARM64_HAS_MOPS, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_cpuid_feature, + .cpu_enable = cpu_enable_mops, + ARM64_CPUID_FIELDS(ID_AA64ISAR2_EL1, MOPS, IMP) + }, + { + .capability = ARM64_HAS_TCR2, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_cpuid_feature, + ARM64_CPUID_FIELDS(ID_AA64MMFR3_EL1, TCRX, IMP) + }, + { + .desc = "Stage-1 Permission Indirection Extension (S1PIE)", + .capability = ARM64_HAS_S1PIE, + .type = ARM64_CPUCAP_BOOT_CPU_FEATURE, + 
.matches = has_cpuid_feature, + ARM64_CPUID_FIELDS(ID_AA64MMFR3_EL1, S1PIE, IMP) + }, + { + .desc = "VHE for hypervisor only", + .capability = ARM64_KVM_HVHE, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = hvhe_possible, + }, + { + .desc = "Enhanced Virtualization Traps", + .capability = ARM64_HAS_EVT, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_cpuid_feature, + ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, EVT, IMP) + }, + {}, +}; + +#define HWCAP_CPUID_MATCH(reg, field, min_value) \ + .matches = has_user_cpuid_feature, \ + ARM64_CPUID_FIELDS(reg, field, min_value) + +#define __HWCAP_CAP(name, cap_type, cap) \ + .desc = name, \ + .type = ARM64_CPUCAP_SYSTEM_FEATURE, \ + .hwcap_type = cap_type, \ + .hwcap = cap, \ + +#define HWCAP_CAP(reg, field, min_value, cap_type, cap) \ + { \ + __HWCAP_CAP(#cap, cap_type, cap) \ + HWCAP_CPUID_MATCH(reg, field, min_value) \ + } + +#define HWCAP_MULTI_CAP(list, cap_type, cap) \ + { \ + __HWCAP_CAP(#cap, cap_type, cap) \ + .matches = cpucap_multi_entry_cap_matches, \ + .match_list = list, \ + } + +#define HWCAP_CAP_MATCH(match, cap_type, cap) \ + { \ + __HWCAP_CAP(#cap, cap_type, cap) \ + .matches = match, \ + } + +#ifdef CONFIG_ARM64_PTR_AUTH +static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = { + { + HWCAP_CPUID_MATCH(ID_AA64ISAR1_EL1, APA, PAuth) + }, + { + HWCAP_CPUID_MATCH(ID_AA64ISAR2_EL1, APA3, PAuth) + }, + { + HWCAP_CPUID_MATCH(ID_AA64ISAR1_EL1, API, PAuth) + }, + {}, +}; + +static const struct arm64_cpu_capabilities ptr_auth_hwcap_gen_matches[] = { + { + HWCAP_CPUID_MATCH(ID_AA64ISAR1_EL1, GPA, IMP) + }, + { + HWCAP_CPUID_MATCH(ID_AA64ISAR2_EL1, GPA3, IMP) + }, + { + HWCAP_CPUID_MATCH(ID_AA64ISAR1_EL1, GPI, IMP) + }, + {}, +}; +#endif + +static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { + HWCAP_CAP(ID_AA64ISAR0_EL1, AES, PMULL, CAP_HWCAP, KERNEL_HWCAP_PMULL), + HWCAP_CAP(ID_AA64ISAR0_EL1, AES, AES, CAP_HWCAP, KERNEL_HWCAP_AES), + HWCAP_CAP(ID_AA64ISAR0_EL1, SHA1, IMP, CAP_HWCAP, KERNEL_HWCAP_SHA1), + HWCAP_CAP(ID_AA64ISAR0_EL1, SHA2, SHA256, CAP_HWCAP, KERNEL_HWCAP_SHA2), + HWCAP_CAP(ID_AA64ISAR0_EL1, SHA2, SHA512, CAP_HWCAP, KERNEL_HWCAP_SHA512), + HWCAP_CAP(ID_AA64ISAR0_EL1, CRC32, IMP, CAP_HWCAP, KERNEL_HWCAP_CRC32), + HWCAP_CAP(ID_AA64ISAR0_EL1, ATOMIC, IMP, CAP_HWCAP, KERNEL_HWCAP_ATOMICS), + HWCAP_CAP(ID_AA64ISAR0_EL1, RDM, IMP, CAP_HWCAP, KERNEL_HWCAP_ASIMDRDM), + HWCAP_CAP(ID_AA64ISAR0_EL1, SHA3, IMP, CAP_HWCAP, KERNEL_HWCAP_SHA3), + HWCAP_CAP(ID_AA64ISAR0_EL1, SM3, IMP, CAP_HWCAP, KERNEL_HWCAP_SM3), + HWCAP_CAP(ID_AA64ISAR0_EL1, SM4, IMP, CAP_HWCAP, KERNEL_HWCAP_SM4), + HWCAP_CAP(ID_AA64ISAR0_EL1, DP, IMP, CAP_HWCAP, KERNEL_HWCAP_ASIMDDP), + HWCAP_CAP(ID_AA64ISAR0_EL1, FHM, IMP, CAP_HWCAP, KERNEL_HWCAP_ASIMDFHM), + HWCAP_CAP(ID_AA64ISAR0_EL1, TS, FLAGM, CAP_HWCAP, KERNEL_HWCAP_FLAGM), + HWCAP_CAP(ID_AA64ISAR0_EL1, TS, FLAGM2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2), + HWCAP_CAP(ID_AA64ISAR0_EL1, RNDR, IMP, CAP_HWCAP, KERNEL_HWCAP_RNG), + HWCAP_CAP(ID_AA64PFR0_EL1, FP, IMP, CAP_HWCAP, KERNEL_HWCAP_FP), + HWCAP_CAP(ID_AA64PFR0_EL1, FP, FP16, CAP_HWCAP, KERNEL_HWCAP_FPHP), + HWCAP_CAP(ID_AA64PFR0_EL1, AdvSIMD, IMP, CAP_HWCAP, KERNEL_HWCAP_ASIMD), + HWCAP_CAP(ID_AA64PFR0_EL1, AdvSIMD, FP16, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP), + HWCAP_CAP(ID_AA64PFR0_EL1, DIT, IMP, CAP_HWCAP, KERNEL_HWCAP_DIT), + HWCAP_CAP(ID_AA64ISAR1_EL1, DPB, IMP, CAP_HWCAP, KERNEL_HWCAP_DCPOP), + HWCAP_CAP(ID_AA64ISAR1_EL1, DPB, DPB2, CAP_HWCAP, KERNEL_HWCAP_DCPODP), + HWCAP_CAP(ID_AA64ISAR1_EL1, JSCVT, IMP, CAP_HWCAP, 
KERNEL_HWCAP_JSCVT), + HWCAP_CAP(ID_AA64ISAR1_EL1, FCMA, IMP, CAP_HWCAP, KERNEL_HWCAP_FCMA), + HWCAP_CAP(ID_AA64ISAR1_EL1, LRCPC, IMP, CAP_HWCAP, KERNEL_HWCAP_LRCPC), + HWCAP_CAP(ID_AA64ISAR1_EL1, LRCPC, LRCPC2, CAP_HWCAP, KERNEL_HWCAP_ILRCPC), + HWCAP_CAP(ID_AA64ISAR1_EL1, FRINTTS, IMP, CAP_HWCAP, KERNEL_HWCAP_FRINT), + HWCAP_CAP(ID_AA64ISAR1_EL1, SB, IMP, CAP_HWCAP, KERNEL_HWCAP_SB), + HWCAP_CAP(ID_AA64ISAR1_EL1, BF16, IMP, CAP_HWCAP, KERNEL_HWCAP_BF16), + HWCAP_CAP(ID_AA64ISAR1_EL1, BF16, EBF16, CAP_HWCAP, KERNEL_HWCAP_EBF16), + HWCAP_CAP(ID_AA64ISAR1_EL1, DGH, IMP, CAP_HWCAP, KERNEL_HWCAP_DGH), + HWCAP_CAP(ID_AA64ISAR1_EL1, I8MM, IMP, CAP_HWCAP, KERNEL_HWCAP_I8MM), + HWCAP_CAP(ID_AA64MMFR2_EL1, AT, IMP, CAP_HWCAP, KERNEL_HWCAP_USCAT), +#ifdef CONFIG_ARM64_SVE + HWCAP_CAP(ID_AA64PFR0_EL1, SVE, IMP, CAP_HWCAP, KERNEL_HWCAP_SVE), + HWCAP_CAP(ID_AA64ZFR0_EL1, SVEver, SVE2p1, CAP_HWCAP, KERNEL_HWCAP_SVE2P1), + HWCAP_CAP(ID_AA64ZFR0_EL1, SVEver, SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2), + HWCAP_CAP(ID_AA64ZFR0_EL1, AES, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEAES), + HWCAP_CAP(ID_AA64ZFR0_EL1, AES, PMULL128, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL), + HWCAP_CAP(ID_AA64ZFR0_EL1, BitPerm, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBITPERM), + HWCAP_CAP(ID_AA64ZFR0_EL1, BF16, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBF16), + HWCAP_CAP(ID_AA64ZFR0_EL1, BF16, EBF16, CAP_HWCAP, KERNEL_HWCAP_SVE_EBF16), + HWCAP_CAP(ID_AA64ZFR0_EL1, SHA3, IMP, CAP_HWCAP, KERNEL_HWCAP_SVESHA3), + HWCAP_CAP(ID_AA64ZFR0_EL1, SM4, IMP, CAP_HWCAP, KERNEL_HWCAP_SVESM4), + HWCAP_CAP(ID_AA64ZFR0_EL1, I8MM, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEI8MM), + HWCAP_CAP(ID_AA64ZFR0_EL1, F32MM, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF32MM), + HWCAP_CAP(ID_AA64ZFR0_EL1, F64MM, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF64MM), +#endif + HWCAP_CAP(ID_AA64PFR1_EL1, SSBS, SSBS2, CAP_HWCAP, KERNEL_HWCAP_SSBS), +#ifdef CONFIG_ARM64_BTI + HWCAP_CAP(ID_AA64PFR1_EL1, BT, IMP, CAP_HWCAP, KERNEL_HWCAP_BTI), +#endif +#ifdef CONFIG_ARM64_PTR_AUTH + HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, KERNEL_HWCAP_PACA), + HWCAP_MULTI_CAP(ptr_auth_hwcap_gen_matches, CAP_HWCAP, KERNEL_HWCAP_PACG), +#endif +#ifdef CONFIG_ARM64_MTE + HWCAP_CAP(ID_AA64PFR1_EL1, MTE, MTE2, CAP_HWCAP, KERNEL_HWCAP_MTE), + HWCAP_CAP(ID_AA64PFR1_EL1, MTE, MTE3, CAP_HWCAP, KERNEL_HWCAP_MTE3), +#endif /* CONFIG_ARM64_MTE */ + HWCAP_CAP(ID_AA64MMFR0_EL1, ECV, IMP, CAP_HWCAP, KERNEL_HWCAP_ECV), + HWCAP_CAP(ID_AA64MMFR1_EL1, AFP, IMP, CAP_HWCAP, KERNEL_HWCAP_AFP), + HWCAP_CAP(ID_AA64ISAR2_EL1, CSSC, IMP, CAP_HWCAP, KERNEL_HWCAP_CSSC), + HWCAP_CAP(ID_AA64ISAR2_EL1, RPRFM, IMP, CAP_HWCAP, KERNEL_HWCAP_RPRFM), + HWCAP_CAP(ID_AA64ISAR2_EL1, RPRES, IMP, CAP_HWCAP, KERNEL_HWCAP_RPRES), + HWCAP_CAP(ID_AA64ISAR2_EL1, WFxT, IMP, CAP_HWCAP, KERNEL_HWCAP_WFXT), + HWCAP_CAP(ID_AA64ISAR2_EL1, MOPS, IMP, CAP_HWCAP, KERNEL_HWCAP_MOPS), + HWCAP_CAP(ID_AA64ISAR2_EL1, BC, IMP, CAP_HWCAP, KERNEL_HWCAP_HBC), +#ifdef CONFIG_ARM64_SME + HWCAP_CAP(ID_AA64PFR1_EL1, SME, IMP, CAP_HWCAP, KERNEL_HWCAP_SME), + HWCAP_CAP(ID_AA64SMFR0_EL1, FA64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64), + HWCAP_CAP(ID_AA64SMFR0_EL1, SMEver, SME2p1, CAP_HWCAP, KERNEL_HWCAP_SME2P1), + HWCAP_CAP(ID_AA64SMFR0_EL1, SMEver, SME2, CAP_HWCAP, KERNEL_HWCAP_SME2), + HWCAP_CAP(ID_AA64SMFR0_EL1, I16I64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64), + HWCAP_CAP(ID_AA64SMFR0_EL1, F64F64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64), + HWCAP_CAP(ID_AA64SMFR0_EL1, I16I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I32), + HWCAP_CAP(ID_AA64SMFR0_EL1, B16B16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16B16), + 
HWCAP_CAP(ID_AA64SMFR0_EL1, F16F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F16), + HWCAP_CAP(ID_AA64SMFR0_EL1, I8I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I8I32), + HWCAP_CAP(ID_AA64SMFR0_EL1, F16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F32), + HWCAP_CAP(ID_AA64SMFR0_EL1, B16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16F32), + HWCAP_CAP(ID_AA64SMFR0_EL1, BI32I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_BI32I32), + HWCAP_CAP(ID_AA64SMFR0_EL1, F32F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F32F32), +#endif /* CONFIG_ARM64_SME */ + {}, +}; + +#ifdef CONFIG_COMPAT +static bool compat_has_neon(const struct arm64_cpu_capabilities *cap, int scope) +{ + /* + * Check that all of MVFR1_EL1.{SIMDSP, SIMDInt, SIMDLS} are available, + * in line with that of arm32 as in vfp_init(). We make sure that the + * check is future proof, by making sure value is non-zero. + */ + u32 mvfr1; + + WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible()); + if (scope == SCOPE_SYSTEM) + mvfr1 = read_sanitised_ftr_reg(SYS_MVFR1_EL1); + else + mvfr1 = read_sysreg_s(SYS_MVFR1_EL1); + + return cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_EL1_SIMDSP_SHIFT) && + cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_EL1_SIMDInt_SHIFT) && + cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_EL1_SIMDLS_SHIFT); +} +#endif + +static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = { +#ifdef CONFIG_COMPAT + HWCAP_CAP_MATCH(compat_has_neon, CAP_COMPAT_HWCAP, COMPAT_HWCAP_NEON), + HWCAP_CAP(MVFR1_EL1, SIMDFMAC, IMP, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv4), + /* Arm v8 mandates MVFR0.FPDP == {0, 2}. So, piggy back on this for the presence of VFP support */ + HWCAP_CAP(MVFR0_EL1, FPDP, VFPv3, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFP), + HWCAP_CAP(MVFR0_EL1, FPDP, VFPv3, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv3), + HWCAP_CAP(MVFR1_EL1, FPHP, FP16, CAP_COMPAT_HWCAP, COMPAT_HWCAP_FPHP), + HWCAP_CAP(MVFR1_EL1, SIMDHP, SIMDHP_FLOAT, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDHP), + HWCAP_CAP(ID_ISAR5_EL1, AES, VMULL, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL), + HWCAP_CAP(ID_ISAR5_EL1, AES, IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES), + HWCAP_CAP(ID_ISAR5_EL1, SHA1, IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1), + HWCAP_CAP(ID_ISAR5_EL1, SHA2, IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2), + HWCAP_CAP(ID_ISAR5_EL1, CRC32, IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32), + HWCAP_CAP(ID_ISAR6_EL1, DP, IMP, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDDP), + HWCAP_CAP(ID_ISAR6_EL1, FHM, IMP, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDFHM), + HWCAP_CAP(ID_ISAR6_EL1, SB, IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SB), + HWCAP_CAP(ID_ISAR6_EL1, BF16, IMP, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDBF16), + HWCAP_CAP(ID_ISAR6_EL1, I8MM, IMP, CAP_COMPAT_HWCAP, COMPAT_HWCAP_I8MM), + HWCAP_CAP(ID_PFR2_EL1, SSBS, IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SSBS), +#endif + {}, +}; + +static void cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap) +{ + switch (cap->hwcap_type) { + case CAP_HWCAP: + cpu_set_feature(cap->hwcap); + break; +#ifdef CONFIG_COMPAT + case CAP_COMPAT_HWCAP: + compat_elf_hwcap |= (u32)cap->hwcap; + break; + case CAP_COMPAT_HWCAP2: + compat_elf_hwcap2 |= (u32)cap->hwcap; + break; +#endif + default: + WARN_ON(1); + break; + } +} + +/* Check if we have a particular HWCAP enabled */ +static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap) +{ + bool rc; + + switch (cap->hwcap_type) { + case CAP_HWCAP: + rc = cpu_have_feature(cap->hwcap); + break; +#ifdef CONFIG_COMPAT + case CAP_COMPAT_HWCAP: + rc = (compat_elf_hwcap & (u32)cap->hwcap) != 0; + break; + case CAP_COMPAT_HWCAP2: + rc = 
(compat_elf_hwcap2 & (u32)cap->hwcap) != 0; + break; +#endif + default: + WARN_ON(1); + rc = false; + } + + return rc; +} + +static void setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps) +{ + /* We support emulation of accesses to CPU ID feature registers */ + cpu_set_named_feature(CPUID); + for (; hwcaps->matches; hwcaps++) + if (hwcaps->matches(hwcaps, cpucap_default_scope(hwcaps))) + cap_set_elf_hwcap(hwcaps); +} + +static void update_cpu_capabilities(u16 scope_mask) +{ + int i; + const struct arm64_cpu_capabilities *caps; + + scope_mask &= ARM64_CPUCAP_SCOPE_MASK; + for (i = 0; i < ARM64_NCAPS; i++) { + caps = cpucap_ptrs[i]; + if (!caps || !(caps->type & scope_mask) || + cpus_have_cap(caps->capability) || + !caps->matches(caps, cpucap_default_scope(caps))) + continue; + + if (caps->desc) + pr_info("detected: %s\n", caps->desc); + + __set_bit(caps->capability, system_cpucaps); + + if ((scope_mask & SCOPE_BOOT_CPU) && (caps->type & SCOPE_BOOT_CPU)) + set_bit(caps->capability, boot_cpucaps); + } +} + +/* + * Enable all the available capabilities on this CPU. The capabilities + * with BOOT_CPU scope are handled separately and hence skipped here. + */ +static int cpu_enable_non_boot_scope_capabilities(void *__unused) +{ + int i; + u16 non_boot_scope = SCOPE_ALL & ~SCOPE_BOOT_CPU; + + for_each_available_cap(i) { + const struct arm64_cpu_capabilities *cap = cpucap_ptrs[i]; + + if (WARN_ON(!cap)) + continue; + + if (!(cap->type & non_boot_scope)) + continue; + + if (cap->cpu_enable) + cap->cpu_enable(cap); + } + return 0; +} + +/* + * Run through the enabled capabilities and enable() it on all active + * CPUs + */ +static void __init enable_cpu_capabilities(u16 scope_mask) +{ + int i; + const struct arm64_cpu_capabilities *caps; + bool boot_scope; + + scope_mask &= ARM64_CPUCAP_SCOPE_MASK; + boot_scope = !!(scope_mask & SCOPE_BOOT_CPU); + + for (i = 0; i < ARM64_NCAPS; i++) { + unsigned int num; + + caps = cpucap_ptrs[i]; + if (!caps || !(caps->type & scope_mask)) + continue; + num = caps->capability; + if (!cpus_have_cap(num)) + continue; + + if (boot_scope && caps->cpu_enable) + /* + * Capabilities with SCOPE_BOOT_CPU scope are finalised + * before any secondary CPU boots. Thus, each secondary + * will enable the capability as appropriate via + * check_local_cpu_capabilities(). The only exception is + * the boot CPU, for which the capability must be + * enabled here. This approach avoids costly + * stop_machine() calls for this case. + */ + caps->cpu_enable(caps); + } + + /* + * For all non-boot scope capabilities, use stop_machine() + * as it schedules the work allowing us to modify PSTATE, + * instead of on_each_cpu() which uses an IPI, giving us a + * PSTATE that disappears when we return. + */ + if (!boot_scope) + stop_machine(cpu_enable_non_boot_scope_capabilities, + NULL, cpu_online_mask); +} + +/* + * Run through the list of capabilities to check for conflicts. + * If the system has already detected a capability, take necessary + * action on this CPU. 
+ */ +static void verify_local_cpu_caps(u16 scope_mask) +{ + int i; + bool cpu_has_cap, system_has_cap; + const struct arm64_cpu_capabilities *caps; + + scope_mask &= ARM64_CPUCAP_SCOPE_MASK; + + for (i = 0; i < ARM64_NCAPS; i++) { + caps = cpucap_ptrs[i]; + if (!caps || !(caps->type & scope_mask)) + continue; + + cpu_has_cap = caps->matches(caps, SCOPE_LOCAL_CPU); + system_has_cap = cpus_have_cap(caps->capability); + + if (system_has_cap) { + /* + * Check if the new CPU misses an advertised feature, + * which is not safe to miss. + */ + if (!cpu_has_cap && !cpucap_late_cpu_optional(caps)) + break; + /* + * We have to issue cpu_enable() irrespective of + * whether the CPU has it or not, as it is enabled + * system wide. It is up to the callback to take + * appropriate action on this CPU. + */ + if (caps->cpu_enable) + caps->cpu_enable(caps); + } else { + /* + * Check if the CPU has this capability if it isn't + * safe to have when the system doesn't. + */ + if (cpu_has_cap && !cpucap_late_cpu_permitted(caps)) + break; + } + } + + if (i < ARM64_NCAPS) { + pr_crit("CPU%d: Detected conflict for capability %d (%s), System: %d, CPU: %d\n", + smp_processor_id(), caps->capability, + caps->desc, system_has_cap, cpu_has_cap); + + if (cpucap_panic_on_conflict(caps)) + cpu_panic_kernel(); + else + cpu_die_early(); + } +} + +/* + * Check for CPU features that are used in early boot + * based on the Boot CPU value. + */ +static void check_early_cpu_features(void) +{ + verify_cpu_asid_bits(); + + verify_local_cpu_caps(SCOPE_BOOT_CPU); +} + +static void +__verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps) +{ + + for (; caps->matches; caps++) + if (cpus_have_elf_hwcap(caps) && !caps->matches(caps, SCOPE_LOCAL_CPU)) { + pr_crit("CPU%d: missing HWCAP: %s\n", + smp_processor_id(), caps->desc); + cpu_die_early(); + } +} + +static void verify_local_elf_hwcaps(void) +{ + __verify_local_elf_hwcaps(arm64_elf_hwcaps); + + if (id_aa64pfr0_32bit_el0(read_cpuid(ID_AA64PFR0_EL1))) + __verify_local_elf_hwcaps(compat_elf_hwcaps); +} + +static void verify_sve_features(void) +{ + u64 safe_zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1); + u64 zcr = read_zcr_features(); + + unsigned int safe_len = safe_zcr & ZCR_ELx_LEN_MASK; + unsigned int len = zcr & ZCR_ELx_LEN_MASK; + + if (len < safe_len || vec_verify_vq_map(ARM64_VEC_SVE)) { + pr_crit("CPU%d: SVE: vector length support mismatch\n", + smp_processor_id()); + cpu_die_early(); + } + + /* Add checks on other ZCR bits here if necessary */ +} + +static void verify_sme_features(void) +{ + u64 safe_smcr = read_sanitised_ftr_reg(SYS_SMCR_EL1); + u64 smcr = read_smcr_features(); + + unsigned int safe_len = safe_smcr & SMCR_ELx_LEN_MASK; + unsigned int len = smcr & SMCR_ELx_LEN_MASK; + + if (len < safe_len || vec_verify_vq_map(ARM64_VEC_SME)) { + pr_crit("CPU%d: SME: vector length support mismatch\n", + smp_processor_id()); + cpu_die_early(); + } + + /* Add checks on other SMCR bits here if necessary */ +} + +static void verify_hyp_capabilities(void) +{ + u64 safe_mmfr1, mmfr0, mmfr1; + int parange, ipa_max; + unsigned int safe_vmid_bits, vmid_bits; + + if (!IS_ENABLED(CONFIG_KVM)) + return; + + safe_mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); + mmfr0 = read_cpuid(ID_AA64MMFR0_EL1); + mmfr1 = read_cpuid(ID_AA64MMFR1_EL1); + + /* Verify VMID bits */ + safe_vmid_bits = get_vmid_bits(safe_mmfr1); + vmid_bits = get_vmid_bits(mmfr1); + if (vmid_bits < safe_vmid_bits) { + pr_crit("CPU%d: VMID width mismatch\n", smp_processor_id()); + cpu_die_early(); + } + + /* 
Verify IPA range */ + parange = cpuid_feature_extract_unsigned_field(mmfr0, + ID_AA64MMFR0_EL1_PARANGE_SHIFT); + ipa_max = id_aa64mmfr0_parange_to_phys_shift(parange); + if (ipa_max < get_kvm_ipa_limit()) { + pr_crit("CPU%d: IPA range mismatch\n", smp_processor_id()); + cpu_die_early(); + } +} + +/* + * Run through the enabled system capabilities and enable() it on this CPU. + * The capabilities were decided based on the available CPUs at the boot time. + * Any new CPU should match the system wide status of the capability. If the + * new CPU doesn't have a capability which the system now has enabled, we + * cannot do anything to fix it up and could cause unexpected failures. So + * we park the CPU. + */ +static void verify_local_cpu_capabilities(void) +{ + /* + * The capabilities with SCOPE_BOOT_CPU are checked from + * check_early_cpu_features(), as they need to be verified + * on all secondary CPUs. + */ + verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU); + verify_local_elf_hwcaps(); + + if (system_supports_sve()) + verify_sve_features(); + + if (system_supports_sme()) + verify_sme_features(); + + if (is_hyp_mode_available()) + verify_hyp_capabilities(); +} + +void check_local_cpu_capabilities(void) +{ + /* + * All secondary CPUs should conform to the early CPU features + * in use by the kernel based on boot CPU. + */ + check_early_cpu_features(); + + /* + * If we haven't finalised the system capabilities, this CPU gets + * a chance to update the errata work arounds and local features. + * Otherwise, this CPU should verify that it has all the system + * advertised capabilities. + */ + if (!system_capabilities_finalized()) + update_cpu_capabilities(SCOPE_LOCAL_CPU); + else + verify_local_cpu_capabilities(); +} + +static void __init setup_boot_cpu_capabilities(void) +{ + /* Detect capabilities with either SCOPE_BOOT_CPU or SCOPE_LOCAL_CPU */ + update_cpu_capabilities(SCOPE_BOOT_CPU | SCOPE_LOCAL_CPU); + /* Enable the SCOPE_BOOT_CPU capabilities alone right away */ + enable_cpu_capabilities(SCOPE_BOOT_CPU); +} + +bool this_cpu_has_cap(unsigned int n) +{ + if (!WARN_ON(preemptible()) && n < ARM64_NCAPS) { + const struct arm64_cpu_capabilities *cap = cpucap_ptrs[n]; + + if (cap) + return cap->matches(cap, SCOPE_LOCAL_CPU); + } + + return false; +} +EXPORT_SYMBOL_GPL(this_cpu_has_cap); + +/* + * This helper function is used in a narrow window when, + * - The system wide safe registers are set with all the SMP CPUs and, + * - The SYSTEM_FEATURE system_cpucaps may not have been set. + * In all other cases cpus_have_{const_}cap() should be used. + */ +static bool __maybe_unused __system_matches_cap(unsigned int n) +{ + if (n < ARM64_NCAPS) { + const struct arm64_cpu_capabilities *cap = cpucap_ptrs[n]; + + if (cap) + return cap->matches(cap, SCOPE_SYSTEM); + } + return false; +} + +void cpu_set_feature(unsigned int num) +{ + set_bit(num, elf_hwcap); +} + +bool cpu_have_feature(unsigned int num) +{ + return test_bit(num, elf_hwcap); +} +EXPORT_SYMBOL_GPL(cpu_have_feature); + +unsigned long cpu_get_elf_hwcap(void) +{ + /* + * We currently only populate the first 32 bits of AT_HWCAP. Please + * note that for userspace compatibility we guarantee that bits 62 + * and 63 will always be returned as 0. + */ + return elf_hwcap[0]; +} + +unsigned long cpu_get_elf_hwcap2(void) +{ + return elf_hwcap[1]; +} + +static void __init setup_system_capabilities(void) +{ + /* + * We have finalised the system-wide safe feature + * registers, finalise the capabilities that depend + * on it. 
Also enable all the available capabilities, + * that are not enabled already. + */ + update_cpu_capabilities(SCOPE_SYSTEM); + enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU); +} + +void __init setup_cpu_features(void) +{ + u32 cwg; + + setup_system_capabilities(); + setup_elf_hwcaps(arm64_elf_hwcaps); + + if (system_supports_32bit_el0()) { + setup_elf_hwcaps(compat_elf_hwcaps); + elf_hwcap_fixup(); + } + + if (system_uses_ttbr0_pan()) + pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n"); + + sve_setup(); + sme_setup(); + minsigstksz_setup(); + + /* + * Check for sane CTR_EL0.CWG value. + */ + cwg = cache_type_cwg(); + if (!cwg) + pr_warn("No Cache Writeback Granule information, assuming %d\n", + ARCH_DMA_MINALIGN); +} + +static int enable_mismatched_32bit_el0(unsigned int cpu) +{ + /* + * The first 32-bit-capable CPU we detected and so can no longer + * be offlined by userspace. -1 indicates we haven't yet onlined + * a 32-bit-capable CPU. + */ + static int lucky_winner = -1; + + struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu); + bool cpu_32bit = id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0); + + if (cpu_32bit) { + cpumask_set_cpu(cpu, cpu_32bit_el0_mask); + static_branch_enable_cpuslocked(&arm64_mismatched_32bit_el0); + } + + if (cpumask_test_cpu(0, cpu_32bit_el0_mask) == cpu_32bit) + return 0; + + if (lucky_winner >= 0) + return 0; + + /* + * We've detected a mismatch. We need to keep one of our CPUs with + * 32-bit EL0 online so that is_cpu_allowed() doesn't end up rejecting + * every CPU in the system for a 32-bit task. + */ + lucky_winner = cpu_32bit ? cpu : cpumask_any_and(cpu_32bit_el0_mask, + cpu_active_mask); + get_cpu_device(lucky_winner)->offline_disabled = true; + setup_elf_hwcaps(compat_elf_hwcaps); + elf_hwcap_fixup(); + pr_info("Asymmetric 32-bit EL0 support detected on CPU %u; CPU hot-unplug disabled on CPU %u\n", + cpu, lucky_winner); + return 0; +} + +static int __init init_32bit_el0_mask(void) +{ + if (!allow_mismatched_32bit_el0) + return 0; + + if (!zalloc_cpumask_var(&cpu_32bit_el0_mask, GFP_KERNEL)) + return -ENOMEM; + + return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, + "arm64/mismatched_32bit_el0:online", + enable_mismatched_32bit_el0, NULL); +} +subsys_initcall_sync(init_32bit_el0_mask); + +static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap) +{ + cpu_replace_ttbr1(lm_alias(swapper_pg_dir), idmap_pg_dir); +} + +/* + * We emulate only the following system register space. + * Op0 = 0x3, CRn = 0x0, Op1 = 0x0, CRm = [0, 2 - 7] + * See Table C5-6 System instruction encodings for System register accesses, + * ARMv8 ARM(ARM DDI 0487A.f) for more details. + */ +static inline bool __attribute_const__ is_emulated(u32 id) +{ + return (sys_reg_Op0(id) == 0x3 && + sys_reg_CRn(id) == 0x0 && + sys_reg_Op1(id) == 0x0 && + (sys_reg_CRm(id) == 0 || + ((sys_reg_CRm(id) >= 2) && (sys_reg_CRm(id) <= 7)))); +} + +/* + * With CRm == 0, reg should be one of : + * MIDR_EL1, MPIDR_EL1 or REVIDR_EL1. 
+ */ +static inline int emulate_id_reg(u32 id, u64 *valp) +{ + switch (id) { + case SYS_MIDR_EL1: + *valp = read_cpuid_id(); + break; + case SYS_MPIDR_EL1: + *valp = SYS_MPIDR_SAFE_VAL; + break; + case SYS_REVIDR_EL1: + /* IMPLEMENTATION DEFINED values are emulated with 0 */ + *valp = 0; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int emulate_sys_reg(u32 id, u64 *valp) +{ + struct arm64_ftr_reg *regp; + + if (!is_emulated(id)) + return -EINVAL; + + if (sys_reg_CRm(id) == 0) + return emulate_id_reg(id, valp); + + regp = get_arm64_ftr_reg_nowarn(id); + if (regp) + *valp = arm64_ftr_reg_user_value(regp); + else + /* + * The untracked registers are either IMPLEMENTATION DEFINED + * (e.g, ID_AFR0_EL1) or reserved RAZ. + */ + *valp = 0; + return 0; +} + +int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt) +{ + int rc; + u64 val; + + rc = emulate_sys_reg(sys_reg, &val); + if (!rc) { + pt_regs_write_reg(regs, rt, val); + arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); + } + return rc; +} + +bool try_emulate_mrs(struct pt_regs *regs, u32 insn) +{ + u32 sys_reg, rt; + + if (compat_user_mode(regs) || !aarch64_insn_is_mrs(insn)) + return false; + + /* + * sys_reg values are defined as used in mrs/msr instruction. + * shift the imm value to get the encoding. + */ + sys_reg = (u32)aarch64_insn_decode_immediate(AARCH64_INSN_IMM_16, insn) << 5; + rt = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn); + return do_emulate_mrs(regs, sys_reg, rt) == 0; +} + +enum mitigation_state arm64_get_meltdown_state(void) +{ + if (__meltdown_safe) + return SPECTRE_UNAFFECTED; + + if (arm64_kernel_unmapped_at_el0()) + return SPECTRE_MITIGATED; + + return SPECTRE_VULNERABLE; +} + +ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, + char *buf) +{ + switch (arm64_get_meltdown_state()) { + case SPECTRE_UNAFFECTED: + return sprintf(buf, "Not affected\n"); + + case SPECTRE_MITIGATED: + return sprintf(buf, "Mitigation: PTI\n"); + + default: + return sprintf(buf, "Vulnerable\n"); + } +} diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c new file mode 100644 index 0000000000..f372295207 --- /dev/null +++ b/arch/arm64/kernel/cpuidle.c @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * ARM64 CPU idle arch support + * + * Copyright (C) 2014 ARM Ltd. 
+ * Author: Lorenzo Pieralisi + */ + +#include +#include +#include +#include + +#ifdef CONFIG_ACPI_PROCESSOR_IDLE + +#include + +#define ARM64_LPI_IS_RETENTION_STATE(arch_flags) (!(arch_flags)) + +static int psci_acpi_cpu_init_idle(unsigned int cpu) +{ + int i, count; + struct acpi_lpi_state *lpi; + struct acpi_processor *pr = per_cpu(processors, cpu); + + if (unlikely(!pr || !pr->flags.has_lpi)) + return -EINVAL; + + /* + * If the PSCI cpu_suspend function hook has not been initialized + * idle states must not be enabled, so bail out + */ + if (!psci_ops.cpu_suspend) + return -EOPNOTSUPP; + + count = pr->power.count - 1; + if (count <= 0) + return -ENODEV; + + for (i = 0; i < count; i++) { + u32 state; + + lpi = &pr->power.lpi_states[i + 1]; + /* + * Only bits[31:0] represent a PSCI power_state while + * bits[63:32] must be 0x0 as per ARM ACPI FFH Specification + */ + state = lpi->address; + if (!psci_power_state_is_valid(state)) { + pr_warn("Invalid PSCI power state %#x\n", state); + return -EINVAL; + } + } + + return 0; +} + +int acpi_processor_ffh_lpi_probe(unsigned int cpu) +{ + return psci_acpi_cpu_init_idle(cpu); +} + +__cpuidle int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi) +{ + u32 state = lpi->address; + + if (ARM64_LPI_IS_RETENTION_STATE(lpi->arch_flags)) + return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM_RCU(psci_cpu_suspend_enter, + lpi->index, state); + else + return CPU_PM_CPU_IDLE_ENTER_PARAM_RCU(psci_cpu_suspend_enter, + lpi->index, state); +} +#endif diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c new file mode 100644 index 0000000000..98fda85005 --- /dev/null +++ b/arch/arm64/kernel/cpuinfo.c @@ -0,0 +1,480 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Record and handle CPU attributes. + * + * Copyright (C) 2014 ARM Ltd. + */ +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * In case the boot CPU is hotpluggable, we record its initial state and + * current state separately. Certain system registers may contain different + * values depending on configuration at or after reset. 
+ */ +DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data); +static struct cpuinfo_arm64 boot_cpu_data; + +static inline const char *icache_policy_str(int l1ip) +{ + switch (l1ip) { + case CTR_EL0_L1Ip_VPIPT: + return "VPIPT"; + case CTR_EL0_L1Ip_VIPT: + return "VIPT"; + case CTR_EL0_L1Ip_PIPT: + return "PIPT"; + default: + return "RESERVED/UNKNOWN"; + } +} + +unsigned long __icache_flags; + +static const char *const hwcap_str[] = { + [KERNEL_HWCAP_FP] = "fp", + [KERNEL_HWCAP_ASIMD] = "asimd", + [KERNEL_HWCAP_EVTSTRM] = "evtstrm", + [KERNEL_HWCAP_AES] = "aes", + [KERNEL_HWCAP_PMULL] = "pmull", + [KERNEL_HWCAP_SHA1] = "sha1", + [KERNEL_HWCAP_SHA2] = "sha2", + [KERNEL_HWCAP_CRC32] = "crc32", + [KERNEL_HWCAP_ATOMICS] = "atomics", + [KERNEL_HWCAP_FPHP] = "fphp", + [KERNEL_HWCAP_ASIMDHP] = "asimdhp", + [KERNEL_HWCAP_CPUID] = "cpuid", + [KERNEL_HWCAP_ASIMDRDM] = "asimdrdm", + [KERNEL_HWCAP_JSCVT] = "jscvt", + [KERNEL_HWCAP_FCMA] = "fcma", + [KERNEL_HWCAP_LRCPC] = "lrcpc", + [KERNEL_HWCAP_DCPOP] = "dcpop", + [KERNEL_HWCAP_SHA3] = "sha3", + [KERNEL_HWCAP_SM3] = "sm3", + [KERNEL_HWCAP_SM4] = "sm4", + [KERNEL_HWCAP_ASIMDDP] = "asimddp", + [KERNEL_HWCAP_SHA512] = "sha512", + [KERNEL_HWCAP_SVE] = "sve", + [KERNEL_HWCAP_ASIMDFHM] = "asimdfhm", + [KERNEL_HWCAP_DIT] = "dit", + [KERNEL_HWCAP_USCAT] = "uscat", + [KERNEL_HWCAP_ILRCPC] = "ilrcpc", + [KERNEL_HWCAP_FLAGM] = "flagm", + [KERNEL_HWCAP_SSBS] = "ssbs", + [KERNEL_HWCAP_SB] = "sb", + [KERNEL_HWCAP_PACA] = "paca", + [KERNEL_HWCAP_PACG] = "pacg", + [KERNEL_HWCAP_DCPODP] = "dcpodp", + [KERNEL_HWCAP_SVE2] = "sve2", + [KERNEL_HWCAP_SVEAES] = "sveaes", + [KERNEL_HWCAP_SVEPMULL] = "svepmull", + [KERNEL_HWCAP_SVEBITPERM] = "svebitperm", + [KERNEL_HWCAP_SVESHA3] = "svesha3", + [KERNEL_HWCAP_SVESM4] = "svesm4", + [KERNEL_HWCAP_FLAGM2] = "flagm2", + [KERNEL_HWCAP_FRINT] = "frint", + [KERNEL_HWCAP_SVEI8MM] = "svei8mm", + [KERNEL_HWCAP_SVEF32MM] = "svef32mm", + [KERNEL_HWCAP_SVEF64MM] = "svef64mm", + [KERNEL_HWCAP_SVEBF16] = "svebf16", + [KERNEL_HWCAP_I8MM] = "i8mm", + [KERNEL_HWCAP_BF16] = "bf16", + [KERNEL_HWCAP_DGH] = "dgh", + [KERNEL_HWCAP_RNG] = "rng", + [KERNEL_HWCAP_BTI] = "bti", + [KERNEL_HWCAP_MTE] = "mte", + [KERNEL_HWCAP_ECV] = "ecv", + [KERNEL_HWCAP_AFP] = "afp", + [KERNEL_HWCAP_RPRES] = "rpres", + [KERNEL_HWCAP_MTE3] = "mte3", + [KERNEL_HWCAP_SME] = "sme", + [KERNEL_HWCAP_SME_I16I64] = "smei16i64", + [KERNEL_HWCAP_SME_F64F64] = "smef64f64", + [KERNEL_HWCAP_SME_I8I32] = "smei8i32", + [KERNEL_HWCAP_SME_F16F32] = "smef16f32", + [KERNEL_HWCAP_SME_B16F32] = "smeb16f32", + [KERNEL_HWCAP_SME_F32F32] = "smef32f32", + [KERNEL_HWCAP_SME_FA64] = "smefa64", + [KERNEL_HWCAP_WFXT] = "wfxt", + [KERNEL_HWCAP_EBF16] = "ebf16", + [KERNEL_HWCAP_SVE_EBF16] = "sveebf16", + [KERNEL_HWCAP_CSSC] = "cssc", + [KERNEL_HWCAP_RPRFM] = "rprfm", + [KERNEL_HWCAP_SVE2P1] = "sve2p1", + [KERNEL_HWCAP_SME2] = "sme2", + [KERNEL_HWCAP_SME2P1] = "sme2p1", + [KERNEL_HWCAP_SME_I16I32] = "smei16i32", + [KERNEL_HWCAP_SME_BI32I32] = "smebi32i32", + [KERNEL_HWCAP_SME_B16B16] = "smeb16b16", + [KERNEL_HWCAP_SME_F16F16] = "smef16f16", + [KERNEL_HWCAP_MOPS] = "mops", + [KERNEL_HWCAP_HBC] = "hbc", +}; + +#ifdef CONFIG_COMPAT +#define COMPAT_KERNEL_HWCAP(x) const_ilog2(COMPAT_HWCAP_ ## x) +static const char *const compat_hwcap_str[] = { + [COMPAT_KERNEL_HWCAP(SWP)] = "swp", + [COMPAT_KERNEL_HWCAP(HALF)] = "half", + [COMPAT_KERNEL_HWCAP(THUMB)] = "thumb", + [COMPAT_KERNEL_HWCAP(26BIT)] = NULL, /* Not possible on arm64 */ + [COMPAT_KERNEL_HWCAP(FAST_MULT)] = "fastmult", + [COMPAT_KERNEL_HWCAP(FPA)] = NULL, 
/* Not possible on arm64 */ + [COMPAT_KERNEL_HWCAP(VFP)] = "vfp", + [COMPAT_KERNEL_HWCAP(EDSP)] = "edsp", + [COMPAT_KERNEL_HWCAP(JAVA)] = NULL, /* Not possible on arm64 */ + [COMPAT_KERNEL_HWCAP(IWMMXT)] = NULL, /* Not possible on arm64 */ + [COMPAT_KERNEL_HWCAP(CRUNCH)] = NULL, /* Not possible on arm64 */ + [COMPAT_KERNEL_HWCAP(THUMBEE)] = NULL, /* Not possible on arm64 */ + [COMPAT_KERNEL_HWCAP(NEON)] = "neon", + [COMPAT_KERNEL_HWCAP(VFPv3)] = "vfpv3", + [COMPAT_KERNEL_HWCAP(VFPV3D16)] = NULL, /* Not possible on arm64 */ + [COMPAT_KERNEL_HWCAP(TLS)] = "tls", + [COMPAT_KERNEL_HWCAP(VFPv4)] = "vfpv4", + [COMPAT_KERNEL_HWCAP(IDIVA)] = "idiva", + [COMPAT_KERNEL_HWCAP(IDIVT)] = "idivt", + [COMPAT_KERNEL_HWCAP(VFPD32)] = NULL, /* Not possible on arm64 */ + [COMPAT_KERNEL_HWCAP(LPAE)] = "lpae", + [COMPAT_KERNEL_HWCAP(EVTSTRM)] = "evtstrm", + [COMPAT_KERNEL_HWCAP(FPHP)] = "fphp", + [COMPAT_KERNEL_HWCAP(ASIMDHP)] = "asimdhp", + [COMPAT_KERNEL_HWCAP(ASIMDDP)] = "asimddp", + [COMPAT_KERNEL_HWCAP(ASIMDFHM)] = "asimdfhm", + [COMPAT_KERNEL_HWCAP(ASIMDBF16)] = "asimdbf16", + [COMPAT_KERNEL_HWCAP(I8MM)] = "i8mm", +}; + +#define COMPAT_KERNEL_HWCAP2(x) const_ilog2(COMPAT_HWCAP2_ ## x) +static const char *const compat_hwcap2_str[] = { + [COMPAT_KERNEL_HWCAP2(AES)] = "aes", + [COMPAT_KERNEL_HWCAP2(PMULL)] = "pmull", + [COMPAT_KERNEL_HWCAP2(SHA1)] = "sha1", + [COMPAT_KERNEL_HWCAP2(SHA2)] = "sha2", + [COMPAT_KERNEL_HWCAP2(CRC32)] = "crc32", + [COMPAT_KERNEL_HWCAP2(SB)] = "sb", + [COMPAT_KERNEL_HWCAP2(SSBS)] = "ssbs", +}; +#endif /* CONFIG_COMPAT */ + +static int c_show(struct seq_file *m, void *v) +{ + int i, j; + bool compat = personality(current->personality) == PER_LINUX32; + + for_each_online_cpu(i) { + struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i); + u32 midr = cpuinfo->reg_midr; + + /* + * glibc reads /proc/cpuinfo to determine the number of + * online processors, looking for lines beginning with + * "processor". Give glibc what it expects. + */ + seq_printf(m, "processor\t: %d\n", i); + if (compat) + seq_printf(m, "model name\t: ARMv8 Processor rev %d (%s)\n", + MIDR_REVISION(midr), COMPAT_ELF_PLATFORM); + + seq_printf(m, "BogoMIPS\t: %lu.%02lu\n", + loops_per_jiffy / (500000UL/HZ), + loops_per_jiffy / (5000UL/HZ) % 100); + + /* + * Dump out the common processor features in a single line. + * Userspace should read the hwcaps with getauxval(AT_HWCAP) + * rather than attempting to parse this, but there's a body of + * software which does already (at least for 32-bit). + */ + seq_puts(m, "Features\t:"); + if (compat) { +#ifdef CONFIG_COMPAT + for (j = 0; j < ARRAY_SIZE(compat_hwcap_str); j++) { + if (compat_elf_hwcap & (1 << j)) { + /* + * Warn once if any feature should not + * have been present on arm64 platform. 
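[Editorial sketch, not part of the patch] As the comment in c_show() notes, the Features line exists for legacy parsers; new code should query hwcaps through the auxiliary vector instead. A small sketch, assuming reasonably current libc headers (the HWCAP_SVE fallback is the arm64 uapi value):

#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP_SVE
#define HWCAP_SVE	(1UL << 22)
#endif

int main(void)
{
	unsigned long hwcap  = getauxval(AT_HWCAP);
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	printf("sve      : %s\n", (hwcap & HWCAP_SVE) ? "yes" : "no");
	printf("AT_HWCAP : 0x%lx\n", hwcap);
	printf("AT_HWCAP2: 0x%lx\n", hwcap2);
	return 0;
}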
+ */ + if (WARN_ON_ONCE(!compat_hwcap_str[j])) + continue; + + seq_printf(m, " %s", compat_hwcap_str[j]); + } + } + + for (j = 0; j < ARRAY_SIZE(compat_hwcap2_str); j++) + if (compat_elf_hwcap2 & (1 << j)) + seq_printf(m, " %s", compat_hwcap2_str[j]); +#endif /* CONFIG_COMPAT */ + } else { + for (j = 0; j < ARRAY_SIZE(hwcap_str); j++) + if (cpu_have_feature(j)) + seq_printf(m, " %s", hwcap_str[j]); + } + seq_puts(m, "\n"); + + seq_printf(m, "CPU implementer\t: 0x%02x\n", + MIDR_IMPLEMENTOR(midr)); + seq_printf(m, "CPU architecture: 8\n"); + seq_printf(m, "CPU variant\t: 0x%x\n", MIDR_VARIANT(midr)); + seq_printf(m, "CPU part\t: 0x%03x\n", MIDR_PARTNUM(midr)); + seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr)); + } + + return 0; +} + +static void *c_start(struct seq_file *m, loff_t *pos) +{ + return *pos < 1 ? (void *)1 : NULL; +} + +static void *c_next(struct seq_file *m, void *v, loff_t *pos) +{ + ++*pos; + return NULL; +} + +static void c_stop(struct seq_file *m, void *v) +{ +} + +const struct seq_operations cpuinfo_op = { + .start = c_start, + .next = c_next, + .stop = c_stop, + .show = c_show +}; + + +static struct kobj_type cpuregs_kobj_type = { + .sysfs_ops = &kobj_sysfs_ops, +}; + +/* + * The ARM ARM uses the phrase "32-bit register" to describe a register + * whose upper 32 bits are RES0 (per C5.1.1, ARM DDI 0487A.i), however + * no statement is made as to whether the upper 32 bits will or will not + * be made use of in future, and between ARM DDI 0487A.c and ARM DDI + * 0487A.d CLIDR_EL1 was expanded from 32-bit to 64-bit. + * + * Thus, while both MIDR_EL1 and REVIDR_EL1 are described as 32-bit + * registers, we expose them both as 64 bit values to cater for possible + * future expansion without an ABI break. + */ +#define kobj_to_cpuinfo(kobj) container_of(kobj, struct cpuinfo_arm64, kobj) +#define CPUREGS_ATTR_RO(_name, _field) \ + static ssize_t _name##_show(struct kobject *kobj, \ + struct kobj_attribute *attr, char *buf) \ + { \ + struct cpuinfo_arm64 *info = kobj_to_cpuinfo(kobj); \ + \ + if (info->reg_midr) \ + return sprintf(buf, "0x%016llx\n", info->reg_##_field); \ + else \ + return 0; \ + } \ + static struct kobj_attribute cpuregs_attr_##_name = __ATTR_RO(_name) + +CPUREGS_ATTR_RO(midr_el1, midr); +CPUREGS_ATTR_RO(revidr_el1, revidr); +CPUREGS_ATTR_RO(smidr_el1, smidr); + +static struct attribute *cpuregs_id_attrs[] = { + &cpuregs_attr_midr_el1.attr, + &cpuregs_attr_revidr_el1.attr, + NULL +}; + +static const struct attribute_group cpuregs_attr_group = { + .attrs = cpuregs_id_attrs, + .name = "identification" +}; + +static struct attribute *sme_cpuregs_id_attrs[] = { + &cpuregs_attr_smidr_el1.attr, + NULL +}; + +static const struct attribute_group sme_cpuregs_attr_group = { + .attrs = sme_cpuregs_id_attrs, + .name = "identification" +}; + +static int cpuid_cpu_online(unsigned int cpu) +{ + int rc; + struct device *dev; + struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu); + + dev = get_cpu_device(cpu); + if (!dev) { + rc = -ENODEV; + goto out; + } + rc = kobject_add(&info->kobj, &dev->kobj, "regs"); + if (rc) + goto out; + rc = sysfs_create_group(&info->kobj, &cpuregs_attr_group); + if (rc) + kobject_del(&info->kobj); + if (system_supports_sme()) + rc = sysfs_merge_group(&info->kobj, &sme_cpuregs_attr_group); +out: + return rc; +} + +static int cpuid_cpu_offline(unsigned int cpu) +{ + struct device *dev; + struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu); + + dev = get_cpu_device(cpu); + if (!dev) + return -ENODEV; + if (info->kobj.parent) { + 
sysfs_remove_group(&info->kobj, &cpuregs_attr_group); + kobject_del(&info->kobj); + } + + return 0; +} + +static int __init cpuinfo_regs_init(void) +{ + int cpu, ret; + + for_each_possible_cpu(cpu) { + struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu); + + kobject_init(&info->kobj, &cpuregs_kobj_type); + } + + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "arm64/cpuinfo:online", + cpuid_cpu_online, cpuid_cpu_offline); + if (ret < 0) { + pr_err("cpuinfo: failed to register hotplug callbacks.\n"); + return ret; + } + return 0; +} +device_initcall(cpuinfo_regs_init); + +static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info) +{ + unsigned int cpu = smp_processor_id(); + u32 l1ip = CTR_L1IP(info->reg_ctr); + + switch (l1ip) { + case CTR_EL0_L1Ip_PIPT: + break; + case CTR_EL0_L1Ip_VPIPT: + set_bit(ICACHEF_VPIPT, &__icache_flags); + break; + case CTR_EL0_L1Ip_VIPT: + default: + /* Assume aliasing */ + set_bit(ICACHEF_ALIASING, &__icache_flags); + break; + } + + pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str(l1ip), cpu); +} + +static void __cpuinfo_store_cpu_32bit(struct cpuinfo_32bit *info) +{ + info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1); + info->reg_id_dfr1 = read_cpuid(ID_DFR1_EL1); + info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1); + info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1); + info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1); + info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1); + info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1); + info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1); + info->reg_id_isar6 = read_cpuid(ID_ISAR6_EL1); + info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1); + info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1); + info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1); + info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1); + info->reg_id_mmfr4 = read_cpuid(ID_MMFR4_EL1); + info->reg_id_mmfr5 = read_cpuid(ID_MMFR5_EL1); + info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1); + info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1); + info->reg_id_pfr2 = read_cpuid(ID_PFR2_EL1); + + info->reg_mvfr0 = read_cpuid(MVFR0_EL1); + info->reg_mvfr1 = read_cpuid(MVFR1_EL1); + info->reg_mvfr2 = read_cpuid(MVFR2_EL1); +} + +static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) +{ + info->reg_cntfrq = arch_timer_get_cntfrq(); + /* + * Use the effective value of the CTR_EL0 than the raw value + * exposed by the CPU. CTR_EL0.IDC field value must be interpreted + * with the CLIDR_EL1 fields to avoid triggering false warnings + * when there is a mismatch across the CPUs. Keep track of the + * effective value of the CTR_EL0 in our internal records for + * accurate sanity check and feature enablement. 
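[Editorial sketch, not part of the patch] The kobjects registered in cpuid_cpu_online() above surface as /sys/devices/system/cpu/cpuN/regs/identification/, per the arm64 CPU feature registers ABI. A hedged userspace sketch reading one of those files back (assumes a kernel built with this code):

#include <stdio.h>

int main(void)
{
	char buf[32];
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/regs/identification/midr_el1", "r");

	if (!f) {
		perror("midr_el1");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("cpu0 MIDR_EL1: %s", buf);
	fclose(f);
	return 0;
}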
+ */ + info->reg_ctr = read_cpuid_effective_cachetype(); + info->reg_dczid = read_cpuid(DCZID_EL0); + info->reg_midr = read_cpuid_id(); + info->reg_revidr = read_cpuid(REVIDR_EL1); + + info->reg_id_aa64dfr0 = read_cpuid(ID_AA64DFR0_EL1); + info->reg_id_aa64dfr1 = read_cpuid(ID_AA64DFR1_EL1); + info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1); + info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1); + info->reg_id_aa64isar2 = read_cpuid(ID_AA64ISAR2_EL1); + info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1); + info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1); + info->reg_id_aa64mmfr2 = read_cpuid(ID_AA64MMFR2_EL1); + info->reg_id_aa64mmfr3 = read_cpuid(ID_AA64MMFR3_EL1); + info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1); + info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1); + info->reg_id_aa64zfr0 = read_cpuid(ID_AA64ZFR0_EL1); + info->reg_id_aa64smfr0 = read_cpuid(ID_AA64SMFR0_EL1); + + if (id_aa64pfr1_mte(info->reg_id_aa64pfr1)) + info->reg_gmid = read_cpuid(GMID_EL1); + + if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) + __cpuinfo_store_cpu_32bit(&info->aarch32); + + cpuinfo_detect_icache_policy(info); +} + +void cpuinfo_store_cpu(void) +{ + struct cpuinfo_arm64 *info = this_cpu_ptr(&cpu_data); + __cpuinfo_store_cpu(info); + update_cpu_features(smp_processor_id(), info, &boot_cpu_data); +} + +void __init cpuinfo_store_boot_cpu(void) +{ + struct cpuinfo_arm64 *info = &per_cpu(cpu_data, 0); + __cpuinfo_store_cpu(info); + + boot_cpu_data = *info; + init_cpu_features(&boot_cpu_data); +} diff --git a/arch/arm64/kernel/crash_core.c b/arch/arm64/kernel/crash_core.c new file mode 100644 index 0000000000..66cde752cd --- /dev/null +++ b/arch/arm64/kernel/crash_core.c @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) Linaro. + * Copyright (C) Huawei Futurewei Technologies. + */ + +#include +#include +#include +#include +#include + +static inline u64 get_tcr_el1_t1sz(void); + +static inline u64 get_tcr_el1_t1sz(void) +{ + return (read_sysreg(tcr_el1) & TCR_T1SZ_MASK) >> TCR_T1SZ_OFFSET; +} + +void arch_crash_save_vmcoreinfo(void) +{ + VMCOREINFO_NUMBER(VA_BITS); + /* Please note VMCOREINFO_NUMBER() uses "%d", not "%x" */ + vmcoreinfo_append_str("NUMBER(MODULES_VADDR)=0x%lx\n", MODULES_VADDR); + vmcoreinfo_append_str("NUMBER(MODULES_END)=0x%lx\n", MODULES_END); + vmcoreinfo_append_str("NUMBER(VMALLOC_START)=0x%lx\n", VMALLOC_START); + vmcoreinfo_append_str("NUMBER(VMALLOC_END)=0x%lx\n", VMALLOC_END); + vmcoreinfo_append_str("NUMBER(VMEMMAP_START)=0x%lx\n", VMEMMAP_START); + vmcoreinfo_append_str("NUMBER(VMEMMAP_END)=0x%lx\n", VMEMMAP_END); + vmcoreinfo_append_str("NUMBER(kimage_voffset)=0x%llx\n", + kimage_voffset); + vmcoreinfo_append_str("NUMBER(PHYS_OFFSET)=0x%llx\n", + PHYS_OFFSET); + vmcoreinfo_append_str("NUMBER(TCR_EL1_T1SZ)=0x%llx\n", + get_tcr_el1_t1sz()); + vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset()); + vmcoreinfo_append_str("NUMBER(KERNELPACMASK)=0x%llx\n", + system_supports_address_auth() ? 
+ ptrauth_kernel_pac_mask() : 0); +} diff --git a/arch/arm64/kernel/crash_dump.c b/arch/arm64/kernel/crash_dump.c new file mode 100644 index 0000000000..670e4ce818 --- /dev/null +++ b/arch/arm64/kernel/crash_dump.c @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Routines for doing kexec-based kdump + * + * Copyright (C) 2017 Linaro Limited + * Author: AKASHI Takahiro + */ + +#include +#include +#include +#include +#include + +ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, + size_t csize, unsigned long offset) +{ + void *vaddr; + + if (!csize) + return 0; + + vaddr = memremap(__pfn_to_phys(pfn), PAGE_SIZE, MEMREMAP_WB); + if (!vaddr) + return -ENOMEM; + + csize = copy_to_iter(vaddr + offset, csize, iter); + + memunmap(vaddr); + + return csize; +} + +/** + * elfcorehdr_read - read from ELF core header + * @buf: buffer where the data is placed + * @count: number of bytes to read + * @ppos: address in the memory + * + * This function reads @count bytes from elf core header which exists + * on crash dump kernel's memory. + */ +ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos) +{ + memcpy(buf, phys_to_virt((phys_addr_t)*ppos), count); + *ppos += count; + + return count; +} diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c new file mode 100644 index 0000000000..64f2ecbdfe --- /dev/null +++ b/arch/arm64/kernel/debug-monitors.c @@ -0,0 +1,460 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * ARMv8 single-step debug support and mdscr context switching. + * + * Copyright (C) 2012 ARM Limited + * + * Author: Will Deacon + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +/* Determine debug architecture. */ +u8 debug_monitors_arch(void) +{ + return cpuid_feature_extract_unsigned_field(read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1), + ID_AA64DFR0_EL1_DebugVer_SHIFT); +} + +/* + * MDSCR access routines. + */ +static void mdscr_write(u32 mdscr) +{ + unsigned long flags; + flags = local_daif_save(); + write_sysreg(mdscr, mdscr_el1); + local_daif_restore(flags); +} +NOKPROBE_SYMBOL(mdscr_write); + +static u32 mdscr_read(void) +{ + return read_sysreg(mdscr_el1); +} +NOKPROBE_SYMBOL(mdscr_read); + +/* + * Allow root to disable self-hosted debug from userspace. + * This is useful if you want to connect an external JTAG debugger. + */ +static bool debug_enabled = true; + +static int create_debug_debugfs_entry(void) +{ + debugfs_create_bool("debug_enabled", 0644, NULL, &debug_enabled); + return 0; +} +fs_initcall(create_debug_debugfs_entry); + +static int __init early_debug_disable(char *buf) +{ + debug_enabled = false; + return 0; +} + +early_param("nodebugmon", early_debug_disable); + +/* + * Keep track of debug users on each core. + * The ref counts are per-cpu so we use a local_t type. 
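[Editorial sketch, not part of the patch] The debug_enabled flag above is exposed both as the nodebugmon command-line parameter and as a debugfs boolean created at the debugfs root, so typically /sys/kernel/debug/debug_enabled. A sketch of toggling it from userspace, assuming debugfs is mounted and the caller is root:

#include <stdio.h>

/*
 * Illustration: disable self-hosted debug, e.g. before attaching an
 * external JTAG debugger, as the comment above suggests.
 */
int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/debug_enabled", "w");

	if (!f) {
		perror("debug_enabled");
		return 1;
	}
	fputs("N\n", f);	/* debugfs bools accept Y/N or 1/0 */
	fclose(f);
	return 0;
}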
+ */ +static DEFINE_PER_CPU(int, mde_ref_count); +static DEFINE_PER_CPU(int, kde_ref_count); + +void enable_debug_monitors(enum dbg_active_el el) +{ + u32 mdscr, enable = 0; + + WARN_ON(preemptible()); + + if (this_cpu_inc_return(mde_ref_count) == 1) + enable = DBG_MDSCR_MDE; + + if (el == DBG_ACTIVE_EL1 && + this_cpu_inc_return(kde_ref_count) == 1) + enable |= DBG_MDSCR_KDE; + + if (enable && debug_enabled) { + mdscr = mdscr_read(); + mdscr |= enable; + mdscr_write(mdscr); + } +} +NOKPROBE_SYMBOL(enable_debug_monitors); + +void disable_debug_monitors(enum dbg_active_el el) +{ + u32 mdscr, disable = 0; + + WARN_ON(preemptible()); + + if (this_cpu_dec_return(mde_ref_count) == 0) + disable = ~DBG_MDSCR_MDE; + + if (el == DBG_ACTIVE_EL1 && + this_cpu_dec_return(kde_ref_count) == 0) + disable &= ~DBG_MDSCR_KDE; + + if (disable) { + mdscr = mdscr_read(); + mdscr &= disable; + mdscr_write(mdscr); + } +} +NOKPROBE_SYMBOL(disable_debug_monitors); + +/* + * OS lock clearing. + */ +static int clear_os_lock(unsigned int cpu) +{ + write_sysreg(0, osdlr_el1); + write_sysreg(0, oslar_el1); + isb(); + return 0; +} + +static int __init debug_monitors_init(void) +{ + return cpuhp_setup_state(CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING, + "arm64/debug_monitors:starting", + clear_os_lock, NULL); +} +postcore_initcall(debug_monitors_init); + +/* + * Single step API and exception handling. + */ +static void set_user_regs_spsr_ss(struct user_pt_regs *regs) +{ + regs->pstate |= DBG_SPSR_SS; +} +NOKPROBE_SYMBOL(set_user_regs_spsr_ss); + +static void clear_user_regs_spsr_ss(struct user_pt_regs *regs) +{ + regs->pstate &= ~DBG_SPSR_SS; +} +NOKPROBE_SYMBOL(clear_user_regs_spsr_ss); + +#define set_regs_spsr_ss(r) set_user_regs_spsr_ss(&(r)->user_regs) +#define clear_regs_spsr_ss(r) clear_user_regs_spsr_ss(&(r)->user_regs) + +static DEFINE_SPINLOCK(debug_hook_lock); +static LIST_HEAD(user_step_hook); +static LIST_HEAD(kernel_step_hook); + +static void register_debug_hook(struct list_head *node, struct list_head *list) +{ + spin_lock(&debug_hook_lock); + list_add_rcu(node, list); + spin_unlock(&debug_hook_lock); + +} + +static void unregister_debug_hook(struct list_head *node) +{ + spin_lock(&debug_hook_lock); + list_del_rcu(node); + spin_unlock(&debug_hook_lock); + synchronize_rcu(); +} + +void register_user_step_hook(struct step_hook *hook) +{ + register_debug_hook(&hook->node, &user_step_hook); +} + +void unregister_user_step_hook(struct step_hook *hook) +{ + unregister_debug_hook(&hook->node); +} + +void register_kernel_step_hook(struct step_hook *hook) +{ + register_debug_hook(&hook->node, &kernel_step_hook); +} + +void unregister_kernel_step_hook(struct step_hook *hook) +{ + unregister_debug_hook(&hook->node); +} + +/* + * Call registered single step handlers + * There is no Syndrome info to check for determining the handler. + * So we call all the registered handlers, until the right handler is + * found which returns zero. + */ +static int call_step_hook(struct pt_regs *regs, unsigned long esr) +{ + struct step_hook *hook; + struct list_head *list; + int retval = DBG_HOOK_ERROR; + + list = user_mode(regs) ? &user_step_hook : &kernel_step_hook; + + /* + * Since single-step exception disables interrupt, this function is + * entirely not preemptible, and we can use rcu list safely here. 
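[Editorial sketch, not part of the patch] register_kernel_step_hook()/call_step_hook() let in-kernel users such as kprobes and kgdb claim single-step exceptions; hooks are walked under RCU and the first one returning DBG_HOOK_HANDLED wins. A hedged sketch of a hypothetical consumer, with struct fields taken from how call_step_hook() uses them:

#include <asm/debug-monitors.h>
#include <linux/printk.h>
#include <linux/ptrace.h>

/* Hypothetical hook: observe kernel single-step traps, claim none. */
static int demo_step_fn(struct pt_regs *regs, unsigned long esr)
{
	pr_info_ratelimited("step at %pS\n", (void *)instruction_pointer(regs));
	return DBG_HOOK_ERROR;		/* let the next hook handle it */
}

static struct step_hook demo_step_hook = {
	.fn = demo_step_fn,
};

static void demo_hook_init(void)
{
	register_kernel_step_hook(&demo_step_hook);
}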
+ */ + list_for_each_entry_rcu(hook, list, node) { + retval = hook->fn(regs, esr); + if (retval == DBG_HOOK_HANDLED) + break; + } + + return retval; +} +NOKPROBE_SYMBOL(call_step_hook); + +static void send_user_sigtrap(int si_code) +{ + struct pt_regs *regs = current_pt_regs(); + + if (WARN_ON(!user_mode(regs))) + return; + + if (interrupts_enabled(regs)) + local_irq_enable(); + + arm64_force_sig_fault(SIGTRAP, si_code, instruction_pointer(regs), + "User debug trap"); +} + +static int single_step_handler(unsigned long unused, unsigned long esr, + struct pt_regs *regs) +{ + bool handler_found = false; + + /* + * If we are stepping a pending breakpoint, call the hw_breakpoint + * handler first. + */ + if (!reinstall_suspended_bps(regs)) + return 0; + + if (!handler_found && call_step_hook(regs, esr) == DBG_HOOK_HANDLED) + handler_found = true; + + if (!handler_found && user_mode(regs)) { + send_user_sigtrap(TRAP_TRACE); + + /* + * ptrace will disable single step unless explicitly + * asked to re-enable it. For other clients, it makes + * sense to leave it enabled (i.e. rewind the controls + * to the active-not-pending state). + */ + user_rewind_single_step(current); + } else if (!handler_found) { + pr_warn("Unexpected kernel single-step exception at EL1\n"); + /* + * Re-enable stepping since we know that we will be + * returning to regs. + */ + set_regs_spsr_ss(regs); + } + + return 0; +} +NOKPROBE_SYMBOL(single_step_handler); + +static LIST_HEAD(user_break_hook); +static LIST_HEAD(kernel_break_hook); + +void register_user_break_hook(struct break_hook *hook) +{ + register_debug_hook(&hook->node, &user_break_hook); +} + +void unregister_user_break_hook(struct break_hook *hook) +{ + unregister_debug_hook(&hook->node); +} + +void register_kernel_break_hook(struct break_hook *hook) +{ + register_debug_hook(&hook->node, &kernel_break_hook); +} + +void unregister_kernel_break_hook(struct break_hook *hook) +{ + unregister_debug_hook(&hook->node); +} + +static int call_break_hook(struct pt_regs *regs, unsigned long esr) +{ + struct break_hook *hook; + struct list_head *list; + int (*fn)(struct pt_regs *regs, unsigned long esr) = NULL; + + list = user_mode(regs) ? &user_break_hook : &kernel_break_hook; + + /* + * Since brk exception disables interrupt, this function is + * entirely not preemptible, and we can use rcu list safely here. + */ + list_for_each_entry_rcu(hook, list, node) { + unsigned long comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK; + + if ((comment & ~hook->mask) == hook->imm) + fn = hook->fn; + } + + return fn ? 
fn(regs, esr) : DBG_HOOK_ERROR; +} +NOKPROBE_SYMBOL(call_break_hook); + +static int brk_handler(unsigned long unused, unsigned long esr, + struct pt_regs *regs) +{ + if (call_break_hook(regs, esr) == DBG_HOOK_HANDLED) + return 0; + + if (user_mode(regs)) { + send_user_sigtrap(TRAP_BRKPT); + } else { + pr_warn("Unexpected kernel BRK exception at EL1\n"); + return -EFAULT; + } + + return 0; +} +NOKPROBE_SYMBOL(brk_handler); + +int aarch32_break_handler(struct pt_regs *regs) +{ + u32 arm_instr; + u16 thumb_instr; + bool bp = false; + void __user *pc = (void __user *)instruction_pointer(regs); + + if (!compat_user_mode(regs)) + return -EFAULT; + + if (compat_thumb_mode(regs)) { + /* get 16-bit Thumb instruction */ + __le16 instr; + get_user(instr, (__le16 __user *)pc); + thumb_instr = le16_to_cpu(instr); + if (thumb_instr == AARCH32_BREAK_THUMB2_LO) { + /* get second half of 32-bit Thumb-2 instruction */ + get_user(instr, (__le16 __user *)(pc + 2)); + thumb_instr = le16_to_cpu(instr); + bp = thumb_instr == AARCH32_BREAK_THUMB2_HI; + } else { + bp = thumb_instr == AARCH32_BREAK_THUMB; + } + } else { + /* 32-bit ARM instruction */ + __le32 instr; + get_user(instr, (__le32 __user *)pc); + arm_instr = le32_to_cpu(instr); + bp = (arm_instr & ~0xf0000000) == AARCH32_BREAK_ARM; + } + + if (!bp) + return -EFAULT; + + send_user_sigtrap(TRAP_BRKPT); + return 0; +} +NOKPROBE_SYMBOL(aarch32_break_handler); + +void __init debug_traps_init(void) +{ + hook_debug_fault_code(DBG_ESR_EVT_HWSS, single_step_handler, SIGTRAP, + TRAP_TRACE, "single-step handler"); + hook_debug_fault_code(DBG_ESR_EVT_BRK, brk_handler, SIGTRAP, + TRAP_BRKPT, "BRK handler"); +} + +/* Re-enable single step for syscall restarting. */ +void user_rewind_single_step(struct task_struct *task) +{ + /* + * If single step is active for this thread, then set SPSR.SS + * to 1 to avoid returning to the active-pending state. 
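[Editorial sketch, not part of the patch] brk_handler() above turns a BRK hit in userspace into SIGTRAP with si_code TRAP_BRKPT. A minimal sketch of the receiving end, assuming arm64; the handler has to step over the 4-byte BRK itself, since the kernel leaves the PC pointing at it:

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <ucontext.h>

static void trap_handler(int sig, siginfo_t *si, void *uc_void)
{
	ucontext_t *uc = uc_void;

	printf("SIGTRAP, si_code=%d, pc=0x%llx\n", si->si_code,
	       (unsigned long long)uc->uc_mcontext.pc);
	uc->uc_mcontext.pc += 4;	/* skip the BRK instruction */
}

int main(void)
{
	struct sigaction sa = {
		.sa_sigaction = trap_handler,
		.sa_flags = SA_SIGINFO,
	};

	sigaction(SIGTRAP, &sa, NULL);
	asm volatile("brk #0");
	puts("resumed after BRK");
	return 0;
}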
+ */ + if (test_tsk_thread_flag(task, TIF_SINGLESTEP)) + set_regs_spsr_ss(task_pt_regs(task)); +} +NOKPROBE_SYMBOL(user_rewind_single_step); + +void user_fastforward_single_step(struct task_struct *task) +{ + if (test_tsk_thread_flag(task, TIF_SINGLESTEP)) + clear_regs_spsr_ss(task_pt_regs(task)); +} + +void user_regs_reset_single_step(struct user_pt_regs *regs, + struct task_struct *task) +{ + if (test_tsk_thread_flag(task, TIF_SINGLESTEP)) + set_user_regs_spsr_ss(regs); + else + clear_user_regs_spsr_ss(regs); +} + +/* Kernel API */ +void kernel_enable_single_step(struct pt_regs *regs) +{ + WARN_ON(!irqs_disabled()); + set_regs_spsr_ss(regs); + mdscr_write(mdscr_read() | DBG_MDSCR_SS); + enable_debug_monitors(DBG_ACTIVE_EL1); +} +NOKPROBE_SYMBOL(kernel_enable_single_step); + +void kernel_disable_single_step(void) +{ + WARN_ON(!irqs_disabled()); + mdscr_write(mdscr_read() & ~DBG_MDSCR_SS); + disable_debug_monitors(DBG_ACTIVE_EL1); +} +NOKPROBE_SYMBOL(kernel_disable_single_step); + +int kernel_active_single_step(void) +{ + WARN_ON(!irqs_disabled()); + return mdscr_read() & DBG_MDSCR_SS; +} +NOKPROBE_SYMBOL(kernel_active_single_step); + +void kernel_rewind_single_step(struct pt_regs *regs) +{ + set_regs_spsr_ss(regs); +} + +/* ptrace API */ +void user_enable_single_step(struct task_struct *task) +{ + struct thread_info *ti = task_thread_info(task); + + if (!test_and_set_ti_thread_flag(ti, TIF_SINGLESTEP)) + set_regs_spsr_ss(task_pt_regs(task)); +} +NOKPROBE_SYMBOL(user_enable_single_step); + +void user_disable_single_step(struct task_struct *task) +{ + clear_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP); +} +NOKPROBE_SYMBOL(user_disable_single_step); diff --git a/arch/arm64/kernel/efi-header.S b/arch/arm64/kernel/efi-header.S new file mode 100644 index 0000000000..11d7f7de20 --- /dev/null +++ b/arch/arm64/kernel/efi-header.S @@ -0,0 +1,181 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2013 - 2017 Linaro, Ltd. + * Copyright (C) 2013, 2014 Red Hat, Inc. + */ + +#include +#include + + .macro efi_signature_nop +#ifdef CONFIG_EFI +.L_head: + /* + * This ccmp instruction has no meaningful effect except that + * its opcode forms the magic "MZ" signature required by UEFI. + */ + ccmp x18, #0, #0xd, pl +#else + /* + * Bootloaders may inspect the opcode at the start of the kernel + * image to decide if the kernel is capable of booting via UEFI. + * So put an ordinary NOP here, not the "MZ.." pseudo-nop above. + */ + nop +#endif + .endm + + .macro __EFI_PE_HEADER +#ifdef CONFIG_EFI + .set .Lpe_header_offset, . 
- .L_head + .long PE_MAGIC + .short IMAGE_FILE_MACHINE_ARM64 // Machine + .short .Lsection_count // NumberOfSections + .long 0 // TimeDateStamp + .long 0 // PointerToSymbolTable + .long 0 // NumberOfSymbols + .short .Lsection_table - .Loptional_header // SizeOfOptionalHeader + .short IMAGE_FILE_DEBUG_STRIPPED | \ + IMAGE_FILE_EXECUTABLE_IMAGE | \ + IMAGE_FILE_LINE_NUMS_STRIPPED // Characteristics + +.Loptional_header: + .short PE_OPT_MAGIC_PE32PLUS // PE32+ format + .byte 0x02 // MajorLinkerVersion + .byte 0x14 // MinorLinkerVersion + .long __initdata_begin - .Lefi_header_end // SizeOfCode + .long __pecoff_data_size // SizeOfInitializedData + .long 0 // SizeOfUninitializedData + .long __efistub_efi_pe_entry - .L_head // AddressOfEntryPoint + .long .Lefi_header_end - .L_head // BaseOfCode + + .quad 0 // ImageBase + .long SEGMENT_ALIGN // SectionAlignment + .long PECOFF_FILE_ALIGNMENT // FileAlignment + .short 0 // MajorOperatingSystemVersion + .short 0 // MinorOperatingSystemVersion + .short LINUX_EFISTUB_MAJOR_VERSION // MajorImageVersion + .short LINUX_EFISTUB_MINOR_VERSION // MinorImageVersion + .short 0 // MajorSubsystemVersion + .short 0 // MinorSubsystemVersion + .long 0 // Win32VersionValue + + .long _end - .L_head // SizeOfImage + + // Everything before the kernel image is considered part of the header + .long .Lefi_header_end - .L_head // SizeOfHeaders + .long 0 // CheckSum + .short IMAGE_SUBSYSTEM_EFI_APPLICATION // Subsystem + .short IMAGE_DLL_CHARACTERISTICS_NX_COMPAT // DllCharacteristics + .quad 0 // SizeOfStackReserve + .quad 0 // SizeOfStackCommit + .quad 0 // SizeOfHeapReserve + .quad 0 // SizeOfHeapCommit + .long 0 // LoaderFlags + .long (.Lsection_table - .) / 8 // NumberOfRvaAndSizes + + .quad 0 // ExportTable + .quad 0 // ImportTable + .quad 0 // ResourceTable + .quad 0 // ExceptionTable + .quad 0 // CertificationTable + .quad 0 // BaseRelocationTable + +#if defined(CONFIG_DEBUG_EFI) || defined(CONFIG_ARM64_BTI_KERNEL) + .long .Lefi_debug_table - .L_head // DebugTable + .long .Lefi_debug_table_size + + /* + * The debug table is referenced via its Relative Virtual Address (RVA), + * which is only defined for those parts of the image that are covered + * by a section declaration. Since this header is not covered by any + * section, the debug table must be emitted elsewhere. So stick it in + * the .init.rodata section instead. + * + * Note that the payloads themselves are permitted to have zero RVAs, + * which means we can simply put those right after the section headers. + */ + __INITRODATA + + .align 2 +.Lefi_debug_table: +#ifdef CONFIG_DEBUG_EFI + // EFI_IMAGE_DEBUG_DIRECTORY_ENTRY + .long 0 // Characteristics + .long 0 // TimeDateStamp + .short 0 // MajorVersion + .short 0 // MinorVersion + .long IMAGE_DEBUG_TYPE_CODEVIEW // Type + .long .Lefi_debug_entry_size // SizeOfData + .long 0 // RVA + .long .Lefi_debug_entry - .L_head // FileOffset +#endif +#ifdef CONFIG_ARM64_BTI_KERNEL + .long 0 // Characteristics + .long 0 // TimeDateStamp + .short 0 // MajorVersion + .short 0 // MinorVersion + .long IMAGE_DEBUG_TYPE_EX_DLLCHARACTERISTICS // Type + .long 4 // SizeOfData + .long 0 // RVA + .long .Lefi_dll_characteristics_ex - .L_head // FileOffset +#endif + .set .Lefi_debug_table_size, . 
- .Lefi_debug_table + .previous +#endif + + // Section table +.Lsection_table: + .ascii ".text\0\0\0" + .long __initdata_begin - .Lefi_header_end // VirtualSize + .long .Lefi_header_end - .L_head // VirtualAddress + .long __initdata_begin - .Lefi_header_end // SizeOfRawData + .long .Lefi_header_end - .L_head // PointerToRawData + + .long 0 // PointerToRelocations + .long 0 // PointerToLineNumbers + .short 0 // NumberOfRelocations + .short 0 // NumberOfLineNumbers + .long IMAGE_SCN_CNT_CODE | \ + IMAGE_SCN_MEM_READ | \ + IMAGE_SCN_MEM_EXECUTE // Characteristics + + .ascii ".data\0\0\0" + .long __pecoff_data_size // VirtualSize + .long __initdata_begin - .L_head // VirtualAddress + .long __pecoff_data_rawsize // SizeOfRawData + .long __initdata_begin - .L_head // PointerToRawData + + .long 0 // PointerToRelocations + .long 0 // PointerToLineNumbers + .short 0 // NumberOfRelocations + .short 0 // NumberOfLineNumbers + .long IMAGE_SCN_CNT_INITIALIZED_DATA | \ + IMAGE_SCN_MEM_READ | \ + IMAGE_SCN_MEM_WRITE // Characteristics + + .set .Lsection_count, (. - .Lsection_table) / 40 + +#ifdef CONFIG_DEBUG_EFI +.Lefi_debug_entry: + // EFI_IMAGE_DEBUG_CODEVIEW_NB10_ENTRY + .ascii "NB10" // Signature + .long 0 // Unknown + .long 0 // Unknown2 + .long 0 // Unknown3 + + .asciz VMLINUX_PATH + + .set .Lefi_debug_entry_size, . - .Lefi_debug_entry +#endif +#ifdef CONFIG_ARM64_BTI_KERNEL +.Lefi_dll_characteristics_ex: + .long IMAGE_DLLCHARACTERISTICS_EX_FORWARD_CFI_COMPAT +#endif + + .balign SEGMENT_ALIGN +.Lefi_header_end: +#else + .set .Lpe_header_offset, 0x0 +#endif + .endm diff --git a/arch/arm64/kernel/efi-rt-wrapper.S b/arch/arm64/kernel/efi-rt-wrapper.S new file mode 100644 index 0000000000..e8ae803662 --- /dev/null +++ b/arch/arm64/kernel/efi-rt-wrapper.S @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2018 Linaro Ltd + */ + +#include +#include + +SYM_FUNC_START(__efi_rt_asm_wrapper) + stp x29, x30, [sp, #-112]! + mov x29, sp + + /* + * Register x18 is designated as the 'platform' register by the AAPCS, + * which means firmware running at the same exception level as the OS + * (such as UEFI) should never touch it. + */ + stp x1, x18, [sp, #16] + + /* + * Preserve all callee saved registers and preserve the stack pointer + * value at the base of the EFI runtime stack so we can recover from + * synchronous exceptions occurring while executing the firmware + * routines. + */ + stp x19, x20, [sp, #32] + stp x21, x22, [sp, #48] + stp x23, x24, [sp, #64] + stp x25, x26, [sp, #80] + stp x27, x28, [sp, #96] + + ldr_l x16, efi_rt_stack_top + mov sp, x16 + stp x18, x29, [sp, #-16]! + + /* + * We are lucky enough that no EFI runtime services take more than + * 5 arguments, so all are passed in registers rather than via the + * stack. + */ + mov x8, x0 + mov x0, x2 + mov x1, x3 + mov x2, x4 + mov x3, x5 + mov x4, x6 + blr x8 + + mov x16, sp + mov sp, x29 + str xzr, [x16, #8] // clear recorded task SP value + + ldp x1, x2, [sp, #16] + cmp x2, x18 + ldp x29, x30, [sp], #112 + b.ne 0f + ret +0: + /* + * With CONFIG_SHADOW_CALL_STACK, the kernel uses x18 to store a + * shadow stack pointer, which we need to restore before returning to + * potentially instrumented code. This is safe because the wrapper is + * called with preemption disabled and a separate shadow stack is used + * for interrupts. 
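[Editorial sketch, not part of the patch] The efi_signature_nop/__EFI_PE_HEADER macros above give the arm64 Image a dual personality: the first instruction's encoding doubles as the PE/COFF "MZ" signature, while the arm64 boot magic "ARM\x64" sits at offset 56 of the Image header (per the arm64 booting documentation). A hedged userspace sketch that checks both:

#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
	unsigned char hdr[64];
	FILE *f;

	if (argc < 2)
		return 1;
	f = fopen(argv[1], "rb");
	if (!f || fread(hdr, 1, sizeof(hdr), f) != sizeof(hdr)) {
		perror(argv[1]);
		return 1;
	}
	fclose(f);

	printf("MZ signature : %s\n",
	       (hdr[0] == 'M' && hdr[1] == 'Z') ? "present" : "absent");
	printf("ARM64 magic  : %s\n",
	       memcmp(&hdr[56], "ARM\x64", 4) == 0 ? "present" : "absent");
	return 0;
}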
+ */ +#ifdef CONFIG_SHADOW_CALL_STACK + ldr_l x18, efi_rt_stack_top + ldr x18, [x18, #-16] +#endif + + b efi_handle_corrupted_x18 // tail call +SYM_FUNC_END(__efi_rt_asm_wrapper) + +SYM_CODE_START(__efi_rt_asm_recover) + mov sp, x30 + + ldr_l x16, efi_rt_stack_top // clear recorded task SP value + str xzr, [x16, #-8] + + ldp x19, x20, [sp, #32] + ldp x21, x22, [sp, #48] + ldp x23, x24, [sp, #64] + ldp x25, x26, [sp, #80] + ldp x27, x28, [sp, #96] + ldp x29, x30, [sp], #112 + ret +SYM_CODE_END(__efi_rt_asm_recover) diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c new file mode 100644 index 0000000000..2b478ca356 --- /dev/null +++ b/arch/arm64/kernel/efi.c @@ -0,0 +1,223 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Extensible Firmware Interface + * + * Based on Extensible Firmware Interface Specification version 2.4 + * + * Copyright (C) 2013, 2014 Linaro Ltd. + */ + +#include +#include +#include + +#include +#include + +static bool region_is_misaligned(const efi_memory_desc_t *md) +{ + if (PAGE_SIZE == EFI_PAGE_SIZE) + return false; + return !PAGE_ALIGNED(md->phys_addr) || + !PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT); +} + +/* + * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be + * executable, everything else can be mapped with the XN bits + * set. Also take the new (optional) RO/XP bits into account. + */ +static __init pteval_t create_mapping_protection(efi_memory_desc_t *md) +{ + u64 attr = md->attribute; + u32 type = md->type; + + if (type == EFI_MEMORY_MAPPED_IO) + return PROT_DEVICE_nGnRE; + + if (region_is_misaligned(md)) { + static bool __initdata code_is_misaligned; + + /* + * Regions that are not aligned to the OS page size cannot be + * mapped with strict permissions, as those might interfere + * with the permissions that are needed by the adjacent + * region's mapping. However, if we haven't encountered any + * misaligned runtime code regions so far, we can safely use + * non-executable permissions for non-code regions. + */ + code_is_misaligned |= (type == EFI_RUNTIME_SERVICES_CODE); + + return code_is_misaligned ? pgprot_val(PAGE_KERNEL_EXEC) + : pgprot_val(PAGE_KERNEL); + } + + /* R-- */ + if ((attr & (EFI_MEMORY_XP | EFI_MEMORY_RO)) == + (EFI_MEMORY_XP | EFI_MEMORY_RO)) + return pgprot_val(PAGE_KERNEL_RO); + + /* R-X */ + if (attr & EFI_MEMORY_RO) + return pgprot_val(PAGE_KERNEL_ROX); + + /* RW- */ + if (((attr & (EFI_MEMORY_RP | EFI_MEMORY_WP | EFI_MEMORY_XP)) == + EFI_MEMORY_XP) || + type != EFI_RUNTIME_SERVICES_CODE) + return pgprot_val(PAGE_KERNEL); + + /* RWX */ + return pgprot_val(PAGE_KERNEL_EXEC); +} + +/* we will fill this structure from the stub, so don't put it in .bss */ +struct screen_info screen_info __section(".data"); +EXPORT_SYMBOL(screen_info); + +int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md) +{ + pteval_t prot_val = create_mapping_protection(md); + bool page_mappings_only = (md->type == EFI_RUNTIME_SERVICES_CODE || + md->type == EFI_RUNTIME_SERVICES_DATA); + + /* + * If this region is not aligned to the page size used by the OS, the + * mapping will be rounded outwards, and may end up sharing a page + * frame with an adjacent runtime memory region. Given that the page + * table descriptor covering the shared page will be rewritten when the + * adjacent region gets mapped, we must avoid block mappings here so we + * don't have to worry about splitting them when that happens. 
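[Editorial sketch, not part of the patch] create_mapping_protection() above boils down to a small decision table over the UEFI RO/XP/RP/WP attributes and the region type. A compressed restatement as a standalone helper (illustrative; bit values are the UEFI memory attribute bits, and the real code returns pteval_t protections):

#include <stdint.h>

#define EFI_MEMORY_WP	(1ULL << 12)	/* write-protect */
#define EFI_MEMORY_RP	(1ULL << 13)	/* read-protect */
#define EFI_MEMORY_XP	(1ULL << 14)	/* execute-protect */
#define EFI_MEMORY_RO	(1ULL << 17)	/* read-only */

enum map_prot { MAP_RO, MAP_ROX, MAP_RW, MAP_RWX };

/* Restates the attribute checks in create_mapping_protection(). */
enum map_prot efi_region_prot(uint64_t attr, int is_runtime_code)
{
	if ((attr & (EFI_MEMORY_XP | EFI_MEMORY_RO)) ==
	    (EFI_MEMORY_XP | EFI_MEMORY_RO))
		return MAP_RO;				/* R-- */
	if (attr & EFI_MEMORY_RO)
		return MAP_ROX;				/* R-X */
	if (((attr & (EFI_MEMORY_RP | EFI_MEMORY_WP | EFI_MEMORY_XP)) ==
	     EFI_MEMORY_XP) || !is_runtime_code)
		return MAP_RW;				/* RW- */
	return MAP_RWX;					/* RWX, runtime code only */
}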
+ */ + if (region_is_misaligned(md)) + page_mappings_only = true; + + create_pgd_mapping(mm, md->phys_addr, md->virt_addr, + md->num_pages << EFI_PAGE_SHIFT, + __pgprot(prot_val | PTE_NG), page_mappings_only); + return 0; +} + +struct set_perm_data { + const efi_memory_desc_t *md; + bool has_bti; +}; + +static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data) +{ + struct set_perm_data *spd = data; + const efi_memory_desc_t *md = spd->md; + pte_t pte = READ_ONCE(*ptep); + + if (md->attribute & EFI_MEMORY_RO) + pte = set_pte_bit(pte, __pgprot(PTE_RDONLY)); + if (md->attribute & EFI_MEMORY_XP) + pte = set_pte_bit(pte, __pgprot(PTE_PXN)); + else if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && + system_supports_bti() && spd->has_bti) + pte = set_pte_bit(pte, __pgprot(PTE_GP)); + set_pte(ptep, pte); + return 0; +} + +int __init efi_set_mapping_permissions(struct mm_struct *mm, + efi_memory_desc_t *md, + bool has_bti) +{ + struct set_perm_data data = { md, has_bti }; + + BUG_ON(md->type != EFI_RUNTIME_SERVICES_CODE && + md->type != EFI_RUNTIME_SERVICES_DATA); + + if (region_is_misaligned(md)) + return 0; + + /* + * Calling apply_to_page_range() is only safe on regions that are + * guaranteed to be mapped down to pages. Since we are only called + * for regions that have been mapped using efi_create_mapping() above + * (and this is checked by the generic Memory Attributes table parsing + * routines), there is no need to check that again here. + */ + return apply_to_page_range(mm, md->virt_addr, + md->num_pages << EFI_PAGE_SHIFT, + set_permissions, &data); +} + +/* + * UpdateCapsule() depends on the system being shutdown via + * ResetSystem(). + */ +bool efi_poweroff_required(void) +{ + return efi_enabled(EFI_RUNTIME_SERVICES); +} + +asmlinkage efi_status_t efi_handle_corrupted_x18(efi_status_t s, const char *f) +{ + pr_err_ratelimited(FW_BUG "register x18 corrupted by EFI %s\n", f); + return s; +} + +static DEFINE_RAW_SPINLOCK(efi_rt_lock); + +void arch_efi_call_virt_setup(void) +{ + efi_virtmap_load(); + __efi_fpsimd_begin(); + raw_spin_lock(&efi_rt_lock); +} + +void arch_efi_call_virt_teardown(void) +{ + raw_spin_unlock(&efi_rt_lock); + __efi_fpsimd_end(); + efi_virtmap_unload(); +} + +asmlinkage u64 *efi_rt_stack_top __ro_after_init; + +asmlinkage efi_status_t __efi_rt_asm_recover(void); + +bool efi_runtime_fixup_exception(struct pt_regs *regs, const char *msg) +{ + /* Check whether the exception occurred while running the firmware */ + if (!current_in_efi() || regs->pc >= TASK_SIZE_64) + return false; + + pr_err(FW_BUG "Unable to handle %s in EFI runtime service\n", msg); + add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK); + clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); + + regs->regs[0] = EFI_ABORTED; + regs->regs[30] = efi_rt_stack_top[-1]; + regs->pc = (u64)__efi_rt_asm_recover; + + if (IS_ENABLED(CONFIG_SHADOW_CALL_STACK)) + regs->regs[18] = efi_rt_stack_top[-2]; + + return true; +} + +/* EFI requires 8 KiB of stack space for runtime services */ +static_assert(THREAD_SIZE >= SZ_8K); + +static int __init arm64_efi_rt_init(void) +{ + void *p; + + if (!efi_enabled(EFI_RUNTIME_SERVICES)) + return 0; + + p = __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, GFP_KERNEL, + NUMA_NO_NODE, &&l); +l: if (!p) { + pr_warn("Failed to allocate EFI runtime stack\n"); + clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); + return -ENOMEM; + } + + efi_rt_stack_top = p + THREAD_SIZE; + return 0; +} +core_initcall(arm64_efi_rt_init); diff --git a/arch/arm64/kernel/elfcore.c b/arch/arm64/kernel/elfcore.c 
new file mode 100644 index 0000000000..2e94d20c4a --- /dev/null +++ b/arch/arm64/kernel/elfcore.c @@ -0,0 +1,138 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include +#include +#include + +#include +#include + +#define for_each_mte_vma(cprm, i, m) \ + if (system_supports_mte()) \ + for (i = 0, m = cprm->vma_meta; \ + i < cprm->vma_count; \ + i++, m = cprm->vma_meta + i) \ + if (m->flags & VM_MTE) + +static unsigned long mte_vma_tag_dump_size(struct core_vma_metadata *m) +{ + return (m->dump_size >> PAGE_SHIFT) * MTE_PAGE_TAG_STORAGE; +} + +/* Derived from dump_user_range(); start/end must be page-aligned */ +static int mte_dump_tag_range(struct coredump_params *cprm, + unsigned long start, unsigned long len) +{ + int ret = 1; + unsigned long addr; + void *tags = NULL; + + for (addr = start; addr < start + len; addr += PAGE_SIZE) { + struct page *page = get_dump_page(addr); + + /* + * get_dump_page() returns NULL when encountering an empty + * page table entry that would otherwise have been filled with + * the zero page. Skip the equivalent tag dump which would + * have been all zeros. + */ + if (!page) { + dump_skip(cprm, MTE_PAGE_TAG_STORAGE); + continue; + } + + /* + * Pages mapped in user space as !pte_access_permitted() (e.g. + * PROT_EXEC only) may not have the PG_mte_tagged flag set. + */ + if (!page_mte_tagged(page)) { + put_page(page); + dump_skip(cprm, MTE_PAGE_TAG_STORAGE); + continue; + } + + if (!tags) { + tags = mte_allocate_tag_storage(); + if (!tags) { + put_page(page); + ret = 0; + break; + } + } + + mte_save_page_tags(page_address(page), tags); + put_page(page); + if (!dump_emit(cprm, tags, MTE_PAGE_TAG_STORAGE)) { + ret = 0; + break; + } + } + + if (tags) + mte_free_tag_storage(tags); + + return ret; +} + +Elf_Half elf_core_extra_phdrs(struct coredump_params *cprm) +{ + int i; + struct core_vma_metadata *m; + int vma_count = 0; + + for_each_mte_vma(cprm, i, m) + vma_count++; + + return vma_count; +} + +int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset) +{ + int i; + struct core_vma_metadata *m; + + for_each_mte_vma(cprm, i, m) { + struct elf_phdr phdr; + + phdr.p_type = PT_AARCH64_MEMTAG_MTE; + phdr.p_offset = offset; + phdr.p_vaddr = m->start; + phdr.p_paddr = 0; + phdr.p_filesz = mte_vma_tag_dump_size(m); + phdr.p_memsz = m->end - m->start; + offset += phdr.p_filesz; + phdr.p_flags = 0; + phdr.p_align = 0; + + if (!dump_emit(cprm, &phdr, sizeof(phdr))) + return 0; + } + + return 1; +} + +size_t elf_core_extra_data_size(struct coredump_params *cprm) +{ + int i; + struct core_vma_metadata *m; + size_t data_size = 0; + + for_each_mte_vma(cprm, i, m) + data_size += mte_vma_tag_dump_size(m); + + return data_size; +} + +int elf_core_write_extra_data(struct coredump_params *cprm) +{ + int i; + struct core_vma_metadata *m; + + for_each_mte_vma(cprm, i, m) { + if (!mte_dump_tag_range(cprm, m->start, m->dump_size)) + return 0; + } + + return 1; +} diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c new file mode 100644 index 0000000000..0fc94207e6 --- /dev/null +++ b/arch/arm64/kernel/entry-common.c @@ -0,0 +1,932 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Exception handling code + * + * Copyright (C) 2019 ARM Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Handle IRQ/context state management when entering from kernel mode. 
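[Editorial sketch, not part of the patch] The PT_AARCH64_MEMTAG_MTE phdrs written above carry packed allocation tags: one 4-bit tag per 16-byte granule, so a page's tags compress to PAGE_SIZE / 16 / 2 bytes, which is what MTE_PAGE_TAG_STORAGE works out to (128 bytes for 4 KiB pages). A small sketch of the size bookkeeping, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SIZE		4096UL
#define MTE_GRANULE_SIZE	16UL	/* bytes covered by one tag */
#define MTE_TAG_BITS		4UL
#define MTE_PAGE_TAG_STORAGE	(PAGE_SIZE / MTE_GRANULE_SIZE * MTE_TAG_BITS / 8)

int main(void)
{
	unsigned long vma_dump_size = 64UL * PAGE_SIZE;	/* example VMA */

	printf("tag bytes per page: %lu\n", MTE_PAGE_TAG_STORAGE);
	printf("tag bytes for VMA : %lu\n",
	       (vma_dump_size / PAGE_SIZE) * MTE_PAGE_TAG_STORAGE);
	return 0;
}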
+ * Before this function is called it is not safe to call regular kernel code, + * instrumentable code, or any code which may trigger an exception. + * + * This is intended to match the logic in irqentry_enter(), handling the kernel + * mode transitions only. + */ +static __always_inline void __enter_from_kernel_mode(struct pt_regs *regs) +{ + regs->exit_rcu = false; + + if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) { + lockdep_hardirqs_off(CALLER_ADDR0); + ct_irq_enter(); + trace_hardirqs_off_finish(); + + regs->exit_rcu = true; + return; + } + + lockdep_hardirqs_off(CALLER_ADDR0); + rcu_irq_enter_check_tick(); + trace_hardirqs_off_finish(); +} + +static void noinstr enter_from_kernel_mode(struct pt_regs *regs) +{ + __enter_from_kernel_mode(regs); + mte_check_tfsr_entry(); + mte_disable_tco_entry(current); +} + +/* + * Handle IRQ/context state management when exiting to kernel mode. + * After this function returns it is not safe to call regular kernel code, + * instrumentable code, or any code which may trigger an exception. + * + * This is intended to match the logic in irqentry_exit(), handling the kernel + * mode transitions only, and with preemption handled elsewhere. + */ +static __always_inline void __exit_to_kernel_mode(struct pt_regs *regs) +{ + lockdep_assert_irqs_disabled(); + + if (interrupts_enabled(regs)) { + if (regs->exit_rcu) { + trace_hardirqs_on_prepare(); + lockdep_hardirqs_on_prepare(); + ct_irq_exit(); + lockdep_hardirqs_on(CALLER_ADDR0); + return; + } + + trace_hardirqs_on(); + } else { + if (regs->exit_rcu) + ct_irq_exit(); + } +} + +static void noinstr exit_to_kernel_mode(struct pt_regs *regs) +{ + mte_check_tfsr_exit(); + __exit_to_kernel_mode(regs); +} + +/* + * Handle IRQ/context state management when entering from user mode. + * Before this function is called it is not safe to call regular kernel code, + * instrumentable code, or any code which may trigger an exception. + */ +static __always_inline void __enter_from_user_mode(void) +{ + lockdep_hardirqs_off(CALLER_ADDR0); + CT_WARN_ON(ct_state() != CONTEXT_USER); + user_exit_irqoff(); + trace_hardirqs_off_finish(); + mte_disable_tco_entry(current); +} + +static __always_inline void enter_from_user_mode(struct pt_regs *regs) +{ + __enter_from_user_mode(); +} + +/* + * Handle IRQ/context state management when exiting to user mode. + * After this function returns it is not safe to call regular kernel code, + * instrumentable code, or any code which may trigger an exception. + */ +static __always_inline void __exit_to_user_mode(void) +{ + trace_hardirqs_on_prepare(); + lockdep_hardirqs_on_prepare(); + user_enter_irqoff(); + lockdep_hardirqs_on(CALLER_ADDR0); +} + +static __always_inline void exit_to_user_mode_prepare(struct pt_regs *regs) +{ + unsigned long flags; + + local_daif_mask(); + + flags = read_thread_flags(); + if (unlikely(flags & _TIF_WORK_MASK)) + do_notify_resume(regs, flags); + + lockdep_sys_exit(); +} + +static __always_inline void exit_to_user_mode(struct pt_regs *regs) +{ + exit_to_user_mode_prepare(regs); + mte_check_tfsr_exit(); + __exit_to_user_mode(); +} + +asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs) +{ + exit_to_user_mode(regs); +} + +/* + * Handle IRQ/context state management when entering an NMI from user/kernel + * mode. Before this function is called it is not safe to call regular kernel + * code, instrumentable code, or any code which may trigger an exception. 
+ */ +static void noinstr arm64_enter_nmi(struct pt_regs *regs) +{ + regs->lockdep_hardirqs = lockdep_hardirqs_enabled(); + + __nmi_enter(); + lockdep_hardirqs_off(CALLER_ADDR0); + lockdep_hardirq_enter(); + ct_nmi_enter(); + + trace_hardirqs_off_finish(); + ftrace_nmi_enter(); +} + +/* + * Handle IRQ/context state management when exiting an NMI from user/kernel + * mode. After this function returns it is not safe to call regular kernel + * code, instrumentable code, or any code which may trigger an exception. + */ +static void noinstr arm64_exit_nmi(struct pt_regs *regs) +{ + bool restore = regs->lockdep_hardirqs; + + ftrace_nmi_exit(); + if (restore) { + trace_hardirqs_on_prepare(); + lockdep_hardirqs_on_prepare(); + } + + ct_nmi_exit(); + lockdep_hardirq_exit(); + if (restore) + lockdep_hardirqs_on(CALLER_ADDR0); + __nmi_exit(); +} + +/* + * Handle IRQ/context state management when entering a debug exception from + * kernel mode. Before this function is called it is not safe to call regular + * kernel code, instrumentable code, or any code which may trigger an exception. + */ +static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs) +{ + regs->lockdep_hardirqs = lockdep_hardirqs_enabled(); + + lockdep_hardirqs_off(CALLER_ADDR0); + ct_nmi_enter(); + + trace_hardirqs_off_finish(); +} + +/* + * Handle IRQ/context state management when exiting a debug exception from + * kernel mode. After this function returns it is not safe to call regular + * kernel code, instrumentable code, or any code which may trigger an exception. + */ +static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs) +{ + bool restore = regs->lockdep_hardirqs; + + if (restore) { + trace_hardirqs_on_prepare(); + lockdep_hardirqs_on_prepare(); + } + + ct_nmi_exit(); + if (restore) + lockdep_hardirqs_on(CALLER_ADDR0); +} + +#ifdef CONFIG_PREEMPT_DYNAMIC +DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched); +#define need_irq_preemption() \ + (static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched)) +#else +#define need_irq_preemption() (IS_ENABLED(CONFIG_PREEMPTION)) +#endif + +static void __sched arm64_preempt_schedule_irq(void) +{ + if (!need_irq_preemption()) + return; + + /* + * Note: thread_info::preempt_count includes both thread_info::count + * and thread_info::need_resched, and is not equivalent to + * preempt_count(). + */ + if (READ_ONCE(current_thread_info()->preempt_count) != 0) + return; + + /* + * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC + * priority masking is used the GIC irqchip driver will clear DAIF.IF + * using gic_arch_enable_irqs() for normal IRQs. If anything is set in + * DAIF we must have handled an NMI, so skip preemption. + */ + if (system_uses_irq_prio_masking() && read_sysreg(daif)) + return; + + /* + * Preempting a task from an IRQ means we leave copies of PSTATE + * on the stack. cpufeature's enable calls may modify PSTATE, but + * resuming one of these preempted tasks would undo those changes. + * + * Only allow a task to be preempted once cpufeatures have been + * enabled. 
+ */ + if (system_capabilities_finalized()) + preempt_schedule_irq(); +} + +static void do_interrupt_handler(struct pt_regs *regs, + void (*handler)(struct pt_regs *)) +{ + struct pt_regs *old_regs = set_irq_regs(regs); + + if (on_thread_stack()) + call_on_irq_stack(regs, handler); + else + handler(regs); + + set_irq_regs(old_regs); +} + +extern void (*handle_arch_irq)(struct pt_regs *); +extern void (*handle_arch_fiq)(struct pt_regs *); + +static void noinstr __panic_unhandled(struct pt_regs *regs, const char *vector, + unsigned long esr) +{ + arm64_enter_nmi(regs); + + console_verbose(); + + pr_crit("Unhandled %s exception on CPU%d, ESR 0x%016lx -- %s\n", + vector, smp_processor_id(), esr, + esr_get_class_string(esr)); + + __show_regs(regs); + panic("Unhandled exception"); +} + +#define UNHANDLED(el, regsize, vector) \ +asmlinkage void noinstr el##_##regsize##_##vector##_handler(struct pt_regs *regs) \ +{ \ + const char *desc = #regsize "-bit " #el " " #vector; \ + __panic_unhandled(regs, desc, read_sysreg(esr_el1)); \ +} + +#ifdef CONFIG_ARM64_ERRATUM_1463225 +static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa); + +static void cortex_a76_erratum_1463225_svc_handler(void) +{ + u32 reg, val; + + if (!unlikely(test_thread_flag(TIF_SINGLESTEP))) + return; + + if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225))) + return; + + __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1); + reg = read_sysreg(mdscr_el1); + val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE; + write_sysreg(val, mdscr_el1); + asm volatile("msr daifclr, #8"); + isb(); + + /* We will have taken a single-step exception by this point */ + + write_sysreg(reg, mdscr_el1); + __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0); +} + +static __always_inline bool +cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs) +{ + if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa)) + return false; + + /* + * We've taken a dummy step exception from the kernel to ensure + * that interrupts are re-enabled on the syscall path. Return back + * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions + * masked so that we can safely restore the mdscr and get on with + * handling the syscall. + */ + regs->pstate |= PSR_D_BIT; + return true; +} +#else /* CONFIG_ARM64_ERRATUM_1463225 */ +static void cortex_a76_erratum_1463225_svc_handler(void) { } +static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs) +{ + return false; +} +#endif /* CONFIG_ARM64_ERRATUM_1463225 */ + +/* + * As per the ABI exit SME streaming mode and clear the SVE state not + * shared with FPSIMD on syscall entry. + */ +static inline void fp_user_discard(void) +{ + /* + * If SME is active then exit streaming mode. If ZA is active + * then flush the SVE registers but leave userspace access to + * both SVE and SME enabled, otherwise disable SME for the + * task and fall through to disabling SVE too. This means + * that after a syscall we never have any streaming mode + * register state to track, if this changes the KVM code will + * need updating. 
+ */ + if (system_supports_sme()) + sme_smstop_sm(); + + if (!system_supports_sve()) + return; + + if (test_thread_flag(TIF_SVE)) { + unsigned int sve_vq_minus_one; + + sve_vq_minus_one = sve_vq_from_vl(task_get_sve_vl(current)) - 1; + sve_flush_live(true, sve_vq_minus_one); + } +} + +UNHANDLED(el1t, 64, sync) +UNHANDLED(el1t, 64, irq) +UNHANDLED(el1t, 64, fiq) +UNHANDLED(el1t, 64, error) + +static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr) +{ + unsigned long far = read_sysreg(far_el1); + + enter_from_kernel_mode(regs); + local_daif_inherit(regs); + do_mem_abort(far, esr, regs); + local_daif_mask(); + exit_to_kernel_mode(regs); +} + +static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr) +{ + unsigned long far = read_sysreg(far_el1); + + enter_from_kernel_mode(regs); + local_daif_inherit(regs); + do_sp_pc_abort(far, esr, regs); + local_daif_mask(); + exit_to_kernel_mode(regs); +} + +static void noinstr el1_undef(struct pt_regs *regs, unsigned long esr) +{ + enter_from_kernel_mode(regs); + local_daif_inherit(regs); + do_el1_undef(regs, esr); + local_daif_mask(); + exit_to_kernel_mode(regs); +} + +static void noinstr el1_bti(struct pt_regs *regs, unsigned long esr) +{ + enter_from_kernel_mode(regs); + local_daif_inherit(regs); + do_el1_bti(regs, esr); + local_daif_mask(); + exit_to_kernel_mode(regs); +} + +static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr) +{ + unsigned long far = read_sysreg(far_el1); + + arm64_enter_el1_dbg(regs); + if (!cortex_a76_erratum_1463225_debug_handler(regs)) + do_debug_exception(far, esr, regs); + arm64_exit_el1_dbg(regs); +} + +static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr) +{ + enter_from_kernel_mode(regs); + local_daif_inherit(regs); + do_el1_fpac(regs, esr); + local_daif_mask(); + exit_to_kernel_mode(regs); +} + +asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs) +{ + unsigned long esr = read_sysreg(esr_el1); + + switch (ESR_ELx_EC(esr)) { + case ESR_ELx_EC_DABT_CUR: + case ESR_ELx_EC_IABT_CUR: + el1_abort(regs, esr); + break; + /* + * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a + * recursive exception when trying to push the initial pt_regs. 
+ */ + case ESR_ELx_EC_PC_ALIGN: + el1_pc(regs, esr); + break; + case ESR_ELx_EC_SYS64: + case ESR_ELx_EC_UNKNOWN: + el1_undef(regs, esr); + break; + case ESR_ELx_EC_BTI: + el1_bti(regs, esr); + break; + case ESR_ELx_EC_BREAKPT_CUR: + case ESR_ELx_EC_SOFTSTP_CUR: + case ESR_ELx_EC_WATCHPT_CUR: + case ESR_ELx_EC_BRK64: + el1_dbg(regs, esr); + break; + case ESR_ELx_EC_FPAC: + el1_fpac(regs, esr); + break; + default: + __panic_unhandled(regs, "64-bit el1h sync", esr); + } +} + +static __always_inline void __el1_pnmi(struct pt_regs *regs, + void (*handler)(struct pt_regs *)) +{ + arm64_enter_nmi(regs); + do_interrupt_handler(regs, handler); + arm64_exit_nmi(regs); +} + +static __always_inline void __el1_irq(struct pt_regs *regs, + void (*handler)(struct pt_regs *)) +{ + enter_from_kernel_mode(regs); + + irq_enter_rcu(); + do_interrupt_handler(regs, handler); + irq_exit_rcu(); + + arm64_preempt_schedule_irq(); + + exit_to_kernel_mode(regs); +} +static void noinstr el1_interrupt(struct pt_regs *regs, + void (*handler)(struct pt_regs *)) +{ + write_sysreg(DAIF_PROCCTX_NOIRQ, daif); + + if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs)) + __el1_pnmi(regs, handler); + else + __el1_irq(regs, handler); +} + +asmlinkage void noinstr el1h_64_irq_handler(struct pt_regs *regs) +{ + el1_interrupt(regs, handle_arch_irq); +} + +asmlinkage void noinstr el1h_64_fiq_handler(struct pt_regs *regs) +{ + el1_interrupt(regs, handle_arch_fiq); +} + +asmlinkage void noinstr el1h_64_error_handler(struct pt_regs *regs) +{ + unsigned long esr = read_sysreg(esr_el1); + + local_daif_restore(DAIF_ERRCTX); + arm64_enter_nmi(regs); + do_serror(regs, esr); + arm64_exit_nmi(regs); +} + +static void noinstr el0_da(struct pt_regs *regs, unsigned long esr) +{ + unsigned long far = read_sysreg(far_el1); + + enter_from_user_mode(regs); + local_daif_restore(DAIF_PROCCTX); + do_mem_abort(far, esr, regs); + exit_to_user_mode(regs); +} + +static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr) +{ + unsigned long far = read_sysreg(far_el1); + + /* + * We've taken an instruction abort from userspace and not yet + * re-enabled IRQs. If the address is a kernel address, apply + * BP hardening prior to enabling IRQs and pre-emption. 
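The hardening decision above (like the BIT(55) test on the interrupted PC in el0_interrupt() further down) comes down to whether an address belongs to the user (TTBR0) or kernel (TTBR1) half of the address space, which the architecture keys off VA bit 55. A toy stand-in for that check, with an invented helper name and arbitrary example addresses:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Bit 55 of the VA selects TTBR1 (kernel) over TTBR0 (user). */
static bool demo_is_kernel_addr(uint64_t va)
{
        return va & (UINT64_C(1) << 55);
}

int main(void)
{
        printf("0x0000aaaade000000 kernel? %d\n",
               demo_is_kernel_addr(UINT64_C(0x0000aaaade000000)));
        printf("0xffff800008001234 kernel? %d\n",
               demo_is_kernel_addr(UINT64_C(0xffff800008001234)));
        return 0;
}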
+ */ + if (!is_ttbr0_addr(far)) + arm64_apply_bp_hardening(); + + enter_from_user_mode(regs); + local_daif_restore(DAIF_PROCCTX); + do_mem_abort(far, esr, regs); + exit_to_user_mode(regs); +} + +static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr) +{ + enter_from_user_mode(regs); + local_daif_restore(DAIF_PROCCTX); + do_fpsimd_acc(esr, regs); + exit_to_user_mode(regs); +} + +static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr) +{ + enter_from_user_mode(regs); + local_daif_restore(DAIF_PROCCTX); + do_sve_acc(esr, regs); + exit_to_user_mode(regs); +} + +static void noinstr el0_sme_acc(struct pt_regs *regs, unsigned long esr) +{ + enter_from_user_mode(regs); + local_daif_restore(DAIF_PROCCTX); + do_sme_acc(esr, regs); + exit_to_user_mode(regs); +} + +static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr) +{ + enter_from_user_mode(regs); + local_daif_restore(DAIF_PROCCTX); + do_fpsimd_exc(esr, regs); + exit_to_user_mode(regs); +} + +static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr) +{ + enter_from_user_mode(regs); + local_daif_restore(DAIF_PROCCTX); + do_el0_sys(esr, regs); + exit_to_user_mode(regs); +} + +static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr) +{ + unsigned long far = read_sysreg(far_el1); + + if (!is_ttbr0_addr(instruction_pointer(regs))) + arm64_apply_bp_hardening(); + + enter_from_user_mode(regs); + local_daif_restore(DAIF_PROCCTX); + do_sp_pc_abort(far, esr, regs); + exit_to_user_mode(regs); +} + +static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr) +{ + enter_from_user_mode(regs); + local_daif_restore(DAIF_PROCCTX); + do_sp_pc_abort(regs->sp, esr, regs); + exit_to_user_mode(regs); +} + +static void noinstr el0_undef(struct pt_regs *regs, unsigned long esr) +{ + enter_from_user_mode(regs); + local_daif_restore(DAIF_PROCCTX); + do_el0_undef(regs, esr); + exit_to_user_mode(regs); +} + +static void noinstr el0_bti(struct pt_regs *regs) +{ + enter_from_user_mode(regs); + local_daif_restore(DAIF_PROCCTX); + do_el0_bti(regs); + exit_to_user_mode(regs); +} + +static void noinstr el0_mops(struct pt_regs *regs, unsigned long esr) +{ + enter_from_user_mode(regs); + local_daif_restore(DAIF_PROCCTX); + do_el0_mops(regs, esr); + exit_to_user_mode(regs); +} + +static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr) +{ + enter_from_user_mode(regs); + local_daif_restore(DAIF_PROCCTX); + bad_el0_sync(regs, 0, esr); + exit_to_user_mode(regs); +} + +static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr) +{ + /* Only watchpoints write FAR_EL1, otherwise its UNKNOWN */ + unsigned long far = read_sysreg(far_el1); + + enter_from_user_mode(regs); + do_debug_exception(far, esr, regs); + local_daif_restore(DAIF_PROCCTX); + exit_to_user_mode(regs); +} + +static void noinstr el0_svc(struct pt_regs *regs) +{ + enter_from_user_mode(regs); + cortex_a76_erratum_1463225_svc_handler(); + fp_user_discard(); + local_daif_restore(DAIF_PROCCTX); + do_el0_svc(regs); + exit_to_user_mode(regs); +} + +static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr) +{ + enter_from_user_mode(regs); + local_daif_restore(DAIF_PROCCTX); + do_el0_fpac(regs, esr); + exit_to_user_mode(regs); +} + +asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs) +{ + unsigned long esr = read_sysreg(esr_el1); + + switch (ESR_ELx_EC(esr)) { + case ESR_ELx_EC_SVC64: + el0_svc(regs); + break; + case ESR_ELx_EC_DABT_LOW: + el0_da(regs, esr); + break; + case ESR_ELx_EC_IABT_LOW: + 
el0_ia(regs, esr); + break; + case ESR_ELx_EC_FP_ASIMD: + el0_fpsimd_acc(regs, esr); + break; + case ESR_ELx_EC_SVE: + el0_sve_acc(regs, esr); + break; + case ESR_ELx_EC_SME: + el0_sme_acc(regs, esr); + break; + case ESR_ELx_EC_FP_EXC64: + el0_fpsimd_exc(regs, esr); + break; + case ESR_ELx_EC_SYS64: + case ESR_ELx_EC_WFx: + el0_sys(regs, esr); + break; + case ESR_ELx_EC_SP_ALIGN: + el0_sp(regs, esr); + break; + case ESR_ELx_EC_PC_ALIGN: + el0_pc(regs, esr); + break; + case ESR_ELx_EC_UNKNOWN: + el0_undef(regs, esr); + break; + case ESR_ELx_EC_BTI: + el0_bti(regs); + break; + case ESR_ELx_EC_MOPS: + el0_mops(regs, esr); + break; + case ESR_ELx_EC_BREAKPT_LOW: + case ESR_ELx_EC_SOFTSTP_LOW: + case ESR_ELx_EC_WATCHPT_LOW: + case ESR_ELx_EC_BRK64: + el0_dbg(regs, esr); + break; + case ESR_ELx_EC_FPAC: + el0_fpac(regs, esr); + break; + default: + el0_inv(regs, esr); + } +} + +static void noinstr el0_interrupt(struct pt_regs *regs, + void (*handler)(struct pt_regs *)) +{ + enter_from_user_mode(regs); + + write_sysreg(DAIF_PROCCTX_NOIRQ, daif); + + if (regs->pc & BIT(55)) + arm64_apply_bp_hardening(); + + irq_enter_rcu(); + do_interrupt_handler(regs, handler); + irq_exit_rcu(); + + exit_to_user_mode(regs); +} + +static void noinstr __el0_irq_handler_common(struct pt_regs *regs) +{ + el0_interrupt(regs, handle_arch_irq); +} + +asmlinkage void noinstr el0t_64_irq_handler(struct pt_regs *regs) +{ + __el0_irq_handler_common(regs); +} + +static void noinstr __el0_fiq_handler_common(struct pt_regs *regs) +{ + el0_interrupt(regs, handle_arch_fiq); +} + +asmlinkage void noinstr el0t_64_fiq_handler(struct pt_regs *regs) +{ + __el0_fiq_handler_common(regs); +} + +static void noinstr __el0_error_handler_common(struct pt_regs *regs) +{ + unsigned long esr = read_sysreg(esr_el1); + + enter_from_user_mode(regs); + local_daif_restore(DAIF_ERRCTX); + arm64_enter_nmi(regs); + do_serror(regs, esr); + arm64_exit_nmi(regs); + local_daif_restore(DAIF_PROCCTX); + exit_to_user_mode(regs); +} + +asmlinkage void noinstr el0t_64_error_handler(struct pt_regs *regs) +{ + __el0_error_handler_common(regs); +} + +#ifdef CONFIG_COMPAT +static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr) +{ + enter_from_user_mode(regs); + local_daif_restore(DAIF_PROCCTX); + do_el0_cp15(esr, regs); + exit_to_user_mode(regs); +} + +static void noinstr el0_svc_compat(struct pt_regs *regs) +{ + enter_from_user_mode(regs); + cortex_a76_erratum_1463225_svc_handler(); + local_daif_restore(DAIF_PROCCTX); + do_el0_svc_compat(regs); + exit_to_user_mode(regs); +} + +asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs) +{ + unsigned long esr = read_sysreg(esr_el1); + + switch (ESR_ELx_EC(esr)) { + case ESR_ELx_EC_SVC32: + el0_svc_compat(regs); + break; + case ESR_ELx_EC_DABT_LOW: + el0_da(regs, esr); + break; + case ESR_ELx_EC_IABT_LOW: + el0_ia(regs, esr); + break; + case ESR_ELx_EC_FP_ASIMD: + el0_fpsimd_acc(regs, esr); + break; + case ESR_ELx_EC_FP_EXC32: + el0_fpsimd_exc(regs, esr); + break; + case ESR_ELx_EC_PC_ALIGN: + el0_pc(regs, esr); + break; + case ESR_ELx_EC_UNKNOWN: + case ESR_ELx_EC_CP14_MR: + case ESR_ELx_EC_CP14_LS: + case ESR_ELx_EC_CP14_64: + el0_undef(regs, esr); + break; + case ESR_ELx_EC_CP15_32: + case ESR_ELx_EC_CP15_64: + el0_cp15(regs, esr); + break; + case ESR_ELx_EC_BREAKPT_LOW: + case ESR_ELx_EC_SOFTSTP_LOW: + case ESR_ELx_EC_WATCHPT_LOW: + case ESR_ELx_EC_BKPT32: + el0_dbg(regs, esr); + break; + default: + el0_inv(regs, esr); + } +} + +asmlinkage void noinstr el0t_32_irq_handler(struct pt_regs 
*regs) +{ + __el0_irq_handler_common(regs); +} + +asmlinkage void noinstr el0t_32_fiq_handler(struct pt_regs *regs) +{ + __el0_fiq_handler_common(regs); +} + +asmlinkage void noinstr el0t_32_error_handler(struct pt_regs *regs) +{ + __el0_error_handler_common(regs); +} +#else /* CONFIG_COMPAT */ +UNHANDLED(el0t, 32, sync) +UNHANDLED(el0t, 32, irq) +UNHANDLED(el0t, 32, fiq) +UNHANDLED(el0t, 32, error) +#endif /* CONFIG_COMPAT */ + +#ifdef CONFIG_VMAP_STACK +asmlinkage void noinstr __noreturn handle_bad_stack(struct pt_regs *regs) +{ + unsigned long esr = read_sysreg(esr_el1); + unsigned long far = read_sysreg(far_el1); + + arm64_enter_nmi(regs); + panic_bad_stack(regs, esr, far); +} +#endif /* CONFIG_VMAP_STACK */ + +#ifdef CONFIG_ARM_SDE_INTERFACE +asmlinkage noinstr unsigned long +__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg) +{ + unsigned long ret; + + /* + * We didn't take an exception to get here, so the HW hasn't + * set/cleared bits in PSTATE that we may rely on. + * + * The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to + * whether PSTATE bits are inherited unchanged or generated from + * scratch, and the TF-A implementation always clears PAN and always + * clears UAO. There are no other known implementations. + * + * Subsequent revisions (ARM DEN 0054B) follow the usual rules for how + * PSTATE is modified upon architectural exceptions, and so PAN is + * either inherited or set per SCTLR_ELx.SPAN, and UAO is always + * cleared. + * + * We must explicitly reset PAN to the expected state, including + * clearing it when the host isn't using it, in case a VM had it set. + */ + if (system_uses_hw_pan()) + set_pstate_pan(1); + else if (cpu_has_pan()) + set_pstate_pan(0); + + arm64_enter_nmi(regs); + ret = do_sdei_event(regs, arg); + arm64_exit_nmi(regs); + + return ret; +} +#endif /* CONFIG_ARM_SDE_INTERFACE */ diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S new file mode 100644 index 0000000000..6325db1a21 --- /dev/null +++ b/arch/arm64/kernel/entry-fpsimd.S @@ -0,0 +1,134 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * FP/SIMD state saving and restoring + * + * Copyright (C) 2012 ARM Ltd. + * Author: Catalin Marinas + */ + +#include + +#include +#include + +/* + * Save the FP registers. + * + * x0 - pointer to struct fpsimd_state + */ +SYM_FUNC_START(fpsimd_save_state) + fpsimd_save x0, 8 + ret +SYM_FUNC_END(fpsimd_save_state) + +/* + * Load the FP registers. + * + * x0 - pointer to struct fpsimd_state + */ +SYM_FUNC_START(fpsimd_load_state) + fpsimd_restore x0, 8 + ret +SYM_FUNC_END(fpsimd_load_state) + +#ifdef CONFIG_ARM64_SVE + +/* + * Save the SVE state + * + * x0 - pointer to buffer for state + * x1 - pointer to storage for FPSR + * x2 - Save FFR if non-zero + */ +SYM_FUNC_START(sve_save_state) + sve_save 0, x1, x2, 3 + ret +SYM_FUNC_END(sve_save_state) + +/* + * Load the SVE state + * + * x0 - pointer to buffer for state + * x1 - pointer to storage for FPSR + * x2 - Restore FFR if non-zero + */ +SYM_FUNC_START(sve_load_state) + sve_load 0, x1, x2, 4 + ret +SYM_FUNC_END(sve_load_state) + +SYM_FUNC_START(sve_get_vl) + _sve_rdvl 0, 1 + ret +SYM_FUNC_END(sve_get_vl) + +SYM_FUNC_START(sve_set_vq) + sve_load_vq x0, x1, x2 + ret +SYM_FUNC_END(sve_set_vq) + +/* + * Zero all SVE registers but the first 128-bits of each vector + * + * VQ must already be configured by caller, any further updates of VQ + * will need to ensure that the register state remains valid. + * + * x0 = include FFR? 
+ * x1 = VQ - 1 + */ +SYM_FUNC_START(sve_flush_live) + cbz x1, 1f // A VQ-1 of 0 is 128 bits so no extra Z state + sve_flush_z +1: sve_flush_p + tbz x0, #0, 2f + sve_flush_ffr +2: ret +SYM_FUNC_END(sve_flush_live) + +#endif /* CONFIG_ARM64_SVE */ + +#ifdef CONFIG_ARM64_SME + +SYM_FUNC_START(sme_get_vl) + _sme_rdsvl 0, 1 + ret +SYM_FUNC_END(sme_get_vl) + +SYM_FUNC_START(sme_set_vq) + sme_load_vq x0, x1, x2 + ret +SYM_FUNC_END(sme_set_vq) + +/* + * Save the ZA and ZT state + * + * x0 - pointer to buffer for state + * x1 - number of ZT registers to save + */ +SYM_FUNC_START(sme_save_state) + _sme_rdsvl 2, 1 // x2 = VL/8 + sme_save_za 0, x2, 12 // Leaves x0 pointing to the end of ZA + + cbz x1, 1f + _str_zt 0 +1: + ret +SYM_FUNC_END(sme_save_state) + +/* + * Load the ZA and ZT state + * + * x0 - pointer to buffer for state + * x1 - number of ZT registers to save + */ +SYM_FUNC_START(sme_load_state) + _sme_rdsvl 2, 1 // x2 = VL/8 + sme_load_za 0, x2, 12 // Leaves x0 pointing to the end of ZA + + cbz x1, 1f + _ldr_zt 0 +1: + ret +SYM_FUNC_END(sme_load_state) + +#endif /* CONFIG_ARM64_SME */ diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S new file mode 100644 index 0000000000..f0c16640ef --- /dev/null +++ b/arch/arm64/kernel/entry-ftrace.S @@ -0,0 +1,353 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * arch/arm64/kernel/entry-ftrace.S + * + * Copyright (C) 2013 Linaro Limited + * Author: AKASHI Takahiro + */ + +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS +/* + * Due to -fpatchable-function-entry=2, the compiler has placed two NOPs before + * the regular function prologue. For an enabled callsite, ftrace_init_nop() and + * ftrace_make_call() have patched those NOPs to: + * + * MOV X9, LR + * BL ftrace_caller + * + * Each instrumented function follows the AAPCS, so here x0-x8 and x18-x30 are + * live (x18 holds the Shadow Call Stack pointer), and x9-x17 are safe to + * clobber. + * + * We save the callsite's context into a struct ftrace_regs before invoking any + * ftrace callbacks. So that we can get a sensible backtrace, we create frame + * records for the callsite and the ftrace entry assembly. This is not + * sufficient for reliable stacktrace: until we create the callsite stack + * record, its caller is missing from the LR and existing chain of frame + * records. + */ +SYM_CODE_START(ftrace_caller) + bti c + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS + /* + * The literal pointer to the ops is at an 8-byte aligned boundary + * which is either 12 or 16 bytes before the BL instruction in the call + * site. See ftrace_call_adjust() for details. + * + * Therefore here the LR points at `literal + 16` or `literal + 20`, + * and we can find the address of the literal in either case by + * aligning to an 8-byte boundary and subtracting 16. We do the + * alignment first as this allows us to fold the subtraction into the + * LDR. + */ + bic x11, x30, 0x7 + ldr x11, [x11, #-(4 * AARCH64_INSN_SIZE)] // op + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS + /* + * If the op has a direct call, handle it immediately without + * saving/restoring registers. 
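The literal lookup described in the comment above is plain address arithmetic: the BIC/LDR pair aligns the return address down to 8 bytes and reads 16 bytes (4 * AARCH64_INSN_SIZE) below that, which lands on the literal whether the LR was at literal + 16 or literal + 20. A hypothetical worked example:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Align the return address down to 8 bytes, then step back 16 bytes
 * (4 * AARCH64_INSN_SIZE) to reach the ops literal. */
static uint64_t demo_ops_literal(uint64_t lr)
{
        return (lr & ~UINT64_C(7)) - 16;
}

int main(void)
{
        uint64_t literal = 0xffff800008123458;  /* any 8-byte aligned slot */

        assert(demo_ops_literal(literal + 16) == literal);
        assert(demo_ops_literal(literal + 20) == literal);
        printf("literal recovered from both callsite layouts\n");
        return 0;
}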
+ */ + ldr x17, [x11, #FTRACE_OPS_DIRECT_CALL] // op->direct_call + cbnz x17, ftrace_caller_direct +#endif +#endif + + /* Save original SP */ + mov x10, sp + + /* Make room for ftrace regs, plus two frame records */ + sub sp, sp, #(FREGS_SIZE + 32) + + /* Save function arguments */ + stp x0, x1, [sp, #FREGS_X0] + stp x2, x3, [sp, #FREGS_X2] + stp x4, x5, [sp, #FREGS_X4] + stp x6, x7, [sp, #FREGS_X6] + str x8, [sp, #FREGS_X8] + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS + str xzr, [sp, #FREGS_DIRECT_TRAMP] +#endif + + /* Save the callsite's FP, LR, SP */ + str x29, [sp, #FREGS_FP] + str x9, [sp, #FREGS_LR] + str x10, [sp, #FREGS_SP] + + /* Save the PC after the ftrace callsite */ + str x30, [sp, #FREGS_PC] + + /* Create a frame record for the callsite above the ftrace regs */ + stp x29, x9, [sp, #FREGS_SIZE + 16] + add x29, sp, #FREGS_SIZE + 16 + + /* Create our frame record above the ftrace regs */ + stp x29, x30, [sp, #FREGS_SIZE] + add x29, sp, #FREGS_SIZE + + /* Prepare arguments for the the tracer func */ + sub x0, x30, #AARCH64_INSN_SIZE // ip (callsite's BL insn) + mov x1, x9 // parent_ip (callsite's LR) + mov x3, sp // regs + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS + mov x2, x11 // op + ldr x4, [x2, #FTRACE_OPS_FUNC] // op->func + blr x4 // op->func(ip, parent_ip, op, regs) + +#else + ldr_l x2, function_trace_op // op + +SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL) + bl ftrace_stub // func(ip, parent_ip, op, regs) +#endif + +/* + * At the callsite x0-x8 and x19-x30 were live. Any C code will have preserved + * x19-x29 per the AAPCS, and we created frame records upon entry, so we need + * to restore x0-x8, x29, and x30. + */ + /* Restore function arguments */ + ldp x0, x1, [sp, #FREGS_X0] + ldp x2, x3, [sp, #FREGS_X2] + ldp x4, x5, [sp, #FREGS_X4] + ldp x6, x7, [sp, #FREGS_X6] + ldr x8, [sp, #FREGS_X8] + + /* Restore the callsite's FP */ + ldr x29, [sp, #FREGS_FP] + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS + ldr x17, [sp, #FREGS_DIRECT_TRAMP] + cbnz x17, ftrace_caller_direct_late +#endif + + /* Restore the callsite's LR and PC */ + ldr x30, [sp, #FREGS_LR] + ldr x9, [sp, #FREGS_PC] + + /* Restore the callsite's SP */ + add sp, sp, #FREGS_SIZE + 32 + + ret x9 + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS +SYM_INNER_LABEL(ftrace_caller_direct_late, SYM_L_LOCAL) + /* + * Head to a direct trampoline in x17 after having run other tracers. + * The ftrace_regs are live, and x0-x8 and FP have been restored. The + * LR, PC, and SP have not been restored. + */ + + /* + * Restore the callsite's LR and PC matching the trampoline calling + * convention. + */ + ldr x9, [sp, #FREGS_LR] + ldr x30, [sp, #FREGS_PC] + + /* Restore the callsite's SP */ + add sp, sp, #FREGS_SIZE + 32 + +SYM_INNER_LABEL(ftrace_caller_direct, SYM_L_LOCAL) + /* + * Head to a direct trampoline in x17. + * + * We use `BR X17` as this can safely land on a `BTI C` or `PACIASP` in + * the trampoline, and will not unbalance any return stack. + */ + br x17 +#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ +SYM_CODE_END(ftrace_caller) + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS +SYM_CODE_START(ftrace_stub_direct_tramp) + bti c + mov x10, x30 + mov x30, x9 + ret x10 +SYM_CODE_END(ftrace_stub_direct_tramp) +#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ + +#else /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */ + +/* + * Gcc with -pg will put the following code in the beginning of each function: + * mov x0, x30 + * bl _mcount + * [function's body ...] 
+ * "bl _mcount" may be replaced to "bl ftrace_caller" or NOP if dynamic + * ftrace is enabled. + * + * Please note that x0 as an argument will not be used here because we can + * get lr(x30) of instrumented function at any time by winding up call stack + * as long as the kernel is compiled without -fomit-frame-pointer. + * (or CONFIG_FRAME_POINTER, this is forced on arm64) + * + * stack layout after mcount_enter in _mcount(): + * + * current sp/fp => 0:+-----+ + * in _mcount() | x29 | -> instrumented function's fp + * +-----+ + * | x30 | -> _mcount()'s lr (= instrumented function's pc) + * old sp => +16:+-----+ + * when instrumented | | + * function calls | ... | + * _mcount() | | + * | | + * instrumented => +xx:+-----+ + * function's fp | x29 | -> parent's fp + * +-----+ + * | x30 | -> instrumented function's lr (= parent's pc) + * +-----+ + * | ... | + */ + + .macro mcount_enter + stp x29, x30, [sp, #-16]! + mov x29, sp + .endm + + .macro mcount_exit + ldp x29, x30, [sp], #16 + ret + .endm + + .macro mcount_adjust_addr rd, rn + sub \rd, \rn, #AARCH64_INSN_SIZE + .endm + + /* for instrumented function's parent */ + .macro mcount_get_parent_fp reg + ldr \reg, [x29] + ldr \reg, [\reg] + .endm + + /* for instrumented function */ + .macro mcount_get_pc0 reg + mcount_adjust_addr \reg, x30 + .endm + + .macro mcount_get_pc reg + ldr \reg, [x29, #8] + mcount_adjust_addr \reg, \reg + .endm + + .macro mcount_get_lr reg + ldr \reg, [x29] + ldr \reg, [\reg, #8] + .endm + + .macro mcount_get_lr_addr reg + ldr \reg, [x29] + add \reg, \reg, #8 + .endm + +/* + * _mcount() is used to build the kernel with -pg option, but all the branch + * instructions to _mcount() are replaced to NOP initially at kernel start up, + * and later on, NOP to branch to ftrace_caller() when enabled or branch to + * NOP when disabled per-function base. + */ +SYM_FUNC_START(_mcount) + ret +SYM_FUNC_END(_mcount) +EXPORT_SYMBOL(_mcount) +NOKPROBE(_mcount) + +/* + * void ftrace_caller(unsigned long return_address) + * @return_address: return address to instrumented function + * + * This function is a counterpart of _mcount() in 'static' ftrace, and + * makes calls to: + * - tracer function to probe instrumented function's entry, + * - ftrace_graph_caller to set up an exit hook + */ +SYM_FUNC_START(ftrace_caller) + mcount_enter + + mcount_get_pc0 x0 // function's pc + mcount_get_lr x1 // function's lr + +SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL) // tracer(pc, lr); + nop // This will be replaced with "bl xxx" + // where xxx can be any kind of tracer. + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) // ftrace_graph_caller(); + nop // If enabled, this will be replaced + // "b ftrace_graph_caller" +#endif + + mcount_exit +SYM_FUNC_END(ftrace_caller) + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +/* + * void ftrace_graph_caller(void) + * + * Called from _mcount() or ftrace_caller() when function_graph tracer is + * selected. + * This function w/ prepare_ftrace_return() fakes link register's value on + * the call stack in order to intercept instrumented function's return path + * and run return_to_handler() later on its exit. 
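The return-path interception described at the end of the comment above can be sketched without assembly: remember the real return target, hand back a hook in its place, and let the hook forward control once it has run. A toy model with invented names, loosely mirroring what prepare_ftrace_return() and return_to_handler arrange:

#include <stdio.h>

typedef void (*demo_ret_fn)(void);

static demo_ret_fn demo_saved_return;   /* stand-in for the return stack */

static void demo_parent(void)
{
        puts("back in the real caller");
}

static void demo_return_hook(void)
{
        puts("graph hook: traced function just returned");
        demo_saved_return();            /* resume at the genuine target */
}

/* Loosely mirrors prepare_ftrace_return(): stash the real target, divert */
static demo_ret_fn demo_prepare_return(demo_ret_fn real_target)
{
        demo_saved_return = real_target;
        return demo_return_hook;
}

int main(void)
{
        demo_ret_fn ret = demo_prepare_return(demo_parent);

        ret();          /* the "return" reaches the hook first */
        return 0;
}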
+ */ +SYM_FUNC_START(ftrace_graph_caller) + mcount_get_pc x0 // function's pc + mcount_get_lr_addr x1 // pointer to function's saved lr + mcount_get_parent_fp x2 // parent's fp + bl prepare_ftrace_return // prepare_ftrace_return(pc, &lr, fp) + + mcount_exit +SYM_FUNC_END(ftrace_graph_caller) +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ +#endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */ + +SYM_TYPED_FUNC_START(ftrace_stub) + ret +SYM_FUNC_END(ftrace_stub) + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +SYM_TYPED_FUNC_START(ftrace_stub_graph) + ret +SYM_FUNC_END(ftrace_stub_graph) + +/* + * void return_to_handler(void) + * + * Run ftrace_return_to_handler() before going back to parent. + * @fp is checked against the value passed by ftrace_graph_caller(). + */ +SYM_CODE_START(return_to_handler) + /* save return value regs */ + sub sp, sp, #FGRET_REGS_SIZE + stp x0, x1, [sp, #FGRET_REGS_X0] + stp x2, x3, [sp, #FGRET_REGS_X2] + stp x4, x5, [sp, #FGRET_REGS_X4] + stp x6, x7, [sp, #FGRET_REGS_X6] + str x29, [sp, #FGRET_REGS_FP] // parent's fp + + mov x0, sp + bl ftrace_return_to_handler // addr = ftrace_return_to_hander(regs); + mov x30, x0 // restore the original return address + + /* restore return value regs */ + ldp x0, x1, [sp, #FGRET_REGS_X0] + ldp x2, x3, [sp, #FGRET_REGS_X2] + ldp x4, x5, [sp, #FGRET_REGS_X4] + ldp x6, x7, [sp, #FGRET_REGS_X6] + add sp, sp, #FGRET_REGS_SIZE + + ret +SYM_CODE_END(return_to_handler) +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S new file mode 100644 index 0000000000..7fcbee0f6c --- /dev/null +++ b/arch/arm64/kernel/entry.S @@ -0,0 +1,1099 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Low-level exception handling code + * + * Copyright (C) 2012 ARM Ltd. + * Authors: Catalin Marinas + * Will Deacon + */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + .macro clear_gp_regs + .irp n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29 + mov x\n, xzr + .endr + .endm + + .macro kernel_ventry, el:req, ht:req, regsize:req, label:req + .align 7 +.Lventry_start\@: + .if \el == 0 + /* + * This must be the first instruction of the EL0 vector entries. It is + * skipped by the trampoline vectors, to trigger the cleanup. + */ + b .Lskip_tramp_vectors_cleanup\@ + .if \regsize == 64 + mrs x30, tpidrro_el0 + msr tpidrro_el0, xzr + .else + mov x30, xzr + .endif +.Lskip_tramp_vectors_cleanup\@: + .endif + + sub sp, sp, #PT_REGS_SIZE +#ifdef CONFIG_VMAP_STACK + /* + * Test whether the SP has overflowed, without corrupting a GPR. + * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT) + * should always be zero. + */ + add sp, sp, x0 // sp' = sp + x0 + sub x0, sp, x0 // x0' = sp' - x0 = (sp + x0) - x0 = sp + tbnz x0, #THREAD_SHIFT, 0f + sub x0, sp, x0 // x0'' = sp' - x0' = (sp + x0) - sp = x0 + sub sp, sp, x0 // sp'' = sp' - x0 = (sp + x0) - x0 = sp + b el\el\ht\()_\regsize\()_\label + +0: + /* + * Either we've just detected an overflow, or we've taken an exception + * while on the overflow stack. Either way, we won't return to + * userspace, and can clobber EL0 registers to free up GPRs. + */ + + /* Stash the original SP (minus PT_REGS_SIZE) in tpidr_el0. 
*/ + msr tpidr_el0, x0 + + /* Recover the original x0 value and stash it in tpidrro_el0 */ + sub x0, sp, x0 + msr tpidrro_el0, x0 + + /* Switch to the overflow stack */ + adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0 + + /* + * Check whether we were already on the overflow stack. This may happen + * after panic() re-enables interrupts. + */ + mrs x0, tpidr_el0 // sp of interrupted context + sub x0, sp, x0 // delta with top of overflow stack + tst x0, #~(OVERFLOW_STACK_SIZE - 1) // within range? + b.ne __bad_stack // no? -> bad stack pointer + + /* We were already on the overflow stack. Restore sp/x0 and carry on. */ + sub sp, sp, x0 + mrs x0, tpidrro_el0 +#endif + b el\el\ht\()_\regsize\()_\label +.org .Lventry_start\@ + 128 // Did we overflow the ventry slot? + .endm + + .macro tramp_alias, dst, sym + .set .Lalias\@, TRAMP_VALIAS + \sym - .entry.tramp.text + movz \dst, :abs_g2_s:.Lalias\@ + movk \dst, :abs_g1_nc:.Lalias\@ + movk \dst, :abs_g0_nc:.Lalias\@ + .endm + + /* + * This macro corrupts x0-x3. It is the caller's duty to save/restore + * them if required. + */ + .macro apply_ssbd, state, tmp1, tmp2 +alternative_cb ARM64_ALWAYS_SYSTEM, spectre_v4_patch_fw_mitigation_enable + b .L__asm_ssbd_skip\@ // Patched to NOP +alternative_cb_end + ldr_this_cpu \tmp2, arm64_ssbd_callback_required, \tmp1 + cbz \tmp2, .L__asm_ssbd_skip\@ + ldr \tmp2, [tsk, #TSK_TI_FLAGS] + tbnz \tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@ + mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2 + mov w1, #\state +alternative_cb ARM64_ALWAYS_SYSTEM, smccc_patch_fw_mitigation_conduit + nop // Patched to SMC/HVC #0 +alternative_cb_end +.L__asm_ssbd_skip\@: + .endm + + /* Check for MTE asynchronous tag check faults */ + .macro check_mte_async_tcf, tmp, ti_flags, thread_sctlr +#ifdef CONFIG_ARM64_MTE + .arch_extension lse +alternative_if_not ARM64_MTE + b 1f +alternative_else_nop_endif + /* + * Asynchronous tag check faults are only possible in ASYNC (2) or + * ASYM (3) modes. In each of these modes bit 1 of SCTLR_EL1.TCF0 is + * set, so skip the check if it is unset. + */ + tbz \thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f + mrs_s \tmp, SYS_TFSRE0_EL1 + tbz \tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f + /* Asynchronous TCF occurred for TTBR0 access, set the TI flag */ + mov \tmp, #_TIF_MTE_ASYNC_FAULT + add \ti_flags, tsk, #TSK_TI_FLAGS + stset \tmp, [\ti_flags] +1: +#endif + .endm + + /* Clear the MTE asynchronous tag check faults */ + .macro clear_mte_async_tcf thread_sctlr +#ifdef CONFIG_ARM64_MTE +alternative_if ARM64_MTE + /* See comment in check_mte_async_tcf above. 
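The single-bit test used by check_mte_async_tcf (and referred to by the comment just above) works because SCTLR_EL1.TCF0 is a two-bit mode field, 0 none, 1 synchronous, 2 asynchronous, 3 asymmetric, so "asynchronous faults possible" is exactly "bit 1 set". A one-screen illustration:

#include <stdio.h>

int main(void)
{
        static const char *const mode[] = { "none", "sync", "async", "asym" };
        unsigned int tcf0;

        /* Only ASYNC (2) and ASYM (3) have bit 1 set, so one TBZ suffices. */
        for (tcf0 = 0; tcf0 < 4; tcf0++)
                printf("TCF0=%u (%-5s): async faults possible = %u\n",
                       tcf0, mode[tcf0], (tcf0 >> 1) & 1u);
        return 0;
}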
*/ + tbz \thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f + dsb ish + msr_s SYS_TFSRE0_EL1, xzr +1: +alternative_else_nop_endif +#endif + .endm + + .macro mte_set_gcr, mte_ctrl, tmp +#ifdef CONFIG_ARM64_MTE + ubfx \tmp, \mte_ctrl, #MTE_CTRL_GCR_USER_EXCL_SHIFT, #16 + orr \tmp, \tmp, #SYS_GCR_EL1_RRND + msr_s SYS_GCR_EL1, \tmp +#endif + .endm + + .macro mte_set_kernel_gcr, tmp, tmp2 +#ifdef CONFIG_KASAN_HW_TAGS +alternative_cb ARM64_ALWAYS_SYSTEM, kasan_hw_tags_enable + b 1f +alternative_cb_end + mov \tmp, KERNEL_GCR_EL1 + msr_s SYS_GCR_EL1, \tmp +1: +#endif + .endm + + .macro mte_set_user_gcr, tsk, tmp, tmp2 +#ifdef CONFIG_KASAN_HW_TAGS +alternative_cb ARM64_ALWAYS_SYSTEM, kasan_hw_tags_enable + b 1f +alternative_cb_end + ldr \tmp, [\tsk, #THREAD_MTE_CTRL] + + mte_set_gcr \tmp, \tmp2 +1: +#endif + .endm + + .macro kernel_entry, el, regsize = 64 + .if \el == 0 + alternative_insn nop, SET_PSTATE_DIT(1), ARM64_HAS_DIT + .endif + .if \regsize == 32 + mov w0, w0 // zero upper 32 bits of x0 + .endif + stp x0, x1, [sp, #16 * 0] + stp x2, x3, [sp, #16 * 1] + stp x4, x5, [sp, #16 * 2] + stp x6, x7, [sp, #16 * 3] + stp x8, x9, [sp, #16 * 4] + stp x10, x11, [sp, #16 * 5] + stp x12, x13, [sp, #16 * 6] + stp x14, x15, [sp, #16 * 7] + stp x16, x17, [sp, #16 * 8] + stp x18, x19, [sp, #16 * 9] + stp x20, x21, [sp, #16 * 10] + stp x22, x23, [sp, #16 * 11] + stp x24, x25, [sp, #16 * 12] + stp x26, x27, [sp, #16 * 13] + stp x28, x29, [sp, #16 * 14] + + .if \el == 0 + clear_gp_regs + mrs x21, sp_el0 + ldr_this_cpu tsk, __entry_task, x20 + msr sp_el0, tsk + + /* + * Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions + * when scheduling. + */ + ldr x19, [tsk, #TSK_TI_FLAGS] + disable_step_tsk x19, x20 + + /* Check for asynchronous tag check faults in user space */ + ldr x0, [tsk, THREAD_SCTLR_USER] + check_mte_async_tcf x22, x23, x0 + +#ifdef CONFIG_ARM64_PTR_AUTH +alternative_if ARM64_HAS_ADDRESS_AUTH + /* + * Enable IA for in-kernel PAC if the task had it disabled. Although + * this could be implemented with an unconditional MRS which would avoid + * a load, this was measured to be slower on Cortex-A75 and Cortex-A76. + * + * Install the kernel IA key only if IA was enabled in the task. If IA + * was disabled on kernel exit then we would have left the kernel IA + * installed so there is no need to install it again. + */ + tbz x0, SCTLR_ELx_ENIA_SHIFT, 1f + __ptrauth_keys_install_kernel_nosync tsk, x20, x22, x23 + b 2f +1: + mrs x0, sctlr_el1 + orr x0, x0, SCTLR_ELx_ENIA + msr sctlr_el1, x0 +2: +alternative_else_nop_endif +#endif + + apply_ssbd 1, x22, x23 + + mte_set_kernel_gcr x22, x23 + + /* + * Any non-self-synchronizing system register updates required for + * kernel entry should be placed before this point. + */ +alternative_if ARM64_MTE + isb + b 1f +alternative_else_nop_endif +alternative_if ARM64_HAS_ADDRESS_AUTH + isb +alternative_else_nop_endif +1: + + scs_load_current + .else + add x21, sp, #PT_REGS_SIZE + get_current_task tsk + .endif /* \el == 0 */ + mrs x22, elr_el1 + mrs x23, spsr_el1 + stp lr, x21, [sp, #S_LR] + + /* + * For exceptions from EL0, create a final frame record. + * For exceptions from EL1, create a synthetic frame record so the + * interrupted code shows up in the backtrace. 
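The final and synthetic frame records mentioned above follow the AAPCS64 convention that a frame record is the pair {previous FP, return address}, chained through FP; the all-zero record stored on the EL0 path is what lets an unwinder stop cleanly. A miniature walk over mocked-up records, with names and addresses invented for the demo:

#include <stdint.h>
#include <stdio.h>

struct demo_frame_record {
        struct demo_frame_record *fp;   /* previous record in the chain */
        uint64_t lr;                    /* return address saved with it */
};

static void demo_unwind(const struct demo_frame_record *rec)
{
        while (rec && rec->fp) {        /* the {0, 0} record ends the walk */
                printf("  return address %#llx\n", (unsigned long long)rec->lr);
                rec = rec->fp;
        }
}

int main(void)
{
        struct demo_frame_record final_rec = { NULL, 0 };   /* EL0-style terminator */
        struct demo_frame_record caller = { &final_rec, 0x1000 };
        struct demo_frame_record leaf = { &caller, 0x2000 };

        demo_unwind(&leaf);
        return 0;
}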
+ */ + .if \el == 0 + stp xzr, xzr, [sp, #S_STACKFRAME] + .else + stp x29, x22, [sp, #S_STACKFRAME] + .endif + add x29, sp, #S_STACKFRAME + +#ifdef CONFIG_ARM64_SW_TTBR0_PAN +alternative_if_not ARM64_HAS_PAN + bl __swpan_entry_el\el +alternative_else_nop_endif +#endif + + stp x22, x23, [sp, #S_PC] + + /* Not in a syscall by default (el0_svc overwrites for real syscall) */ + .if \el == 0 + mov w21, #NO_SYSCALL + str w21, [sp, #S_SYSCALLNO] + .endif + +#ifdef CONFIG_ARM64_PSEUDO_NMI +alternative_if_not ARM64_HAS_GIC_PRIO_MASKING + b .Lskip_pmr_save\@ +alternative_else_nop_endif + + mrs_s x20, SYS_ICC_PMR_EL1 + str x20, [sp, #S_PMR_SAVE] + mov x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET + msr_s SYS_ICC_PMR_EL1, x20 + +.Lskip_pmr_save\@: +#endif + + /* + * Registers that may be useful after this macro is invoked: + * + * x20 - ICC_PMR_EL1 + * x21 - aborted SP + * x22 - aborted PC + * x23 - aborted PSTATE + */ + .endm + + .macro kernel_exit, el + .if \el != 0 + disable_daif + .endif + +#ifdef CONFIG_ARM64_PSEUDO_NMI +alternative_if_not ARM64_HAS_GIC_PRIO_MASKING + b .Lskip_pmr_restore\@ +alternative_else_nop_endif + + ldr x20, [sp, #S_PMR_SAVE] + msr_s SYS_ICC_PMR_EL1, x20 + + /* Ensure priority change is seen by redistributor */ +alternative_if_not ARM64_HAS_GIC_PRIO_RELAXED_SYNC + dsb sy +alternative_else_nop_endif + +.Lskip_pmr_restore\@: +#endif + + ldp x21, x22, [sp, #S_PC] // load ELR, SPSR + +#ifdef CONFIG_ARM64_SW_TTBR0_PAN +alternative_if_not ARM64_HAS_PAN + bl __swpan_exit_el\el +alternative_else_nop_endif +#endif + + .if \el == 0 + ldr x23, [sp, #S_SP] // load return stack pointer + msr sp_el0, x23 + tst x22, #PSR_MODE32_BIT // native task? + b.eq 3f + +#ifdef CONFIG_ARM64_ERRATUM_845719 +alternative_if ARM64_WORKAROUND_845719 +#ifdef CONFIG_PID_IN_CONTEXTIDR + mrs x29, contextidr_el1 + msr contextidr_el1, x29 +#else + msr contextidr_el1, xzr +#endif +alternative_else_nop_endif +#endif +3: + scs_save tsk + + /* Ignore asynchronous tag check faults in the uaccess routines */ + ldr x0, [tsk, THREAD_SCTLR_USER] + clear_mte_async_tcf x0 + +#ifdef CONFIG_ARM64_PTR_AUTH +alternative_if ARM64_HAS_ADDRESS_AUTH + /* + * IA was enabled for in-kernel PAC. Disable it now if needed, or + * alternatively install the user's IA. All other per-task keys and + * SCTLR bits were updated on task switch. + * + * No kernel C function calls after this. 
+ */ + tbz x0, SCTLR_ELx_ENIA_SHIFT, 1f + __ptrauth_keys_install_user tsk, x0, x1, x2 + b 2f +1: + mrs x0, sctlr_el1 + bic x0, x0, SCTLR_ELx_ENIA + msr sctlr_el1, x0 +2: +alternative_else_nop_endif +#endif + + mte_set_user_gcr tsk, x0, x1 + + apply_ssbd 0, x0, x1 + .endif + + msr elr_el1, x21 // set up the return data + msr spsr_el1, x22 + ldp x0, x1, [sp, #16 * 0] + ldp x2, x3, [sp, #16 * 1] + ldp x4, x5, [sp, #16 * 2] + ldp x6, x7, [sp, #16 * 3] + ldp x8, x9, [sp, #16 * 4] + ldp x10, x11, [sp, #16 * 5] + ldp x12, x13, [sp, #16 * 6] + ldp x14, x15, [sp, #16 * 7] + ldp x16, x17, [sp, #16 * 8] + ldp x18, x19, [sp, #16 * 9] + ldp x20, x21, [sp, #16 * 10] + ldp x22, x23, [sp, #16 * 11] + ldp x24, x25, [sp, #16 * 12] + ldp x26, x27, [sp, #16 * 13] + ldp x28, x29, [sp, #16 * 14] + + .if \el == 0 +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 + alternative_insn "b .L_skip_tramp_exit_\@", nop, ARM64_UNMAP_KERNEL_AT_EL0 + + msr far_el1, x29 + + ldr_this_cpu x30, this_cpu_vector, x29 + tramp_alias x29, tramp_exit + msr vbar_el1, x30 // install vector table + ldr lr, [sp, #S_LR] // restore x30 + add sp, sp, #PT_REGS_SIZE // restore sp + br x29 + +.L_skip_tramp_exit_\@: +#endif + ldr lr, [sp, #S_LR] + add sp, sp, #PT_REGS_SIZE // restore sp + + /* This must be after the last explicit memory access */ +alternative_if ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD + tlbi vale1, xzr + dsb nsh +alternative_else_nop_endif + eret + .else + ldr lr, [sp, #S_LR] + add sp, sp, #PT_REGS_SIZE // restore sp + + /* Ensure any device/NC reads complete */ + alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412 + + eret + .endif + sb + .endm + +#ifdef CONFIG_ARM64_SW_TTBR0_PAN + /* + * Set the TTBR0 PAN bit in SPSR. When the exception is taken from + * EL0, there is no need to check the state of TTBR0_EL1 since + * accesses are always enabled. + * Note that the meaning of this bit differs from the ARMv8.1 PAN + * feature as all TTBR0_EL1 accesses are disabled, not just those to + * user mappings. + */ +SYM_CODE_START_LOCAL(__swpan_entry_el1) + mrs x21, ttbr0_el1 + tst x21, #TTBR_ASID_MASK // Check for the reserved ASID + orr x23, x23, #PSR_PAN_BIT // Set the emulated PAN in the saved SPSR + b.eq 1f // TTBR0 access already disabled + and x23, x23, #~PSR_PAN_BIT // Clear the emulated PAN in the saved SPSR +SYM_INNER_LABEL(__swpan_entry_el0, SYM_L_LOCAL) + __uaccess_ttbr0_disable x21 +1: ret +SYM_CODE_END(__swpan_entry_el1) + + /* + * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR + * PAN bit checking. + */ +SYM_CODE_START_LOCAL(__swpan_exit_el1) + tbnz x22, #22, 1f // Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set + __uaccess_ttbr0_enable x0, x1 +1: and x22, x22, #~PSR_PAN_BIT // ARMv8.0 CPUs do not understand this bit + ret +SYM_CODE_END(__swpan_exit_el1) + +SYM_CODE_START_LOCAL(__swpan_exit_el0) + __uaccess_ttbr0_enable x0, x1 + /* + * Enable errata workarounds only if returning to user. The only + * workaround currently required for TTBR0_EL1 changes are for the + * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache + * corruption). + */ + b post_ttbr_update_workaround +SYM_CODE_END(__swpan_exit_el0) +#endif + +/* GPRs used by entry code */ +tsk .req x28 // current thread_info + + .text + +/* + * Exception vectors. 
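For reference, the table that follows matches the architectural layout: a 2KB-aligned base (.align 11) and sixteen 128-byte slots (.align 7 each), grouped as current EL with SP_EL0, current EL with SP_ELx, lower EL AArch64 and lower EL AArch32, each group ordered sync/IRQ/FIQ/SError. A quick offset generator (the group labels are informal):

#include <stdio.h>

int main(void)
{
        static const char *const group[] = {
                "EL1t (SP_EL0)", "EL1h (SP_EL1)", "EL0 64-bit", "EL0 32-bit"
        };
        static const char *const kind[] = { "sync", "irq", "fiq", "error" };
        int g, k;

        /* Each group spans 0x200 bytes; each vector within it spans 0x80. */
        for (g = 0; g < 4; g++)
                for (k = 0; k < 4; k++)
                        printf("VBAR_EL1 + 0x%03x: %-14s %s\n",
                               g * 0x200 + k * 0x80, group[g], kind[k]);
        return 0;
}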
+ */ + .pushsection ".entry.text", "ax" + + .align 11 +SYM_CODE_START(vectors) + kernel_ventry 1, t, 64, sync // Synchronous EL1t + kernel_ventry 1, t, 64, irq // IRQ EL1t + kernel_ventry 1, t, 64, fiq // FIQ EL1t + kernel_ventry 1, t, 64, error // Error EL1t + + kernel_ventry 1, h, 64, sync // Synchronous EL1h + kernel_ventry 1, h, 64, irq // IRQ EL1h + kernel_ventry 1, h, 64, fiq // FIQ EL1h + kernel_ventry 1, h, 64, error // Error EL1h + + kernel_ventry 0, t, 64, sync // Synchronous 64-bit EL0 + kernel_ventry 0, t, 64, irq // IRQ 64-bit EL0 + kernel_ventry 0, t, 64, fiq // FIQ 64-bit EL0 + kernel_ventry 0, t, 64, error // Error 64-bit EL0 + + kernel_ventry 0, t, 32, sync // Synchronous 32-bit EL0 + kernel_ventry 0, t, 32, irq // IRQ 32-bit EL0 + kernel_ventry 0, t, 32, fiq // FIQ 32-bit EL0 + kernel_ventry 0, t, 32, error // Error 32-bit EL0 +SYM_CODE_END(vectors) + +#ifdef CONFIG_VMAP_STACK +SYM_CODE_START_LOCAL(__bad_stack) + /* + * We detected an overflow in kernel_ventry, which switched to the + * overflow stack. Stash the exception regs, and head to our overflow + * handler. + */ + + /* Restore the original x0 value */ + mrs x0, tpidrro_el0 + + /* + * Store the original GPRs to the new stack. The orginal SP (minus + * PT_REGS_SIZE) was stashed in tpidr_el0 by kernel_ventry. + */ + sub sp, sp, #PT_REGS_SIZE + kernel_entry 1 + mrs x0, tpidr_el0 + add x0, x0, #PT_REGS_SIZE + str x0, [sp, #S_SP] + + /* Stash the regs for handle_bad_stack */ + mov x0, sp + + /* Time to die */ + bl handle_bad_stack + ASM_BUG() +SYM_CODE_END(__bad_stack) +#endif /* CONFIG_VMAP_STACK */ + + + .macro entry_handler el:req, ht:req, regsize:req, label:req +SYM_CODE_START_LOCAL(el\el\ht\()_\regsize\()_\label) + kernel_entry \el, \regsize + mov x0, sp + bl el\el\ht\()_\regsize\()_\label\()_handler + .if \el == 0 + b ret_to_user + .else + b ret_to_kernel + .endif +SYM_CODE_END(el\el\ht\()_\regsize\()_\label) + .endm + +/* + * Early exception handlers + */ + entry_handler 1, t, 64, sync + entry_handler 1, t, 64, irq + entry_handler 1, t, 64, fiq + entry_handler 1, t, 64, error + + entry_handler 1, h, 64, sync + entry_handler 1, h, 64, irq + entry_handler 1, h, 64, fiq + entry_handler 1, h, 64, error + + entry_handler 0, t, 64, sync + entry_handler 0, t, 64, irq + entry_handler 0, t, 64, fiq + entry_handler 0, t, 64, error + + entry_handler 0, t, 32, sync + entry_handler 0, t, 32, irq + entry_handler 0, t, 32, fiq + entry_handler 0, t, 32, error + +SYM_CODE_START_LOCAL(ret_to_kernel) + kernel_exit 1 +SYM_CODE_END(ret_to_kernel) + +SYM_CODE_START_LOCAL(ret_to_user) + ldr x19, [tsk, #TSK_TI_FLAGS] // re-check for single-step + enable_step_tsk x19, x2 +#ifdef CONFIG_GCC_PLUGIN_STACKLEAK + bl stackleak_erase_on_task_stack +#endif + kernel_exit 0 +SYM_CODE_END(ret_to_user) + + .popsection // .entry.text + + // Move from tramp_pg_dir to swapper_pg_dir + .macro tramp_map_kernel, tmp + mrs \tmp, ttbr1_el1 + add \tmp, \tmp, #TRAMP_SWAPPER_OFFSET + bic \tmp, \tmp, #USER_ASID_FLAG + msr ttbr1_el1, \tmp +#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003 +alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003 + /* ASID already in \tmp[63:48] */ + movk \tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12) + movk \tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12) + /* 2MB boundary containing the vectors, so we nobble the walk cache */ + movk \tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12) + isb + tlbi vae1, \tmp + dsb nsh +alternative_else_nop_endif +#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */ + .endm + + // Move from swapper_pg_dir to tramp_pg_dir + .macro 
tramp_unmap_kernel, tmp + mrs \tmp, ttbr1_el1 + sub \tmp, \tmp, #TRAMP_SWAPPER_OFFSET + orr \tmp, \tmp, #USER_ASID_FLAG + msr ttbr1_el1, \tmp + /* + * We avoid running the post_ttbr_update_workaround here because + * it's only needed by Cavium ThunderX, which requires KPTI to be + * disabled. + */ + .endm + + .macro tramp_data_read_var dst, var +#ifdef CONFIG_RELOCATABLE + ldr \dst, .L__tramp_data_\var + .ifndef .L__tramp_data_\var + .pushsection ".entry.tramp.rodata", "a", %progbits + .align 3 +.L__tramp_data_\var: + .quad \var + .popsection + .endif +#else + /* + * As !RELOCATABLE implies !RANDOMIZE_BASE the address is always a + * compile time constant (and hence not secret and not worth hiding). + * + * As statically allocated kernel code and data always live in the top + * 47 bits of the address space we can sign-extend bit 47 and avoid an + * instruction to load the upper 16 bits (which must be 0xFFFF). + */ + movz \dst, :abs_g2_s:\var + movk \dst, :abs_g1_nc:\var + movk \dst, :abs_g0_nc:\var +#endif + .endm + +#define BHB_MITIGATION_NONE 0 +#define BHB_MITIGATION_LOOP 1 +#define BHB_MITIGATION_FW 2 +#define BHB_MITIGATION_INSN 3 + + .macro tramp_ventry, vector_start, regsize, kpti, bhb + .align 7 +1: + .if \regsize == 64 + msr tpidrro_el0, x30 // Restored in kernel_ventry + .endif + + .if \bhb == BHB_MITIGATION_LOOP + /* + * This sequence must appear before the first indirect branch. i.e. the + * ret out of tramp_ventry. It appears here because x30 is free. + */ + __mitigate_spectre_bhb_loop x30 + .endif // \bhb == BHB_MITIGATION_LOOP + + .if \bhb == BHB_MITIGATION_INSN + clearbhb + isb + .endif // \bhb == BHB_MITIGATION_INSN + + .if \kpti == 1 + /* + * Defend against branch aliasing attacks by pushing a dummy + * entry onto the return stack and using a RET instruction to + * enter the full-fat kernel vectors. + */ + bl 2f + b . +2: + tramp_map_kernel x30 +alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003 + tramp_data_read_var x30, vectors +alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM + prfm plil1strm, [x30, #(1b - \vector_start)] +alternative_else_nop_endif + + msr vbar_el1, x30 + isb + .else + adr_l x30, vectors + .endif // \kpti == 1 + + .if \bhb == BHB_MITIGATION_FW + /* + * The firmware sequence must appear before the first indirect branch. + * i.e. the ret out of tramp_ventry. But it also needs the stack to be + * mapped to save/restore the registers the SMC clobbers. + */ + __mitigate_spectre_bhb_fw + .endif // \bhb == BHB_MITIGATION_FW + + add x30, x30, #(1b - \vector_start + 4) + ret +.org 1b + 128 // Did we overflow the ventry slot? + .endm + + .macro generate_tramp_vector, kpti, bhb +.Lvector_start\@: + .space 0x400 + + .rept 4 + tramp_ventry .Lvector_start\@, 64, \kpti, \bhb + .endr + .rept 4 + tramp_ventry .Lvector_start\@, 32, \kpti, \bhb + .endr + .endm + +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +/* + * Exception vectors trampoline. + * The order must match __bp_harden_el1_vectors and the + * arm64_bp_harden_el1_vectors enum. 
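The non-RELOCATABLE arm of tramp_data_read_var above composes a 64-bit kernel address from three 16-bit immediates, with the sign-extending :abs_g2_s: group supplying the all-ones top 16 bits for free. The same composition in plain integer arithmetic, using an invented example address:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t var = 0xffff800008123456;      /* hypothetical kernel VA */
        uint64_t g2 = (var >> 32) & 0xffff;     /* bits [47:32] */
        uint64_t dst;

        /* movz dst, :abs_g2_s:var -- load [47:32], sign-extend into [63:48] */
        dst = g2 << 32;
        if (g2 & 0x8000)
                dst |= UINT64_C(0xffff) << 48;

        /* movk dst, :abs_g1_nc:var -- patch bits [31:16], keep the rest */
        dst = (dst & ~UINT64_C(0xffff0000)) | (var & 0xffff0000);

        /* movk dst, :abs_g0_nc:var -- patch bits [15:0] */
        dst = (dst & ~UINT64_C(0xffff)) | (var & 0xffff);

        assert(dst == var);
        printf("composed %#llx from three 16-bit chunks\n",
               (unsigned long long)dst);
        return 0;
}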
+ */ + .pushsection ".entry.tramp.text", "ax" + .align 11 +SYM_CODE_START_LOCAL_NOALIGN(tramp_vectors) +#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY + generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_LOOP + generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_FW + generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_INSN +#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ + generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_NONE +SYM_CODE_END(tramp_vectors) + +SYM_CODE_START_LOCAL(tramp_exit) + tramp_unmap_kernel x29 + mrs x29, far_el1 // restore x29 + eret + sb +SYM_CODE_END(tramp_exit) + .popsection // .entry.tramp.text +#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ + +/* + * Exception vectors for spectre mitigations on entry from EL1 when + * kpti is not in use. + */ + .macro generate_el1_vector, bhb +.Lvector_start\@: + kernel_ventry 1, t, 64, sync // Synchronous EL1t + kernel_ventry 1, t, 64, irq // IRQ EL1t + kernel_ventry 1, t, 64, fiq // FIQ EL1h + kernel_ventry 1, t, 64, error // Error EL1t + + kernel_ventry 1, h, 64, sync // Synchronous EL1h + kernel_ventry 1, h, 64, irq // IRQ EL1h + kernel_ventry 1, h, 64, fiq // FIQ EL1h + kernel_ventry 1, h, 64, error // Error EL1h + + .rept 4 + tramp_ventry .Lvector_start\@, 64, 0, \bhb + .endr + .rept 4 + tramp_ventry .Lvector_start\@, 32, 0, \bhb + .endr + .endm + +/* The order must match tramp_vecs and the arm64_bp_harden_el1_vectors enum. */ + .pushsection ".entry.text", "ax" + .align 11 +SYM_CODE_START(__bp_harden_el1_vectors) +#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY + generate_el1_vector bhb=BHB_MITIGATION_LOOP + generate_el1_vector bhb=BHB_MITIGATION_FW + generate_el1_vector bhb=BHB_MITIGATION_INSN +#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ +SYM_CODE_END(__bp_harden_el1_vectors) + .popsection + + +/* + * Register switch for AArch64. The callee-saved registers need to be saved + * and restored. On entry: + * x0 = previous task_struct (must be preserved across the switch) + * x1 = next task_struct + * Previous and next are guaranteed not to be the same. + * + */ +SYM_FUNC_START(cpu_switch_to) + mov x10, #THREAD_CPU_CONTEXT + add x8, x0, x10 + mov x9, sp + stp x19, x20, [x8], #16 // store callee-saved registers + stp x21, x22, [x8], #16 + stp x23, x24, [x8], #16 + stp x25, x26, [x8], #16 + stp x27, x28, [x8], #16 + stp x29, x9, [x8], #16 + str lr, [x8] + add x8, x1, x10 + ldp x19, x20, [x8], #16 // restore callee-saved registers + ldp x21, x22, [x8], #16 + ldp x23, x24, [x8], #16 + ldp x25, x26, [x8], #16 + ldp x27, x28, [x8], #16 + ldp x29, x9, [x8], #16 + ldr lr, [x8] + mov sp, x9 + msr sp_el0, x1 + ptrauth_keys_install_kernel x1, x8, x9, x10 + scs_save x0 + scs_load_current + ret +SYM_FUNC_END(cpu_switch_to) +NOKPROBE(cpu_switch_to) + +/* + * This is how we return from a fork. + */ +SYM_CODE_START(ret_from_fork) + bl schedule_tail + cbz x19, 1f // not a kernel thread + mov x0, x20 + blr x19 +1: get_current_task tsk + mov x0, sp + bl asm_exit_to_user_mode + b ret_to_user +SYM_CODE_END(ret_from_fork) +NOKPROBE(ret_from_fork) + +/* + * void call_on_irq_stack(struct pt_regs *regs, + * void (*func)(struct pt_regs *)); + * + * Calls func(regs) using this CPU's irq stack and shadow irq stack. + */ +SYM_FUNC_START(call_on_irq_stack) +#ifdef CONFIG_SHADOW_CALL_STACK + get_current_task x16 + scs_save x16 + ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x17 +#endif + + /* Create a frame record to save our LR and SP (implicit in FP) */ + stp x29, x30, [sp, #-16]! 
+ mov x29, sp + + ldr_this_cpu x16, irq_stack_ptr, x17 + + /* Move to the new stack and call the function there */ + add sp, x16, #IRQ_STACK_SIZE + blr x1 + + /* + * Restore the SP from the FP, and restore the FP and LR from the frame + * record. + */ + mov sp, x29 + ldp x29, x30, [sp], #16 + scs_load_current + ret +SYM_FUNC_END(call_on_irq_stack) +NOKPROBE(call_on_irq_stack) + +#ifdef CONFIG_ARM_SDE_INTERFACE + +#include +#include + +.macro sdei_handler_exit exit_mode + /* On success, this call never returns... */ + cmp \exit_mode, #SDEI_EXIT_SMC + b.ne 99f + smc #0 + b . +99: hvc #0 + b . +.endm + +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +/* + * The regular SDEI entry point may have been unmapped along with the rest of + * the kernel. This trampoline restores the kernel mapping to make the x1 memory + * argument accessible. + * + * This clobbers x4, __sdei_handler() will restore this from firmware's + * copy. + */ +.pushsection ".entry.tramp.text", "ax" +SYM_CODE_START(__sdei_asm_entry_trampoline) + mrs x4, ttbr1_el1 + tbz x4, #USER_ASID_BIT, 1f + + tramp_map_kernel tmp=x4 + isb + mov x4, xzr + + /* + * Remember whether to unmap the kernel on exit. + */ +1: str x4, [x1, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)] + tramp_data_read_var x4, __sdei_asm_handler + br x4 +SYM_CODE_END(__sdei_asm_entry_trampoline) +NOKPROBE(__sdei_asm_entry_trampoline) + +/* + * Make the exit call and restore the original ttbr1_el1 + * + * x0 & x1: setup for the exit API call + * x2: exit_mode + * x4: struct sdei_registered_event argument from registration time. + */ +SYM_CODE_START(__sdei_asm_exit_trampoline) + ldr x4, [x4, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)] + cbnz x4, 1f + + tramp_unmap_kernel tmp=x4 + +1: sdei_handler_exit exit_mode=x2 +SYM_CODE_END(__sdei_asm_exit_trampoline) +NOKPROBE(__sdei_asm_exit_trampoline) +.popsection // .entry.tramp.text +#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ + +/* + * Software Delegated Exception entry point. + * + * x0: Event number + * x1: struct sdei_registered_event argument from registration time. + * x2: interrupted PC + * x3: interrupted PSTATE + * x4: maybe clobbered by the trampoline + * + * Firmware has preserved x0->x17 for us, we must save/restore the rest to + * follow SMC-CC. We save (or retrieve) all the registers as the handler may + * want them. 
+ */ +SYM_CODE_START(__sdei_asm_handler) + stp x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC] + stp x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2] + stp x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3] + stp x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4] + stp x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5] + stp x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6] + stp x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7] + stp x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8] + stp x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9] + stp x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10] + stp x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11] + stp x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12] + stp x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13] + stp x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14] + mov x4, sp + stp lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR] + + mov x19, x1 + + /* Store the registered-event for crash_smp_send_stop() */ + ldrb w4, [x19, #SDEI_EVENT_PRIORITY] + cbnz w4, 1f + adr_this_cpu dst=x5, sym=sdei_active_normal_event, tmp=x6 + b 2f +1: adr_this_cpu dst=x5, sym=sdei_active_critical_event, tmp=x6 +2: str x19, [x5] + +#ifdef CONFIG_VMAP_STACK + /* + * entry.S may have been using sp as a scratch register, find whether + * this is a normal or critical event and switch to the appropriate + * stack for this CPU. + */ + cbnz w4, 1f + ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6 + b 2f +1: ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6 +2: mov x6, #SDEI_STACK_SIZE + add x5, x5, x6 + mov sp, x5 +#endif + +#ifdef CONFIG_SHADOW_CALL_STACK + /* Use a separate shadow call stack for normal and critical events */ + cbnz w4, 3f + ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_normal_ptr, tmp=x6 + b 4f +3: ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_critical_ptr, tmp=x6 +4: +#endif + + /* + * We may have interrupted userspace, or a guest, or exit-from or + * return-to either of these. We can't trust sp_el0, restore it. + */ + mrs x28, sp_el0 + ldr_this_cpu dst=x0, sym=__entry_task, tmp=x1 + msr sp_el0, x0 + + /* If we interrupted the kernel point to the previous stack/frame. */ + and x0, x3, #0xc + mrs x1, CurrentEL + cmp x0, x1 + csel x29, x29, xzr, eq // fp, or zero + csel x4, x2, xzr, eq // elr, or zero + + stp x29, x4, [sp, #-16]! + mov x29, sp + + add x0, x19, #SDEI_EVENT_INTREGS + mov x1, x19 + bl __sdei_handler + + msr sp_el0, x28 + /* restore regs >x17 that we clobbered */ + mov x4, x19 // keep x4 for __sdei_asm_exit_trampoline + ldp x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14] + ldp x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9] + ldp lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR] + mov sp, x1 + + mov x1, x0 // address to complete_and_resume + /* x0 = (x0 <= SDEI_EV_FAILED) ? 
+ * EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME + */ + cmp x0, #SDEI_EV_FAILED + mov_q x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE + mov_q x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME + csel x0, x2, x3, ls + + ldr_l x2, sdei_exit_mode + + /* Clear the registered-event seen by crash_smp_send_stop() */ + ldrb w3, [x4, #SDEI_EVENT_PRIORITY] + cbnz w3, 1f + adr_this_cpu dst=x5, sym=sdei_active_normal_event, tmp=x6 + b 2f +1: adr_this_cpu dst=x5, sym=sdei_active_critical_event, tmp=x6 +2: str xzr, [x5] + +alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0 + sdei_handler_exit exit_mode=x2 +alternative_else_nop_endif + +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 + tramp_alias dst=x5, sym=__sdei_asm_exit_trampoline + br x5 +#endif +SYM_CODE_END(__sdei_asm_handler) +NOKPROBE(__sdei_asm_handler) + +SYM_CODE_START(__sdei_handler_abort) + mov_q x0, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME + adr x1, 1f + ldr_l x2, sdei_exit_mode + sdei_handler_exit exit_mode=x2 + // exit the handler and jump to the next instruction. + // Exit will stomp x0-x17, PSTATE, ELR_ELx, and SPSR_ELx. +1: ret +SYM_CODE_END(__sdei_handler_abort) +NOKPROBE(__sdei_handler_abort) +#endif /* CONFIG_ARM_SDE_INTERFACE */ diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c new file mode 100644 index 0000000000..f9b3adebcb --- /dev/null +++ b/arch/arm64/kernel/fpsimd.c @@ -0,0 +1,2136 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * FP/SIMD context switching and fault handling + * + * Copyright (C) 2012 ARM Ltd. + * Author: Catalin Marinas + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define FPEXC_IOF (1 << 0) +#define FPEXC_DZF (1 << 1) +#define FPEXC_OFF (1 << 2) +#define FPEXC_UFF (1 << 3) +#define FPEXC_IXF (1 << 4) +#define FPEXC_IDF (1 << 7) + +/* + * (Note: in this discussion, statements about FPSIMD apply equally to SVE.) + * + * In order to reduce the number of times the FPSIMD state is needlessly saved + * and restored, we need to keep track of two things: + * (a) for each task, we need to remember which CPU was the last one to have + * the task's FPSIMD state loaded into its FPSIMD registers; + * (b) for each CPU, we need to remember which task's userland FPSIMD state has + * been loaded into its FPSIMD registers most recently, or whether it has + * been used to perform kernel mode NEON in the meantime. + * + * For (a), we add a fpsimd_cpu field to thread_struct, which gets updated to + * the id of the current CPU every time the state is loaded onto a CPU. For (b), + * we add the per-cpu variable 'fpsimd_last_state' (below), which contains the + * address of the userland FPSIMD state of the task that was loaded onto the CPU + * the most recently, or NULL if kernel mode NEON has been performed after that. + * + * With this in place, we no longer have to restore the next FPSIMD state right + * when switching between tasks. Instead, we can defer this check to userland + * resume, at which time we verify whether the CPU's fpsimd_last_state and the + * task's fpsimd_cpu are still mutually in sync. If this is the case, we + * can omit the FPSIMD restore. 
+ * + * As an optimization, we use the thread_info flag TIF_FOREIGN_FPSTATE to + * indicate whether or not the userland FPSIMD state of the current task is + * present in the registers. The flag is set unless the FPSIMD registers of this + * CPU currently contain the most recent userland FPSIMD state of the current + * task. If the task is behaving as a VMM, then this is will be managed by + * KVM which will clear it to indicate that the vcpu FPSIMD state is currently + * loaded on the CPU, allowing the state to be saved if a FPSIMD-aware + * softirq kicks in. Upon vcpu_put(), KVM will save the vcpu FP state and + * flag the register state as invalid. + * + * In order to allow softirq handlers to use FPSIMD, kernel_neon_begin() may + * save the task's FPSIMD context back to task_struct from softirq context. + * To prevent this from racing with the manipulation of the task's FPSIMD state + * from task context and thereby corrupting the state, it is necessary to + * protect any manipulation of a task's fpsimd_state or TIF_FOREIGN_FPSTATE + * flag with {, __}get_cpu_fpsimd_context(). This will still allow softirqs to + * run but prevent them to use FPSIMD. + * + * For a certain task, the sequence may look something like this: + * - the task gets scheduled in; if both the task's fpsimd_cpu field + * contains the id of the current CPU, and the CPU's fpsimd_last_state per-cpu + * variable points to the task's fpsimd_state, the TIF_FOREIGN_FPSTATE flag is + * cleared, otherwise it is set; + * + * - the task returns to userland; if TIF_FOREIGN_FPSTATE is set, the task's + * userland FPSIMD state is copied from memory to the registers, the task's + * fpsimd_cpu field is set to the id of the current CPU, the current + * CPU's fpsimd_last_state pointer is set to this task's fpsimd_state and the + * TIF_FOREIGN_FPSTATE flag is cleared; + * + * - the task executes an ordinary syscall; upon return to userland, the + * TIF_FOREIGN_FPSTATE flag will still be cleared, so no FPSIMD state is + * restored; + * + * - the task executes a syscall which executes some NEON instructions; this is + * preceded by a call to kernel_neon_begin(), which copies the task's FPSIMD + * register contents to memory, clears the fpsimd_last_state per-cpu variable + * and sets the TIF_FOREIGN_FPSTATE flag; + * + * - the task gets preempted after kernel_neon_end() is called; as we have not + * returned from the 2nd syscall yet, TIF_FOREIGN_FPSTATE is still set so + * whatever is in the FPSIMD registers is not saved to memory, but discarded. 
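The two pieces of bookkeeping, (a) and (b) above, form a mutual handshake: a reload on return to userspace is needed exactly when either side of the task/CPU association has gone stale. A toy, single-threaded model of that check, with invented names and no concurrency:

#include <stdbool.h>
#include <stdio.h>

#define DEMO_NR_CPUS 2

struct demo_task {
        int fpsimd_cpu;                 /* (a) last CPU we were loaded on */
};

/* (b) per CPU: whose state the FPSIMD registers currently hold */
static struct demo_task *demo_last_state[DEMO_NR_CPUS];

static bool demo_needs_reload(const struct demo_task *t, int cpu)
{
        return demo_last_state[cpu] != t || t->fpsimd_cpu != cpu;
}

static void demo_mark_loaded(struct demo_task *t, int cpu)
{
        demo_last_state[cpu] = t;
        t->fpsimd_cpu = cpu;
}

int main(void)
{
        struct demo_task a = { .fpsimd_cpu = -1 };
        struct demo_task b = { .fpsimd_cpu = -1 };

        demo_mark_loaded(&a, 0);
        printf("A resumes on CPU0, reload? %d\n", demo_needs_reload(&a, 0));

        demo_mark_loaded(&b, 0);        /* someone else used CPU0's registers */
        printf("A resumes on CPU0, reload? %d\n", demo_needs_reload(&a, 0));

        demo_mark_loaded(&a, 1);        /* A was last loaded on CPU1 instead */
        printf("A resumes on CPU0, reload? %d\n", demo_needs_reload(&a, 0));
        return 0;
}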
+ */ + +static DEFINE_PER_CPU(struct cpu_fp_state, fpsimd_last_state); + +__ro_after_init struct vl_info vl_info[ARM64_VEC_MAX] = { +#ifdef CONFIG_ARM64_SVE + [ARM64_VEC_SVE] = { + .type = ARM64_VEC_SVE, + .name = "SVE", + .min_vl = SVE_VL_MIN, + .max_vl = SVE_VL_MIN, + .max_virtualisable_vl = SVE_VL_MIN, + }, +#endif +#ifdef CONFIG_ARM64_SME + [ARM64_VEC_SME] = { + .type = ARM64_VEC_SME, + .name = "SME", + }, +#endif +}; + +static unsigned int vec_vl_inherit_flag(enum vec_type type) +{ + switch (type) { + case ARM64_VEC_SVE: + return TIF_SVE_VL_INHERIT; + case ARM64_VEC_SME: + return TIF_SME_VL_INHERIT; + default: + WARN_ON_ONCE(1); + return 0; + } +} + +struct vl_config { + int __default_vl; /* Default VL for tasks */ +}; + +static struct vl_config vl_config[ARM64_VEC_MAX]; + +static inline int get_default_vl(enum vec_type type) +{ + return READ_ONCE(vl_config[type].__default_vl); +} + +#ifdef CONFIG_ARM64_SVE + +static inline int get_sve_default_vl(void) +{ + return get_default_vl(ARM64_VEC_SVE); +} + +static inline void set_default_vl(enum vec_type type, int val) +{ + WRITE_ONCE(vl_config[type].__default_vl, val); +} + +static inline void set_sve_default_vl(int val) +{ + set_default_vl(ARM64_VEC_SVE, val); +} + +static void __percpu *efi_sve_state; + +#else /* ! CONFIG_ARM64_SVE */ + +/* Dummy declaration for code that will be optimised out: */ +extern void __percpu *efi_sve_state; + +#endif /* ! CONFIG_ARM64_SVE */ + +#ifdef CONFIG_ARM64_SME + +static int get_sme_default_vl(void) +{ + return get_default_vl(ARM64_VEC_SME); +} + +static void set_sme_default_vl(int val) +{ + set_default_vl(ARM64_VEC_SME, val); +} + +static void sme_free(struct task_struct *); + +#else + +static inline void sme_free(struct task_struct *t) { } + +#endif + +DEFINE_PER_CPU(bool, fpsimd_context_busy); +EXPORT_PER_CPU_SYMBOL(fpsimd_context_busy); + +static void fpsimd_bind_task_to_cpu(void); + +static void __get_cpu_fpsimd_context(void) +{ + bool busy = __this_cpu_xchg(fpsimd_context_busy, true); + + WARN_ON(busy); +} + +/* + * Claim ownership of the CPU FPSIMD context for use by the calling context. + * + * The caller may freely manipulate the FPSIMD context metadata until + * put_cpu_fpsimd_context() is called. + * + * The double-underscore version must only be called if you know the task + * can't be preempted. + * + * On RT kernels local_bh_disable() is not sufficient because it only + * serializes soft interrupt related sections via a local lock, but stays + * preemptible. Disabling preemption is the right choice here as bottom + * half processing is always in thread context on RT kernels so it + * implicitly prevents bottom half processing as well. + */ +static void get_cpu_fpsimd_context(void) +{ + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) + local_bh_disable(); + else + preempt_disable(); + __get_cpu_fpsimd_context(); +} + +static void __put_cpu_fpsimd_context(void) +{ + bool busy = __this_cpu_xchg(fpsimd_context_busy, false); + + WARN_ON(!busy); /* No matching get_cpu_fpsimd_context()? */ +} + +/* + * Release the CPU FPSIMD context. + * + * Must be called from a context in which get_cpu_fpsimd_context() was + * previously called, with no call to put_cpu_fpsimd_context() in the + * meantime. 
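+ *
+ * The expected pairing, as used by callers later in this file (for example
+ * fpsimd_preserve_current_state()), is simply:
+ *
+ *	get_cpu_fpsimd_context();
+ *	...manipulate current's FPSIMD/SVE state or TIF_FOREIGN_FPSTATE...
+ *	put_cpu_fpsimd_context();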
+ */ +static void put_cpu_fpsimd_context(void) +{ + __put_cpu_fpsimd_context(); + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) + local_bh_enable(); + else + preempt_enable(); +} + +static bool have_cpu_fpsimd_context(void) +{ + return !preemptible() && __this_cpu_read(fpsimd_context_busy); +} + +unsigned int task_get_vl(const struct task_struct *task, enum vec_type type) +{ + return task->thread.vl[type]; +} + +void task_set_vl(struct task_struct *task, enum vec_type type, + unsigned long vl) +{ + task->thread.vl[type] = vl; +} + +unsigned int task_get_vl_onexec(const struct task_struct *task, + enum vec_type type) +{ + return task->thread.vl_onexec[type]; +} + +void task_set_vl_onexec(struct task_struct *task, enum vec_type type, + unsigned long vl) +{ + task->thread.vl_onexec[type] = vl; +} + +/* + * TIF_SME controls whether a task can use SME without trapping while + * in userspace, when TIF_SME is set then we must have storage + * allocated in sve_state and sme_state to store the contents of both ZA + * and the SVE registers for both streaming and non-streaming modes. + * + * If both SVCR.ZA and SVCR.SM are disabled then at any point we + * may disable TIF_SME and reenable traps. + */ + + +/* + * TIF_SVE controls whether a task can use SVE without trapping while + * in userspace, and also (together with TIF_SME) the way a task's + * FPSIMD/SVE state is stored in thread_struct. + * + * The kernel uses this flag to track whether a user task is actively + * using SVE, and therefore whether full SVE register state needs to + * be tracked. If not, the cheaper FPSIMD context handling code can + * be used instead of the more costly SVE equivalents. + * + * * TIF_SVE or SVCR.SM set: + * + * The task can execute SVE instructions while in userspace without + * trapping to the kernel. + * + * During any syscall, the kernel may optionally clear TIF_SVE and + * discard the vector state except for the FPSIMD subset. + * + * * TIF_SVE clear: + * + * An attempt by the user task to execute an SVE instruction causes + * do_sve_acc() to be called, which does some preparation and then + * sets TIF_SVE. + * + * During any syscall, the kernel may optionally clear TIF_SVE and + * discard the vector state except for the FPSIMD subset. + * + * The data will be stored in one of two formats: + * + * * FPSIMD only - FP_STATE_FPSIMD: + * + * When the FPSIMD only state stored task->thread.fp_type is set to + * FP_STATE_FPSIMD, the FPSIMD registers V0-V31 are encoded in + * task->thread.uw.fpsimd_state; bits [max : 128] for each of Z0-Z31 are + * logically zero but not stored anywhere; P0-P15 and FFR are not + * stored and have unspecified values from userspace's point of + * view. For hygiene purposes, the kernel zeroes them on next use, + * but userspace is discouraged from relying on this. + * + * task->thread.sve_state does not need to be non-NULL, valid or any + * particular size: it must not be dereferenced and any data stored + * there should be considered stale and not referenced. + * + * * SVE state - FP_STATE_SVE: + * + * When the full SVE state is stored task->thread.fp_type is set to + * FP_STATE_SVE and Z0-Z31 (incorporating Vn in bits[127:0] or the + * corresponding Zn), P0-P15 and FFR are encoded in in + * task->thread.sve_state, formatted appropriately for vector + * length task->thread.sve_vl or, if SVCR.SM is set, + * task->thread.sme_vl. The storage for the vector registers in + * task->thread.uw.fpsimd_state should be ignored. 
+ * + * task->thread.sve_state must point to a valid buffer at least + * sve_state_size(task) bytes in size. The data stored in + * task->thread.uw.fpsimd_state.vregs should be considered stale + * and not referenced. + * + * * FPSR and FPCR are always stored in task->thread.uw.fpsimd_state + * irrespective of whether TIF_SVE is clear or set, since these are + * not vector length dependent. + */ + +/* + * Update current's FPSIMD/SVE registers from thread_struct. + * + * This function should be called only when the FPSIMD/SVE state in + * thread_struct is known to be up to date, when preparing to enter + * userspace. + */ +static void task_fpsimd_load(void) +{ + bool restore_sve_regs = false; + bool restore_ffr; + + WARN_ON(!system_supports_fpsimd()); + WARN_ON(!have_cpu_fpsimd_context()); + + if (system_supports_sve() || system_supports_sme()) { + switch (current->thread.fp_type) { + case FP_STATE_FPSIMD: + /* Stop tracking SVE for this task until next use. */ + if (test_and_clear_thread_flag(TIF_SVE)) + sve_user_disable(); + break; + case FP_STATE_SVE: + if (!thread_sm_enabled(&current->thread) && + !WARN_ON_ONCE(!test_and_set_thread_flag(TIF_SVE))) + sve_user_enable(); + + if (test_thread_flag(TIF_SVE)) + sve_set_vq(sve_vq_from_vl(task_get_sve_vl(current)) - 1); + + restore_sve_regs = true; + restore_ffr = true; + break; + default: + /* + * This indicates either a bug in + * fpsimd_save() or memory corruption, we + * should always record an explicit format + * when we save. We always at least have the + * memory allocated for FPSIMD registers so + * try that and hope for the best. + */ + WARN_ON_ONCE(1); + clear_thread_flag(TIF_SVE); + break; + } + } + + /* Restore SME, override SVE register configuration if needed */ + if (system_supports_sme()) { + unsigned long sme_vl = task_get_sme_vl(current); + + /* Ensure VL is set up for restoring data */ + if (test_thread_flag(TIF_SME)) + sme_set_vq(sve_vq_from_vl(sme_vl) - 1); + + write_sysreg_s(current->thread.svcr, SYS_SVCR); + + if (thread_za_enabled(&current->thread)) + sme_load_state(current->thread.sme_state, + system_supports_sme2()); + + if (thread_sm_enabled(&current->thread)) + restore_ffr = system_supports_fa64(); + } + + if (restore_sve_regs) { + WARN_ON_ONCE(current->thread.fp_type != FP_STATE_SVE); + sve_load_state(sve_pffr(&current->thread), + &current->thread.uw.fpsimd_state.fpsr, + restore_ffr); + } else { + WARN_ON_ONCE(current->thread.fp_type != FP_STATE_FPSIMD); + fpsimd_load_state(&current->thread.uw.fpsimd_state); + } +} + +/* + * Ensure FPSIMD/SVE storage in memory for the loaded context is up to + * date with respect to the CPU registers. Note carefully that the + * current context is the context last bound to the CPU stored in + * last, if KVM is involved this may be the guest VM context rather + * than the host thread for the VM pointed to by current. This means + * that we must always reference the state storage via last rather + * than via current, if we are saving KVM state then it will have + * ensured that the type of registers to save is set in last->to_save.
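+ *
+ * As a rough sketch of the decision made below (see the code for the
+ * authoritative version):
+ *
+ *	last->to_save == FP_STATE_SVE     - save the full SVE state
+ *	last->to_save == FP_STATE_CURRENT - save SVE only if TIF_SVE is set and
+ *					    we are not in a syscall, otherwise
+ *					    save the FPSIMD-only state
+ *	streaming mode (SVCR.SM) active   - force the SVE format at the SME VL,
+ *					    saving FFR only if FA64 is present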
+ */ +static void fpsimd_save(void) +{ + struct cpu_fp_state const *last = + this_cpu_ptr(&fpsimd_last_state); + /* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */ + bool save_sve_regs = false; + bool save_ffr; + unsigned int vl; + + WARN_ON(!system_supports_fpsimd()); + WARN_ON(!have_cpu_fpsimd_context()); + + if (test_thread_flag(TIF_FOREIGN_FPSTATE)) + return; + + /* + * If a task is in a syscall the ABI allows us to only + * preserve the state shared with FPSIMD so don't bother + * saving the full SVE state in that case. + */ + if ((last->to_save == FP_STATE_CURRENT && test_thread_flag(TIF_SVE) && + !in_syscall(current_pt_regs())) || + last->to_save == FP_STATE_SVE) { + save_sve_regs = true; + save_ffr = true; + vl = last->sve_vl; + } + + if (system_supports_sme()) { + u64 *svcr = last->svcr; + + *svcr = read_sysreg_s(SYS_SVCR); + + if (*svcr & SVCR_ZA_MASK) + sme_save_state(last->sme_state, + system_supports_sme2()); + + /* If we are in streaming mode override regular SVE. */ + if (*svcr & SVCR_SM_MASK) { + save_sve_regs = true; + save_ffr = system_supports_fa64(); + vl = last->sme_vl; + } + } + + if (IS_ENABLED(CONFIG_ARM64_SVE) && save_sve_regs) { + /* Get the configured VL from RDVL, will account for SM */ + if (WARN_ON(sve_get_vl() != vl)) { + /* + * Can't save the user regs, so current would + * re-enter user with corrupt state. + * There's no way to recover, so kill it: + */ + force_signal_inject(SIGKILL, SI_KERNEL, 0, 0); + return; + } + + sve_save_state((char *)last->sve_state + + sve_ffr_offset(vl), + &last->st->fpsr, save_ffr); + *last->fp_type = FP_STATE_SVE; + } else { + fpsimd_save_state(last->st); + *last->fp_type = FP_STATE_FPSIMD; + } +} + +/* + * All vector length selection from userspace comes through here. + * We're on a slow path, so some sanity-checks are included. + * If things go wrong there's a bug somewhere, but try to fall back to a + * safe choice. + */ +static unsigned int find_supported_vector_length(enum vec_type type, + unsigned int vl) +{ + struct vl_info *info = &vl_info[type]; + int bit; + int max_vl = info->max_vl; + + if (WARN_ON(!sve_vl_valid(vl))) + vl = info->min_vl; + + if (WARN_ON(!sve_vl_valid(max_vl))) + max_vl = info->min_vl; + + if (vl > max_vl) + vl = max_vl; + if (vl < info->min_vl) + vl = info->min_vl; + + bit = find_next_bit(info->vq_map, SVE_VQ_MAX, + __vq_to_bit(sve_vq_from_vl(vl))); + return sve_vl_from_vq(__bit_to_vq(bit)); +} + +#if defined(CONFIG_ARM64_SVE) && defined(CONFIG_SYSCTL) + +static int vec_proc_do_default_vl(struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos) +{ + struct vl_info *info = table->extra1; + enum vec_type type = info->type; + int ret; + int vl = get_default_vl(type); + struct ctl_table tmp_table = { + .data = &vl, + .maxlen = sizeof(vl), + }; + + ret = proc_dointvec(&tmp_table, write, buffer, lenp, ppos); + if (ret || !write) + return ret; + + /* Writing -1 has the special meaning "set to max": */ + if (vl == -1) + vl = info->max_vl; + + if (!sve_vl_valid(vl)) + return -EINVAL; + + set_default_vl(type, find_supported_vector_length(type, vl)); + return 0; +} + +static struct ctl_table sve_default_vl_table[] = { + { + .procname = "sve_default_vector_length", + .mode = 0644, + .proc_handler = vec_proc_do_default_vl, + .extra1 = &vl_info[ARM64_VEC_SVE], + }, + { } +}; + +static int __init sve_sysctl_init(void) +{ + if (system_supports_sve()) + if (!register_sysctl("abi", sve_default_vl_table)) + return -EINVAL; + + return 0; +} + +#else /* ! 
(CONFIG_ARM64_SVE && CONFIG_SYSCTL) */ +static int __init sve_sysctl_init(void) { return 0; } +#endif /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */ + +#if defined(CONFIG_ARM64_SME) && defined(CONFIG_SYSCTL) +static struct ctl_table sme_default_vl_table[] = { + { + .procname = "sme_default_vector_length", + .mode = 0644, + .proc_handler = vec_proc_do_default_vl, + .extra1 = &vl_info[ARM64_VEC_SME], + }, + { } +}; + +static int __init sme_sysctl_init(void) +{ + if (system_supports_sme()) + if (!register_sysctl("abi", sme_default_vl_table)) + return -EINVAL; + + return 0; +} + +#else /* ! (CONFIG_ARM64_SME && CONFIG_SYSCTL) */ +static int __init sme_sysctl_init(void) { return 0; } +#endif /* ! (CONFIG_ARM64_SME && CONFIG_SYSCTL) */ + +#define ZREG(sve_state, vq, n) ((char *)(sve_state) + \ + (SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET)) + +#ifdef CONFIG_CPU_BIG_ENDIAN +static __uint128_t arm64_cpu_to_le128(__uint128_t x) +{ + u64 a = swab64(x); + u64 b = swab64(x >> 64); + + return ((__uint128_t)a << 64) | b; +} +#else +static __uint128_t arm64_cpu_to_le128(__uint128_t x) +{ + return x; +} +#endif + +#define arm64_le128_to_cpu(x) arm64_cpu_to_le128(x) + +static void __fpsimd_to_sve(void *sst, struct user_fpsimd_state const *fst, + unsigned int vq) +{ + unsigned int i; + __uint128_t *p; + + for (i = 0; i < SVE_NUM_ZREGS; ++i) { + p = (__uint128_t *)ZREG(sst, vq, i); + *p = arm64_cpu_to_le128(fst->vregs[i]); + } +} + +/* + * Transfer the FPSIMD state in task->thread.uw.fpsimd_state to + * task->thread.sve_state. + * + * Task can be a non-runnable task, or current. In the latter case, + * the caller must have ownership of the cpu FPSIMD context before calling + * this function. + * task->thread.sve_state must point to at least sve_state_size(task) + * bytes of allocated kernel memory. + * task->thread.uw.fpsimd_state must be up to date before calling this + * function. + */ +static void fpsimd_to_sve(struct task_struct *task) +{ + unsigned int vq; + void *sst = task->thread.sve_state; + struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state; + + if (!system_supports_sve() && !system_supports_sme()) + return; + + vq = sve_vq_from_vl(thread_get_cur_vl(&task->thread)); + __fpsimd_to_sve(sst, fst, vq); +} + +/* + * Transfer the SVE state in task->thread.sve_state to + * task->thread.uw.fpsimd_state. + * + * Task can be a non-runnable task, or current. In the latter case, + * the caller must have ownership of the cpu FPSIMD context before calling + * this function. + * task->thread.sve_state must point to at least sve_state_size(task) + * bytes of allocated kernel memory. + * task->thread.sve_state must be up to date before calling this function. + */ +static void sve_to_fpsimd(struct task_struct *task) +{ + unsigned int vq, vl; + void const *sst = task->thread.sve_state; + struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state; + unsigned int i; + __uint128_t const *p; + + if (!system_supports_sve() && !system_supports_sme()) + return; + + vl = thread_get_cur_vl(&task->thread); + vq = sve_vq_from_vl(vl); + for (i = 0; i < SVE_NUM_ZREGS; ++i) { + p = (__uint128_t const *)ZREG(sst, vq, i); + fst->vregs[i] = arm64_le128_to_cpu(*p); + } +} + +#ifdef CONFIG_ARM64_SVE +/* + * Call __sve_free() directly only if you know task can't be scheduled + * or preempted. 
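+ * (fpsimd_release_task(), reached via the put_task_struct() path where the
+ * task is known to be dead, is one such caller.)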
+ */ +static void __sve_free(struct task_struct *task) +{ + kfree(task->thread.sve_state); + task->thread.sve_state = NULL; +} + +static void sve_free(struct task_struct *task) +{ + WARN_ON(test_tsk_thread_flag(task, TIF_SVE)); + + __sve_free(task); +} + +/* + * Return how many bytes of memory are required to store the full SVE + * state for task, given task's currently configured vector length. + */ +size_t sve_state_size(struct task_struct const *task) +{ + unsigned int vl = 0; + + if (system_supports_sve()) + vl = task_get_sve_vl(task); + if (system_supports_sme()) + vl = max(vl, task_get_sme_vl(task)); + + return SVE_SIG_REGS_SIZE(sve_vq_from_vl(vl)); +} + +/* + * Ensure that task->thread.sve_state is allocated and sufficiently large. + * + * This function should be used only in preparation for replacing + * task->thread.sve_state with new data. The memory is always zeroed + * here to prevent stale data from showing through: this is done in + * the interest of testability and predictability: except in the + * do_sve_acc() case, there is no ABI requirement to hide stale data + * written previously by the task. + */ +void sve_alloc(struct task_struct *task, bool flush) +{ + if (task->thread.sve_state) { + if (flush) + memset(task->thread.sve_state, 0, + sve_state_size(task)); + return; + } + + /* This is a small allocation (maximum ~8KB) and Should Not Fail. */ + task->thread.sve_state = + kzalloc(sve_state_size(task), GFP_KERNEL); +} + + +/* + * Force the FPSIMD state shared with SVE to be updated in the SVE state + * even if the SVE state is the current active state. + * + * This should only be called by ptrace. task must be non-runnable. + * task->thread.sve_state must point to at least sve_state_size(task) + * bytes of allocated kernel memory. + */ +void fpsimd_force_sync_to_sve(struct task_struct *task) +{ + fpsimd_to_sve(task); +} + +/* + * Ensure that task->thread.sve_state is up to date with respect to + * the user task, irrespective of whether SVE is in use or not. + * + * This should only be called by ptrace. task must be non-runnable. + * task->thread.sve_state must point to at least sve_state_size(task) + * bytes of allocated kernel memory. + */ +void fpsimd_sync_to_sve(struct task_struct *task) +{ + if (!test_tsk_thread_flag(task, TIF_SVE) && + !thread_sm_enabled(&task->thread)) + fpsimd_to_sve(task); +} + +/* + * Ensure that task->thread.uw.fpsimd_state is up to date with respect to + * the user task, irrespective of whether SVE is in use or not. + * + * This should only be called by ptrace. task must be non-runnable. + * task->thread.sve_state must point to at least sve_state_size(task) + * bytes of allocated kernel memory. + */ +void sve_sync_to_fpsimd(struct task_struct *task) +{ + if (task->thread.fp_type == FP_STATE_SVE) + sve_to_fpsimd(task); +} + +/* + * Ensure that task->thread.sve_state is up to date with respect to + * the task->thread.uw.fpsimd_state. + * + * This should only be called by ptrace to merge new FPSIMD register + * values into a task for which SVE is currently active. + * task must be non-runnable. + * task->thread.sve_state must point to at least sve_state_size(task) + * bytes of allocated kernel memory. + * task->thread.uw.fpsimd_state must already have been initialised with + * the new FPSIMD register values to be merged in.
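+ *
+ * In other words (a sketch of the effect of the code below): the whole of
+ * task->thread.sve_state is zeroed first, then V0-V31 are copied into
+ * bits [127:0] of Z0-Z31, leaving the upper Z bits, P0-P15 and FFR as zero.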
+ */ +void sve_sync_from_fpsimd_zeropad(struct task_struct *task) +{ + unsigned int vq; + void *sst = task->thread.sve_state; + struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state; + + if (!test_tsk_thread_flag(task, TIF_SVE) && + !thread_sm_enabled(&task->thread)) + return; + + vq = sve_vq_from_vl(thread_get_cur_vl(&task->thread)); + + memset(sst, 0, SVE_SIG_REGS_SIZE(vq)); + __fpsimd_to_sve(sst, fst, vq); +} + +int vec_set_vector_length(struct task_struct *task, enum vec_type type, + unsigned long vl, unsigned long flags) +{ + bool free_sme = false; + + if (flags & ~(unsigned long)(PR_SVE_VL_INHERIT | + PR_SVE_SET_VL_ONEXEC)) + return -EINVAL; + + if (!sve_vl_valid(vl)) + return -EINVAL; + + /* + * Clamp to the maximum vector length that VL-agnostic code + * can work with. A flag may be assigned in the future to + * allow setting of larger vector lengths without confusing + * older software. + */ + if (vl > VL_ARCH_MAX) + vl = VL_ARCH_MAX; + + vl = find_supported_vector_length(type, vl); + + if (flags & (PR_SVE_VL_INHERIT | + PR_SVE_SET_VL_ONEXEC)) + task_set_vl_onexec(task, type, vl); + else + /* Reset VL to system default on next exec: */ + task_set_vl_onexec(task, type, 0); + + /* Only actually set the VL if not deferred: */ + if (flags & PR_SVE_SET_VL_ONEXEC) + goto out; + + if (vl == task_get_vl(task, type)) + goto out; + + /* + * To ensure the FPSIMD bits of the SVE vector registers are preserved, + * write any live register state back to task_struct, and convert to a + * regular FPSIMD thread. + */ + if (task == current) { + get_cpu_fpsimd_context(); + + fpsimd_save(); + } + + fpsimd_flush_task_state(task); + if (test_and_clear_tsk_thread_flag(task, TIF_SVE) || + thread_sm_enabled(&task->thread)) { + sve_to_fpsimd(task); + task->thread.fp_type = FP_STATE_FPSIMD; + } + + if (system_supports_sme()) { + if (type == ARM64_VEC_SME || + !(task->thread.svcr & (SVCR_SM_MASK | SVCR_ZA_MASK))) { + /* + * We are changing the SME VL or weren't using + * SME anyway, discard the state and force a + * reallocation. + */ + task->thread.svcr &= ~(SVCR_SM_MASK | + SVCR_ZA_MASK); + clear_tsk_thread_flag(task, TIF_SME); + free_sme = true; + } + } + + if (task == current) + put_cpu_fpsimd_context(); + + task_set_vl(task, type, vl); + + /* + * Free the changed states if they are not in use, SME will be + * reallocated to the correct size on next use and we just + * allocate SVE now in case it is needed for use in streaming + * mode. + */ + if (system_supports_sve()) { + sve_free(task); + sve_alloc(task, true); + } + + if (free_sme) + sme_free(task); + +out: + update_tsk_thread_flag(task, vec_vl_inherit_flag(type), + flags & PR_SVE_VL_INHERIT); + + return 0; +} + +/* + * Encode the current vector length and flags for return. + * This is only required for prctl(): ptrace has separate fields. + * SVE and SME use the same bits for _ONEXEC and _INHERIT. + * + * flags are as for vec_set_vector_length(). 
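+ *
+ * For illustration only, a minimal userspace consumer of this encoding
+ * (assuming a libc that exposes the PR_SVE_* constants via sys/prctl.h)
+ * might look like:
+ *
+ *	#include <sys/prctl.h>
+ *
+ *	int ret = prctl(PR_SVE_SET_VL, 32 | PR_SVE_VL_INHERIT);
+ *	if (ret >= 0) {
+ *		unsigned int vl = ret & PR_SVE_VL_LEN_MASK;	/* VL in bytes */
+ *		int inherit = !!(ret & PR_SVE_VL_INHERIT);
+ *	}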
+ */ +static int vec_prctl_status(enum vec_type type, unsigned long flags) +{ + int ret; + + if (flags & PR_SVE_SET_VL_ONEXEC) + ret = task_get_vl_onexec(current, type); + else + ret = task_get_vl(current, type); + + if (test_thread_flag(vec_vl_inherit_flag(type))) + ret |= PR_SVE_VL_INHERIT; + + return ret; +} + +/* PR_SVE_SET_VL */ +int sve_set_current_vl(unsigned long arg) +{ + unsigned long vl, flags; + int ret; + + vl = arg & PR_SVE_VL_LEN_MASK; + flags = arg & ~vl; + + if (!system_supports_sve() || is_compat_task()) + return -EINVAL; + + ret = vec_set_vector_length(current, ARM64_VEC_SVE, vl, flags); + if (ret) + return ret; + + return vec_prctl_status(ARM64_VEC_SVE, flags); +} + +/* PR_SVE_GET_VL */ +int sve_get_current_vl(void) +{ + if (!system_supports_sve() || is_compat_task()) + return -EINVAL; + + return vec_prctl_status(ARM64_VEC_SVE, 0); +} + +#ifdef CONFIG_ARM64_SME +/* PR_SME_SET_VL */ +int sme_set_current_vl(unsigned long arg) +{ + unsigned long vl, flags; + int ret; + + vl = arg & PR_SME_VL_LEN_MASK; + flags = arg & ~vl; + + if (!system_supports_sme() || is_compat_task()) + return -EINVAL; + + ret = vec_set_vector_length(current, ARM64_VEC_SME, vl, flags); + if (ret) + return ret; + + return vec_prctl_status(ARM64_VEC_SME, flags); +} + +/* PR_SME_GET_VL */ +int sme_get_current_vl(void) +{ + if (!system_supports_sme() || is_compat_task()) + return -EINVAL; + + return vec_prctl_status(ARM64_VEC_SME, 0); +} +#endif /* CONFIG_ARM64_SME */ + +static void vec_probe_vqs(struct vl_info *info, + DECLARE_BITMAP(map, SVE_VQ_MAX)) +{ + unsigned int vq, vl; + + bitmap_zero(map, SVE_VQ_MAX); + + for (vq = SVE_VQ_MAX; vq >= SVE_VQ_MIN; --vq) { + write_vl(info->type, vq - 1); /* self-syncing */ + + switch (info->type) { + case ARM64_VEC_SVE: + vl = sve_get_vl(); + break; + case ARM64_VEC_SME: + vl = sme_get_vl(); + break; + default: + vl = 0; + break; + } + + /* Minimum VL identified? */ + if (sve_vq_from_vl(vl) > vq) + break; + + vq = sve_vq_from_vl(vl); /* skip intervening lengths */ + set_bit(__vq_to_bit(vq), map); + } +} + +/* + * Initialise the set of known supported VQs for the boot CPU. + * This is called during kernel boot, before secondary CPUs are brought up. + */ +void __init vec_init_vq_map(enum vec_type type) +{ + struct vl_info *info = &vl_info[type]; + vec_probe_vqs(info, info->vq_map); + bitmap_copy(info->vq_partial_map, info->vq_map, SVE_VQ_MAX); +} + +/* + * If we haven't committed to the set of supported VQs yet, filter out + * those not supported by the current CPU. + * This function is called during the bring-up of early secondary CPUs only. + */ +void vec_update_vq_map(enum vec_type type) +{ + struct vl_info *info = &vl_info[type]; + DECLARE_BITMAP(tmp_map, SVE_VQ_MAX); + + vec_probe_vqs(info, tmp_map); + bitmap_and(info->vq_map, info->vq_map, tmp_map, SVE_VQ_MAX); + bitmap_or(info->vq_partial_map, info->vq_partial_map, tmp_map, + SVE_VQ_MAX); +} + +/* + * Check whether the current CPU supports all VQs in the committed set. + * This function is called during the bring-up of late secondary CPUs only. 
+ */ +int vec_verify_vq_map(enum vec_type type) +{ + struct vl_info *info = &vl_info[type]; + DECLARE_BITMAP(tmp_map, SVE_VQ_MAX); + unsigned long b; + + vec_probe_vqs(info, tmp_map); + + bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX); + if (bitmap_intersects(tmp_map, info->vq_map, SVE_VQ_MAX)) { + pr_warn("%s: cpu%d: Required vector length(s) missing\n", + info->name, smp_processor_id()); + return -EINVAL; + } + + if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available()) + return 0; + + /* + * For KVM, it is necessary to ensure that this CPU doesn't + * support any vector length that guests may have probed as + * unsupported. + */ + + /* Recover the set of supported VQs: */ + bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX); + /* Find VQs supported that are not globally supported: */ + bitmap_andnot(tmp_map, tmp_map, info->vq_map, SVE_VQ_MAX); + + /* Find the lowest such VQ, if any: */ + b = find_last_bit(tmp_map, SVE_VQ_MAX); + if (b >= SVE_VQ_MAX) + return 0; /* no mismatches */ + + /* + * Mismatches above sve_max_virtualisable_vl are fine, since + * no guest is allowed to configure ZCR_EL2.LEN to exceed this: + */ + if (sve_vl_from_vq(__bit_to_vq(b)) <= info->max_virtualisable_vl) { + pr_warn("%s: cpu%d: Unsupported vector length(s) present\n", + info->name, smp_processor_id()); + return -EINVAL; + } + + return 0; +} + +static void __init sve_efi_setup(void) +{ + int max_vl = 0; + int i; + + if (!IS_ENABLED(CONFIG_EFI)) + return; + + for (i = 0; i < ARRAY_SIZE(vl_info); i++) + max_vl = max(vl_info[i].max_vl, max_vl); + + /* + * alloc_percpu() warns and prints a backtrace if this goes wrong. + * This is evidence of a crippled system and we are returning void, + * so no attempt is made to handle this situation here. + */ + if (!sve_vl_valid(max_vl)) + goto fail; + + efi_sve_state = __alloc_percpu( + SVE_SIG_REGS_SIZE(sve_vq_from_vl(max_vl)), SVE_VQ_BYTES); + if (!efi_sve_state) + goto fail; + + return; + +fail: + panic("Cannot allocate percpu memory for EFI SVE save/restore"); +} + +/* + * Enable SVE for EL1. + * Intended for use by the cpufeatures code during CPU boot. + */ +void sve_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p) +{ + write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_ZEN_EL1EN, CPACR_EL1); + isb(); +} + +/* + * Read the pseudo-ZCR used by cpufeatures to identify the supported SVE + * vector length. + * + * Use only if SVE is present. + * This function clobbers the SVE vector length. + */ +u64 read_zcr_features(void) +{ + /* + * Set the maximum possible VL, and write zeroes to all other + * bits to see if they stick. + */ + sve_kernel_enable(NULL); + write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL1); + + /* Return LEN value that would be written to get the maximum VL */ + return sve_vq_from_vl(sve_get_vl()) - 1; +} + +void __init sve_setup(void) +{ + struct vl_info *info = &vl_info[ARM64_VEC_SVE]; + u64 zcr; + DECLARE_BITMAP(tmp_map, SVE_VQ_MAX); + unsigned long b; + + if (!system_supports_sve()) + return; + + /* + * The SVE architecture mandates support for 128-bit vectors, + * so sve_vq_map must have at least SVE_VQ_MIN set. + * If something went wrong, at least try to patch it up: + */ + if (WARN_ON(!test_bit(__vq_to_bit(SVE_VQ_MIN), info->vq_map))) + set_bit(__vq_to_bit(SVE_VQ_MIN), info->vq_map); + + zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1); + info->max_vl = sve_vl_from_vq((zcr & ZCR_ELx_LEN_MASK) + 1); + + /* + * Sanity-check that the max VL we determined through CPU features + * corresponds properly to sve_vq_map. 
If not, do our best: + */ + if (WARN_ON(info->max_vl != find_supported_vector_length(ARM64_VEC_SVE, + info->max_vl))) + info->max_vl = find_supported_vector_length(ARM64_VEC_SVE, + info->max_vl); + + /* + * For the default VL, pick the maximum supported value <= 64. + * VL == 64 is guaranteed not to grow the signal frame. + */ + set_sve_default_vl(find_supported_vector_length(ARM64_VEC_SVE, 64)); + + bitmap_andnot(tmp_map, info->vq_partial_map, info->vq_map, + SVE_VQ_MAX); + + b = find_last_bit(tmp_map, SVE_VQ_MAX); + if (b >= SVE_VQ_MAX) + /* No non-virtualisable VLs found */ + info->max_virtualisable_vl = SVE_VQ_MAX; + else if (WARN_ON(b == SVE_VQ_MAX - 1)) + /* No virtualisable VLs? This is architecturally forbidden. */ + info->max_virtualisable_vl = SVE_VQ_MIN; + else /* b + 1 < SVE_VQ_MAX */ + info->max_virtualisable_vl = sve_vl_from_vq(__bit_to_vq(b + 1)); + + if (info->max_virtualisable_vl > info->max_vl) + info->max_virtualisable_vl = info->max_vl; + + pr_info("%s: maximum available vector length %u bytes per vector\n", + info->name, info->max_vl); + pr_info("%s: default vector length %u bytes per vector\n", + info->name, get_sve_default_vl()); + + /* KVM decides whether to support mismatched systems. Just warn here: */ + if (sve_max_virtualisable_vl() < sve_max_vl()) + pr_warn("%s: unvirtualisable vector lengths present\n", + info->name); + + sve_efi_setup(); +} + +/* + * Called from the put_task_struct() path, which cannot get here + * unless dead_task is really dead and not schedulable. + */ +void fpsimd_release_task(struct task_struct *dead_task) +{ + __sve_free(dead_task); + sme_free(dead_task); +} + +#endif /* CONFIG_ARM64_SVE */ + +#ifdef CONFIG_ARM64_SME + +/* + * Ensure that task->thread.sme_state is allocated and sufficiently large. + * + * This function should be used only in preparation for replacing + * task->thread.sme_state with new data. The memory is always zeroed + * here to prevent stale data from showing through: this is done in + * the interest of testability and predictability, the architecture + * guarantees that when ZA is enabled it will be zeroed. + */ +void sme_alloc(struct task_struct *task, bool flush) +{ + if (task->thread.sme_state) { + if (flush) + memset(task->thread.sme_state, 0, + sme_state_size(task)); + return; + } + + /* This could potentially be up to 64K. */ + task->thread.sme_state = + kzalloc(sme_state_size(task), GFP_KERNEL); +} + +static void sme_free(struct task_struct *task) +{ + kfree(task->thread.sme_state); + task->thread.sme_state = NULL; +} + +void sme_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p) +{ + /* Set priority for all PEs to architecturally defined minimum */ + write_sysreg_s(read_sysreg_s(SYS_SMPRI_EL1) & ~SMPRI_EL1_PRIORITY_MASK, + SYS_SMPRI_EL1); + + /* Allow SME in kernel */ + write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_SMEN_EL1EN, CPACR_EL1); + isb(); + + /* Allow EL0 to access TPIDR2 */ + write_sysreg(read_sysreg(SCTLR_EL1) | SCTLR_ELx_ENTP2, SCTLR_EL1); + isb(); +} + +/* + * This must be called after sme_kernel_enable(), we rely on the + * feature table being sorted to ensure this. + */ +void sme2_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p) +{ + /* Allow use of ZT0 */ + write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_EZT0_MASK, + SYS_SMCR_EL1); +} + +/* + * This must be called after sme_kernel_enable(), we rely on the + * feature table being sorted to ensure this. 
+ */ +void fa64_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p) +{ + /* Allow use of FA64 */ + write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_FA64_MASK, + SYS_SMCR_EL1); +} + +/* + * Read the pseudo-SMCR used by cpufeatures to identify the supported + * vector length. + * + * Use only if SME is present. + * This function clobbers the SME vector length. + */ +u64 read_smcr_features(void) +{ + sme_kernel_enable(NULL); + + /* + * Set the maximum possible VL. + */ + write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_LEN_MASK, + SYS_SMCR_EL1); + + /* Return LEN value that would be written to get the maximum VL */ + return sve_vq_from_vl(sme_get_vl()) - 1; +} + +void __init sme_setup(void) +{ + struct vl_info *info = &vl_info[ARM64_VEC_SME]; + u64 smcr; + int min_bit; + + if (!system_supports_sme()) + return; + + /* + * SME doesn't require any particular vector length be + * supported but it does require at least one. We should have + * disabled the feature entirely while bringing up CPUs but + * let's double check here. + */ + WARN_ON(bitmap_empty(info->vq_map, SVE_VQ_MAX)); + + min_bit = find_last_bit(info->vq_map, SVE_VQ_MAX); + info->min_vl = sve_vl_from_vq(__bit_to_vq(min_bit)); + + smcr = read_sanitised_ftr_reg(SYS_SMCR_EL1); + info->max_vl = sve_vl_from_vq((smcr & SMCR_ELx_LEN_MASK) + 1); + + /* + * Sanity-check that the max VL we determined through CPU features + * corresponds properly to sme_vq_map. If not, do our best: + */ + if (WARN_ON(info->max_vl != find_supported_vector_length(ARM64_VEC_SME, + info->max_vl))) + info->max_vl = find_supported_vector_length(ARM64_VEC_SME, + info->max_vl); + + WARN_ON(info->min_vl > info->max_vl); + + /* + * For the default VL, pick the maximum supported value <= 32 + * (256 bits) if there is one since this is guaranteed not to + * grow the signal frame when in streaming mode, otherwise the + * minimum available VL will be used. + */ + set_sme_default_vl(find_supported_vector_length(ARM64_VEC_SME, 32)); + + pr_info("SME: minimum available vector length %u bytes per vector\n", + info->min_vl); + pr_info("SME: maximum available vector length %u bytes per vector\n", + info->max_vl); + pr_info("SME: default vector length %u bytes per vector\n", + get_sme_default_vl()); +} + +#endif /* CONFIG_ARM64_SME */ + +static void sve_init_regs(void) +{ + /* + * Convert the FPSIMD state to SVE, zeroing all the state that + * is not shared with FPSIMD. If (as is likely) the current + * state is live in the registers then do this there and + * update our metadata for the current task including + * disabling the trap, otherwise update our in-memory copy. + * We are guaranteed to not be in streaming mode, we can only + * take a SVE trap when not in streaming mode and we can't be + * in streaming mode when taking a SME trap. + */ + if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) { + unsigned long vq_minus_one = + sve_vq_from_vl(task_get_sve_vl(current)) - 1; + sve_set_vq(vq_minus_one); + sve_flush_live(true, vq_minus_one); + fpsimd_bind_task_to_cpu(); + } else { + fpsimd_to_sve(current); + current->thread.fp_type = FP_STATE_SVE; + } +} + +/* + * Trapped SVE access + * + * Storage is allocated for the full SVE state, the current FPSIMD + * register contents are migrated across, and the access trap is + * disabled. + * + * TIF_SVE should be clear on entry: otherwise, fpsimd_restore_current_state() + * would have disabled the SVE access trap for userspace during + * ret_to_user, making an SVE access trap impossible in that case. 
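+ *
+ * Concretely (as an illustration, not an ABI statement): a task that has
+ * never used SVE and then executes its first SVE instruction, e.g.
+ *
+ *	ptrue	p0.b
+ *
+ * traps here, has sve_state allocated, has the live FPSIMD state converted
+ * to SVE format by sve_init_regs(), and resumes with TIF_SVE set so that
+ * further SVE instructions no longer trap.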
+ */ +void do_sve_acc(unsigned long esr, struct pt_regs *regs) +{ + /* Even if we chose not to use SVE, the hardware could still trap: */ + if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) { + force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0); + return; + } + + sve_alloc(current, true); + if (!current->thread.sve_state) { + force_sig(SIGKILL); + return; + } + + get_cpu_fpsimd_context(); + + if (test_and_set_thread_flag(TIF_SVE)) + WARN_ON(1); /* SVE access shouldn't have trapped */ + + /* + * Even if the task can have used streaming mode we can only + * generate SVE access traps in normal SVE mode and + * transitioning out of streaming mode may discard any + * streaming mode state. Always clear the high bits to avoid + * any potential errors tracking what is properly initialised. + */ + sve_init_regs(); + + put_cpu_fpsimd_context(); +} + +/* + * Trapped SME access + * + * Storage is allocated for the full SVE and SME state, the current + * FPSIMD register contents are migrated to SVE if SVE is not already + * active, and the access trap is disabled. + * + * TIF_SME should be clear on entry: otherwise, fpsimd_restore_current_state() + * would have disabled the SME access trap for userspace during + * ret_to_user, making an SME access trap impossible in that case. + */ +void do_sme_acc(unsigned long esr, struct pt_regs *regs) +{ + /* Even if we chose not to use SME, the hardware could still trap: */ + if (unlikely(!system_supports_sme()) || WARN_ON(is_compat_task())) { + force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0); + return; + } + + /* + * If this not a trap due to SME being disabled then something + * is being used in the wrong mode, report as SIGILL. + */ + if (ESR_ELx_ISS(esr) != ESR_ELx_SME_ISS_SME_DISABLED) { + force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0); + return; + } + + sve_alloc(current, false); + sme_alloc(current, true); + if (!current->thread.sve_state || !current->thread.sme_state) { + force_sig(SIGKILL); + return; + } + + get_cpu_fpsimd_context(); + + /* With TIF_SME userspace shouldn't generate any traps */ + if (test_and_set_thread_flag(TIF_SME)) + WARN_ON(1); + + if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) { + unsigned long vq_minus_one = + sve_vq_from_vl(task_get_sme_vl(current)) - 1; + sme_set_vq(vq_minus_one); + + fpsimd_bind_task_to_cpu(); + } + + put_cpu_fpsimd_context(); +} + +/* + * Trapped FP/ASIMD access. + */ +void do_fpsimd_acc(unsigned long esr, struct pt_regs *regs) +{ + /* TODO: implement lazy context saving/restoring */ + WARN_ON(1); +} + +/* + * Raise a SIGFPE for the current process. + */ +void do_fpsimd_exc(unsigned long esr, struct pt_regs *regs) +{ + unsigned int si_code = FPE_FLTUNK; + + if (esr & ESR_ELx_FP_EXC_TFV) { + if (esr & FPEXC_IOF) + si_code = FPE_FLTINV; + else if (esr & FPEXC_DZF) + si_code = FPE_FLTDIV; + else if (esr & FPEXC_OFF) + si_code = FPE_FLTOVF; + else if (esr & FPEXC_UFF) + si_code = FPE_FLTUND; + else if (esr & FPEXC_IXF) + si_code = FPE_FLTRES; + } + + send_sig_fault(SIGFPE, si_code, + (void __user *)instruction_pointer(regs), + current); +} + +void fpsimd_thread_switch(struct task_struct *next) +{ + bool wrong_task, wrong_cpu; + + if (!system_supports_fpsimd()) + return; + + __get_cpu_fpsimd_context(); + + /* Save unsaved fpsimd state, if any: */ + fpsimd_save(); + + /* + * Fix up TIF_FOREIGN_FPSTATE to correctly describe next's + * state. For kernel threads, FPSIMD registers are never loaded + * and wrong_task and wrong_cpu will always be true. 
+ */ + wrong_task = __this_cpu_read(fpsimd_last_state.st) != + &next->thread.uw.fpsimd_state; + wrong_cpu = next->thread.fpsimd_cpu != smp_processor_id(); + + update_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE, + wrong_task || wrong_cpu); + + __put_cpu_fpsimd_context(); +} + +static void fpsimd_flush_thread_vl(enum vec_type type) +{ + int vl, supported_vl; + + /* + * Reset the task vector length as required. This is where we + * ensure that all user tasks have a valid vector length + * configured: no kernel task can become a user task without + * an exec and hence a call to this function. By the time the + * first call to this function is made, all early hardware + * probing is complete, so __sve_default_vl should be valid. + * If a bug causes this to go wrong, we make some noise and + * try to fudge thread.sve_vl to a safe value here. + */ + vl = task_get_vl_onexec(current, type); + if (!vl) + vl = get_default_vl(type); + + if (WARN_ON(!sve_vl_valid(vl))) + vl = vl_info[type].min_vl; + + supported_vl = find_supported_vector_length(type, vl); + if (WARN_ON(supported_vl != vl)) + vl = supported_vl; + + task_set_vl(current, type, vl); + + /* + * If the task is not set to inherit, ensure that the vector + * length will be reset by a subsequent exec: + */ + if (!test_thread_flag(vec_vl_inherit_flag(type))) + task_set_vl_onexec(current, type, 0); +} + +void fpsimd_flush_thread(void) +{ + void *sve_state = NULL; + void *sme_state = NULL; + + if (!system_supports_fpsimd()) + return; + + get_cpu_fpsimd_context(); + + fpsimd_flush_task_state(current); + memset(&current->thread.uw.fpsimd_state, 0, + sizeof(current->thread.uw.fpsimd_state)); + + if (system_supports_sve()) { + clear_thread_flag(TIF_SVE); + + /* Defer kfree() while in atomic context */ + sve_state = current->thread.sve_state; + current->thread.sve_state = NULL; + + fpsimd_flush_thread_vl(ARM64_VEC_SVE); + } + + if (system_supports_sme()) { + clear_thread_flag(TIF_SME); + + /* Defer kfree() while in atomic context */ + sme_state = current->thread.sme_state; + current->thread.sme_state = NULL; + + fpsimd_flush_thread_vl(ARM64_VEC_SME); + current->thread.svcr = 0; + } + + current->thread.fp_type = FP_STATE_FPSIMD; + + put_cpu_fpsimd_context(); + kfree(sve_state); + kfree(sme_state); +} + +/* + * Save the userland FPSIMD state of 'current' to memory, but only if the state + * currently held in the registers does in fact belong to 'current' + */ +void fpsimd_preserve_current_state(void) +{ + if (!system_supports_fpsimd()) + return; + + get_cpu_fpsimd_context(); + fpsimd_save(); + put_cpu_fpsimd_context(); +} + +/* + * Like fpsimd_preserve_current_state(), but ensure that + * current->thread.uw.fpsimd_state is updated so that it can be copied to + * the signal frame. + */ +void fpsimd_signal_preserve_current_state(void) +{ + fpsimd_preserve_current_state(); + if (test_thread_flag(TIF_SVE)) + sve_to_fpsimd(current); +} + +/* + * Called by KVM when entering the guest. + */ +void fpsimd_kvm_prepare(void) +{ + if (!system_supports_sve()) + return; + + /* + * KVM does not save host SVE state since we can only enter + * the guest from a syscall so the ABI means that only the + * non-saved SVE state needs to be saved. If we have left + * SVE enabled for performance reasons then update the task + * state to be FPSIMD only.
+ */ + get_cpu_fpsimd_context(); + + if (test_and_clear_thread_flag(TIF_SVE)) { + sve_to_fpsimd(current); + current->thread.fp_type = FP_STATE_FPSIMD; + } + + put_cpu_fpsimd_context(); +} + +/* + * Associate current's FPSIMD context with this cpu + * The caller must have ownership of the cpu FPSIMD context before calling + * this function. + */ +static void fpsimd_bind_task_to_cpu(void) +{ + struct cpu_fp_state *last = this_cpu_ptr(&fpsimd_last_state); + + WARN_ON(!system_supports_fpsimd()); + last->st = &current->thread.uw.fpsimd_state; + last->sve_state = current->thread.sve_state; + last->sme_state = current->thread.sme_state; + last->sve_vl = task_get_sve_vl(current); + last->sme_vl = task_get_sme_vl(current); + last->svcr = &current->thread.svcr; + last->fp_type = &current->thread.fp_type; + last->to_save = FP_STATE_CURRENT; + current->thread.fpsimd_cpu = smp_processor_id(); + + /* + * Toggle SVE and SME trapping for userspace if needed, these + * are serialised by ret_to_user(). + */ + if (system_supports_sme()) { + if (test_thread_flag(TIF_SME)) + sme_user_enable(); + else + sme_user_disable(); + } + + if (system_supports_sve()) { + if (test_thread_flag(TIF_SVE)) + sve_user_enable(); + else + sve_user_disable(); + } +} + +void fpsimd_bind_state_to_cpu(struct cpu_fp_state *state) +{ + struct cpu_fp_state *last = this_cpu_ptr(&fpsimd_last_state); + + WARN_ON(!system_supports_fpsimd()); + WARN_ON(!in_softirq() && !irqs_disabled()); + + *last = *state; +} + +/* + * Load the userland FPSIMD state of 'current' from memory, but only if the + * FPSIMD state already held in the registers is /not/ the most recent FPSIMD + * state of 'current'. This is called when we are preparing to return to + * userspace to ensure that userspace sees a good register state. + */ +void fpsimd_restore_current_state(void) +{ + /* + * For the tasks that were created before we detected the absence of + * FP/SIMD, the TIF_FOREIGN_FPSTATE could be set via fpsimd_thread_switch(), + * e.g. init. This could be then inherited by the children processes. + * If we later detect that the system doesn't support FP/SIMD, + * we must clear the flag for all the tasks to indicate that the + * FPSTATE is clean (as we can't have one) to avoid looping for ever in + * do_notify_resume(). + */ + if (!system_supports_fpsimd()) { + clear_thread_flag(TIF_FOREIGN_FPSTATE); + return; + } + + get_cpu_fpsimd_context(); + + if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) { + task_fpsimd_load(); + fpsimd_bind_task_to_cpu(); + } + + put_cpu_fpsimd_context(); +} + +/* + * Load an updated userland FPSIMD state for 'current' from memory and set the + * flag that indicates that the FPSIMD register contents are the most recent + * FPSIMD state of 'current'. This is used by the signal code to restore the + * register state when returning from a signal handler in FPSIMD only cases, + * any SVE context will be discarded. + */ +void fpsimd_update_current_state(struct user_fpsimd_state const *state) +{ + if (WARN_ON(!system_supports_fpsimd())) + return; + + get_cpu_fpsimd_context(); + + current->thread.uw.fpsimd_state = *state; + if (test_thread_flag(TIF_SVE)) + fpsimd_to_sve(current); + + task_fpsimd_load(); + fpsimd_bind_task_to_cpu(); + + clear_thread_flag(TIF_FOREIGN_FPSTATE); + + put_cpu_fpsimd_context(); +} + +/* + * Invalidate live CPU copies of task t's FPSIMD state + * + * This function may be called with preemption enabled.
The barrier() + * ensures that the assignment to fpsimd_cpu is visible to any + * preemption/softirq that could race with set_tsk_thread_flag(), so + * that TIF_FOREIGN_FPSTATE cannot be spuriously re-cleared. + * + * The final barrier ensures that TIF_FOREIGN_FPSTATE is seen set by any + * subsequent code. + */ +void fpsimd_flush_task_state(struct task_struct *t) +{ + t->thread.fpsimd_cpu = NR_CPUS; + /* + * If we don't support fpsimd, bail out after we have + * reset the fpsimd_cpu for this task and clear the + * FPSTATE. + */ + if (!system_supports_fpsimd()) + return; + barrier(); + set_tsk_thread_flag(t, TIF_FOREIGN_FPSTATE); + + barrier(); +} + +/* + * Invalidate any task's FPSIMD state that is present on this cpu. + * The FPSIMD context should be acquired with get_cpu_fpsimd_context() + * before calling this function. + */ +static void fpsimd_flush_cpu_state(void) +{ + WARN_ON(!system_supports_fpsimd()); + __this_cpu_write(fpsimd_last_state.st, NULL); + + /* + * Leaving streaming mode enabled will cause issues for any kernel + * NEON and leaving streaming mode or ZA enabled may increase power + * consumption. + */ + if (system_supports_sme()) + sme_smstop(); + + set_thread_flag(TIF_FOREIGN_FPSTATE); +} + +/* + * Save the FPSIMD state to memory and invalidate cpu view. + * This function must be called with preemption disabled. + */ +void fpsimd_save_and_flush_cpu_state(void) +{ + if (!system_supports_fpsimd()) + return; + WARN_ON(preemptible()); + __get_cpu_fpsimd_context(); + fpsimd_save(); + fpsimd_flush_cpu_state(); + __put_cpu_fpsimd_context(); +} + +#ifdef CONFIG_KERNEL_MODE_NEON + +/* + * Kernel-side NEON support functions + */ + +/* + * kernel_neon_begin(): obtain the CPU FPSIMD registers for use by the calling + * context + * + * Must not be called unless may_use_simd() returns true. + * Task context in the FPSIMD registers is saved back to memory as necessary. + * + * A matching call to kernel_neon_end() must be made before returning from the + * calling context. + * + * The caller may freely use the FPSIMD registers until kernel_neon_end() is + * called. + */ +void kernel_neon_begin(void) +{ + if (WARN_ON(!system_supports_fpsimd())) + return; + + BUG_ON(!may_use_simd()); + + get_cpu_fpsimd_context(); + + /* Save unsaved fpsimd state, if any: */ + fpsimd_save(); + + /* Invalidate any task state remaining in the fpsimd regs: */ + fpsimd_flush_cpu_state(); +} +EXPORT_SYMBOL_GPL(kernel_neon_begin); + +/* + * kernel_neon_end(): give the CPU FPSIMD registers back to the current task + * + * Must be called from a context in which kernel_neon_begin() was previously + * called, with no call to kernel_neon_end() in the meantime. + * + * The caller must not use the FPSIMD registers after this function is called, + * unless kernel_neon_begin() is called again in the meantime. + */ +void kernel_neon_end(void) +{ + if (!system_supports_fpsimd()) + return; + + put_cpu_fpsimd_context(); +} +EXPORT_SYMBOL_GPL(kernel_neon_end); + +#ifdef CONFIG_EFI + +static DEFINE_PER_CPU(struct user_fpsimd_state, efi_fpsimd_state); +static DEFINE_PER_CPU(bool, efi_fpsimd_state_used); +static DEFINE_PER_CPU(bool, efi_sve_state_used); +static DEFINE_PER_CPU(bool, efi_sm_state); + +/* + * EFI runtime services support functions + * + * The ABI for EFI runtime services allows EFI to use FPSIMD during the call. + * This means that for EFI (and only for EFI), we have to assume that FPSIMD + * is always used rather than being an optional accelerator. 
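+ *
+ * The intended pairing (a sketch; the real call sites live in the arm64 EFI
+ * support code) is:
+ *
+ *	__efi_fpsimd_begin();
+ *	...make the EFI runtime services call...
+ *	__efi_fpsimd_end();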
+ * + * These functions provide the necessary support for ensuring FPSIMD + * save/restore in the contexts from which EFI is used. + * + * Do not use them for any other purpose -- if tempted to do so, you are + * either doing something wrong or you need to propose some refactoring. + */ + +/* + * __efi_fpsimd_begin(): prepare FPSIMD for making an EFI runtime services call + */ +void __efi_fpsimd_begin(void) +{ + if (!system_supports_fpsimd()) + return; + + WARN_ON(preemptible()); + + if (may_use_simd()) { + kernel_neon_begin(); + } else { + /* + * If !efi_sve_state, SVE can't be in use yet and doesn't need + * preserving: + */ + if (system_supports_sve() && likely(efi_sve_state)) { + char *sve_state = this_cpu_ptr(efi_sve_state); + bool ffr = true; + u64 svcr; + + __this_cpu_write(efi_sve_state_used, true); + + if (system_supports_sme()) { + svcr = read_sysreg_s(SYS_SVCR); + + __this_cpu_write(efi_sm_state, + svcr & SVCR_SM_MASK); + + /* + * Unless we have FA64 FFR does not + * exist in streaming mode. + */ + if (!system_supports_fa64()) + ffr = !(svcr & SVCR_SM_MASK); + } + + sve_save_state(sve_state + sve_ffr_offset(sve_max_vl()), + &this_cpu_ptr(&efi_fpsimd_state)->fpsr, + ffr); + + if (system_supports_sme()) + sysreg_clear_set_s(SYS_SVCR, + SVCR_SM_MASK, 0); + + } else { + fpsimd_save_state(this_cpu_ptr(&efi_fpsimd_state)); + } + + __this_cpu_write(efi_fpsimd_state_used, true); + } +} + +/* + * __efi_fpsimd_end(): clean up FPSIMD after an EFI runtime services call + */ +void __efi_fpsimd_end(void) +{ + if (!system_supports_fpsimd()) + return; + + if (!__this_cpu_xchg(efi_fpsimd_state_used, false)) { + kernel_neon_end(); + } else { + if (system_supports_sve() && + likely(__this_cpu_read(efi_sve_state_used))) { + char const *sve_state = this_cpu_ptr(efi_sve_state); + bool ffr = true; + + /* + * Restore streaming mode; EFI calls are + * normal function calls so should not return in + * streaming mode. + */ + if (system_supports_sme()) { + if (__this_cpu_read(efi_sm_state)) { + sysreg_clear_set_s(SYS_SVCR, + 0, + SVCR_SM_MASK); + + /* + * Unless we have FA64 FFR does not + * exist in streaming mode. + */ + if (!system_supports_fa64()) + ffr = false; + } + } + + sve_load_state(sve_state + sve_ffr_offset(sve_max_vl()), + &this_cpu_ptr(&efi_fpsimd_state)->fpsr, + ffr); + + __this_cpu_write(efi_sve_state_used, false); + } else { + fpsimd_load_state(this_cpu_ptr(&efi_fpsimd_state)); + } + } +} + +#endif /* CONFIG_EFI */ + +#endif /* CONFIG_KERNEL_MODE_NEON */ + +#ifdef CONFIG_CPU_PM +static int fpsimd_cpu_pm_notifier(struct notifier_block *self, + unsigned long cmd, void *v) +{ + switch (cmd) { + case CPU_PM_ENTER: + fpsimd_save_and_flush_cpu_state(); + break; + case CPU_PM_EXIT: + break; + case CPU_PM_ENTER_FAILED: + default: + return NOTIFY_DONE; + } + return NOTIFY_OK; +} + +static struct notifier_block fpsimd_cpu_pm_notifier_block = { + .notifier_call = fpsimd_cpu_pm_notifier, +}; + +static void __init fpsimd_pm_init(void) +{ + cpu_pm_register_notifier(&fpsimd_cpu_pm_notifier_block); +} + +#else +static inline void fpsimd_pm_init(void) { } +#endif /* CONFIG_CPU_PM */ + +#ifdef CONFIG_HOTPLUG_CPU +static int fpsimd_cpu_dead(unsigned int cpu) +{ + per_cpu(fpsimd_last_state.st, cpu) = NULL; + return 0; +} + +static inline void fpsimd_hotplug_init(void) +{ + cpuhp_setup_state_nocalls(CPUHP_ARM64_FPSIMD_DEAD, "arm64/fpsimd:dead", + NULL, fpsimd_cpu_dead); +} + +#else +static inline void fpsimd_hotplug_init(void) { } +#endif + +/* + * FP/SIMD support code initialisation. 
+ */ +static int __init fpsimd_init(void) +{ + if (cpu_have_named_feature(FP)) { + fpsimd_pm_init(); + fpsimd_hotplug_init(); + } else { + pr_notice("Floating-point is not implemented\n"); + } + + if (!cpu_have_named_feature(ASIMD)) + pr_notice("Advanced SIMD is not implemented\n"); + + + sve_sysctl_init(); + sme_sysctl_init(); + + return 0; +} +core_initcall(fpsimd_init); diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c new file mode 100644 index 0000000000..a650f5e11f --- /dev/null +++ b/arch/arm64/kernel/ftrace.c @@ -0,0 +1,517 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * arch/arm64/kernel/ftrace.c + * + * Copyright (C) 2013 Linaro Limited + * Author: AKASHI Takahiro + */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS +struct fregs_offset { + const char *name; + int offset; +}; + +#define FREGS_OFFSET(n, field) \ +{ \ + .name = n, \ + .offset = offsetof(struct ftrace_regs, field), \ +} + +static const struct fregs_offset fregs_offsets[] = { + FREGS_OFFSET("x0", regs[0]), + FREGS_OFFSET("x1", regs[1]), + FREGS_OFFSET("x2", regs[2]), + FREGS_OFFSET("x3", regs[3]), + FREGS_OFFSET("x4", regs[4]), + FREGS_OFFSET("x5", regs[5]), + FREGS_OFFSET("x6", regs[6]), + FREGS_OFFSET("x7", regs[7]), + FREGS_OFFSET("x8", regs[8]), + + FREGS_OFFSET("x29", fp), + FREGS_OFFSET("x30", lr), + FREGS_OFFSET("lr", lr), + + FREGS_OFFSET("sp", sp), + FREGS_OFFSET("pc", pc), +}; + +int ftrace_regs_query_register_offset(const char *name) +{ + for (int i = 0; i < ARRAY_SIZE(fregs_offsets); i++) { + const struct fregs_offset *roff = &fregs_offsets[i]; + if (!strcmp(roff->name, name)) + return roff->offset; + } + + return -EINVAL; +} +#endif + +unsigned long ftrace_call_adjust(unsigned long addr) +{ + /* + * When using mcount, addr is the address of the mcount call + * instruction, and no adjustment is necessary. + */ + if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS)) + return addr; + + /* + * When using patchable-function-entry without pre-function NOPS, addr + * is the address of the first NOP after the function entry point. + * + * The compiler has either generated: + * + * addr+00: func: NOP // To be patched to MOV X9, LR + * addr+04: NOP // To be patched to BL + * + * Or: + * + * addr-04: BTI C + * addr+00: func: NOP // To be patched to MOV X9, LR + * addr+04: NOP // To be patched to BL + * + * We must adjust addr to the address of the NOP which will be patched + * to `BL `, which is at `addr + 4` bytes in either case. + * + */ + if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS)) + return addr + AARCH64_INSN_SIZE; + + /* + * When using patchable-function-entry with pre-function NOPs, addr is + * the address of the first pre-function NOP. + * + * Starting from an 8-byte aligned base, the compiler has either + * generated: + * + * addr+00: NOP // Literal (first 32 bits) + * addr+04: NOP // Literal (last 32 bits) + * addr+08: func: NOP // To be patched to MOV X9, LR + * addr+12: NOP // To be patched to BL + * + * Or: + * + * addr+00: NOP // Literal (first 32 bits) + * addr+04: NOP // Literal (last 32 bits) + * addr+08: func: BTI C + * addr+12: NOP // To be patched to MOV X9, LR + * addr+16: NOP // To be patched to BL + * + * We must adjust addr to the address of the NOP which will be patched + * to `BL `, which is at either addr+12 or addr+16 depending on + * whether there is a BTI. 
+ */ + + if (!IS_ALIGNED(addr, sizeof(unsigned long))) { + WARN_RATELIMIT(1, "Misaligned patch-site %pS\n", + (void *)(addr + 8)); + return 0; + } + + /* Skip the NOPs placed before the function entry point */ + addr += 2 * AARCH64_INSN_SIZE; + + /* Skip any BTI */ + if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL)) { + u32 insn = le32_to_cpu(*(__le32 *)addr); + + if (aarch64_insn_is_bti(insn)) { + addr += AARCH64_INSN_SIZE; + } else if (insn != aarch64_insn_gen_nop()) { + WARN_RATELIMIT(1, "unexpected insn in patch-site %pS: 0x%08x\n", + (void *)addr, insn); + } + } + + /* Skip the first NOP after function entry */ + addr += AARCH64_INSN_SIZE; + + return addr; +} + +/* + * Replace a single instruction, which may be a branch or NOP. + * If @validate == true, a replaced instruction is checked against 'old'. + */ +static int ftrace_modify_code(unsigned long pc, u32 old, u32 new, + bool validate) +{ + u32 replaced; + + /* + * Note: + * We are paranoid about modifying text, as if a bug were to happen, it + * could cause us to read or write to someplace that could cause harm. + * Carefully read and modify the code with aarch64_insn_*() which uses + * probe_kernel_*(), and make sure what we read is what we expected it + * to be before modifying it. + */ + if (validate) { + if (aarch64_insn_read((void *)pc, &replaced)) + return -EFAULT; + + if (replaced != old) + return -EINVAL; + } + if (aarch64_insn_patch_text_nosync((void *)pc, new)) + return -EPERM; + + return 0; +} + +/* + * Replace tracer function in ftrace_caller() + */ +int ftrace_update_ftrace_func(ftrace_func_t func) +{ + unsigned long pc; + u32 new; + + /* + * When using CALL_OPS, the function to call is associated with the + * call site, and we don't have a global function pointer to update. + */ + if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS)) + return 0; + + pc = (unsigned long)ftrace_call; + new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func, + AARCH64_INSN_BRANCH_LINK); + + return ftrace_modify_code(pc, 0, new, false); +} + +static struct plt_entry *get_ftrace_plt(struct module *mod) +{ +#ifdef CONFIG_MODULES + struct plt_entry *plt = mod->arch.ftrace_trampolines; + + return &plt[FTRACE_PLT_IDX]; +#else + return NULL; +#endif +} + +static bool reachable_by_bl(unsigned long addr, unsigned long pc) +{ + long offset = (long)addr - (long)pc; + + return offset >= -SZ_128M && offset < SZ_128M; +} + +/* + * Find the address the callsite must branch to in order to reach '*addr'. + * + * Due to the limited range of 'BL' instructions, modules may be placed too far + * away to branch directly and must use a PLT. + * + * Returns true when '*addr' contains a reachable target address, or has been + * modified to contain a PLT address. Returns false otherwise. + */ +static bool ftrace_find_callable_addr(struct dyn_ftrace *rec, + struct module *mod, + unsigned long *addr) +{ + unsigned long pc = rec->ip; + struct plt_entry *plt; + + /* + * If a custom trampoline is unreachable, rely on the ftrace_caller + * trampoline which knows how to indirectly reach that trampoline + * through ops->direct_call. + */ + if (*addr != FTRACE_ADDR && !reachable_by_bl(*addr, pc)) + *addr = FTRACE_ADDR; + + /* + * When the target is within range of the 'BL' instruction, use 'addr' + * as-is and branch to that directly. + */ + if (reachable_by_bl(*addr, pc)) + return true; + + /* + * When the target is outside of the range of a 'BL' instruction, we + * must use a PLT to reach it. 
We can only place PLTs for modules, and + * only when module PLT support is built-in. + */ + if (!IS_ENABLED(CONFIG_MODULES)) + return false; + + /* + * 'mod' is only set at module load time, but if we end up + * dealing with an out-of-range condition, we can assume it + * is due to a module being loaded far away from the kernel. + * + * NOTE: __module_text_address() must be called with preemption + * disabled, but we can rely on ftrace_lock to ensure that 'mod' + * retains its validity throughout the remainder of this code. + */ + if (!mod) { + preempt_disable(); + mod = __module_text_address(pc); + preempt_enable(); + } + + if (WARN_ON(!mod)) + return false; + + plt = get_ftrace_plt(mod); + if (!plt) { + pr_err("ftrace: no module PLT for %ps\n", (void *)*addr); + return false; + } + + *addr = (unsigned long)plt; + return true; +} + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS +static const struct ftrace_ops *arm64_rec_get_ops(struct dyn_ftrace *rec) +{ + const struct ftrace_ops *ops = NULL; + + if (rec->flags & FTRACE_FL_CALL_OPS_EN) { + ops = ftrace_find_unique_ops(rec); + WARN_ON_ONCE(!ops); + } + + if (!ops) + ops = &ftrace_list_ops; + + return ops; +} + +static int ftrace_rec_set_ops(const struct dyn_ftrace *rec, + const struct ftrace_ops *ops) +{ + unsigned long literal = ALIGN_DOWN(rec->ip - 12, 8); + return aarch64_insn_write_literal_u64((void *)literal, + (unsigned long)ops); +} + +static int ftrace_rec_set_nop_ops(struct dyn_ftrace *rec) +{ + return ftrace_rec_set_ops(rec, &ftrace_nop_ops); +} + +static int ftrace_rec_update_ops(struct dyn_ftrace *rec) +{ + return ftrace_rec_set_ops(rec, arm64_rec_get_ops(rec)); +} +#else +static int ftrace_rec_set_nop_ops(struct dyn_ftrace *rec) { return 0; } +static int ftrace_rec_update_ops(struct dyn_ftrace *rec) { return 0; } +#endif + +/* + * Turn on the call to ftrace_caller() in instrumented function + */ +int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) +{ + unsigned long pc = rec->ip; + u32 old, new; + int ret; + + ret = ftrace_rec_update_ops(rec); + if (ret) + return ret; + + if (!ftrace_find_callable_addr(rec, NULL, &addr)) + return -EINVAL; + + old = aarch64_insn_gen_nop(); + new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK); + + return ftrace_modify_code(pc, old, new, true); +} + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS +int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, + unsigned long addr) +{ + unsigned long pc = rec->ip; + u32 old, new; + int ret; + + ret = ftrace_rec_set_ops(rec, arm64_rec_get_ops(rec)); + if (ret) + return ret; + + if (!ftrace_find_callable_addr(rec, NULL, &old_addr)) + return -EINVAL; + if (!ftrace_find_callable_addr(rec, NULL, &addr)) + return -EINVAL; + + old = aarch64_insn_gen_branch_imm(pc, old_addr, + AARCH64_INSN_BRANCH_LINK); + new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK); + + return ftrace_modify_code(pc, old, new, true); +} +#endif + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS +/* + * The compiler has inserted two NOPs before the regular function prologue. + * All instrumented functions follow the AAPCS, so x0-x8 and x19-x30 are live, + * and x9-x18 are free for our use. + * + * At runtime we want to be able to swing a single NOP <-> BL to enable or + * disable the ftrace call. 
The BL requires us to save the original LR value, + * so here we insert a `MOV X9, LR` over the first NOP so the instructions + * before the regular prologue are: + * + * | Compiled | Disabled | Enabled | + * +----------+------------+------------+ + * | NOP | MOV X9, LR | MOV X9, LR | + * | NOP | NOP | BL | + * + * The LR value will be recovered by ftrace_caller, and restored into LR + * before returning to the regular function prologue. When a function is not + * being traced, the MOV is not harmful given x9 is not live per the AAPCS. + * + * Note: ftrace_process_locs() has pre-adjusted rec->ip to be the address of + * the BL. + */ +int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec) +{ + unsigned long pc = rec->ip - AARCH64_INSN_SIZE; + u32 old, new; + int ret; + + ret = ftrace_rec_set_nop_ops(rec); + if (ret) + return ret; + + old = aarch64_insn_gen_nop(); + new = aarch64_insn_gen_move_reg(AARCH64_INSN_REG_9, + AARCH64_INSN_REG_LR, + AARCH64_INSN_VARIANT_64BIT); + return ftrace_modify_code(pc, old, new, true); +} +#endif + +/* + * Turn off the call to ftrace_caller() in instrumented function + */ +int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, + unsigned long addr) +{ + unsigned long pc = rec->ip; + u32 old = 0, new; + int ret; + + new = aarch64_insn_gen_nop(); + + ret = ftrace_rec_set_nop_ops(rec); + if (ret) + return ret; + + /* + * When using mcount, callsites in modules may have been initialized to + * call an arbitrary module PLT (which redirects to the _mcount stub) + * rather than the ftrace PLT we'll use at runtime (which redirects to + * the ftrace trampoline). We can ignore the old PLT when initializing + * the callsite. + * + * Note: 'mod' is only set at module load time. + */ + if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS) && mod) + return aarch64_insn_patch_text_nosync((void *)pc, new); + + if (!ftrace_find_callable_addr(rec, mod, &addr)) + return -EINVAL; + + old = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK); + + return ftrace_modify_code(pc, old, new, true); +} + +void arch_ftrace_update_code(int command) +{ + command |= FTRACE_MAY_SLEEP; + ftrace_modify_all_code(command); +} + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +/* + * function_graph tracer expects ftrace_return_to_handler() to be called + * on the way back to parent. For this purpose, this function is called + * in _mcount() or ftrace_caller() to replace return address (*parent) on + * the call stack to return_to_handler. + */ +void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent, + unsigned long frame_pointer) +{ + unsigned long return_hooker = (unsigned long)&return_to_handler; + unsigned long old; + + if (unlikely(atomic_read(&current->tracing_graph_pause))) + return; + + /* + * Note: + * No protection against faulting at *parent, which may be seen + * on other archs. It's unlikely on AArch64. + */ + old = *parent; + + if (!function_graph_enter(old, self_addr, frame_pointer, + (void *)frame_pointer)) { + *parent = return_hooker; + } +} + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS +void ftrace_graph_func(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *op, struct ftrace_regs *fregs) +{ + prepare_ftrace_return(ip, &fregs->lr, fregs->fp); +} +#else +/* + * Turn on/off the call to ftrace_graph_caller() in ftrace_caller() + * depending on @enable.
+ */ +static int ftrace_modify_graph_caller(bool enable) +{ + unsigned long pc = (unsigned long)&ftrace_graph_call; + u32 branch, nop; + + branch = aarch64_insn_gen_branch_imm(pc, + (unsigned long)ftrace_graph_caller, + AARCH64_INSN_BRANCH_NOLINK); + nop = aarch64_insn_gen_nop(); + + if (enable) + return ftrace_modify_code(pc, nop, branch, true); + else + return ftrace_modify_code(pc, branch, nop, true); +} + +int ftrace_enable_ftrace_graph_caller(void) +{ + return ftrace_modify_graph_caller(true); +} + +int ftrace_disable_ftrace_graph_caller(void) +{ + return ftrace_modify_graph_caller(false); +} +#endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */ +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S new file mode 100644 index 0000000000..7b236994f0 --- /dev/null +++ b/arch/arm64/kernel/head.S @@ -0,0 +1,902 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Low-level CPU initialisation + * Based on arch/arm/kernel/head.S + * + * Copyright (C) 1994-2002 Russell King + * Copyright (C) 2003-2012 ARM Ltd. + * Authors: Catalin Marinas + * Will Deacon + */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "efi-header.S" + +#if (PAGE_OFFSET & 0x1fffff) != 0 +#error PAGE_OFFSET must be at least 2MB aligned +#endif + +/* + * Kernel startup entry point. + * --------------------------- + * + * The requirements are: + * MMU = off, D-cache = off, I-cache = on or off, + * x0 = physical address to the FDT blob. + * + * Note that the callee-saved registers are used for storing variables + * that are useful before the MMU is enabled. The allocations are described + * in the entry routines. + */ + __HEAD + /* + * DO NOT MODIFY. Image header expected by Linux boot-loaders. + */ + efi_signature_nop // special NOP to identity as PE/COFF executable + b primary_entry // branch to kernel start, magic + .quad 0 // Image load offset from start of RAM, little-endian + le64sym _kernel_size_le // Effective size of kernel image, little-endian + le64sym _kernel_flags_le // Informative flags, little-endian + .quad 0 // reserved + .quad 0 // reserved + .quad 0 // reserved + .ascii ARM64_IMAGE_MAGIC // Magic number + .long .Lpe_header_offset // Offset to the PE header. + + __EFI_PE_HEADER + + .section ".idmap.text","a" + + /* + * The following callee saved general purpose registers are used on the + * primary lowlevel boot path: + * + * Register Scope Purpose + * x19 primary_entry() .. start_kernel() whether we entered with the MMU on + * x20 primary_entry() .. __primary_switch() CPU boot mode + * x21 primary_entry() .. start_kernel() FDT pointer passed at boot in x0 + * x22 create_idmap() .. start_kernel() ID map VA of the DT blob + * x23 primary_entry() .. start_kernel() physical misalignment/KASLR offset + * x24 __primary_switch() linear map KASLR seed + * x25 primary_entry() .. start_kernel() supported VA size + * x28 create_idmap() callee preserved temp register + */ +SYM_CODE_START(primary_entry) + bl record_mmu_state + bl preserve_boot_args + bl create_idmap + + /* + * If we entered with the MMU and caches on, clean the ID mapped part + * of the primary boot code to the PoC so we can safely execute it with + * the MMU off. 
+ */ + cbz x19, 0f + adrp x0, __idmap_text_start + adr_l x1, __idmap_text_end + adr_l x2, dcache_clean_poc + blr x2 +0: mov x0, x19 + bl init_kernel_el // w0=cpu_boot_mode + mov x20, x0 + + /* + * The following calls CPU setup code, see arch/arm64/mm/proc.S for + * details. + * On return, the CPU will be ready for the MMU to be turned on and + * the TCR will have been set. + */ +#if VA_BITS > 48 + mrs_s x0, SYS_ID_AA64MMFR2_EL1 + tst x0, ID_AA64MMFR2_EL1_VARange_MASK + mov x0, #VA_BITS + mov x25, #VA_BITS_MIN + csel x25, x25, x0, eq + mov x0, x25 +#endif + bl __cpu_setup // initialise processor + b __primary_switch +SYM_CODE_END(primary_entry) + + __INIT +SYM_CODE_START_LOCAL(record_mmu_state) + mrs x19, CurrentEL + cmp x19, #CurrentEL_EL2 + mrs x19, sctlr_el1 + b.ne 0f + mrs x19, sctlr_el2 +0: +CPU_LE( tbnz x19, #SCTLR_ELx_EE_SHIFT, 1f ) +CPU_BE( tbz x19, #SCTLR_ELx_EE_SHIFT, 1f ) + tst x19, #SCTLR_ELx_C // Z := (C == 0) + and x19, x19, #SCTLR_ELx_M // isolate M bit + csel x19, xzr, x19, eq // clear x19 if Z + ret + + /* + * Set the correct endianness early so all memory accesses issued + * before init_kernel_el() occur in the correct byte order. Note that + * this means the MMU must be disabled, or the active ID map will end + * up getting interpreted with the wrong byte order. + */ +1: eor x19, x19, #SCTLR_ELx_EE + bic x19, x19, #SCTLR_ELx_M + b.ne 2f + pre_disable_mmu_workaround + msr sctlr_el2, x19 + b 3f +2: pre_disable_mmu_workaround + msr sctlr_el1, x19 +3: isb + mov x19, xzr + ret +SYM_CODE_END(record_mmu_state) + +/* + * Preserve the arguments passed by the bootloader in x0 .. x3 + */ +SYM_CODE_START_LOCAL(preserve_boot_args) + mov x21, x0 // x21=FDT + + adr_l x0, boot_args // record the contents of + stp x21, x1, [x0] // x0 .. x3 at kernel entry + stp x2, x3, [x0, #16] + + cbnz x19, 0f // skip cache invalidation if MMU is on + dmb sy // needed before dc ivac with + // MMU off + + add x1, x0, #0x20 // 4 x 8 bytes + b dcache_inval_poc // tail call +0: str_l x19, mmu_enabled_at_boot, x0 + ret +SYM_CODE_END(preserve_boot_args) + +SYM_FUNC_START_LOCAL(clear_page_tables) + /* + * Clear the init page tables. + */ + adrp x0, init_pg_dir + adrp x1, init_pg_end + sub x2, x1, x0 + mov x1, xzr + b __pi_memset // tail call +SYM_FUNC_END(clear_page_tables) + +/* + * Macro to populate page table entries, these entries can be pointers to the next level + * or last level entries pointing to physical memory. + * + * tbl: page table address + * rtbl: pointer to page table or physical memory + * index: start index to write + * eindex: end index to write - [index, eindex] written to + * flags: flags for pagetable entry to or in + * inc: increment to rtbl between each entry + * tmp1: temporary variable + * + * Preserves: tbl, eindex, flags, inc + * Corrupts: index, tmp1 + * Returns: rtbl + */ + .macro populate_entries, tbl, rtbl, index, eindex, flags, inc, tmp1 +.Lpe\@: phys_to_pte \tmp1, \rtbl + orr \tmp1, \tmp1, \flags // tmp1 = table entry + str \tmp1, [\tbl, \index, lsl #3] + add \rtbl, \rtbl, \inc // rtbl = pa next level + add \index, \index, #1 + cmp \index, \eindex + b.ls .Lpe\@ + .endm + +/* + * Compute indices of table entries from virtual address range. If multiple entries + * were needed in the previous page table level then the next page table level is assumed + * to be composed of multiple pages. (This effectively scales the end index). 
+ * + * vstart: virtual address of start of range + * vend: virtual address of end of range - we map [vstart, vend] + * shift: shift used to transform virtual address into index + * order: #imm 2log(number of entries in page table) + * istart: index in table corresponding to vstart + * iend: index in table corresponding to vend + * count: On entry: how many extra entries were required in previous level, scales + * our end index. + * On exit: returns how many extra entries required for next page table level + * + * Preserves: vstart, vend + * Returns: istart, iend, count + */ + .macro compute_indices, vstart, vend, shift, order, istart, iend, count + ubfx \istart, \vstart, \shift, \order + ubfx \iend, \vend, \shift, \order + add \iend, \iend, \count, lsl \order + sub \count, \iend, \istart + .endm + +/* + * Map memory for specified virtual address range. Each level of page table needed supports + * multiple entries. If a level requires n entries the next page table level is assumed to be + * formed from n pages. + * + * tbl: location of page table + * rtbl: address to be used for first level page table entry (typically tbl + PAGE_SIZE) + * vstart: virtual address of start of range + * vend: virtual address of end of range - we map [vstart, vend - 1] + * flags: flags to use to map last level entries + * phys: physical address corresponding to vstart - physical memory is contiguous + * order: #imm 2log(number of entries in PGD table) + * + * If extra_shift is set, an extra level will be populated if the end address does + * not fit in 'extra_shift' bits. This assumes vend is in the TTBR0 range. + * + * Temporaries: istart, iend, tmp, count, sv - these need to be different registers + * Preserves: vstart, flags + * Corrupts: tbl, rtbl, vend, istart, iend, tmp, count, sv + */ + .macro map_memory, tbl, rtbl, vstart, vend, flags, phys, order, istart, iend, tmp, count, sv, extra_shift + sub \vend, \vend, #1 + add \rtbl, \tbl, #PAGE_SIZE + mov \count, #0 + + .ifnb \extra_shift + tst \vend, #~((1 << (\extra_shift)) - 1) + b.eq .L_\@ + compute_indices \vstart, \vend, #\extra_shift, #(PAGE_SHIFT - 3), \istart, \iend, \count + mov \sv, \rtbl + populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp + mov \tbl, \sv + .endif +.L_\@: + compute_indices \vstart, \vend, #PGDIR_SHIFT, #\order, \istart, \iend, \count + mov \sv, \rtbl + populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp + mov \tbl, \sv + +#if SWAPPER_PGTABLE_LEVELS > 3 + compute_indices \vstart, \vend, #PUD_SHIFT, #(PAGE_SHIFT - 3), \istart, \iend, \count + mov \sv, \rtbl + populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp + mov \tbl, \sv +#endif + +#if SWAPPER_PGTABLE_LEVELS > 2 + compute_indices \vstart, \vend, #SWAPPER_TABLE_SHIFT, #(PAGE_SHIFT - 3), \istart, \iend, \count + mov \sv, \rtbl + populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp + mov \tbl, \sv +#endif + + compute_indices \vstart, \vend, #SWAPPER_BLOCK_SHIFT, #(PAGE_SHIFT - 3), \istart, \iend, \count + bic \rtbl, \phys, #SWAPPER_BLOCK_SIZE - 1 + populate_entries \tbl, \rtbl, \istart, \iend, \flags, #SWAPPER_BLOCK_SIZE, \tmp + .endm + +/* + * Remap a subregion created with the map_memory macro with modified attributes + * or output address. The entire remapped region must have been covered in the + * invocation of map_memory. 
+ * + * x0: last level table address (returned in first argument to map_memory) + * x1: start VA of the existing mapping + * x2: start VA of the region to update + * x3: end VA of the region to update (exclusive) + * x4: start PA associated with the region to update + * x5: attributes to set on the updated region + * x6: order of the last level mappings + */ +SYM_FUNC_START_LOCAL(remap_region) + sub x3, x3, #1 // make end inclusive + + // Get the index offset for the start of the last level table + lsr x1, x1, x6 + bfi x1, xzr, #0, #PAGE_SHIFT - 3 + + // Derive the start and end indexes into the last level table + // associated with the provided region + lsr x2, x2, x6 + lsr x3, x3, x6 + sub x2, x2, x1 + sub x3, x3, x1 + + mov x1, #1 + lsl x6, x1, x6 // block size at this level + + populate_entries x0, x4, x2, x3, x5, x6, x7 + ret +SYM_FUNC_END(remap_region) + +SYM_FUNC_START_LOCAL(create_idmap) + mov x28, lr + /* + * The ID map carries a 1:1 mapping of the physical address range + * covered by the loaded image, which could be anywhere in DRAM. This + * means that the required size of the VA (== PA) space is decided at + * boot time, and could be more than the configured size of the VA + * space for ordinary kernel and user space mappings. + * + * There are three cases to consider here: + * - 39 <= VA_BITS < 48, and the ID map needs up to 48 VA bits to cover + * the placement of the image. In this case, we configure one extra + * level of translation on the fly for the ID map only. (This case + * also covers 42-bit VA/52-bit PA on 64k pages). + * + * - VA_BITS == 48, and the ID map needs more than 48 VA bits. This can + * only happen when using 64k pages, in which case we need to extend + * the root level table rather than add a level. Note that we can + * treat this case as 'always extended' as long as we take care not + * to program an unsupported T0SZ value into the TCR register. + * + * - Combinations that would require two additional levels of + * translation are not supported, e.g., VA_BITS==36 on 16k pages, or + * VA_BITS==39/4k pages with 5-level paging, where the input address + * requires more than 47 or 48 bits, respectively. + */ +#if (VA_BITS < 48) +#define IDMAP_PGD_ORDER (VA_BITS - PGDIR_SHIFT) +#define EXTRA_SHIFT (PGDIR_SHIFT + PAGE_SHIFT - 3) + + /* + * If VA_BITS < 48, we have to configure an additional table level. + * First, we have to verify our assumption that the current value of + * VA_BITS was chosen such that all translation levels are fully + * utilised, and that lowering T0SZ will always result in an additional + * translation level to be configured. + */ +#if VA_BITS != EXTRA_SHIFT +#error "Mismatch between VA_BITS and page size/number of translation levels" +#endif +#else +#define IDMAP_PGD_ORDER (PHYS_MASK_SHIFT - PGDIR_SHIFT) +#define EXTRA_SHIFT + /* + * If VA_BITS == 48, we don't have to configure an additional + * translation level, but the top-level table has more entries. 
+ */ +#endif + adrp x0, init_idmap_pg_dir + adrp x3, _text + adrp x6, _end + MAX_FDT_SIZE + SWAPPER_BLOCK_SIZE + mov_q x7, SWAPPER_RX_MMUFLAGS + + map_memory x0, x1, x3, x6, x7, x3, IDMAP_PGD_ORDER, x10, x11, x12, x13, x14, EXTRA_SHIFT + + /* Remap the kernel page tables r/w in the ID map */ + adrp x1, _text + adrp x2, init_pg_dir + adrp x3, init_pg_end + bic x4, x2, #SWAPPER_BLOCK_SIZE - 1 + mov_q x5, SWAPPER_RW_MMUFLAGS + mov x6, #SWAPPER_BLOCK_SHIFT + bl remap_region + + /* Remap the FDT after the kernel image */ + adrp x1, _text + adrp x22, _end + SWAPPER_BLOCK_SIZE + bic x2, x22, #SWAPPER_BLOCK_SIZE - 1 + bfi x22, x21, #0, #SWAPPER_BLOCK_SHIFT // remapped FDT address + add x3, x2, #MAX_FDT_SIZE + SWAPPER_BLOCK_SIZE + bic x4, x21, #SWAPPER_BLOCK_SIZE - 1 + mov_q x5, SWAPPER_RW_MMUFLAGS + mov x6, #SWAPPER_BLOCK_SHIFT + bl remap_region + + /* + * Since the page tables have been populated with non-cacheable + * accesses (MMU disabled), invalidate those tables again to + * remove any speculatively loaded cache lines. + */ + cbnz x19, 0f // skip cache invalidation if MMU is on + dmb sy + + adrp x0, init_idmap_pg_dir + adrp x1, init_idmap_pg_end + bl dcache_inval_poc +0: ret x28 +SYM_FUNC_END(create_idmap) + +SYM_FUNC_START_LOCAL(create_kernel_mapping) + adrp x0, init_pg_dir + mov_q x5, KIMAGE_VADDR // compile time __va(_text) +#ifdef CONFIG_RELOCATABLE + add x5, x5, x23 // add KASLR displacement +#endif + adrp x6, _end // runtime __pa(_end) + adrp x3, _text // runtime __pa(_text) + sub x6, x6, x3 // _end - _text + add x6, x6, x5 // runtime __va(_end) + mov_q x7, SWAPPER_RW_MMUFLAGS + + map_memory x0, x1, x5, x6, x7, x3, (VA_BITS - PGDIR_SHIFT), x10, x11, x12, x13, x14 + + dsb ishst // sync with page table walker + ret +SYM_FUNC_END(create_kernel_mapping) + + /* + * Initialize CPU registers with task-specific and cpu-specific context. + * + * Create a final frame record at task_pt_regs(current)->stackframe, so + * that the unwinder can identify the final frame record of any task by + * its location in the task stack. We reserve the entire pt_regs space + * for consistency with user tasks and kthreads. + */ + .macro init_cpu_task tsk, tmp1, tmp2 + msr sp_el0, \tsk + + ldr \tmp1, [\tsk, #TSK_STACK] + add sp, \tmp1, #THREAD_SIZE + sub sp, sp, #PT_REGS_SIZE + + stp xzr, xzr, [sp, #S_STACKFRAME] + add x29, sp, #S_STACKFRAME + + scs_load_current + + adr_l \tmp1, __per_cpu_offset + ldr w\tmp2, [\tsk, #TSK_TI_CPU] + ldr \tmp1, [\tmp1, \tmp2, lsl #3] + set_this_cpu_offset \tmp1 + .endm + +/* + * The following fragment of code is executed with the MMU enabled. + * + * x0 = __pa(KERNEL_START) + */ +SYM_FUNC_START_LOCAL(__primary_switched) + adr_l x4, init_task + init_cpu_task x4, x5, x6 + + adr_l x8, vectors // load VBAR_EL1 with virtual + msr vbar_el1, x8 // vector table address + isb + + stp x29, x30, [sp, #-16]! + mov x29, sp + + str_l x21, __fdt_pointer, x5 // Save FDT pointer + + ldr_l x4, kimage_vaddr // Save the offset between + sub x4, x4, x0 // the kernel virtual and + str_l x4, kimage_voffset, x5 // physical mappings + + mov x0, x20 + bl set_cpu_boot_mode_flag + + // Clear BSS + adr_l x0, __bss_start + mov x1, xzr + adr_l x2, __bss_stop + sub x2, x2, x0 + bl __pi_memset + dsb ishst // Make zero page visible to PTW + +#if VA_BITS > 48 + adr_l x8, vabits_actual // Set this early so KASAN early init + str x25, [x8] // ... 
observes the correct value + dc civac, x8 // Make visible to booting secondaries +#endif + +#ifdef CONFIG_RANDOMIZE_BASE + adrp x5, memstart_offset_seed // Save KASLR linear map seed + strh w24, [x5, :lo12:memstart_offset_seed] +#endif +#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) + bl kasan_early_init +#endif + mov x0, x21 // pass FDT address in x0 + bl early_fdt_map // Try mapping the FDT early + mov x0, x20 // pass the full boot status + bl init_feature_override // Parse cpu feature overrides +#ifdef CONFIG_UNWIND_PATCH_PAC_INTO_SCS + bl scs_patch_vmlinux +#endif + mov x0, x20 + bl finalise_el2 // Prefer VHE if possible + ldp x29, x30, [sp], #16 + bl start_kernel + ASM_BUG() +SYM_FUNC_END(__primary_switched) + +/* + * end early head section, begin head code that is also used for + * hotplug and needs to have the same protections as the text region + */ + .section ".idmap.text","a" + +/* + * Starting from EL2 or EL1, configure the CPU to execute at the highest + * reachable EL supported by the kernel in a chosen default state. If dropping + * from EL2 to EL1, configure EL2 before configuring EL1. + * + * Since we cannot always rely on ERET synchronizing writes to sysregs (e.g. if + * SCTLR_ELx.EOS is clear), we place an ISB prior to ERET. + * + * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in x0 if + * booted in EL1 or EL2 respectively, with the top 32 bits containing + * potential context flags. These flags are *not* stored in __boot_cpu_mode. + * + * x0: whether we are being called from the primary boot path with the MMU on + */ +SYM_FUNC_START(init_kernel_el) + mrs x1, CurrentEL + cmp x1, #CurrentEL_EL2 + b.eq init_el2 + +SYM_INNER_LABEL(init_el1, SYM_L_LOCAL) + mov_q x0, INIT_SCTLR_EL1_MMU_OFF + pre_disable_mmu_workaround + msr sctlr_el1, x0 + isb + mov_q x0, INIT_PSTATE_EL1 + msr spsr_el1, x0 + msr elr_el1, lr + mov w0, #BOOT_CPU_MODE_EL1 + eret + +SYM_INNER_LABEL(init_el2, SYM_L_LOCAL) + msr elr_el2, lr + + // clean all HYP code to the PoC if we booted at EL2 with the MMU on + cbz x0, 0f + adrp x0, __hyp_idmap_text_start + adr_l x1, __hyp_text_end + adr_l x2, dcache_clean_poc + blr x2 +0: + mov_q x0, HCR_HOST_NVHE_FLAGS + msr hcr_el2, x0 + isb + + init_el2_state + + /* Hypervisor stub */ + adr_l x0, __hyp_stub_vectors + msr vbar_el2, x0 + isb + + mov_q x1, INIT_SCTLR_EL1_MMU_OFF + + /* + * Fruity CPUs seem to have HCR_EL2.E2H set to RES1, + * making it impossible to start in nVHE mode. Is that + * compliant with the architecture? Absolutely not! + */ + mrs x0, hcr_el2 + and x0, x0, #HCR_E2H + cbz x0, 1f + + /* Set a sane SCTLR_EL1, the VHE way */ + pre_disable_mmu_workaround + msr_s SYS_SCTLR_EL12, x1 + mov x2, #BOOT_CPU_FLAG_E2H + b 2f + +1: + pre_disable_mmu_workaround + msr sctlr_el1, x1 + mov x2, xzr +2: + __init_el2_nvhe_prepare_eret + + mov w0, #BOOT_CPU_MODE_EL2 + orr x0, x0, x2 + eret +SYM_FUNC_END(init_kernel_el) + + /* + * This provides a "holding pen" for platforms to hold all secondary + * cores are held until we're ready for them to initialise. + */ +SYM_FUNC_START(secondary_holding_pen) + mov x0, xzr + bl init_kernel_el // w0=cpu_boot_mode + mrs x2, mpidr_el1 + mov_q x1, MPIDR_HWID_BITMASK + and x2, x2, x1 + adr_l x3, secondary_holding_pen_release +pen: ldr x4, [x3] + cmp x4, x2 + b.eq secondary_startup + wfe + b pen +SYM_FUNC_END(secondary_holding_pen) + + /* + * Secondary entry point that jumps straight into the kernel. Only to + * be used where CPUs are brought online dynamically by the kernel. 
+ */ +SYM_FUNC_START(secondary_entry) + mov x0, xzr + bl init_kernel_el // w0=cpu_boot_mode + b secondary_startup +SYM_FUNC_END(secondary_entry) + +SYM_FUNC_START_LOCAL(secondary_startup) + /* + * Common entry point for secondary CPUs. + */ + mov x20, x0 // preserve boot mode + bl __cpu_secondary_check52bitva +#if VA_BITS > 48 + ldr_l x0, vabits_actual +#endif + bl __cpu_setup // initialise processor + adrp x1, swapper_pg_dir + adrp x2, idmap_pg_dir + bl __enable_mmu + ldr x8, =__secondary_switched + br x8 +SYM_FUNC_END(secondary_startup) + + .text +SYM_FUNC_START_LOCAL(__secondary_switched) + mov x0, x20 + bl set_cpu_boot_mode_flag + + mov x0, x20 + bl finalise_el2 + + str_l xzr, __early_cpu_boot_status, x3 + adr_l x5, vectors + msr vbar_el1, x5 + isb + + adr_l x0, secondary_data + ldr x2, [x0, #CPU_BOOT_TASK] + cbz x2, __secondary_too_slow + + init_cpu_task x2, x1, x3 + +#ifdef CONFIG_ARM64_PTR_AUTH + ptrauth_keys_init_cpu x2, x3, x4, x5 +#endif + + bl secondary_start_kernel + ASM_BUG() +SYM_FUNC_END(__secondary_switched) + +SYM_FUNC_START_LOCAL(__secondary_too_slow) + wfe + wfi + b __secondary_too_slow +SYM_FUNC_END(__secondary_too_slow) + +/* + * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed + * in w0. See arch/arm64/include/asm/virt.h for more info. + */ +SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag) + adr_l x1, __boot_cpu_mode + cmp w0, #BOOT_CPU_MODE_EL2 + b.ne 1f + add x1, x1, #4 +1: str w0, [x1] // Save CPU boot mode + ret +SYM_FUNC_END(set_cpu_boot_mode_flag) + +/* + * The booting CPU updates the failed status @__early_cpu_boot_status, + * with MMU turned off. + * + * update_early_cpu_boot_status tmp, status + * - Corrupts tmp1, tmp2 + * - Writes 'status' to __early_cpu_boot_status and makes sure + * it is committed to memory. + */ + + .macro update_early_cpu_boot_status status, tmp1, tmp2 + mov \tmp2, #\status + adr_l \tmp1, __early_cpu_boot_status + str \tmp2, [\tmp1] + dmb sy + dc ivac, \tmp1 // Invalidate potentially stale cache line + .endm + +/* + * Enable the MMU. + * + * x0 = SCTLR_EL1 value for turning on the MMU. + * x1 = TTBR1_EL1 value + * x2 = ID map root table address + * + * Returns to the caller via x30/lr. This requires the caller to be covered + * by the .idmap.text section. + * + * Checks if the selected granule size is supported by the CPU. 
+ * If it isn't, park the CPU + */ + .section ".idmap.text","a" +SYM_FUNC_START(__enable_mmu) + mrs x3, ID_AA64MMFR0_EL1 + ubfx x3, x3, #ID_AA64MMFR0_EL1_TGRAN_SHIFT, 4 + cmp x3, #ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN + b.lt __no_granule_support + cmp x3, #ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX + b.gt __no_granule_support + phys_to_ttbr x2, x2 + msr ttbr0_el1, x2 // load TTBR0 + load_ttbr1 x1, x1, x3 + + set_sctlr_el1 x0 + + ret +SYM_FUNC_END(__enable_mmu) + +SYM_FUNC_START(__cpu_secondary_check52bitva) +#if VA_BITS > 48 + ldr_l x0, vabits_actual + cmp x0, #52 + b.ne 2f + + mrs_s x0, SYS_ID_AA64MMFR2_EL1 + and x0, x0, ID_AA64MMFR2_EL1_VARange_MASK + cbnz x0, 2f + + update_early_cpu_boot_status \ + CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_52_BIT_VA, x0, x1 +1: wfe + wfi + b 1b + +#endif +2: ret +SYM_FUNC_END(__cpu_secondary_check52bitva) + +SYM_FUNC_START_LOCAL(__no_granule_support) + /* Indicate that this CPU can't boot and is stuck in the kernel */ + update_early_cpu_boot_status \ + CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_NO_GRAN, x1, x2 +1: + wfe + wfi + b 1b +SYM_FUNC_END(__no_granule_support) + +#ifdef CONFIG_RELOCATABLE +SYM_FUNC_START_LOCAL(__relocate_kernel) + /* + * Iterate over each entry in the relocation table, and apply the + * relocations in place. + */ + adr_l x9, __rela_start + adr_l x10, __rela_end + mov_q x11, KIMAGE_VADDR // default virtual offset + add x11, x11, x23 // actual virtual offset + +0: cmp x9, x10 + b.hs 1f + ldp x12, x13, [x9], #24 + ldr x14, [x9, #-8] + cmp w13, #R_AARCH64_RELATIVE + b.ne 0b + add x14, x14, x23 // relocate + str x14, [x12, x23] + b 0b + +1: +#ifdef CONFIG_RELR + /* + * Apply RELR relocations. + * + * RELR is a compressed format for storing relative relocations. The + * encoded sequence of entries looks like: + * [ AAAAAAAA BBBBBBB1 BBBBBBB1 ... AAAAAAAA BBBBBB1 ... ] + * + * i.e. start with an address, followed by any number of bitmaps. The + * address entry encodes 1 relocation. The subsequent bitmap entries + * encode up to 63 relocations each, at subsequent offsets following + * the last address entry. + * + * The bitmap entries must have 1 in the least significant bit. The + * assumption here is that an address cannot have 1 in lsb. Odd + * addresses are not supported. Any odd addresses are stored in the RELA + * section, which is handled above. + * + * Excluding the least significant bit in the bitmap, each non-zero + * bit in the bitmap represents a relocation to be applied to + * a corresponding machine word that follows the base address + * word. The second least significant bit represents the machine + * word immediately following the initial address, and each bit + * that follows represents the next word, in linear order. As such, + * a single bitmap can encode up to 63 relocations in a 64-bit object. + * + * In this implementation we store the address of the next RELR table + * entry in x9, the address being relocated by the current address or + * bitmap entry in x13 and the address being relocated by the current + * bit in x14. 
+ */ + adr_l x9, __relr_start + adr_l x10, __relr_end + +2: cmp x9, x10 + b.hs 7f + ldr x11, [x9], #8 + tbnz x11, #0, 3f // branch to handle bitmaps + add x13, x11, x23 + ldr x12, [x13] // relocate address entry + add x12, x12, x23 + str x12, [x13], #8 // adjust to start of bitmap + b 2b + +3: mov x14, x13 +4: lsr x11, x11, #1 + cbz x11, 6f + tbz x11, #0, 5f // skip bit if not set + ldr x12, [x14] // relocate bit + add x12, x12, x23 + str x12, [x14] + +5: add x14, x14, #8 // move to next bit's address + b 4b + +6: /* + * Move to the next bitmap's address. 8 is the word size, and 63 is the + * number of significant bits in a bitmap entry. + */ + add x13, x13, #(8 * 63) + b 2b + +7: +#endif + ret + +SYM_FUNC_END(__relocate_kernel) +#endif + +SYM_FUNC_START_LOCAL(__primary_switch) + adrp x1, reserved_pg_dir + adrp x2, init_idmap_pg_dir + bl __enable_mmu +#ifdef CONFIG_RELOCATABLE + adrp x23, KERNEL_START + and x23, x23, MIN_KIMG_ALIGN - 1 +#ifdef CONFIG_RANDOMIZE_BASE + mov x0, x22 + adrp x1, init_pg_end + mov sp, x1 + mov x29, xzr + bl __pi_kaslr_early_init + and x24, x0, #SZ_2M - 1 // capture memstart offset seed + bic x0, x0, #SZ_2M - 1 + orr x23, x23, x0 // record kernel offset +#endif +#endif + bl clear_page_tables + bl create_kernel_mapping + + adrp x1, init_pg_dir + load_ttbr1 x1, x1, x2 +#ifdef CONFIG_RELOCATABLE + bl __relocate_kernel +#endif + ldr x8, =__primary_switched + adrp x0, KERNEL_START // __pa(KERNEL_START) + br x8 +SYM_FUNC_END(__primary_switch) diff --git a/arch/arm64/kernel/hibernate-asm.S b/arch/arm64/kernel/hibernate-asm.S new file mode 100644 index 0000000000..0e1d9c3c6a --- /dev/null +++ b/arch/arm64/kernel/hibernate-asm.S @@ -0,0 +1,95 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Hibernate low-level support + * + * Copyright (C) 2016 ARM Ltd. + * Author: James Morse + */ +#include +#include + +#include +#include +#include +#include +#include +#include + +/* + * Resume from hibernate + * + * Loads temporary page tables then restores the memory image. + * Finally branches to cpu_resume() to restore the state saved by + * swsusp_arch_suspend(). + * + * Because this code has to be copied to a 'safe' page, it can't call out to + * other functions by PC-relative address. Also remember that it may be + * mid-way through over-writing other functions. For this reason it contains + * code from caches_clean_inval_pou() and uses the copy_page() macro. + * + * This 'safe' page is mapped via ttbr0, and executed from there. This function + * switches to a copy of the linear map in ttbr1, performs the restore, then + * switches ttbr1 to the original kernel's swapper_pg_dir. + * + * All of memory gets written to, including code. We need to clean the kernel + * text to the Point of Coherence (PoC) before secondary cores can be booted. + * Because the kernel modules and executable pages mapped to user space are + * also written as data, we clean all pages we touch to the Point of + * Unification (PoU). 
+ * + * x0: physical address of temporary page tables + * x1: physical address of swapper page tables + * x2: address of cpu_resume + * x3: linear map address of restore_pblist in the current kernel + * x4: physical address of __hyp_stub_vectors, or 0 + * x5: physical address of a zero page that remains zero after resume + */ +.pushsection ".hibernate_exit.text", "ax" +SYM_CODE_START(swsusp_arch_suspend_exit) + /* + * We execute from ttbr0, change ttbr1 to our copied linear map tables + * with a break-before-make via the zero page + */ + break_before_make_ttbr_switch x5, x0, x6, x8 + + mov x21, x1 + mov x30, x2 + mov x24, x4 + mov x25, x5 + + /* walk the restore_pblist and use copy_page() to over-write memory */ + mov x19, x3 + +1: ldr x10, [x19, #HIBERN_PBE_ORIG] + mov x0, x10 + ldr x1, [x19, #HIBERN_PBE_ADDR] + + copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9 + + add x1, x10, #PAGE_SIZE + /* Clean the copied page to PoU - based on caches_clean_inval_pou() */ + raw_dcache_line_size x2, x3 + sub x3, x2, #1 + bic x4, x10, x3 +2: /* clean D line / unified line */ +alternative_insn "dc cvau, x4", "dc civac, x4", ARM64_WORKAROUND_CLEAN_CACHE + add x4, x4, x2 + cmp x4, x1 + b.lo 2b + + ldr x19, [x19, #HIBERN_PBE_NEXT] + cbnz x19, 1b + dsb ish /* wait for PoU cleaning to finish */ + + /* switch to the restored kernel's page tables */ + break_before_make_ttbr_switch x25, x21, x6, x8 + + ic ialluis + dsb ish + isb + + cbz x24, 3f /* Do we need to re-initialise EL2? */ + hvc #0 +3: ret +SYM_CODE_END(swsusp_arch_suspend_exit) +.popsection diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c new file mode 100644 index 0000000000..02870beb27 --- /dev/null +++ b/arch/arm64/kernel/hibernate.c @@ -0,0 +1,477 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Hibernate support specific for ARM64 + * + * Derived from work on ARM hibernation support by: + * + * Ubuntu project, hibernation support for mach-dove + * Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu) + * Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.) + * Copyright (C) 2006 Rafael J. Wysocki + */ +#define pr_fmt(x) "hibernate: " x +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Hibernate core relies on this value being 0 on resume, and marks it + * __nosavedata assuming it will keep the resume kernel's '0' value. This + * doesn't happen with either KASLR. + * + * defined as "__visible int in_suspend __nosavedata" in + * kernel/power/hibernate.c + */ +extern int in_suspend; + +/* Do we need to reset el2? */ +#define el2_reset_needed() (is_hyp_nvhe()) + +/* hyp-stub vectors, used to restore el2 during resume from hibernate. */ +extern char __hyp_stub_vectors[]; + +/* + * The logical cpu number we should resume on, initialised to a non-cpu + * number. + */ +static int sleep_cpu = -EINVAL; + +/* + * Values that may not change over hibernate/resume. We put the build number + * and date in here so that we guarantee not to resume with a different + * kernel. + */ +struct arch_hibernate_hdr_invariants { + char uts_version[__NEW_UTS_LEN + 1]; +}; + +/* These values need to be known across a hibernate/restore. 
*/ +static struct arch_hibernate_hdr { + struct arch_hibernate_hdr_invariants invariants; + + /* These are needed to find the relocated kernel if built with kaslr */ + phys_addr_t ttbr1_el1; + void (*reenter_kernel)(void); + + /* + * We need to know where the __hyp_stub_vectors are after restore to + * re-configure el2. + */ + phys_addr_t __hyp_stub_vectors; + + u64 sleep_cpu_mpidr; +} resume_hdr; + +static inline void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i) +{ + memset(i, 0, sizeof(*i)); + memcpy(i->uts_version, init_utsname()->version, sizeof(i->uts_version)); +} + +int pfn_is_nosave(unsigned long pfn) +{ + unsigned long nosave_begin_pfn = sym_to_pfn(&__nosave_begin); + unsigned long nosave_end_pfn = sym_to_pfn(&__nosave_end - 1); + + return ((pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn)) || + crash_is_nosave(pfn); +} + +void notrace save_processor_state(void) +{ +} + +void notrace restore_processor_state(void) +{ +} + +int arch_hibernation_header_save(void *addr, unsigned int max_size) +{ + struct arch_hibernate_hdr *hdr = addr; + + if (max_size < sizeof(*hdr)) + return -EOVERFLOW; + + arch_hdr_invariants(&hdr->invariants); + hdr->ttbr1_el1 = __pa_symbol(swapper_pg_dir); + hdr->reenter_kernel = _cpu_resume; + + /* We can't use __hyp_get_vectors() because kvm may still be loaded */ + if (el2_reset_needed()) + hdr->__hyp_stub_vectors = __pa_symbol(__hyp_stub_vectors); + else + hdr->__hyp_stub_vectors = 0; + + /* Save the mpidr of the cpu we called cpu_suspend() on... */ + if (sleep_cpu < 0) { + pr_err("Failing to hibernate on an unknown CPU.\n"); + return -ENODEV; + } + hdr->sleep_cpu_mpidr = cpu_logical_map(sleep_cpu); + pr_info("Hibernating on CPU %d [mpidr:0x%llx]\n", sleep_cpu, + hdr->sleep_cpu_mpidr); + + return 0; +} +EXPORT_SYMBOL(arch_hibernation_header_save); + +int arch_hibernation_header_restore(void *addr) +{ + int ret; + struct arch_hibernate_hdr_invariants invariants; + struct arch_hibernate_hdr *hdr = addr; + + arch_hdr_invariants(&invariants); + if (memcmp(&hdr->invariants, &invariants, sizeof(invariants))) { + pr_crit("Hibernate image not generated by this kernel!\n"); + return -EINVAL; + } + + sleep_cpu = get_logical_index(hdr->sleep_cpu_mpidr); + pr_info("Hibernated on CPU %d [mpidr:0x%llx]\n", sleep_cpu, + hdr->sleep_cpu_mpidr); + if (sleep_cpu < 0) { + pr_crit("Hibernated on a CPU not known to this kernel!\n"); + sleep_cpu = -EINVAL; + return -EINVAL; + } + + ret = bringup_hibernate_cpu(sleep_cpu); + if (ret) { + sleep_cpu = -EINVAL; + return ret; + } + + resume_hdr = *hdr; + + return 0; +} +EXPORT_SYMBOL(arch_hibernation_header_restore); + +static void *hibernate_page_alloc(void *arg) +{ + return (void *)get_safe_page((__force gfp_t)(unsigned long)arg); +} + +/* + * Copies length bytes, starting at src_start into an new page, + * perform cache maintenance, then maps it at the specified address low + * address as executable. + * + * This is used by hibernate to copy the code it needs to execute when + * overwriting the kernel text. This function generates a new set of page + * tables, which it loads into ttbr0. + * + * Length is provided as we probably only want 4K of data, even on a 64K + * page system. 
+ */ +static int create_safe_exec_page(void *src_start, size_t length, + phys_addr_t *phys_dst_addr) +{ + struct trans_pgd_info trans_info = { + .trans_alloc_page = hibernate_page_alloc, + .trans_alloc_arg = (__force void *)GFP_ATOMIC, + }; + + void *page = (void *)get_safe_page(GFP_ATOMIC); + phys_addr_t trans_ttbr0; + unsigned long t0sz; + int rc; + + if (!page) + return -ENOMEM; + + memcpy(page, src_start, length); + caches_clean_inval_pou((unsigned long)page, (unsigned long)page + length); + rc = trans_pgd_idmap_page(&trans_info, &trans_ttbr0, &t0sz, page); + if (rc) + return rc; + + cpu_install_ttbr0(trans_ttbr0, t0sz); + *phys_dst_addr = virt_to_phys(page); + + return 0; +} + +#ifdef CONFIG_ARM64_MTE + +static DEFINE_XARRAY(mte_pages); + +static int save_tags(struct page *page, unsigned long pfn) +{ + void *tag_storage, *ret; + + tag_storage = mte_allocate_tag_storage(); + if (!tag_storage) + return -ENOMEM; + + mte_save_page_tags(page_address(page), tag_storage); + + ret = xa_store(&mte_pages, pfn, tag_storage, GFP_KERNEL); + if (WARN(xa_is_err(ret), "Failed to store MTE tags")) { + mte_free_tag_storage(tag_storage); + return xa_err(ret); + } else if (WARN(ret, "swsusp: %s: Duplicate entry", __func__)) { + mte_free_tag_storage(ret); + } + + return 0; +} + +static void swsusp_mte_free_storage(void) +{ + XA_STATE(xa_state, &mte_pages, 0); + void *tags; + + xa_lock(&mte_pages); + xas_for_each(&xa_state, tags, ULONG_MAX) { + mte_free_tag_storage(tags); + } + xa_unlock(&mte_pages); + + xa_destroy(&mte_pages); +} + +static int swsusp_mte_save_tags(void) +{ + struct zone *zone; + unsigned long pfn, max_zone_pfn; + int ret = 0; + int n = 0; + + if (!system_supports_mte()) + return 0; + + for_each_populated_zone(zone) { + max_zone_pfn = zone_end_pfn(zone); + for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) { + struct page *page = pfn_to_online_page(pfn); + + if (!page) + continue; + + if (!page_mte_tagged(page)) + continue; + + ret = save_tags(page, pfn); + if (ret) { + swsusp_mte_free_storage(); + goto out; + } + + n++; + } + } + pr_info("Saved %d MTE pages\n", n); + +out: + return ret; +} + +static void swsusp_mte_restore_tags(void) +{ + XA_STATE(xa_state, &mte_pages, 0); + int n = 0; + void *tags; + + xa_lock(&mte_pages); + xas_for_each(&xa_state, tags, ULONG_MAX) { + unsigned long pfn = xa_state.xa_index; + struct page *page = pfn_to_online_page(pfn); + + mte_restore_page_tags(page_address(page), tags); + + mte_free_tag_storage(tags); + n++; + } + xa_unlock(&mte_pages); + + pr_info("Restored %d MTE pages\n", n); + + xa_destroy(&mte_pages); +} + +#else /* CONFIG_ARM64_MTE */ + +static int swsusp_mte_save_tags(void) +{ + return 0; +} + +static void swsusp_mte_restore_tags(void) +{ +} + +#endif /* CONFIG_ARM64_MTE */ + +int swsusp_arch_suspend(void) +{ + int ret = 0; + unsigned long flags; + struct sleep_stack_data state; + + if (cpus_are_stuck_in_kernel()) { + pr_err("Can't hibernate: no mechanism to offline secondary CPUs.\n"); + return -EBUSY; + } + + flags = local_daif_save(); + + if (__cpu_suspend_enter(&state)) { + /* make the crash dump kernel image visible/saveable */ + crash_prepare_suspend(); + + ret = swsusp_mte_save_tags(); + if (ret) + return ret; + + sleep_cpu = smp_processor_id(); + ret = swsusp_save(); + } else { + /* Clean kernel core startup/idle code to PoC*/ + dcache_clean_inval_poc((unsigned long)__mmuoff_data_start, + (unsigned long)__mmuoff_data_end); + dcache_clean_inval_poc((unsigned long)__idmap_text_start, + (unsigned long)__idmap_text_end); + + /* 
Clean kvm setup code to PoC? */ + if (el2_reset_needed()) { + dcache_clean_inval_poc( + (unsigned long)__hyp_idmap_text_start, + (unsigned long)__hyp_idmap_text_end); + dcache_clean_inval_poc((unsigned long)__hyp_text_start, + (unsigned long)__hyp_text_end); + } + + swsusp_mte_restore_tags(); + + /* make the crash dump kernel image protected again */ + crash_post_resume(); + + /* + * Tell the hibernation core that we've just restored + * the memory + */ + in_suspend = 0; + + sleep_cpu = -EINVAL; + __cpu_suspend_exit(); + + /* + * Just in case the boot kernel did turn the SSBD + * mitigation off behind our back, let's set the state + * to what we expect it to be. + */ + spectre_v4_enable_mitigation(NULL); + } + + local_daif_restore(flags); + + return ret; +} + +/* + * Setup then Resume from the hibernate image using swsusp_arch_suspend_exit(). + * + * Memory allocated by get_safe_page() will be dealt with by the hibernate code, + * we don't need to free it here. + */ +int swsusp_arch_resume(void) +{ + int rc; + void *zero_page; + size_t exit_size; + pgd_t *tmp_pg_dir; + phys_addr_t el2_vectors; + void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *, + void *, phys_addr_t, phys_addr_t); + struct trans_pgd_info trans_info = { + .trans_alloc_page = hibernate_page_alloc, + .trans_alloc_arg = (void *)GFP_ATOMIC, + }; + + /* + * Restoring the memory image will overwrite the ttbr1 page tables. + * Create a second copy of just the linear map, and use this when + * restoring. + */ + rc = trans_pgd_create_copy(&trans_info, &tmp_pg_dir, PAGE_OFFSET, + PAGE_END); + if (rc) + return rc; + + /* + * We need a zero page that is zero before & after resume in order + * to break before make on the ttbr1 page tables. + */ + zero_page = (void *)get_safe_page(GFP_ATOMIC); + if (!zero_page) { + pr_err("Failed to allocate zero page.\n"); + return -ENOMEM; + } + + if (el2_reset_needed()) { + rc = trans_pgd_copy_el2_vectors(&trans_info, &el2_vectors); + if (rc) { + pr_err("Failed to setup el2 vectors\n"); + return rc; + } + } + + exit_size = __hibernate_exit_text_end - __hibernate_exit_text_start; + /* + * Copy swsusp_arch_suspend_exit() to a safe page. This will generate + * a new set of ttbr0 page tables and load them. + */ + rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size, + (phys_addr_t *)&hibernate_exit); + if (rc) { + pr_err("Failed to create safe executable page for hibernate_exit code.\n"); + return rc; + } + + /* + * KASLR will cause the el2 vectors to be in a different location in + * the resumed kernel. Load hibernate's temporary copy into el2. + * + * We can skip this step if we booted at EL1, or are running with VHE. + */ + if (el2_reset_needed()) + __hyp_set_vectors(el2_vectors); + + hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1, + resume_hdr.reenter_kernel, restore_pblist, + resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page)); + + return 0; +} + +int hibernate_resume_nonboot_cpu_disable(void) +{ + if (sleep_cpu < 0) { + pr_err("Failing to resume from hibernate on an unknown CPU.\n"); + return -ENODEV; + } + + return freeze_secondary_cpus(sleep_cpu); +} diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c new file mode 100644 index 0000000000..35225632d7 --- /dev/null +++ b/arch/arm64/kernel/hw_breakpoint.c @@ -0,0 +1,1023 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility, + * using the CPU's debug registers. 
+ * + * Copyright (C) 2012 ARM Limited + * Author: Will Deacon + */ + +#define pr_fmt(fmt) "hw-breakpoint: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +/* Breakpoint currently in use for each BRP. */ +static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]); + +/* Watchpoint currently in use for each WRP. */ +static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]); + +/* Currently stepping a per-CPU kernel breakpoint. */ +static DEFINE_PER_CPU(int, stepping_kernel_bp); + +/* Number of BRP/WRP registers on this CPU. */ +static int core_num_brps; +static int core_num_wrps; + +int hw_breakpoint_slots(int type) +{ + /* + * We can be called early, so don't rely on + * our static variables being initialised. + */ + switch (type) { + case TYPE_INST: + return get_num_brps(); + case TYPE_DATA: + return get_num_wrps(); + default: + pr_warn("unknown slot type: %d\n", type); + return 0; + } +} + +#define READ_WB_REG_CASE(OFF, N, REG, VAL) \ + case (OFF + N): \ + AARCH64_DBG_READ(N, REG, VAL); \ + break + +#define WRITE_WB_REG_CASE(OFF, N, REG, VAL) \ + case (OFF + N): \ + AARCH64_DBG_WRITE(N, REG, VAL); \ + break + +#define GEN_READ_WB_REG_CASES(OFF, REG, VAL) \ + READ_WB_REG_CASE(OFF, 0, REG, VAL); \ + READ_WB_REG_CASE(OFF, 1, REG, VAL); \ + READ_WB_REG_CASE(OFF, 2, REG, VAL); \ + READ_WB_REG_CASE(OFF, 3, REG, VAL); \ + READ_WB_REG_CASE(OFF, 4, REG, VAL); \ + READ_WB_REG_CASE(OFF, 5, REG, VAL); \ + READ_WB_REG_CASE(OFF, 6, REG, VAL); \ + READ_WB_REG_CASE(OFF, 7, REG, VAL); \ + READ_WB_REG_CASE(OFF, 8, REG, VAL); \ + READ_WB_REG_CASE(OFF, 9, REG, VAL); \ + READ_WB_REG_CASE(OFF, 10, REG, VAL); \ + READ_WB_REG_CASE(OFF, 11, REG, VAL); \ + READ_WB_REG_CASE(OFF, 12, REG, VAL); \ + READ_WB_REG_CASE(OFF, 13, REG, VAL); \ + READ_WB_REG_CASE(OFF, 14, REG, VAL); \ + READ_WB_REG_CASE(OFF, 15, REG, VAL) + +#define GEN_WRITE_WB_REG_CASES(OFF, REG, VAL) \ + WRITE_WB_REG_CASE(OFF, 0, REG, VAL); \ + WRITE_WB_REG_CASE(OFF, 1, REG, VAL); \ + WRITE_WB_REG_CASE(OFF, 2, REG, VAL); \ + WRITE_WB_REG_CASE(OFF, 3, REG, VAL); \ + WRITE_WB_REG_CASE(OFF, 4, REG, VAL); \ + WRITE_WB_REG_CASE(OFF, 5, REG, VAL); \ + WRITE_WB_REG_CASE(OFF, 6, REG, VAL); \ + WRITE_WB_REG_CASE(OFF, 7, REG, VAL); \ + WRITE_WB_REG_CASE(OFF, 8, REG, VAL); \ + WRITE_WB_REG_CASE(OFF, 9, REG, VAL); \ + WRITE_WB_REG_CASE(OFF, 10, REG, VAL); \ + WRITE_WB_REG_CASE(OFF, 11, REG, VAL); \ + WRITE_WB_REG_CASE(OFF, 12, REG, VAL); \ + WRITE_WB_REG_CASE(OFF, 13, REG, VAL); \ + WRITE_WB_REG_CASE(OFF, 14, REG, VAL); \ + WRITE_WB_REG_CASE(OFF, 15, REG, VAL) + +static u64 read_wb_reg(int reg, int n) +{ + u64 val = 0; + + switch (reg + n) { + GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val); + GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val); + GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val); + GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val); + default: + pr_warn("attempt to read from unknown breakpoint register %d\n", n); + } + + return val; +} +NOKPROBE_SYMBOL(read_wb_reg); + +static void write_wb_reg(int reg, int n, u64 val) +{ + switch (reg + n) { + GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val); + GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val); + GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val); + GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WCR, 
AARCH64_DBG_REG_NAME_WCR, val); + default: + pr_warn("attempt to write to unknown breakpoint register %d\n", n); + } + isb(); +} +NOKPROBE_SYMBOL(write_wb_reg); + +/* + * Convert a breakpoint privilege level to the corresponding exception + * level. + */ +static enum dbg_active_el debug_exception_level(int privilege) +{ + switch (privilege) { + case AARCH64_BREAKPOINT_EL0: + return DBG_ACTIVE_EL0; + case AARCH64_BREAKPOINT_EL1: + return DBG_ACTIVE_EL1; + default: + pr_warn("invalid breakpoint privilege level %d\n", privilege); + return -EINVAL; + } +} +NOKPROBE_SYMBOL(debug_exception_level); + +enum hw_breakpoint_ops { + HW_BREAKPOINT_INSTALL, + HW_BREAKPOINT_UNINSTALL, + HW_BREAKPOINT_RESTORE +}; + +static int is_compat_bp(struct perf_event *bp) +{ + struct task_struct *tsk = bp->hw.target; + + /* + * tsk can be NULL for per-cpu (non-ptrace) breakpoints. + * In this case, use the native interface, since we don't have + * the notion of a "compat CPU" and could end up relying on + * deprecated behaviour if we use unaligned watchpoints in + * AArch64 state. + */ + return tsk && is_compat_thread(task_thread_info(tsk)); +} + +/** + * hw_breakpoint_slot_setup - Find and setup a perf slot according to + * operations + * + * @slots: pointer to array of slots + * @max_slots: max number of slots + * @bp: perf_event to setup + * @ops: operation to be carried out on the slot + * + * Return: + * slot index on success + * -ENOSPC if no slot is available/matches + * -EINVAL on wrong operations parameter + */ +static int hw_breakpoint_slot_setup(struct perf_event **slots, int max_slots, + struct perf_event *bp, + enum hw_breakpoint_ops ops) +{ + int i; + struct perf_event **slot; + + for (i = 0; i < max_slots; ++i) { + slot = &slots[i]; + switch (ops) { + case HW_BREAKPOINT_INSTALL: + if (!*slot) { + *slot = bp; + return i; + } + break; + case HW_BREAKPOINT_UNINSTALL: + if (*slot == bp) { + *slot = NULL; + return i; + } + break; + case HW_BREAKPOINT_RESTORE: + if (*slot == bp) + return i; + break; + default: + pr_warn_once("Unhandled hw breakpoint ops %d\n", ops); + return -EINVAL; + } + } + return -ENOSPC; +} + +static int hw_breakpoint_control(struct perf_event *bp, + enum hw_breakpoint_ops ops) +{ + struct arch_hw_breakpoint *info = counter_arch_bp(bp); + struct perf_event **slots; + struct debug_info *debug_info = ¤t->thread.debug; + int i, max_slots, ctrl_reg, val_reg, reg_enable; + enum dbg_active_el dbg_el = debug_exception_level(info->ctrl.privilege); + u32 ctrl; + + if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) { + /* Breakpoint */ + ctrl_reg = AARCH64_DBG_REG_BCR; + val_reg = AARCH64_DBG_REG_BVR; + slots = this_cpu_ptr(bp_on_reg); + max_slots = core_num_brps; + reg_enable = !debug_info->bps_disabled; + } else { + /* Watchpoint */ + ctrl_reg = AARCH64_DBG_REG_WCR; + val_reg = AARCH64_DBG_REG_WVR; + slots = this_cpu_ptr(wp_on_reg); + max_slots = core_num_wrps; + reg_enable = !debug_info->wps_disabled; + } + + i = hw_breakpoint_slot_setup(slots, max_slots, bp, ops); + + if (WARN_ONCE(i < 0, "Can't find any breakpoint slot")) + return i; + + switch (ops) { + case HW_BREAKPOINT_INSTALL: + /* + * Ensure debug monitors are enabled at the correct exception + * level. + */ + enable_debug_monitors(dbg_el); + fallthrough; + case HW_BREAKPOINT_RESTORE: + /* Setup the address register. */ + write_wb_reg(val_reg, i, info->address); + + /* Setup the control register. */ + ctrl = encode_ctrl_reg(info->ctrl); + write_wb_reg(ctrl_reg, i, + reg_enable ? 
ctrl | 0x1 : ctrl & ~0x1); + break; + case HW_BREAKPOINT_UNINSTALL: + /* Reset the control register. */ + write_wb_reg(ctrl_reg, i, 0); + + /* + * Release the debug monitors for the correct exception + * level. + */ + disable_debug_monitors(dbg_el); + break; + } + + return 0; +} + +/* + * Install a perf counter breakpoint. + */ +int arch_install_hw_breakpoint(struct perf_event *bp) +{ + return hw_breakpoint_control(bp, HW_BREAKPOINT_INSTALL); +} + +void arch_uninstall_hw_breakpoint(struct perf_event *bp) +{ + hw_breakpoint_control(bp, HW_BREAKPOINT_UNINSTALL); +} + +static int get_hbp_len(u8 hbp_len) +{ + unsigned int len_in_bytes = 0; + + switch (hbp_len) { + case ARM_BREAKPOINT_LEN_1: + len_in_bytes = 1; + break; + case ARM_BREAKPOINT_LEN_2: + len_in_bytes = 2; + break; + case ARM_BREAKPOINT_LEN_3: + len_in_bytes = 3; + break; + case ARM_BREAKPOINT_LEN_4: + len_in_bytes = 4; + break; + case ARM_BREAKPOINT_LEN_5: + len_in_bytes = 5; + break; + case ARM_BREAKPOINT_LEN_6: + len_in_bytes = 6; + break; + case ARM_BREAKPOINT_LEN_7: + len_in_bytes = 7; + break; + case ARM_BREAKPOINT_LEN_8: + len_in_bytes = 8; + break; + } + + return len_in_bytes; +} + +/* + * Check whether bp virtual address is in kernel space. + */ +int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw) +{ + unsigned int len; + unsigned long va; + + va = hw->address; + len = get_hbp_len(hw->ctrl.len); + + return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE); +} + +/* + * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl. + * Hopefully this will disappear when ptrace can bypass the conversion + * to generic breakpoint descriptions. + */ +int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl, + int *gen_len, int *gen_type, int *offset) +{ + /* Type */ + switch (ctrl.type) { + case ARM_BREAKPOINT_EXECUTE: + *gen_type = HW_BREAKPOINT_X; + break; + case ARM_BREAKPOINT_LOAD: + *gen_type = HW_BREAKPOINT_R; + break; + case ARM_BREAKPOINT_STORE: + *gen_type = HW_BREAKPOINT_W; + break; + case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE: + *gen_type = HW_BREAKPOINT_RW; + break; + default: + return -EINVAL; + } + + if (!ctrl.len) + return -EINVAL; + *offset = __ffs(ctrl.len); + + /* Len */ + switch (ctrl.len >> *offset) { + case ARM_BREAKPOINT_LEN_1: + *gen_len = HW_BREAKPOINT_LEN_1; + break; + case ARM_BREAKPOINT_LEN_2: + *gen_len = HW_BREAKPOINT_LEN_2; + break; + case ARM_BREAKPOINT_LEN_3: + *gen_len = HW_BREAKPOINT_LEN_3; + break; + case ARM_BREAKPOINT_LEN_4: + *gen_len = HW_BREAKPOINT_LEN_4; + break; + case ARM_BREAKPOINT_LEN_5: + *gen_len = HW_BREAKPOINT_LEN_5; + break; + case ARM_BREAKPOINT_LEN_6: + *gen_len = HW_BREAKPOINT_LEN_6; + break; + case ARM_BREAKPOINT_LEN_7: + *gen_len = HW_BREAKPOINT_LEN_7; + break; + case ARM_BREAKPOINT_LEN_8: + *gen_len = HW_BREAKPOINT_LEN_8; + break; + default: + return -EINVAL; + } + + return 0; +} + +/* + * Construct an arch_hw_breakpoint from a perf_event. 
+ */ +static int arch_build_bp_info(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw) +{ + /* Type */ + switch (attr->bp_type) { + case HW_BREAKPOINT_X: + hw->ctrl.type = ARM_BREAKPOINT_EXECUTE; + break; + case HW_BREAKPOINT_R: + hw->ctrl.type = ARM_BREAKPOINT_LOAD; + break; + case HW_BREAKPOINT_W: + hw->ctrl.type = ARM_BREAKPOINT_STORE; + break; + case HW_BREAKPOINT_RW: + hw->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE; + break; + default: + return -EINVAL; + } + + /* Len */ + switch (attr->bp_len) { + case HW_BREAKPOINT_LEN_1: + hw->ctrl.len = ARM_BREAKPOINT_LEN_1; + break; + case HW_BREAKPOINT_LEN_2: + hw->ctrl.len = ARM_BREAKPOINT_LEN_2; + break; + case HW_BREAKPOINT_LEN_3: + hw->ctrl.len = ARM_BREAKPOINT_LEN_3; + break; + case HW_BREAKPOINT_LEN_4: + hw->ctrl.len = ARM_BREAKPOINT_LEN_4; + break; + case HW_BREAKPOINT_LEN_5: + hw->ctrl.len = ARM_BREAKPOINT_LEN_5; + break; + case HW_BREAKPOINT_LEN_6: + hw->ctrl.len = ARM_BREAKPOINT_LEN_6; + break; + case HW_BREAKPOINT_LEN_7: + hw->ctrl.len = ARM_BREAKPOINT_LEN_7; + break; + case HW_BREAKPOINT_LEN_8: + hw->ctrl.len = ARM_BREAKPOINT_LEN_8; + break; + default: + return -EINVAL; + } + + /* + * On AArch64, we only permit breakpoints of length 4, whereas + * AArch32 also requires breakpoints of length 2 for Thumb. + * Watchpoints can be of length 1, 2, 4 or 8 bytes. + */ + if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE) { + if (is_compat_bp(bp)) { + if (hw->ctrl.len != ARM_BREAKPOINT_LEN_2 && + hw->ctrl.len != ARM_BREAKPOINT_LEN_4) + return -EINVAL; + } else if (hw->ctrl.len != ARM_BREAKPOINT_LEN_4) { + /* + * FIXME: Some tools (I'm looking at you perf) assume + * that breakpoints should be sizeof(long). This + * is nonsense. For now, we fix up the parameter + * but we should probably return -EINVAL instead. + */ + hw->ctrl.len = ARM_BREAKPOINT_LEN_4; + } + } + + /* Address */ + hw->address = attr->bp_addr; + + /* + * Privilege + * Note that we disallow combined EL0/EL1 breakpoints because + * that would complicate the stepping code. + */ + if (arch_check_bp_in_kernelspace(hw)) + hw->ctrl.privilege = AARCH64_BREAKPOINT_EL1; + else + hw->ctrl.privilege = AARCH64_BREAKPOINT_EL0; + + /* Enabled? */ + hw->ctrl.enabled = !attr->disabled; + + return 0; +} + +/* + * Validate the arch-specific HW Breakpoint register settings. + */ +int hw_breakpoint_arch_parse(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw) +{ + int ret; + u64 alignment_mask, offset; + + /* Build the arch_hw_breakpoint. */ + ret = arch_build_bp_info(bp, attr, hw); + if (ret) + return ret; + + /* + * Check address alignment. + * We don't do any clever alignment correction for watchpoints + * because using 64-bit unaligned addresses is deprecated for + * AArch64. + * + * AArch32 tasks expect some simple alignment fixups, so emulate + * that here. + */ + if (is_compat_bp(bp)) { + if (hw->ctrl.len == ARM_BREAKPOINT_LEN_8) + alignment_mask = 0x7; + else + alignment_mask = 0x3; + offset = hw->address & alignment_mask; + switch (offset) { + case 0: + /* Aligned */ + break; + case 1: + case 2: + /* Allow halfword watchpoints and breakpoints. */ + if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2) + break; + + fallthrough; + case 3: + /* Allow single byte watchpoint. 
*/ + if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1) + break; + + fallthrough; + default: + return -EINVAL; + } + } else { + if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE) + alignment_mask = 0x3; + else + alignment_mask = 0x7; + offset = hw->address & alignment_mask; + } + + hw->address &= ~alignment_mask; + hw->ctrl.len <<= offset; + + /* + * Disallow per-task kernel breakpoints since these would + * complicate the stepping code. + */ + if (hw->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target) + return -EINVAL; + + return 0; +} + +/* + * Enable/disable all of the breakpoints active at the specified + * exception level at the register level. + * This is used when single-stepping after a breakpoint exception. + */ +static void toggle_bp_registers(int reg, enum dbg_active_el el, int enable) +{ + int i, max_slots, privilege; + u32 ctrl; + struct perf_event **slots; + + switch (reg) { + case AARCH64_DBG_REG_BCR: + slots = this_cpu_ptr(bp_on_reg); + max_slots = core_num_brps; + break; + case AARCH64_DBG_REG_WCR: + slots = this_cpu_ptr(wp_on_reg); + max_slots = core_num_wrps; + break; + default: + return; + } + + for (i = 0; i < max_slots; ++i) { + if (!slots[i]) + continue; + + privilege = counter_arch_bp(slots[i])->ctrl.privilege; + if (debug_exception_level(privilege) != el) + continue; + + ctrl = read_wb_reg(reg, i); + if (enable) + ctrl |= 0x1; + else + ctrl &= ~0x1; + write_wb_reg(reg, i, ctrl); + } +} +NOKPROBE_SYMBOL(toggle_bp_registers); + +/* + * Debug exception handlers. + */ +static int breakpoint_handler(unsigned long unused, unsigned long esr, + struct pt_regs *regs) +{ + int i, step = 0, *kernel_step; + u32 ctrl_reg; + u64 addr, val; + struct perf_event *bp, **slots; + struct debug_info *debug_info; + struct arch_hw_breakpoint_ctrl ctrl; + + slots = this_cpu_ptr(bp_on_reg); + addr = instruction_pointer(regs); + debug_info = &current->thread.debug; + + for (i = 0; i < core_num_brps; ++i) { + rcu_read_lock(); + + bp = slots[i]; + + if (bp == NULL) + goto unlock; + + /* Check if the breakpoint value matches. */ + val = read_wb_reg(AARCH64_DBG_REG_BVR, i); + if (val != (addr & ~0x3)) + goto unlock; + + /* Possible match, check the byte address select to confirm. */ + ctrl_reg = read_wb_reg(AARCH64_DBG_REG_BCR, i); + decode_ctrl_reg(ctrl_reg, &ctrl); + if (!((1 << (addr & 0x3)) & ctrl.len)) + goto unlock; + + counter_arch_bp(bp)->trigger = addr; + perf_bp_event(bp, regs); + + /* Do we need to handle the stepping? */ + if (uses_default_overflow_handler(bp)) + step = 1; +unlock: + rcu_read_unlock(); + } + + if (!step) + return 0; + + if (user_mode(regs)) { + debug_info->bps_disabled = 1; + toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 0); + + /* If we're already stepping a watchpoint, just return. */ + if (debug_info->wps_disabled) + return 0; + + if (test_thread_flag(TIF_SINGLESTEP)) + debug_info->suspended_step = 1; + else + user_enable_single_step(current); + } else { + toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 0); + kernel_step = this_cpu_ptr(&stepping_kernel_bp); + + if (*kernel_step != ARM_KERNEL_STEP_NONE) + return 0; + + if (kernel_active_single_step()) { + *kernel_step = ARM_KERNEL_STEP_SUSPEND; + } else { + *kernel_step = ARM_KERNEL_STEP_ACTIVE; + kernel_enable_single_step(regs); + } + } + + return 0; +} +NOKPROBE_SYMBOL(breakpoint_handler); + +/* + * Arm64 hardware does not always report a watchpoint hit address that matches + * one of the watchpoints set.
It can also report an address "near" the + * watchpoint if a single instruction access both watched and unwatched + * addresses. There is no straight-forward way, short of disassembling the + * offending instruction, to map that address back to the watchpoint. This + * function computes the distance of the memory access from the watchpoint as a + * heuristic for the likelihood that a given access triggered the watchpoint. + * + * See Section D2.10.5 "Determining the memory location that caused a Watchpoint + * exception" of ARMv8 Architecture Reference Manual for details. + * + * The function returns the distance of the address from the bytes watched by + * the watchpoint. In case of an exact match, it returns 0. + */ +static u64 get_distance_from_watchpoint(unsigned long addr, u64 val, + struct arch_hw_breakpoint_ctrl *ctrl) +{ + u64 wp_low, wp_high; + u32 lens, lene; + + addr = untagged_addr(addr); + + lens = __ffs(ctrl->len); + lene = __fls(ctrl->len); + + wp_low = val + lens; + wp_high = val + lene; + if (addr < wp_low) + return wp_low - addr; + else if (addr > wp_high) + return addr - wp_high; + else + return 0; +} + +static int watchpoint_report(struct perf_event *wp, unsigned long addr, + struct pt_regs *regs) +{ + int step = uses_default_overflow_handler(wp); + struct arch_hw_breakpoint *info = counter_arch_bp(wp); + + info->trigger = addr; + + /* + * If we triggered a user watchpoint from a uaccess routine, then + * handle the stepping ourselves since userspace really can't help + * us with this. + */ + if (!user_mode(regs) && info->ctrl.privilege == AARCH64_BREAKPOINT_EL0) + step = 1; + else + perf_bp_event(wp, regs); + + return step; +} + +static int watchpoint_handler(unsigned long addr, unsigned long esr, + struct pt_regs *regs) +{ + int i, step = 0, *kernel_step, access, closest_match = 0; + u64 min_dist = -1, dist; + u32 ctrl_reg; + u64 val; + struct perf_event *wp, **slots; + struct debug_info *debug_info; + struct arch_hw_breakpoint_ctrl ctrl; + + slots = this_cpu_ptr(wp_on_reg); + debug_info = &current->thread.debug; + + /* + * Find all watchpoints that match the reported address. If no exact + * match is found. Attribute the hit to the closest watchpoint. + */ + rcu_read_lock(); + for (i = 0; i < core_num_wrps; ++i) { + wp = slots[i]; + if (wp == NULL) + continue; + + /* + * Check that the access type matches. + * 0 => load, otherwise => store + */ + access = (esr & AARCH64_ESR_ACCESS_MASK) ? HW_BREAKPOINT_W : + HW_BREAKPOINT_R; + if (!(access & hw_breakpoint_type(wp))) + continue; + + /* Check if the watchpoint value and byte select match. */ + val = read_wb_reg(AARCH64_DBG_REG_WVR, i); + ctrl_reg = read_wb_reg(AARCH64_DBG_REG_WCR, i); + decode_ctrl_reg(ctrl_reg, &ctrl); + dist = get_distance_from_watchpoint(addr, val, &ctrl); + if (dist < min_dist) { + min_dist = dist; + closest_match = i; + } + /* Is this an exact match? */ + if (dist != 0) + continue; + + step = watchpoint_report(wp, addr, regs); + } + + /* No exact match found? */ + if (min_dist > 0 && min_dist != -1) + step = watchpoint_report(slots[closest_match], addr, regs); + + rcu_read_unlock(); + + if (!step) + return 0; + + /* + * We always disable EL0 watchpoints because the kernel can + * cause these to fire via an unprivileged access. + */ + toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 0); + + if (user_mode(regs)) { + debug_info->wps_disabled = 1; + + /* If we're already stepping a breakpoint, just return.
*/ + if (debug_info->bps_disabled) + return 0; + + if (test_thread_flag(TIF_SINGLESTEP)) + debug_info->suspended_step = 1; + else + user_enable_single_step(current); + } else { + toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 0); + kernel_step = this_cpu_ptr(&stepping_kernel_bp); + + if (*kernel_step != ARM_KERNEL_STEP_NONE) + return 0; + + if (kernel_active_single_step()) { + *kernel_step = ARM_KERNEL_STEP_SUSPEND; + } else { + *kernel_step = ARM_KERNEL_STEP_ACTIVE; + kernel_enable_single_step(regs); + } + } + + return 0; +} +NOKPROBE_SYMBOL(watchpoint_handler); + +/* + * Handle single-step exception. + */ +int reinstall_suspended_bps(struct pt_regs *regs) +{ + struct debug_info *debug_info = &current->thread.debug; + int handled_exception = 0, *kernel_step; + + kernel_step = this_cpu_ptr(&stepping_kernel_bp); + + /* + * Called from single-step exception handler. + * Return 0 if execution can resume, 1 if a SIGTRAP should be + * reported. + */ + if (user_mode(regs)) { + if (debug_info->bps_disabled) { + debug_info->bps_disabled = 0; + toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 1); + handled_exception = 1; + } + + if (debug_info->wps_disabled) { + debug_info->wps_disabled = 0; + toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1); + handled_exception = 1; + } + + if (handled_exception) { + if (debug_info->suspended_step) { + debug_info->suspended_step = 0; + /* Allow exception handling to fall-through. */ + handled_exception = 0; + } else { + user_disable_single_step(current); + } + } + } else if (*kernel_step != ARM_KERNEL_STEP_NONE) { + toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 1); + toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 1); + + if (!debug_info->wps_disabled) + toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1); + + if (*kernel_step != ARM_KERNEL_STEP_SUSPEND) { + kernel_disable_single_step(); + handled_exception = 1; + } else { + handled_exception = 0; + } + + *kernel_step = ARM_KERNEL_STEP_NONE; + } + + return !handled_exception; +} +NOKPROBE_SYMBOL(reinstall_suspended_bps); + +/* + * Context-switcher for restoring suspended breakpoints. + */ +void hw_breakpoint_thread_switch(struct task_struct *next) +{ + /* + * current next + * disabled: 0 0 => The usual case, NOTIFY_DONE + * 0 1 => Disable the registers + * 1 0 => Enable the registers + * 1 1 => NOTIFY_DONE. per-task bps will + * get taken care of by perf. + */ + + struct debug_info *current_debug_info, *next_debug_info; + + current_debug_info = &current->thread.debug; + next_debug_info = &next->thread.debug; + + /* Update breakpoints. */ + if (current_debug_info->bps_disabled != next_debug_info->bps_disabled) + toggle_bp_registers(AARCH64_DBG_REG_BCR, + DBG_ACTIVE_EL0, + !next_debug_info->bps_disabled); + + /* Update watchpoints. */ + if (current_debug_info->wps_disabled != next_debug_info->wps_disabled) + toggle_bp_registers(AARCH64_DBG_REG_WCR, + DBG_ACTIVE_EL0, + !next_debug_info->wps_disabled); +} + +/* + * CPU initialisation. + */ +static int hw_breakpoint_reset(unsigned int cpu) +{ + int i; + struct perf_event **slots; + /* + * When a CPU goes through cold-boot, it does not have any installed + * slot, so it is safe to share the same function for restoring and + * resetting breakpoints; when a CPU is hotplugged in, it goes + * through the slots, which are all empty, hence it just resets control + * and value for debug registers.
+ * When this function is triggered on warm-boot through a CPU PM + * notifier some slots might be initialized; if so they are + * reprogrammed according to the debug slots content. + */ + for (slots = this_cpu_ptr(bp_on_reg), i = 0; i < core_num_brps; ++i) { + if (slots[i]) { + hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE); + } else { + write_wb_reg(AARCH64_DBG_REG_BCR, i, 0UL); + write_wb_reg(AARCH64_DBG_REG_BVR, i, 0UL); + } + } + + for (slots = this_cpu_ptr(wp_on_reg), i = 0; i < core_num_wrps; ++i) { + if (slots[i]) { + hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE); + } else { + write_wb_reg(AARCH64_DBG_REG_WCR, i, 0UL); + write_wb_reg(AARCH64_DBG_REG_WVR, i, 0UL); + } + } + + return 0; +} + +/* + * One-time initialisation. + */ +static int __init arch_hw_breakpoint_init(void) +{ + int ret; + + core_num_brps = get_num_brps(); + core_num_wrps = get_num_wrps(); + + pr_info("found %d breakpoint and %d watchpoint registers.\n", + core_num_brps, core_num_wrps); + + /* Register debug fault handlers. */ + hook_debug_fault_code(DBG_ESR_EVT_HWBP, breakpoint_handler, SIGTRAP, + TRAP_HWBKPT, "hw-breakpoint handler"); + hook_debug_fault_code(DBG_ESR_EVT_HWWP, watchpoint_handler, SIGTRAP, + TRAP_HWBKPT, "hw-watchpoint handler"); + + /* + * Reset the breakpoint resources. We assume that a halting + * debugger will leave the world in a nice state for us. + */ + ret = cpuhp_setup_state(CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING, + "perf/arm64/hw_breakpoint:starting", + hw_breakpoint_reset, NULL); + if (ret) + pr_err("failed to register CPU hotplug notifier: %d\n", ret); + + /* Register cpu_suspend hw breakpoint restore hook */ + cpu_suspend_set_dbg_restorer(hw_breakpoint_reset); + + return ret; +} +arch_initcall(arch_hw_breakpoint_init); + +void hw_breakpoint_pmu_read(struct perf_event *bp) +{ +} + +/* + * Dummy function to register with die_notifier. + */ +int hw_breakpoint_exceptions_notify(struct notifier_block *unused, + unsigned long val, void *data) +{ + return NOTIFY_DONE; +} diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S new file mode 100644 index 0000000000..65f76064c8 --- /dev/null +++ b/arch/arm64/kernel/hyp-stub.S @@ -0,0 +1,258 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Hypervisor stub + * + * Copyright (C) 2012 ARM Ltd. 
+ * Author: Marc Zyngier + */ + +#include +#include + +#include +#include +#include +#include +#include +#include + + .text + .pushsection .hyp.text, "ax" + + .align 11 + +SYM_CODE_START(__hyp_stub_vectors) + ventry el2_sync_invalid // Synchronous EL2t + ventry el2_irq_invalid // IRQ EL2t + ventry el2_fiq_invalid // FIQ EL2t + ventry el2_error_invalid // Error EL2t + + ventry elx_sync // Synchronous EL2h + ventry el2_irq_invalid // IRQ EL2h + ventry el2_fiq_invalid // FIQ EL2h + ventry el2_error_invalid // Error EL2h + + ventry elx_sync // Synchronous 64-bit EL1 + ventry el1_irq_invalid // IRQ 64-bit EL1 + ventry el1_fiq_invalid // FIQ 64-bit EL1 + ventry el1_error_invalid // Error 64-bit EL1 + + ventry el1_sync_invalid // Synchronous 32-bit EL1 + ventry el1_irq_invalid // IRQ 32-bit EL1 + ventry el1_fiq_invalid // FIQ 32-bit EL1 + ventry el1_error_invalid // Error 32-bit EL1 +SYM_CODE_END(__hyp_stub_vectors) + + .align 11 + +SYM_CODE_START_LOCAL(elx_sync) + cmp x0, #HVC_SET_VECTORS + b.ne 1f + msr vbar_el2, x1 + b 9f + +1: cmp x0, #HVC_FINALISE_EL2 + b.eq __finalise_el2 + +2: cmp x0, #HVC_SOFT_RESTART + b.ne 3f + mov x0, x2 + mov x2, x4 + mov x4, x1 + mov x1, x3 + br x4 // no return + +3: cmp x0, #HVC_RESET_VECTORS + beq 9f // Nothing to reset! + + /* Someone called kvm_call_hyp() against the hyp-stub... */ + mov_q x0, HVC_STUB_ERR + eret + +9: mov x0, xzr + eret +SYM_CODE_END(elx_sync) + +SYM_CODE_START_LOCAL(__finalise_el2) + finalise_el2_state + + // nVHE? No way! Give me the real thing! + // Sanity check: MMU *must* be off + mrs x1, sctlr_el2 + tbnz x1, #0, 1f + + // Needs to be VHE capable, obviously + check_override id_aa64mmfr1 ID_AA64MMFR1_EL1_VH_SHIFT 0f 1f x1 x2 + +0: // Check whether we only want the hypervisor to run VHE, not the kernel + adr_l x1, arm64_sw_feature_override + ldr x2, [x1, FTR_OVR_VAL_OFFSET] + ldr x1, [x1, FTR_OVR_MASK_OFFSET] + and x2, x2, x1 + ubfx x2, x2, #ARM64_SW_FEATURE_OVERRIDE_HVHE, #4 + cbz x2, 2f + +1: mov_q x0, HVC_STUB_ERR + eret +2: + // Engage the VHE magic! + mov_q x0, HCR_HOST_VHE_FLAGS + msr hcr_el2, x0 + isb + + // Use the EL1 allocated stack, per-cpu offset + mrs x0, sp_el1 + mov sp, x0 + mrs x0, tpidr_el1 + msr tpidr_el2, x0 + + // FP configuration, vectors + mrs_s x0, SYS_CPACR_EL12 + msr cpacr_el1, x0 + mrs_s x0, SYS_VBAR_EL12 + msr vbar_el1, x0 + + // Use EL2 translations for SPE & TRBE and disable access from EL1 + mrs x0, mdcr_el2 + bic x0, x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT) + bic x0, x0, #(MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT) + msr mdcr_el2, x0 + + // Transfer the MM state from EL1 to EL2 + mrs_s x0, SYS_TCR_EL12 + msr tcr_el1, x0 + mrs_s x0, SYS_TTBR0_EL12 + msr ttbr0_el1, x0 + mrs_s x0, SYS_TTBR1_EL12 + msr ttbr1_el1, x0 + mrs_s x0, SYS_MAIR_EL12 + msr mair_el1, x0 + mrs x1, REG_ID_AA64MMFR3_EL1 + ubfx x1, x1, #ID_AA64MMFR3_EL1_TCRX_SHIFT, #4 + cbz x1, .Lskip_tcr2 + mrs x0, REG_TCR2_EL12 + msr REG_TCR2_EL1, x0 + + // Transfer permission indirection state + mrs x1, REG_ID_AA64MMFR3_EL1 + ubfx x1, x1, #ID_AA64MMFR3_EL1_S1PIE_SHIFT, #4 + cbz x1, .Lskip_indirection + mrs x0, REG_PIRE0_EL12 + msr REG_PIRE0_EL1, x0 + mrs x0, REG_PIR_EL12 + msr REG_PIR_EL1, x0 + +.Lskip_indirection: +.Lskip_tcr2: + + isb + + // Hack the exception return to stay at EL2 + mrs x0, spsr_el1 + and x0, x0, #~PSR_MODE_MASK + mov x1, #PSR_MODE_EL2h + orr x0, x0, x1 + msr spsr_el1, x0 + + b enter_vhe +SYM_CODE_END(__finalise_el2) + + // At the point where we reach enter_vhe(), we run with + // the MMU off (which is enforced by __finalise_el2()). 
+ // We thus need to be in the idmap, or everything will + // explode when enabling the MMU. + + .pushsection .idmap.text, "ax" + +SYM_CODE_START_LOCAL(enter_vhe) + // Invalidate TLBs before enabling the MMU + tlbi vmalle1 + dsb nsh + isb + + // Enable the EL2 S1 MMU, as set up from EL1 + mrs_s x0, SYS_SCTLR_EL12 + set_sctlr_el1 x0 + + // Disable the EL1 S1 MMU for a good measure + mov_q x0, INIT_SCTLR_EL1_MMU_OFF + msr_s SYS_SCTLR_EL12, x0 + + mov x0, xzr + + eret +SYM_CODE_END(enter_vhe) + + .popsection + +.macro invalid_vector label +SYM_CODE_START_LOCAL(\label) + b \label +SYM_CODE_END(\label) +.endm + + invalid_vector el2_sync_invalid + invalid_vector el2_irq_invalid + invalid_vector el2_fiq_invalid + invalid_vector el2_error_invalid + invalid_vector el1_sync_invalid + invalid_vector el1_irq_invalid + invalid_vector el1_fiq_invalid + invalid_vector el1_error_invalid + + .popsection + +/* + * __hyp_set_vectors: Call this after boot to set the initial hypervisor + * vectors as part of hypervisor installation. On an SMP system, this should + * be called on each CPU. + * + * x0 must be the physical address of the new vector table, and must be + * 2KB aligned. + * + * Before calling this, you must check that the stub hypervisor is installed + * everywhere, by waiting for any secondary CPUs to be brought up and then + * checking that is_hyp_mode_available() is true. + * + * If not, there is a pre-existing hypervisor, some CPUs failed to boot, or + * something else went wrong... in such cases, trying to install a new + * hypervisor is unlikely to work as desired. + * + * When you call into your shiny new hypervisor, sp_el2 will contain junk, + * so you will need to set that to something sensible at the new hypervisor's + * initialisation entry point. + */ + +SYM_FUNC_START(__hyp_set_vectors) + mov x1, x0 + mov x0, #HVC_SET_VECTORS + hvc #0 + ret +SYM_FUNC_END(__hyp_set_vectors) + +SYM_FUNC_START(__hyp_reset_vectors) + mov x0, #HVC_RESET_VECTORS + hvc #0 + ret +SYM_FUNC_END(__hyp_reset_vectors) + +/* + * Entry point to finalise EL2 and switch to VHE if deemed capable + * + * w0: boot mode, as returned by init_kernel_el() + */ +SYM_FUNC_START(finalise_el2) + // Need to have booted at EL2 + cmp w0, #BOOT_CPU_MODE_EL2 + b.ne 1f + + // and still be at EL1 + mrs x0, CurrentEL + cmp x0, #CurrentEL_EL1 + b.ne 1f + + mov x0, #HVC_FINALISE_EL2 + hvc #0 +1: + ret +SYM_FUNC_END(finalise_el2) diff --git a/arch/arm64/kernel/idle.c b/arch/arm64/kernel/idle.c new file mode 100644 index 0000000000..c1125753fe --- /dev/null +++ b/arch/arm64/kernel/idle.c @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Low-level idle sequences + */ + +#include +#include + +#include +#include +#include +#include + +/* + * cpu_do_idle() + * + * Idle the processor (wait for interrupt). + * + * If the CPU supports priority masking we must do additional work to + * ensure that interrupts are not masked at the PMR (because the core will + * not wake up if we block the wake up signal in the interrupt controller). + */ +void noinstr cpu_do_idle(void) +{ + struct arm_cpuidle_irq_context context; + + arm_cpuidle_save_irq_context(&context); + + dsb(sy); + wfi(); + + arm_cpuidle_restore_irq_context(&context); +} + +/* + * This is our default idle handler. 
+ */ +void noinstr arch_cpu_idle(void) +{ + /* + * This should do all the clock switching and wait for interrupt + * tricks + */ + cpu_do_idle(); +} diff --git a/arch/arm64/kernel/idreg-override.c b/arch/arm64/kernel/idreg-override.c new file mode 100644 index 0000000000..3addc09f87 --- /dev/null +++ b/arch/arm64/kernel/idreg-override.c @@ -0,0 +1,338 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Early cpufeature override framework + * + * Copyright (C) 2020 Google LLC + * Author: Marc Zyngier + */ + +#include +#include +#include + +#include +#include +#include + +#define FTR_DESC_NAME_LEN 20 +#define FTR_DESC_FIELD_LEN 10 +#define FTR_ALIAS_NAME_LEN 30 +#define FTR_ALIAS_OPTION_LEN 116 + +static u64 __boot_status __initdata; + +struct ftr_set_desc { + char name[FTR_DESC_NAME_LEN]; + struct arm64_ftr_override *override; + struct { + char name[FTR_DESC_FIELD_LEN]; + u8 shift; + u8 width; + bool (*filter)(u64 val); + } fields[]; +}; + +#define FIELD(n, s, f) { .name = n, .shift = s, .width = 4, .filter = f } + +static bool __init mmfr1_vh_filter(u64 val) +{ + /* + * If we ever reach this point while running VHE, we're + * guaranteed to be on one of these funky, VHE-stuck CPUs. If + * the user was trying to force nVHE on us, proceed with + * attitude adjustment. + */ + return !(__boot_status == (BOOT_CPU_FLAG_E2H | BOOT_CPU_MODE_EL2) && + val == 0); +} + +static const struct ftr_set_desc mmfr1 __initconst = { + .name = "id_aa64mmfr1", + .override = &id_aa64mmfr1_override, + .fields = { + FIELD("vh", ID_AA64MMFR1_EL1_VH_SHIFT, mmfr1_vh_filter), + {} + }, +}; + +static bool __init pfr0_sve_filter(u64 val) +{ + /* + * Disabling SVE also means disabling all the features that + * are associated with it. The easiest way to do it is just to + * override id_aa64zfr0_el1 to be 0. + */ + if (!val) { + id_aa64zfr0_override.val = 0; + id_aa64zfr0_override.mask = GENMASK(63, 0); + } + + return true; +} + +static const struct ftr_set_desc pfr0 __initconst = { + .name = "id_aa64pfr0", + .override = &id_aa64pfr0_override, + .fields = { + FIELD("sve", ID_AA64PFR0_EL1_SVE_SHIFT, pfr0_sve_filter), + {} + }, +}; + +static bool __init pfr1_sme_filter(u64 val) +{ + /* + * Similarly to SVE, disabling SME also means disabling all + * the features that are associated with it. Just set + * id_aa64smfr0_el1 to 0 and don't look back. 
+ */ + if (!val) { + id_aa64smfr0_override.val = 0; + id_aa64smfr0_override.mask = GENMASK(63, 0); + } + + return true; +} + +static const struct ftr_set_desc pfr1 __initconst = { + .name = "id_aa64pfr1", + .override = &id_aa64pfr1_override, + .fields = { + FIELD("bt", ID_AA64PFR1_EL1_BT_SHIFT, NULL ), + FIELD("mte", ID_AA64PFR1_EL1_MTE_SHIFT, NULL), + FIELD("sme", ID_AA64PFR1_EL1_SME_SHIFT, pfr1_sme_filter), + {} + }, +}; + +static const struct ftr_set_desc isar1 __initconst = { + .name = "id_aa64isar1", + .override = &id_aa64isar1_override, + .fields = { + FIELD("gpi", ID_AA64ISAR1_EL1_GPI_SHIFT, NULL), + FIELD("gpa", ID_AA64ISAR1_EL1_GPA_SHIFT, NULL), + FIELD("api", ID_AA64ISAR1_EL1_API_SHIFT, NULL), + FIELD("apa", ID_AA64ISAR1_EL1_APA_SHIFT, NULL), + {} + }, +}; + +static const struct ftr_set_desc isar2 __initconst = { + .name = "id_aa64isar2", + .override = &id_aa64isar2_override, + .fields = { + FIELD("gpa3", ID_AA64ISAR2_EL1_GPA3_SHIFT, NULL), + FIELD("apa3", ID_AA64ISAR2_EL1_APA3_SHIFT, NULL), + FIELD("mops", ID_AA64ISAR2_EL1_MOPS_SHIFT, NULL), + {} + }, +}; + +static const struct ftr_set_desc smfr0 __initconst = { + .name = "id_aa64smfr0", + .override = &id_aa64smfr0_override, + .fields = { + FIELD("smever", ID_AA64SMFR0_EL1_SMEver_SHIFT, NULL), + /* FA64 is a one bit field... :-/ */ + { "fa64", ID_AA64SMFR0_EL1_FA64_SHIFT, 1, }, + {} + }, +}; + +static bool __init hvhe_filter(u64 val) +{ + u64 mmfr1 = read_sysreg(id_aa64mmfr1_el1); + + return (val == 1 && + lower_32_bits(__boot_status) == BOOT_CPU_MODE_EL2 && + cpuid_feature_extract_unsigned_field(mmfr1, + ID_AA64MMFR1_EL1_VH_SHIFT)); +} + +static const struct ftr_set_desc sw_features __initconst = { + .name = "arm64_sw", + .override = &arm64_sw_feature_override, + .fields = { + FIELD("nokaslr", ARM64_SW_FEATURE_OVERRIDE_NOKASLR, NULL), + FIELD("hvhe", ARM64_SW_FEATURE_OVERRIDE_HVHE, hvhe_filter), + {} + }, +}; + +static const struct ftr_set_desc * const regs[] __initconst = { + &mmfr1, + &pfr0, + &pfr1, + &isar1, + &isar2, + &smfr0, + &sw_features, +}; + +static const struct { + char alias[FTR_ALIAS_NAME_LEN]; + char feature[FTR_ALIAS_OPTION_LEN]; +} aliases[] __initconst = { + { "kvm-arm.mode=nvhe", "id_aa64mmfr1.vh=0" }, + { "kvm-arm.mode=protected", "id_aa64mmfr1.vh=0" }, + { "arm64.nosve", "id_aa64pfr0.sve=0" }, + { "arm64.nosme", "id_aa64pfr1.sme=0" }, + { "arm64.nobti", "id_aa64pfr1.bt=0" }, + { "arm64.nopauth", + "id_aa64isar1.gpi=0 id_aa64isar1.gpa=0 " + "id_aa64isar1.api=0 id_aa64isar1.apa=0 " + "id_aa64isar2.gpa3=0 id_aa64isar2.apa3=0" }, + { "arm64.nomops", "id_aa64isar2.mops=0" }, + { "arm64.nomte", "id_aa64pfr1.mte=0" }, + { "nokaslr", "arm64_sw.nokaslr=1" }, +}; + +static int __init parse_nokaslr(char *unused) +{ + /* nokaslr param handling is done by early cpufeature code */ + return 0; +} +early_param("nokaslr", parse_nokaslr); + +static int __init find_field(const char *cmdline, + const struct ftr_set_desc *reg, int f, u64 *v) +{ + char opt[FTR_DESC_NAME_LEN + FTR_DESC_FIELD_LEN + 2]; + int len; + + len = snprintf(opt, ARRAY_SIZE(opt), "%s.%s=", + reg->name, reg->fields[f].name); + + if (!parameqn(cmdline, opt, len)) + return -1; + + return kstrtou64(cmdline + len, 0, v); +} + +static void __init match_options(const char *cmdline) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(regs); i++) { + int f; + + if (!regs[i]->override) + continue; + + for (f = 0; strlen(regs[i]->fields[f].name); f++) { + u64 shift = regs[i]->fields[f].shift; + u64 width = regs[i]->fields[f].width ?: 4; + u64 mask = GENMASK_ULL(shift + width 
- 1, shift); + u64 v; + + if (find_field(cmdline, regs[i], f, &v)) + continue; + + /* + * If an override gets filtered out, advertise + * it by setting the value to the all-ones while + * clearing the mask... Yes, this is fragile. + */ + if (regs[i]->fields[f].filter && + !regs[i]->fields[f].filter(v)) { + regs[i]->override->val |= mask; + regs[i]->override->mask &= ~mask; + continue; + } + + regs[i]->override->val &= ~mask; + regs[i]->override->val |= (v << shift) & mask; + regs[i]->override->mask |= mask; + + return; + } + } +} + +static __init void __parse_cmdline(const char *cmdline, bool parse_aliases) +{ + do { + char buf[256]; + size_t len; + int i; + + cmdline = skip_spaces(cmdline); + + for (len = 0; cmdline[len] && !isspace(cmdline[len]); len++); + if (!len) + return; + + len = min(len, ARRAY_SIZE(buf) - 1); + memcpy(buf, cmdline, len); + buf[len] = '\0'; + + if (strcmp(buf, "--") == 0) + return; + + cmdline += len; + + match_options(buf); + + for (i = 0; parse_aliases && i < ARRAY_SIZE(aliases); i++) + if (parameq(buf, aliases[i].alias)) + __parse_cmdline(aliases[i].feature, false); + } while (1); +} + +static __init const u8 *get_bootargs_cmdline(void) +{ + const u8 *prop; + void *fdt; + int node; + + fdt = get_early_fdt_ptr(); + if (!fdt) + return NULL; + + node = fdt_path_offset(fdt, "/chosen"); + if (node < 0) + return NULL; + + prop = fdt_getprop(fdt, node, "bootargs", NULL); + if (!prop) + return NULL; + + return strlen(prop) ? prop : NULL; +} + +static __init void parse_cmdline(void) +{ + const u8 *prop = get_bootargs_cmdline(); + + if (IS_ENABLED(CONFIG_CMDLINE_FORCE) || !prop) + __parse_cmdline(CONFIG_CMDLINE, true); + + if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && prop) + __parse_cmdline(prop, true); +} + +/* Keep checkers quiet */ +void init_feature_override(u64 boot_status); + +asmlinkage void __init init_feature_override(u64 boot_status) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(regs); i++) { + if (regs[i]->override) { + regs[i]->override->val = 0; + regs[i]->override->mask = 0; + } + } + + __boot_status = boot_status; + + parse_cmdline(); + + for (i = 0; i < ARRAY_SIZE(regs); i++) { + if (regs[i]->override) + dcache_clean_inval_poc((unsigned long)regs[i]->override, + (unsigned long)regs[i]->override + + sizeof(*regs[i]->override)); + } +} diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h new file mode 100644 index 0000000000..35f3c79595 --- /dev/null +++ b/arch/arm64/kernel/image-vars.h @@ -0,0 +1,115 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Linker script variables to be set after section resolution, as + * ld.lld does not like variables assigned before SECTIONS is processed. + */ +#ifndef __ARM64_KERNEL_IMAGE_VARS_H +#define __ARM64_KERNEL_IMAGE_VARS_H + +#ifndef LINKER_SCRIPT +#error This file should only be included in vmlinux.lds.S +#endif + +PROVIDE(__efistub_primary_entry = primary_entry); + +/* + * The EFI stub has its own symbol namespace prefixed by __efistub_, to + * isolate it from the kernel proper. The following symbols are legally + * accessed by the stub, so provide some aliases to make them accessible. + * Only include data symbols here, or text symbols of functions that are + * guaranteed to be safe when executed at another offset than they were + * linked at. 
The routines below are all implemented in assembler in a + * position independent manner + */ +PROVIDE(__efistub_caches_clean_inval_pou = __pi_caches_clean_inval_pou); + +PROVIDE(__efistub__text = _text); +PROVIDE(__efistub__end = _end); +PROVIDE(__efistub___inittext_end = __inittext_end); +PROVIDE(__efistub__edata = _edata); +PROVIDE(__efistub_screen_info = screen_info); +PROVIDE(__efistub__ctype = _ctype); + +PROVIDE(__pi___memcpy = __pi_memcpy); +PROVIDE(__pi___memmove = __pi_memmove); +PROVIDE(__pi___memset = __pi_memset); + +#ifdef CONFIG_KVM + +/* + * KVM nVHE code has its own symbol namespace prefixed with __kvm_nvhe_, to + * separate it from the kernel proper. The following symbols are legally + * accessed by it, therefore provide aliases to make them linkable. + * Do not include symbols which may not be safely accessed under hypervisor + * memory mappings. + */ + +/* Alternative callbacks for init-time patching of nVHE hyp code. */ +KVM_NVHE_ALIAS(kvm_patch_vector_branch); +KVM_NVHE_ALIAS(kvm_update_va_mask); +KVM_NVHE_ALIAS(kvm_get_kimage_voffset); +KVM_NVHE_ALIAS(kvm_compute_final_ctr_el0); +KVM_NVHE_ALIAS(spectre_bhb_patch_loop_iter); +KVM_NVHE_ALIAS(spectre_bhb_patch_loop_mitigation_enable); +KVM_NVHE_ALIAS(spectre_bhb_patch_wa3); +KVM_NVHE_ALIAS(spectre_bhb_patch_clearbhb); +KVM_NVHE_ALIAS(alt_cb_patch_nops); + +/* Global kernel state accessed by nVHE hyp code. */ +KVM_NVHE_ALIAS(kvm_vgic_global_state); + +/* Kernel symbols used to call panic() from nVHE hyp code (via ERET). */ +KVM_NVHE_ALIAS(nvhe_hyp_panic_handler); + +/* Vectors installed by hyp-init on reset HVC. */ +KVM_NVHE_ALIAS(__hyp_stub_vectors); + +/* Static keys which are set if a vGIC trap should be handled in hyp. */ +KVM_NVHE_ALIAS(vgic_v2_cpuif_trap); +KVM_NVHE_ALIAS(vgic_v3_cpuif_trap); + +#ifdef CONFIG_ARM64_PSEUDO_NMI +/* Static key checked in GIC_PRIO_IRQOFF. */ +KVM_NVHE_ALIAS(gic_nonsecure_priorities); +#endif + +/* EL2 exception handling */ +KVM_NVHE_ALIAS(__start___kvm_ex_table); +KVM_NVHE_ALIAS(__stop___kvm_ex_table); + +/* PMU available static key */ +#ifdef CONFIG_HW_PERF_EVENTS +KVM_NVHE_ALIAS(kvm_arm_pmu_available); +#endif + +/* Position-independent library routines */ +KVM_NVHE_ALIAS_HYP(clear_page, __pi_clear_page); +KVM_NVHE_ALIAS_HYP(copy_page, __pi_copy_page); +KVM_NVHE_ALIAS_HYP(memcpy, __pi_memcpy); +KVM_NVHE_ALIAS_HYP(memset, __pi_memset); + +#ifdef CONFIG_KASAN +KVM_NVHE_ALIAS_HYP(__memcpy, __pi_memcpy); +KVM_NVHE_ALIAS_HYP(__memset, __pi_memset); +#endif + +/* Hyp memory sections */ +KVM_NVHE_ALIAS(__hyp_idmap_text_start); +KVM_NVHE_ALIAS(__hyp_idmap_text_end); +KVM_NVHE_ALIAS(__hyp_text_start); +KVM_NVHE_ALIAS(__hyp_text_end); +KVM_NVHE_ALIAS(__hyp_bss_start); +KVM_NVHE_ALIAS(__hyp_bss_end); +KVM_NVHE_ALIAS(__hyp_rodata_start); +KVM_NVHE_ALIAS(__hyp_rodata_end); + +/* pKVM static key */ +KVM_NVHE_ALIAS(kvm_protected_mode_initialized); + +#endif /* CONFIG_KVM */ + +#ifdef CONFIG_EFI_ZBOOT +_kernel_codesize = ABSOLUTE(__inittext_end - _text); +#endif + +#endif /* __ARM64_KERNEL_IMAGE_VARS_H */ diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h new file mode 100644 index 0000000000..7bc3ba8979 --- /dev/null +++ b/arch/arm64/kernel/image.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Linker script macros to generate Image header fields. + * + * Copyright (C) 2014 ARM Ltd. 
+ */ +#ifndef __ARM64_KERNEL_IMAGE_H +#define __ARM64_KERNEL_IMAGE_H + +#ifndef LINKER_SCRIPT +#error This file should only be included in vmlinux.lds.S +#endif + +#include + +/* + * There aren't any ELF relocations we can use to endian-swap values known only + * at link time (e.g. the subtraction of two symbol addresses), so we must get + * the linker to endian-swap certain values before emitting them. + * + * Note that, in order for this to work when building the ELF64 PIE executable + * (for KASLR), these values should not be referenced via R_AARCH64_ABS64 + * relocations, since these are fixed up at runtime rather than at build time + * when PIE is in effect. So we need to split them up in 32-bit high and low + * words. + */ +#ifdef CONFIG_CPU_BIG_ENDIAN +#define DATA_LE32(data) \ + ((((data) & 0x000000ff) << 24) | \ + (((data) & 0x0000ff00) << 8) | \ + (((data) & 0x00ff0000) >> 8) | \ + (((data) & 0xff000000) >> 24)) +#else +#define DATA_LE32(data) ((data) & 0xffffffff) +#endif + +#define DEFINE_IMAGE_LE64(sym, data) \ + sym##_lo32 = DATA_LE32((data) & 0xffffffff); \ + sym##_hi32 = DATA_LE32((data) >> 32) + +#define __HEAD_FLAG(field) (__HEAD_FLAG_##field << \ + ARM64_IMAGE_FLAG_##field##_SHIFT) + +#ifdef CONFIG_CPU_BIG_ENDIAN +#define __HEAD_FLAG_BE ARM64_IMAGE_FLAG_BE +#else +#define __HEAD_FLAG_BE ARM64_IMAGE_FLAG_LE +#endif + +#define __HEAD_FLAG_PAGE_SIZE ((PAGE_SHIFT - 10) / 2) + +#define __HEAD_FLAG_PHYS_BASE 1 + +#define __HEAD_FLAGS (__HEAD_FLAG(BE) | \ + __HEAD_FLAG(PAGE_SIZE) | \ + __HEAD_FLAG(PHYS_BASE)) + +/* + * These will output as part of the Image header, which should be little-endian + * regardless of the endianness of the kernel. While constant values could be + * endian swapped in head.S, all are done here for consistency. + */ +#define HEAD_SYMBOLS \ + DEFINE_IMAGE_LE64(_kernel_size_le, _end - _text); \ + DEFINE_IMAGE_LE64(_kernel_flags_le, __HEAD_FLAGS); + +#endif /* __ARM64_KERNEL_IMAGE_H */ diff --git a/arch/arm64/kernel/io.c b/arch/arm64/kernel/io.c new file mode 100644 index 0000000000..aa7a4ec6a3 --- /dev/null +++ b/arch/arm64/kernel/io.c @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Based on arch/arm/kernel/io.c + * + * Copyright (C) 2012 ARM Ltd. + */ + +#include +#include +#include + +/* + * Copy data from IO memory space to "real" memory space. + */ +void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count) +{ + while (count && !IS_ALIGNED((unsigned long)from, 8)) { + *(u8 *)to = __raw_readb(from); + from++; + to++; + count--; + } + + while (count >= 8) { + *(u64 *)to = __raw_readq(from); + from += 8; + to += 8; + count -= 8; + } + + while (count) { + *(u8 *)to = __raw_readb(from); + from++; + to++; + count--; + } +} +EXPORT_SYMBOL(__memcpy_fromio); + +/* + * Copy data from "real" memory space to IO memory space. + */ +void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count) +{ + while (count && !IS_ALIGNED((unsigned long)to, 8)) { + __raw_writeb(*(u8 *)from, to); + from++; + to++; + count--; + } + + while (count >= 8) { + __raw_writeq(*(u64 *)from, to); + from += 8; + to += 8; + count -= 8; + } + + while (count) { + __raw_writeb(*(u8 *)from, to); + from++; + to++; + count--; + } +} +EXPORT_SYMBOL(__memcpy_toio); + +/* + * "memset" on IO memory space. 
+ */ +void __memset_io(volatile void __iomem *dst, int c, size_t count) +{ + u64 qc = (u8)c; + + qc |= qc << 8; + qc |= qc << 16; + qc |= qc << 32; + + while (count && !IS_ALIGNED((unsigned long)dst, 8)) { + __raw_writeb(c, dst); + dst++; + count--; + } + + while (count >= 8) { + __raw_writeq(qc, dst); + dst += 8; + count -= 8; + } + + while (count) { + __raw_writeb(c, dst); + dst++; + count--; + } +} +EXPORT_SYMBOL(__memset_io); diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c new file mode 100644 index 0000000000..6ad5c6ef53 --- /dev/null +++ b/arch/arm64/kernel/irq.c @@ -0,0 +1,136 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Based on arch/arm/kernel/irq.c + * + * Copyright (C) 1992 Linus Torvalds + * Modifications for ARM processor Copyright (C) 1995-2000 Russell King. + * Support for Dynamic Tick Timer Copyright (C) 2004-2005 Nokia Corporation. + * Dynamic Tick Timer written by Tony Lindgren and + * Tuukka Tikkanen . + * Copyright (C) 2012 ARM Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Only access this in an NMI enter/exit */ +DEFINE_PER_CPU(struct nmi_ctx, nmi_contexts); + +DEFINE_PER_CPU(unsigned long *, irq_stack_ptr); + + +DECLARE_PER_CPU(unsigned long *, irq_shadow_call_stack_ptr); + +#ifdef CONFIG_SHADOW_CALL_STACK +DEFINE_PER_CPU(unsigned long *, irq_shadow_call_stack_ptr); +#endif + +static void init_irq_scs(void) +{ + int cpu; + + if (!scs_is_enabled()) + return; + + for_each_possible_cpu(cpu) + per_cpu(irq_shadow_call_stack_ptr, cpu) = + scs_alloc(cpu_to_node(cpu)); +} + +#ifdef CONFIG_VMAP_STACK +static void init_irq_stacks(void) +{ + int cpu; + unsigned long *p; + + for_each_possible_cpu(cpu) { + p = arch_alloc_vmap_stack(IRQ_STACK_SIZE, cpu_to_node(cpu)); + per_cpu(irq_stack_ptr, cpu) = p; + } +} +#else +/* irq stack only needs to be 16 byte aligned - not IRQ_STACK_SIZE aligned. */ +DEFINE_PER_CPU_ALIGNED(unsigned long [IRQ_STACK_SIZE/sizeof(long)], irq_stack); + +static void init_irq_stacks(void) +{ + int cpu; + + for_each_possible_cpu(cpu) + per_cpu(irq_stack_ptr, cpu) = per_cpu(irq_stack, cpu); +} +#endif + +#ifndef CONFIG_PREEMPT_RT +static void ____do_softirq(struct pt_regs *regs) +{ + __do_softirq(); +} + +void do_softirq_own_stack(void) +{ + call_on_irq_stack(NULL, ____do_softirq); +} +#endif + +static void default_handle_irq(struct pt_regs *regs) +{ + panic("IRQ taken without a root IRQ handler\n"); +} + +static void default_handle_fiq(struct pt_regs *regs) +{ + panic("FIQ taken without a root FIQ handler\n"); +} + +void (*handle_arch_irq)(struct pt_regs *) __ro_after_init = default_handle_irq; +void (*handle_arch_fiq)(struct pt_regs *) __ro_after_init = default_handle_fiq; + +int __init set_handle_irq(void (*handle_irq)(struct pt_regs *)) +{ + if (handle_arch_irq != default_handle_irq) + return -EBUSY; + + handle_arch_irq = handle_irq; + pr_info("Root IRQ handler: %ps\n", handle_irq); + return 0; +} + +int __init set_handle_fiq(void (*handle_fiq)(struct pt_regs *)) +{ + if (handle_arch_fiq != default_handle_fiq) + return -EBUSY; + + handle_arch_fiq = handle_fiq; + pr_info("Root FIQ handler: %ps\n", handle_fiq); + return 0; +} + +void __init init_IRQ(void) +{ + init_irq_stacks(); + init_irq_scs(); + irqchip_init(); + + if (system_uses_irq_prio_masking()) { + /* + * Now that we have a stack for our IRQ handler, set + * the PMR/PSR pair to a consistent state. 
+ */ + WARN_ON(read_sysreg(daif) & PSR_A_BIT); + local_daif_restore(DAIF_PROCCTX_NOIRQ); + } +} diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c new file mode 100644 index 0000000000..faf88ec9c4 --- /dev/null +++ b/arch/arm64/kernel/jump_label.c @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013 Huawei Ltd. + * Author: Jiang Liu + * + * Based on arch/arm/kernel/jump_label.c + */ +#include +#include +#include +#include + +void arch_jump_label_transform(struct jump_entry *entry, + enum jump_label_type type) +{ + void *addr = (void *)jump_entry_code(entry); + u32 insn; + + if (type == JUMP_LABEL_JMP) { + insn = aarch64_insn_gen_branch_imm(jump_entry_code(entry), + jump_entry_target(entry), + AARCH64_INSN_BRANCH_NOLINK); + } else { + insn = aarch64_insn_gen_nop(); + } + + aarch64_insn_patch_text_nosync(addr, insn); +} diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c new file mode 100644 index 0000000000..94a269cd1f --- /dev/null +++ b/arch/arm64/kernel/kaslr.c @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2016 Linaro Ltd + */ + +#include +#include +#include + +#include +#include + +u16 __initdata memstart_offset_seed; + +bool __ro_after_init __kaslr_is_enabled = false; + +void __init kaslr_init(void) +{ + if (cpuid_feature_extract_unsigned_field(arm64_sw_feature_override.val & + arm64_sw_feature_override.mask, + ARM64_SW_FEATURE_OVERRIDE_NOKASLR)) { + pr_info("KASLR disabled on command line\n"); + return; + } + + /* + * The KASLR offset modulo MIN_KIMG_ALIGN is taken from the physical + * placement of the image rather than from the seed, so a displacement + * of less than MIN_KIMG_ALIGN means that no seed was provided. + */ + if (kaslr_offset() < MIN_KIMG_ALIGN) { + pr_warn("KASLR disabled due to lack of seed\n"); + return; + } + + pr_info("KASLR enabled\n"); + __kaslr_is_enabled = true; +} diff --git a/arch/arm64/kernel/kexec_image.c b/arch/arm64/kernel/kexec_image.c new file mode 100644 index 0000000000..636be67151 --- /dev/null +++ b/arch/arm64/kernel/kexec_image.c @@ -0,0 +1,138 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Kexec image loader + + * Copyright (C) 2018 Linaro Limited + * Author: AKASHI Takahiro + */ + +#define pr_fmt(fmt) "kexec_file(Image): " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int image_probe(const char *kernel_buf, unsigned long kernel_len) +{ + const struct arm64_image_header *h = + (const struct arm64_image_header *)(kernel_buf); + + if (!h || (kernel_len < sizeof(*h))) + return -EINVAL; + + if (memcmp(&h->magic, ARM64_IMAGE_MAGIC, sizeof(h->magic))) + return -EINVAL; + + return 0; +} + +static void *image_load(struct kimage *image, + char *kernel, unsigned long kernel_len, + char *initrd, unsigned long initrd_len, + char *cmdline, unsigned long cmdline_len) +{ + struct arm64_image_header *h; + u64 flags, value; + bool be_image, be_kernel; + struct kexec_buf kbuf; + unsigned long text_offset, kernel_segment_number; + struct kexec_segment *kernel_segment; + int ret; + + /* + * We require a kernel with an unambiguous Image header. Per + * Documentation/arch/arm64/booting.rst, this is the case when image_size + * is non-zero (practically speaking, since v3.17). 
+ */ + h = (struct arm64_image_header *)kernel; + if (!h->image_size) + return ERR_PTR(-EINVAL); + + /* Check cpu features */ + flags = le64_to_cpu(h->flags); + be_image = arm64_image_flag_field(flags, ARM64_IMAGE_FLAG_BE); + be_kernel = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN); + if ((be_image != be_kernel) && !system_supports_mixed_endian()) + return ERR_PTR(-EINVAL); + + value = arm64_image_flag_field(flags, ARM64_IMAGE_FLAG_PAGE_SIZE); + if (((value == ARM64_IMAGE_FLAG_PAGE_SIZE_4K) && + !system_supports_4kb_granule()) || + ((value == ARM64_IMAGE_FLAG_PAGE_SIZE_64K) && + !system_supports_64kb_granule()) || + ((value == ARM64_IMAGE_FLAG_PAGE_SIZE_16K) && + !system_supports_16kb_granule())) + return ERR_PTR(-EINVAL); + + /* Load the kernel */ + kbuf.image = image; + kbuf.buf_min = 0; + kbuf.buf_max = ULONG_MAX; + kbuf.top_down = false; + + kbuf.buffer = kernel; + kbuf.bufsz = kernel_len; + kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; + kbuf.memsz = le64_to_cpu(h->image_size); + text_offset = le64_to_cpu(h->text_offset); + kbuf.buf_align = MIN_KIMG_ALIGN; + + /* Adjust kernel segment with TEXT_OFFSET */ + kbuf.memsz += text_offset; + + kernel_segment_number = image->nr_segments; + + /* + * The location of the kernel segment may make it impossible to satisfy + * the other segment requirements, so we try repeatedly to find a + * location that will work. + */ + while ((ret = kexec_add_buffer(&kbuf)) == 0) { + /* Try to load additional data */ + kernel_segment = &image->segment[kernel_segment_number]; + ret = load_other_segments(image, kernel_segment->mem, + kernel_segment->memsz, initrd, + initrd_len, cmdline); + if (!ret) + break; + + /* + * We couldn't find space for the other segments; erase the + * kernel segment and try the next available hole. + */ + image->nr_segments -= 1; + kbuf.buf_min = kernel_segment->mem + kernel_segment->memsz; + kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; + } + + if (ret) { + pr_err("Could not find any suitable kernel location!"); + return ERR_PTR(ret); + } + + kernel_segment = &image->segment[kernel_segment_number]; + kernel_segment->mem += text_offset; + kernel_segment->memsz -= text_offset; + image->start = kernel_segment->mem; + + pr_debug("Loaded kernel at 0x%lx bufsz=0x%lx memsz=0x%lx\n", + kernel_segment->mem, kbuf.bufsz, + kernel_segment->memsz); + + return NULL; +} + +const struct kexec_file_ops kexec_image_ops = { + .probe = image_probe, + .load = image_load, +#ifdef CONFIG_KEXEC_IMAGE_VERIFY_SIG + .verify_sig = kexec_kernel_verify_pe_sig, +#endif +}; diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c new file mode 100644 index 0000000000..4e1f983df3 --- /dev/null +++ b/arch/arm64/kernel/kgdb.c @@ -0,0 +1,358 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * AArch64 KGDB support + * + * Based on arch/arm/kernel/kgdb.c + * + * Copyright (C) 2013 Cavium Inc. 
+ * Author: Vijaya Kumar K + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = { + { "x0", 8, offsetof(struct pt_regs, regs[0])}, + { "x1", 8, offsetof(struct pt_regs, regs[1])}, + { "x2", 8, offsetof(struct pt_regs, regs[2])}, + { "x3", 8, offsetof(struct pt_regs, regs[3])}, + { "x4", 8, offsetof(struct pt_regs, regs[4])}, + { "x5", 8, offsetof(struct pt_regs, regs[5])}, + { "x6", 8, offsetof(struct pt_regs, regs[6])}, + { "x7", 8, offsetof(struct pt_regs, regs[7])}, + { "x8", 8, offsetof(struct pt_regs, regs[8])}, + { "x9", 8, offsetof(struct pt_regs, regs[9])}, + { "x10", 8, offsetof(struct pt_regs, regs[10])}, + { "x11", 8, offsetof(struct pt_regs, regs[11])}, + { "x12", 8, offsetof(struct pt_regs, regs[12])}, + { "x13", 8, offsetof(struct pt_regs, regs[13])}, + { "x14", 8, offsetof(struct pt_regs, regs[14])}, + { "x15", 8, offsetof(struct pt_regs, regs[15])}, + { "x16", 8, offsetof(struct pt_regs, regs[16])}, + { "x17", 8, offsetof(struct pt_regs, regs[17])}, + { "x18", 8, offsetof(struct pt_regs, regs[18])}, + { "x19", 8, offsetof(struct pt_regs, regs[19])}, + { "x20", 8, offsetof(struct pt_regs, regs[20])}, + { "x21", 8, offsetof(struct pt_regs, regs[21])}, + { "x22", 8, offsetof(struct pt_regs, regs[22])}, + { "x23", 8, offsetof(struct pt_regs, regs[23])}, + { "x24", 8, offsetof(struct pt_regs, regs[24])}, + { "x25", 8, offsetof(struct pt_regs, regs[25])}, + { "x26", 8, offsetof(struct pt_regs, regs[26])}, + { "x27", 8, offsetof(struct pt_regs, regs[27])}, + { "x28", 8, offsetof(struct pt_regs, regs[28])}, + { "x29", 8, offsetof(struct pt_regs, regs[29])}, + { "x30", 8, offsetof(struct pt_regs, regs[30])}, + { "sp", 8, offsetof(struct pt_regs, sp)}, + { "pc", 8, offsetof(struct pt_regs, pc)}, + /* + * struct pt_regs thinks PSTATE is 64-bits wide but gdb remote + * protocol disagrees. Therefore we must extract only the lower + * 32-bits. Look for the big comment in asm/kgdb.h for more + * detail. 
+ */ + { "pstate", 4, offsetof(struct pt_regs, pstate) +#ifdef CONFIG_CPU_BIG_ENDIAN + + 4 +#endif + }, + { "v0", 16, -1 }, + { "v1", 16, -1 }, + { "v2", 16, -1 }, + { "v3", 16, -1 }, + { "v4", 16, -1 }, + { "v5", 16, -1 }, + { "v6", 16, -1 }, + { "v7", 16, -1 }, + { "v8", 16, -1 }, + { "v9", 16, -1 }, + { "v10", 16, -1 }, + { "v11", 16, -1 }, + { "v12", 16, -1 }, + { "v13", 16, -1 }, + { "v14", 16, -1 }, + { "v15", 16, -1 }, + { "v16", 16, -1 }, + { "v17", 16, -1 }, + { "v18", 16, -1 }, + { "v19", 16, -1 }, + { "v20", 16, -1 }, + { "v21", 16, -1 }, + { "v22", 16, -1 }, + { "v23", 16, -1 }, + { "v24", 16, -1 }, + { "v25", 16, -1 }, + { "v26", 16, -1 }, + { "v27", 16, -1 }, + { "v28", 16, -1 }, + { "v29", 16, -1 }, + { "v30", 16, -1 }, + { "v31", 16, -1 }, + { "fpsr", 4, -1 }, + { "fpcr", 4, -1 }, +}; + +char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs) +{ + if (regno >= DBG_MAX_REG_NUM || regno < 0) + return NULL; + + if (dbg_reg_def[regno].offset != -1) + memcpy(mem, (void *)regs + dbg_reg_def[regno].offset, + dbg_reg_def[regno].size); + else + memset(mem, 0, dbg_reg_def[regno].size); + return dbg_reg_def[regno].name; +} + +int dbg_set_reg(int regno, void *mem, struct pt_regs *regs) +{ + if (regno >= DBG_MAX_REG_NUM || regno < 0) + return -EINVAL; + + if (dbg_reg_def[regno].offset != -1) + memcpy((void *)regs + dbg_reg_def[regno].offset, mem, + dbg_reg_def[regno].size); + return 0; +} + +void +sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task) +{ + struct cpu_context *cpu_context = &task->thread.cpu_context; + + /* Initialize to zero */ + memset((char *)gdb_regs, 0, NUMREGBYTES); + + gdb_regs[19] = cpu_context->x19; + gdb_regs[20] = cpu_context->x20; + gdb_regs[21] = cpu_context->x21; + gdb_regs[22] = cpu_context->x22; + gdb_regs[23] = cpu_context->x23; + gdb_regs[24] = cpu_context->x24; + gdb_regs[25] = cpu_context->x25; + gdb_regs[26] = cpu_context->x26; + gdb_regs[27] = cpu_context->x27; + gdb_regs[28] = cpu_context->x28; + gdb_regs[29] = cpu_context->fp; + + gdb_regs[31] = cpu_context->sp; + gdb_regs[32] = cpu_context->pc; +} + +void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc) +{ + regs->pc = pc; +} + +static int compiled_break; + +static void kgdb_arch_update_addr(struct pt_regs *regs, + char *remcom_in_buffer) +{ + unsigned long addr; + char *ptr; + + ptr = &remcom_in_buffer[1]; + if (kgdb_hex2long(&ptr, &addr)) + kgdb_arch_set_pc(regs, addr); + else if (compiled_break == 1) + kgdb_arch_set_pc(regs, regs->pc + 4); + + compiled_break = 0; +} + +int kgdb_arch_handle_exception(int exception_vector, int signo, + int err_code, char *remcom_in_buffer, + char *remcom_out_buffer, + struct pt_regs *linux_regs) +{ + int err; + + switch (remcom_in_buffer[0]) { + case 'D': + case 'k': + /* + * Packet D (Detach), k (kill). No special handling + * is required here. Handle same as c packet. + */ + case 'c': + /* + * Packet c (Continue) to continue executing. + * Set pc to required address. + * Try to read optional parameter and set pc. + * If this was a compiled breakpoint, we need to move + * to the next instruction else we will just breakpoint + * over and over again. + */ + kgdb_arch_update_addr(linux_regs, remcom_in_buffer); + atomic_set(&kgdb_cpu_doing_single_step, -1); + kgdb_single_step = 0; + + /* + * Received continue command, disable single step + */ + if (kernel_active_single_step()) + kernel_disable_single_step(); + + err = 0; + break; + case 's': + /* + * Update step address value with address passed + * with step packet. 
+ * On debug exception return PC is copied to ELR + * So just update PC. + * If no step address is passed, resume from the address + * pointed by PC. Do not update PC + */ + kgdb_arch_update_addr(linux_regs, remcom_in_buffer); + atomic_set(&kgdb_cpu_doing_single_step, raw_smp_processor_id()); + kgdb_single_step = 1; + + /* + * Enable single step handling + */ + if (!kernel_active_single_step()) + kernel_enable_single_step(linux_regs); + else + kernel_rewind_single_step(linux_regs); + err = 0; + break; + default: + err = -1; + } + return err; +} + +static int kgdb_brk_fn(struct pt_regs *regs, unsigned long esr) +{ + kgdb_handle_exception(1, SIGTRAP, 0, regs); + return DBG_HOOK_HANDLED; +} +NOKPROBE_SYMBOL(kgdb_brk_fn) + +static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned long esr) +{ + compiled_break = 1; + kgdb_handle_exception(1, SIGTRAP, 0, regs); + + return DBG_HOOK_HANDLED; +} +NOKPROBE_SYMBOL(kgdb_compiled_brk_fn); + +static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned long esr) +{ + if (!kgdb_single_step) + return DBG_HOOK_ERROR; + + kgdb_handle_exception(0, SIGTRAP, 0, regs); + return DBG_HOOK_HANDLED; +} +NOKPROBE_SYMBOL(kgdb_step_brk_fn); + +static struct break_hook kgdb_brkpt_hook = { + .fn = kgdb_brk_fn, + .imm = KGDB_DYN_DBG_BRK_IMM, +}; + +static struct break_hook kgdb_compiled_brkpt_hook = { + .fn = kgdb_compiled_brk_fn, + .imm = KGDB_COMPILED_DBG_BRK_IMM, +}; + +static struct step_hook kgdb_step_hook = { + .fn = kgdb_step_brk_fn +}; + +static int __kgdb_notify(struct die_args *args, unsigned long cmd) +{ + struct pt_regs *regs = args->regs; + + if (kgdb_handle_exception(1, args->signr, cmd, regs)) + return NOTIFY_DONE; + return NOTIFY_STOP; +} + +static int +kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr) +{ + unsigned long flags; + int ret; + + local_irq_save(flags); + ret = __kgdb_notify(ptr, cmd); + local_irq_restore(flags); + + return ret; +} + +static struct notifier_block kgdb_notifier = { + .notifier_call = kgdb_notify, + /* + * Want to be lowest priority + */ + .priority = -INT_MAX, +}; + +/* + * kgdb_arch_init - Perform any architecture specific initialization. + * This function will handle the initialization of any architecture + * specific callbacks. + */ +int kgdb_arch_init(void) +{ + int ret = register_die_notifier(&kgdb_notifier); + + if (ret != 0) + return ret; + + register_kernel_break_hook(&kgdb_brkpt_hook); + register_kernel_break_hook(&kgdb_compiled_brkpt_hook); + register_kernel_step_hook(&kgdb_step_hook); + return 0; +} + +/* + * kgdb_arch_exit - Perform any architecture specific uninitalization. + * This function will handle the uninitalization of any architecture + * specific callbacks, for dynamic registration and unregistration. 
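The break_hook registrations above work because each BRK instruction carries a 16-bit immediate that is reported in ESR_ELx when the exception is taken, so different users (kgdb dynamic breakpoints, compiled-in breakpoints, kprobes, BUG()) can each claim their own immediate. An illustrative dispatcher in the same spirit; the immediate values here are placeholders, not the kernel's brk-imm.h assignments:

#include <stdint.h>
#include <stdio.h>

#define ESR_ELx_EC_SHIFT        26
#define ESR_ELx_EC_BRK64        0x3c    /* BRK executed in AArch64 state */

/* Route a BRK by its immediate, read back from ESR_ELx ISS[15:0]. */
static void dispatch_brk(uint64_t esr, unsigned int kgdb_dyn_imm,
                         unsigned int kgdb_compiled_imm)
{
        unsigned int ec  = (esr >> ESR_ELx_EC_SHIFT) & 0x3f;
        unsigned int imm = esr & 0xffff;

        if (ec != ESR_ELx_EC_BRK64)
                return;

        if (imm == kgdb_dyn_imm)
                puts("dynamic kgdb breakpoint");
        else if (imm == kgdb_compiled_imm)
                puts("compiled-in kgdb breakpoint");
        else
                puts("BRK not claimed by kgdb");
}

int main(void)
{
        /* 0x400/0x401 are placeholder immediates for the two kgdb hooks. */
        uint64_t esr = ((uint64_t)ESR_ELx_EC_BRK64 << ESR_ELx_EC_SHIFT) | 0x401;

        dispatch_brk(esr, 0x400, 0x401);
        return 0;
}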
+ */ +void kgdb_arch_exit(void) +{ + unregister_kernel_break_hook(&kgdb_brkpt_hook); + unregister_kernel_break_hook(&kgdb_compiled_brkpt_hook); + unregister_kernel_step_hook(&kgdb_step_hook); + unregister_die_notifier(&kgdb_notifier); +} + +const struct kgdb_arch arch_kgdb_ops; + +int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt) +{ + int err; + + BUILD_BUG_ON(AARCH64_INSN_SIZE != BREAK_INSTR_SIZE); + + err = aarch64_insn_read((void *)bpt->bpt_addr, (u32 *)bpt->saved_instr); + if (err) + return err; + + return aarch64_insn_write((void *)bpt->bpt_addr, + (u32)AARCH64_BREAK_KGDB_DYN_DBG); +} + +int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt) +{ + return aarch64_insn_write((void *)bpt->bpt_addr, + *(u32 *)bpt->saved_instr); +} diff --git a/arch/arm64/kernel/kuser32.S b/arch/arm64/kernel/kuser32.S new file mode 100644 index 0000000000..af046ceac2 --- /dev/null +++ b/arch/arm64/kernel/kuser32.S @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * AArch32 user helpers. + * Based on the kuser helpers in arch/arm/kernel/entry-armv.S. + * + * Copyright (C) 2005-2011 Nicolas Pitre + * Copyright (C) 2012-2018 ARM Ltd. + * + * The kuser helpers below are mapped at a fixed address by + * aarch32_setup_additional_pages() and are provided for compatibility + * reasons with 32 bit (aarch32) applications that need them. + * + * See Documentation/arch/arm/kernel_user_helpers.rst for formal definitions. + */ + +#include + + .section .rodata + .align 5 + .globl __kuser_helper_start +__kuser_helper_start: + +__kuser_cmpxchg64: // 0xffff0f60 + .inst 0xe92d00f0 // push {r4, r5, r6, r7} + .inst 0xe1c040d0 // ldrd r4, r5, [r0] + .inst 0xe1c160d0 // ldrd r6, r7, [r1] + .inst 0xe1b20f9f // 1: ldrexd r0, r1, [r2] + .inst 0xe0303004 // eors r3, r0, r4 + .inst 0x00313005 // eoreqs r3, r1, r5 + .inst 0x01a23e96 // stlexdeq r3, r6, [r2] + .inst 0x03330001 // teqeq r3, #1 + .inst 0x0afffff9 // beq 1b + .inst 0xf57ff05b // dmb ish + .inst 0xe2730000 // rsbs r0, r3, #0 + .inst 0xe8bd00f0 // pop {r4, r5, r6, r7} + .inst 0xe12fff1e // bx lr + + .align 5 +__kuser_memory_barrier: // 0xffff0fa0 + .inst 0xf57ff05b // dmb ish + .inst 0xe12fff1e // bx lr + + .align 5 +__kuser_cmpxchg: // 0xffff0fc0 + .inst 0xe1923f9f // 1: ldrex r3, [r2] + .inst 0xe0533000 // subs r3, r3, r0 + .inst 0x01823e91 // stlexeq r3, r1, [r2] + .inst 0x03330001 // teqeq r3, #1 + .inst 0x0afffffa // beq 1b + .inst 0xf57ff05b // dmb ish + .inst 0xe2730000 // rsbs r0, r3, #0 + .inst 0xe12fff1e // bx lr + + .align 5 +__kuser_get_tls: // 0xffff0fe0 + .inst 0xee1d0f70 // mrc p15, 0, r0, c13, c0, 3 + .inst 0xe12fff1e // bx lr + .rep 5 + .word 0 + .endr + +__kuser_helper_version: // 0xffff0ffc + .word ((__kuser_helper_end - __kuser_helper_start) >> 5) + .globl __kuser_helper_end +__kuser_helper_end: diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c new file mode 100644 index 0000000000..078910db77 --- /dev/null +++ b/arch/arm64/kernel/machine_kexec.c @@ -0,0 +1,344 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * kexec for arm64 + * + * Copyright (C) Linaro. + * Copyright (C) Huawei Futurewei Technologies. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** + * kexec_image_info - For debugging output. 
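The kuser helpers added above in kuser32.S are reached by compat (AArch32) processes at fixed high addresses, following the ABI in Documentation/arch/arm/kernel_user_helpers.rst. A sketch of how a 32-bit program would call __kuser_cmpxchg; it must be built as an AArch32 binary and run on a kernel that maps the helpers:

#include <stdio.h>

typedef int (__kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define __kuser_cmpxchg (*(__kuser_cmpxchg_t *)0xffff0fc0)
#define __kuser_helper_version (*(int *)0xffff0ffc)

int main(void)
{
        volatile int v = 1;

        printf("kuser helper version: %d\n", __kuser_helper_version);

        /* Stores 2 and returns 0 only if v still held 1. */
        if (__kuser_cmpxchg(1, 2, &v) == 0)
                printf("cmpxchg succeeded, v = %d\n", v);
        return 0;
}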
+ */ +#define kexec_image_info(_i) _kexec_image_info(__func__, __LINE__, _i) +static void _kexec_image_info(const char *func, int line, + const struct kimage *kimage) +{ + unsigned long i; + + pr_debug("%s:%d:\n", func, line); + pr_debug(" kexec kimage info:\n"); + pr_debug(" type: %d\n", kimage->type); + pr_debug(" start: %lx\n", kimage->start); + pr_debug(" head: %lx\n", kimage->head); + pr_debug(" nr_segments: %lu\n", kimage->nr_segments); + pr_debug(" dtb_mem: %pa\n", &kimage->arch.dtb_mem); + pr_debug(" kern_reloc: %pa\n", &kimage->arch.kern_reloc); + pr_debug(" el2_vectors: %pa\n", &kimage->arch.el2_vectors); + + for (i = 0; i < kimage->nr_segments; i++) { + pr_debug(" segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n", + i, + kimage->segment[i].mem, + kimage->segment[i].mem + kimage->segment[i].memsz, + kimage->segment[i].memsz, + kimage->segment[i].memsz / PAGE_SIZE); + } +} + +void machine_kexec_cleanup(struct kimage *kimage) +{ + /* Empty routine needed to avoid build errors. */ +} + +/** + * machine_kexec_prepare - Prepare for a kexec reboot. + * + * Called from the core kexec code when a kernel image is loaded. + * Forbid loading a kexec kernel if we have no way of hotplugging cpus or cpus + * are stuck in the kernel. This avoids a panic once we hit machine_kexec(). + */ +int machine_kexec_prepare(struct kimage *kimage) +{ + if (kimage->type != KEXEC_TYPE_CRASH && cpus_are_stuck_in_kernel()) { + pr_err("Can't kexec: CPUs are stuck in the kernel.\n"); + return -EBUSY; + } + + return 0; +} + +/** + * kexec_segment_flush - Helper to flush the kimage segments to PoC. + */ +static void kexec_segment_flush(const struct kimage *kimage) +{ + unsigned long i; + + pr_debug("%s:\n", __func__); + + for (i = 0; i < kimage->nr_segments; i++) { + pr_debug(" segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n", + i, + kimage->segment[i].mem, + kimage->segment[i].mem + kimage->segment[i].memsz, + kimage->segment[i].memsz, + kimage->segment[i].memsz / PAGE_SIZE); + + dcache_clean_inval_poc( + (unsigned long)phys_to_virt(kimage->segment[i].mem), + (unsigned long)phys_to_virt(kimage->segment[i].mem) + + kimage->segment[i].memsz); + } +} + +/* Allocates pages for kexec page table */ +static void *kexec_page_alloc(void *arg) +{ + struct kimage *kimage = arg; + struct page *page = kimage_alloc_control_pages(kimage, 0); + void *vaddr = NULL; + + if (!page) + return NULL; + + vaddr = page_address(page); + memset(vaddr, 0, PAGE_SIZE); + + return vaddr; +} + +int machine_kexec_post_load(struct kimage *kimage) +{ + int rc; + pgd_t *trans_pgd; + void *reloc_code = page_to_virt(kimage->control_code_page); + long reloc_size; + struct trans_pgd_info info = { + .trans_alloc_page = kexec_page_alloc, + .trans_alloc_arg = kimage, + }; + + /* If in place, relocation is not used, only flush next kernel */ + if (kimage->head & IND_DONE) { + kexec_segment_flush(kimage); + kexec_image_info(kimage); + return 0; + } + + kimage->arch.el2_vectors = 0; + if (is_hyp_nvhe()) { + rc = trans_pgd_copy_el2_vectors(&info, + &kimage->arch.el2_vectors); + if (rc) + return rc; + } + + /* Create a copy of the linear map */ + trans_pgd = kexec_page_alloc(kimage); + if (!trans_pgd) + return -ENOMEM; + rc = trans_pgd_create_copy(&info, &trans_pgd, PAGE_OFFSET, PAGE_END); + if (rc) + return rc; + kimage->arch.ttbr1 = __pa(trans_pgd); + kimage->arch.zero_page = __pa_symbol(empty_zero_page); + + reloc_size = __relocate_new_kernel_end - __relocate_new_kernel_start; + memcpy(reloc_code, __relocate_new_kernel_start, reloc_size); + 
kimage->arch.kern_reloc = __pa(reloc_code); + rc = trans_pgd_idmap_page(&info, &kimage->arch.ttbr0, + &kimage->arch.t0sz, reloc_code); + if (rc) + return rc; + kimage->arch.phys_offset = virt_to_phys(kimage) - (long)kimage; + + /* Flush the reloc_code in preparation for its execution. */ + dcache_clean_inval_poc((unsigned long)reloc_code, + (unsigned long)reloc_code + reloc_size); + icache_inval_pou((uintptr_t)reloc_code, + (uintptr_t)reloc_code + reloc_size); + kexec_image_info(kimage); + + return 0; +} + +/** + * machine_kexec - Do the kexec reboot. + * + * Called from the core kexec code for a sys_reboot with LINUX_REBOOT_CMD_KEXEC. + */ +void machine_kexec(struct kimage *kimage) +{ + bool in_kexec_crash = (kimage == kexec_crash_image); + bool stuck_cpus = cpus_are_stuck_in_kernel(); + + /* + * New cpus may have become stuck_in_kernel after we loaded the image. + */ + BUG_ON(!in_kexec_crash && (stuck_cpus || (num_online_cpus() > 1))); + WARN(in_kexec_crash && (stuck_cpus || smp_crash_stop_failed()), + "Some CPUs may be stale, kdump will be unreliable.\n"); + + pr_info("Bye!\n"); + + local_daif_mask(); + + /* + * Both restart and kernel_reloc will shutdown the MMU, disable data + * caches. However, restart will start new kernel or purgatory directly, + * kernel_reloc contains the body of arm64_relocate_new_kernel + * In kexec case, kimage->start points to purgatory assuming that + * kernel entry and dtb address are embedded in purgatory by + * userspace (kexec-tools). + * In kexec_file case, the kernel starts directly without purgatory. + */ + if (kimage->head & IND_DONE) { + typeof(cpu_soft_restart) *restart; + + cpu_install_idmap(); + restart = (void *)__pa_symbol(cpu_soft_restart); + restart(is_hyp_nvhe(), kimage->start, kimage->arch.dtb_mem, + 0, 0); + } else { + void (*kernel_reloc)(struct kimage *kimage); + + if (is_hyp_nvhe()) + __hyp_set_vectors(kimage->arch.el2_vectors); + cpu_install_ttbr0(kimage->arch.ttbr0, kimage->arch.t0sz); + kernel_reloc = (void *)kimage->arch.kern_reloc; + kernel_reloc(kimage); + } + + BUG(); /* Should never get here. */ +} + +static void machine_kexec_mask_interrupts(void) +{ + unsigned int i; + struct irq_desc *desc; + + for_each_irq_desc(i, desc) { + struct irq_chip *chip; + int ret; + + chip = irq_desc_get_chip(desc); + if (!chip) + continue; + + /* + * First try to remove the active state. If this + * fails, try to EOI the interrupt. + */ + ret = irq_set_irqchip_state(i, IRQCHIP_STATE_ACTIVE, false); + + if (ret && irqd_irq_inprogress(&desc->irq_data) && + chip->irq_eoi) + chip->irq_eoi(&desc->irq_data); + + if (chip->irq_mask) + chip->irq_mask(&desc->irq_data); + + if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data)) + chip->irq_disable(&desc->irq_data); + } +} + +/** + * machine_crash_shutdown - shutdown non-crashing cpus and save registers + */ +void machine_crash_shutdown(struct pt_regs *regs) +{ + local_irq_disable(); + + /* shutdown non-crashing cpus */ + crash_smp_send_stop(); + + /* for crashing cpu */ + crash_save_cpu(regs, smp_processor_id()); + machine_kexec_mask_interrupts(); + + pr_info("Starting crashdump kernel...\n"); +} + +#ifdef CONFIG_HIBERNATION +/* + * To preserve the crash dump kernel image, the relevant memory segments + * should be mapped again around the hibernation. 
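machine_kexec() above is ultimately reached from userspace through reboot(2) with LINUX_REBOOT_CMD_KEXEC, once an image has been staged. A minimal trigger sketch, assuming CAP_SYS_BOOT and a previously loaded kexec image:

#include <sys/syscall.h>
#include <linux/reboot.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
        /* Jump into the loaded image (what "kexec -e" does under the hood). */
        if (syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
                    LINUX_REBOOT_CMD_KEXEC, NULL) == -1)
                perror("reboot(LINUX_REBOOT_CMD_KEXEC)");
        return 1;       /* only reached on failure */
}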
+ */ +void crash_prepare_suspend(void) +{ + if (kexec_crash_image) + arch_kexec_unprotect_crashkres(); +} + +void crash_post_resume(void) +{ + if (kexec_crash_image) + arch_kexec_protect_crashkres(); +} + +/* + * crash_is_nosave + * + * Return true only if a page is part of reserved memory for crash dump kernel, + * but does not hold any data of loaded kernel image. + * + * Note that all the pages in crash dump kernel memory have been initially + * marked as Reserved as memory was allocated via memblock_reserve(). + * + * In hibernation, the pages which are Reserved and yet "nosave" are excluded + * from the hibernation iamge. crash_is_nosave() does thich check for crash + * dump kernel and will reduce the total size of hibernation image. + */ + +bool crash_is_nosave(unsigned long pfn) +{ + int i; + phys_addr_t addr; + + if (!crashk_res.end) + return false; + + /* in reserved memory? */ + addr = __pfn_to_phys(pfn); + if ((addr < crashk_res.start) || (crashk_res.end < addr)) { + if (!crashk_low_res.end) + return false; + + if ((addr < crashk_low_res.start) || (crashk_low_res.end < addr)) + return false; + } + + if (!kexec_crash_image) + return true; + + /* not part of loaded kernel image? */ + for (i = 0; i < kexec_crash_image->nr_segments; i++) + if (addr >= kexec_crash_image->segment[i].mem && + addr < (kexec_crash_image->segment[i].mem + + kexec_crash_image->segment[i].memsz)) + return false; + + return true; +} + +void crash_free_reserved_phys_range(unsigned long begin, unsigned long end) +{ + unsigned long addr; + struct page *page; + + for (addr = begin; addr < end; addr += PAGE_SIZE) { + page = phys_to_page(addr); + free_reserved_page(page); + } +} +#endif /* CONFIG_HIBERNATION */ diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c new file mode 100644 index 0000000000..a11a6e14ba --- /dev/null +++ b/arch/arm64/kernel/machine_kexec_file.c @@ -0,0 +1,191 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * kexec_file for arm64 + * + * Copyright (C) 2018 Linaro Limited + * Author: AKASHI Takahiro + * + * Most code is derived from arm64 port of kexec-tools + */ + +#define pr_fmt(fmt) "kexec_file: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +const struct kexec_file_ops * const kexec_file_loaders[] = { + &kexec_image_ops, + NULL +}; + +int arch_kimage_file_post_load_cleanup(struct kimage *image) +{ + kvfree(image->arch.dtb); + image->arch.dtb = NULL; + + vfree(image->elf_headers); + image->elf_headers = NULL; + image->elf_headers_sz = 0; + + return kexec_image_post_load_cleanup_default(image); +} + +static int prepare_elf_headers(void **addr, unsigned long *sz) +{ + struct crash_mem *cmem; + unsigned int nr_ranges; + int ret; + u64 i; + phys_addr_t start, end; + + nr_ranges = 2; /* for exclusion of crashkernel region */ + for_each_mem_range(i, &start, &end) + nr_ranges++; + + cmem = kmalloc(struct_size(cmem, ranges, nr_ranges), GFP_KERNEL); + if (!cmem) + return -ENOMEM; + + cmem->max_nr_ranges = nr_ranges; + cmem->nr_ranges = 0; + for_each_mem_range(i, &start, &end) { + cmem->ranges[cmem->nr_ranges].start = start; + cmem->ranges[cmem->nr_ranges].end = end - 1; + cmem->nr_ranges++; + } + + /* Exclude crashkernel region */ + ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end); + if (ret) + goto out; + + if (crashk_low_res.end) { + ret = crash_exclude_mem_range(cmem, crashk_low_res.start, crashk_low_res.end); + if (ret) + goto out; + } + + ret = 
crash_prepare_elf64_headers(cmem, true, addr, sz); + +out: + kfree(cmem); + return ret; +} + +/* + * Tries to add the initrd and DTB to the image. If it is not possible to find + * valid locations, this function will undo changes to the image and return non + * zero. + */ +int load_other_segments(struct kimage *image, + unsigned long kernel_load_addr, + unsigned long kernel_size, + char *initrd, unsigned long initrd_len, + char *cmdline) +{ + struct kexec_buf kbuf; + void *headers, *dtb = NULL; + unsigned long headers_sz, initrd_load_addr = 0, dtb_len, + orig_segments = image->nr_segments; + int ret = 0; + + kbuf.image = image; + /* not allocate anything below the kernel */ + kbuf.buf_min = kernel_load_addr + kernel_size; + + /* load elf core header */ + if (image->type == KEXEC_TYPE_CRASH) { + ret = prepare_elf_headers(&headers, &headers_sz); + if (ret) { + pr_err("Preparing elf core header failed\n"); + goto out_err; + } + + kbuf.buffer = headers; + kbuf.bufsz = headers_sz; + kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; + kbuf.memsz = headers_sz; + kbuf.buf_align = SZ_64K; /* largest supported page size */ + kbuf.buf_max = ULONG_MAX; + kbuf.top_down = true; + + ret = kexec_add_buffer(&kbuf); + if (ret) { + vfree(headers); + goto out_err; + } + image->elf_headers = headers; + image->elf_load_addr = kbuf.mem; + image->elf_headers_sz = headers_sz; + + pr_debug("Loaded elf core header at 0x%lx bufsz=0x%lx memsz=0x%lx\n", + image->elf_load_addr, kbuf.bufsz, kbuf.memsz); + } + + /* load initrd */ + if (initrd) { + kbuf.buffer = initrd; + kbuf.bufsz = initrd_len; + kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; + kbuf.memsz = initrd_len; + kbuf.buf_align = 0; + /* within 1GB-aligned window of up to 32GB in size */ + kbuf.buf_max = round_down(kernel_load_addr, SZ_1G) + + (unsigned long)SZ_1G * 32; + kbuf.top_down = false; + + ret = kexec_add_buffer(&kbuf); + if (ret) + goto out_err; + initrd_load_addr = kbuf.mem; + + pr_debug("Loaded initrd at 0x%lx bufsz=0x%lx memsz=0x%lx\n", + initrd_load_addr, kbuf.bufsz, kbuf.memsz); + } + + /* load dtb */ + dtb = of_kexec_alloc_and_setup_fdt(image, initrd_load_addr, + initrd_len, cmdline, 0); + if (!dtb) { + pr_err("Preparing for new dtb failed\n"); + ret = -EINVAL; + goto out_err; + } + + /* trim it */ + fdt_pack(dtb); + dtb_len = fdt_totalsize(dtb); + kbuf.buffer = dtb; + kbuf.bufsz = dtb_len; + kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; + kbuf.memsz = dtb_len; + /* not across 2MB boundary */ + kbuf.buf_align = SZ_2M; + kbuf.buf_max = ULONG_MAX; + kbuf.top_down = true; + + ret = kexec_add_buffer(&kbuf); + if (ret) + goto out_err; + image->arch.dtb = dtb; + image->arch.dtb_mem = kbuf.mem; + + pr_debug("Loaded dtb at 0x%lx bufsz=0x%lx memsz=0x%lx\n", + kbuf.mem, kbuf.bufsz, kbuf.memsz); + + return 0; + +out_err: + image->nr_segments = orig_segments; + kvfree(dtb); + return ret; +} diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c new file mode 100644 index 0000000000..79200f21e1 --- /dev/null +++ b/arch/arm64/kernel/module-plts.c @@ -0,0 +1,368 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2014-2017 Linaro Ltd. 
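Before moving on to the module PLT code: load_other_segments() above runs in service of the kexec_file_load(2) syscall, which hands the kernel file descriptors rather than pre-built segments. A hypothetical invocation; the image and initrd paths and the command line are placeholders, and SYS_kexec_file_load assumes reasonably recent UAPI headers:

#include <sys/syscall.h>
#include <linux/kexec.h>
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
        const char *cmdline = "console=ttyAMA0 root=/dev/vda1";
        int kfd = open("/boot/Image", O_RDONLY);
        int ifd = open("/boot/initrd.img", O_RDONLY);

        if (kfd < 0 || ifd < 0) {
                perror("open");
                return 1;
        }

        /* cmdline_len must include the terminating NUL byte. */
        if (syscall(SYS_kexec_file_load, kfd, ifd,
                    strlen(cmdline) + 1, cmdline, 0UL) == -1) {
                perror("kexec_file_load");
                return 1;
        }
        puts("kexec image loaded; reboot with LINUX_REBOOT_CMD_KEXEC to start it");
        return 0;
}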
+ */ + +#include +#include +#include +#include +#include +#include + +static struct plt_entry __get_adrp_add_pair(u64 dst, u64 pc, + enum aarch64_insn_register reg) +{ + u32 adrp, add; + + adrp = aarch64_insn_gen_adr(pc, dst, reg, AARCH64_INSN_ADR_TYPE_ADRP); + add = aarch64_insn_gen_add_sub_imm(reg, reg, dst % SZ_4K, + AARCH64_INSN_VARIANT_64BIT, + AARCH64_INSN_ADSB_ADD); + + return (struct plt_entry){ cpu_to_le32(adrp), cpu_to_le32(add) }; +} + +struct plt_entry get_plt_entry(u64 dst, void *pc) +{ + struct plt_entry plt; + static u32 br; + + if (!br) + br = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_16, + AARCH64_INSN_BRANCH_NOLINK); + + plt = __get_adrp_add_pair(dst, (u64)pc, AARCH64_INSN_REG_16); + plt.br = cpu_to_le32(br); + + return plt; +} + +static bool plt_entries_equal(const struct plt_entry *a, + const struct plt_entry *b) +{ + u64 p, q; + + /* + * Check whether both entries refer to the same target: + * do the cheapest checks first. + * If the 'add' or 'br' opcodes are different, then the target + * cannot be the same. + */ + if (a->add != b->add || a->br != b->br) + return false; + + p = ALIGN_DOWN((u64)a, SZ_4K); + q = ALIGN_DOWN((u64)b, SZ_4K); + + /* + * If the 'adrp' opcodes are the same then we just need to check + * that they refer to the same 4k region. + */ + if (a->adrp == b->adrp && p == q) + return true; + + return (p + aarch64_insn_adrp_get_offset(le32_to_cpu(a->adrp))) == + (q + aarch64_insn_adrp_get_offset(le32_to_cpu(b->adrp))); +} + +u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs, + void *loc, const Elf64_Rela *rela, + Elf64_Sym *sym) +{ + struct mod_plt_sec *pltsec = !within_module_init((unsigned long)loc, mod) ? + &mod->arch.core : &mod->arch.init; + struct plt_entry *plt = (struct plt_entry *)sechdrs[pltsec->plt_shndx].sh_addr; + int i = pltsec->plt_num_entries; + int j = i - 1; + u64 val = sym->st_value + rela->r_addend; + + if (is_forbidden_offset_for_adrp(&plt[i].adrp)) + i++; + + plt[i] = get_plt_entry(val, &plt[i]); + + /* + * Check if the entry we just created is a duplicate. Given that the + * relocations are sorted, this will be the last entry we allocated. + * (if one exists). + */ + if (j >= 0 && plt_entries_equal(plt + i, plt + j)) + return (u64)&plt[j]; + + pltsec->plt_num_entries += i - j; + if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries)) + return 0; + + return (u64)&plt[i]; +} + +#ifdef CONFIG_ARM64_ERRATUM_843419 +u64 module_emit_veneer_for_adrp(struct module *mod, Elf64_Shdr *sechdrs, + void *loc, u64 val) +{ + struct mod_plt_sec *pltsec = !within_module_init((unsigned long)loc, mod) ? + &mod->arch.core : &mod->arch.init; + struct plt_entry *plt = (struct plt_entry *)sechdrs[pltsec->plt_shndx].sh_addr; + int i = pltsec->plt_num_entries++; + u32 br; + int rd; + + if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries)) + return 0; + + if (is_forbidden_offset_for_adrp(&plt[i].adrp)) + i = pltsec->plt_num_entries++; + + /* get the destination register of the ADRP instruction */ + rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, + le32_to_cpup((__le32 *)loc)); + + br = aarch64_insn_gen_branch_imm((u64)&plt[i].br, (u64)loc + 4, + AARCH64_INSN_BRANCH_NOLINK); + + plt[i] = __get_adrp_add_pair(val, (u64)&plt[i], rd); + plt[i].br = cpu_to_le32(br); + + return (u64)&plt[i]; +} +#endif + +#define cmp_3way(a, b) ((a) < (b) ? 
-1 : (a) > (b)) + +static int cmp_rela(const void *a, const void *b) +{ + const Elf64_Rela *x = a, *y = b; + int i; + + /* sort by type, symbol index and addend */ + i = cmp_3way(ELF64_R_TYPE(x->r_info), ELF64_R_TYPE(y->r_info)); + if (i == 0) + i = cmp_3way(ELF64_R_SYM(x->r_info), ELF64_R_SYM(y->r_info)); + if (i == 0) + i = cmp_3way(x->r_addend, y->r_addend); + return i; +} + +static bool duplicate_rel(const Elf64_Rela *rela, int num) +{ + /* + * Entries are sorted by type, symbol index and addend. That means + * that, if a duplicate entry exists, it must be in the preceding + * slot. + */ + return num > 0 && cmp_rela(rela + num, rela + num - 1) == 0; +} + +static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num, + Elf64_Word dstidx, Elf_Shdr *dstsec) +{ + unsigned int ret = 0; + Elf64_Sym *s; + int i; + + for (i = 0; i < num; i++) { + u64 min_align; + + switch (ELF64_R_TYPE(rela[i].r_info)) { + case R_AARCH64_JUMP26: + case R_AARCH64_CALL26: + /* + * We only have to consider branch targets that resolve + * to symbols that are defined in a different section. + * This is not simply a heuristic, it is a fundamental + * limitation, since there is no guaranteed way to emit + * PLT entries sufficiently close to the branch if the + * section size exceeds the range of a branch + * instruction. So ignore relocations against defined + * symbols if they live in the same section as the + * relocation target. + */ + s = syms + ELF64_R_SYM(rela[i].r_info); + if (s->st_shndx == dstidx) + break; + + /* + * Jump relocations with non-zero addends against + * undefined symbols are supported by the ELF spec, but + * do not occur in practice (e.g., 'jump n bytes past + * the entry point of undefined function symbol f'). + * So we need to support them, but there is no need to + * take them into consideration when trying to optimize + * this code. So let's only check for duplicates when + * the addend is zero: this allows us to record the PLT + * entry address in the symbol table itself, rather than + * having to search the list for duplicates each time we + * emit one. + */ + if (rela[i].r_addend != 0 || !duplicate_rel(rela, i)) + ret++; + break; + case R_AARCH64_ADR_PREL_PG_HI21_NC: + case R_AARCH64_ADR_PREL_PG_HI21: + if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) || + !cpus_have_const_cap(ARM64_WORKAROUND_843419)) + break; + + /* + * Determine the minimal safe alignment for this ADRP + * instruction: the section alignment at which it is + * guaranteed not to appear at a vulnerable offset. + * + * This comes down to finding the least significant zero + * bit in bits [11:3] of the section offset, and + * increasing the section's alignment so that the + * resulting address of this instruction is guaranteed + * to equal the offset in that particular bit (as well + * as all less significant bits). This ensures that the + * address modulo 4 KB != 0xfff8 or 0xfffc (which would + * have all ones in bits [11:3]) + */ + min_align = 2ULL << ffz(rela[i].r_offset | 0x7); + + /* + * Allocate veneer space for each ADRP that may appear + * at a vulnerable offset nonetheless. At relocation + * time, some of these will remain unused since some + * ADRP instructions can be patched to ADR instructions + * instead. 
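The min_align computation described above can be shown with a small standalone calculation; ffz() is modelled with a compiler builtin and SZ_4K is spelled out, so this is a sketch of the arithmetic rather than the kernel helper:

#include <stdint.h>
#include <stdio.h>

#define SZ_4K   4096ull

/* find-first-zero: index of the least significant 0 bit */
static unsigned int ffz64(uint64_t x)
{
        return (unsigned int)__builtin_ctzll(~x);
}

int main(void)
{
        uint64_t offsets[] = { 0x0100, 0x0ff8, 0x1ffc };

        for (unsigned int i = 0; i < 3; i++) {
                uint64_t min_align = 2ull << ffz64(offsets[i] | 0x7);

                if (min_align > SZ_4K)
                        printf("offset 0x%llx: bits [11:3] all ones, needs a veneer\n",
                               (unsigned long long)offsets[i]);
                else
                        printf("offset 0x%llx: align section to %llu bytes\n",
                               (unsigned long long)offsets[i],
                               (unsigned long long)min_align);
        }
        return 0;
}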
+ */ + if (min_align > SZ_4K) + ret++; + else + dstsec->sh_addralign = max(dstsec->sh_addralign, + min_align); + break; + } + } + + if (IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) && + cpus_have_const_cap(ARM64_WORKAROUND_843419)) + /* + * Add some slack so we can skip PLT slots that may trigger + * the erratum due to the placement of the ADRP instruction. + */ + ret += DIV_ROUND_UP(ret, (SZ_4K / sizeof(struct plt_entry))); + + return ret; +} + +static bool branch_rela_needs_plt(Elf64_Sym *syms, Elf64_Rela *rela, + Elf64_Word dstidx) +{ + + Elf64_Sym *s = syms + ELF64_R_SYM(rela->r_info); + + if (s->st_shndx == dstidx) + return false; + + return ELF64_R_TYPE(rela->r_info) == R_AARCH64_JUMP26 || + ELF64_R_TYPE(rela->r_info) == R_AARCH64_CALL26; +} + +/* Group branch PLT relas at the front end of the array. */ +static int partition_branch_plt_relas(Elf64_Sym *syms, Elf64_Rela *rela, + int numrels, Elf64_Word dstidx) +{ + int i = 0, j = numrels - 1; + + while (i < j) { + if (branch_rela_needs_plt(syms, &rela[i], dstidx)) + i++; + else if (branch_rela_needs_plt(syms, &rela[j], dstidx)) + swap(rela[i], rela[j]); + else + j--; + } + + return i; +} + +int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, + char *secstrings, struct module *mod) +{ + unsigned long core_plts = 0; + unsigned long init_plts = 0; + Elf64_Sym *syms = NULL; + Elf_Shdr *pltsec, *tramp = NULL; + int i; + + /* + * Find the empty .plt section so we can expand it to store the PLT + * entries. Record the symtab address as well. + */ + for (i = 0; i < ehdr->e_shnum; i++) { + if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt")) + mod->arch.core.plt_shndx = i; + else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt")) + mod->arch.init.plt_shndx = i; + else if (!strcmp(secstrings + sechdrs[i].sh_name, + ".text.ftrace_trampoline")) + tramp = sechdrs + i; + else if (sechdrs[i].sh_type == SHT_SYMTAB) + syms = (Elf64_Sym *)sechdrs[i].sh_addr; + } + + if (!mod->arch.core.plt_shndx || !mod->arch.init.plt_shndx) { + pr_err("%s: module PLT section(s) missing\n", mod->name); + return -ENOEXEC; + } + if (!syms) { + pr_err("%s: module symtab section missing\n", mod->name); + return -ENOEXEC; + } + + for (i = 0; i < ehdr->e_shnum; i++) { + Elf64_Rela *rels = (void *)ehdr + sechdrs[i].sh_offset; + int nents, numrels = sechdrs[i].sh_size / sizeof(Elf64_Rela); + Elf64_Shdr *dstsec = sechdrs + sechdrs[i].sh_info; + + if (sechdrs[i].sh_type != SHT_RELA) + continue; + + /* ignore relocations that operate on non-exec sections */ + if (!(dstsec->sh_flags & SHF_EXECINSTR)) + continue; + + /* + * sort branch relocations requiring a PLT by type, symbol index + * and addend + */ + nents = partition_branch_plt_relas(syms, rels, numrels, + sechdrs[i].sh_info); + if (nents) + sort(rels, nents, sizeof(Elf64_Rela), cmp_rela, NULL); + + if (!module_init_layout_section(secstrings + dstsec->sh_name)) + core_plts += count_plts(syms, rels, numrels, + sechdrs[i].sh_info, dstsec); + else + init_plts += count_plts(syms, rels, numrels, + sechdrs[i].sh_info, dstsec); + } + + pltsec = sechdrs + mod->arch.core.plt_shndx; + pltsec->sh_type = SHT_NOBITS; + pltsec->sh_flags = SHF_EXECINSTR | SHF_ALLOC; + pltsec->sh_addralign = L1_CACHE_BYTES; + pltsec->sh_size = (core_plts + 1) * sizeof(struct plt_entry); + mod->arch.core.plt_num_entries = 0; + mod->arch.core.plt_max_entries = core_plts; + + pltsec = sechdrs + mod->arch.init.plt_shndx; + pltsec->sh_type = SHT_NOBITS; + pltsec->sh_flags = SHF_EXECINSTR | SHF_ALLOC; + pltsec->sh_addralign = L1_CACHE_BYTES; + 
pltsec->sh_size = (init_plts + 1) * sizeof(struct plt_entry); + mod->arch.init.plt_num_entries = 0; + mod->arch.init.plt_max_entries = init_plts; + + if (tramp) { + tramp->sh_type = SHT_NOBITS; + tramp->sh_flags = SHF_EXECINSTR | SHF_ALLOC; + tramp->sh_addralign = __alignof__(struct plt_entry); + tramp->sh_size = NR_FTRACE_PLTS * sizeof(struct plt_entry); + } + + return 0; +} diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c new file mode 100644 index 0000000000..dd85129759 --- /dev/null +++ b/arch/arm64/kernel/module.c @@ -0,0 +1,602 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * AArch64 loadable module support. + * + * Copyright (C) 2012 ARM Limited + * + * Author: Will Deacon + */ + +#define pr_fmt(fmt) "Modules: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +static u64 module_direct_base __ro_after_init = 0; +static u64 module_plt_base __ro_after_init = 0; + +/* + * Choose a random page-aligned base address for a window of 'size' bytes which + * entirely contains the interval [start, end - 1]. + */ +static u64 __init random_bounding_box(u64 size, u64 start, u64 end) +{ + u64 max_pgoff, pgoff; + + if ((end - start) >= size) + return 0; + + max_pgoff = (size - (end - start)) / PAGE_SIZE; + pgoff = get_random_u32_inclusive(0, max_pgoff); + + return start - pgoff * PAGE_SIZE; +} + +/* + * Modules may directly reference data and text anywhere within the kernel + * image and other modules. References using PREL32 relocations have a +/-2G + * range, and so we need to ensure that the entire kernel image and all modules + * fall within a 2G window such that these are always within range. + * + * Modules may directly branch to functions and code within the kernel text, + * and to functions and code within other modules. These branches will use + * CALL26/JUMP26 relocations with a +/-128M range. Without PLTs, we must ensure + * that the entire kernel text and all module text falls within a 128M window + * such that these are always within range. With PLTs, we can expand this to a + * 2G window. + * + * We chose the 128M region to surround the entire kernel image (rather than + * just the text) as using the same bounds for the 128M and 2G regions ensures + * by construction that we never select a 128M region that is not a subset of + * the 2G region. For very large and unusual kernel configurations this means + * we may fall back to PLTs where they could have been avoided, but this keeps + * the logic significantly simpler. + */ +static int __init module_init_limits(void) +{ + u64 kernel_end = (u64)_end; + u64 kernel_start = (u64)_text; + u64 kernel_size = kernel_end - kernel_start; + + /* + * The default modules region is placed immediately below the kernel + * image, and is large enough to use the full 2G relocation range. 
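random_bounding_box() above picks a page-aligned window base so that the kernel image and the modules area share one branch-range window. A userspace re-statement of the same arithmetic, with rand() standing in for get_random_u32_inclusive() and made-up example addresses:

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#define PAGE_SIZE 4096ull

static uint64_t random_bounding_box(uint64_t size, uint64_t start, uint64_t end)
{
        uint64_t max_pgoff, pgoff;

        if (end - start >= size)
                return 0;       /* the interval already fills the window */

        /* How many whole pages the window may slide down past 'start'. */
        max_pgoff = (size - (end - start)) / PAGE_SIZE;
        pgoff = (uint64_t)rand() % (max_pgoff + 1);

        return start - pgoff * PAGE_SIZE;
}

int main(void)
{
        /* e.g. a 40 MiB kernel image randomized inside a 128 MiB branch window */
        uint64_t base = random_bounding_box(128ull << 20,
                                            0xffff800008000000ull,
                                            0xffff80000a800000ull);

        printf("example window base = 0x%llx\n", (unsigned long long)base);
        return 0;
}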
+ */ + BUILD_BUG_ON(KIMAGE_VADDR != MODULES_END); + BUILD_BUG_ON(MODULES_VSIZE < SZ_2G); + + if (!kaslr_enabled()) { + if (kernel_size < SZ_128M) + module_direct_base = kernel_end - SZ_128M; + if (kernel_size < SZ_2G) + module_plt_base = kernel_end - SZ_2G; + } else { + u64 min = kernel_start; + u64 max = kernel_end; + + if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) { + pr_info("2G module region forced by RANDOMIZE_MODULE_REGION_FULL\n"); + } else { + module_direct_base = random_bounding_box(SZ_128M, min, max); + if (module_direct_base) { + min = module_direct_base; + max = module_direct_base + SZ_128M; + } + } + + module_plt_base = random_bounding_box(SZ_2G, min, max); + } + + pr_info("%llu pages in range for non-PLT usage", + module_direct_base ? (SZ_128M - kernel_size) / PAGE_SIZE : 0); + pr_info("%llu pages in range for PLT usage", + module_plt_base ? (SZ_2G - kernel_size) / PAGE_SIZE : 0); + + return 0; +} +subsys_initcall(module_init_limits); + +void *module_alloc(unsigned long size) +{ + void *p = NULL; + + /* + * Where possible, prefer to allocate within direct branch range of the + * kernel such that no PLTs are necessary. + */ + if (module_direct_base) { + p = __vmalloc_node_range(size, MODULE_ALIGN, + module_direct_base, + module_direct_base + SZ_128M, + GFP_KERNEL | __GFP_NOWARN, + PAGE_KERNEL, 0, NUMA_NO_NODE, + __builtin_return_address(0)); + } + + if (!p && module_plt_base) { + p = __vmalloc_node_range(size, MODULE_ALIGN, + module_plt_base, + module_plt_base + SZ_2G, + GFP_KERNEL | __GFP_NOWARN, + PAGE_KERNEL, 0, NUMA_NO_NODE, + __builtin_return_address(0)); + } + + if (!p) { + pr_warn_ratelimited("%s: unable to allocate memory\n", + __func__); + } + + if (p && (kasan_alloc_module_shadow(p, size, GFP_KERNEL) < 0)) { + vfree(p); + return NULL; + } + + /* Memory is intended to be executable, reset the pointer tag. */ + return kasan_reset_tag(p); +} + +enum aarch64_reloc_op { + RELOC_OP_NONE, + RELOC_OP_ABS, + RELOC_OP_PREL, + RELOC_OP_PAGE, +}; + +static u64 do_reloc(enum aarch64_reloc_op reloc_op, __le32 *place, u64 val) +{ + switch (reloc_op) { + case RELOC_OP_ABS: + return val; + case RELOC_OP_PREL: + return val - (u64)place; + case RELOC_OP_PAGE: + return (val & ~0xfff) - ((u64)place & ~0xfff); + case RELOC_OP_NONE: + return 0; + } + + pr_err("do_reloc: unknown relocation operation %d\n", reloc_op); + return 0; +} + +static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len) +{ + s64 sval = do_reloc(op, place, val); + + /* + * The ELF psABI for AArch64 documents the 16-bit and 32-bit place + * relative and absolute relocations as having a range of [-2^15, 2^16) + * or [-2^31, 2^32), respectively. However, in order to be able to + * detect overflows reliably, we have to choose whether we interpret + * such quantities as signed or as unsigned, and stick with it. + * The way we organize our address space requires a signed + * interpretation of 32-bit relative references, so let's use that + * for all R_AARCH64_PRELxx relocations. This means our upper + * bound for overflow detection should be Sxx_MAX rather than Uxx_MAX. 
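To make the signed-interpretation point concrete, here is the PREL32 case in isolation: the patched word is the byte distance from the place being relocated to the symbol, and it must fit in a signed 32-bit range. A standalone sketch, not the kernel helper itself:

#include <stdint.h>
#include <stdio.h>

/* PREL32: value is the distance from the patched location to the symbol. */
static int prel32(uint64_t place, uint64_t val, int32_t *out)
{
        int64_t sval = (int64_t)(val - place);

        if (sval < INT32_MIN || sval > INT32_MAX)
                return -1;      /* -ERANGE in the kernel */
        *out = (int32_t)sval;
        return 0;
}

int main(void)
{
        int32_t rel;

        if (prel32(0xffff000010000000ull, 0xffff000010001234ull, &rel) == 0)
                printf("patched word: 0x%x (%d bytes forward)\n",
                       (uint32_t)rel, rel);
        return 0;
}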
+ */ + + switch (len) { + case 16: + *(s16 *)place = sval; + switch (op) { + case RELOC_OP_ABS: + if (sval < 0 || sval > U16_MAX) + return -ERANGE; + break; + case RELOC_OP_PREL: + if (sval < S16_MIN || sval > S16_MAX) + return -ERANGE; + break; + default: + pr_err("Invalid 16-bit data relocation (%d)\n", op); + return 0; + } + break; + case 32: + *(s32 *)place = sval; + switch (op) { + case RELOC_OP_ABS: + if (sval < 0 || sval > U32_MAX) + return -ERANGE; + break; + case RELOC_OP_PREL: + if (sval < S32_MIN || sval > S32_MAX) + return -ERANGE; + break; + default: + pr_err("Invalid 32-bit data relocation (%d)\n", op); + return 0; + } + break; + case 64: + *(s64 *)place = sval; + break; + default: + pr_err("Invalid length (%d) for data relocation\n", len); + return 0; + } + return 0; +} + +enum aarch64_insn_movw_imm_type { + AARCH64_INSN_IMM_MOVNZ, + AARCH64_INSN_IMM_MOVKZ, +}; + +static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val, + int lsb, enum aarch64_insn_movw_imm_type imm_type) +{ + u64 imm; + s64 sval; + u32 insn = le32_to_cpu(*place); + + sval = do_reloc(op, place, val); + imm = sval >> lsb; + + if (imm_type == AARCH64_INSN_IMM_MOVNZ) { + /* + * For signed MOVW relocations, we have to manipulate the + * instruction encoding depending on whether or not the + * immediate is less than zero. + */ + insn &= ~(3 << 29); + if (sval >= 0) { + /* >=0: Set the instruction to MOVZ (opcode 10b). */ + insn |= 2 << 29; + } else { + /* + * <0: Set the instruction to MOVN (opcode 00b). + * Since we've masked the opcode already, we + * don't need to do anything other than + * inverting the new immediate field. + */ + imm = ~imm; + } + } + + /* Update the instruction with the new encoding. */ + insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm); + *place = cpu_to_le32(insn); + + if (imm > U16_MAX) + return -ERANGE; + + return 0; +} + +static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val, + int lsb, int len, enum aarch64_insn_imm_type imm_type) +{ + u64 imm, imm_mask; + s64 sval; + u32 insn = le32_to_cpu(*place); + + /* Calculate the relocation value. */ + sval = do_reloc(op, place, val); + sval >>= lsb; + + /* Extract the value bits and shift them to bit 0. */ + imm_mask = (BIT(lsb + len) - 1) >> lsb; + imm = sval & imm_mask; + + /* Update the instruction's immediate field. */ + insn = aarch64_insn_encode_immediate(imm_type, insn, imm); + *place = cpu_to_le32(insn); + + /* + * Extract the upper value bits (including the sign bit) and + * shift them to bit 0. + */ + sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1); + + /* + * Overflow has occurred if the upper bits are not all equal to + * the sign bit of the value. 
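The overflow test used below, "(u64)(sval + 1) >= 2", is compact enough to be easy to misread: once the immediate bits have been shifted out, what remains (including the sign bit) must be all zeros or all ones. A standalone check with the same arithmetic, using the CALL26 parameters as an example:

#include <stdint.h>
#include <stdio.h>

/* Does 'sval' fit in a 'len'-bit immediate scaled by 2^lsb? */
static int fits(int64_t sval, int lsb, int len)
{
        uint64_t imm_mask = (1ull << len) - 1;  /* value bits after the shift */
        int64_t hi;

        sval >>= lsb;
        hi = (int64_t)((uint64_t)sval & ~(imm_mask >> 1)) >> (len - 1);
        return (uint64_t)(hi + 1) < 2;          /* hi must be 0 or -1 */
}

int main(void)
{
        /* A 26-bit branch immediate covers +/-128 MiB of byte offsets. */
        printf("%d\n", fits(0x7fffffc, 2, 26));         /* 1: in range  */
        printf("%d\n", fits(0x12345678, 2, 26));        /* 0: overflow  */
        return 0;
}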
+ */ + if ((u64)(sval + 1) >= 2) + return -ERANGE; + + return 0; +} + +static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs, + __le32 *place, u64 val) +{ + u32 insn; + + if (!is_forbidden_offset_for_adrp(place)) + return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21, + AARCH64_INSN_IMM_ADR); + + /* patch ADRP to ADR if it is in range */ + if (!reloc_insn_imm(RELOC_OP_PREL, place, val & ~0xfff, 0, 21, + AARCH64_INSN_IMM_ADR)) { + insn = le32_to_cpu(*place); + insn &= ~BIT(31); + } else { + /* out of range for ADR -> emit a veneer */ + val = module_emit_veneer_for_adrp(mod, sechdrs, place, val & ~0xfff); + if (!val) + return -ENOEXEC; + insn = aarch64_insn_gen_branch_imm((u64)place, val, + AARCH64_INSN_BRANCH_NOLINK); + } + + *place = cpu_to_le32(insn); + return 0; +} + +int apply_relocate_add(Elf64_Shdr *sechdrs, + const char *strtab, + unsigned int symindex, + unsigned int relsec, + struct module *me) +{ + unsigned int i; + int ovf; + bool overflow_check; + Elf64_Sym *sym; + void *loc; + u64 val; + Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr; + + for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { + /* loc corresponds to P in the AArch64 ELF document. */ + loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + + rel[i].r_offset; + + /* sym is the ELF symbol we're referring to. */ + sym = (Elf64_Sym *)sechdrs[symindex].sh_addr + + ELF64_R_SYM(rel[i].r_info); + + /* val corresponds to (S + A) in the AArch64 ELF document. */ + val = sym->st_value + rel[i].r_addend; + + /* Check for overflow by default. */ + overflow_check = true; + + /* Perform the static relocation. */ + switch (ELF64_R_TYPE(rel[i].r_info)) { + /* Null relocations. */ + case R_ARM_NONE: + case R_AARCH64_NONE: + ovf = 0; + break; + + /* Data relocations. */ + case R_AARCH64_ABS64: + overflow_check = false; + ovf = reloc_data(RELOC_OP_ABS, loc, val, 64); + break; + case R_AARCH64_ABS32: + ovf = reloc_data(RELOC_OP_ABS, loc, val, 32); + break; + case R_AARCH64_ABS16: + ovf = reloc_data(RELOC_OP_ABS, loc, val, 16); + break; + case R_AARCH64_PREL64: + overflow_check = false; + ovf = reloc_data(RELOC_OP_PREL, loc, val, 64); + break; + case R_AARCH64_PREL32: + ovf = reloc_data(RELOC_OP_PREL, loc, val, 32); + break; + case R_AARCH64_PREL16: + ovf = reloc_data(RELOC_OP_PREL, loc, val, 16); + break; + + /* MOVW instruction relocations. */ + case R_AARCH64_MOVW_UABS_G0_NC: + overflow_check = false; + fallthrough; + case R_AARCH64_MOVW_UABS_G0: + ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0, + AARCH64_INSN_IMM_MOVKZ); + break; + case R_AARCH64_MOVW_UABS_G1_NC: + overflow_check = false; + fallthrough; + case R_AARCH64_MOVW_UABS_G1: + ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16, + AARCH64_INSN_IMM_MOVKZ); + break; + case R_AARCH64_MOVW_UABS_G2_NC: + overflow_check = false; + fallthrough; + case R_AARCH64_MOVW_UABS_G2: + ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32, + AARCH64_INSN_IMM_MOVKZ); + break; + case R_AARCH64_MOVW_UABS_G3: + /* We're using the top bits so we can't overflow. 
*/ + overflow_check = false; + ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48, + AARCH64_INSN_IMM_MOVKZ); + break; + case R_AARCH64_MOVW_SABS_G0: + ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0, + AARCH64_INSN_IMM_MOVNZ); + break; + case R_AARCH64_MOVW_SABS_G1: + ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16, + AARCH64_INSN_IMM_MOVNZ); + break; + case R_AARCH64_MOVW_SABS_G2: + ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32, + AARCH64_INSN_IMM_MOVNZ); + break; + case R_AARCH64_MOVW_PREL_G0_NC: + overflow_check = false; + ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0, + AARCH64_INSN_IMM_MOVKZ); + break; + case R_AARCH64_MOVW_PREL_G0: + ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0, + AARCH64_INSN_IMM_MOVNZ); + break; + case R_AARCH64_MOVW_PREL_G1_NC: + overflow_check = false; + ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16, + AARCH64_INSN_IMM_MOVKZ); + break; + case R_AARCH64_MOVW_PREL_G1: + ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16, + AARCH64_INSN_IMM_MOVNZ); + break; + case R_AARCH64_MOVW_PREL_G2_NC: + overflow_check = false; + ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32, + AARCH64_INSN_IMM_MOVKZ); + break; + case R_AARCH64_MOVW_PREL_G2: + ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32, + AARCH64_INSN_IMM_MOVNZ); + break; + case R_AARCH64_MOVW_PREL_G3: + /* We're using the top bits so we can't overflow. */ + overflow_check = false; + ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48, + AARCH64_INSN_IMM_MOVNZ); + break; + + /* Immediate instruction relocations. */ + case R_AARCH64_LD_PREL_LO19: + ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19, + AARCH64_INSN_IMM_19); + break; + case R_AARCH64_ADR_PREL_LO21: + ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21, + AARCH64_INSN_IMM_ADR); + break; + case R_AARCH64_ADR_PREL_PG_HI21_NC: + overflow_check = false; + fallthrough; + case R_AARCH64_ADR_PREL_PG_HI21: + ovf = reloc_insn_adrp(me, sechdrs, loc, val); + if (ovf && ovf != -ERANGE) + return ovf; + break; + case R_AARCH64_ADD_ABS_LO12_NC: + case R_AARCH64_LDST8_ABS_LO12_NC: + overflow_check = false; + ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12, + AARCH64_INSN_IMM_12); + break; + case R_AARCH64_LDST16_ABS_LO12_NC: + overflow_check = false; + ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11, + AARCH64_INSN_IMM_12); + break; + case R_AARCH64_LDST32_ABS_LO12_NC: + overflow_check = false; + ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10, + AARCH64_INSN_IMM_12); + break; + case R_AARCH64_LDST64_ABS_LO12_NC: + overflow_check = false; + ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9, + AARCH64_INSN_IMM_12); + break; + case R_AARCH64_LDST128_ABS_LO12_NC: + overflow_check = false; + ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8, + AARCH64_INSN_IMM_12); + break; + case R_AARCH64_TSTBR14: + ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14, + AARCH64_INSN_IMM_14); + break; + case R_AARCH64_CONDBR19: + ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19, + AARCH64_INSN_IMM_19); + break; + case R_AARCH64_JUMP26: + case R_AARCH64_CALL26: + ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26, + AARCH64_INSN_IMM_26); + if (ovf == -ERANGE) { + val = module_emit_plt_entry(me, sechdrs, loc, &rel[i], sym); + if (!val) + return -ENOEXEC; + ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, + 26, AARCH64_INSN_IMM_26); + } + break; + + default: + pr_err("module %s: unsupported RELA relocation: %llu\n", + me->name, ELF64_R_TYPE(rel[i].r_info)); + return -ENOEXEC; + } + + if (overflow_check && ovf == -ERANGE) + goto overflow; + + } + + return 0; + 
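As a side note on the signed MOVW cases above: reloc_insn_movw() switches MOVZ to MOVN and inverts the immediate when the value is negative. A sketch of that selection for the G0 group, with a reconstruction step showing the round trip:

#include <stdint.h>
#include <stdio.h>

/* Pick MOVZ or MOVN for R_AARCH64_MOVW_SABS_G0, mirroring reloc_insn_movw(). */
static int encode_movw_sabs_g0(int64_t sval, unsigned int *use_movn, uint16_t *imm16)
{
        uint64_t imm = (uint64_t)sval;

        *use_movn = sval < 0;
        if (*use_movn)
                imm = ~imm;     /* MOVN writes the bitwise NOT of its immediate */
        if (imm > 0xffff)
                return -1;      /* -ERANGE: needs more than one MOVZ/MOVN */
        *imm16 = (uint16_t)imm;
        return 0;
}

int main(void)
{
        unsigned int use_movn;
        uint16_t imm;

        if (encode_movw_sabs_g0(-5, &use_movn, &imm) == 0) {
                /* Reconstruct what the instruction leaves in the register. */
                int64_t reg = use_movn ? (int64_t)~(uint64_t)imm : (int64_t)imm;

                printf("%s #%u -> register holds %lld\n",
                       use_movn ? "movn" : "movz", imm, (long long)reg);
        }
        return 0;
}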
+overflow: + pr_err("module %s: overflow in relocation type %d val %Lx\n", + me->name, (int)ELF64_R_TYPE(rel[i].r_info), val); + return -ENOEXEC; +} + +static inline void __init_plt(struct plt_entry *plt, unsigned long addr) +{ + *plt = get_plt_entry(addr, plt); +} + +static int module_init_ftrace_plt(const Elf_Ehdr *hdr, + const Elf_Shdr *sechdrs, + struct module *mod) +{ +#if defined(CONFIG_DYNAMIC_FTRACE) + const Elf_Shdr *s; + struct plt_entry *plts; + + s = find_section(hdr, sechdrs, ".text.ftrace_trampoline"); + if (!s) + return -ENOEXEC; + + plts = (void *)s->sh_addr; + + __init_plt(&plts[FTRACE_PLT_IDX], FTRACE_ADDR); + + mod->arch.ftrace_trampolines = plts; +#endif + return 0; +} + +int module_finalize(const Elf_Ehdr *hdr, + const Elf_Shdr *sechdrs, + struct module *me) +{ + const Elf_Shdr *s; + s = find_section(hdr, sechdrs, ".altinstructions"); + if (s) + apply_alternatives_module((void *)s->sh_addr, s->sh_size); + + if (scs_is_dynamic()) { + s = find_section(hdr, sechdrs, ".init.eh_frame"); + if (s) + scs_patch((void *)s->sh_addr, s->sh_size); + } + + return module_init_ftrace_plt(hdr, sechdrs, me); +} diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c new file mode 100644 index 0000000000..4edecaac8f --- /dev/null +++ b/arch/arm64/kernel/mte.c @@ -0,0 +1,606 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020 ARM Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +static DEFINE_PER_CPU_READ_MOSTLY(u64, mte_tcf_preferred); + +#ifdef CONFIG_KASAN_HW_TAGS +/* + * The asynchronous and asymmetric MTE modes have the same behavior for + * store operations. This flag is set when either of these modes is enabled. + */ +DEFINE_STATIC_KEY_FALSE(mte_async_or_asymm_mode); +EXPORT_SYMBOL_GPL(mte_async_or_asymm_mode); +#endif + +void mte_sync_tags(pte_t pte) +{ + struct page *page = pte_page(pte); + long i, nr_pages = compound_nr(page); + + /* if PG_mte_tagged is set, tags have already been initialised */ + for (i = 0; i < nr_pages; i++, page++) { + if (try_page_mte_tagging(page)) { + mte_clear_page_tags(page_address(page)); + set_page_mte_tagged(page); + } + } + + /* ensure the tags are visible before the PTE is set */ + smp_wmb(); +} + +int memcmp_pages(struct page *page1, struct page *page2) +{ + char *addr1, *addr2; + int ret; + + addr1 = page_address(page1); + addr2 = page_address(page2); + ret = memcmp(addr1, addr2, PAGE_SIZE); + + if (!system_supports_mte() || ret) + return ret; + + /* + * If the page content is identical but at least one of the pages is + * tagged, return non-zero to avoid KSM merging. If only one of the + * pages is tagged, set_pte_at() may zero or change the tags of the + * other page via mte_sync_tags(). + */ + if (page_mte_tagged(page1) || page_mte_tagged(page2)) + return addr1 != addr2; + + return ret; +} + +static inline void __mte_enable_kernel(const char *mode, unsigned long tcf) +{ + /* Enable MTE Sync Mode for EL1. */ + sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF_MASK, + SYS_FIELD_PREP(SCTLR_EL1, TCF, tcf)); + isb(); + + pr_info_once("MTE: enabled in %s mode at EL1\n", mode); +} + +#ifdef CONFIG_KASAN_HW_TAGS +void mte_enable_kernel_sync(void) +{ + /* + * Make sure we enter this function when no PE has set + * async mode previously. 
+ */ + WARN_ONCE(system_uses_mte_async_or_asymm_mode(), + "MTE async mode enabled system wide!"); + + __mte_enable_kernel("synchronous", SCTLR_EL1_TCF_SYNC); +} + +void mte_enable_kernel_async(void) +{ + __mte_enable_kernel("asynchronous", SCTLR_EL1_TCF_ASYNC); + + /* + * MTE async mode is set system wide by the first PE that + * executes this function. + * + * Note: If in future KASAN acquires a runtime switching + * mode in between sync and async, this strategy needs + * to be reviewed. + */ + if (!system_uses_mte_async_or_asymm_mode()) + static_branch_enable(&mte_async_or_asymm_mode); +} + +void mte_enable_kernel_asymm(void) +{ + if (cpus_have_cap(ARM64_MTE_ASYMM)) { + __mte_enable_kernel("asymmetric", SCTLR_EL1_TCF_ASYMM); + + /* + * MTE asymm mode behaves as async mode for store + * operations. The mode is set system wide by the + * first PE that executes this function. + * + * Note: If in future KASAN acquires a runtime switching + * mode in between sync and async, this strategy needs + * to be reviewed. + */ + if (!system_uses_mte_async_or_asymm_mode()) + static_branch_enable(&mte_async_or_asymm_mode); + } else { + /* + * If the CPU does not support MTE asymmetric mode the + * kernel falls back on synchronous mode which is the + * default for kasan=on. + */ + mte_enable_kernel_sync(); + } +} +#endif + +#ifdef CONFIG_KASAN_HW_TAGS +void mte_check_tfsr_el1(void) +{ + u64 tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1); + + if (unlikely(tfsr_el1 & SYS_TFSR_EL1_TF1)) { + /* + * Note: isb() is not required after this direct write + * because there is no indirect read subsequent to it + * (per ARM DDI 0487F.c table D13-1). + */ + write_sysreg_s(0, SYS_TFSR_EL1); + + kasan_report_async(); + } +} +#endif + +/* + * This is where we actually resolve the system and process MTE mode + * configuration into an actual value in SCTLR_EL1 that affects + * userspace. + */ +static void mte_update_sctlr_user(struct task_struct *task) +{ + /* + * This must be called with preemption disabled and can only be called + * on the current or next task since the CPU must match where the thread + * is going to run. The caller is responsible for calling + * update_sctlr_el1() later in the same preemption disabled block. + */ + unsigned long sctlr = task->thread.sctlr_user; + unsigned long mte_ctrl = task->thread.mte_ctrl; + unsigned long pref, resolved_mte_tcf; + + pref = __this_cpu_read(mte_tcf_preferred); + /* + * If there is no overlap between the system preferred and + * program requested values go with what was requested. + */ + resolved_mte_tcf = (mte_ctrl & pref) ? pref : mte_ctrl; + sctlr &= ~SCTLR_EL1_TCF0_MASK; + /* + * Pick an actual setting. The order in which we check for + * set bits and map into register values determines our + * default order. + */ + if (resolved_mte_tcf & MTE_CTRL_TCF_ASYMM) + sctlr |= SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF0, ASYMM); + else if (resolved_mte_tcf & MTE_CTRL_TCF_ASYNC) + sctlr |= SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF0, ASYNC); + else if (resolved_mte_tcf & MTE_CTRL_TCF_SYNC) + sctlr |= SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF0, SYNC); + task->thread.sctlr_user = sctlr; +} + +static void mte_update_gcr_excl(struct task_struct *task) +{ + /* + * SYS_GCR_EL1 will be set to current->thread.mte_ctrl value by + * mte_set_user_gcr() in kernel_exit, but only if KASAN is enabled. 
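The mte_ctrl bits resolved by mte_update_sctlr_user() originate in userspace: a task opts in with prctl(PR_SET_TAGGED_ADDR_CTRL) and then maps memory with PROT_MTE. A minimal opt-in sketch following the pattern in the kernel's MTE documentation; it needs MTE-capable hardware and recent UAPI headers, and PROT_MTE is defined locally in case the libc headers lack it:

#include <sys/prctl.h>
#include <sys/mman.h>
#include <stdio.h>

#ifndef PROT_MTE
#define PROT_MTE 0x20                   /* arm64-specific mmap/mprotect flag */
#endif

int main(void)
{
        /*
         * Ask for synchronous tag-check faults and allow tags 1-15 in the
         * random-tag generation mask; this is what set_mte_ctrl() consumes.
         */
        unsigned long ctrl = PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
                             (0xfffe << PR_MTE_TAG_SHIFT);

        if (prctl(PR_SET_TAGGED_ADDR_CTRL, ctrl, 0, 0, 0)) {
                perror("PR_SET_TAGGED_ADDR_CTRL");
                return 1;
        }

        /* Tag checking only applies to PROT_MTE mappings. */
        char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_MTE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }
        p[0] = 1;       /* an untagged pointer still matches tag 0 */
        puts("MTE enabled for this task");
        return 0;
}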
+ */ + if (kasan_hw_tags_enabled()) + return; + + write_sysreg_s( + ((task->thread.mte_ctrl >> MTE_CTRL_GCR_USER_EXCL_SHIFT) & + SYS_GCR_EL1_EXCL_MASK) | SYS_GCR_EL1_RRND, + SYS_GCR_EL1); +} + +#ifdef CONFIG_KASAN_HW_TAGS +/* Only called from assembly, silence sparse */ +void __init kasan_hw_tags_enable(struct alt_instr *alt, __le32 *origptr, + __le32 *updptr, int nr_inst); + +void __init kasan_hw_tags_enable(struct alt_instr *alt, __le32 *origptr, + __le32 *updptr, int nr_inst) +{ + BUG_ON(nr_inst != 1); /* Branch -> NOP */ + + if (kasan_hw_tags_enabled()) + *updptr = cpu_to_le32(aarch64_insn_gen_nop()); +} +#endif + +void mte_thread_init_user(void) +{ + if (!system_supports_mte()) + return; + + /* clear any pending asynchronous tag fault */ + dsb(ish); + write_sysreg_s(0, SYS_TFSRE0_EL1); + clear_thread_flag(TIF_MTE_ASYNC_FAULT); + /* disable tag checking and reset tag generation mask */ + set_mte_ctrl(current, 0); +} + +void mte_thread_switch(struct task_struct *next) +{ + if (!system_supports_mte()) + return; + + mte_update_sctlr_user(next); + mte_update_gcr_excl(next); + + /* TCO may not have been disabled on exception entry for the current task. */ + mte_disable_tco_entry(next); + + /* + * Check if an async tag exception occurred at EL1. + * + * Note: On the context switch path we rely on the dsb() present + * in __switch_to() to guarantee that the indirect writes to TFSR_EL1 + * are synchronized before this point. + */ + isb(); + mte_check_tfsr_el1(); +} + +void mte_cpu_setup(void) +{ + u64 rgsr; + + /* + * CnP must be enabled only after the MAIR_EL1 register has been set + * up. Inconsistent MAIR_EL1 between CPUs sharing the same TLB may + * lead to the wrong memory type being used for a brief window during + * CPU power-up. + * + * CnP is not a boot feature so MTE gets enabled before CnP, but let's + * make sure that is the case. + */ + BUG_ON(read_sysreg(ttbr0_el1) & TTBR_CNP_BIT); + BUG_ON(read_sysreg(ttbr1_el1) & TTBR_CNP_BIT); + + /* Normal Tagged memory type at the corresponding MAIR index */ + sysreg_clear_set(mair_el1, + MAIR_ATTRIDX(MAIR_ATTR_MASK, MT_NORMAL_TAGGED), + MAIR_ATTRIDX(MAIR_ATTR_NORMAL_TAGGED, + MT_NORMAL_TAGGED)); + + write_sysreg_s(KERNEL_GCR_EL1, SYS_GCR_EL1); + + /* + * If GCR_EL1.RRND=1 is implemented the same way as RRND=0, then + * RGSR_EL1.SEED must be non-zero for IRG to produce + * pseudorandom numbers. As RGSR_EL1 is UNKNOWN out of reset, we + * must initialize it. + */ + rgsr = (read_sysreg(CNTVCT_EL0) & SYS_RGSR_EL1_SEED_MASK) << + SYS_RGSR_EL1_SEED_SHIFT; + if (rgsr == 0) + rgsr = 1 << SYS_RGSR_EL1_SEED_SHIFT; + write_sysreg_s(rgsr, SYS_RGSR_EL1); + + /* clear any pending tag check faults in TFSR*_EL1 */ + write_sysreg_s(0, SYS_TFSR_EL1); + write_sysreg_s(0, SYS_TFSRE0_EL1); + + local_flush_tlb_all(); +} + +void mte_suspend_enter(void) +{ + if (!system_supports_mte()) + return; + + /* + * The barriers are required to guarantee that the indirect writes + * to TFSR_EL1 are synchronized before we report the state. 
+ */ + dsb(nsh); + isb(); + + /* Report SYS_TFSR_EL1 before suspend entry */ + mte_check_tfsr_el1(); +} + +void mte_suspend_exit(void) +{ + if (!system_supports_mte()) + return; + + mte_cpu_setup(); +} + +long set_mte_ctrl(struct task_struct *task, unsigned long arg) +{ + u64 mte_ctrl = (~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) & + SYS_GCR_EL1_EXCL_MASK) << MTE_CTRL_GCR_USER_EXCL_SHIFT; + + if (!system_supports_mte()) + return 0; + + if (arg & PR_MTE_TCF_ASYNC) + mte_ctrl |= MTE_CTRL_TCF_ASYNC; + if (arg & PR_MTE_TCF_SYNC) + mte_ctrl |= MTE_CTRL_TCF_SYNC; + + /* + * If the system supports it and both sync and async modes are + * specified then implicitly enable asymmetric mode. + * Userspace could see a mix of both sync and async anyway due + * to differing or changing defaults on CPUs. + */ + if (cpus_have_cap(ARM64_MTE_ASYMM) && + (arg & PR_MTE_TCF_ASYNC) && + (arg & PR_MTE_TCF_SYNC)) + mte_ctrl |= MTE_CTRL_TCF_ASYMM; + + task->thread.mte_ctrl = mte_ctrl; + if (task == current) { + preempt_disable(); + mte_update_sctlr_user(task); + mte_update_gcr_excl(task); + update_sctlr_el1(task->thread.sctlr_user); + preempt_enable(); + } + + return 0; +} + +long get_mte_ctrl(struct task_struct *task) +{ + unsigned long ret; + u64 mte_ctrl = task->thread.mte_ctrl; + u64 incl = (~mte_ctrl >> MTE_CTRL_GCR_USER_EXCL_SHIFT) & + SYS_GCR_EL1_EXCL_MASK; + + if (!system_supports_mte()) + return 0; + + ret = incl << PR_MTE_TAG_SHIFT; + if (mte_ctrl & MTE_CTRL_TCF_ASYNC) + ret |= PR_MTE_TCF_ASYNC; + if (mte_ctrl & MTE_CTRL_TCF_SYNC) + ret |= PR_MTE_TCF_SYNC; + + return ret; +} + +/* + * Access MTE tags in another process' address space as given in mm. Update + * the number of tags copied. Return 0 if any tags copied, error otherwise. + * Inspired by __access_remote_vm(). + */ +static int __access_remote_tags(struct mm_struct *mm, unsigned long addr, + struct iovec *kiov, unsigned int gup_flags) +{ + void __user *buf = kiov->iov_base; + size_t len = kiov->iov_len; + int err = 0; + int write = gup_flags & FOLL_WRITE; + + if (!access_ok(buf, len)) + return -EFAULT; + + if (mmap_read_lock_killable(mm)) + return -EIO; + + while (len) { + struct vm_area_struct *vma; + unsigned long tags, offset; + void *maddr; + struct page *page = get_user_page_vma_remote(mm, addr, + gup_flags, &vma); + + if (IS_ERR_OR_NULL(page)) { + err = page == NULL ? -EIO : PTR_ERR(page); + break; + } + + /* + * Only copy tags if the page has been mapped as PROT_MTE + * (PG_mte_tagged set). Otherwise the tags are not valid and + * not accessible to user. Moreover, an mprotect(PROT_MTE) + * would cause the existing tags to be cleared if the page + * was never mapped with PROT_MTE. 
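__access_remote_tags() above backs the PTRACE_PEEKMTETAGS/POKEMTETAGS requests, which move one tag byte per 16-byte granule through an iovec. A fragment showing the tracer side of a read, assuming the tracee is already attached and stopped and that 'addr' lies in one of its PROT_MTE mappings:

#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <stdint.h>

#ifndef PTRACE_PEEKMTETAGS
#define PTRACE_PEEKMTETAGS 33           /* arm64-specific request */
#endif

/* Read up to 'ntags' allocation tags (one byte per 16-byte granule). */
static long peek_tags(pid_t pid, void *addr, uint8_t *tags, size_t ntags)
{
        struct iovec iov = { .iov_base = tags, .iov_len = ntags };

        if (ptrace(PTRACE_PEEKMTETAGS, pid, addr, &iov) == -1)
                return -1;
        return (long)iov.iov_len;       /* tags actually copied (may be short) */
}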
+ */ + if (!(vma->vm_flags & VM_MTE)) { + err = -EOPNOTSUPP; + put_page(page); + break; + } + WARN_ON_ONCE(!page_mte_tagged(page)); + + /* limit access to the end of the page */ + offset = offset_in_page(addr); + tags = min(len, (PAGE_SIZE - offset) / MTE_GRANULE_SIZE); + + maddr = page_address(page); + if (write) { + tags = mte_copy_tags_from_user(maddr + offset, buf, tags); + set_page_dirty_lock(page); + } else { + tags = mte_copy_tags_to_user(buf, maddr + offset, tags); + } + put_page(page); + + /* error accessing the tracer's buffer */ + if (!tags) + break; + + len -= tags; + buf += tags; + addr += tags * MTE_GRANULE_SIZE; + } + mmap_read_unlock(mm); + + /* return an error if no tags copied */ + kiov->iov_len = buf - kiov->iov_base; + if (!kiov->iov_len) { + /* check for error accessing the tracee's address space */ + if (err) + return -EIO; + else + return -EFAULT; + } + + return 0; +} + +/* + * Copy MTE tags in another process' address space at 'addr' to/from tracer's + * iovec buffer. Return 0 on success. Inspired by ptrace_access_vm(). + */ +static int access_remote_tags(struct task_struct *tsk, unsigned long addr, + struct iovec *kiov, unsigned int gup_flags) +{ + struct mm_struct *mm; + int ret; + + mm = get_task_mm(tsk); + if (!mm) + return -EPERM; + + if (!tsk->ptrace || (current != tsk->parent) || + ((get_dumpable(mm) != SUID_DUMP_USER) && + !ptracer_capable(tsk, mm->user_ns))) { + mmput(mm); + return -EPERM; + } + + ret = __access_remote_tags(mm, addr, kiov, gup_flags); + mmput(mm); + + return ret; +} + +int mte_ptrace_copy_tags(struct task_struct *child, long request, + unsigned long addr, unsigned long data) +{ + int ret; + struct iovec kiov; + struct iovec __user *uiov = (void __user *)data; + unsigned int gup_flags = FOLL_FORCE; + + if (!system_supports_mte()) + return -EIO; + + if (get_user(kiov.iov_base, &uiov->iov_base) || + get_user(kiov.iov_len, &uiov->iov_len)) + return -EFAULT; + + if (request == PTRACE_POKEMTETAGS) + gup_flags |= FOLL_WRITE; + + /* align addr to the MTE tag granule */ + addr &= MTE_GRANULE_MASK; + + ret = access_remote_tags(child, addr, &kiov, gup_flags); + if (!ret) + ret = put_user(kiov.iov_len, &uiov->iov_len); + + return ret; +} + +static ssize_t mte_tcf_preferred_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + switch (per_cpu(mte_tcf_preferred, dev->id)) { + case MTE_CTRL_TCF_ASYNC: + return sysfs_emit(buf, "async\n"); + case MTE_CTRL_TCF_SYNC: + return sysfs_emit(buf, "sync\n"); + case MTE_CTRL_TCF_ASYMM: + return sysfs_emit(buf, "asymm\n"); + default: + return sysfs_emit(buf, "???\n"); + } +} + +static ssize_t mte_tcf_preferred_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + u64 tcf; + + if (sysfs_streq(buf, "async")) + tcf = MTE_CTRL_TCF_ASYNC; + else if (sysfs_streq(buf, "sync")) + tcf = MTE_CTRL_TCF_SYNC; + else if (cpus_have_cap(ARM64_MTE_ASYMM) && sysfs_streq(buf, "asymm")) + tcf = MTE_CTRL_TCF_ASYMM; + else + return -EINVAL; + + device_lock(dev); + per_cpu(mte_tcf_preferred, dev->id) = tcf; + device_unlock(dev); + + return count; +} +static DEVICE_ATTR_RW(mte_tcf_preferred); + +static int register_mte_tcf_preferred_sysctl(void) +{ + unsigned int cpu; + + if (!system_supports_mte()) + return 0; + + for_each_possible_cpu(cpu) { + per_cpu(mte_tcf_preferred, cpu) = MTE_CTRL_TCF_ASYNC; + device_create_file(get_cpu_device(cpu), + &dev_attr_mte_tcf_preferred); + } + + return 0; +} +subsys_initcall(register_mte_tcf_preferred_sysctl); + +/* + * Return 0 on success, the 
number of bytes not probed otherwise. + */ +size_t mte_probe_user_range(const char __user *uaddr, size_t size) +{ + const char __user *end = uaddr + size; + int err = 0; + char val; + + __raw_get_user(val, uaddr, err); + if (err) + return size; + + uaddr = PTR_ALIGN(uaddr, MTE_GRANULE_SIZE); + while (uaddr < end) { + /* + * A read is sufficient for mte, the caller should have probed + * for the pte write permission if required. + */ + __raw_get_user(val, uaddr, err); + if (err) + return end - uaddr; + uaddr += MTE_GRANULE_SIZE; + } + (void)val; + + return 0; +} diff --git a/arch/arm64/kernel/paravirt.c b/arch/arm64/kernel/paravirt.c new file mode 100644 index 0000000000..aa718d6a92 --- /dev/null +++ b/arch/arm64/kernel/paravirt.c @@ -0,0 +1,176 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * + * Copyright (C) 2013 Citrix Systems + * + * Author: Stefano Stabellini + */ + +#define pr_fmt(fmt) "arm-pv: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +struct static_key paravirt_steal_enabled; +struct static_key paravirt_steal_rq_enabled; + +static u64 native_steal_clock(int cpu) +{ + return 0; +} + +DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock); + +struct pv_time_stolen_time_region { + struct pvclock_vcpu_stolen_time __rcu *kaddr; +}; + +static DEFINE_PER_CPU(struct pv_time_stolen_time_region, stolen_time_region); + +static bool steal_acc = true; +static int __init parse_no_stealacc(char *arg) +{ + steal_acc = false; + return 0; +} + +early_param("no-steal-acc", parse_no_stealacc); + +/* return stolen time in ns by asking the hypervisor */ +static u64 para_steal_clock(int cpu) +{ + struct pvclock_vcpu_stolen_time *kaddr = NULL; + struct pv_time_stolen_time_region *reg; + u64 ret = 0; + + reg = per_cpu_ptr(&stolen_time_region, cpu); + + /* + * paravirt_steal_clock() may be called before the CPU + * online notification callback runs. Until the callback + * has run we just return zero. 
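+ * (reg->kaddr is only published by stolen_time_cpu_online(), hence the rcu_dereference() and NULL check below.)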
+ */ + rcu_read_lock(); + kaddr = rcu_dereference(reg->kaddr); + if (!kaddr) { + rcu_read_unlock(); + return 0; + } + + ret = le64_to_cpu(READ_ONCE(kaddr->stolen_time)); + rcu_read_unlock(); + return ret; +} + +static int stolen_time_cpu_down_prepare(unsigned int cpu) +{ + struct pvclock_vcpu_stolen_time *kaddr = NULL; + struct pv_time_stolen_time_region *reg; + + reg = this_cpu_ptr(&stolen_time_region); + if (!reg->kaddr) + return 0; + + kaddr = rcu_replace_pointer(reg->kaddr, NULL, true); + synchronize_rcu(); + memunmap(kaddr); + + return 0; +} + +static int stolen_time_cpu_online(unsigned int cpu) +{ + struct pvclock_vcpu_stolen_time *kaddr = NULL; + struct pv_time_stolen_time_region *reg; + struct arm_smccc_res res; + + reg = this_cpu_ptr(&stolen_time_region); + + arm_smccc_1_1_invoke(ARM_SMCCC_HV_PV_TIME_ST, &res); + + if (res.a0 == SMCCC_RET_NOT_SUPPORTED) + return -EINVAL; + + kaddr = memremap(res.a0, + sizeof(struct pvclock_vcpu_stolen_time), + MEMREMAP_WB); + + rcu_assign_pointer(reg->kaddr, kaddr); + + if (!reg->kaddr) { + pr_warn("Failed to map stolen time data structure\n"); + return -ENOMEM; + } + + if (le32_to_cpu(kaddr->revision) != 0 || + le32_to_cpu(kaddr->attributes) != 0) { + pr_warn_once("Unexpected revision or attributes in stolen time data\n"); + return -ENXIO; + } + + return 0; +} + +static int __init pv_time_init_stolen_time(void) +{ + int ret; + + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, + "hypervisor/arm/pvtime:online", + stolen_time_cpu_online, + stolen_time_cpu_down_prepare); + if (ret < 0) + return ret; + return 0; +} + +static bool __init has_pv_steal_clock(void) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, + ARM_SMCCC_HV_PV_TIME_FEATURES, &res); + + if (res.a0 != SMCCC_RET_SUCCESS) + return false; + + arm_smccc_1_1_invoke(ARM_SMCCC_HV_PV_TIME_FEATURES, + ARM_SMCCC_HV_PV_TIME_ST, &res); + + return (res.a0 == SMCCC_RET_SUCCESS); +} + +int __init pv_time_init(void) +{ + int ret; + + if (!has_pv_steal_clock()) + return 0; + + ret = pv_time_init_stolen_time(); + if (ret) + return ret; + + static_call_update(pv_steal_clock, para_steal_clock); + + static_key_slow_inc(¶virt_steal_enabled); + if (steal_acc) + static_key_slow_inc(¶virt_steal_rq_enabled); + + pr_info("using stolen time PV\n"); + + return 0; +} diff --git a/arch/arm64/kernel/patch-scs.c b/arch/arm64/kernel/patch-scs.c new file mode 100644 index 0000000000..a1fe4b4ff5 --- /dev/null +++ b/arch/arm64/kernel/patch-scs.c @@ -0,0 +1,262 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2022 - Google LLC + * Author: Ard Biesheuvel + */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +// +// This minimal DWARF CFI parser is partially based on the code in +// arch/arc/kernel/unwind.c, and on the document below: +// https://refspecs.linuxbase.org/LSB_4.0.0/LSB-Core-generic/LSB-Core-generic/ehframechpt.html +// + +#define DW_CFA_nop 0x00 +#define DW_CFA_set_loc 0x01 +#define DW_CFA_advance_loc1 0x02 +#define DW_CFA_advance_loc2 0x03 +#define DW_CFA_advance_loc4 0x04 +#define DW_CFA_offset_extended 0x05 +#define DW_CFA_restore_extended 0x06 +#define DW_CFA_undefined 0x07 +#define DW_CFA_same_value 0x08 +#define DW_CFA_register 0x09 +#define DW_CFA_remember_state 0x0a +#define DW_CFA_restore_state 0x0b +#define DW_CFA_def_cfa 0x0c +#define DW_CFA_def_cfa_register 0x0d +#define DW_CFA_def_cfa_offset 0x0e +#define DW_CFA_def_cfa_expression 0x0f +#define DW_CFA_expression 0x10 +#define DW_CFA_offset_extended_sf 0x11 
+#define DW_CFA_def_cfa_sf 0x12 +#define DW_CFA_def_cfa_offset_sf 0x13 +#define DW_CFA_val_offset 0x14 +#define DW_CFA_val_offset_sf 0x15 +#define DW_CFA_val_expression 0x16 +#define DW_CFA_lo_user 0x1c +#define DW_CFA_negate_ra_state 0x2d +#define DW_CFA_GNU_args_size 0x2e +#define DW_CFA_GNU_negative_offset_extended 0x2f +#define DW_CFA_hi_user 0x3f + +extern const u8 __eh_frame_start[], __eh_frame_end[]; + +enum { + PACIASP = 0xd503233f, + AUTIASP = 0xd50323bf, + SCS_PUSH = 0xf800865e, + SCS_POP = 0xf85f8e5e, +}; + +static void __always_inline scs_patch_loc(u64 loc) +{ + u32 insn = le32_to_cpup((void *)loc); + + switch (insn) { + case PACIASP: + *(u32 *)loc = cpu_to_le32(SCS_PUSH); + break; + case AUTIASP: + *(u32 *)loc = cpu_to_le32(SCS_POP); + break; + default: + /* + * While the DW_CFA_negate_ra_state directive is guaranteed to + * appear right after a PACIASP/AUTIASP instruction, it may + * also appear after a DW_CFA_restore_state directive that + * restores a state that is only partially accurate, and is + * followed by DW_CFA_negate_ra_state directive to toggle the + * PAC bit again. So we permit other instructions here, and ignore + * them. + */ + return; + } + dcache_clean_pou(loc, loc + sizeof(u32)); +} + +/* + * Skip one uleb128/sleb128 encoded quantity from the opcode stream. All bytes + * except the last one have bit #7 set. + */ +static int __always_inline skip_xleb128(const u8 **opcode, int size) +{ + u8 c; + + do { + c = *(*opcode)++; + size--; + } while (c & BIT(7)); + + return size; +} + +struct eh_frame { + /* + * The size of this frame if 0 < size < U32_MAX, 0 terminates the list. + */ + u32 size; + + /* + * The first frame is a Common Information Entry (CIE) frame, followed + * by one or more Frame Description Entry (FDE) frames. In the former + * case, this field is 0, otherwise it is the negated offset relative + * to the associated CIE frame. + */ + u32 cie_id_or_pointer; + + union { + struct { // CIE + u8 version; + u8 augmentation_string[]; + }; + + struct { // FDE + s32 initial_loc; + s32 range; + u8 opcodes[]; + }; + }; +}; + +static int noinstr scs_handle_fde_frame(const struct eh_frame *frame, + bool fde_has_augmentation_data, + int code_alignment_factor, + bool dry_run) +{ + int size = frame->size - offsetof(struct eh_frame, opcodes) + 4; + u64 loc = (u64)offset_to_ptr(&frame->initial_loc); + const u8 *opcode = frame->opcodes; + + if (fde_has_augmentation_data) { + int l; + + // assume single byte uleb128_t + if (WARN_ON(*opcode & BIT(7))) + return -ENOEXEC; + + l = *opcode++; + opcode += l; + size -= l + 1; + } + + /* + * Starting from 'loc', apply the CFA opcodes that advance the location + * pointer, and identify the locations of the PAC instructions. + */ + while (size-- > 0) { + switch (*opcode++) { + case DW_CFA_nop: + case DW_CFA_remember_state: + case DW_CFA_restore_state: + break; + + case DW_CFA_advance_loc1: + loc += *opcode++ * code_alignment_factor; + size--; + break; + + case DW_CFA_advance_loc2: + loc += *opcode++ * code_alignment_factor; + loc += (*opcode++ << 8) * code_alignment_factor; + size -= 2; + break; + + case DW_CFA_def_cfa: + case DW_CFA_offset_extended: + size = skip_xleb128(&opcode, size); + fallthrough; + case DW_CFA_def_cfa_offset: + case DW_CFA_def_cfa_offset_sf: + case DW_CFA_def_cfa_register: + case DW_CFA_same_value: + case DW_CFA_restore_extended: + case 0x80 ... 0xbf: + size = skip_xleb128(&opcode, size); + break; + + case DW_CFA_negate_ra_state: + if (!dry_run) + scs_patch_loc(loc - 4); + break; + + case 0x40 ... 
0x7f: + // advance loc + loc += (opcode[-1] & 0x3f) * code_alignment_factor; + break; + + case 0xc0 ... 0xff: + break; + + default: + pr_err("unhandled opcode: %02x in FDE frame %lx\n", opcode[-1], (uintptr_t)frame); + return -ENOEXEC; + } + } + return 0; +} + +int noinstr scs_patch(const u8 eh_frame[], int size) +{ + const u8 *p = eh_frame; + + while (size > 4) { + const struct eh_frame *frame = (const void *)p; + bool fde_has_augmentation_data = true; + int code_alignment_factor = 1; + int ret; + + if (frame->size == 0 || + frame->size == U32_MAX || + frame->size > size) + break; + + if (frame->cie_id_or_pointer == 0) { + const u8 *p = frame->augmentation_string; + + /* a 'z' in the augmentation string must come first */ + fde_has_augmentation_data = *p == 'z'; + + /* + * The code alignment factor is a uleb128 encoded field + * but given that the only sensible values are 1 or 4, + * there is no point in decoding the whole thing. + */ + p += strlen(p) + 1; + if (!WARN_ON(*p & BIT(7))) + code_alignment_factor = *p; + } else { + ret = scs_handle_fde_frame(frame, + fde_has_augmentation_data, + code_alignment_factor, + true); + if (ret) + return ret; + scs_handle_fde_frame(frame, fde_has_augmentation_data, + code_alignment_factor, false); + } + + p += sizeof(frame->size) + frame->size; + size -= sizeof(frame->size) + frame->size; + } + return 0; +} + +asmlinkage void __init scs_patch_vmlinux(void) +{ + if (!should_patch_pac_into_scs()) + return; + + WARN_ON(scs_patch(__eh_frame_start, __eh_frame_end - __eh_frame_start)); + icache_inval_all_pou(); + isb(); +} diff --git a/arch/arm64/kernel/patching.c b/arch/arm64/kernel/patching.c new file mode 100644 index 0000000000..b4835f6d59 --- /dev/null +++ b/arch/arm64/kernel/patching.c @@ -0,0 +1,167 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +static DEFINE_RAW_SPINLOCK(patch_lock); + +static bool is_exit_text(unsigned long addr) +{ + /* discarded with init text/data */ + return system_state < SYSTEM_RUNNING && + addr >= (unsigned long)__exittext_begin && + addr < (unsigned long)__exittext_end; +} + +static bool is_image_text(unsigned long addr) +{ + return core_kernel_text(addr) || is_exit_text(addr); +} + +static void __kprobes *patch_map(void *addr, int fixmap) +{ + unsigned long uintaddr = (uintptr_t) addr; + bool image = is_image_text(uintaddr); + struct page *page; + + if (image) + page = phys_to_page(__pa_symbol(addr)); + else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX)) + page = vmalloc_to_page(addr); + else + return addr; + + BUG_ON(!page); + return (void *)set_fixmap_offset(fixmap, page_to_phys(page) + + (uintaddr & ~PAGE_MASK)); +} + +static void __kprobes patch_unmap(int fixmap) +{ + clear_fixmap(fixmap); +} +/* + * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always + * little-endian. 
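+ * aarch64_insn_read()/aarch64_insn_write() below therefore always transfer AARCH64_INSN_SIZE bytes and convert to/from __le32 around the raw copy.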
+ */ +int __kprobes aarch64_insn_read(void *addr, u32 *insnp) +{ + int ret; + __le32 val; + + ret = copy_from_kernel_nofault(&val, addr, AARCH64_INSN_SIZE); + if (!ret) + *insnp = le32_to_cpu(val); + + return ret; +} + +static int __kprobes __aarch64_insn_write(void *addr, __le32 insn) +{ + void *waddr = addr; + unsigned long flags = 0; + int ret; + + raw_spin_lock_irqsave(&patch_lock, flags); + waddr = patch_map(addr, FIX_TEXT_POKE0); + + ret = copy_to_kernel_nofault(waddr, &insn, AARCH64_INSN_SIZE); + + patch_unmap(FIX_TEXT_POKE0); + raw_spin_unlock_irqrestore(&patch_lock, flags); + + return ret; +} + +int __kprobes aarch64_insn_write(void *addr, u32 insn) +{ + return __aarch64_insn_write(addr, cpu_to_le32(insn)); +} + +noinstr int aarch64_insn_write_literal_u64(void *addr, u64 val) +{ + u64 *waddr; + unsigned long flags; + int ret; + + raw_spin_lock_irqsave(&patch_lock, flags); + waddr = patch_map(addr, FIX_TEXT_POKE0); + + ret = copy_to_kernel_nofault(waddr, &val, sizeof(val)); + + patch_unmap(FIX_TEXT_POKE0); + raw_spin_unlock_irqrestore(&patch_lock, flags); + + return ret; +} + +int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn) +{ + u32 *tp = addr; + int ret; + + /* A64 instructions must be word aligned */ + if ((uintptr_t)tp & 0x3) + return -EINVAL; + + ret = aarch64_insn_write(tp, insn); + if (ret == 0) + caches_clean_inval_pou((uintptr_t)tp, + (uintptr_t)tp + AARCH64_INSN_SIZE); + + return ret; +} + +struct aarch64_insn_patch { + void **text_addrs; + u32 *new_insns; + int insn_cnt; + atomic_t cpu_count; +}; + +static int __kprobes aarch64_insn_patch_text_cb(void *arg) +{ + int i, ret = 0; + struct aarch64_insn_patch *pp = arg; + + /* The last CPU becomes master */ + if (atomic_inc_return(&pp->cpu_count) == num_online_cpus()) { + for (i = 0; ret == 0 && i < pp->insn_cnt; i++) + ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i], + pp->new_insns[i]); + /* Notify other processors with an additional increment. */ + atomic_inc(&pp->cpu_count); + } else { + while (atomic_read(&pp->cpu_count) <= num_online_cpus()) + cpu_relax(); + isb(); + } + + return ret; +} + +int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt) +{ + struct aarch64_insn_patch patch = { + .text_addrs = addrs, + .new_insns = insns, + .insn_cnt = cnt, + .cpu_count = ATOMIC_INIT(0), + }; + + if (cnt <= 0) + return -EINVAL; + + return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch, + cpu_online_mask); +} diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c new file mode 100644 index 0000000000..f872c57e99 --- /dev/null +++ b/arch/arm64/kernel/pci.c @@ -0,0 +1,233 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Code borrowed from powerpc/kernel/pci-common.c + * + * Copyright (C) 2003 Anton Blanchard , IBM + * Copyright (C) 2014 ARM Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_ACPI +/* + * Try to assign the IRQ number when probing a new device + */ +int pcibios_alloc_irq(struct pci_dev *dev) +{ + if (!acpi_disabled) + acpi_pci_irq_enable(dev); + + return 0; +} +#endif + +/* + * raw_pci_read/write - Platform-specific PCI config space access. 
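+ * Both helpers look up the bus for (domain, bus) and forward the access to its pci_ops; PCIBIOS_DEVICE_NOT_FOUND is returned when no such bus exists.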
+ */ +int raw_pci_read(unsigned int domain, unsigned int bus, + unsigned int devfn, int reg, int len, u32 *val) +{ + struct pci_bus *b = pci_find_bus(domain, bus); + + if (!b) + return PCIBIOS_DEVICE_NOT_FOUND; + return b->ops->read(b, devfn, reg, len, val); +} + +int raw_pci_write(unsigned int domain, unsigned int bus, + unsigned int devfn, int reg, int len, u32 val) +{ + struct pci_bus *b = pci_find_bus(domain, bus); + + if (!b) + return PCIBIOS_DEVICE_NOT_FOUND; + return b->ops->write(b, devfn, reg, len, val); +} + +#ifdef CONFIG_NUMA + +int pcibus_to_node(struct pci_bus *bus) +{ + return dev_to_node(&bus->dev); +} +EXPORT_SYMBOL(pcibus_to_node); + +#endif + +#ifdef CONFIG_ACPI + +struct acpi_pci_generic_root_info { + struct acpi_pci_root_info common; + struct pci_config_window *cfg; /* config space mapping */ +}; + +int acpi_pci_bus_find_domain_nr(struct pci_bus *bus) +{ + struct pci_config_window *cfg = bus->sysdata; + struct acpi_device *adev = to_acpi_device(cfg->parent); + struct acpi_pci_root *root = acpi_driver_data(adev); + + return root->segment; +} + +int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge) +{ + struct pci_config_window *cfg; + struct acpi_device *adev; + struct device *bus_dev; + + if (acpi_disabled) + return 0; + + cfg = bridge->bus->sysdata; + + /* + * On Hyper-V there is no corresponding ACPI device for a root bridge, + * therefore ->parent is set as NULL by the driver. And set 'adev' as + * NULL in this case because there is no proper ACPI device. + */ + if (!cfg->parent) + adev = NULL; + else + adev = to_acpi_device(cfg->parent); + + bus_dev = &bridge->bus->dev; + + ACPI_COMPANION_SET(&bridge->dev, adev); + set_dev_node(bus_dev, acpi_get_node(acpi_device_handle(adev))); + + return 0; +} + +static int pci_acpi_root_prepare_resources(struct acpi_pci_root_info *ci) +{ + struct resource_entry *entry, *tmp; + int status; + + status = acpi_pci_probe_root_resources(ci); + resource_list_for_each_entry_safe(entry, tmp, &ci->resources) { + if (!(entry->res->flags & IORESOURCE_WINDOW)) + resource_list_destroy_entry(entry); + } + return status; +} + +/* + * Lookup the bus range for the domain in MCFG, and set up config space + * mapping. 
+ */ +static struct pci_config_window * +pci_acpi_setup_ecam_mapping(struct acpi_pci_root *root) +{ + struct device *dev = &root->device->dev; + struct resource *bus_res = &root->secondary; + u16 seg = root->segment; + const struct pci_ecam_ops *ecam_ops; + struct resource cfgres; + struct acpi_device *adev; + struct pci_config_window *cfg; + int ret; + + ret = pci_mcfg_lookup(root, &cfgres, &ecam_ops); + if (ret) { + dev_err(dev, "%04x:%pR ECAM region not found\n", seg, bus_res); + return NULL; + } + + adev = acpi_resource_consumer(&cfgres); + if (adev) + dev_info(dev, "ECAM area %pR reserved by %s\n", &cfgres, + dev_name(&adev->dev)); + else + dev_warn(dev, FW_BUG "ECAM area %pR not reserved in ACPI namespace\n", + &cfgres); + + cfg = pci_ecam_create(dev, &cfgres, bus_res, ecam_ops); + if (IS_ERR(cfg)) { + dev_err(dev, "%04x:%pR error %ld mapping ECAM\n", seg, bus_res, + PTR_ERR(cfg)); + return NULL; + } + + return cfg; +} + +/* release_info: free resources allocated by init_info */ +static void pci_acpi_generic_release_info(struct acpi_pci_root_info *ci) +{ + struct acpi_pci_generic_root_info *ri; + + ri = container_of(ci, struct acpi_pci_generic_root_info, common); + pci_ecam_free(ri->cfg); + kfree(ci->ops); + kfree(ri); +} + +/* Interface called from ACPI code to setup PCI host controller */ +struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) +{ + struct acpi_pci_generic_root_info *ri; + struct pci_bus *bus, *child; + struct acpi_pci_root_ops *root_ops; + struct pci_host_bridge *host; + + ri = kzalloc(sizeof(*ri), GFP_KERNEL); + if (!ri) + return NULL; + + root_ops = kzalloc(sizeof(*root_ops), GFP_KERNEL); + if (!root_ops) { + kfree(ri); + return NULL; + } + + ri->cfg = pci_acpi_setup_ecam_mapping(root); + if (!ri->cfg) { + kfree(ri); + kfree(root_ops); + return NULL; + } + + root_ops->release_info = pci_acpi_generic_release_info; + root_ops->prepare_resources = pci_acpi_root_prepare_resources; + root_ops->pci_ops = (struct pci_ops *)&ri->cfg->ops->pci_ops; + bus = acpi_pci_root_create(root, root_ops, &ri->common, ri->cfg); + if (!bus) + return NULL; + + /* If we must preserve the resource configuration, claim now */ + host = pci_find_host_bridge(bus); + if (host->preserve_config) + pci_bus_claim_resources(bus); + + /* + * Assign whatever was left unassigned. If we didn't claim above, + * this will reassign everything. + */ + pci_assign_unassigned_root_bus_resources(bus); + + list_for_each_entry(child, &bus->children, node) + pcie_bus_configure_settings(child); + + return bus; +} + +void pcibios_add_bus(struct pci_bus *bus) +{ + acpi_pci_add_bus(bus); +} + +void pcibios_remove_bus(struct pci_bus *bus) +{ + acpi_pci_remove_bus(bus); +} + +#endif diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c new file mode 100644 index 0000000000..6d157f3218 --- /dev/null +++ b/arch/arm64/kernel/perf_callchain.c @@ -0,0 +1,178 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * arm64 callchain support + * + * Copyright (C) 2015 ARM Limited + */ +#include +#include +#include + +#include + +struct frame_tail { + struct frame_tail __user *fp; + unsigned long lr; +} __attribute__((packed)); + +/* + * Get the return address for a single stackframe and return a pointer to the + * next frame tail. 
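+ * Returns NULL to stop the unwind when the frame record cannot be read or the frame pointer does not advance to a strictly higher address.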
+ */ +static struct frame_tail __user * +user_backtrace(struct frame_tail __user *tail, + struct perf_callchain_entry_ctx *entry) +{ + struct frame_tail buftail; + unsigned long err; + unsigned long lr; + + /* Also check accessibility of one struct frame_tail beyond */ + if (!access_ok(tail, sizeof(buftail))) + return NULL; + + pagefault_disable(); + err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail)); + pagefault_enable(); + + if (err) + return NULL; + + lr = ptrauth_strip_user_insn_pac(buftail.lr); + + perf_callchain_store(entry, lr); + + /* + * Frame pointers should strictly progress back up the stack + * (towards higher addresses). + */ + if (tail >= buftail.fp) + return NULL; + + return buftail.fp; +} + +#ifdef CONFIG_COMPAT +/* + * The registers we're interested in are at the end of the variable + * length saved register structure. The fp points at the end of this + * structure so the address of this struct is: + * (struct compat_frame_tail *)(xxx->fp)-1 + * + * This code has been adapted from the ARM OProfile support. + */ +struct compat_frame_tail { + compat_uptr_t fp; /* a (struct compat_frame_tail *) in compat mode */ + u32 sp; + u32 lr; +} __attribute__((packed)); + +static struct compat_frame_tail __user * +compat_user_backtrace(struct compat_frame_tail __user *tail, + struct perf_callchain_entry_ctx *entry) +{ + struct compat_frame_tail buftail; + unsigned long err; + + /* Also check accessibility of one struct frame_tail beyond */ + if (!access_ok(tail, sizeof(buftail))) + return NULL; + + pagefault_disable(); + err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail)); + pagefault_enable(); + + if (err) + return NULL; + + perf_callchain_store(entry, buftail.lr); + + /* + * Frame pointers should strictly progress back up the stack + * (towards higher addresses). 
+ */ + if (tail + 1 >= (struct compat_frame_tail __user *) + compat_ptr(buftail.fp)) + return NULL; + + return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1; +} +#endif /* CONFIG_COMPAT */ + +void perf_callchain_user(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) +{ + if (perf_guest_state()) { + /* We don't support guest os callchain now */ + return; + } + + perf_callchain_store(entry, regs->pc); + + if (!compat_user_mode(regs)) { + /* AARCH64 mode */ + struct frame_tail __user *tail; + + tail = (struct frame_tail __user *)regs->regs[29]; + + while (entry->nr < entry->max_stack && + tail && !((unsigned long)tail & 0x7)) + tail = user_backtrace(tail, entry); + } else { +#ifdef CONFIG_COMPAT + /* AARCH32 compat mode */ + struct compat_frame_tail __user *tail; + + tail = (struct compat_frame_tail __user *)regs->compat_fp - 1; + + while ((entry->nr < entry->max_stack) && + tail && !((unsigned long)tail & 0x3)) + tail = compat_user_backtrace(tail, entry); +#endif + } +} + +static bool callchain_trace(void *data, unsigned long pc) +{ + struct perf_callchain_entry_ctx *entry = data; + return perf_callchain_store(entry, pc) == 0; +} + +void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) +{ + if (perf_guest_state()) { + /* We don't support guest os callchain now */ + return; + } + + arch_stack_walk(callchain_trace, entry, current, regs); +} + +unsigned long perf_instruction_pointer(struct pt_regs *regs) +{ + if (perf_guest_state()) + return perf_guest_get_ip(); + + return instruction_pointer(regs); +} + +unsigned long perf_misc_flags(struct pt_regs *regs) +{ + unsigned int guest_state = perf_guest_state(); + int misc = 0; + + if (guest_state) { + if (guest_state & PERF_GUEST_USER) + misc |= PERF_RECORD_MISC_GUEST_USER; + else + misc |= PERF_RECORD_MISC_GUEST_KERNEL; + } else { + if (user_mode(regs)) + misc |= PERF_RECORD_MISC_USER; + else + misc |= PERF_RECORD_MISC_KERNEL; + } + + return misc; +} diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c new file mode 100644 index 0000000000..b4eece3eb1 --- /dev/null +++ b/arch/arm64/kernel/perf_regs.c @@ -0,0 +1,106 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include + +#include +#include + +static u64 perf_ext_regs_value(int idx) +{ + switch (idx) { + case PERF_REG_ARM64_VG: + if (WARN_ON_ONCE(!system_supports_sve())) + return 0; + + /* + * Vector granule is current length in bits of SVE registers + * divided by 64. + */ + return (task_get_sve_vl(current) * 8) / 64; + default: + WARN_ON_ONCE(true); + return 0; + } +} + +u64 perf_reg_value(struct pt_regs *regs, int idx) +{ + if (WARN_ON_ONCE((u32)idx >= PERF_REG_ARM64_EXTENDED_MAX)) + return 0; + + /* + * Our handling of compat tasks (PERF_SAMPLE_REGS_ABI_32) is weird, but + * we're stuck with it for ABI compatibility reasons. + * + * For a 32-bit consumer inspecting a 32-bit task, then it will look at + * the first 16 registers (see arch/arm/include/uapi/asm/perf_regs.h). + * These correspond directly to a prefix of the registers saved in our + * 'struct pt_regs', with the exception of the PC, so we copy that down + * (x15 corresponds to SP_hyp in the architecture). + * + * So far, so good. + * + * The oddity arises when a 64-bit consumer looks at a 32-bit task and + * asks for registers beyond PERF_REG_ARM_MAX. In this case, we return + * SP_usr, LR_usr and PC in the positions where the AArch64 SP, LR and + * PC registers would normally live. 
The initial idea was to allow a + * 64-bit unwinder to unwind a 32-bit task and, although it's not clear + * how well that works in practice, somebody might be relying on it. + * + * At the time we make a sample, we don't know whether the consumer is + * 32-bit or 64-bit, so we have to cater for both possibilities. + */ + if (compat_user_mode(regs)) { + if ((u32)idx == PERF_REG_ARM64_SP) + return regs->compat_sp; + if ((u32)idx == PERF_REG_ARM64_LR) + return regs->compat_lr; + if (idx == 15) + return regs->pc; + } + + if ((u32)idx == PERF_REG_ARM64_SP) + return regs->sp; + + if ((u32)idx == PERF_REG_ARM64_PC) + return regs->pc; + + if ((u32)idx >= PERF_REG_ARM64_MAX) + return perf_ext_regs_value(idx); + + return regs->regs[idx]; +} + +#define REG_RESERVED (~((1ULL << PERF_REG_ARM64_MAX) - 1)) + +int perf_reg_validate(u64 mask) +{ + u64 reserved_mask = REG_RESERVED; + + if (system_supports_sve()) + reserved_mask &= ~(1ULL << PERF_REG_ARM64_VG); + + if (!mask || mask & reserved_mask) + return -EINVAL; + + return 0; +} + +u64 perf_reg_abi(struct task_struct *task) +{ + if (is_compat_thread(task_thread_info(task))) + return PERF_SAMPLE_REGS_ABI_32; + else + return PERF_SAMPLE_REGS_ABI_64; +} + +void perf_get_regs_user(struct perf_regs *regs_user, + struct pt_regs *regs) +{ + regs_user->regs = task_pt_regs(current); + regs_user->abi = perf_reg_abi(current); +} diff --git a/arch/arm64/kernel/pi/Makefile b/arch/arm64/kernel/pi/Makefile new file mode 100644 index 0000000000..4c0ea3cd4e --- /dev/null +++ b/arch/arm64/kernel/pi/Makefile @@ -0,0 +1,34 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright 2022 Google LLC + +KBUILD_CFLAGS := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) -fpie \ + -Os -DDISABLE_BRANCH_PROFILING $(DISABLE_STACKLEAK_PLUGIN) \ + $(call cc-option,-mbranch-protection=none) \ + -I$(srctree)/scripts/dtc/libfdt -fno-stack-protector \ + -include $(srctree)/include/linux/hidden.h \ + -D__DISABLE_EXPORTS -ffreestanding -D__NO_FORTIFY \ + -fno-asynchronous-unwind-tables -fno-unwind-tables \ + $(call cc-option,-fno-addrsig) + +# remove SCS flags from all objects in this directory +KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_SCS), $(KBUILD_CFLAGS)) +# disable LTO +KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO), $(KBUILD_CFLAGS)) + +GCOV_PROFILE := n +KASAN_SANITIZE := n +KCSAN_SANITIZE := n +UBSAN_SANITIZE := n +KCOV_INSTRUMENT := n + +$(obj)/%.pi.o: OBJCOPYFLAGS := --prefix-symbols=__pi_ \ + --remove-section=.note.gnu.property \ + --prefix-alloc-sections=.init +$(obj)/%.pi.o: $(obj)/%.o FORCE + $(call if_changed,objcopy) + +$(obj)/lib-%.o: $(srctree)/lib/%.c FORCE + $(call if_changed_rule,cc_o_c) + +obj-y := kaslr_early.pi.o lib-fdt.pi.o lib-fdt_ro.pi.o +extra-y := $(patsubst %.pi.o,%.o,$(obj-y)) diff --git a/arch/arm64/kernel/pi/kaslr_early.c b/arch/arm64/kernel/pi/kaslr_early.c new file mode 100644 index 0000000000..17bff6e399 --- /dev/null +++ b/arch/arm64/kernel/pi/kaslr_early.c @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright 2022 Google LLC +// Author: Ard Biesheuvel + +// NOTE: code in this file runs *very* early, and is not permitted to use +// global variables or anything that relies on absolute addressing. 
+ +#include +#include +#include +#include +#include +#include + +#include +#include + +/* taken from lib/string.c */ +static char *__strstr(const char *s1, const char *s2) +{ + size_t l1, l2; + + l2 = strlen(s2); + if (!l2) + return (char *)s1; + l1 = strlen(s1); + while (l1 >= l2) { + l1--; + if (!memcmp(s1, s2, l2)) + return (char *)s1; + s1++; + } + return NULL; +} +static bool cmdline_contains_nokaslr(const u8 *cmdline) +{ + const u8 *str; + + str = __strstr(cmdline, "nokaslr"); + return str == cmdline || (str > cmdline && *(str - 1) == ' '); +} + +static bool is_kaslr_disabled_cmdline(void *fdt) +{ + if (!IS_ENABLED(CONFIG_CMDLINE_FORCE)) { + int node; + const u8 *prop; + + node = fdt_path_offset(fdt, "/chosen"); + if (node < 0) + goto out; + + prop = fdt_getprop(fdt, node, "bootargs", NULL); + if (!prop) + goto out; + + if (cmdline_contains_nokaslr(prop)) + return true; + + if (IS_ENABLED(CONFIG_CMDLINE_EXTEND)) + goto out; + + return false; + } +out: + return cmdline_contains_nokaslr(CONFIG_CMDLINE); +} + +static u64 get_kaslr_seed(void *fdt) +{ + int node, len; + fdt64_t *prop; + u64 ret; + + node = fdt_path_offset(fdt, "/chosen"); + if (node < 0) + return 0; + + prop = fdt_getprop_w(fdt, node, "kaslr-seed", &len); + if (!prop || len != sizeof(u64)) + return 0; + + ret = fdt64_to_cpu(*prop); + *prop = 0; + return ret; +} + +asmlinkage u64 kaslr_early_init(void *fdt) +{ + u64 seed; + + if (is_kaslr_disabled_cmdline(fdt)) + return 0; + + seed = get_kaslr_seed(fdt); + if (!seed) { + if (!__early_cpu_has_rndr() || + !__arm64_rndr((unsigned long *)&seed)) + return 0; + } + + /* + * OK, so we are proceeding with KASLR enabled. Calculate a suitable + * kernel image offset from the seed. Let's place the kernel in the + * middle half of the VMALLOC area (VA_BITS_MIN - 2), and stay clear of + * the lower and upper quarters to avoid colliding with other + * allocations. 
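+ * (Concretely, the offset returned below is BIT(VA_BITS_MIN - 3) plus a seed-derived value below BIT(VA_BITS_MIN - 2).)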
+ */ + return BIT(VA_BITS_MIN - 3) + (seed & GENMASK(VA_BITS_MIN - 3, 0)); +} diff --git a/arch/arm64/kernel/pointer_auth.c b/arch/arm64/kernel/pointer_auth.c new file mode 100644 index 0000000000..2708b620b4 --- /dev/null +++ b/arch/arm64/kernel/pointer_auth.c @@ -0,0 +1,113 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include +#include +#include + +int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg) +{ + struct ptrauth_keys_user *keys = &tsk->thread.keys_user; + unsigned long addr_key_mask = PR_PAC_APIAKEY | PR_PAC_APIBKEY | + PR_PAC_APDAKEY | PR_PAC_APDBKEY; + unsigned long key_mask = addr_key_mask | PR_PAC_APGAKEY; + + if (!system_supports_address_auth() && !system_supports_generic_auth()) + return -EINVAL; + + if (is_compat_thread(task_thread_info(tsk))) + return -EINVAL; + + if (!arg) { + ptrauth_keys_init_user(keys); + return 0; + } + + if (arg & ~key_mask) + return -EINVAL; + + if (((arg & addr_key_mask) && !system_supports_address_auth()) || + ((arg & PR_PAC_APGAKEY) && !system_supports_generic_auth())) + return -EINVAL; + + if (arg & PR_PAC_APIAKEY) + get_random_bytes(&keys->apia, sizeof(keys->apia)); + if (arg & PR_PAC_APIBKEY) + get_random_bytes(&keys->apib, sizeof(keys->apib)); + if (arg & PR_PAC_APDAKEY) + get_random_bytes(&keys->apda, sizeof(keys->apda)); + if (arg & PR_PAC_APDBKEY) + get_random_bytes(&keys->apdb, sizeof(keys->apdb)); + if (arg & PR_PAC_APGAKEY) + get_random_bytes(&keys->apga, sizeof(keys->apga)); + ptrauth_keys_install_user(keys); + + return 0; +} + +static u64 arg_to_enxx_mask(unsigned long arg) +{ + u64 sctlr_enxx_mask = 0; + + WARN_ON(arg & ~PR_PAC_ENABLED_KEYS_MASK); + if (arg & PR_PAC_APIAKEY) + sctlr_enxx_mask |= SCTLR_ELx_ENIA; + if (arg & PR_PAC_APIBKEY) + sctlr_enxx_mask |= SCTLR_ELx_ENIB; + if (arg & PR_PAC_APDAKEY) + sctlr_enxx_mask |= SCTLR_ELx_ENDA; + if (arg & PR_PAC_APDBKEY) + sctlr_enxx_mask |= SCTLR_ELx_ENDB; + return sctlr_enxx_mask; +} + +int ptrauth_set_enabled_keys(struct task_struct *tsk, unsigned long keys, + unsigned long enabled) +{ + u64 sctlr; + + if (!system_supports_address_auth()) + return -EINVAL; + + if (is_compat_thread(task_thread_info(tsk))) + return -EINVAL; + + if ((keys & ~PR_PAC_ENABLED_KEYS_MASK) || (enabled & ~keys)) + return -EINVAL; + + preempt_disable(); + sctlr = tsk->thread.sctlr_user; + sctlr &= ~arg_to_enxx_mask(keys); + sctlr |= arg_to_enxx_mask(enabled); + tsk->thread.sctlr_user = sctlr; + if (tsk == current) + update_sctlr_el1(sctlr); + preempt_enable(); + + return 0; +} + +int ptrauth_get_enabled_keys(struct task_struct *tsk) +{ + int retval = 0; + + if (!system_supports_address_auth()) + return -EINVAL; + + if (is_compat_thread(task_thread_info(tsk))) + return -EINVAL; + + if (tsk->thread.sctlr_user & SCTLR_ELx_ENIA) + retval |= PR_PAC_APIAKEY; + if (tsk->thread.sctlr_user & SCTLR_ELx_ENIB) + retval |= PR_PAC_APIBKEY; + if (tsk->thread.sctlr_user & SCTLR_ELx_ENDA) + retval |= PR_PAC_APDAKEY; + if (tsk->thread.sctlr_user & SCTLR_ELx_ENDB) + retval |= PR_PAC_APDBKEY; + + return retval; +} diff --git a/arch/arm64/kernel/probes/Makefile b/arch/arm64/kernel/probes/Makefile new file mode 100644 index 0000000000..8e4be92e25 --- /dev/null +++ b/arch/arm64/kernel/probes/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_KPROBES) += kprobes.o decode-insn.o \ + kprobes_trampoline.o \ + simulate-insn.o +obj-$(CONFIG_UPROBES) += uprobes.o decode-insn.o \ + simulate-insn.o diff --git a/arch/arm64/kernel/probes/decode-insn.c 
b/arch/arm64/kernel/probes/decode-insn.c new file mode 100644 index 0000000000..968d5fffe2 --- /dev/null +++ b/arch/arm64/kernel/probes/decode-insn.c @@ -0,0 +1,168 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * arch/arm64/kernel/probes/decode-insn.c + * + * Copyright (C) 2013 Linaro Limited. + */ + +#include +#include +#include +#include +#include +#include + +#include "decode-insn.h" +#include "simulate-insn.h" + +static bool __kprobes aarch64_insn_is_steppable(u32 insn) +{ + /* + * Branch instructions will write a new value into the PC which is + * likely to be relative to the XOL address and therefore invalid. + * Deliberate generation of an exception during stepping is also not + * currently safe. Lastly, MSR instructions can do any number of nasty + * things we can't handle during single-stepping. + */ + if (aarch64_insn_is_class_branch_sys(insn)) { + if (aarch64_insn_is_branch(insn) || + aarch64_insn_is_msr_imm(insn) || + aarch64_insn_is_msr_reg(insn) || + aarch64_insn_is_exception(insn) || + aarch64_insn_is_eret(insn) || + aarch64_insn_is_eret_auth(insn)) + return false; + + /* + * The MRS instruction may not return a correct value when + * executing in the single-stepping environment. We do make one + * exception, for reading the DAIF bits. + */ + if (aarch64_insn_is_mrs(insn)) + return aarch64_insn_extract_system_reg(insn) + != AARCH64_INSN_SPCLREG_DAIF; + + /* + * The HINT instruction is steppable only if it is in whitelist + * and the rest of other such instructions are blocked for + * single stepping as they may cause exception or other + * unintended behaviour. + */ + if (aarch64_insn_is_hint(insn)) + return aarch64_insn_is_steppable_hint(insn); + + return true; + } + + /* + * Instructions which load PC relative literals are not going to work + * when executed from an XOL slot. Instructions doing an exclusive + * load/store are not going to complete successfully when single-step + * exception handling happens in the middle of the sequence. + */ + if (aarch64_insn_uses_literal(insn) || + aarch64_insn_is_exclusive(insn)) + return false; + + return true; +} + +/* Return: + * INSN_REJECTED If instruction is one not allowed to kprobe, + * INSN_GOOD If instruction is supported and uses instruction slot, + * INSN_GOOD_NO_SLOT If instruction is supported but doesn't use its slot. + */ +enum probe_insn __kprobes +arm_probe_decode_insn(probe_opcode_t insn, struct arch_probe_insn *api) +{ + /* + * Instructions reading or modifying the PC won't work from the XOL + * slot. + */ + if (aarch64_insn_is_steppable(insn)) + return INSN_GOOD; + + if (aarch64_insn_is_bcond(insn)) { + api->handler = simulate_b_cond; + } else if (aarch64_insn_is_cbz(insn) || + aarch64_insn_is_cbnz(insn)) { + api->handler = simulate_cbz_cbnz; + } else if (aarch64_insn_is_tbz(insn) || + aarch64_insn_is_tbnz(insn)) { + api->handler = simulate_tbz_tbnz; + } else if (aarch64_insn_is_adr_adrp(insn)) { + api->handler = simulate_adr_adrp; + } else if (aarch64_insn_is_b(insn) || + aarch64_insn_is_bl(insn)) { + api->handler = simulate_b_bl; + } else if (aarch64_insn_is_br(insn) || + aarch64_insn_is_blr(insn) || + aarch64_insn_is_ret(insn)) { + api->handler = simulate_br_blr_ret; + } else if (aarch64_insn_is_ldr_lit(insn)) { + api->handler = simulate_ldr_literal; + } else if (aarch64_insn_is_ldrsw_lit(insn)) { + api->handler = simulate_ldrsw_literal; + } else { + /* + * Instruction cannot be stepped out-of-line and we don't + * (yet) simulate it. 
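+ * Returning INSN_REJECTED makes kprobe/uprobe registration on this address fail.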
+ */ + return INSN_REJECTED; + } + + return INSN_GOOD_NO_SLOT; +} + +#ifdef CONFIG_KPROBES +static bool __kprobes +is_probed_address_atomic(kprobe_opcode_t *scan_start, kprobe_opcode_t *scan_end) +{ + while (scan_start >= scan_end) { + /* + * atomic region starts from exclusive load and ends with + * exclusive store. + */ + if (aarch64_insn_is_store_ex(le32_to_cpu(*scan_start))) + return false; + else if (aarch64_insn_is_load_ex(le32_to_cpu(*scan_start))) + return true; + scan_start--; + } + + return false; +} + +enum probe_insn __kprobes +arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi) +{ + enum probe_insn decoded; + probe_opcode_t insn = le32_to_cpu(*addr); + probe_opcode_t *scan_end = NULL; + unsigned long size = 0, offset = 0; + + /* + * If there's a symbol defined in front of and near enough to + * the probe address assume it is the entry point to this + * code and use it to further limit how far back we search + * when determining if we're in an atomic sequence. If we could + * not find any symbol skip the atomic test altogether as we + * could otherwise end up searching irrelevant text/literals. + * KPROBES depends on KALLSYMS so this last case should never + * happen. + */ + if (kallsyms_lookup_size_offset((unsigned long) addr, &size, &offset)) { + if (offset < (MAX_ATOMIC_CONTEXT_SIZE*sizeof(kprobe_opcode_t))) + scan_end = addr - (offset / sizeof(kprobe_opcode_t)); + else + scan_end = addr - MAX_ATOMIC_CONTEXT_SIZE; + } + decoded = arm_probe_decode_insn(insn, &asi->api); + + if (decoded != INSN_REJECTED && scan_end) + if (is_probed_address_atomic(addr - 1, scan_end)) + return INSN_REJECTED; + + return decoded; +} +#endif diff --git a/arch/arm64/kernel/probes/decode-insn.h b/arch/arm64/kernel/probes/decode-insn.h new file mode 100644 index 0000000000..8b758c5a20 --- /dev/null +++ b/arch/arm64/kernel/probes/decode-insn.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * arch/arm64/kernel/probes/decode-insn.h + * + * Copyright (C) 2013 Linaro Limited. + */ + +#ifndef _ARM_KERNEL_KPROBES_ARM64_H +#define _ARM_KERNEL_KPROBES_ARM64_H + +#include + +/* + * ARM strongly recommends a limit of 128 bytes between LoadExcl and + * StoreExcl instructions in a single thread of execution. So keep the + * max atomic context size as 32. + */ +#define MAX_ATOMIC_CONTEXT_SIZE (128 / sizeof(kprobe_opcode_t)) + +enum probe_insn { + INSN_REJECTED, + INSN_GOOD_NO_SLOT, + INSN_GOOD, +}; + +#ifdef CONFIG_KPROBES +enum probe_insn __kprobes +arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi); +#endif +enum probe_insn __kprobes +arm_probe_decode_insn(probe_opcode_t insn, struct arch_probe_insn *asi); + +#endif /* _ARM_KERNEL_KPROBES_ARM64_H */ diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c new file mode 100644 index 0000000000..70b91a8c6b --- /dev/null +++ b/arch/arm64/kernel/probes/kprobes.c @@ -0,0 +1,425 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * arch/arm64/kernel/probes/kprobes.c + * + * Kprobes support for ARM64 + * + * Copyright (C) 2013 Linaro Limited. 
+ * Author: Sandeepa Prabhu + */ + +#define pr_fmt(fmt) "kprobes: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "decode-insn.h" + +DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; +DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); + +static void __kprobes +post_kprobe_handler(struct kprobe *, struct kprobe_ctlblk *, struct pt_regs *); + +static void __kprobes arch_prepare_ss_slot(struct kprobe *p) +{ + kprobe_opcode_t *addr = p->ainsn.api.insn; + + /* + * Prepare the insn slot. As Mark Rutland points out, this depends on a couple of + * subtleties: + * + * - That the I-cache maintenance for these instructions is complete + * *before* the kprobe BRK is written (and aarch64_insn_patch_text_nosync() + * ensures this, but just omits causing a Context-Synchronization-Event + * on all CPUs). + * + * - That the kprobe BRK results in an exception (and consequently a + * Context-Synchronization-Event), which ensures that the CPU will + * fetch the single-step slot instructions *after* this, ensuring that + * the new instructions are used. + * + * An ISB would normally be placed after patching to guarantee that the I-cache + * maintenance is observed on all CPUs; however, the single-step slot is only + * executed after taking the BRK exception, which itself provides the required + * Context-Synchronization-Event, so generating another one via ISB is unnecessary. + */ + aarch64_insn_patch_text_nosync(addr, p->opcode); + aarch64_insn_patch_text_nosync(addr + 1, BRK64_OPCODE_KPROBES_SS); + + /* + * The return address needs restoring after single-stepping out of line. + */ + p->ainsn.api.restore = (unsigned long) p->addr + + sizeof(kprobe_opcode_t); +} + +static void __kprobes arch_prepare_simulate(struct kprobe *p) +{ + /* This instruction is not executed out of line (xol).
No need to adjust the PC */ + p->ainsn.api.restore = 0; +} + +static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs) +{ + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + if (p->ainsn.api.handler) + p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs); + + /* single step simulated, now go for post processing */ + post_kprobe_handler(p, kcb, regs); +} + +int __kprobes arch_prepare_kprobe(struct kprobe *p) +{ + unsigned long probe_addr = (unsigned long)p->addr; + + if (probe_addr & 0x3) + return -EINVAL; + + /* copy instruction */ + p->opcode = le32_to_cpu(*p->addr); + + if (search_exception_tables(probe_addr)) + return -EINVAL; + + /* decode instruction */ + switch (arm_kprobe_decode_insn(p->addr, &p->ainsn)) { + case INSN_REJECTED: /* insn not supported */ + return -EINVAL; + + case INSN_GOOD_NO_SLOT: /* insn need simulation */ + p->ainsn.api.insn = NULL; + break; + + case INSN_GOOD: /* instruction uses slot */ + p->ainsn.api.insn = get_insn_slot(); + if (!p->ainsn.api.insn) + return -ENOMEM; + break; + } + + /* prepare the instruction */ + if (p->ainsn.api.insn) + arch_prepare_ss_slot(p); + else + arch_prepare_simulate(p); + + return 0; +} + +void *alloc_insn_page(void) +{ + return __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END, + GFP_KERNEL, PAGE_KERNEL_ROX, VM_FLUSH_RESET_PERMS, + NUMA_NO_NODE, __builtin_return_address(0)); +} + +/* arm kprobe: install breakpoint in text */ +void __kprobes arch_arm_kprobe(struct kprobe *p) +{ + void *addr = p->addr; + u32 insn = BRK64_OPCODE_KPROBES; + + aarch64_insn_patch_text(&addr, &insn, 1); +} + +/* disarm kprobe: remove breakpoint from text */ +void __kprobes arch_disarm_kprobe(struct kprobe *p) +{ + void *addr = p->addr; + + aarch64_insn_patch_text(&addr, &p->opcode, 1); +} + +void __kprobes arch_remove_kprobe(struct kprobe *p) +{ + if (p->ainsn.api.insn) { + free_insn_slot(p->ainsn.api.insn, 0); + p->ainsn.api.insn = NULL; + } +} + +static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) +{ + kcb->prev_kprobe.kp = kprobe_running(); + kcb->prev_kprobe.status = kcb->kprobe_status; +} + +static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) +{ + __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); + kcb->kprobe_status = kcb->prev_kprobe.status; +} + +static void __kprobes set_current_kprobe(struct kprobe *p) +{ + __this_cpu_write(current_kprobe, p); +} + +/* + * Mask all of DAIF while executing the instruction out-of-line, to keep things + * simple and avoid nesting exceptions. Interrupts do have to be disabled since + * the kprobe state is per-CPU and doesn't get migrated. 
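+ * The original DAIF bits are stashed in kcb->saved_irqflag and put back by kprobes_restore_local_irqflag() once the step completes.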
+ */ +static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb, + struct pt_regs *regs) +{ + kcb->saved_irqflag = regs->pstate & DAIF_MASK; + regs->pstate |= DAIF_MASK; +} + +static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb, + struct pt_regs *regs) +{ + regs->pstate &= ~DAIF_MASK; + regs->pstate |= kcb->saved_irqflag; +} + +static void __kprobes setup_singlestep(struct kprobe *p, + struct pt_regs *regs, + struct kprobe_ctlblk *kcb, int reenter) +{ + unsigned long slot; + + if (reenter) { + save_previous_kprobe(kcb); + set_current_kprobe(p); + kcb->kprobe_status = KPROBE_REENTER; + } else { + kcb->kprobe_status = KPROBE_HIT_SS; + } + + + if (p->ainsn.api.insn) { + /* prepare for single stepping */ + slot = (unsigned long)p->ainsn.api.insn; + + kprobes_save_local_irqflag(kcb, regs); + instruction_pointer_set(regs, slot); + } else { + /* insn simulation */ + arch_simulate_insn(p, regs); + } +} + +static int __kprobes reenter_kprobe(struct kprobe *p, + struct pt_regs *regs, + struct kprobe_ctlblk *kcb) +{ + switch (kcb->kprobe_status) { + case KPROBE_HIT_SSDONE: + case KPROBE_HIT_ACTIVE: + kprobes_inc_nmissed_count(p); + setup_singlestep(p, regs, kcb, 1); + break; + case KPROBE_HIT_SS: + case KPROBE_REENTER: + pr_warn("Failed to recover from reentered kprobes.\n"); + dump_kprobe(p); + BUG(); + break; + default: + WARN_ON(1); + return 0; + } + + return 1; +} + +static void __kprobes +post_kprobe_handler(struct kprobe *cur, struct kprobe_ctlblk *kcb, struct pt_regs *regs) +{ + /* return addr restore if non-branching insn */ + if (cur->ainsn.api.restore != 0) + instruction_pointer_set(regs, cur->ainsn.api.restore); + + /* restore back original saved kprobe variables and continue */ + if (kcb->kprobe_status == KPROBE_REENTER) { + restore_previous_kprobe(kcb); + return; + } + /* call post handler */ + kcb->kprobe_status = KPROBE_HIT_SSDONE; + if (cur->post_handler) + cur->post_handler(cur, regs, 0); + + reset_current_kprobe(); +} + +int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr) +{ + struct kprobe *cur = kprobe_running(); + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + switch (kcb->kprobe_status) { + case KPROBE_HIT_SS: + case KPROBE_REENTER: + /* + * We are here because the instruction being single + * stepped caused a page fault. We reset the current + * kprobe and the ip points back to the probe address + * and allow the page fault handler to continue as a + * normal page fault. + */ + instruction_pointer_set(regs, (unsigned long) cur->addr); + BUG_ON(!instruction_pointer(regs)); + + if (kcb->kprobe_status == KPROBE_REENTER) { + restore_previous_kprobe(kcb); + } else { + kprobes_restore_local_irqflag(kcb, regs); + reset_current_kprobe(); + } + + break; + } + return 0; +} + +static int __kprobes +kprobe_breakpoint_handler(struct pt_regs *regs, unsigned long esr) +{ + struct kprobe *p, *cur_kprobe; + struct kprobe_ctlblk *kcb; + unsigned long addr = instruction_pointer(regs); + + kcb = get_kprobe_ctlblk(); + cur_kprobe = kprobe_running(); + + p = get_kprobe((kprobe_opcode_t *) addr); + if (WARN_ON_ONCE(!p)) { + /* + * Something went wrong. This BRK used an immediate reserved + * for kprobes, but we couldn't find any corresponding probe. 
+ */ + return DBG_HOOK_ERROR; + } + + if (cur_kprobe) { + /* Hit a kprobe inside another kprobe */ + if (!reenter_kprobe(p, regs, kcb)) + return DBG_HOOK_ERROR; + } else { + /* Probe hit */ + set_current_kprobe(p); + kcb->kprobe_status = KPROBE_HIT_ACTIVE; + + /* + * If we have no pre-handler or it returned 0, we + * continue with normal processing. If we have a + * pre-handler and it returned non-zero, it will + * modify the execution path and not need to single-step + * Let's just reset current kprobe and exit. + */ + if (!p->pre_handler || !p->pre_handler(p, regs)) + setup_singlestep(p, regs, kcb, 0); + else + reset_current_kprobe(); + } + + return DBG_HOOK_HANDLED; +} + +static struct break_hook kprobes_break_hook = { + .imm = KPROBES_BRK_IMM, + .fn = kprobe_breakpoint_handler, +}; + +static int __kprobes +kprobe_breakpoint_ss_handler(struct pt_regs *regs, unsigned long esr) +{ + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + unsigned long addr = instruction_pointer(regs); + struct kprobe *cur = kprobe_running(); + + if (cur && (kcb->kprobe_status & (KPROBE_HIT_SS | KPROBE_REENTER)) && + ((unsigned long)&cur->ainsn.api.insn[1] == addr)) { + kprobes_restore_local_irqflag(kcb, regs); + post_kprobe_handler(cur, kcb, regs); + + return DBG_HOOK_HANDLED; + } + + /* not ours, kprobes should ignore it */ + return DBG_HOOK_ERROR; +} + +static struct break_hook kprobes_break_ss_hook = { + .imm = KPROBES_BRK_SS_IMM, + .fn = kprobe_breakpoint_ss_handler, +}; + +/* + * Provide a blacklist of symbols identifying ranges which cannot be kprobed. + * This blacklist is exposed to userspace via debugfs (kprobes/blacklist). + */ +int __init arch_populate_kprobe_blacklist(void) +{ + int ret; + + ret = kprobe_add_area_blacklist((unsigned long)__entry_text_start, + (unsigned long)__entry_text_end); + if (ret) + return ret; + ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start, + (unsigned long)__irqentry_text_end); + if (ret) + return ret; + ret = kprobe_add_area_blacklist((unsigned long)__hyp_text_start, + (unsigned long)__hyp_text_end); + if (ret || is_kernel_in_hyp_mode()) + return ret; + ret = kprobe_add_area_blacklist((unsigned long)__hyp_idmap_text_start, + (unsigned long)__hyp_idmap_text_end); + return ret; +} + +void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs) +{ + return (void *)kretprobe_trampoline_handler(regs, (void *)regs->regs[29]); +} + +void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, + struct pt_regs *regs) +{ + ri->ret_addr = (kprobe_opcode_t *)regs->regs[30]; + ri->fp = (void *)regs->regs[29]; + + /* replace return addr (x30) with trampoline */ + regs->regs[30] = (long)&__kretprobe_trampoline; +} + +int __kprobes arch_trampoline_kprobe(struct kprobe *p) +{ + return 0; +} + +int __init arch_init_kprobes(void) +{ + register_kernel_break_hook(&kprobes_break_hook); + register_kernel_break_hook(&kprobes_break_ss_hook); + + return 0; +} diff --git a/arch/arm64/kernel/probes/kprobes_trampoline.S b/arch/arm64/kernel/probes/kprobes_trampoline.S new file mode 100644 index 0000000000..9a6499bed5 --- /dev/null +++ b/arch/arm64/kernel/probes/kprobes_trampoline.S @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * trampoline entry and return code for kretprobes. 
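+ * __kretprobe_trampoline builds a struct pt_regs frame on the stack, calls trampoline_probe_handler() to recover the original return address into x0, then restores the saved registers and returns there.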
+ */ + +#include +#include +#include + + .text + + .macro save_all_base_regs + stp x0, x1, [sp, #S_X0] + stp x2, x3, [sp, #S_X2] + stp x4, x5, [sp, #S_X4] + stp x6, x7, [sp, #S_X6] + stp x8, x9, [sp, #S_X8] + stp x10, x11, [sp, #S_X10] + stp x12, x13, [sp, #S_X12] + stp x14, x15, [sp, #S_X14] + stp x16, x17, [sp, #S_X16] + stp x18, x19, [sp, #S_X18] + stp x20, x21, [sp, #S_X20] + stp x22, x23, [sp, #S_X22] + stp x24, x25, [sp, #S_X24] + stp x26, x27, [sp, #S_X26] + stp x28, x29, [sp, #S_X28] + add x0, sp, #PT_REGS_SIZE + stp lr, x0, [sp, #S_LR] + /* + * Construct a useful saved PSTATE + */ + mrs x0, nzcv + mrs x1, daif + orr x0, x0, x1 + mrs x1, CurrentEL + orr x0, x0, x1 + mrs x1, SPSel + orr x0, x0, x1 + stp xzr, x0, [sp, #S_PC] + .endm + + .macro restore_all_base_regs + ldr x0, [sp, #S_PSTATE] + and x0, x0, #(PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT) + msr nzcv, x0 + ldp x0, x1, [sp, #S_X0] + ldp x2, x3, [sp, #S_X2] + ldp x4, x5, [sp, #S_X4] + ldp x6, x7, [sp, #S_X6] + ldp x8, x9, [sp, #S_X8] + ldp x10, x11, [sp, #S_X10] + ldp x12, x13, [sp, #S_X12] + ldp x14, x15, [sp, #S_X14] + ldp x16, x17, [sp, #S_X16] + ldp x18, x19, [sp, #S_X18] + ldp x20, x21, [sp, #S_X20] + ldp x22, x23, [sp, #S_X22] + ldp x24, x25, [sp, #S_X24] + ldp x26, x27, [sp, #S_X26] + ldp x28, x29, [sp, #S_X28] + .endm + +SYM_CODE_START(__kretprobe_trampoline) + sub sp, sp, #PT_REGS_SIZE + + save_all_base_regs + + /* Setup a frame pointer. */ + add x29, sp, #S_FP + + mov x0, sp + bl trampoline_probe_handler + /* + * Replace trampoline address in lr with actual orig_ret_addr return + * address. + */ + mov lr, x0 + + /* The frame pointer (x29) is restored with other registers. */ + restore_all_base_regs + + add sp, sp, #PT_REGS_SIZE + ret + +SYM_CODE_END(__kretprobe_trampoline) diff --git a/arch/arm64/kernel/probes/simulate-insn.c b/arch/arm64/kernel/probes/simulate-insn.c new file mode 100644 index 0000000000..22d0b32524 --- /dev/null +++ b/arch/arm64/kernel/probes/simulate-insn.c @@ -0,0 +1,202 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * arch/arm64/kernel/probes/simulate-insn.c + * + * Copyright (C) 2013 Linaro Limited. + */ + +#include +#include +#include + +#include +#include + +#include "simulate-insn.h" + +#define bbl_displacement(insn) \ + sign_extend32(((insn) & 0x3ffffff) << 2, 27) + +#define bcond_displacement(insn) \ + sign_extend32(((insn >> 5) & 0x7ffff) << 2, 20) + +#define cbz_displacement(insn) \ + sign_extend32(((insn >> 5) & 0x7ffff) << 2, 20) + +#define tbz_displacement(insn) \ + sign_extend32(((insn >> 5) & 0x3fff) << 2, 15) + +#define ldr_displacement(insn) \ + sign_extend32(((insn >> 5) & 0x7ffff) << 2, 20) + +static inline void set_x_reg(struct pt_regs *regs, int reg, u64 val) +{ + pt_regs_write_reg(regs, reg, val); +} + +static inline void set_w_reg(struct pt_regs *regs, int reg, u64 val) +{ + pt_regs_write_reg(regs, reg, lower_32_bits(val)); +} + +static inline u64 get_x_reg(struct pt_regs *regs, int reg) +{ + return pt_regs_read_reg(regs, reg); +} + +static inline u32 get_w_reg(struct pt_regs *regs, int reg) +{ + return lower_32_bits(pt_regs_read_reg(regs, reg)); +} + +static bool __kprobes check_cbz(u32 opcode, struct pt_regs *regs) +{ + int xn = opcode & 0x1f; + + return (opcode & (1 << 31)) ? + (get_x_reg(regs, xn) == 0) : (get_w_reg(regs, xn) == 0); +} + +static bool __kprobes check_cbnz(u32 opcode, struct pt_regs *regs) +{ + int xn = opcode & 0x1f; + + return (opcode & (1 << 31)) ? 
+ (get_x_reg(regs, xn) != 0) : (get_w_reg(regs, xn) != 0); +} + +static bool __kprobes check_tbz(u32 opcode, struct pt_regs *regs) +{ + int xn = opcode & 0x1f; + int bit_pos = ((opcode & (1 << 31)) >> 26) | ((opcode >> 19) & 0x1f); + + return ((get_x_reg(regs, xn) >> bit_pos) & 0x1) == 0; +} + +static bool __kprobes check_tbnz(u32 opcode, struct pt_regs *regs) +{ + int xn = opcode & 0x1f; + int bit_pos = ((opcode & (1 << 31)) >> 26) | ((opcode >> 19) & 0x1f); + + return ((get_x_reg(regs, xn) >> bit_pos) & 0x1) != 0; +} + +/* + * instruction simulation functions + */ +void __kprobes +simulate_adr_adrp(u32 opcode, long addr, struct pt_regs *regs) +{ + long imm, xn, val; + + xn = opcode & 0x1f; + imm = ((opcode >> 3) & 0x1ffffc) | ((opcode >> 29) & 0x3); + imm = sign_extend64(imm, 20); + if (opcode & 0x80000000) + val = (imm<<12) + (addr & 0xfffffffffffff000); + else + val = imm + addr; + + set_x_reg(regs, xn, val); + + instruction_pointer_set(regs, instruction_pointer(regs) + 4); +} + +void __kprobes +simulate_b_bl(u32 opcode, long addr, struct pt_regs *regs) +{ + int disp = bbl_displacement(opcode); + + /* Link register is x30 */ + if (opcode & (1 << 31)) + set_x_reg(regs, 30, addr + 4); + + instruction_pointer_set(regs, addr + disp); +} + +void __kprobes +simulate_b_cond(u32 opcode, long addr, struct pt_regs *regs) +{ + int disp = 4; + + if (aarch32_opcode_cond_checks[opcode & 0xf](regs->pstate & 0xffffffff)) + disp = bcond_displacement(opcode); + + instruction_pointer_set(regs, addr + disp); +} + +void __kprobes +simulate_br_blr_ret(u32 opcode, long addr, struct pt_regs *regs) +{ + int xn = (opcode >> 5) & 0x1f; + + /* update pc first in case we're doing a "blr lr" */ + instruction_pointer_set(regs, get_x_reg(regs, xn)); + + /* Link register is x30 */ + if (((opcode >> 21) & 0x3) == 1) + set_x_reg(regs, 30, addr + 4); +} + +void __kprobes +simulate_cbz_cbnz(u32 opcode, long addr, struct pt_regs *regs) +{ + int disp = 4; + + if (opcode & (1 << 24)) { + if (check_cbnz(opcode, regs)) + disp = cbz_displacement(opcode); + } else { + if (check_cbz(opcode, regs)) + disp = cbz_displacement(opcode); + } + instruction_pointer_set(regs, addr + disp); +} + +void __kprobes +simulate_tbz_tbnz(u32 opcode, long addr, struct pt_regs *regs) +{ + int disp = 4; + + if (opcode & (1 << 24)) { + if (check_tbnz(opcode, regs)) + disp = tbz_displacement(opcode); + } else { + if (check_tbz(opcode, regs)) + disp = tbz_displacement(opcode); + } + instruction_pointer_set(regs, addr + disp); +} + +void __kprobes +simulate_ldr_literal(u32 opcode, long addr, struct pt_regs *regs) +{ + u64 *load_addr; + int xn = opcode & 0x1f; + int disp; + + disp = ldr_displacement(opcode); + load_addr = (u64 *) (addr + disp); + + if (opcode & (1 << 30)) /* x0-x30 */ + set_x_reg(regs, xn, *load_addr); + else /* w0-w30 */ + set_w_reg(regs, xn, *load_addr); + + instruction_pointer_set(regs, instruction_pointer(regs) + 4); +} + +void __kprobes +simulate_ldrsw_literal(u32 opcode, long addr, struct pt_regs *regs) +{ + s32 *load_addr; + int xn = opcode & 0x1f; + int disp; + + disp = ldr_displacement(opcode); + load_addr = (s32 *) (addr + disp); + + set_x_reg(regs, xn, *load_addr); + + instruction_pointer_set(regs, instruction_pointer(regs) + 4); +} diff --git a/arch/arm64/kernel/probes/simulate-insn.h b/arch/arm64/kernel/probes/simulate-insn.h new file mode 100644 index 0000000000..e065dc9221 --- /dev/null +++ b/arch/arm64/kernel/probes/simulate-insn.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * 
arch/arm64/kernel/probes/simulate-insn.h + * + * Copyright (C) 2013 Linaro Limited + */ + +#ifndef _ARM_KERNEL_KPROBES_SIMULATE_INSN_H +#define _ARM_KERNEL_KPROBES_SIMULATE_INSN_H + +void simulate_adr_adrp(u32 opcode, long addr, struct pt_regs *regs); +void simulate_b_bl(u32 opcode, long addr, struct pt_regs *regs); +void simulate_b_cond(u32 opcode, long addr, struct pt_regs *regs); +void simulate_br_blr_ret(u32 opcode, long addr, struct pt_regs *regs); +void simulate_cbz_cbnz(u32 opcode, long addr, struct pt_regs *regs); +void simulate_tbz_tbnz(u32 opcode, long addr, struct pt_regs *regs); +void simulate_ldr_literal(u32 opcode, long addr, struct pt_regs *regs); +void simulate_ldrsw_literal(u32 opcode, long addr, struct pt_regs *regs); + +#endif /* _ARM_KERNEL_KPROBES_SIMULATE_INSN_H */ diff --git a/arch/arm64/kernel/probes/uprobes.c b/arch/arm64/kernel/probes/uprobes.c new file mode 100644 index 0000000000..d49aef2657 --- /dev/null +++ b/arch/arm64/kernel/probes/uprobes.c @@ -0,0 +1,208 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2014-2016 Pratyush Anand + */ +#include +#include +#include +#include + +#include "decode-insn.h" + +#define UPROBE_INV_FAULT_CODE UINT_MAX + +void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, + void *src, unsigned long len) +{ + void *xol_page_kaddr = kmap_atomic(page); + void *dst = xol_page_kaddr + (vaddr & ~PAGE_MASK); + + /* Initialize the slot */ + memcpy(dst, src, len); + + /* flush caches (dcache/icache) */ + sync_icache_aliases((unsigned long)dst, (unsigned long)dst + len); + + kunmap_atomic(xol_page_kaddr); +} + +unsigned long uprobe_get_swbp_addr(struct pt_regs *regs) +{ + return instruction_pointer(regs); +} + +int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, + unsigned long addr) +{ + probe_opcode_t insn; + + /* TODO: Currently we do not support AARCH32 instruction probing */ + if (mm->context.flags & MMCF_AARCH32) + return -EOPNOTSUPP; + else if (!IS_ALIGNED(addr, AARCH64_INSN_SIZE)) + return -EINVAL; + + insn = *(probe_opcode_t *)(&auprobe->insn[0]); + + switch (arm_probe_decode_insn(insn, &auprobe->api)) { + case INSN_REJECTED: + return -EINVAL; + + case INSN_GOOD_NO_SLOT: + auprobe->simulate = true; + break; + + default: + break; + } + + return 0; +} + +int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) +{ + struct uprobe_task *utask = current->utask; + + /* Initialize with an invalid fault code to detect if ol insn trapped */ + current->thread.fault_code = UPROBE_INV_FAULT_CODE; + + /* Instruction points to execute ol */ + instruction_pointer_set(regs, utask->xol_vaddr); + + user_enable_single_step(current); + + return 0; +} + +int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) +{ + struct uprobe_task *utask = current->utask; + + WARN_ON_ONCE(current->thread.fault_code != UPROBE_INV_FAULT_CODE); + + /* Instruction points to execute next to breakpoint address */ + instruction_pointer_set(regs, utask->vaddr + 4); + + user_disable_single_step(current); + + return 0; +} +bool arch_uprobe_xol_was_trapped(struct task_struct *t) +{ + /* + * Between arch_uprobe_pre_xol and arch_uprobe_post_xol, if an xol + * insn itself is trapped, then detect the case with the help of + * invalid fault code which is being set in arch_uprobe_pre_xol + */ + if (t->thread.fault_code != UPROBE_INV_FAULT_CODE) + return true; + + return false; +} + +bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) +{ + probe_opcode_t insn; + 
unsigned long addr; + + if (!auprobe->simulate) + return false; + + insn = *(probe_opcode_t *)(&auprobe->insn[0]); + addr = instruction_pointer(regs); + + if (auprobe->api.handler) + auprobe->api.handler(insn, addr, regs); + + return true; +} + +void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) +{ + struct uprobe_task *utask = current->utask; + + /* + * Task has received a fatal signal, so reset back to probbed + * address. + */ + instruction_pointer_set(regs, utask->vaddr); + + user_disable_single_step(current); +} + +bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx, + struct pt_regs *regs) +{ + /* + * If a simple branch instruction (B) was called for retprobed + * assembly label then return true even when regs->sp and ret->stack + * are same. It will ensure that cleanup and reporting of return + * instances corresponding to callee label is done when + * handle_trampoline for called function is executed. + */ + if (ctx == RP_CHECK_CHAIN_CALL) + return regs->sp <= ret->stack; + else + return regs->sp < ret->stack; +} + +unsigned long +arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, + struct pt_regs *regs) +{ + unsigned long orig_ret_vaddr; + + orig_ret_vaddr = procedure_link_pointer(regs); + /* Replace the return addr with trampoline addr */ + procedure_link_pointer_set(regs, trampoline_vaddr); + + return orig_ret_vaddr; +} + +int arch_uprobe_exception_notify(struct notifier_block *self, + unsigned long val, void *data) +{ + return NOTIFY_DONE; +} + +static int uprobe_breakpoint_handler(struct pt_regs *regs, + unsigned long esr) +{ + if (uprobe_pre_sstep_notifier(regs)) + return DBG_HOOK_HANDLED; + + return DBG_HOOK_ERROR; +} + +static int uprobe_single_step_handler(struct pt_regs *regs, + unsigned long esr) +{ + struct uprobe_task *utask = current->utask; + + WARN_ON(utask && (instruction_pointer(regs) != utask->xol_vaddr + 4)); + if (uprobe_post_sstep_notifier(regs)) + return DBG_HOOK_HANDLED; + + return DBG_HOOK_ERROR; +} + +/* uprobe breakpoint handler hook */ +static struct break_hook uprobes_break_hook = { + .imm = UPROBES_BRK_IMM, + .fn = uprobe_breakpoint_handler, +}; + +/* uprobe single step handler hook */ +static struct step_hook uprobes_step_hook = { + .fn = uprobe_single_step_handler, +}; + +static int __init arch_init_uprobes(void) +{ + register_user_break_hook(&uprobes_break_hook); + register_user_step_hook(&uprobes_step_hook); + + return 0; +} + +device_initcall(arch_init_uprobes); diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c new file mode 100644 index 0000000000..0fcc4eb1a7 --- /dev/null +++ b/arch/arm64/kernel/process.c @@ -0,0 +1,760 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Based on arch/arm/kernel/process.c + * + * Original Copyright (C) 1995 Linus Torvalds + * Copyright (C) 1996-2000 Russell King - Converted to ARM. + * Copyright (C) 2012 ARM Ltd. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK) +#include +unsigned long __stack_chk_guard __ro_after_init; +EXPORT_SYMBOL(__stack_chk_guard); +#endif + +/* + * Function pointers to optional machine specific functions + */ +void (*pm_power_off)(void); +EXPORT_SYMBOL_GPL(pm_power_off); + +#ifdef CONFIG_HOTPLUG_CPU +void __noreturn arch_cpu_idle_dead(void) +{ + cpu_die(); +} +#endif + +/* + * Called by kexec, immediately prior to machine_kexec(). + * + * This must completely disable all secondary CPUs; simply causing those CPUs + * to execute e.g. a RAM-based pin loop is not sufficient. This allows the + * kexec'd kernel to use any and all RAM as it sees fit, without having to + * avoid any code or data used by any SW CPU pin loop. The CPU hotplug + * functionality embodied in smpt_shutdown_nonboot_cpus() to achieve this. + */ +void machine_shutdown(void) +{ + smp_shutdown_nonboot_cpus(reboot_cpu); +} + +/* + * Halting simply requires that the secondary CPUs stop performing any + * activity (executing tasks, handling interrupts). smp_send_stop() + * achieves this. + */ +void machine_halt(void) +{ + local_irq_disable(); + smp_send_stop(); + while (1); +} + +/* + * Power-off simply requires that the secondary CPUs stop performing any + * activity (executing tasks, handling interrupts). smp_send_stop() + * achieves this. When the system power is turned off, it will take all CPUs + * with it. + */ +void machine_power_off(void) +{ + local_irq_disable(); + smp_send_stop(); + do_kernel_power_off(); +} + +/* + * Restart requires that the secondary CPUs stop performing any activity + * while the primary CPU resets the system. Systems with multiple CPUs must + * provide a HW restart implementation, to ensure that all CPUs reset at once. + * This is required so that any code running after reset on the primary CPU + * doesn't have to co-ordinate with other CPUs to ensure they aren't still + * executing pre-reset code, and using RAM that the primary CPU's code wishes + * to use. Implementing such co-ordination would be essentially impossible. + */ +void machine_restart(char *cmd) +{ + /* Disable interrupts first */ + local_irq_disable(); + smp_send_stop(); + + /* + * UpdateCapsule() depends on the system being reset via + * ResetSystem(). + */ + if (efi_enabled(EFI_RUNTIME_SERVICES)) + efi_reboot(reboot_mode, NULL); + + /* Now call the architecture specific reboot code. */ + do_kernel_restart(cmd); + + /* + * Whoops - the architecture was unable to reboot. + */ + printk("Reboot failed -- System halted\n"); + while (1); +} + +#define bstr(suffix, str) [PSR_BTYPE_ ## suffix >> PSR_BTYPE_SHIFT] = str +static const char *const btypes[] = { + bstr(NONE, "--"), + bstr( JC, "jc"), + bstr( C, "-c"), + bstr( J , "j-") +}; +#undef bstr + +static void print_pstate(struct pt_regs *regs) +{ + u64 pstate = regs->pstate; + + if (compat_user_mode(regs)) { + printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c %cDIT %cSSBS)\n", + pstate, + pstate & PSR_AA32_N_BIT ? 'N' : 'n', + pstate & PSR_AA32_Z_BIT ? 
'Z' : 'z', + pstate & PSR_AA32_C_BIT ? 'C' : 'c', + pstate & PSR_AA32_V_BIT ? 'V' : 'v', + pstate & PSR_AA32_Q_BIT ? 'Q' : 'q', + pstate & PSR_AA32_T_BIT ? "T32" : "A32", + pstate & PSR_AA32_E_BIT ? "BE" : "LE", + pstate & PSR_AA32_A_BIT ? 'A' : 'a', + pstate & PSR_AA32_I_BIT ? 'I' : 'i', + pstate & PSR_AA32_F_BIT ? 'F' : 'f', + pstate & PSR_AA32_DIT_BIT ? '+' : '-', + pstate & PSR_AA32_SSBS_BIT ? '+' : '-'); + } else { + const char *btype_str = btypes[(pstate & PSR_BTYPE_MASK) >> + PSR_BTYPE_SHIFT]; + + printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO %cTCO %cDIT %cSSBS BTYPE=%s)\n", + pstate, + pstate & PSR_N_BIT ? 'N' : 'n', + pstate & PSR_Z_BIT ? 'Z' : 'z', + pstate & PSR_C_BIT ? 'C' : 'c', + pstate & PSR_V_BIT ? 'V' : 'v', + pstate & PSR_D_BIT ? 'D' : 'd', + pstate & PSR_A_BIT ? 'A' : 'a', + pstate & PSR_I_BIT ? 'I' : 'i', + pstate & PSR_F_BIT ? 'F' : 'f', + pstate & PSR_PAN_BIT ? '+' : '-', + pstate & PSR_UAO_BIT ? '+' : '-', + pstate & PSR_TCO_BIT ? '+' : '-', + pstate & PSR_DIT_BIT ? '+' : '-', + pstate & PSR_SSBS_BIT ? '+' : '-', + btype_str); + } +} + +void __show_regs(struct pt_regs *regs) +{ + int i, top_reg; + u64 lr, sp; + + if (compat_user_mode(regs)) { + lr = regs->compat_lr; + sp = regs->compat_sp; + top_reg = 12; + } else { + lr = regs->regs[30]; + sp = regs->sp; + top_reg = 29; + } + + show_regs_print_info(KERN_DEFAULT); + print_pstate(regs); + + if (!user_mode(regs)) { + printk("pc : %pS\n", (void *)regs->pc); + printk("lr : %pS\n", (void *)ptrauth_strip_kernel_insn_pac(lr)); + } else { + printk("pc : %016llx\n", regs->pc); + printk("lr : %016llx\n", lr); + } + + printk("sp : %016llx\n", sp); + + if (system_uses_irq_prio_masking()) + printk("pmr_save: %08llx\n", regs->pmr_save); + + i = top_reg; + + while (i >= 0) { + printk("x%-2d: %016llx", i, regs->regs[i]); + + while (i-- % 3) + pr_cont(" x%-2d: %016llx", i, regs->regs[i]); + + pr_cont("\n"); + } +} + +void show_regs(struct pt_regs *regs) +{ + __show_regs(regs); + dump_backtrace(regs, NULL, KERN_DEFAULT); +} + +static void tls_thread_flush(void) +{ + write_sysreg(0, tpidr_el0); + if (system_supports_tpidr2()) + write_sysreg_s(0, SYS_TPIDR2_EL0); + + if (is_compat_task()) { + current->thread.uw.tp_value = 0; + + /* + * We need to ensure ordering between the shadow state and the + * hardware state, so that we don't corrupt the hardware state + * with a stale shadow state during context switch. + */ + barrier(); + write_sysreg(0, tpidrro_el0); + } +} + +static void flush_tagged_addr_state(void) +{ + if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI)) + clear_thread_flag(TIF_TAGGED_ADDR); +} + +void flush_thread(void) +{ + fpsimd_flush_thread(); + tls_thread_flush(); + flush_ptrace_hw_breakpoint(current); + flush_tagged_addr_state(); +} + +void arch_release_task_struct(struct task_struct *tsk) +{ + fpsimd_release_task(tsk); +} + +int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) +{ + if (current->mm) + fpsimd_preserve_current_state(); + *dst = *src; + + /* We rely on the above assignment to initialize dst's thread_flags: */ + BUILD_BUG_ON(!IS_ENABLED(CONFIG_THREAD_INFO_IN_TASK)); + + /* + * Detach src's sve_state (if any) from dst so that it does not + * get erroneously used or freed prematurely. dst's copies + * will be allocated on demand later on if dst uses SVE. 
+ * For consistency, also clear TIF_SVE here: this could be done + * later in copy_process(), but to avoid tripping up future + * maintainers it is best not to leave TIF flags and buffers in + * an inconsistent state, even temporarily. + */ + dst->thread.sve_state = NULL; + clear_tsk_thread_flag(dst, TIF_SVE); + + /* + * In the unlikely event that we create a new thread with ZA + * enabled we should retain the ZA and ZT state so duplicate + * it here. This may be shortly freed if we exec() or if + * CLONE_SETTLS but it's simpler to do it here. To avoid + * confusing the rest of the code ensure that we have a + * sve_state allocated whenever sme_state is allocated. + */ + if (thread_za_enabled(&src->thread)) { + dst->thread.sve_state = kzalloc(sve_state_size(src), + GFP_KERNEL); + if (!dst->thread.sve_state) + return -ENOMEM; + + dst->thread.sme_state = kmemdup(src->thread.sme_state, + sme_state_size(src), + GFP_KERNEL); + if (!dst->thread.sme_state) { + kfree(dst->thread.sve_state); + dst->thread.sve_state = NULL; + return -ENOMEM; + } + } else { + dst->thread.sme_state = NULL; + clear_tsk_thread_flag(dst, TIF_SME); + } + + dst->thread.fp_type = FP_STATE_FPSIMD; + + /* clear any pending asynchronous tag fault raised by the parent */ + clear_tsk_thread_flag(dst, TIF_MTE_ASYNC_FAULT); + + return 0; +} + +asmlinkage void ret_from_fork(void) asm("ret_from_fork"); + +int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) +{ + unsigned long clone_flags = args->flags; + unsigned long stack_start = args->stack; + unsigned long tls = args->tls; + struct pt_regs *childregs = task_pt_regs(p); + + memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context)); + + /* + * In case p was allocated the same task_struct pointer as some + * other recently-exited task, make sure p is disassociated from + * any cpu that may have run that now-exited task recently. + * Otherwise we could erroneously skip reloading the FPSIMD + * registers for p. + */ + fpsimd_flush_task_state(p); + + ptrauth_thread_init_kernel(p); + + if (likely(!args->fn)) { + *childregs = *current_pt_regs(); + childregs->regs[0] = 0; + + /* + * Read the current TLS pointer from tpidr_el0 as it may be + * out-of-sync with the saved value. + */ + *task_user_tls(p) = read_sysreg(tpidr_el0); + if (system_supports_tpidr2()) + p->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0); + + if (stack_start) { + if (is_compat_thread(task_thread_info(p))) + childregs->compat_sp = stack_start; + else + childregs->sp = stack_start; + } + + /* + * If a TLS pointer was passed to clone, use it for the new + * thread. We also reset TPIDR2 if it's in use. + */ + if (clone_flags & CLONE_SETTLS) { + p->thread.uw.tp_value = tls; + p->thread.tpidr2_el0 = 0; + } + } else { + /* + * A kthread has no context to ERET to, so ensure any buggy + * ERET is treated as an illegal exception return. + * + * When a user task is created from a kthread, childregs will + * be initialized by start_thread() or start_compat_thread(). + */ + memset(childregs, 0, sizeof(struct pt_regs)); + childregs->pstate = PSR_MODE_EL1h | PSR_IL_BIT; + + p->thread.cpu_context.x19 = (unsigned long)args->fn; + p->thread.cpu_context.x20 = (unsigned long)args->fn_arg; + } + p->thread.cpu_context.pc = (unsigned long)ret_from_fork; + p->thread.cpu_context.sp = (unsigned long)childregs; + /* + * For the benefit of the unwinder, set up childregs->stackframe + * as the final frame for the new task. 
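+ *
+ * As a trivial userspace illustration of the childregs->regs[0] = 0
+ * assignment earlier in this function: x0 carries the syscall return
+ * value, so the child observes fork()/clone() returning 0 while the
+ * parent receives the new pid (a hedged sketch, nothing arm64-specific
+ * beyond the register convention):
+ *
+ *	#include <stdio.h>
+ *	#include <sys/wait.h>
+ *	#include <unistd.h>
+ *
+ *	int main(void)
+ *	{
+ *		pid_t pid = fork();	// child's x0 was zeroed in copy_thread()
+ *
+ *		if (pid == 0)		// only the child sees 0 here
+ *			_exit(0);
+ *
+ *		printf("parent: new child is pid %d\n", (int)pid);
+ *		waitpid(pid, NULL, 0);
+ *		return 0;
+ *	}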
+ */ + p->thread.cpu_context.fp = (unsigned long)childregs->stackframe; + + ptrace_hw_copy_thread(p); + + return 0; +} + +void tls_preserve_current_state(void) +{ + *task_user_tls(current) = read_sysreg(tpidr_el0); + if (system_supports_tpidr2() && !is_compat_task()) + current->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0); +} + +static void tls_thread_switch(struct task_struct *next) +{ + tls_preserve_current_state(); + + if (is_compat_thread(task_thread_info(next))) + write_sysreg(next->thread.uw.tp_value, tpidrro_el0); + else if (!arm64_kernel_unmapped_at_el0()) + write_sysreg(0, tpidrro_el0); + + write_sysreg(*task_user_tls(next), tpidr_el0); + if (system_supports_tpidr2()) + write_sysreg_s(next->thread.tpidr2_el0, SYS_TPIDR2_EL0); +} + +/* + * Force SSBS state on context-switch, since it may be lost after migrating + * from a CPU which treats the bit as RES0 in a heterogeneous system. + */ +static void ssbs_thread_switch(struct task_struct *next) +{ + /* + * Nothing to do for kernel threads, but 'regs' may be junk + * (e.g. idle task) so check the flags and bail early. + */ + if (unlikely(next->flags & PF_KTHREAD)) + return; + + /* + * If all CPUs implement the SSBS extension, then we just need to + * context-switch the PSTATE field. + */ + if (cpus_have_const_cap(ARM64_SSBS)) + return; + + spectre_v4_enable_task_mitigation(next); +} + +/* + * We store our current task in sp_el0, which is clobbered by userspace. Keep a + * shadow copy so that we can restore this upon entry from userspace. + * + * This is *only* for exception entry from EL0, and is not valid until we + * __switch_to() a user task. + */ +DEFINE_PER_CPU(struct task_struct *, __entry_task); + +static void entry_task_switch(struct task_struct *next) +{ + __this_cpu_write(__entry_task, next); +} + +/* + * ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT. + * Ensure access is disabled when switching to a 32bit task, ensure + * access is enabled when switching to a 64bit task. + */ +static void erratum_1418040_thread_switch(struct task_struct *next) +{ + if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) || + !this_cpu_has_cap(ARM64_WORKAROUND_1418040)) + return; + + if (is_compat_thread(task_thread_info(next))) + sysreg_clear_set(cntkctl_el1, ARCH_TIMER_USR_VCT_ACCESS_EN, 0); + else + sysreg_clear_set(cntkctl_el1, 0, ARCH_TIMER_USR_VCT_ACCESS_EN); +} + +static void erratum_1418040_new_exec(void) +{ + preempt_disable(); + erratum_1418040_thread_switch(current); + preempt_enable(); +} + +/* + * __switch_to() checks current->thread.sctlr_user as an optimisation. Therefore + * this function must be called with preemption disabled and the update to + * sctlr_user must be made in the same preemption disabled block so that + * __switch_to() does not see the variable update before the SCTLR_EL1 one. + */ +void update_sctlr_el1(u64 sctlr) +{ + /* + * EnIA must not be cleared while in the kernel as this is necessary for + * in-kernel PAC. It will be cleared on kernel exit if needed. + */ + sysreg_clear_set(sctlr_el1, SCTLR_USER_MASK & ~SCTLR_ELx_ENIA, sctlr); + + /* ISB required for the kernel uaccess routines when setting TCF0. */ + isb(); +} + +/* + * Thread switching. 
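+ *
+ * __switch_to() below is the architecture-specific half of a context
+ * switch: it hands the FPSIMD/SVE, TLS, hardware-breakpoint, CONTEXTIDR,
+ * entry-task, SSBS, erratum-1418040, pointer-auth and MTE state over to
+ * 'next' before cpu_switch_to() swaps the integer register context and
+ * kernel stack.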
+ */ +__notrace_funcgraph __sched +struct task_struct *__switch_to(struct task_struct *prev, + struct task_struct *next) +{ + struct task_struct *last; + + fpsimd_thread_switch(next); + tls_thread_switch(next); + hw_breakpoint_thread_switch(next); + contextidr_thread_switch(next); + entry_task_switch(next); + ssbs_thread_switch(next); + erratum_1418040_thread_switch(next); + ptrauth_thread_switch_user(next); + + /* + * Complete any pending TLB or cache maintenance on this CPU in case + * the thread migrates to a different CPU. + * This full barrier is also required by the membarrier system + * call. + */ + dsb(ish); + + /* + * MTE thread switching must happen after the DSB above to ensure that + * any asynchronous tag check faults have been logged in the TFSR*_EL1 + * registers. + */ + mte_thread_switch(next); + /* avoid expensive SCTLR_EL1 accesses if no change */ + if (prev->thread.sctlr_user != next->thread.sctlr_user) + update_sctlr_el1(next->thread.sctlr_user); + + /* the actual thread switch */ + last = cpu_switch_to(prev, next); + + return last; +} + +struct wchan_info { + unsigned long pc; + int count; +}; + +static bool get_wchan_cb(void *arg, unsigned long pc) +{ + struct wchan_info *wchan_info = arg; + + if (!in_sched_functions(pc)) { + wchan_info->pc = pc; + return false; + } + return wchan_info->count++ < 16; +} + +unsigned long __get_wchan(struct task_struct *p) +{ + struct wchan_info wchan_info = { + .pc = 0, + .count = 0, + }; + + if (!try_get_task_stack(p)) + return 0; + + arch_stack_walk(get_wchan_cb, &wchan_info, p, NULL); + + put_task_stack(p); + + return wchan_info.pc; +} + +unsigned long arch_align_stack(unsigned long sp) +{ + if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) + sp -= get_random_u32_below(PAGE_SIZE); + return sp & ~0xf; +} + +#ifdef CONFIG_COMPAT +int compat_elf_check_arch(const struct elf32_hdr *hdr) +{ + if (!system_supports_32bit_el0()) + return false; + + if ((hdr)->e_machine != EM_ARM) + return false; + + if (!((hdr)->e_flags & EF_ARM_EABI_MASK)) + return false; + + /* + * Prevent execve() of a 32-bit program from a deadline task + * if the restricted affinity mask would be inadmissible on an + * asymmetric system. + */ + return !static_branch_unlikely(&arm64_mismatched_32bit_el0) || + !dl_task_check_affinity(current, system_32bit_el0_cpumask()); +} +#endif + +/* + * Called from setup_new_exec() after (COMPAT_)SET_PERSONALITY. + */ +void arch_setup_new_exec(void) +{ + unsigned long mmflags = 0; + + if (is_compat_task()) { + mmflags = MMCF_AARCH32; + + /* + * Restrict the CPU affinity mask for a 32-bit task so that + * it contains only 32-bit-capable CPUs. + * + * From the perspective of the task, this looks similar to + * what would happen if the 64-bit-only CPUs were hot-unplugged + * at the point of execve(), although we try a bit harder to + * honour the cpuset hierarchy. + */ + if (static_branch_unlikely(&arm64_mismatched_32bit_el0)) + force_compatible_cpus_allowed_ptr(current); + } else if (static_branch_unlikely(&arm64_mismatched_32bit_el0)) { + relax_compatible_cpus_allowed_ptr(current); + } + + current->mm->context.flags = mmflags; + ptrauth_thread_init_user(); + mte_thread_init_user(); + erratum_1418040_new_exec(); + + if (task_spec_ssb_noexec(current)) { + arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS, + PR_SPEC_ENABLE); + } +} + +#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI +/* + * Control the relaxed ABI allowing tagged user addresses into the kernel. 
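+ *
+ * As a sketch of how a task opts in from userspace (assuming uapi/libc
+ * headers that expose PR_SET_TAGGED_ADDR_CTRL; the request is serviced
+ * by set_tagged_addr_ctrl() below): once enabled, syscalls accept
+ * pointers carrying an arbitrary tag in bits 63:56.
+ *
+ *	#include <linux/prctl.h>
+ *	#include <stdio.h>
+ *	#include <string.h>
+ *	#include <sys/prctl.h>
+ *	#include <unistd.h>
+ *
+ *	int main(void)
+ *	{
+ *		char buf[] = "hello, tagged world\n";
+ *		char *tagged = (char *)((unsigned long)buf | (0x2aUL << 56));
+ *
+ *		if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE,
+ *			  0, 0, 0)) {
+ *			perror("PR_SET_TAGGED_ADDR_CTRL");
+ *			return 1;
+ *		}
+ *
+ *		// The tagged pointer is now a valid syscall argument.
+ *		return write(STDOUT_FILENO, tagged, strlen(buf)) < 0;
+ *	}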
+ */ +static unsigned int tagged_addr_disabled; + +long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg) +{ + unsigned long valid_mask = PR_TAGGED_ADDR_ENABLE; + struct thread_info *ti = task_thread_info(task); + + if (is_compat_thread(ti)) + return -EINVAL; + + if (system_supports_mte()) + valid_mask |= PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC \ + | PR_MTE_TAG_MASK; + + if (arg & ~valid_mask) + return -EINVAL; + + /* + * Do not allow the enabling of the tagged address ABI if globally + * disabled via sysctl abi.tagged_addr_disabled. + */ + if (arg & PR_TAGGED_ADDR_ENABLE && tagged_addr_disabled) + return -EINVAL; + + if (set_mte_ctrl(task, arg) != 0) + return -EINVAL; + + update_ti_thread_flag(ti, TIF_TAGGED_ADDR, arg & PR_TAGGED_ADDR_ENABLE); + + return 0; +} + +long get_tagged_addr_ctrl(struct task_struct *task) +{ + long ret = 0; + struct thread_info *ti = task_thread_info(task); + + if (is_compat_thread(ti)) + return -EINVAL; + + if (test_ti_thread_flag(ti, TIF_TAGGED_ADDR)) + ret = PR_TAGGED_ADDR_ENABLE; + + ret |= get_mte_ctrl(task); + + return ret; +} + +/* + * Global sysctl to disable the tagged user addresses support. This control + * only prevents the tagged address ABI enabling via prctl() and does not + * disable it for tasks that already opted in to the relaxed ABI. + */ + +static struct ctl_table tagged_addr_sysctl_table[] = { + { + .procname = "tagged_addr_disabled", + .mode = 0644, + .data = &tagged_addr_disabled, + .maxlen = sizeof(int), + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, + { } +}; + +static int __init tagged_addr_init(void) +{ + if (!register_sysctl("abi", tagged_addr_sysctl_table)) + return -EINVAL; + return 0; +} + +core_initcall(tagged_addr_init); +#endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */ + +#ifdef CONFIG_BINFMT_ELF +int arch_elf_adjust_prot(int prot, const struct arch_elf_state *state, + bool has_interp, bool is_interp) +{ + /* + * For dynamically linked executables the interpreter is + * responsible for setting PROT_BTI on everything except + * itself. + */ + if (is_interp != has_interp) + return prot; + + if (!(state->flags & ARM64_ELF_BTI)) + return prot; + + if (prot & PROT_EXEC) + prot |= PROT_BTI; + + return prot; +} +#endif diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c new file mode 100644 index 0000000000..05f40c4e18 --- /dev/null +++ b/arch/arm64/kernel/proton-pack.c @@ -0,0 +1,1156 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Handle detection, reporting and mitigation of Spectre v1, v2, v3a and v4, as + * detailed at: + * + * https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability + * + * This code was originally written hastily under an awful lot of stress and so + * aspects of it are somewhat hacky. Unfortunately, changing anything in here + * instantly makes me feel ill. Thanks, Jann. Thann. + * + * Copyright (C) 2018 ARM Ltd, All Rights Reserved. + * Copyright (C) 2020 Google LLC + * + * "If there's something strange in your neighbourhood, who you gonna call?" + * + * Authors: Will Deacon and Marc Zyngier + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +/* + * We try to ensure that the mitigation state can never change as the result of + * onlining a late CPU. 
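+ *
+ * The states tracked here are reported through the cpu_show_*() handlers
+ * below; as a small usage sketch (the sample output is just one of the
+ * strings those handlers can produce), userspace only has to read the
+ * sysfs vulnerabilities files:
+ *
+ *	#include <stdio.h>
+ *
+ *	int main(void)
+ *	{
+ *		char line[128];
+ *		FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v2", "r");
+ *
+ *		if (!f)
+ *			return 1;
+ *		if (fgets(line, sizeof(line), f))
+ *			// e.g. "Mitigation: Branch predictor hardening, BHB"
+ *			fputs(line, stdout);
+ *		fclose(f);
+ *		return 0;
+ *	}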
+ */ +static void update_mitigation_state(enum mitigation_state *oldp, + enum mitigation_state new) +{ + enum mitigation_state state; + + do { + state = READ_ONCE(*oldp); + if (new <= state) + break; + + /* Userspace almost certainly can't deal with this. */ + if (WARN_ON(system_capabilities_finalized())) + break; + } while (cmpxchg_relaxed(oldp, state, new) != state); +} + +/* + * Spectre v1. + * + * The kernel can't protect userspace for this one: it's each person for + * themselves. Advertise what we're doing and be done with it. + */ +ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "Mitigation: __user pointer sanitization\n"); +} + +/* + * Spectre v2. + * + * This one sucks. A CPU is either: + * + * - Mitigated in hardware and advertised by ID_AA64PFR0_EL1.CSV2. + * - Mitigated in hardware and listed in our "safe list". + * - Mitigated in software by firmware. + * - Mitigated in software by a CPU-specific dance in the kernel and a + * firmware call at EL2. + * - Vulnerable. + * + * It's not unlikely for different CPUs in a big.LITTLE system to fall into + * different camps. + */ +static enum mitigation_state spectre_v2_state; + +static bool __read_mostly __nospectre_v2; +static int __init parse_spectre_v2_param(char *str) +{ + __nospectre_v2 = true; + return 0; +} +early_param("nospectre_v2", parse_spectre_v2_param); + +static bool spectre_v2_mitigations_off(void) +{ + bool ret = __nospectre_v2 || cpu_mitigations_off(); + + if (ret) + pr_info_once("spectre-v2 mitigation disabled by command line option\n"); + + return ret; +} + +static const char *get_bhb_affected_string(enum mitigation_state bhb_state) +{ + switch (bhb_state) { + case SPECTRE_UNAFFECTED: + return ""; + default: + case SPECTRE_VULNERABLE: + return ", but not BHB"; + case SPECTRE_MITIGATED: + return ", BHB"; + } +} + +static bool _unprivileged_ebpf_enabled(void) +{ +#ifdef CONFIG_BPF_SYSCALL + return !sysctl_unprivileged_bpf_disabled; +#else + return false; +#endif +} + +ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, + char *buf) +{ + enum mitigation_state bhb_state = arm64_get_spectre_bhb_state(); + const char *bhb_str = get_bhb_affected_string(bhb_state); + const char *v2_str = "Branch predictor hardening"; + + switch (spectre_v2_state) { + case SPECTRE_UNAFFECTED: + if (bhb_state == SPECTRE_UNAFFECTED) + return sprintf(buf, "Not affected\n"); + + /* + * Platforms affected by Spectre-BHB can't report + * "Not affected" for Spectre-v2. 
+ */ + v2_str = "CSV2"; + fallthrough; + case SPECTRE_MITIGATED: + if (bhb_state == SPECTRE_MITIGATED && _unprivileged_ebpf_enabled()) + return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n"); + + return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str); + case SPECTRE_VULNERABLE: + fallthrough; + default: + return sprintf(buf, "Vulnerable\n"); + } +} + +static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void) +{ + u64 pfr0; + static const struct midr_range spectre_v2_safe_list[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_A35), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A53), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A55), + MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53), + MIDR_ALL_VERSIONS(MIDR_HISI_TSV110), + MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER), + MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER), + MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER), + { /* sentinel */ } + }; + + /* If the CPU has CSV2 set, we're safe */ + pfr0 = read_cpuid(ID_AA64PFR0_EL1); + if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_CSV2_SHIFT)) + return SPECTRE_UNAFFECTED; + + /* Alternatively, we have a list of unaffected CPUs */ + if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list)) + return SPECTRE_UNAFFECTED; + + return SPECTRE_VULNERABLE; +} + +static enum mitigation_state spectre_v2_get_cpu_fw_mitigation_state(void) +{ + int ret; + struct arm_smccc_res res; + + arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, + ARM_SMCCC_ARCH_WORKAROUND_1, &res); + + ret = res.a0; + switch (ret) { + case SMCCC_RET_SUCCESS: + return SPECTRE_MITIGATED; + case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED: + return SPECTRE_UNAFFECTED; + default: + fallthrough; + case SMCCC_RET_NOT_SUPPORTED: + return SPECTRE_VULNERABLE; + } +} + +bool has_spectre_v2(const struct arm64_cpu_capabilities *entry, int scope) +{ + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); + + if (spectre_v2_get_cpu_hw_mitigation_state() == SPECTRE_UNAFFECTED) + return false; + + if (spectre_v2_get_cpu_fw_mitigation_state() == SPECTRE_UNAFFECTED) + return false; + + return true; +} + +enum mitigation_state arm64_get_spectre_v2_state(void) +{ + return spectre_v2_state; +} + +DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data); + +static void install_bp_hardening_cb(bp_hardening_cb_t fn) +{ + __this_cpu_write(bp_hardening_data.fn, fn); + + /* + * Vinz Clortho takes the hyp_vecs start/end "keys" at + * the door when we're a guest. Skip the hyp-vectors work. + */ + if (!is_hyp_mode_available()) + return; + + __this_cpu_write(bp_hardening_data.slot, HYP_VECTOR_SPECTRE_DIRECT); +} + +/* Called during entry so must be noinstr */ +static noinstr void call_smc_arch_workaround_1(void) +{ + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); +} + +/* Called during entry so must be noinstr */ +static noinstr void call_hvc_arch_workaround_1(void) +{ + arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); +} + +/* Called during entry so must be noinstr */ +static noinstr void qcom_link_stack_sanitisation(void) +{ + u64 tmp; + + asm volatile("mov %0, x30 \n" + ".rept 16 \n" + "bl . 
+ 4 \n" + ".endr \n" + "mov x30, %0 \n" + : "=&r" (tmp)); +} + +static bp_hardening_cb_t spectre_v2_get_sw_mitigation_cb(void) +{ + u32 midr = read_cpuid_id(); + if (((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR) && + ((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR_V1)) + return NULL; + + return qcom_link_stack_sanitisation; +} + +static enum mitigation_state spectre_v2_enable_fw_mitigation(void) +{ + bp_hardening_cb_t cb; + enum mitigation_state state; + + state = spectre_v2_get_cpu_fw_mitigation_state(); + if (state != SPECTRE_MITIGATED) + return state; + + if (spectre_v2_mitigations_off()) + return SPECTRE_VULNERABLE; + + switch (arm_smccc_1_1_get_conduit()) { + case SMCCC_CONDUIT_HVC: + cb = call_hvc_arch_workaround_1; + break; + + case SMCCC_CONDUIT_SMC: + cb = call_smc_arch_workaround_1; + break; + + default: + return SPECTRE_VULNERABLE; + } + + /* + * Prefer a CPU-specific workaround if it exists. Note that we + * still rely on firmware for the mitigation at EL2. + */ + cb = spectre_v2_get_sw_mitigation_cb() ?: cb; + install_bp_hardening_cb(cb); + return SPECTRE_MITIGATED; +} + +void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused) +{ + enum mitigation_state state; + + WARN_ON(preemptible()); + + state = spectre_v2_get_cpu_hw_mitigation_state(); + if (state == SPECTRE_VULNERABLE) + state = spectre_v2_enable_fw_mitigation(); + + update_mitigation_state(&spectre_v2_state, state); +} + +/* + * Spectre-v3a. + * + * Phew, there's not an awful lot to do here! We just instruct EL2 to use + * an indirect trampoline for the hyp vectors so that guests can't read + * VBAR_EL2 to defeat randomisation of the hypervisor VA layout. + */ +bool has_spectre_v3a(const struct arm64_cpu_capabilities *entry, int scope) +{ + static const struct midr_range spectre_v3a_unsafe_list[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), + {}, + }; + + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); + return is_midr_in_range_list(read_cpuid_id(), spectre_v3a_unsafe_list); +} + +void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused) +{ + struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data); + + if (this_cpu_has_cap(ARM64_SPECTRE_V3A)) + data->slot += HYP_VECTOR_INDIRECT; +} + +/* + * Spectre v4. + * + * If you thought Spectre v2 was nasty, wait until you see this mess. A CPU is + * either: + * + * - Mitigated in hardware and listed in our "safe list". + * - Mitigated in hardware via PSTATE.SSBS. + * - Mitigated in software by firmware (sometimes referred to as SSBD). + * + * Wait, that doesn't sound so bad, does it? Keep reading... + * + * A major source of headaches is that the software mitigation is enabled both + * on a per-task basis, but can also be forced on for the kernel, necessitating + * both context-switch *and* entry/exit hooks. To make it even worse, some CPUs + * allow EL0 to toggle SSBS directly, which can end up with the prctl() state + * being stale when re-entering the kernel. The usual big.LITTLE caveats apply, + * so you can have systems that have both firmware and SSBS mitigations. This + * means we actually have to reject late onlining of CPUs with mitigations if + * all of the currently onlined CPUs are safelisted, as the mitigation tends to + * be opt-in for userspace. Yes, really, the cure is worse than the disease. 
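+ *
+ * A hedged sketch of the per-task prctl() control mentioned above, as
+ * seen from userspace (assuming uapi/libc headers that expose
+ * PR_SET_SPECULATION_CTRL; the requests land in ssbd_prctl_set() and
+ * ssbd_prctl_get() further down via arch_prctl_spec_ctrl_set()/_get()):
+ *
+ *	#include <linux/prctl.h>
+ *	#include <stdio.h>
+ *	#include <sys/prctl.h>
+ *
+ *	int main(void)
+ *	{
+ *		// Ask for speculative store bypass to be disabled for this task.
+ *		if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
+ *			  PR_SPEC_DISABLE, 0, 0))
+ *			perror("PR_SET_SPECULATION_CTRL");
+ *
+ *		// With the dynamic mitigation this reports
+ *		// PR_SPEC_PRCTL | PR_SPEC_DISABLE.
+ *		printf("ssb state: %#x\n",
+ *		       prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
+ *			     0, 0, 0));
+ *		return 0;
+ *	}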
+ * + * The only good part is that if the firmware mitigation is present, then it is + * present for all CPUs, meaning we don't have to worry about late onlining of a + * vulnerable CPU if one of the boot CPUs is using the firmware mitigation. + * + * Give me a VAX-11/780 any day of the week... + */ +static enum mitigation_state spectre_v4_state; + +/* This is the per-cpu state tracking whether we need to talk to firmware */ +DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required); + +enum spectre_v4_policy { + SPECTRE_V4_POLICY_MITIGATION_DYNAMIC, + SPECTRE_V4_POLICY_MITIGATION_ENABLED, + SPECTRE_V4_POLICY_MITIGATION_DISABLED, +}; + +static enum spectre_v4_policy __read_mostly __spectre_v4_policy; + +static const struct spectre_v4_param { + const char *str; + enum spectre_v4_policy policy; +} spectre_v4_params[] = { + { "force-on", SPECTRE_V4_POLICY_MITIGATION_ENABLED, }, + { "force-off", SPECTRE_V4_POLICY_MITIGATION_DISABLED, }, + { "kernel", SPECTRE_V4_POLICY_MITIGATION_DYNAMIC, }, +}; +static int __init parse_spectre_v4_param(char *str) +{ + int i; + + if (!str || !str[0]) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(spectre_v4_params); i++) { + const struct spectre_v4_param *param = &spectre_v4_params[i]; + + if (strncmp(str, param->str, strlen(param->str))) + continue; + + __spectre_v4_policy = param->policy; + return 0; + } + + return -EINVAL; +} +early_param("ssbd", parse_spectre_v4_param); + +/* + * Because this was all written in a rush by people working in different silos, + * we've ended up with multiple command line options to control the same thing. + * Wrap these up in some helpers, which prefer disabling the mitigation if faced + * with contradictory parameters. The mitigation is always either "off", + * "dynamic" or "on". + */ +static bool spectre_v4_mitigations_off(void) +{ + bool ret = cpu_mitigations_off() || + __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DISABLED; + + if (ret) + pr_info_once("spectre-v4 mitigation disabled by command-line option\n"); + + return ret; +} + +/* Do we need to toggle the mitigation state on entry to/exit from the kernel? 
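+ * Booting with "ssbd=force-on" selects SPECTRE_V4_POLICY_MITIGATION_ENABLED
+ * via the spectre_v4_params[] table above, "ssbd=force-off" selects
+ * SPECTRE_V4_POLICY_MITIGATION_DISABLED, and "ssbd=kernel" selects the
+ * dynamic policy that the entry/exit toggling below is about.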
*/ +static bool spectre_v4_mitigations_dynamic(void) +{ + return !spectre_v4_mitigations_off() && + __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DYNAMIC; +} + +static bool spectre_v4_mitigations_on(void) +{ + return !spectre_v4_mitigations_off() && + __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_ENABLED; +} + +ssize_t cpu_show_spec_store_bypass(struct device *dev, + struct device_attribute *attr, char *buf) +{ + switch (spectre_v4_state) { + case SPECTRE_UNAFFECTED: + return sprintf(buf, "Not affected\n"); + case SPECTRE_MITIGATED: + return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n"); + case SPECTRE_VULNERABLE: + fallthrough; + default: + return sprintf(buf, "Vulnerable\n"); + } +} + +enum mitigation_state arm64_get_spectre_v4_state(void) +{ + return spectre_v4_state; +} + +static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void) +{ + static const struct midr_range spectre_v4_safe_list[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_A35), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A53), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A55), + MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53), + MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER), + MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER), + { /* sentinel */ }, + }; + + if (is_midr_in_range_list(read_cpuid_id(), spectre_v4_safe_list)) + return SPECTRE_UNAFFECTED; + + /* CPU features are detected first */ + if (this_cpu_has_cap(ARM64_SSBS)) + return SPECTRE_MITIGATED; + + return SPECTRE_VULNERABLE; +} + +static enum mitigation_state spectre_v4_get_cpu_fw_mitigation_state(void) +{ + int ret; + struct arm_smccc_res res; + + arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, + ARM_SMCCC_ARCH_WORKAROUND_2, &res); + + ret = res.a0; + switch (ret) { + case SMCCC_RET_SUCCESS: + return SPECTRE_MITIGATED; + case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED: + fallthrough; + case SMCCC_RET_NOT_REQUIRED: + return SPECTRE_UNAFFECTED; + default: + fallthrough; + case SMCCC_RET_NOT_SUPPORTED: + return SPECTRE_VULNERABLE; + } +} + +bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope) +{ + enum mitigation_state state; + + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); + + state = spectre_v4_get_cpu_hw_mitigation_state(); + if (state == SPECTRE_VULNERABLE) + state = spectre_v4_get_cpu_fw_mitigation_state(); + + return state != SPECTRE_UNAFFECTED; +} + +bool try_emulate_el1_ssbs(struct pt_regs *regs, u32 instr) +{ + const u32 instr_mask = ~(1U << PSTATE_Imm_shift); + const u32 instr_val = 0xd500401f | PSTATE_SSBS; + + if ((instr & instr_mask) != instr_val) + return false; + + if (instr & BIT(PSTATE_Imm_shift)) + regs->pstate |= PSR_SSBS_BIT; + else + regs->pstate &= ~PSR_SSBS_BIT; + + arm64_skip_faulting_instruction(regs, 4); + return true; +} + +static enum mitigation_state spectre_v4_enable_hw_mitigation(void) +{ + enum mitigation_state state; + + /* + * If the system is mitigated but this CPU doesn't have SSBS, then + * we must be on the safelist and there's nothing more to do. + */ + state = spectre_v4_get_cpu_hw_mitigation_state(); + if (state != SPECTRE_MITIGATED || !this_cpu_has_cap(ARM64_SSBS)) + return state; + + if (spectre_v4_mitigations_off()) { + sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS); + set_pstate_ssbs(1); + return SPECTRE_VULNERABLE; + } + + /* SCTLR_EL1.DSSBS was initialised to 0 during boot */ + set_pstate_ssbs(0); + return SPECTRE_MITIGATED; +} + +/* + * Patch a branch over the Spectre-v4 mitigation code with a NOP so that + * we fallthrough and check whether firmware needs to be called on this CPU. 
+ */ +void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt, + __le32 *origptr, + __le32 *updptr, int nr_inst) +{ + BUG_ON(nr_inst != 1); /* Branch -> NOP */ + + if (spectre_v4_mitigations_off()) + return; + + if (cpus_have_cap(ARM64_SSBS)) + return; + + if (spectre_v4_mitigations_dynamic()) + *updptr = cpu_to_le32(aarch64_insn_gen_nop()); +} + +/* + * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction + * to call into firmware to adjust the mitigation state. + */ +void __init smccc_patch_fw_mitigation_conduit(struct alt_instr *alt, + __le32 *origptr, + __le32 *updptr, int nr_inst) +{ + u32 insn; + + BUG_ON(nr_inst != 1); /* NOP -> HVC/SMC */ + + switch (arm_smccc_1_1_get_conduit()) { + case SMCCC_CONDUIT_HVC: + insn = aarch64_insn_get_hvc_value(); + break; + case SMCCC_CONDUIT_SMC: + insn = aarch64_insn_get_smc_value(); + break; + default: + return; + } + + *updptr = cpu_to_le32(insn); +} + +static enum mitigation_state spectre_v4_enable_fw_mitigation(void) +{ + enum mitigation_state state; + + state = spectre_v4_get_cpu_fw_mitigation_state(); + if (state != SPECTRE_MITIGATED) + return state; + + if (spectre_v4_mitigations_off()) { + arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, false, NULL); + return SPECTRE_VULNERABLE; + } + + arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, true, NULL); + + if (spectre_v4_mitigations_dynamic()) + __this_cpu_write(arm64_ssbd_callback_required, 1); + + return SPECTRE_MITIGATED; +} + +void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused) +{ + enum mitigation_state state; + + WARN_ON(preemptible()); + + state = spectre_v4_enable_hw_mitigation(); + if (state == SPECTRE_VULNERABLE) + state = spectre_v4_enable_fw_mitigation(); + + update_mitigation_state(&spectre_v4_state, state); +} + +static void __update_pstate_ssbs(struct pt_regs *regs, bool state) +{ + u64 bit = compat_user_mode(regs) ? PSR_AA32_SSBS_BIT : PSR_SSBS_BIT; + + if (state) + regs->pstate |= bit; + else + regs->pstate &= ~bit; +} + +void spectre_v4_enable_task_mitigation(struct task_struct *tsk) +{ + struct pt_regs *regs = task_pt_regs(tsk); + bool ssbs = false, kthread = tsk->flags & PF_KTHREAD; + + if (spectre_v4_mitigations_off()) + ssbs = true; + else if (spectre_v4_mitigations_dynamic() && !kthread) + ssbs = !test_tsk_thread_flag(tsk, TIF_SSBD); + + __update_pstate_ssbs(regs, ssbs); +} + +/* + * The Spectre-v4 mitigation can be controlled via a prctl() from userspace. + * This is interesting because the "speculation disabled" behaviour can be + * configured so that it is preserved across exec(), which means that the + * prctl() may be necessary even when PSTATE.SSBS can be toggled directly + * from userspace. + */ +static void ssbd_prctl_enable_mitigation(struct task_struct *task) +{ + task_clear_spec_ssb_noexec(task); + task_set_spec_ssb_disable(task); + set_tsk_thread_flag(task, TIF_SSBD); +} + +static void ssbd_prctl_disable_mitigation(struct task_struct *task) +{ + task_clear_spec_ssb_noexec(task); + task_clear_spec_ssb_disable(task); + clear_tsk_thread_flag(task, TIF_SSBD); +} + +static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl) +{ + switch (ctrl) { + case PR_SPEC_ENABLE: + /* Enable speculation: disable mitigation */ + /* + * Force disabled speculation prevents it from being + * re-enabled. + */ + if (task_spec_ssb_force_disable(task)) + return -EPERM; + + /* + * If the mitigation is forced on, then speculation is forced + * off and we again prevent it from being re-enabled. 
+ */ + if (spectre_v4_mitigations_on()) + return -EPERM; + + ssbd_prctl_disable_mitigation(task); + break; + case PR_SPEC_FORCE_DISABLE: + /* Force disable speculation: force enable mitigation */ + /* + * If the mitigation is forced off, then speculation is forced + * on and we prevent it from being disabled. + */ + if (spectre_v4_mitigations_off()) + return -EPERM; + + task_set_spec_ssb_force_disable(task); + fallthrough; + case PR_SPEC_DISABLE: + /* Disable speculation: enable mitigation */ + /* Same as PR_SPEC_FORCE_DISABLE */ + if (spectre_v4_mitigations_off()) + return -EPERM; + + ssbd_prctl_enable_mitigation(task); + break; + case PR_SPEC_DISABLE_NOEXEC: + /* Disable speculation until execve(): enable mitigation */ + /* + * If the mitigation state is forced one way or the other, then + * we must fail now before we try to toggle it on execve(). + */ + if (task_spec_ssb_force_disable(task) || + spectre_v4_mitigations_off() || + spectre_v4_mitigations_on()) { + return -EPERM; + } + + ssbd_prctl_enable_mitigation(task); + task_set_spec_ssb_noexec(task); + break; + default: + return -ERANGE; + } + + spectre_v4_enable_task_mitigation(task); + return 0; +} + +int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, + unsigned long ctrl) +{ + switch (which) { + case PR_SPEC_STORE_BYPASS: + return ssbd_prctl_set(task, ctrl); + default: + return -ENODEV; + } +} + +static int ssbd_prctl_get(struct task_struct *task) +{ + switch (spectre_v4_state) { + case SPECTRE_UNAFFECTED: + return PR_SPEC_NOT_AFFECTED; + case SPECTRE_MITIGATED: + if (spectre_v4_mitigations_on()) + return PR_SPEC_NOT_AFFECTED; + + if (spectre_v4_mitigations_dynamic()) + break; + + /* Mitigations are disabled, so we're vulnerable. */ + fallthrough; + case SPECTRE_VULNERABLE: + fallthrough; + default: + return PR_SPEC_ENABLE; + } + + /* Check the mitigation state for this task */ + if (task_spec_ssb_force_disable(task)) + return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; + + if (task_spec_ssb_noexec(task)) + return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC; + + if (task_spec_ssb_disable(task)) + return PR_SPEC_PRCTL | PR_SPEC_DISABLE; + + return PR_SPEC_PRCTL | PR_SPEC_ENABLE; +} + +int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) +{ + switch (which) { + case PR_SPEC_STORE_BYPASS: + return ssbd_prctl_get(task); + default: + return -ENODEV; + } +} + +/* + * Spectre BHB. + * + * A CPU is either: + * - Mitigated by a branchy loop a CPU specific number of times, and listed + * in our "loop mitigated list". + * - Mitigated in software by the firmware Spectre v2 call. + * - Has the ClearBHB instruction to perform the mitigation. + * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no + * software mitigation in the vectors is needed. + * - Has CSV2.3, so is unaffected. + */ +static enum mitigation_state spectre_bhb_state; + +enum mitigation_state arm64_get_spectre_bhb_state(void) +{ + return spectre_bhb_state; +} + +enum bhb_mitigation_bits { + BHB_LOOP, + BHB_FW, + BHB_HW, + BHB_INSN, +}; +static unsigned long system_bhb_mitigations; + +/* + * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any + * SCOPE_SYSTEM call will give the right answer. 
+ */ +u8 spectre_bhb_loop_affected(int scope) +{ + u8 k = 0; + static u8 max_bhb_k; + + if (scope == SCOPE_LOCAL_CPU) { + static const struct midr_range spectre_bhb_k32_list[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_A78), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C), + MIDR_ALL_VERSIONS(MIDR_CORTEX_X1), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A710), + MIDR_ALL_VERSIONS(MIDR_CORTEX_X2), + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2), + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1), + {}, + }; + static const struct midr_range spectre_bhb_k24_list[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_A76), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A77), + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1), + {}, + }; + static const struct midr_range spectre_bhb_k11_list[] = { + MIDR_ALL_VERSIONS(MIDR_AMPERE1), + {}, + }; + static const struct midr_range spectre_bhb_k8_list[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), + {}, + }; + + if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list)) + k = 32; + else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list)) + k = 24; + else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list)) + k = 11; + else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list)) + k = 8; + + max_bhb_k = max(max_bhb_k, k); + } else { + k = max_bhb_k; + } + + return k; +} + +static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void) +{ + int ret; + struct arm_smccc_res res; + + arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, + ARM_SMCCC_ARCH_WORKAROUND_3, &res); + + ret = res.a0; + switch (ret) { + case SMCCC_RET_SUCCESS: + return SPECTRE_MITIGATED; + case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED: + return SPECTRE_UNAFFECTED; + default: + fallthrough; + case SMCCC_RET_NOT_SUPPORTED: + return SPECTRE_VULNERABLE; + } +} + +static bool is_spectre_bhb_fw_affected(int scope) +{ + static bool system_affected; + enum mitigation_state fw_state; + bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE; + static const struct midr_range spectre_bhb_firmware_mitigated_list[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A75), + {}, + }; + bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(), + spectre_bhb_firmware_mitigated_list); + + if (scope != SCOPE_LOCAL_CPU) + return system_affected; + + fw_state = spectre_bhb_get_cpu_fw_mitigation_state(); + if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) { + system_affected = true; + return true; + } + + return false; +} + +static bool supports_ecbhb(int scope) +{ + u64 mmfr1; + + if (scope == SCOPE_LOCAL_CPU) + mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1); + else + mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); + + return cpuid_feature_extract_unsigned_field(mmfr1, + ID_AA64MMFR1_EL1_ECBHB_SHIFT); +} + +bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, + int scope) +{ + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); + + if (supports_csv2p3(scope)) + return false; + + if (supports_clearbhb(scope)) + return true; + + if (spectre_bhb_loop_affected(scope)) + return true; + + if (is_spectre_bhb_fw_affected(scope)) + return true; + + return false; +} + +static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot) +{ + const char *v = arm64_get_bp_hardening_vector(slot); + + __this_cpu_write(this_cpu_vector, v); + + /* + * When KPTI is in use, the vectors are switched when exiting to + * user-space. 
+ */ + if (arm64_kernel_unmapped_at_el0()) + return; + + write_sysreg(v, vbar_el1); + isb(); +} + +static bool __read_mostly __nospectre_bhb; +static int __init parse_spectre_bhb_param(char *str) +{ + __nospectre_bhb = true; + return 0; +} +early_param("nospectre_bhb", parse_spectre_bhb_param); + +void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry) +{ + bp_hardening_cb_t cpu_cb; + enum mitigation_state fw_state, state = SPECTRE_VULNERABLE; + struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data); + + if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU)) + return; + + if (arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE) { + /* No point mitigating Spectre-BHB alone. */ + } else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) { + pr_info_once("spectre-bhb mitigation disabled by compile time option\n"); + } else if (cpu_mitigations_off() || __nospectre_bhb) { + pr_info_once("spectre-bhb mitigation disabled by command line option\n"); + } else if (supports_ecbhb(SCOPE_LOCAL_CPU)) { + state = SPECTRE_MITIGATED; + set_bit(BHB_HW, &system_bhb_mitigations); + } else if (supports_clearbhb(SCOPE_LOCAL_CPU)) { + /* + * Ensure KVM uses the indirect vector which will have ClearBHB + * added. + */ + if (!data->slot) + data->slot = HYP_VECTOR_INDIRECT; + + this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN); + state = SPECTRE_MITIGATED; + set_bit(BHB_INSN, &system_bhb_mitigations); + } else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) { + /* + * Ensure KVM uses the indirect vector which will have the + * branchy-loop added. A57/A72-r0 will already have selected + * the spectre-indirect vector, which is sufficient for BHB + * too. + */ + if (!data->slot) + data->slot = HYP_VECTOR_INDIRECT; + + this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP); + state = SPECTRE_MITIGATED; + set_bit(BHB_LOOP, &system_bhb_mitigations); + } else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) { + fw_state = spectre_bhb_get_cpu_fw_mitigation_state(); + if (fw_state == SPECTRE_MITIGATED) { + /* + * Ensure KVM uses one of the spectre bp_hardening + * vectors. The indirect vector doesn't include the EL3 + * call, so needs upgrading to + * HYP_VECTOR_SPECTRE_INDIRECT. + */ + if (!data->slot || data->slot == HYP_VECTOR_INDIRECT) + data->slot += 1; + + this_cpu_set_vectors(EL1_VECTOR_BHB_FW); + + /* + * The WA3 call in the vectors supersedes the WA1 call + * made during context-switch. Uninstall any firmware + * bp_hardening callback. 
+ */ + cpu_cb = spectre_v2_get_sw_mitigation_cb(); + if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb) + __this_cpu_write(bp_hardening_data.fn, NULL); + + state = SPECTRE_MITIGATED; + set_bit(BHB_FW, &system_bhb_mitigations); + } + } + + update_mitigation_state(&spectre_bhb_state, state); +} + +/* Patched to NOP when enabled */ +void noinstr spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt, + __le32 *origptr, + __le32 *updptr, int nr_inst) +{ + BUG_ON(nr_inst != 1); + + if (test_bit(BHB_LOOP, &system_bhb_mitigations)) + *updptr++ = cpu_to_le32(aarch64_insn_gen_nop()); +} + +/* Patched to NOP when enabled */ +void noinstr spectre_bhb_patch_fw_mitigation_enabled(struct alt_instr *alt, + __le32 *origptr, + __le32 *updptr, int nr_inst) +{ + BUG_ON(nr_inst != 1); + + if (test_bit(BHB_FW, &system_bhb_mitigations)) + *updptr++ = cpu_to_le32(aarch64_insn_gen_nop()); +} + +/* Patched to correct the immediate */ +void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt, + __le32 *origptr, __le32 *updptr, int nr_inst) +{ + u8 rd; + u32 insn; + u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM); + + BUG_ON(nr_inst != 1); /* MOV -> MOV */ + + if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) + return; + + insn = le32_to_cpu(*origptr); + rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn); + insn = aarch64_insn_gen_movewide(rd, loop_count, 0, + AARCH64_INSN_VARIANT_64BIT, + AARCH64_INSN_MOVEWIDE_ZERO); + *updptr++ = cpu_to_le32(insn); +} + +/* Patched to mov WA3 when supported */ +void noinstr spectre_bhb_patch_wa3(struct alt_instr *alt, + __le32 *origptr, __le32 *updptr, int nr_inst) +{ + u8 rd; + u32 insn; + + BUG_ON(nr_inst != 1); /* MOV -> MOV */ + + if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY) || + !test_bit(BHB_FW, &system_bhb_mitigations)) + return; + + insn = le32_to_cpu(*origptr); + rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn); + + insn = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_ORR, + AARCH64_INSN_VARIANT_32BIT, + AARCH64_INSN_REG_ZR, rd, + ARM_SMCCC_ARCH_WORKAROUND_3); + if (WARN_ON_ONCE(insn == AARCH64_BREAK_FAULT)) + return; + + *updptr++ = cpu_to_le32(insn); +} + +/* Patched to NOP when not supported */ +void __init spectre_bhb_patch_clearbhb(struct alt_instr *alt, + __le32 *origptr, __le32 *updptr, int nr_inst) +{ + BUG_ON(nr_inst != 2); + + if (test_bit(BHB_INSN, &system_bhb_mitigations)) + return; + + *updptr++ = cpu_to_le32(aarch64_insn_gen_nop()); + *updptr++ = cpu_to_le32(aarch64_insn_gen_nop()); +} + +#ifdef CONFIG_BPF_SYSCALL +#define EBPF_WARN "Unprivileged eBPF is enabled, data leaks possible via Spectre v2 BHB attacks!\n" +void unpriv_ebpf_notify(int new_state) +{ + if (spectre_v2_state == SPECTRE_VULNERABLE || + spectre_bhb_state != SPECTRE_MITIGATED) + return; + + if (!new_state) + pr_err("WARNING: %s", EBPF_WARN); +} +#endif diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c new file mode 100644 index 0000000000..29a8e444db --- /dev/null +++ b/arch/arm64/kernel/psci.c @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * + * Copyright (C) 2013 ARM Limited + * + * Author: Will Deacon + */ + +#define pr_fmt(fmt) "psci: " fmt + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +static int __init cpu_psci_cpu_init(unsigned int cpu) +{ + return 0; +} + +static int __init cpu_psci_cpu_prepare(unsigned int cpu) +{ + if (!psci_ops.cpu_on) { + pr_err("no cpu_on method, not booting CPU%d\n", cpu); + return 
-ENODEV; + } + + return 0; +} + +static int cpu_psci_cpu_boot(unsigned int cpu) +{ + phys_addr_t pa_secondary_entry = __pa_symbol(secondary_entry); + int err = psci_ops.cpu_on(cpu_logical_map(cpu), pa_secondary_entry); + if (err) + pr_err("failed to boot CPU%d (%d)\n", cpu, err); + + return err; +} + +#ifdef CONFIG_HOTPLUG_CPU +static bool cpu_psci_cpu_can_disable(unsigned int cpu) +{ + return !psci_tos_resident_on(cpu); +} + +static int cpu_psci_cpu_disable(unsigned int cpu) +{ + /* Fail early if we don't have CPU_OFF support */ + if (!psci_ops.cpu_off) + return -EOPNOTSUPP; + + /* Trusted OS will deny CPU_OFF */ + if (psci_tos_resident_on(cpu)) + return -EPERM; + + return 0; +} + +static void cpu_psci_cpu_die(unsigned int cpu) +{ + /* + * There are no known implementations of PSCI actually using the + * power state field, pass a sensible default for now. + */ + u32 state = PSCI_POWER_STATE_TYPE_POWER_DOWN << + PSCI_0_2_POWER_STATE_TYPE_SHIFT; + + psci_ops.cpu_off(state); +} + +static int cpu_psci_cpu_kill(unsigned int cpu) +{ + int err; + unsigned long start, end; + + if (!psci_ops.affinity_info) + return 0; + /* + * cpu_kill could race with cpu_die and we can + * potentially end up declaring this cpu undead + * while it is dying. So, try again a few times. + */ + + start = jiffies; + end = start + msecs_to_jiffies(100); + do { + err = psci_ops.affinity_info(cpu_logical_map(cpu), 0); + if (err == PSCI_0_2_AFFINITY_LEVEL_OFF) { + pr_info("CPU%d killed (polled %d ms)\n", cpu, + jiffies_to_msecs(jiffies - start)); + return 0; + } + + usleep_range(100, 1000); + } while (time_before(jiffies, end)); + + pr_warn("CPU%d may not have shut down cleanly (AFFINITY_INFO reports %d)\n", + cpu, err); + return -ETIMEDOUT; +} +#endif + +const struct cpu_operations cpu_psci_ops = { + .name = "psci", + .cpu_init = cpu_psci_cpu_init, + .cpu_prepare = cpu_psci_cpu_prepare, + .cpu_boot = cpu_psci_cpu_boot, +#ifdef CONFIG_HOTPLUG_CPU + .cpu_can_disable = cpu_psci_cpu_can_disable, + .cpu_disable = cpu_psci_cpu_disable, + .cpu_die = cpu_psci_cpu_die, + .cpu_kill = cpu_psci_cpu_kill, +#endif +}; + diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c new file mode 100644 index 0000000000..b3f64144b5 --- /dev/null +++ b/arch/arm64/kernel/ptrace.c @@ -0,0 +1,2309 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Based on arch/arm/kernel/ptrace.c + * + * By Ross Biro 1/23/92 + * edited by Linus Torvalds + * ARM modifications Copyright (C) 2000 Russell King + * Copyright (C) 2012 ARM Ltd. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CREATE_TRACE_POINTS +#include + +struct pt_regs_offset { + const char *name; + int offset; +}; + +#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)} +#define REG_OFFSET_END {.name = NULL, .offset = 0} +#define GPR_OFFSET_NAME(r) \ + {.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])} + +static const struct pt_regs_offset regoffset_table[] = { + GPR_OFFSET_NAME(0), + GPR_OFFSET_NAME(1), + GPR_OFFSET_NAME(2), + GPR_OFFSET_NAME(3), + GPR_OFFSET_NAME(4), + GPR_OFFSET_NAME(5), + GPR_OFFSET_NAME(6), + GPR_OFFSET_NAME(7), + GPR_OFFSET_NAME(8), + GPR_OFFSET_NAME(9), + GPR_OFFSET_NAME(10), + GPR_OFFSET_NAME(11), + GPR_OFFSET_NAME(12), + GPR_OFFSET_NAME(13), + GPR_OFFSET_NAME(14), + GPR_OFFSET_NAME(15), + GPR_OFFSET_NAME(16), + GPR_OFFSET_NAME(17), + GPR_OFFSET_NAME(18), + GPR_OFFSET_NAME(19), + GPR_OFFSET_NAME(20), + GPR_OFFSET_NAME(21), + GPR_OFFSET_NAME(22), + GPR_OFFSET_NAME(23), + GPR_OFFSET_NAME(24), + GPR_OFFSET_NAME(25), + GPR_OFFSET_NAME(26), + GPR_OFFSET_NAME(27), + GPR_OFFSET_NAME(28), + GPR_OFFSET_NAME(29), + GPR_OFFSET_NAME(30), + {.name = "lr", .offset = offsetof(struct pt_regs, regs[30])}, + REG_OFFSET_NAME(sp), + REG_OFFSET_NAME(pc), + REG_OFFSET_NAME(pstate), + REG_OFFSET_END, +}; + +/** + * regs_query_register_offset() - query register offset from its name + * @name: the name of a register + * + * regs_query_register_offset() returns the offset of a register in struct + * pt_regs from its name. If the name is invalid, this returns -EINVAL; + */ +int regs_query_register_offset(const char *name) +{ + const struct pt_regs_offset *roff; + + for (roff = regoffset_table; roff->name != NULL; roff++) + if (!strcmp(roff->name, name)) + return roff->offset; + return -EINVAL; +} + +/** + * regs_within_kernel_stack() - check the address in the stack + * @regs: pt_regs which contains kernel stack pointer. + * @addr: address which is checked. + * + * regs_within_kernel_stack() checks @addr is within the kernel stack page(s). + * If @addr is within the kernel stack, it returns true. If not, returns false. + */ +static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) +{ + return ((addr & ~(THREAD_SIZE - 1)) == + (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) || + on_irq_stack(addr, sizeof(unsigned long)); +} + +/** + * regs_get_kernel_stack_nth() - get Nth entry of the stack + * @regs: pt_regs which contains kernel stack pointer. + * @n: stack entry number. + * + * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which + * is specified by @regs. If the @n th entry is NOT in the kernel stack, + * this returns 0. + */ +unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) +{ + unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs); + + addr += n; + if (regs_within_kernel_stack(regs, (unsigned long)addr)) + return *addr; + else + return 0; +} + +/* + * TODO: does not yet catch signals sent when the child dies. + * in exit.c or in signal.c. + */ + +/* + * Called by kernel/ptrace.c when detaching.. 
+ */ +void ptrace_disable(struct task_struct *child) +{ + /* + * This would be better off in core code, but PTRACE_DETACH has + * grown its fair share of arch-specific worts and changing it + * is likely to cause regressions on obscure architectures. + */ + user_disable_single_step(child); +} + +#ifdef CONFIG_HAVE_HW_BREAKPOINT +/* + * Handle hitting a HW-breakpoint. + */ +static void ptrace_hbptriggered(struct perf_event *bp, + struct perf_sample_data *data, + struct pt_regs *regs) +{ + struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp); + const char *desc = "Hardware breakpoint trap (ptrace)"; + +#ifdef CONFIG_COMPAT + if (is_compat_task()) { + int si_errno = 0; + int i; + + for (i = 0; i < ARM_MAX_BRP; ++i) { + if (current->thread.debug.hbp_break[i] == bp) { + si_errno = (i << 1) + 1; + break; + } + } + + for (i = 0; i < ARM_MAX_WRP; ++i) { + if (current->thread.debug.hbp_watch[i] == bp) { + si_errno = -((i << 1) + 1); + break; + } + } + arm64_force_sig_ptrace_errno_trap(si_errno, bkpt->trigger, + desc); + return; + } +#endif + arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT, bkpt->trigger, desc); +} + +/* + * Unregister breakpoints from this task and reset the pointers in + * the thread_struct. + */ +void flush_ptrace_hw_breakpoint(struct task_struct *tsk) +{ + int i; + struct thread_struct *t = &tsk->thread; + + for (i = 0; i < ARM_MAX_BRP; i++) { + if (t->debug.hbp_break[i]) { + unregister_hw_breakpoint(t->debug.hbp_break[i]); + t->debug.hbp_break[i] = NULL; + } + } + + for (i = 0; i < ARM_MAX_WRP; i++) { + if (t->debug.hbp_watch[i]) { + unregister_hw_breakpoint(t->debug.hbp_watch[i]); + t->debug.hbp_watch[i] = NULL; + } + } +} + +void ptrace_hw_copy_thread(struct task_struct *tsk) +{ + memset(&tsk->thread.debug, 0, sizeof(struct debug_info)); +} + +static struct perf_event *ptrace_hbp_get_event(unsigned int note_type, + struct task_struct *tsk, + unsigned long idx) +{ + struct perf_event *bp = ERR_PTR(-EINVAL); + + switch (note_type) { + case NT_ARM_HW_BREAK: + if (idx >= ARM_MAX_BRP) + goto out; + idx = array_index_nospec(idx, ARM_MAX_BRP); + bp = tsk->thread.debug.hbp_break[idx]; + break; + case NT_ARM_HW_WATCH: + if (idx >= ARM_MAX_WRP) + goto out; + idx = array_index_nospec(idx, ARM_MAX_WRP); + bp = tsk->thread.debug.hbp_watch[idx]; + break; + } + +out: + return bp; +} + +static int ptrace_hbp_set_event(unsigned int note_type, + struct task_struct *tsk, + unsigned long idx, + struct perf_event *bp) +{ + int err = -EINVAL; + + switch (note_type) { + case NT_ARM_HW_BREAK: + if (idx >= ARM_MAX_BRP) + goto out; + idx = array_index_nospec(idx, ARM_MAX_BRP); + tsk->thread.debug.hbp_break[idx] = bp; + err = 0; + break; + case NT_ARM_HW_WATCH: + if (idx >= ARM_MAX_WRP) + goto out; + idx = array_index_nospec(idx, ARM_MAX_WRP); + tsk->thread.debug.hbp_watch[idx] = bp; + err = 0; + break; + } + +out: + return err; +} + +static struct perf_event *ptrace_hbp_create(unsigned int note_type, + struct task_struct *tsk, + unsigned long idx) +{ + struct perf_event *bp; + struct perf_event_attr attr; + int err, type; + + switch (note_type) { + case NT_ARM_HW_BREAK: + type = HW_BREAKPOINT_X; + break; + case NT_ARM_HW_WATCH: + type = HW_BREAKPOINT_RW; + break; + default: + return ERR_PTR(-EINVAL); + } + + ptrace_breakpoint_init(&attr); + + /* + * Initialise fields to sane defaults + * (i.e. values that will pass validation). 
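+ * The event is created disabled here; the tracer enables it later by writing a control value with the enable bit set.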
+ */ + attr.bp_addr = 0; + attr.bp_len = HW_BREAKPOINT_LEN_4; + attr.bp_type = type; + attr.disabled = 1; + + bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk); + if (IS_ERR(bp)) + return bp; + + err = ptrace_hbp_set_event(note_type, tsk, idx, bp); + if (err) + return ERR_PTR(err); + + return bp; +} + +static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type, + struct arch_hw_breakpoint_ctrl ctrl, + struct perf_event_attr *attr) +{ + int err, len, type, offset, disabled = !ctrl.enabled; + + attr->disabled = disabled; + if (disabled) + return 0; + + err = arch_bp_generic_fields(ctrl, &len, &type, &offset); + if (err) + return err; + + switch (note_type) { + case NT_ARM_HW_BREAK: + if ((type & HW_BREAKPOINT_X) != type) + return -EINVAL; + break; + case NT_ARM_HW_WATCH: + if ((type & HW_BREAKPOINT_RW) != type) + return -EINVAL; + break; + default: + return -EINVAL; + } + + attr->bp_len = len; + attr->bp_type = type; + attr->bp_addr += offset; + + return 0; +} + +static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info) +{ + u8 num; + u32 reg = 0; + + switch (note_type) { + case NT_ARM_HW_BREAK: + num = hw_breakpoint_slots(TYPE_INST); + break; + case NT_ARM_HW_WATCH: + num = hw_breakpoint_slots(TYPE_DATA); + break; + default: + return -EINVAL; + } + + reg |= debug_monitors_arch(); + reg <<= 8; + reg |= num; + + *info = reg; + return 0; +} + +static int ptrace_hbp_get_ctrl(unsigned int note_type, + struct task_struct *tsk, + unsigned long idx, + u32 *ctrl) +{ + struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx); + + if (IS_ERR(bp)) + return PTR_ERR(bp); + + *ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0; + return 0; +} + +static int ptrace_hbp_get_addr(unsigned int note_type, + struct task_struct *tsk, + unsigned long idx, + u64 *addr) +{ + struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx); + + if (IS_ERR(bp)) + return PTR_ERR(bp); + + *addr = bp ? 
counter_arch_bp(bp)->address : 0; + return 0; +} + +static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type, + struct task_struct *tsk, + unsigned long idx) +{ + struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx); + + if (!bp) + bp = ptrace_hbp_create(note_type, tsk, idx); + + return bp; +} + +static int ptrace_hbp_set_ctrl(unsigned int note_type, + struct task_struct *tsk, + unsigned long idx, + u32 uctrl) +{ + int err; + struct perf_event *bp; + struct perf_event_attr attr; + struct arch_hw_breakpoint_ctrl ctrl; + + bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx); + if (IS_ERR(bp)) { + err = PTR_ERR(bp); + return err; + } + + attr = bp->attr; + decode_ctrl_reg(uctrl, &ctrl); + err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr); + if (err) + return err; + + return modify_user_hw_breakpoint(bp, &attr); +} + +static int ptrace_hbp_set_addr(unsigned int note_type, + struct task_struct *tsk, + unsigned long idx, + u64 addr) +{ + int err; + struct perf_event *bp; + struct perf_event_attr attr; + + bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx); + if (IS_ERR(bp)) { + err = PTR_ERR(bp); + return err; + } + + attr = bp->attr; + attr.bp_addr = addr; + err = modify_user_hw_breakpoint(bp, &attr); + return err; +} + +#define PTRACE_HBP_ADDR_SZ sizeof(u64) +#define PTRACE_HBP_CTRL_SZ sizeof(u32) +#define PTRACE_HBP_PAD_SZ sizeof(u32) + +static int hw_break_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + unsigned int note_type = regset->core_note_type; + int ret, idx = 0; + u32 info, ctrl; + u64 addr; + + /* Resource info */ + ret = ptrace_hbp_get_resource_info(note_type, &info); + if (ret) + return ret; + + membuf_write(&to, &info, sizeof(info)); + membuf_zero(&to, sizeof(u32)); + /* (address, ctrl) registers */ + while (to.left) { + ret = ptrace_hbp_get_addr(note_type, target, idx, &addr); + if (ret) + return ret; + ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl); + if (ret) + return ret; + membuf_store(&to, addr); + membuf_store(&to, ctrl); + membuf_zero(&to, sizeof(u32)); + idx++; + } + return 0; +} + +static int hw_break_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + unsigned int note_type = regset->core_note_type; + int ret, idx = 0, offset, limit; + u32 ctrl; + u64 addr; + + /* Resource info and pad */ + offset = offsetof(struct user_hwdebug_state, dbg_regs); + user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset); + + /* (address, ctrl) registers */ + limit = regset->n * regset->size; + while (count && offset < limit) { + if (count < PTRACE_HBP_ADDR_SZ) + return -EINVAL; + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr, + offset, offset + PTRACE_HBP_ADDR_SZ); + if (ret) + return ret; + ret = ptrace_hbp_set_addr(note_type, target, idx, addr); + if (ret) + return ret; + offset += PTRACE_HBP_ADDR_SZ; + + if (!count) + break; + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, + offset, offset + PTRACE_HBP_CTRL_SZ); + if (ret) + return ret; + ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl); + if (ret) + return ret; + offset += PTRACE_HBP_CTRL_SZ; + + user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, + offset, offset + PTRACE_HBP_PAD_SZ); + offset += PTRACE_HBP_PAD_SZ; + idx++; + } + + return 0; +} +#endif /* CONFIG_HAVE_HW_BREAKPOINT */ + +static int gpr_get(struct task_struct *target, + const struct user_regset *regset, + struct 
membuf to) +{ + struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs; + return membuf_write(&to, uregs, sizeof(*uregs)); +} + +static int gpr_set(struct task_struct *target, const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret; + struct user_pt_regs newregs = task_pt_regs(target)->user_regs; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1); + if (ret) + return ret; + + if (!valid_user_regs(&newregs, target)) + return -EINVAL; + + task_pt_regs(target)->user_regs = newregs; + return 0; +} + +static int fpr_active(struct task_struct *target, const struct user_regset *regset) +{ + if (!system_supports_fpsimd()) + return -ENODEV; + return regset->n; +} + +/* + * TODO: update fp accessors for lazy context switching (sync/flush hwstate) + */ +static int __fpr_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + struct user_fpsimd_state *uregs; + + sve_sync_to_fpsimd(target); + + uregs = &target->thread.uw.fpsimd_state; + + return membuf_write(&to, uregs, sizeof(*uregs)); +} + +static int fpr_get(struct task_struct *target, const struct user_regset *regset, + struct membuf to) +{ + if (!system_supports_fpsimd()) + return -EINVAL; + + if (target == current) + fpsimd_preserve_current_state(); + + return __fpr_get(target, regset, to); +} + +static int __fpr_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf, + unsigned int start_pos) +{ + int ret; + struct user_fpsimd_state newstate; + + /* + * Ensure target->thread.uw.fpsimd_state is up to date, so that a + * short copyin can't resurrect stale data. + */ + sve_sync_to_fpsimd(target); + + newstate = target->thread.uw.fpsimd_state; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, + start_pos, start_pos + sizeof(newstate)); + if (ret) + return ret; + + target->thread.uw.fpsimd_state = newstate; + + return ret; +} + +static int fpr_set(struct task_struct *target, const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret; + + if (!system_supports_fpsimd()) + return -EINVAL; + + ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0); + if (ret) + return ret; + + sve_sync_from_fpsimd_zeropad(target); + fpsimd_flush_task_state(target); + + return ret; +} + +static int tls_get(struct task_struct *target, const struct user_regset *regset, + struct membuf to) +{ + int ret; + + if (target == current) + tls_preserve_current_state(); + + ret = membuf_store(&to, target->thread.uw.tp_value); + if (system_supports_tpidr2()) + ret = membuf_store(&to, target->thread.tpidr2_el0); + else + ret = membuf_zero(&to, sizeof(u64)); + + return ret; +} + +static int tls_set(struct task_struct *target, const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret; + unsigned long tls[2]; + + tls[0] = target->thread.uw.tp_value; + if (system_supports_tpidr2()) + tls[1] = target->thread.tpidr2_el0; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, tls, 0, count); + if (ret) + return ret; + + target->thread.uw.tp_value = tls[0]; + if (system_supports_tpidr2()) + target->thread.tpidr2_el0 = tls[1]; + + return ret; +} + +static int system_call_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + return membuf_store(&to, 
task_pt_regs(target)->syscallno); +} + +static int system_call_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int syscallno = task_pt_regs(target)->syscallno; + int ret; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1); + if (ret) + return ret; + + task_pt_regs(target)->syscallno = syscallno; + return ret; +} + +#ifdef CONFIG_ARM64_SVE + +static void sve_init_header_from_task(struct user_sve_header *header, + struct task_struct *target, + enum vec_type type) +{ + unsigned int vq; + bool active; + bool fpsimd_only; + enum vec_type task_type; + + memset(header, 0, sizeof(*header)); + + /* Check if the requested registers are active for the task */ + if (thread_sm_enabled(&target->thread)) + task_type = ARM64_VEC_SME; + else + task_type = ARM64_VEC_SVE; + active = (task_type == type); + + switch (type) { + case ARM64_VEC_SVE: + if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT)) + header->flags |= SVE_PT_VL_INHERIT; + fpsimd_only = !test_tsk_thread_flag(target, TIF_SVE); + break; + case ARM64_VEC_SME: + if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT)) + header->flags |= SVE_PT_VL_INHERIT; + fpsimd_only = false; + break; + default: + WARN_ON_ONCE(1); + return; + } + + if (active) { + if (fpsimd_only) { + header->flags |= SVE_PT_REGS_FPSIMD; + } else { + header->flags |= SVE_PT_REGS_SVE; + } + } + + header->vl = task_get_vl(target, type); + vq = sve_vq_from_vl(header->vl); + + header->max_vl = vec_max_vl(type); + header->size = SVE_PT_SIZE(vq, header->flags); + header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl), + SVE_PT_REGS_SVE); +} + +static unsigned int sve_size_from_header(struct user_sve_header const *header) +{ + return ALIGN(header->size, SVE_VQ_BYTES); +} + +static int sve_get_common(struct task_struct *target, + const struct user_regset *regset, + struct membuf to, + enum vec_type type) +{ + struct user_sve_header header; + unsigned int vq; + unsigned long start, end; + + /* Header */ + sve_init_header_from_task(&header, target, type); + vq = sve_vq_from_vl(header.vl); + + membuf_write(&to, &header, sizeof(header)); + + if (target == current) + fpsimd_preserve_current_state(); + + BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header)); + BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header)); + + switch ((header.flags & SVE_PT_REGS_MASK)) { + case SVE_PT_REGS_FPSIMD: + return __fpr_get(target, regset, to); + + case SVE_PT_REGS_SVE: + start = SVE_PT_SVE_OFFSET; + end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq); + membuf_write(&to, target->thread.sve_state, end - start); + + start = end; + end = SVE_PT_SVE_FPSR_OFFSET(vq); + membuf_zero(&to, end - start); + + /* + * Copy fpsr, and fpcr which must follow contiguously in + * struct fpsimd_state: + */ + start = end; + end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE; + membuf_write(&to, &target->thread.uw.fpsimd_state.fpsr, + end - start); + + start = end; + end = sve_size_from_header(&header); + return membuf_zero(&to, end - start); + + default: + return 0; + } +} + +static int sve_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + if (!system_supports_sve()) + return -EINVAL; + + return sve_get_common(target, regset, to, ARM64_VEC_SVE); +} + +static int sve_set_common(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf, + enum vec_type 
type) +{ + int ret; + struct user_sve_header header; + unsigned int vq; + unsigned long start, end; + + /* Header */ + if (count < sizeof(header)) + return -EINVAL; + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header, + 0, sizeof(header)); + if (ret) + goto out; + + /* + * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by + * vec_set_vector_length(), which will also validate them for us: + */ + ret = vec_set_vector_length(target, type, header.vl, + ((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16); + if (ret) + goto out; + + /* Actual VL set may be less than the user asked for: */ + vq = sve_vq_from_vl(task_get_vl(target, type)); + + /* Enter/exit streaming mode */ + if (system_supports_sme()) { + u64 old_svcr = target->thread.svcr; + + switch (type) { + case ARM64_VEC_SVE: + target->thread.svcr &= ~SVCR_SM_MASK; + break; + case ARM64_VEC_SME: + target->thread.svcr |= SVCR_SM_MASK; + + /* + * Disable traps and ensure there is SME storage but + * preserve any currently set values in ZA/ZT. + */ + sme_alloc(target, false); + set_tsk_thread_flag(target, TIF_SME); + break; + default: + WARN_ON_ONCE(1); + ret = -EINVAL; + goto out; + } + + /* + * If we switched then invalidate any existing SVE + * state and ensure there's storage. + */ + if (target->thread.svcr != old_svcr) + sve_alloc(target, true); + } + + /* Registers: FPSIMD-only case */ + + BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header)); + if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) { + ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, + SVE_PT_FPSIMD_OFFSET); + clear_tsk_thread_flag(target, TIF_SVE); + target->thread.fp_type = FP_STATE_FPSIMD; + goto out; + } + + /* + * Otherwise: no registers or full SVE case. For backwards + * compatibility reasons we treat empty flags as SVE registers. + */ + + /* + * If setting a different VL from the requested VL and there is + * register data, the data layout will be wrong: don't even + * try to set the registers in this case. + */ + if (count && vq != sve_vq_from_vl(header.vl)) { + ret = -EIO; + goto out; + } + + sve_alloc(target, true); + if (!target->thread.sve_state) { + ret = -ENOMEM; + clear_tsk_thread_flag(target, TIF_SVE); + target->thread.fp_type = FP_STATE_FPSIMD; + goto out; + } + + /* + * Ensure target->thread.sve_state is up to date with target's + * FPSIMD regs, so that a short copyin leaves trailing + * registers unmodified. Only enable SVE if we are + * configuring normal SVE, a system with streaming SVE may not + * have normal SVE. 
+ */ + fpsimd_sync_to_sve(target); + if (type == ARM64_VEC_SVE) + set_tsk_thread_flag(target, TIF_SVE); + target->thread.fp_type = FP_STATE_SVE; + + BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header)); + start = SVE_PT_SVE_OFFSET; + end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq); + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + target->thread.sve_state, + start, end); + if (ret) + goto out; + + start = end; + end = SVE_PT_SVE_FPSR_OFFSET(vq); + user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, start, end); + + /* + * Copy fpsr, and fpcr which must follow contiguously in + * struct fpsimd_state: + */ + start = end; + end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE; + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.uw.fpsimd_state.fpsr, + start, end); + +out: + fpsimd_flush_task_state(target); + return ret; +} + +static int sve_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + if (!system_supports_sve()) + return -EINVAL; + + return sve_set_common(target, regset, pos, count, kbuf, ubuf, + ARM64_VEC_SVE); +} + +#endif /* CONFIG_ARM64_SVE */ + +#ifdef CONFIG_ARM64_SME + +static int ssve_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + if (!system_supports_sme()) + return -EINVAL; + + return sve_get_common(target, regset, to, ARM64_VEC_SME); +} + +static int ssve_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + if (!system_supports_sme()) + return -EINVAL; + + return sve_set_common(target, regset, pos, count, kbuf, ubuf, + ARM64_VEC_SME); +} + +static int za_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + struct user_za_header header; + unsigned int vq; + unsigned long start, end; + + if (!system_supports_sme()) + return -EINVAL; + + /* Header */ + memset(&header, 0, sizeof(header)); + + if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT)) + header.flags |= ZA_PT_VL_INHERIT; + + header.vl = task_get_sme_vl(target); + vq = sve_vq_from_vl(header.vl); + header.max_vl = sme_max_vl(); + header.max_size = ZA_PT_SIZE(vq); + + /* If ZA is not active there is only the header */ + if (thread_za_enabled(&target->thread)) + header.size = ZA_PT_SIZE(vq); + else + header.size = ZA_PT_ZA_OFFSET; + + membuf_write(&to, &header, sizeof(header)); + + BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header)); + end = ZA_PT_ZA_OFFSET; + + if (target == current) + fpsimd_preserve_current_state(); + + /* Any register data to include? 
*/ + if (thread_za_enabled(&target->thread)) { + start = end; + end = ZA_PT_SIZE(vq); + membuf_write(&to, target->thread.sme_state, end - start); + } + + /* Zero any trailing padding */ + start = end; + end = ALIGN(header.size, SVE_VQ_BYTES); + return membuf_zero(&to, end - start); +} + +static int za_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret; + struct user_za_header header; + unsigned int vq; + unsigned long start, end; + + if (!system_supports_sme()) + return -EINVAL; + + /* Header */ + if (count < sizeof(header)) + return -EINVAL; + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header, + 0, sizeof(header)); + if (ret) + goto out; + + /* + * All current ZA_PT_* flags are consumed by + * vec_set_vector_length(), which will also validate them for + * us: + */ + ret = vec_set_vector_length(target, ARM64_VEC_SME, header.vl, + ((unsigned long)header.flags) << 16); + if (ret) + goto out; + + /* Actual VL set may be less than the user asked for: */ + vq = sve_vq_from_vl(task_get_sme_vl(target)); + + /* Ensure there is some SVE storage for streaming mode */ + if (!target->thread.sve_state) { + sve_alloc(target, false); + if (!target->thread.sve_state) { + ret = -ENOMEM; + goto out; + } + } + + /* + * Only flush the storage if PSTATE.ZA was not already set, + * otherwise preserve any existing data. + */ + sme_alloc(target, !thread_za_enabled(&target->thread)); + if (!target->thread.sme_state) + return -ENOMEM; + + /* If there is no data then disable ZA */ + if (!count) { + target->thread.svcr &= ~SVCR_ZA_MASK; + goto out; + } + + /* + * If setting a different VL from the requested VL and there is + * register data, the data layout will be wrong: don't even + * try to set the registers in this case. + */ + if (vq != sve_vq_from_vl(header.vl)) { + ret = -EIO; + goto out; + } + + BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header)); + start = ZA_PT_ZA_OFFSET; + end = ZA_PT_SIZE(vq); + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + target->thread.sme_state, + start, end); + if (ret) + goto out; + + /* Mark ZA as active and let userspace use it */ + set_tsk_thread_flag(target, TIF_SME); + target->thread.svcr |= SVCR_ZA_MASK; + +out: + fpsimd_flush_task_state(target); + return ret; +} + +static int zt_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + if (!system_supports_sme2()) + return -EINVAL; + + /* + * If PSTATE.ZA is not set then ZT will be zeroed when it is + * enabled so report the current register value as zero. 
+ */ + if (thread_za_enabled(&target->thread)) + membuf_write(&to, thread_zt_state(&target->thread), + ZT_SIG_REG_BYTES); + else + membuf_zero(&to, ZT_SIG_REG_BYTES); + + return 0; +} + +static int zt_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret; + + if (!system_supports_sme2()) + return -EINVAL; + + /* Ensure SVE storage in case this is first use of SME */ + sve_alloc(target, false); + if (!target->thread.sve_state) + return -ENOMEM; + + if (!thread_za_enabled(&target->thread)) { + sme_alloc(target, true); + if (!target->thread.sme_state) + return -ENOMEM; + } + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + thread_zt_state(&target->thread), + 0, ZT_SIG_REG_BYTES); + if (ret == 0) { + target->thread.svcr |= SVCR_ZA_MASK; + set_tsk_thread_flag(target, TIF_SME); + } + + fpsimd_flush_task_state(target); + + return ret; +} + +#endif /* CONFIG_ARM64_SME */ + +#ifdef CONFIG_ARM64_PTR_AUTH +static int pac_mask_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + /* + * The PAC bits can differ across data and instruction pointers + * depending on TCR_EL1.TBID*, which we may make use of in future, so + * we expose separate masks. + */ + unsigned long mask = ptrauth_user_pac_mask(); + struct user_pac_mask uregs = { + .data_mask = mask, + .insn_mask = mask, + }; + + if (!system_supports_address_auth()) + return -EINVAL; + + return membuf_write(&to, &uregs, sizeof(uregs)); +} + +static int pac_enabled_keys_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + long enabled_keys = ptrauth_get_enabled_keys(target); + + if (IS_ERR_VALUE(enabled_keys)) + return enabled_keys; + + return membuf_write(&to, &enabled_keys, sizeof(enabled_keys)); +} + +static int pac_enabled_keys_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret; + long enabled_keys = ptrauth_get_enabled_keys(target); + + if (IS_ERR_VALUE(enabled_keys)) + return enabled_keys; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &enabled_keys, 0, + sizeof(long)); + if (ret) + return ret; + + return ptrauth_set_enabled_keys(target, PR_PAC_ENABLED_KEYS_MASK, + enabled_keys); +} + +#ifdef CONFIG_CHECKPOINT_RESTORE +static __uint128_t pac_key_to_user(const struct ptrauth_key *key) +{ + return (__uint128_t)key->hi << 64 | key->lo; +} + +static struct ptrauth_key pac_key_from_user(__uint128_t ukey) +{ + struct ptrauth_key key = { + .lo = (unsigned long)ukey, + .hi = (unsigned long)(ukey >> 64), + }; + + return key; +} + +static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys, + const struct ptrauth_keys_user *keys) +{ + ukeys->apiakey = pac_key_to_user(&keys->apia); + ukeys->apibkey = pac_key_to_user(&keys->apib); + ukeys->apdakey = pac_key_to_user(&keys->apda); + ukeys->apdbkey = pac_key_to_user(&keys->apdb); +} + +static void pac_address_keys_from_user(struct ptrauth_keys_user *keys, + const struct user_pac_address_keys *ukeys) +{ + keys->apia = pac_key_from_user(ukeys->apiakey); + keys->apib = pac_key_from_user(ukeys->apibkey); + keys->apda = pac_key_from_user(ukeys->apdakey); + keys->apdb = pac_key_from_user(ukeys->apdbkey); +} + +static int pac_address_keys_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + struct ptrauth_keys_user *keys = 
&target->thread.keys_user; + struct user_pac_address_keys user_keys; + + if (!system_supports_address_auth()) + return -EINVAL; + + pac_address_keys_to_user(&user_keys, keys); + + return membuf_write(&to, &user_keys, sizeof(user_keys)); +} + +static int pac_address_keys_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + struct ptrauth_keys_user *keys = &target->thread.keys_user; + struct user_pac_address_keys user_keys; + int ret; + + if (!system_supports_address_auth()) + return -EINVAL; + + pac_address_keys_to_user(&user_keys, keys); + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &user_keys, 0, -1); + if (ret) + return ret; + pac_address_keys_from_user(keys, &user_keys); + + return 0; +} + +static void pac_generic_keys_to_user(struct user_pac_generic_keys *ukeys, + const struct ptrauth_keys_user *keys) +{ + ukeys->apgakey = pac_key_to_user(&keys->apga); +} + +static void pac_generic_keys_from_user(struct ptrauth_keys_user *keys, + const struct user_pac_generic_keys *ukeys) +{ + keys->apga = pac_key_from_user(ukeys->apgakey); +} + +static int pac_generic_keys_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + struct ptrauth_keys_user *keys = &target->thread.keys_user; + struct user_pac_generic_keys user_keys; + + if (!system_supports_generic_auth()) + return -EINVAL; + + pac_generic_keys_to_user(&user_keys, keys); + + return membuf_write(&to, &user_keys, sizeof(user_keys)); +} + +static int pac_generic_keys_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + struct ptrauth_keys_user *keys = &target->thread.keys_user; + struct user_pac_generic_keys user_keys; + int ret; + + if (!system_supports_generic_auth()) + return -EINVAL; + + pac_generic_keys_to_user(&user_keys, keys); + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &user_keys, 0, -1); + if (ret) + return ret; + pac_generic_keys_from_user(keys, &user_keys); + + return 0; +} +#endif /* CONFIG_CHECKPOINT_RESTORE */ +#endif /* CONFIG_ARM64_PTR_AUTH */ + +#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI +static int tagged_addr_ctrl_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + long ctrl = get_tagged_addr_ctrl(target); + + if (IS_ERR_VALUE(ctrl)) + return ctrl; + + return membuf_write(&to, &ctrl, sizeof(ctrl)); +} + +static int tagged_addr_ctrl_set(struct task_struct *target, const struct + user_regset *regset, unsigned int pos, + unsigned int count, const void *kbuf, const + void __user *ubuf) +{ + int ret; + long ctrl; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1); + if (ret) + return ret; + + return set_tagged_addr_ctrl(target, ctrl); +} +#endif + +enum aarch64_regset { + REGSET_GPR, + REGSET_FPR, + REGSET_TLS, +#ifdef CONFIG_HAVE_HW_BREAKPOINT + REGSET_HW_BREAK, + REGSET_HW_WATCH, +#endif + REGSET_SYSTEM_CALL, +#ifdef CONFIG_ARM64_SVE + REGSET_SVE, +#endif +#ifdef CONFIG_ARM64_SME + REGSET_SSVE, + REGSET_ZA, + REGSET_ZT, +#endif +#ifdef CONFIG_ARM64_PTR_AUTH + REGSET_PAC_MASK, + REGSET_PAC_ENABLED_KEYS, +#ifdef CONFIG_CHECKPOINT_RESTORE + REGSET_PACA_KEYS, + REGSET_PACG_KEYS, +#endif +#endif +#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI + REGSET_TAGGED_ADDR_CTRL, +#endif +}; + +static const struct user_regset aarch64_regsets[] = { + [REGSET_GPR] = { + .core_note_type = NT_PRSTATUS, + .n = sizeof(struct user_pt_regs) 
/ sizeof(u64), + .size = sizeof(u64), + .align = sizeof(u64), + .regset_get = gpr_get, + .set = gpr_set + }, + [REGSET_FPR] = { + .core_note_type = NT_PRFPREG, + .n = sizeof(struct user_fpsimd_state) / sizeof(u32), + /* + * We pretend we have 32-bit registers because the fpsr and + * fpcr are 32-bits wide. + */ + .size = sizeof(u32), + .align = sizeof(u32), + .active = fpr_active, + .regset_get = fpr_get, + .set = fpr_set + }, + [REGSET_TLS] = { + .core_note_type = NT_ARM_TLS, + .n = 2, + .size = sizeof(void *), + .align = sizeof(void *), + .regset_get = tls_get, + .set = tls_set, + }, +#ifdef CONFIG_HAVE_HW_BREAKPOINT + [REGSET_HW_BREAK] = { + .core_note_type = NT_ARM_HW_BREAK, + .n = sizeof(struct user_hwdebug_state) / sizeof(u32), + .size = sizeof(u32), + .align = sizeof(u32), + .regset_get = hw_break_get, + .set = hw_break_set, + }, + [REGSET_HW_WATCH] = { + .core_note_type = NT_ARM_HW_WATCH, + .n = sizeof(struct user_hwdebug_state) / sizeof(u32), + .size = sizeof(u32), + .align = sizeof(u32), + .regset_get = hw_break_get, + .set = hw_break_set, + }, +#endif + [REGSET_SYSTEM_CALL] = { + .core_note_type = NT_ARM_SYSTEM_CALL, + .n = 1, + .size = sizeof(int), + .align = sizeof(int), + .regset_get = system_call_get, + .set = system_call_set, + }, +#ifdef CONFIG_ARM64_SVE + [REGSET_SVE] = { /* Scalable Vector Extension */ + .core_note_type = NT_ARM_SVE, + .n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE), + SVE_VQ_BYTES), + .size = SVE_VQ_BYTES, + .align = SVE_VQ_BYTES, + .regset_get = sve_get, + .set = sve_set, + }, +#endif +#ifdef CONFIG_ARM64_SME + [REGSET_SSVE] = { /* Streaming mode SVE */ + .core_note_type = NT_ARM_SSVE, + .n = DIV_ROUND_UP(SVE_PT_SIZE(SME_VQ_MAX, SVE_PT_REGS_SVE), + SVE_VQ_BYTES), + .size = SVE_VQ_BYTES, + .align = SVE_VQ_BYTES, + .regset_get = ssve_get, + .set = ssve_set, + }, + [REGSET_ZA] = { /* SME ZA */ + .core_note_type = NT_ARM_ZA, + /* + * ZA is a single register but it's variably sized and + * the ptrace core requires that the size of any data + * be an exact multiple of the configured register + * size so report as though we had SVE_VQ_BYTES + * registers. These values aren't exposed to + * userspace. 
+ */ + .n = DIV_ROUND_UP(ZA_PT_SIZE(SME_VQ_MAX), SVE_VQ_BYTES), + .size = SVE_VQ_BYTES, + .align = SVE_VQ_BYTES, + .regset_get = za_get, + .set = za_set, + }, + [REGSET_ZT] = { /* SME ZT */ + .core_note_type = NT_ARM_ZT, + .n = 1, + .size = ZT_SIG_REG_BYTES, + .align = sizeof(u64), + .regset_get = zt_get, + .set = zt_set, + }, +#endif +#ifdef CONFIG_ARM64_PTR_AUTH + [REGSET_PAC_MASK] = { + .core_note_type = NT_ARM_PAC_MASK, + .n = sizeof(struct user_pac_mask) / sizeof(u64), + .size = sizeof(u64), + .align = sizeof(u64), + .regset_get = pac_mask_get, + /* this cannot be set dynamically */ + }, + [REGSET_PAC_ENABLED_KEYS] = { + .core_note_type = NT_ARM_PAC_ENABLED_KEYS, + .n = 1, + .size = sizeof(long), + .align = sizeof(long), + .regset_get = pac_enabled_keys_get, + .set = pac_enabled_keys_set, + }, +#ifdef CONFIG_CHECKPOINT_RESTORE + [REGSET_PACA_KEYS] = { + .core_note_type = NT_ARM_PACA_KEYS, + .n = sizeof(struct user_pac_address_keys) / sizeof(__uint128_t), + .size = sizeof(__uint128_t), + .align = sizeof(__uint128_t), + .regset_get = pac_address_keys_get, + .set = pac_address_keys_set, + }, + [REGSET_PACG_KEYS] = { + .core_note_type = NT_ARM_PACG_KEYS, + .n = sizeof(struct user_pac_generic_keys) / sizeof(__uint128_t), + .size = sizeof(__uint128_t), + .align = sizeof(__uint128_t), + .regset_get = pac_generic_keys_get, + .set = pac_generic_keys_set, + }, +#endif +#endif +#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI + [REGSET_TAGGED_ADDR_CTRL] = { + .core_note_type = NT_ARM_TAGGED_ADDR_CTRL, + .n = 1, + .size = sizeof(long), + .align = sizeof(long), + .regset_get = tagged_addr_ctrl_get, + .set = tagged_addr_ctrl_set, + }, +#endif +}; + +static const struct user_regset_view user_aarch64_view = { + .name = "aarch64", .e_machine = EM_AARCH64, + .regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets) +}; + +#ifdef CONFIG_COMPAT +enum compat_regset { + REGSET_COMPAT_GPR, + REGSET_COMPAT_VFP, +}; + +static inline compat_ulong_t compat_get_user_reg(struct task_struct *task, int idx) +{ + struct pt_regs *regs = task_pt_regs(task); + + switch (idx) { + case 15: + return regs->pc; + case 16: + return pstate_to_compat_psr(regs->pstate); + case 17: + return regs->orig_x0; + default: + return regs->regs[idx]; + } +} + +static int compat_gpr_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + int i = 0; + + while (to.left) + membuf_store(&to, compat_get_user_reg(target, i++)); + return 0; +} + +static int compat_gpr_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + struct pt_regs newregs; + int ret = 0; + unsigned int i, start, num_regs; + + /* Calculate the number of AArch32 registers contained in count */ + num_regs = count / regset->size; + + /* Convert pos into an register number */ + start = pos / regset->size; + + if (start + num_regs > regset->n) + return -EIO; + + newregs = *task_pt_regs(target); + + for (i = 0; i < num_regs; ++i) { + unsigned int idx = start + i; + compat_ulong_t reg; + + if (kbuf) { + memcpy(®, kbuf, sizeof(reg)); + kbuf += sizeof(reg); + } else { + ret = copy_from_user(®, ubuf, sizeof(reg)); + if (ret) { + ret = -EFAULT; + break; + } + + ubuf += sizeof(reg); + } + + switch (idx) { + case 15: + newregs.pc = reg; + break; + case 16: + reg = compat_psr_to_pstate(reg); + newregs.pstate = reg; + break; + case 17: + newregs.orig_x0 = reg; + break; + default: + newregs.regs[idx] = reg; + } + + } + + if (valid_user_regs(&newregs.user_regs, 
target)) + *task_pt_regs(target) = newregs; + else + ret = -EINVAL; + + return ret; +} + +static int compat_vfp_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + struct user_fpsimd_state *uregs; + compat_ulong_t fpscr; + + if (!system_supports_fpsimd()) + return -EINVAL; + + uregs = &target->thread.uw.fpsimd_state; + + if (target == current) + fpsimd_preserve_current_state(); + + /* + * The VFP registers are packed into the fpsimd_state, so they all sit + * nicely together for us. We just need to create the fpscr separately. + */ + membuf_write(&to, uregs, VFP_STATE_SIZE - sizeof(compat_ulong_t)); + fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) | + (uregs->fpcr & VFP_FPSCR_CTRL_MASK); + return membuf_store(&to, fpscr); +} + +static int compat_vfp_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + struct user_fpsimd_state *uregs; + compat_ulong_t fpscr; + int ret, vregs_end_pos; + + if (!system_supports_fpsimd()) + return -EINVAL; + + uregs = &target->thread.uw.fpsimd_state; + + vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t); + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0, + vregs_end_pos); + + if (count && !ret) { + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr, + vregs_end_pos, VFP_STATE_SIZE); + if (!ret) { + uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK; + uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK; + } + } + + fpsimd_flush_task_state(target); + return ret; +} + +static int compat_tls_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + return membuf_store(&to, (compat_ulong_t)target->thread.uw.tp_value); +} + +static int compat_tls_set(struct task_struct *target, + const struct user_regset *regset, unsigned int pos, + unsigned int count, const void *kbuf, + const void __user *ubuf) +{ + int ret; + compat_ulong_t tls = target->thread.uw.tp_value; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1); + if (ret) + return ret; + + target->thread.uw.tp_value = tls; + return ret; +} + +static const struct user_regset aarch32_regsets[] = { + [REGSET_COMPAT_GPR] = { + .core_note_type = NT_PRSTATUS, + .n = COMPAT_ELF_NGREG, + .size = sizeof(compat_elf_greg_t), + .align = sizeof(compat_elf_greg_t), + .regset_get = compat_gpr_get, + .set = compat_gpr_set + }, + [REGSET_COMPAT_VFP] = { + .core_note_type = NT_ARM_VFP, + .n = VFP_STATE_SIZE / sizeof(compat_ulong_t), + .size = sizeof(compat_ulong_t), + .align = sizeof(compat_ulong_t), + .active = fpr_active, + .regset_get = compat_vfp_get, + .set = compat_vfp_set + }, +}; + +static const struct user_regset_view user_aarch32_view = { + .name = "aarch32", .e_machine = EM_ARM, + .regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets) +}; + +static const struct user_regset aarch32_ptrace_regsets[] = { + [REGSET_GPR] = { + .core_note_type = NT_PRSTATUS, + .n = COMPAT_ELF_NGREG, + .size = sizeof(compat_elf_greg_t), + .align = sizeof(compat_elf_greg_t), + .regset_get = compat_gpr_get, + .set = compat_gpr_set + }, + [REGSET_FPR] = { + .core_note_type = NT_ARM_VFP, + .n = VFP_STATE_SIZE / sizeof(compat_ulong_t), + .size = sizeof(compat_ulong_t), + .align = sizeof(compat_ulong_t), + .regset_get = compat_vfp_get, + .set = compat_vfp_set + }, + [REGSET_TLS] = { + .core_note_type = NT_ARM_TLS, + .n = 1, + .size = sizeof(compat_ulong_t), + .align = sizeof(compat_ulong_t), + .regset_get = compat_tls_get, + .set = 
compat_tls_set, + }, +#ifdef CONFIG_HAVE_HW_BREAKPOINT + [REGSET_HW_BREAK] = { + .core_note_type = NT_ARM_HW_BREAK, + .n = sizeof(struct user_hwdebug_state) / sizeof(u32), + .size = sizeof(u32), + .align = sizeof(u32), + .regset_get = hw_break_get, + .set = hw_break_set, + }, + [REGSET_HW_WATCH] = { + .core_note_type = NT_ARM_HW_WATCH, + .n = sizeof(struct user_hwdebug_state) / sizeof(u32), + .size = sizeof(u32), + .align = sizeof(u32), + .regset_get = hw_break_get, + .set = hw_break_set, + }, +#endif + [REGSET_SYSTEM_CALL] = { + .core_note_type = NT_ARM_SYSTEM_CALL, + .n = 1, + .size = sizeof(int), + .align = sizeof(int), + .regset_get = system_call_get, + .set = system_call_set, + }, +}; + +static const struct user_regset_view user_aarch32_ptrace_view = { + .name = "aarch32", .e_machine = EM_ARM, + .regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets) +}; + +static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off, + compat_ulong_t __user *ret) +{ + compat_ulong_t tmp; + + if (off & 3) + return -EIO; + + if (off == COMPAT_PT_TEXT_ADDR) + tmp = tsk->mm->start_code; + else if (off == COMPAT_PT_DATA_ADDR) + tmp = tsk->mm->start_data; + else if (off == COMPAT_PT_TEXT_END_ADDR) + tmp = tsk->mm->end_code; + else if (off < sizeof(compat_elf_gregset_t)) + tmp = compat_get_user_reg(tsk, off >> 2); + else if (off >= COMPAT_USER_SZ) + return -EIO; + else + tmp = 0; + + return put_user(tmp, ret); +} + +static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off, + compat_ulong_t val) +{ + struct pt_regs newregs = *task_pt_regs(tsk); + unsigned int idx = off / 4; + + if (off & 3 || off >= COMPAT_USER_SZ) + return -EIO; + + if (off >= sizeof(compat_elf_gregset_t)) + return 0; + + switch (idx) { + case 15: + newregs.pc = val; + break; + case 16: + newregs.pstate = compat_psr_to_pstate(val); + break; + case 17: + newregs.orig_x0 = val; + break; + default: + newregs.regs[idx] = val; + } + + if (!valid_user_regs(&newregs.user_regs, tsk)) + return -EINVAL; + + *task_pt_regs(tsk) = newregs; + return 0; +} + +#ifdef CONFIG_HAVE_HW_BREAKPOINT + +/* + * Convert a virtual register number into an index for a thread_info + * breakpoint array. Breakpoints are identified using positive numbers + * whilst watchpoints are negative. The registers are laid out as pairs + * of (address, control), each pair mapping to a unique hw_breakpoint struct. + * Register 0 is reserved for describing resource information. 
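+ * For example, registers 1 and 2 are the address and control of the first breakpoint slot, while -1 and -2 refer to the first watchpoint slot.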
+ */ +static int compat_ptrace_hbp_num_to_idx(compat_long_t num) +{ + return (abs(num) - 1) >> 1; +} + +static int compat_ptrace_hbp_get_resource_info(u32 *kdata) +{ + u8 num_brps, num_wrps, debug_arch, wp_len; + u32 reg = 0; + + num_brps = hw_breakpoint_slots(TYPE_INST); + num_wrps = hw_breakpoint_slots(TYPE_DATA); + + debug_arch = debug_monitors_arch(); + wp_len = 8; + reg |= debug_arch; + reg <<= 8; + reg |= wp_len; + reg <<= 8; + reg |= num_wrps; + reg <<= 8; + reg |= num_brps; + + *kdata = reg; + return 0; +} + +static int compat_ptrace_hbp_get(unsigned int note_type, + struct task_struct *tsk, + compat_long_t num, + u32 *kdata) +{ + u64 addr = 0; + u32 ctrl = 0; + + int err, idx = compat_ptrace_hbp_num_to_idx(num); + + if (num & 1) { + err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr); + *kdata = (u32)addr; + } else { + err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl); + *kdata = ctrl; + } + + return err; +} + +static int compat_ptrace_hbp_set(unsigned int note_type, + struct task_struct *tsk, + compat_long_t num, + u32 *kdata) +{ + u64 addr; + u32 ctrl; + + int err, idx = compat_ptrace_hbp_num_to_idx(num); + + if (num & 1) { + addr = *kdata; + err = ptrace_hbp_set_addr(note_type, tsk, idx, addr); + } else { + ctrl = *kdata; + err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl); + } + + return err; +} + +static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num, + compat_ulong_t __user *data) +{ + int ret; + u32 kdata; + + /* Watchpoint */ + if (num < 0) { + ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata); + /* Resource info */ + } else if (num == 0) { + ret = compat_ptrace_hbp_get_resource_info(&kdata); + /* Breakpoint */ + } else { + ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata); + } + + if (!ret) + ret = put_user(kdata, data); + + return ret; +} + +static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num, + compat_ulong_t __user *data) +{ + int ret; + u32 kdata = 0; + + if (num == 0) + return 0; + + ret = get_user(kdata, data); + if (ret) + return ret; + + if (num < 0) + ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata); + else + ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata); + + return ret; +} +#endif /* CONFIG_HAVE_HW_BREAKPOINT */ + +long compat_arch_ptrace(struct task_struct *child, compat_long_t request, + compat_ulong_t caddr, compat_ulong_t cdata) +{ + unsigned long addr = caddr; + unsigned long data = cdata; + void __user *datap = compat_ptr(data); + int ret; + + switch (request) { + case PTRACE_PEEKUSR: + ret = compat_ptrace_read_user(child, addr, datap); + break; + + case PTRACE_POKEUSR: + ret = compat_ptrace_write_user(child, addr, data); + break; + + case COMPAT_PTRACE_GETREGS: + ret = copy_regset_to_user(child, + &user_aarch32_view, + REGSET_COMPAT_GPR, + 0, sizeof(compat_elf_gregset_t), + datap); + break; + + case COMPAT_PTRACE_SETREGS: + ret = copy_regset_from_user(child, + &user_aarch32_view, + REGSET_COMPAT_GPR, + 0, sizeof(compat_elf_gregset_t), + datap); + break; + + case COMPAT_PTRACE_GET_THREAD_AREA: + ret = put_user((compat_ulong_t)child->thread.uw.tp_value, + (compat_ulong_t __user *)datap); + break; + + case COMPAT_PTRACE_SET_SYSCALL: + task_pt_regs(child)->syscallno = data; + ret = 0; + break; + + case COMPAT_PTRACE_GETVFPREGS: + ret = copy_regset_to_user(child, + &user_aarch32_view, + REGSET_COMPAT_VFP, + 0, VFP_STATE_SIZE, + datap); + break; + + case COMPAT_PTRACE_SETVFPREGS: + ret = copy_regset_from_user(child, + &user_aarch32_view, + 
REGSET_COMPAT_VFP, + 0, VFP_STATE_SIZE, + datap); + break; + +#ifdef CONFIG_HAVE_HW_BREAKPOINT + case COMPAT_PTRACE_GETHBPREGS: + ret = compat_ptrace_gethbpregs(child, addr, datap); + break; + + case COMPAT_PTRACE_SETHBPREGS: + ret = compat_ptrace_sethbpregs(child, addr, datap); + break; +#endif + + default: + ret = compat_ptrace_request(child, request, addr, + data); + break; + } + + return ret; +} +#endif /* CONFIG_COMPAT */ + +const struct user_regset_view *task_user_regset_view(struct task_struct *task) +{ +#ifdef CONFIG_COMPAT + /* + * Core dumping of 32-bit tasks or compat ptrace requests must use the + * user_aarch32_view compatible with arm32. Native ptrace requests on + * 32-bit children use an extended user_aarch32_ptrace_view to allow + * access to the TLS register. + */ + if (is_compat_task()) + return &user_aarch32_view; + else if (is_compat_thread(task_thread_info(task))) + return &user_aarch32_ptrace_view; +#endif + return &user_aarch64_view; +} + +long arch_ptrace(struct task_struct *child, long request, + unsigned long addr, unsigned long data) +{ + switch (request) { + case PTRACE_PEEKMTETAGS: + case PTRACE_POKEMTETAGS: + return mte_ptrace_copy_tags(child, request, addr, data); + } + + return ptrace_request(child, request, addr, data); +} + +enum ptrace_syscall_dir { + PTRACE_SYSCALL_ENTER = 0, + PTRACE_SYSCALL_EXIT, +}; + +static void report_syscall(struct pt_regs *regs, enum ptrace_syscall_dir dir) +{ + int regno; + unsigned long saved_reg; + + /* + * We have some ABI weirdness here in the way that we handle syscall + * exit stops because we indicate whether or not the stop has been + * signalled from syscall entry or syscall exit by clobbering a general + * purpose register (ip/r12 for AArch32, x7 for AArch64) in the tracee + * and restoring its old value after the stop. This means that: + * + * - Any writes by the tracer to this register during the stop are + * ignored/discarded. + * + * - The actual value of the register is not available during the stop, + * so the tracer cannot save it and restore it later. + * + * - Syscall stops behave differently to seccomp and pseudo-step traps + * (the latter do not nobble any registers). + */ + regno = (is_compat_task() ? 12 : 7); + saved_reg = regs->regs[regno]; + regs->regs[regno] = dir; + + if (dir == PTRACE_SYSCALL_ENTER) { + if (ptrace_report_syscall_entry(regs)) + forget_syscall(regs); + regs->regs[regno] = saved_reg; + } else if (!test_thread_flag(TIF_SINGLESTEP)) { + ptrace_report_syscall_exit(regs, 0); + regs->regs[regno] = saved_reg; + } else { + regs->regs[regno] = saved_reg; + + /* + * Signal a pseudo-step exception since we are stepping but + * tracer modifications to the registers may have rewound the + * state machine. + */ + ptrace_report_syscall_exit(regs, 1); + } +} + +int syscall_trace_enter(struct pt_regs *regs) +{ + unsigned long flags = read_thread_flags(); + + if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) { + report_syscall(regs, PTRACE_SYSCALL_ENTER); + if (flags & _TIF_SYSCALL_EMU) + return NO_SYSCALL; + } + + /* Do the secure computing after ptrace; failures should be fast. 
*/ + if (secure_computing() == -1) + return NO_SYSCALL; + + if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) + trace_sys_enter(regs, regs->syscallno); + + audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1], + regs->regs[2], regs->regs[3]); + + return regs->syscallno; +} + +void syscall_trace_exit(struct pt_regs *regs) +{ + unsigned long flags = read_thread_flags(); + + audit_syscall_exit(regs); + + if (flags & _TIF_SYSCALL_TRACEPOINT) + trace_sys_exit(regs, syscall_get_return_value(current, regs)); + + if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP)) + report_syscall(regs, PTRACE_SYSCALL_EXIT); + + rseq_syscall(regs); +} + +/* + * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a. + * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is + * not described in ARM DDI 0487D.a. + * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may + * be allocated an EL0 meaning in future. + * Userspace cannot use these until they have an architectural meaning. + * Note that this follows the SPSR_ELx format, not the AArch32 PSR format. + * We also reserve IL for the kernel; SS is handled dynamically. + */ +#define SPSR_EL1_AARCH64_RES0_BITS \ + (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 26) | GENMASK_ULL(23, 22) | \ + GENMASK_ULL(20, 13) | GENMASK_ULL(5, 5)) +#define SPSR_EL1_AARCH32_RES0_BITS \ + (GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20)) + +static int valid_compat_regs(struct user_pt_regs *regs) +{ + regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS; + + if (!system_supports_mixed_endian_el0()) { + if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) + regs->pstate |= PSR_AA32_E_BIT; + else + regs->pstate &= ~PSR_AA32_E_BIT; + } + + if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) && + (regs->pstate & PSR_AA32_A_BIT) == 0 && + (regs->pstate & PSR_AA32_I_BIT) == 0 && + (regs->pstate & PSR_AA32_F_BIT) == 0) { + return 1; + } + + /* + * Force PSR to a valid 32-bit EL0t, preserving the same bits as + * arch/arm. + */ + regs->pstate &= PSR_AA32_N_BIT | PSR_AA32_Z_BIT | + PSR_AA32_C_BIT | PSR_AA32_V_BIT | + PSR_AA32_Q_BIT | PSR_AA32_IT_MASK | + PSR_AA32_GE_MASK | PSR_AA32_E_BIT | + PSR_AA32_T_BIT; + regs->pstate |= PSR_MODE32_BIT; + + return 0; +} + +static int valid_native_regs(struct user_pt_regs *regs) +{ + regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS; + + if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) && + (regs->pstate & PSR_D_BIT) == 0 && + (regs->pstate & PSR_A_BIT) == 0 && + (regs->pstate & PSR_I_BIT) == 0 && + (regs->pstate & PSR_F_BIT) == 0) { + return 1; + } + + /* Force PSR to a valid 64-bit EL0t */ + regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT; + + return 0; +} + +/* + * Are the current registers suitable for user mode? (used to maintain + * security in signal handlers) + */ +int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task) +{ + /* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */ + user_regs_reset_single_step(regs, task); + + if (is_compat_thread(task_thread_info(task))) + return valid_compat_regs(regs); + else + return valid_native_regs(regs); +} diff --git a/arch/arm64/kernel/reloc_test_core.c b/arch/arm64/kernel/reloc_test_core.c new file mode 100644 index 0000000000..99f2ffe9fc --- /dev/null +++ b/arch/arm64/kernel/reloc_test_core.c @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2017 Linaro, Ltd. 
+ */ + +#include + +int sym64_rel; + +#define SYM64_ABS_VAL 0xffff880000cccccc +#define SYM32_ABS_VAL 0xf800cccc +#define SYM16_ABS_VAL 0xf8cc + +#define __SET_ABS(name, val) asm(".globl " #name "; .set "#name ", " #val) +#define SET_ABS(name, val) __SET_ABS(name, val) + +SET_ABS(sym64_abs, SYM64_ABS_VAL); +SET_ABS(sym32_abs, SYM32_ABS_VAL); +SET_ABS(sym16_abs, SYM16_ABS_VAL); + +asmlinkage u64 absolute_data64(void); +asmlinkage u64 absolute_data32(void); +asmlinkage u64 absolute_data16(void); +asmlinkage u64 signed_movw(void); +asmlinkage u64 unsigned_movw(void); +asmlinkage u64 relative_adrp(void); +asmlinkage u64 relative_adrp_far(void); +asmlinkage u64 relative_adr(void); +asmlinkage u64 relative_data64(void); +asmlinkage u64 relative_data32(void); +asmlinkage u64 relative_data16(void); + +static struct { + char name[32]; + u64 (*f)(void); + u64 expect; +} const funcs[] = { + { "R_AARCH64_ABS64", absolute_data64, UL(SYM64_ABS_VAL) }, + { "R_AARCH64_ABS32", absolute_data32, UL(SYM32_ABS_VAL) }, + { "R_AARCH64_ABS16", absolute_data16, UL(SYM16_ABS_VAL) }, + { "R_AARCH64_MOVW_SABS_Gn", signed_movw, UL(SYM64_ABS_VAL) }, + { "R_AARCH64_MOVW_UABS_Gn", unsigned_movw, UL(SYM64_ABS_VAL) }, + { "R_AARCH64_ADR_PREL_PG_HI21", relative_adrp, (u64)&sym64_rel }, + { "R_AARCH64_ADR_PREL_PG_HI21", relative_adrp_far, (u64)&memstart_addr }, + { "R_AARCH64_ADR_PREL_LO21", relative_adr, (u64)&sym64_rel }, + { "R_AARCH64_PREL64", relative_data64, (u64)&sym64_rel }, + { "R_AARCH64_PREL32", relative_data32, (u64)&sym64_rel }, + { "R_AARCH64_PREL16", relative_data16, (u64)&sym64_rel }, +}; + +static int __init reloc_test_init(void) +{ + int i; + + pr_info("Relocation test:\n"); + pr_info("-------------------------------------------------------\n"); + + for (i = 0; i < ARRAY_SIZE(funcs); i++) { + u64 ret = funcs[i].f(); + + pr_info("%-31s 0x%016llx %s\n", funcs[i].name, ret, + ret == funcs[i].expect ? "pass" : "fail"); + if (ret != funcs[i].expect) + pr_err("Relocation failed, expected 0x%016llx, not 0x%016llx\n", + funcs[i].expect, ret); + } + return 0; +} + +static void __exit reloc_test_exit(void) +{ +} + +module_init(reloc_test_init); +module_exit(reloc_test_exit); + +MODULE_LICENSE("GPL v2"); diff --git a/arch/arm64/kernel/reloc_test_syms.S b/arch/arm64/kernel/reloc_test_syms.S new file mode 100644 index 0000000000..c50f45fa29 --- /dev/null +++ b/arch/arm64/kernel/reloc_test_syms.S @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2017 Linaro, Ltd. 
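Each entry in the table above checks that the module loader resolved one relocation type to the expected value. As a rough illustration (a sketch under stated assumptions, not code from this patch): an R_AARCH64_ADR_PREL_PG_HI21 relocation stores the page distance from the patched instruction to its target, and that distance must fit ADRP's signed 21-bit, 4 KiB-scaled immediate (roughly +/- 4 GiB); targets beyond that either need a PLT veneer or make the load fail, which is what the "far" variant exercises.

#include <stdint.h>

/* page(x) discards the low 12 bits, exactly as ADRP does. */
#define PAGE_OF(x)	((x) & ~0xfffULL)

/*
 * Compute the value an R_AARCH64_ADR_PREL_PG_HI21 relocation must encode:
 * the distance, in 4 KiB pages, from the page of the patched instruction
 * (place) to the page of the target (sym + addend).  Returns 0 on success,
 * -1 if the signed 21-bit immediate cannot hold the distance.
 */
static int adrp_reloc_value(uint64_t place, uint64_t sym, int64_t addend,
			    int64_t *pages)
{
	int64_t off = (int64_t)(PAGE_OF(sym + addend) - PAGE_OF(place)) >> 12;

	if (off >= (1 << 20) || off < -(1 << 20))
		return -1;		/* beyond ADRP's reach */

	*pages = off;
	return 0;
}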
+ */ + +#include + +SYM_FUNC_START(absolute_data64) + ldr x0, 0f + ret +0: .quad sym64_abs +SYM_FUNC_END(absolute_data64) + +SYM_FUNC_START(absolute_data32) + ldr w0, 0f + ret +0: .long sym32_abs +SYM_FUNC_END(absolute_data32) + +SYM_FUNC_START(absolute_data16) + adr x0, 0f + ldrh w0, [x0] + ret +0: .short sym16_abs, 0 +SYM_FUNC_END(absolute_data16) + +SYM_FUNC_START(signed_movw) + movz x0, #:abs_g2_s:sym64_abs + movk x0, #:abs_g1_nc:sym64_abs + movk x0, #:abs_g0_nc:sym64_abs + ret +SYM_FUNC_END(signed_movw) + +SYM_FUNC_START(unsigned_movw) + movz x0, #:abs_g3:sym64_abs + movk x0, #:abs_g2_nc:sym64_abs + movk x0, #:abs_g1_nc:sym64_abs + movk x0, #:abs_g0_nc:sym64_abs + ret +SYM_FUNC_END(unsigned_movw) + + .align 12 + .space 0xff8 +SYM_FUNC_START(relative_adrp) + adrp x0, sym64_rel + add x0, x0, #:lo12:sym64_rel + ret +SYM_FUNC_END(relative_adrp) + + .align 12 + .space 0xffc +SYM_FUNC_START(relative_adrp_far) + adrp x0, memstart_addr + add x0, x0, #:lo12:memstart_addr + ret +SYM_FUNC_END(relative_adrp_far) + +SYM_FUNC_START(relative_adr) + adr x0, sym64_rel + ret +SYM_FUNC_END(relative_adr) + +SYM_FUNC_START(relative_data64) + adr x1, 0f + ldr x0, [x1] + add x0, x0, x1 + ret +0: .quad sym64_rel - . +SYM_FUNC_END(relative_data64) + +SYM_FUNC_START(relative_data32) + adr x1, 0f + ldr w0, [x1] + add x0, x0, x1 + ret +0: .long sym64_rel - . +SYM_FUNC_END(relative_data32) + +SYM_FUNC_START(relative_data16) + adr x1, 0f + ldrsh w0, [x1] + add x0, x0, x1 + ret +0: .short sym64_rel - ., 0 +SYM_FUNC_END(relative_data16) diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S new file mode 100644 index 0000000000..413f899e4a --- /dev/null +++ b/arch/arm64/kernel/relocate_kernel.S @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * kexec for arm64 + * + * Copyright (C) Linaro. + * Copyright (C) Huawei Futurewei Technologies. + * Copyright (C) 2021, Microsoft Corporation. + * Pasha Tatashin + */ + +#include +#include + +#include +#include +#include +#include +#include + +.macro turn_off_mmu tmp1, tmp2 + mov_q \tmp1, INIT_SCTLR_EL1_MMU_OFF + pre_disable_mmu_workaround + msr sctlr_el1, \tmp1 + isb +.endm + +.section ".kexec_relocate.text", "ax" +/* + * arm64_relocate_new_kernel - Put a 2nd stage image in place and boot it. + * + * The memory that the old kernel occupies may be overwritten when copying the + * new image to its final location. To assure that the + * arm64_relocate_new_kernel routine which does that copy is not overwritten, + * all code and data needed by arm64_relocate_new_kernel must be between the + * symbols arm64_relocate_new_kernel and arm64_relocate_new_kernel_end. The + * machine_kexec() routine will copy arm64_relocate_new_kernel to the kexec + * safe memory that has been set up to be preserved during the copy operation. + */ +SYM_CODE_START(arm64_relocate_new_kernel) + /* + * The kimage structure isn't allocated specially and may be clobbered + * during relocation. We must load any values we need from it prior to + * any relocation occurring. + */ + ldr x28, [x0, #KIMAGE_START] + ldr x27, [x0, #KIMAGE_ARCH_EL2_VECTORS] + ldr x26, [x0, #KIMAGE_ARCH_DTB_MEM] + + /* Setup the list loop variables. 
*/ + ldr x18, [x0, #KIMAGE_ARCH_ZERO_PAGE] /* x18 = zero page for BBM */ + ldr x17, [x0, #KIMAGE_ARCH_TTBR1] /* x17 = linear map copy */ + ldr x16, [x0, #KIMAGE_HEAD] /* x16 = kimage_head */ + ldr x22, [x0, #KIMAGE_ARCH_PHYS_OFFSET] /* x22 phys_offset */ + raw_dcache_line_size x15, x1 /* x15 = dcache line size */ + break_before_make_ttbr_switch x18, x17, x1, x2 /* set linear map */ +.Lloop: + and x12, x16, PAGE_MASK /* x12 = addr */ + sub x12, x12, x22 /* Convert x12 to virt */ + /* Test the entry flags. */ +.Ltest_source: + tbz x16, IND_SOURCE_BIT, .Ltest_indirection + + /* Invalidate dest page to PoC. */ + mov x19, x13 + copy_page x13, x12, x1, x2, x3, x4, x5, x6, x7, x8 + add x1, x19, #PAGE_SIZE + dcache_by_myline_op civac, sy, x19, x1, x15, x20 + b .Lnext +.Ltest_indirection: + tbz x16, IND_INDIRECTION_BIT, .Ltest_destination + mov x14, x12 /* ptr = addr */ + b .Lnext +.Ltest_destination: + tbz x16, IND_DESTINATION_BIT, .Lnext + mov x13, x12 /* dest = addr */ +.Lnext: + ldr x16, [x14], #8 /* entry = *ptr++ */ + tbz x16, IND_DONE_BIT, .Lloop /* while (!(entry & DONE)) */ + /* wait for writes from copy_page to finish */ + dsb nsh + ic iallu + dsb nsh + isb + turn_off_mmu x12, x13 + + /* Start new image. */ + cbz x27, .Lel1 + mov x1, x28 /* kernel entry point */ + mov x2, x26 /* dtb address */ + mov x3, xzr + mov x4, xzr + mov x0, #HVC_SOFT_RESTART + hvc #0 /* Jumps from el2 */ +.Lel1: + mov x0, x26 /* dtb address */ + mov x1, xzr + mov x2, xzr + mov x3, xzr + br x28 /* Jumps from el1 */ +SYM_CODE_END(arm64_relocate_new_kernel) diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c new file mode 100644 index 0000000000..68330017d0 --- /dev/null +++ b/arch/arm64/kernel/return_address.c @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * arch/arm64/kernel/return_address.c + * + * Copyright (C) 2013 Linaro Limited + * Author: AKASHI Takahiro + */ + +#include +#include +#include +#include + +#include + +struct return_address_data { + unsigned int level; + void *addr; +}; + +static bool save_return_addr(void *d, unsigned long pc) +{ + struct return_address_data *data = d; + + if (!data->level) { + data->addr = (void *)pc; + return false; + } else { + --data->level; + return true; + } +} +NOKPROBE_SYMBOL(save_return_addr); + +void *return_address(unsigned int level) +{ + struct return_address_data data; + + data.level = level + 2; + data.addr = NULL; + + arch_stack_walk(save_return_addr, &data, current, NULL); + + if (!data.level) + return data.addr; + else + return NULL; +} +EXPORT_SYMBOL_GPL(return_address); +NOKPROBE_SYMBOL(return_address); diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c new file mode 100644 index 0000000000..255d12f881 --- /dev/null +++ b/arch/arm64/kernel/sdei.c @@ -0,0 +1,267 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2017 Arm Ltd. +#define pr_fmt(fmt) "sdei: " fmt + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +unsigned long sdei_exit_mode; + +/* + * VMAP'd stacks checking for stack overflow on exception using sp as a scratch + * register, meaning SDEI has to switch to its own stack. We need two stacks as + * a critical event may interrupt a normal event that has just taken a + * synchronous exception, and is using sp as scratch register. For a critical + * event interrupting a normal event, we can't reliably tell if we were on the + * sdei stack. 
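The loop above walks the generic kexec indirection list: every 64-bit entry is a page-aligned address whose low bits double as flags, and an IND_DONE entry terminates the list. A simplified C rendering of the same walk (the flag values follow the kernel's kexec definitions; the cache maintenance and MMU-off details handled by the assembly are deliberately omitted):

#include <stdint.h>
#include <string.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

#define IND_DESTINATION	(1UL << 0)	/* following source pages land here */
#define IND_INDIRECTION	(1UL << 1)	/* continue reading entries at this page */
#define IND_DONE	(1UL << 2)	/* end of the list */
#define IND_SOURCE	(1UL << 3)	/* copy this page to the current destination */

static void walk_kimage_list(uint64_t *ptr)
{
	char *dest = NULL;
	uint64_t entry;

	for (entry = *ptr++; !(entry & IND_DONE); entry = *ptr++) {
		void *addr = (void *)(uintptr_t)(entry & PAGE_MASK);

		if (entry & IND_DESTINATION) {
			dest = addr;
		} else if (entry & IND_INDIRECTION) {
			ptr = addr;
		} else if (entry & IND_SOURCE) {
			memcpy(dest, addr, PAGE_SIZE);
			dest += PAGE_SIZE;
		}
	}
}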
+ * For now, we allocate stacks when the driver is probed. + */ +DECLARE_PER_CPU(unsigned long *, sdei_stack_normal_ptr); +DECLARE_PER_CPU(unsigned long *, sdei_stack_critical_ptr); + +#ifdef CONFIG_VMAP_STACK +DEFINE_PER_CPU(unsigned long *, sdei_stack_normal_ptr); +DEFINE_PER_CPU(unsigned long *, sdei_stack_critical_ptr); +#endif + +DECLARE_PER_CPU(unsigned long *, sdei_shadow_call_stack_normal_ptr); +DECLARE_PER_CPU(unsigned long *, sdei_shadow_call_stack_critical_ptr); + +#ifdef CONFIG_SHADOW_CALL_STACK +DEFINE_PER_CPU(unsigned long *, sdei_shadow_call_stack_normal_ptr); +DEFINE_PER_CPU(unsigned long *, sdei_shadow_call_stack_critical_ptr); +#endif + +DEFINE_PER_CPU(struct sdei_registered_event *, sdei_active_normal_event); +DEFINE_PER_CPU(struct sdei_registered_event *, sdei_active_critical_event); + +static void _free_sdei_stack(unsigned long * __percpu *ptr, int cpu) +{ + unsigned long *p; + + p = per_cpu(*ptr, cpu); + if (p) { + per_cpu(*ptr, cpu) = NULL; + vfree(p); + } +} + +static void free_sdei_stacks(void) +{ + int cpu; + + if (!IS_ENABLED(CONFIG_VMAP_STACK)) + return; + + for_each_possible_cpu(cpu) { + _free_sdei_stack(&sdei_stack_normal_ptr, cpu); + _free_sdei_stack(&sdei_stack_critical_ptr, cpu); + } +} + +static int _init_sdei_stack(unsigned long * __percpu *ptr, int cpu) +{ + unsigned long *p; + + p = arch_alloc_vmap_stack(SDEI_STACK_SIZE, cpu_to_node(cpu)); + if (!p) + return -ENOMEM; + per_cpu(*ptr, cpu) = p; + + return 0; +} + +static int init_sdei_stacks(void) +{ + int cpu; + int err = 0; + + if (!IS_ENABLED(CONFIG_VMAP_STACK)) + return 0; + + for_each_possible_cpu(cpu) { + err = _init_sdei_stack(&sdei_stack_normal_ptr, cpu); + if (err) + break; + err = _init_sdei_stack(&sdei_stack_critical_ptr, cpu); + if (err) + break; + } + + if (err) + free_sdei_stacks(); + + return err; +} + +static void _free_sdei_scs(unsigned long * __percpu *ptr, int cpu) +{ + void *s; + + s = per_cpu(*ptr, cpu); + if (s) { + per_cpu(*ptr, cpu) = NULL; + scs_free(s); + } +} + +static void free_sdei_scs(void) +{ + int cpu; + + for_each_possible_cpu(cpu) { + _free_sdei_scs(&sdei_shadow_call_stack_normal_ptr, cpu); + _free_sdei_scs(&sdei_shadow_call_stack_critical_ptr, cpu); + } +} + +static int _init_sdei_scs(unsigned long * __percpu *ptr, int cpu) +{ + void *s; + + s = scs_alloc(cpu_to_node(cpu)); + if (!s) + return -ENOMEM; + per_cpu(*ptr, cpu) = s; + + return 0; +} + +static int init_sdei_scs(void) +{ + int cpu; + int err = 0; + + if (!scs_is_enabled()) + return 0; + + for_each_possible_cpu(cpu) { + err = _init_sdei_scs(&sdei_shadow_call_stack_normal_ptr, cpu); + if (err) + break; + err = _init_sdei_scs(&sdei_shadow_call_stack_critical_ptr, cpu); + if (err) + break; + } + + if (err) + free_sdei_scs(); + + return err; +} + +unsigned long sdei_arch_get_entry_point(int conduit) +{ + /* + * SDEI works between adjacent exception levels. If we booted at EL1 we + * assume a hypervisor is marshalling events. If we booted at EL2 and + * dropped to EL1 because we don't support VHE, then we can't support + * SDEI. + */ + if (is_hyp_nvhe()) { + pr_err("Not supported on this hardware/boot configuration\n"); + goto out_err; + } + + if (init_sdei_stacks()) + goto out_err; + + if (init_sdei_scs()) + goto out_err_free_stacks; + + sdei_exit_mode = (conduit == SMCCC_CONDUIT_HVC) ? 
SDEI_EXIT_HVC : SDEI_EXIT_SMC; + +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 + if (arm64_kernel_unmapped_at_el0()) { + unsigned long offset; + + offset = (unsigned long)__sdei_asm_entry_trampoline - + (unsigned long)__entry_tramp_text_start; + return TRAMP_VALIAS + offset; + } else +#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ + return (unsigned long)__sdei_asm_handler; + +out_err_free_stacks: + free_sdei_stacks(); +out_err: + return 0; +} + +/* + * do_sdei_event() returns one of: + * SDEI_EV_HANDLED - success, return to the interrupted context. + * SDEI_EV_FAILED - failure, return this error code to firmare. + * virtual-address - success, return to this address. + */ +unsigned long __kprobes do_sdei_event(struct pt_regs *regs, + struct sdei_registered_event *arg) +{ + u32 mode; + int i, err = 0; + int clobbered_registers = 4; + u64 elr = read_sysreg(elr_el1); + u32 kernel_mode = read_sysreg(CurrentEL) | 1; /* +SPSel */ + unsigned long vbar = read_sysreg(vbar_el1); + + if (arm64_kernel_unmapped_at_el0()) + clobbered_registers++; + + /* Retrieve the missing registers values */ + for (i = 0; i < clobbered_registers; i++) { + /* from within the handler, this call always succeeds */ + sdei_api_event_context(i, ®s->regs[i]); + } + + err = sdei_event_handler(regs, arg); + if (err) + return SDEI_EV_FAILED; + + if (elr != read_sysreg(elr_el1)) { + /* + * We took a synchronous exception from the SDEI handler. + * This could deadlock, and if you interrupt KVM it will + * hyp-panic instead. + */ + pr_warn("unsafe: exception during handler\n"); + } + + mode = regs->pstate & (PSR_MODE32_BIT | PSR_MODE_MASK); + + /* + * If we interrupted the kernel with interrupts masked, we always go + * back to wherever we came from. + */ + if (mode == kernel_mode && !interrupts_enabled(regs)) + return SDEI_EV_HANDLED; + + /* + * Otherwise, we pretend this was an IRQ. This lets user space tasks + * receive signals before we return to them, and KVM to invoke it's + * world switch to do the same. + * + * See DDI0487B.a Table D1-7 'Vector offsets from vector table base + * address'. + */ + if (mode == kernel_mode) + return vbar + 0x280; + else if (mode & PSR_MODE32_BIT) + return vbar + 0x680; + + return vbar + 0x480; +} diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c new file mode 100644 index 0000000000..417a8a86b2 --- /dev/null +++ b/arch/arm64/kernel/setup.c @@ -0,0 +1,459 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Based on arch/arm/kernel/setup.c + * + * Copyright (C) 1995-2001 Russell King + * Copyright (C) 2012 ARM Ltd. 
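The three return addresses chosen at the end of do_sdei_event() above are the IRQ entries of the AArch64 vector table: 0x280 for "current EL with SPx", 0x480 for "lower EL using AArch64" and 0x680 for "lower EL using AArch32". A tiny sketch of the same selection (the enum and function names here are illustrative, not kernel identifiers):

/* Pretend the SDEI event was an IRQ and pick the matching vector entry. */
enum interrupted_ctx { IN_KERNEL, IN_USER_AARCH64, IN_USER_AARCH32 };

static unsigned long sdei_return_vector(unsigned long vbar, enum interrupted_ctx ctx)
{
	switch (ctx) {
	case IN_KERNEL:		return vbar + 0x280;	/* current EL, SPx, IRQ  */
	case IN_USER_AARCH32:	return vbar + 0x680;	/* lower EL, AArch32, IRQ */
	default:		return vbar + 0x480;	/* lower EL, AArch64, IRQ */
	}
}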
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int num_standard_resources; +static struct resource *standard_resources; + +phys_addr_t __fdt_pointer __initdata; +u64 mmu_enabled_at_boot __initdata; + +/* + * Standard memory resources + */ +static struct resource mem_res[] = { + { + .name = "Kernel code", + .start = 0, + .end = 0, + .flags = IORESOURCE_SYSTEM_RAM + }, + { + .name = "Kernel data", + .start = 0, + .end = 0, + .flags = IORESOURCE_SYSTEM_RAM + } +}; + +#define kernel_code mem_res[0] +#define kernel_data mem_res[1] + +/* + * The recorded values of x0 .. x3 upon kernel entry. + */ +u64 __cacheline_aligned boot_args[4]; + +void __init smp_setup_processor_id(void) +{ + u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK; + set_cpu_logical_map(0, mpidr); + + pr_info("Booting Linux on physical CPU 0x%010lx [0x%08x]\n", + (unsigned long)mpidr, read_cpuid_id()); +} + +bool arch_match_cpu_phys_id(int cpu, u64 phys_id) +{ + return phys_id == cpu_logical_map(cpu); +} + +struct mpidr_hash mpidr_hash; +/** + * smp_build_mpidr_hash - Pre-compute shifts required at each affinity + * level in order to build a linear index from an + * MPIDR value. Resulting algorithm is a collision + * free hash carried out through shifting and ORing + */ +static void __init smp_build_mpidr_hash(void) +{ + u32 i, affinity, fs[4], bits[4], ls; + u64 mask = 0; + /* + * Pre-scan the list of MPIDRS and filter out bits that do + * not contribute to affinity levels, ie they never toggle. + */ + for_each_possible_cpu(i) + mask |= (cpu_logical_map(i) ^ cpu_logical_map(0)); + pr_debug("mask of set bits %#llx\n", mask); + /* + * Find and stash the last and first bit set at all affinity levels to + * check how many bits are required to represent them. + */ + for (i = 0; i < 4; i++) { + affinity = MPIDR_AFFINITY_LEVEL(mask, i); + /* + * Find the MSB bit and LSB bits position + * to determine how many bits are required + * to express the affinity level. + */ + ls = fls(affinity); + fs[i] = affinity ? ffs(affinity) - 1 : 0; + bits[i] = ls - fs[i]; + } + /* + * An index can be created from the MPIDR_EL1 by isolating the + * significant bits at each affinity level and by shifting + * them in order to compress the 32 bits values space to a + * compressed set of values. This is equivalent to hashing + * the MPIDR_EL1 through shifting and ORing. It is a collision free + * hash though not minimal since some levels might contain a number + * of CPUs that is not an exact power of 2 and their bit + * representation might contain holes, eg MPIDR_EL1[7:0] = {0x2, 0x80}. 
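The compression described in the comment above can be demonstrated on its own: only the affinity bits that actually differ between CPUs are kept, and each level's useful bits are shifted down and packed into a dense index. A standalone sketch using hypothetical MPIDR values, GCC/Clang builtins in place of the kernel's ffs()/fls(), and affinity level 3 omitted for brevity:

#include <stdint.h>
#include <stdio.h>

/* Aff0..Aff2 live in MPIDR_EL1 bits [23:0], one byte per level. */
#define AFF(mpidr, level)	(((mpidr) >> ((level) * 8)) & 0xff)

int main(void)
{
	/* hypothetical system: 2 clusters of 2 CPUs */
	uint64_t cpus[] = { 0x0000, 0x0001, 0x0100, 0x0101 };
	int ncpus = 4, i, level;
	uint64_t mask = 0;
	unsigned int fs[3], bits[3];

	for (i = 1; i < ncpus; i++)
		mask |= cpus[i] ^ cpus[0];		/* bits that ever toggle */

	for (level = 0; level < 3; level++) {
		uint64_t aff = AFF(mask, level);

		fs[level] = aff ? __builtin_ctzll(aff) : 0;	/* first useful bit */
		bits[level] = aff ? 64 - __builtin_clzll(aff) - fs[level] : 0;
	}

	for (i = 0; i < ncpus; i++) {
		unsigned int idx = 0, shift = 0;

		for (level = 0; level < 3; level++) {
			idx |= ((AFF(cpus[i], level) >> fs[level]) &
				((1u << bits[level]) - 1)) << shift;
			shift += bits[level];
		}
		printf("MPIDR %#06llx -> index %u\n",
		       (unsigned long long)cpus[i], idx);
	}
	return 0;
}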
+ */ + mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0]; + mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0]; + mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] - + (bits[1] + bits[0]); + mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) + + fs[3] - (bits[2] + bits[1] + bits[0]); + mpidr_hash.mask = mask; + mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0]; + pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n", + mpidr_hash.shift_aff[0], + mpidr_hash.shift_aff[1], + mpidr_hash.shift_aff[2], + mpidr_hash.shift_aff[3], + mpidr_hash.mask, + mpidr_hash.bits); + /* + * 4x is an arbitrary value used to warn on a hash table much bigger + * than expected on most systems. + */ + if (mpidr_hash_size() > 4 * num_possible_cpus()) + pr_warn("Large number of MPIDR hash buckets detected\n"); +} + +static void *early_fdt_ptr __initdata; + +void __init *get_early_fdt_ptr(void) +{ + return early_fdt_ptr; +} + +asmlinkage void __init early_fdt_map(u64 dt_phys) +{ + int fdt_size; + + early_fixmap_init(); + early_fdt_ptr = fixmap_remap_fdt(dt_phys, &fdt_size, PAGE_KERNEL); +} + +static void __init setup_machine_fdt(phys_addr_t dt_phys) +{ + int size; + void *dt_virt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL); + const char *name; + + if (dt_virt) + memblock_reserve(dt_phys, size); + + if (!dt_virt || !early_init_dt_scan(dt_virt)) { + pr_crit("\n" + "Error: invalid device tree blob at physical address %pa (virtual address 0x%px)\n" + "The dtb must be 8-byte aligned and must not exceed 2 MB in size\n" + "\nPlease check your bootloader.", + &dt_phys, dt_virt); + + /* + * Note that in this _really_ early stage we cannot even BUG() + * or oops, so the least terrible thing to do is cpu_relax(), + * or else we could end-up printing non-initialized data, etc. 
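The requirements quoted in the error message of setup_machine_fdt() above come from the boot protocol: the DTB must be 8-byte aligned, no larger than 2 MB, and carry a valid FDT header (which early_init_dt_scan() goes on to verify). A hedged userspace-style sketch of those constraints; the header fields are big-endian per the devicetree specification:

#include <arpa/inet.h>	/* ntohl(): FDT header fields are big-endian */
#include <stdint.h>

#define FDT_MAGIC	0xd00dfeedU
#define MAX_FDT_SIZE	(2 * 1024 * 1024)

static int dtb_looks_valid(const void *dtb, uint64_t dtb_phys)
{
	const uint32_t *hdr = dtb;

	if (dtb_phys & 7)			/* must be 8-byte aligned */
		return 0;
	if (ntohl(hdr[0]) != FDT_MAGIC)		/* struct fdt_header.magic */
		return 0;
	if (ntohl(hdr[1]) > MAX_FDT_SIZE)	/* struct fdt_header.totalsize */
		return 0;
	return 1;
}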
+ */ + while (true) + cpu_relax(); + } + + /* Early fixups are done, map the FDT as read-only now */ + fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO); + + name = of_flat_dt_get_machine_name(); + if (!name) + return; + + pr_info("Machine model: %s\n", name); + dump_stack_set_arch_desc("%s (DT)", name); +} + +static void __init request_standard_resources(void) +{ + struct memblock_region *region; + struct resource *res; + unsigned long i = 0; + size_t res_size; + + kernel_code.start = __pa_symbol(_stext); + kernel_code.end = __pa_symbol(__init_begin - 1); + kernel_data.start = __pa_symbol(_sdata); + kernel_data.end = __pa_symbol(_end - 1); + insert_resource(&iomem_resource, &kernel_code); + insert_resource(&iomem_resource, &kernel_data); + + num_standard_resources = memblock.memory.cnt; + res_size = num_standard_resources * sizeof(*standard_resources); + standard_resources = memblock_alloc(res_size, SMP_CACHE_BYTES); + if (!standard_resources) + panic("%s: Failed to allocate %zu bytes\n", __func__, res_size); + + for_each_mem_region(region) { + res = &standard_resources[i++]; + if (memblock_is_nomap(region)) { + res->name = "reserved"; + res->flags = IORESOURCE_MEM; + res->start = __pfn_to_phys(memblock_region_reserved_base_pfn(region)); + res->end = __pfn_to_phys(memblock_region_reserved_end_pfn(region)) - 1; + } else { + res->name = "System RAM"; + res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; + res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region)); + res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1; + } + + insert_resource(&iomem_resource, res); + } +} + +static int __init reserve_memblock_reserved_regions(void) +{ + u64 i, j; + + for (i = 0; i < num_standard_resources; ++i) { + struct resource *mem = &standard_resources[i]; + phys_addr_t r_start, r_end, mem_size = resource_size(mem); + + if (!memblock_is_region_reserved(mem->start, mem_size)) + continue; + + for_each_reserved_mem_range(j, &r_start, &r_end) { + resource_size_t start, end; + + start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start); + end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end); + + if (start > mem->end || end < mem->start) + continue; + + reserve_region_with_split(mem, start, end, "reserved"); + } + } + + return 0; +} +arch_initcall(reserve_memblock_reserved_regions); + +u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID }; + +u64 cpu_logical_map(unsigned int cpu) +{ + return __cpu_logical_map[cpu]; +} + +void __init __no_sanitize_address setup_arch(char **cmdline_p) +{ + setup_initial_init_mm(_stext, _etext, _edata, _end); + + *cmdline_p = boot_command_line; + + kaslr_init(); + + /* + * If know now we are going to need KPTI then use non-global + * mappings from the start, avoiding the cost of rewriting + * everything later. + */ + arm64_use_ng_mappings = kaslr_requires_kpti(); + + early_fixmap_init(); + early_ioremap_init(); + + setup_machine_fdt(__fdt_pointer); + + /* + * Initialise the static keys early as they may be enabled by the + * cpufeature code and early parameters. + */ + jump_label_init(); + parse_early_param(); + + dynamic_scs_init(); + + /* + * Unmask asynchronous aborts and fiq after bringing up possible + * earlycon. (Report possible System Errors once we can report this + * occurred). + */ + local_daif_restore(DAIF_PROCCTX_NOIRQ); + + /* + * TTBR0 is only used for the identity mapping at this stage. Make it + * point to zero page to avoid speculatively fetching new entries. 
+ */ + cpu_uninstall_idmap(); + + xen_early_init(); + efi_init(); + + if (!efi_enabled(EFI_BOOT)) { + if ((u64)_text % MIN_KIMG_ALIGN) + pr_warn(FW_BUG "Kernel image misaligned at boot, please fix your bootloader!"); + WARN_TAINT(mmu_enabled_at_boot, TAINT_FIRMWARE_WORKAROUND, + FW_BUG "Booted with MMU enabled!"); + } + + arm64_memblock_init(); + + paging_init(); + + acpi_table_upgrade(); + + /* Parse the ACPI tables for possible boot-time configuration */ + acpi_boot_table_init(); + + if (acpi_disabled) + unflatten_device_tree(); + + bootmem_init(); + + kasan_init(); + + request_standard_resources(); + + early_ioremap_reset(); + + if (acpi_disabled) + psci_dt_init(); + else + psci_acpi_init(); + + init_bootcpu_ops(); + smp_init_cpus(); + smp_build_mpidr_hash(); + + /* Init percpu seeds for random tags after cpus are set up. */ + kasan_init_sw_tags(); + +#ifdef CONFIG_ARM64_SW_TTBR0_PAN + /* + * Make sure init_thread_info.ttbr0 always generates translation + * faults in case uaccess_enable() is inadvertently called by the init + * thread. + */ + init_task.thread_info.ttbr0 = phys_to_ttbr(__pa_symbol(reserved_pg_dir)); +#endif + + if (boot_args[1] || boot_args[2] || boot_args[3]) { + pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n" + "\tx1: %016llx\n\tx2: %016llx\n\tx3: %016llx\n" + "This indicates a broken bootloader or old kernel\n", + boot_args[1], boot_args[2], boot_args[3]); + } +} + +static inline bool cpu_can_disable(unsigned int cpu) +{ +#ifdef CONFIG_HOTPLUG_CPU + const struct cpu_operations *ops = get_cpu_ops(cpu); + + if (ops && ops->cpu_can_disable) + return ops->cpu_can_disable(cpu); +#endif + return false; +} + +static int __init topology_init(void) +{ + int i; + + for_each_possible_cpu(i) { + struct cpu *cpu = &per_cpu(cpu_data.cpu, i); + cpu->hotpluggable = cpu_can_disable(i); + register_cpu(cpu, i); + } + + return 0; +} +subsys_initcall(topology_init); + +static void dump_kernel_offset(void) +{ + const unsigned long offset = kaslr_offset(); + + if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && offset > 0) { + pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n", + offset, KIMAGE_VADDR); + pr_emerg("PHYS_OFFSET: 0x%llx\n", PHYS_OFFSET); + } else { + pr_emerg("Kernel Offset: disabled\n"); + } +} + +static int arm64_panic_block_dump(struct notifier_block *self, + unsigned long v, void *p) +{ + dump_kernel_offset(); + dump_cpu_features(); + dump_mem_limit(); + return 0; +} + +static struct notifier_block arm64_panic_block = { + .notifier_call = arm64_panic_block_dump +}; + +static int __init register_arm64_panic_block(void) +{ + atomic_notifier_chain_register(&panic_notifier_list, + &arm64_panic_block); + return 0; +} +device_initcall(register_arm64_panic_block); + +static int __init check_mmu_enabled_at_boot(void) +{ + if (!efi_enabled(EFI_BOOT) && mmu_enabled_at_boot) + panic("Non-EFI boot detected with MMU and caches enabled"); + return 0; +} +device_initcall_sync(check_mmu_enabled_at_boot); diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c new file mode 100644 index 0000000000..0e8beb3349 --- /dev/null +++ b/arch/arm64/kernel/signal.c @@ -0,0 +1,1379 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Based on arch/arm/kernel/signal.c + * + * Copyright (C) 1995-2009 Russell King + * Copyright (C) 2012 ARM Ltd. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Do a signal return; undo the signal stack. These are aligned to 128-bit. + */ +struct rt_sigframe { + struct siginfo info; + struct ucontext uc; +}; + +struct frame_record { + u64 fp; + u64 lr; +}; + +struct rt_sigframe_user_layout { + struct rt_sigframe __user *sigframe; + struct frame_record __user *next_frame; + + unsigned long size; /* size of allocated sigframe data */ + unsigned long limit; /* largest allowed size */ + + unsigned long fpsimd_offset; + unsigned long esr_offset; + unsigned long sve_offset; + unsigned long tpidr2_offset; + unsigned long za_offset; + unsigned long zt_offset; + unsigned long extra_offset; + unsigned long end_offset; +}; + +#define BASE_SIGFRAME_SIZE round_up(sizeof(struct rt_sigframe), 16) +#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16) +#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16) + +static void init_user_layout(struct rt_sigframe_user_layout *user) +{ + const size_t reserved_size = + sizeof(user->sigframe->uc.uc_mcontext.__reserved); + + memset(user, 0, sizeof(*user)); + user->size = offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved); + + user->limit = user->size + reserved_size; + + user->limit -= TERMINATOR_SIZE; + user->limit -= EXTRA_CONTEXT_SIZE; + /* Reserve space for extension and terminator ^ */ +} + +static size_t sigframe_size(struct rt_sigframe_user_layout const *user) +{ + return round_up(max(user->size, sizeof(struct rt_sigframe)), 16); +} + +/* + * Sanity limit on the approximate maximum size of signal frame we'll + * try to generate. Stack alignment padding and the frame record are + * not taken into account. This limit is not a guarantee and is + * NOT ABI. + */ +#define SIGFRAME_MAXSZ SZ_256K + +static int __sigframe_alloc(struct rt_sigframe_user_layout *user, + unsigned long *offset, size_t size, bool extend) +{ + size_t padded_size = round_up(size, 16); + + if (padded_size > user->limit - user->size && + !user->extra_offset && + extend) { + int ret; + + user->limit += EXTRA_CONTEXT_SIZE; + ret = __sigframe_alloc(user, &user->extra_offset, + sizeof(struct extra_context), false); + if (ret) { + user->limit -= EXTRA_CONTEXT_SIZE; + return ret; + } + + /* Reserve space for the __reserved[] terminator */ + user->size += TERMINATOR_SIZE; + + /* + * Allow expansion up to SIGFRAME_MAXSZ, ensuring space for + * the terminator: + */ + user->limit = SIGFRAME_MAXSZ - TERMINATOR_SIZE; + } + + /* Still not enough space? Bad luck! */ + if (padded_size > user->limit - user->size) + return -ENOMEM; + + *offset = user->size; + user->size += padded_size; + + return 0; +} + +/* + * Allocate space for an optional record of bytes in the user + * signal frame. The offset from the signal frame base address to the + * allocated block is assigned to *offset. 
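The bookkeeping above runs against the fixed __reserved[] area of struct sigcontext; once the optional records no longer fit there, an extra_context record redirects the parser to additional space allocated above the frame. The sizes involved are visible through the arm64 UAPI header, e.g. with this small probe (assuming an arm64 toolchain and sysroot; not kernel code):

#include <asm/sigcontext.h>	/* struct sigcontext, _aarch64_ctx, extra_context, ... */
#include <stdio.h>

int main(void)
{
	printf("__reserved[]      : %zu bytes\n",
	       sizeof(((struct sigcontext *)0)->__reserved));
	printf("terminator record : %zu bytes (rounded up to 16 above)\n",
	       sizeof(struct _aarch64_ctx));
	printf("extra_context     : %zu bytes\n", sizeof(struct extra_context));
	printf("fpsimd_context    : %zu bytes\n", sizeof(struct fpsimd_context));
	return 0;
}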
+ */ +static int sigframe_alloc(struct rt_sigframe_user_layout *user, + unsigned long *offset, size_t size) +{ + return __sigframe_alloc(user, offset, size, true); +} + +/* Allocate the null terminator record and prevent further allocations */ +static int sigframe_alloc_end(struct rt_sigframe_user_layout *user) +{ + int ret; + + /* Un-reserve the space reserved for the terminator: */ + user->limit += TERMINATOR_SIZE; + + ret = sigframe_alloc(user, &user->end_offset, + sizeof(struct _aarch64_ctx)); + if (ret) + return ret; + + /* Prevent further allocation: */ + user->limit = user->size; + return 0; +} + +static void __user *apply_user_offset( + struct rt_sigframe_user_layout const *user, unsigned long offset) +{ + char __user *base = (char __user *)user->sigframe; + + return base + offset; +} + +struct user_ctxs { + struct fpsimd_context __user *fpsimd; + u32 fpsimd_size; + struct sve_context __user *sve; + u32 sve_size; + struct tpidr2_context __user *tpidr2; + u32 tpidr2_size; + struct za_context __user *za; + u32 za_size; + struct zt_context __user *zt; + u32 zt_size; +}; + +static int preserve_fpsimd_context(struct fpsimd_context __user *ctx) +{ + struct user_fpsimd_state const *fpsimd = + ¤t->thread.uw.fpsimd_state; + int err; + + /* copy the FP and status/control registers */ + err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs)); + __put_user_error(fpsimd->fpsr, &ctx->fpsr, err); + __put_user_error(fpsimd->fpcr, &ctx->fpcr, err); + + /* copy the magic/size information */ + __put_user_error(FPSIMD_MAGIC, &ctx->head.magic, err); + __put_user_error(sizeof(struct fpsimd_context), &ctx->head.size, err); + + return err ? -EFAULT : 0; +} + +static int restore_fpsimd_context(struct user_ctxs *user) +{ + struct user_fpsimd_state fpsimd; + int err = 0; + + /* check the size information */ + if (user->fpsimd_size != sizeof(struct fpsimd_context)) + return -EINVAL; + + /* copy the FP and status/control registers */ + err = __copy_from_user(fpsimd.vregs, &(user->fpsimd->vregs), + sizeof(fpsimd.vregs)); + __get_user_error(fpsimd.fpsr, &(user->fpsimd->fpsr), err); + __get_user_error(fpsimd.fpcr, &(user->fpsimd->fpcr), err); + + clear_thread_flag(TIF_SVE); + current->thread.fp_type = FP_STATE_FPSIMD; + + /* load the hardware registers from the fpsimd_state structure */ + if (!err) + fpsimd_update_current_state(&fpsimd); + + return err ? -EFAULT : 0; +} + + +#ifdef CONFIG_ARM64_SVE + +static int preserve_sve_context(struct sve_context __user *ctx) +{ + int err = 0; + u16 reserved[ARRAY_SIZE(ctx->__reserved)]; + u16 flags = 0; + unsigned int vl = task_get_sve_vl(current); + unsigned int vq = 0; + + if (thread_sm_enabled(¤t->thread)) { + vl = task_get_sme_vl(current); + vq = sve_vq_from_vl(vl); + flags |= SVE_SIG_FLAG_SM; + } else if (test_thread_flag(TIF_SVE)) { + vq = sve_vq_from_vl(vl); + } + + memset(reserved, 0, sizeof(reserved)); + + __put_user_error(SVE_MAGIC, &ctx->head.magic, err); + __put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16), + &ctx->head.size, err); + __put_user_error(vl, &ctx->vl, err); + __put_user_error(flags, &ctx->flags, err); + BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved)); + err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved)); + + if (vq) { + /* + * This assumes that the SVE state has already been saved to + * the task struct by calling the function + * fpsimd_signal_preserve_current_state(). 
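The sve_context record written here is sized from the vector quadword count VQ, i.e. the vector length in bytes divided by 16 (one 128-bit quadword). How quickly the record grows with VL, and hence why larger vector lengths can push the frame into extra_context space, can be seen with the UAPI macro (a sketch assuming the arm64 UAPI headers, not kernel code):

#include <asm/sigcontext.h>	/* SVE_SIG_CONTEXT_SIZE(), struct sve_context, ... */
#include <stdio.h>

int main(void)
{
	unsigned int vl;

	/* VQ counts 128-bit quadwords: VQ = VL (in bytes) / 16 */
	for (vl = 16; vl <= 256; vl *= 2)
		printf("VL %3u bytes -> sve_context record of %u bytes\n",
		       vl, (unsigned int)SVE_SIG_CONTEXT_SIZE(vl / 16));
	return 0;
}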
+ */ + err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET, + current->thread.sve_state, + SVE_SIG_REGS_SIZE(vq)); + } + + return err ? -EFAULT : 0; +} + +static int restore_sve_fpsimd_context(struct user_ctxs *user) +{ + int err = 0; + unsigned int vl, vq; + struct user_fpsimd_state fpsimd; + u16 user_vl, flags; + + if (user->sve_size < sizeof(*user->sve)) + return -EINVAL; + + __get_user_error(user_vl, &(user->sve->vl), err); + __get_user_error(flags, &(user->sve->flags), err); + if (err) + return err; + + if (flags & SVE_SIG_FLAG_SM) { + if (!system_supports_sme()) + return -EINVAL; + + vl = task_get_sme_vl(current); + } else { + /* + * A SME only system use SVE for streaming mode so can + * have a SVE formatted context with a zero VL and no + * payload data. + */ + if (!system_supports_sve() && !system_supports_sme()) + return -EINVAL; + + vl = task_get_sve_vl(current); + } + + if (user_vl != vl) + return -EINVAL; + + if (user->sve_size == sizeof(*user->sve)) { + clear_thread_flag(TIF_SVE); + current->thread.svcr &= ~SVCR_SM_MASK; + current->thread.fp_type = FP_STATE_FPSIMD; + goto fpsimd_only; + } + + vq = sve_vq_from_vl(vl); + + if (user->sve_size < SVE_SIG_CONTEXT_SIZE(vq)) + return -EINVAL; + + /* + * Careful: we are about __copy_from_user() directly into + * thread.sve_state with preemption enabled, so protection is + * needed to prevent a racing context switch from writing stale + * registers back over the new data. + */ + + fpsimd_flush_task_state(current); + /* From now, fpsimd_thread_switch() won't touch thread.sve_state */ + + sve_alloc(current, true); + if (!current->thread.sve_state) { + clear_thread_flag(TIF_SVE); + return -ENOMEM; + } + + err = __copy_from_user(current->thread.sve_state, + (char __user const *)user->sve + + SVE_SIG_REGS_OFFSET, + SVE_SIG_REGS_SIZE(vq)); + if (err) + return -EFAULT; + + if (flags & SVE_SIG_FLAG_SM) + current->thread.svcr |= SVCR_SM_MASK; + else + set_thread_flag(TIF_SVE); + current->thread.fp_type = FP_STATE_SVE; + +fpsimd_only: + /* copy the FP and status/control registers */ + /* restore_sigframe() already checked that user->fpsimd != NULL. */ + err = __copy_from_user(fpsimd.vregs, user->fpsimd->vregs, + sizeof(fpsimd.vregs)); + __get_user_error(fpsimd.fpsr, &user->fpsimd->fpsr, err); + __get_user_error(fpsimd.fpcr, &user->fpsimd->fpcr, err); + + /* load the hardware registers from the fpsimd_state structure */ + if (!err) + fpsimd_update_current_state(&fpsimd); + + return err ? -EFAULT : 0; +} + +#else /* ! CONFIG_ARM64_SVE */ + +static int restore_sve_fpsimd_context(struct user_ctxs *user) +{ + WARN_ON_ONCE(1); + return -EINVAL; +} + +/* Turn any non-optimised out attempts to use this into a link error: */ +extern int preserve_sve_context(void __user *ctx); + +#endif /* ! 
CONFIG_ARM64_SVE */ + +#ifdef CONFIG_ARM64_SME + +static int preserve_tpidr2_context(struct tpidr2_context __user *ctx) +{ + int err = 0; + + current->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0); + + __put_user_error(TPIDR2_MAGIC, &ctx->head.magic, err); + __put_user_error(sizeof(*ctx), &ctx->head.size, err); + __put_user_error(current->thread.tpidr2_el0, &ctx->tpidr2, err); + + return err; +} + +static int restore_tpidr2_context(struct user_ctxs *user) +{ + u64 tpidr2_el0; + int err = 0; + + if (user->tpidr2_size != sizeof(*user->tpidr2)) + return -EINVAL; + + __get_user_error(tpidr2_el0, &user->tpidr2->tpidr2, err); + if (!err) + write_sysreg_s(tpidr2_el0, SYS_TPIDR2_EL0); + + return err; +} + +static int preserve_za_context(struct za_context __user *ctx) +{ + int err = 0; + u16 reserved[ARRAY_SIZE(ctx->__reserved)]; + unsigned int vl = task_get_sme_vl(current); + unsigned int vq; + + if (thread_za_enabled(¤t->thread)) + vq = sve_vq_from_vl(vl); + else + vq = 0; + + memset(reserved, 0, sizeof(reserved)); + + __put_user_error(ZA_MAGIC, &ctx->head.magic, err); + __put_user_error(round_up(ZA_SIG_CONTEXT_SIZE(vq), 16), + &ctx->head.size, err); + __put_user_error(vl, &ctx->vl, err); + BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved)); + err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved)); + + if (vq) { + /* + * This assumes that the ZA state has already been saved to + * the task struct by calling the function + * fpsimd_signal_preserve_current_state(). + */ + err |= __copy_to_user((char __user *)ctx + ZA_SIG_REGS_OFFSET, + current->thread.sme_state, + ZA_SIG_REGS_SIZE(vq)); + } + + return err ? -EFAULT : 0; +} + +static int restore_za_context(struct user_ctxs *user) +{ + int err = 0; + unsigned int vq; + u16 user_vl; + + if (user->za_size < sizeof(*user->za)) + return -EINVAL; + + __get_user_error(user_vl, &(user->za->vl), err); + if (err) + return err; + + if (user_vl != task_get_sme_vl(current)) + return -EINVAL; + + if (user->za_size == sizeof(*user->za)) { + current->thread.svcr &= ~SVCR_ZA_MASK; + return 0; + } + + vq = sve_vq_from_vl(user_vl); + + if (user->za_size < ZA_SIG_CONTEXT_SIZE(vq)) + return -EINVAL; + + /* + * Careful: we are about __copy_from_user() directly into + * thread.sme_state with preemption enabled, so protection is + * needed to prevent a racing context switch from writing stale + * registers back over the new data. 
+ */ + + fpsimd_flush_task_state(current); + /* From now, fpsimd_thread_switch() won't touch thread.sve_state */ + + sme_alloc(current, true); + if (!current->thread.sme_state) { + current->thread.svcr &= ~SVCR_ZA_MASK; + clear_thread_flag(TIF_SME); + return -ENOMEM; + } + + err = __copy_from_user(current->thread.sme_state, + (char __user const *)user->za + + ZA_SIG_REGS_OFFSET, + ZA_SIG_REGS_SIZE(vq)); + if (err) + return -EFAULT; + + set_thread_flag(TIF_SME); + current->thread.svcr |= SVCR_ZA_MASK; + + return 0; +} + +static int preserve_zt_context(struct zt_context __user *ctx) +{ + int err = 0; + u16 reserved[ARRAY_SIZE(ctx->__reserved)]; + + if (WARN_ON(!thread_za_enabled(¤t->thread))) + return -EINVAL; + + memset(reserved, 0, sizeof(reserved)); + + __put_user_error(ZT_MAGIC, &ctx->head.magic, err); + __put_user_error(round_up(ZT_SIG_CONTEXT_SIZE(1), 16), + &ctx->head.size, err); + __put_user_error(1, &ctx->nregs, err); + BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved)); + err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved)); + + /* + * This assumes that the ZT state has already been saved to + * the task struct by calling the function + * fpsimd_signal_preserve_current_state(). + */ + err |= __copy_to_user((char __user *)ctx + ZT_SIG_REGS_OFFSET, + thread_zt_state(¤t->thread), + ZT_SIG_REGS_SIZE(1)); + + return err ? -EFAULT : 0; +} + +static int restore_zt_context(struct user_ctxs *user) +{ + int err; + u16 nregs; + + /* ZA must be restored first for this check to be valid */ + if (!thread_za_enabled(¤t->thread)) + return -EINVAL; + + if (user->zt_size != ZT_SIG_CONTEXT_SIZE(1)) + return -EINVAL; + + if (__copy_from_user(&nregs, &(user->zt->nregs), sizeof(nregs))) + return -EFAULT; + + if (nregs != 1) + return -EINVAL; + + /* + * Careful: we are about __copy_from_user() directly into + * thread.zt_state with preemption enabled, so protection is + * needed to prevent a racing context switch from writing stale + * registers back over the new data. + */ + + fpsimd_flush_task_state(current); + /* From now, fpsimd_thread_switch() won't touch ZT in thread state */ + + err = __copy_from_user(thread_zt_state(¤t->thread), + (char __user const *)user->zt + + ZT_SIG_REGS_OFFSET, + ZT_SIG_REGS_SIZE(1)); + if (err) + return -EFAULT; + + return 0; +} + +#else /* ! CONFIG_ARM64_SME */ + +/* Turn any non-optimised out attempts to use these into a link error: */ +extern int preserve_tpidr2_context(void __user *ctx); +extern int restore_tpidr2_context(struct user_ctxs *user); +extern int preserve_za_context(void __user *ctx); +extern int restore_za_context(struct user_ctxs *user); +extern int preserve_zt_context(void __user *ctx); +extern int restore_zt_context(struct user_ctxs *user); + +#endif /* ! 
CONFIG_ARM64_SME */ + +static int parse_user_sigframe(struct user_ctxs *user, + struct rt_sigframe __user *sf) +{ + struct sigcontext __user *const sc = &sf->uc.uc_mcontext; + struct _aarch64_ctx __user *head; + char __user *base = (char __user *)&sc->__reserved; + size_t offset = 0; + size_t limit = sizeof(sc->__reserved); + bool have_extra_context = false; + char const __user *const sfp = (char const __user *)sf; + + user->fpsimd = NULL; + user->sve = NULL; + user->tpidr2 = NULL; + user->za = NULL; + user->zt = NULL; + + if (!IS_ALIGNED((unsigned long)base, 16)) + goto invalid; + + while (1) { + int err = 0; + u32 magic, size; + char const __user *userp; + struct extra_context const __user *extra; + u64 extra_datap; + u32 extra_size; + struct _aarch64_ctx const __user *end; + u32 end_magic, end_size; + + if (limit - offset < sizeof(*head)) + goto invalid; + + if (!IS_ALIGNED(offset, 16)) + goto invalid; + + head = (struct _aarch64_ctx __user *)(base + offset); + __get_user_error(magic, &head->magic, err); + __get_user_error(size, &head->size, err); + if (err) + return err; + + if (limit - offset < size) + goto invalid; + + switch (magic) { + case 0: + if (size) + goto invalid; + + goto done; + + case FPSIMD_MAGIC: + if (!system_supports_fpsimd()) + goto invalid; + if (user->fpsimd) + goto invalid; + + user->fpsimd = (struct fpsimd_context __user *)head; + user->fpsimd_size = size; + break; + + case ESR_MAGIC: + /* ignore */ + break; + + case SVE_MAGIC: + if (!system_supports_sve() && !system_supports_sme()) + goto invalid; + + if (user->sve) + goto invalid; + + user->sve = (struct sve_context __user *)head; + user->sve_size = size; + break; + + case TPIDR2_MAGIC: + if (!system_supports_tpidr2()) + goto invalid; + + if (user->tpidr2) + goto invalid; + + user->tpidr2 = (struct tpidr2_context __user *)head; + user->tpidr2_size = size; + break; + + case ZA_MAGIC: + if (!system_supports_sme()) + goto invalid; + + if (user->za) + goto invalid; + + user->za = (struct za_context __user *)head; + user->za_size = size; + break; + + case ZT_MAGIC: + if (!system_supports_sme2()) + goto invalid; + + if (user->zt) + goto invalid; + + user->zt = (struct zt_context __user *)head; + user->zt_size = size; + break; + + case EXTRA_MAGIC: + if (have_extra_context) + goto invalid; + + if (size < sizeof(*extra)) + goto invalid; + + userp = (char const __user *)head; + + extra = (struct extra_context const __user *)userp; + userp += size; + + __get_user_error(extra_datap, &extra->datap, err); + __get_user_error(extra_size, &extra->size, err); + if (err) + return err; + + /* Check for the dummy terminator in __reserved[]: */ + + if (limit - offset - size < TERMINATOR_SIZE) + goto invalid; + + end = (struct _aarch64_ctx const __user *)userp; + userp += TERMINATOR_SIZE; + + __get_user_error(end_magic, &end->magic, err); + __get_user_error(end_size, &end->size, err); + if (err) + return err; + + if (end_magic || end_size) + goto invalid; + + /* Prevent looping/repeated parsing of extra_context */ + have_extra_context = true; + + base = (__force void __user *)extra_datap; + if (!IS_ALIGNED((unsigned long)base, 16)) + goto invalid; + + if (!IS_ALIGNED(extra_size, 16)) + goto invalid; + + if (base != userp) + goto invalid; + + /* Reject "unreasonably large" frames: */ + if (extra_size > sfp + SIGFRAME_MAXSZ - userp) + goto invalid; + + /* + * Ignore trailing terminator in __reserved[] + * and start parsing extra data: + */ + offset = 0; + limit = extra_size; + + if (!access_ok(base, limit)) + goto invalid; + + 
continue; + + default: + goto invalid; + } + + if (size < sizeof(*head)) + goto invalid; + + if (limit - offset < size) + goto invalid; + + offset += size; + } + +done: + return 0; + +invalid: + return -EINVAL; +} + +static int restore_sigframe(struct pt_regs *regs, + struct rt_sigframe __user *sf) +{ + sigset_t set; + int i, err; + struct user_ctxs user; + + err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set)); + if (err == 0) + set_current_blocked(&set); + + for (i = 0; i < 31; i++) + __get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i], + err); + __get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err); + __get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err); + __get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err); + + /* + * Avoid sys_rt_sigreturn() restarting. + */ + forget_syscall(regs); + + err |= !valid_user_regs(®s->user_regs, current); + if (err == 0) + err = parse_user_sigframe(&user, sf); + + if (err == 0 && system_supports_fpsimd()) { + if (!user.fpsimd) + return -EINVAL; + + if (user.sve) + err = restore_sve_fpsimd_context(&user); + else + err = restore_fpsimd_context(&user); + } + + if (err == 0 && system_supports_tpidr2() && user.tpidr2) + err = restore_tpidr2_context(&user); + + if (err == 0 && system_supports_sme() && user.za) + err = restore_za_context(&user); + + if (err == 0 && system_supports_sme2() && user.zt) + err = restore_zt_context(&user); + + return err; +} + +SYSCALL_DEFINE0(rt_sigreturn) +{ + struct pt_regs *regs = current_pt_regs(); + struct rt_sigframe __user *frame; + + /* Always make any pending restarted system calls return -EINTR */ + current->restart_block.fn = do_no_restart_syscall; + + /* + * Since we stacked the signal on a 128-bit boundary, then 'sp' should + * be word aligned here. + */ + if (regs->sp & 15) + goto badframe; + + frame = (struct rt_sigframe __user *)regs->sp; + + if (!access_ok(frame, sizeof (*frame))) + goto badframe; + + if (restore_sigframe(regs, frame)) + goto badframe; + + if (restore_altstack(&frame->uc.uc_stack)) + goto badframe; + + return regs->regs[0]; + +badframe: + arm64_notify_segfault(regs->sp); + return 0; +} + +/* + * Determine the layout of optional records in the signal frame + * + * add_all: if true, lays out the biggest possible signal frame for + * this task; otherwise, generates a layout for the current state + * of the task. 
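The {magic, size} chain that parse_user_sigframe() walks above is the same layout userspace sees in a delivered signal frame, including the EXTRA_MAGIC redirection. A hedged sketch of walking it from a handler, assuming an arm64 system and the UAPI <asm/sigcontext.h> definitions (printf() is tolerable here only because the signal is raised synchronously):

#include <asm/sigcontext.h>	/* _aarch64_ctx, FPSIMD_MAGIC, EXTRA_MAGIC, ... */
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <ucontext.h>

static void handler(int sig, siginfo_t *info, void *uc_)
{
	ucontext_t *uc = uc_;
	struct _aarch64_ctx *head =
		(struct _aarch64_ctx *)uc->uc_mcontext.__reserved;

	while (head->magic) {
		printf("record: magic %#x, size %u\n", head->magic, head->size);

		if (head->magic == EXTRA_MAGIC) {
			struct extra_context *extra = (struct extra_context *)head;

			/* keep parsing in the extra space above the frame */
			head = (struct _aarch64_ctx *)(uintptr_t)extra->datap;
			continue;
		}
		head = (struct _aarch64_ctx *)((char *)head + head->size);
	}
}

int main(void)
{
	struct sigaction sa = { .sa_sigaction = handler, .sa_flags = SA_SIGINFO };

	sigaction(SIGUSR1, &sa, NULL);
	raise(SIGUSR1);
	return 0;
}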
+ */ +static int setup_sigframe_layout(struct rt_sigframe_user_layout *user, + bool add_all) +{ + int err; + + if (system_supports_fpsimd()) { + err = sigframe_alloc(user, &user->fpsimd_offset, + sizeof(struct fpsimd_context)); + if (err) + return err; + } + + /* fault information, if valid */ + if (add_all || current->thread.fault_code) { + err = sigframe_alloc(user, &user->esr_offset, + sizeof(struct esr_context)); + if (err) + return err; + } + + if (system_supports_sve() || system_supports_sme()) { + unsigned int vq = 0; + + if (add_all || test_thread_flag(TIF_SVE) || + thread_sm_enabled(¤t->thread)) { + int vl = max(sve_max_vl(), sme_max_vl()); + + if (!add_all) + vl = thread_get_cur_vl(¤t->thread); + + vq = sve_vq_from_vl(vl); + } + + err = sigframe_alloc(user, &user->sve_offset, + SVE_SIG_CONTEXT_SIZE(vq)); + if (err) + return err; + } + + if (system_supports_tpidr2()) { + err = sigframe_alloc(user, &user->tpidr2_offset, + sizeof(struct tpidr2_context)); + if (err) + return err; + } + + if (system_supports_sme()) { + unsigned int vl; + unsigned int vq = 0; + + if (add_all) + vl = sme_max_vl(); + else + vl = task_get_sme_vl(current); + + if (thread_za_enabled(¤t->thread)) + vq = sve_vq_from_vl(vl); + + err = sigframe_alloc(user, &user->za_offset, + ZA_SIG_CONTEXT_SIZE(vq)); + if (err) + return err; + } + + if (system_supports_sme2()) { + if (add_all || thread_za_enabled(¤t->thread)) { + err = sigframe_alloc(user, &user->zt_offset, + ZT_SIG_CONTEXT_SIZE(1)); + if (err) + return err; + } + } + + return sigframe_alloc_end(user); +} + +static int setup_sigframe(struct rt_sigframe_user_layout *user, + struct pt_regs *regs, sigset_t *set) +{ + int i, err = 0; + struct rt_sigframe __user *sf = user->sigframe; + + /* set up the stack frame for unwinding */ + __put_user_error(regs->regs[29], &user->next_frame->fp, err); + __put_user_error(regs->regs[30], &user->next_frame->lr, err); + + for (i = 0; i < 31; i++) + __put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i], + err); + __put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err); + __put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err); + __put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err); + + __put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err); + + err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set)); + + if (err == 0 && system_supports_fpsimd()) { + struct fpsimd_context __user *fpsimd_ctx = + apply_user_offset(user, user->fpsimd_offset); + err |= preserve_fpsimd_context(fpsimd_ctx); + } + + /* fault information, if valid */ + if (err == 0 && user->esr_offset) { + struct esr_context __user *esr_ctx = + apply_user_offset(user, user->esr_offset); + + __put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err); + __put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err); + __put_user_error(current->thread.fault_code, &esr_ctx->esr, err); + } + + /* Scalable Vector Extension state (including streaming), if present */ + if ((system_supports_sve() || system_supports_sme()) && + err == 0 && user->sve_offset) { + struct sve_context __user *sve_ctx = + apply_user_offset(user, user->sve_offset); + err |= preserve_sve_context(sve_ctx); + } + + /* TPIDR2 if supported */ + if (system_supports_tpidr2() && err == 0) { + struct tpidr2_context __user *tpidr2_ctx = + apply_user_offset(user, user->tpidr2_offset); + err |= preserve_tpidr2_context(tpidr2_ctx); + } + + /* ZA state if present */ + if (system_supports_sme() && err == 0 && user->za_offset) { + struct za_context __user *za_ctx = + 
apply_user_offset(user, user->za_offset); + err |= preserve_za_context(za_ctx); + } + + /* ZT state if present */ + if (system_supports_sme2() && err == 0 && user->zt_offset) { + struct zt_context __user *zt_ctx = + apply_user_offset(user, user->zt_offset); + err |= preserve_zt_context(zt_ctx); + } + + if (err == 0 && user->extra_offset) { + char __user *sfp = (char __user *)user->sigframe; + char __user *userp = + apply_user_offset(user, user->extra_offset); + + struct extra_context __user *extra; + struct _aarch64_ctx __user *end; + u64 extra_datap; + u32 extra_size; + + extra = (struct extra_context __user *)userp; + userp += EXTRA_CONTEXT_SIZE; + + end = (struct _aarch64_ctx __user *)userp; + userp += TERMINATOR_SIZE; + + /* + * extra_datap is just written to the signal frame. + * The value gets cast back to a void __user * + * during sigreturn. + */ + extra_datap = (__force u64)userp; + extra_size = sfp + round_up(user->size, 16) - userp; + + __put_user_error(EXTRA_MAGIC, &extra->head.magic, err); + __put_user_error(EXTRA_CONTEXT_SIZE, &extra->head.size, err); + __put_user_error(extra_datap, &extra->datap, err); + __put_user_error(extra_size, &extra->size, err); + + /* Add the terminator */ + __put_user_error(0, &end->magic, err); + __put_user_error(0, &end->size, err); + } + + /* set the "end" magic */ + if (err == 0) { + struct _aarch64_ctx __user *end = + apply_user_offset(user, user->end_offset); + + __put_user_error(0, &end->magic, err); + __put_user_error(0, &end->size, err); + } + + return err; +} + +static int get_sigframe(struct rt_sigframe_user_layout *user, + struct ksignal *ksig, struct pt_regs *regs) +{ + unsigned long sp, sp_top; + int err; + + init_user_layout(user); + err = setup_sigframe_layout(user, false); + if (err) + return err; + + sp = sp_top = sigsp(regs->sp, ksig); + + sp = round_down(sp - sizeof(struct frame_record), 16); + user->next_frame = (struct frame_record __user *)sp; + + sp = round_down(sp, 16) - sigframe_size(user); + user->sigframe = (struct rt_sigframe __user *)sp; + + /* + * Check that we can actually write to the signal frame. + */ + if (!access_ok(user->sigframe, sp_top - sp)) + return -EFAULT; + + return 0; +} + +static void setup_return(struct pt_regs *regs, struct k_sigaction *ka, + struct rt_sigframe_user_layout *user, int usig) +{ + __sigrestore_t sigtramp; + + regs->regs[0] = usig; + regs->sp = (unsigned long)user->sigframe; + regs->regs[29] = (unsigned long)&user->next_frame->fp; + regs->pc = (unsigned long)ka->sa.sa_handler; + + /* + * Signal delivery is a (wacky) indirect function call in + * userspace, so simulate the same setting of BTYPE as a BLR + * . + * Signal delivery to a location in a PROT_BTI guarded page + * that is not a function entry point will now trigger a + * SIGILL in userspace. + * + * If the signal handler entry point is not in a PROT_BTI + * guarded page, this is harmless. + */ + if (system_supports_bti()) { + regs->pstate &= ~PSR_BTYPE_MASK; + regs->pstate |= PSR_BTYPE_C; + } + + /* TCO (Tag Check Override) always cleared for signal handlers */ + regs->pstate &= ~PSR_TCO_BIT; + + /* Signal handlers are invoked with ZA and streaming mode disabled */ + if (system_supports_sme()) { + /* + * If we were in streaming mode the saved register + * state was SVE but we will exit SM and use the + * FPSIMD register state - flush the saved FPSIMD + * register state in case it gets loaded. 
+ */ + if (current->thread.svcr & SVCR_SM_MASK) { + memset(¤t->thread.uw.fpsimd_state, 0, + sizeof(current->thread.uw.fpsimd_state)); + current->thread.fp_type = FP_STATE_FPSIMD; + } + + current->thread.svcr &= ~(SVCR_ZA_MASK | + SVCR_SM_MASK); + sme_smstop(); + } + + if (ka->sa.sa_flags & SA_RESTORER) + sigtramp = ka->sa.sa_restorer; + else + sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp); + + regs->regs[30] = (unsigned long)sigtramp; +} + +static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set, + struct pt_regs *regs) +{ + struct rt_sigframe_user_layout user; + struct rt_sigframe __user *frame; + int err = 0; + + fpsimd_signal_preserve_current_state(); + + if (get_sigframe(&user, ksig, regs)) + return 1; + + frame = user.sigframe; + + __put_user_error(0, &frame->uc.uc_flags, err); + __put_user_error(NULL, &frame->uc.uc_link, err); + + err |= __save_altstack(&frame->uc.uc_stack, regs->sp); + err |= setup_sigframe(&user, regs, set); + if (err == 0) { + setup_return(regs, &ksig->ka, &user, usig); + if (ksig->ka.sa.sa_flags & SA_SIGINFO) { + err |= copy_siginfo_to_user(&frame->info, &ksig->info); + regs->regs[1] = (unsigned long)&frame->info; + regs->regs[2] = (unsigned long)&frame->uc; + } + } + + return err; +} + +static void setup_restart_syscall(struct pt_regs *regs) +{ + if (is_compat_task()) + compat_setup_restart_syscall(regs); + else + regs->regs[8] = __NR_restart_syscall; +} + +/* + * OK, we're invoking a handler + */ +static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) +{ + sigset_t *oldset = sigmask_to_save(); + int usig = ksig->sig; + int ret; + + rseq_signal_deliver(ksig, regs); + + /* + * Set up the stack frame + */ + if (is_compat_task()) { + if (ksig->ka.sa.sa_flags & SA_SIGINFO) + ret = compat_setup_rt_frame(usig, ksig, oldset, regs); + else + ret = compat_setup_frame(usig, ksig, oldset, regs); + } else { + ret = setup_rt_frame(usig, ksig, oldset, regs); + } + + /* + * Check that the resulting registers are actually sane. + */ + ret |= !valid_user_regs(®s->user_regs, current); + + /* Step into the signal handler if we are stepping */ + signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP)); +} + +/* + * Note that 'init' is a special process: it doesn't get signals it doesn't + * want to handle. Thus you cannot kill init even with a SIGKILL even by + * mistake. + * + * Note that we go through the signals twice: once to check the signals that + * the kernel can handle, and then we build all the user-level signal handling + * stack-frames in one go after that. + */ +static void do_signal(struct pt_regs *regs) +{ + unsigned long continue_addr = 0, restart_addr = 0; + int retval = 0; + struct ksignal ksig; + bool syscall = in_syscall(regs); + + /* + * If we were from a system call, check for system call restarting... + */ + if (syscall) { + continue_addr = regs->pc; + restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4); + retval = regs->regs[0]; + + /* + * Avoid additional syscall restarting via ret_to_user. + */ + forget_syscall(regs); + + /* + * Prepare for system call restart. We do this here so that a + * debugger will see the already changed PC. + */ + switch (retval) { + case -ERESTARTNOHAND: + case -ERESTARTSYS: + case -ERESTARTNOINTR: + case -ERESTART_RESTARTBLOCK: + regs->regs[0] = regs->orig_x0; + regs->pc = restart_addr; + break; + } + } + + /* + * Get the signal to deliver. When running under ptrace, at this point + * the debugger may change all of our registers. 
+ */ + if (get_signal(&ksig)) { + /* + * Depending on the signal settings, we may need to revert the + * decision to restart the system call, but skip this if a + * debugger has chosen to restart at a different PC. + */ + if (regs->pc == restart_addr && + (retval == -ERESTARTNOHAND || + retval == -ERESTART_RESTARTBLOCK || + (retval == -ERESTARTSYS && + !(ksig.ka.sa.sa_flags & SA_RESTART)))) { + syscall_set_return_value(current, regs, -EINTR, 0); + regs->pc = continue_addr; + } + + handle_signal(&ksig, regs); + return; + } + + /* + * Handle restarting a different system call. As above, if a debugger + * has chosen to restart at a different PC, ignore the restart. + */ + if (syscall && regs->pc == restart_addr) { + if (retval == -ERESTART_RESTARTBLOCK) + setup_restart_syscall(regs); + user_rewind_single_step(current); + } + + restore_saved_sigmask(); +} + +void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags) +{ + do { + if (thread_flags & _TIF_NEED_RESCHED) { + /* Unmask Debug and SError for the next task */ + local_daif_restore(DAIF_PROCCTX_NOIRQ); + + schedule(); + } else { + local_daif_restore(DAIF_PROCCTX); + + if (thread_flags & _TIF_UPROBE) + uprobe_notify_resume(regs); + + if (thread_flags & _TIF_MTE_ASYNC_FAULT) { + clear_thread_flag(TIF_MTE_ASYNC_FAULT); + send_sig_fault(SIGSEGV, SEGV_MTEAERR, + (void __user *)NULL, current); + } + + if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) + do_signal(regs); + + if (thread_flags & _TIF_NOTIFY_RESUME) + resume_user_mode_work(regs); + + if (thread_flags & _TIF_FOREIGN_FPSTATE) + fpsimd_restore_current_state(); + } + + local_daif_mask(); + thread_flags = read_thread_flags(); + } while (thread_flags & _TIF_WORK_MASK); +} + +unsigned long __ro_after_init signal_minsigstksz; + +/* + * Determine the stack space required for guaranteed signal devliery. + * This function is used to populate AT_MINSIGSTKSZ at process startup. + * cpufeatures setup is assumed to be complete. + */ +void __init minsigstksz_setup(void) +{ + struct rt_sigframe_user_layout user; + + init_user_layout(&user); + + /* + * If this fails, SIGFRAME_MAXSZ needs to be enlarged. It won't + * be big enough, but it's our best guess: + */ + if (WARN_ON(setup_sigframe_layout(&user, true))) + return; + + signal_minsigstksz = sigframe_size(&user) + + round_up(sizeof(struct frame_record), 16) + + 16; /* max alignment padding */ +} + +/* + * Compile-time assertions for siginfo_t offsets. Check NSIG* as well, as + * changes likely come with new fields that should be added below. 
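For illustration only (not part of this patch), a minimal userspace sketch of how the signal_minsigstksz value computed by minsigstksz_setup() above is consumed: it reaches processes as AT_MINSIGSTKSZ, and an alternate signal stack should be sized from it rather than from SIGSTKSZ alone. Error handling is trimmed.

#include <signal.h>
#include <stdlib.h>
#include <sys/auxv.h>

static int setup_alt_stack(void)
{
        /* AT_MINSIGSTKSZ is populated from signal_minsigstksz */
        unsigned long min = getauxval(AT_MINSIGSTKSZ);
        size_t size = min > SIGSTKSZ ? min : SIGSTKSZ;
        stack_t ss = { .ss_flags = 0, .ss_size = size };

        ss.ss_sp = malloc(size);
        if (!ss.ss_sp)
                return -1;
        return sigaltstack(&ss, NULL);
}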
+ */ +static_assert(NSIGILL == 11); +static_assert(NSIGFPE == 15); +static_assert(NSIGSEGV == 10); +static_assert(NSIGBUS == 5); +static_assert(NSIGTRAP == 6); +static_assert(NSIGCHLD == 6); +static_assert(NSIGSYS == 2); +static_assert(sizeof(siginfo_t) == 128); +static_assert(__alignof__(siginfo_t) == 8); +static_assert(offsetof(siginfo_t, si_signo) == 0x00); +static_assert(offsetof(siginfo_t, si_errno) == 0x04); +static_assert(offsetof(siginfo_t, si_code) == 0x08); +static_assert(offsetof(siginfo_t, si_pid) == 0x10); +static_assert(offsetof(siginfo_t, si_uid) == 0x14); +static_assert(offsetof(siginfo_t, si_tid) == 0x10); +static_assert(offsetof(siginfo_t, si_overrun) == 0x14); +static_assert(offsetof(siginfo_t, si_status) == 0x18); +static_assert(offsetof(siginfo_t, si_utime) == 0x20); +static_assert(offsetof(siginfo_t, si_stime) == 0x28); +static_assert(offsetof(siginfo_t, si_value) == 0x18); +static_assert(offsetof(siginfo_t, si_int) == 0x18); +static_assert(offsetof(siginfo_t, si_ptr) == 0x18); +static_assert(offsetof(siginfo_t, si_addr) == 0x10); +static_assert(offsetof(siginfo_t, si_addr_lsb) == 0x18); +static_assert(offsetof(siginfo_t, si_lower) == 0x20); +static_assert(offsetof(siginfo_t, si_upper) == 0x28); +static_assert(offsetof(siginfo_t, si_pkey) == 0x20); +static_assert(offsetof(siginfo_t, si_perf_data) == 0x18); +static_assert(offsetof(siginfo_t, si_perf_type) == 0x20); +static_assert(offsetof(siginfo_t, si_perf_flags) == 0x24); +static_assert(offsetof(siginfo_t, si_band) == 0x10); +static_assert(offsetof(siginfo_t, si_fd) == 0x18); +static_assert(offsetof(siginfo_t, si_call_addr) == 0x10); +static_assert(offsetof(siginfo_t, si_syscall) == 0x18); +static_assert(offsetof(siginfo_t, si_arch) == 0x1c); diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c new file mode 100644 index 0000000000..bbd5427047 --- /dev/null +++ b/arch/arm64/kernel/signal32.c @@ -0,0 +1,495 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Based on arch/arm/kernel/signal.c + * + * Copyright (C) 1995-2009 Russell King + * Copyright (C) 2012 ARM Ltd. + * Modified by Will Deacon + */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +struct compat_vfp_sigframe { + compat_ulong_t magic; + compat_ulong_t size; + struct compat_user_vfp { + compat_u64 fpregs[32]; + compat_ulong_t fpscr; + } ufp; + struct compat_user_vfp_exc { + compat_ulong_t fpexc; + compat_ulong_t fpinst; + compat_ulong_t fpinst2; + } ufp_exc; +} __attribute__((__aligned__(8))); + +#define VFP_MAGIC 0x56465001 +#define VFP_STORAGE_SIZE sizeof(struct compat_vfp_sigframe) + +#define FSR_WRITE_SHIFT (11) + +struct compat_aux_sigframe { + struct compat_vfp_sigframe vfp; + + /* Something that isn't a valid magic number for any coprocessor. */ + unsigned long end_magic; +} __attribute__((__aligned__(8))); + +static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set) +{ + compat_sigset_t cset; + + cset.sig[0] = set->sig[0] & 0xffffffffull; + cset.sig[1] = set->sig[0] >> 32; + + return copy_to_user(uset, &cset, sizeof(*uset)); +} + +static inline int get_sigset_t(sigset_t *set, + const compat_sigset_t __user *uset) +{ + compat_sigset_t s32; + + if (copy_from_user(&s32, uset, sizeof(*uset))) + return -EFAULT; + + set->sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32); + return 0; +} + +/* + * VFP save/restore code. 
+ * + * We have to be careful with endianness, since the fpsimd context-switch + * code operates on 128-bit (Q) register values whereas the compat ABI + * uses an array of 64-bit (D) registers. Consequently, we need to swap + * the two halves of each Q register when running on a big-endian CPU. + */ +union __fpsimd_vreg { + __uint128_t raw; + struct { +#ifdef __AARCH64EB__ + u64 hi; + u64 lo; +#else + u64 lo; + u64 hi; +#endif + }; +}; + +static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame) +{ + struct user_fpsimd_state const *fpsimd = + ¤t->thread.uw.fpsimd_state; + compat_ulong_t magic = VFP_MAGIC; + compat_ulong_t size = VFP_STORAGE_SIZE; + compat_ulong_t fpscr, fpexc; + int i, err = 0; + + /* + * Save the hardware registers to the fpsimd_state structure. + * Note that this also saves V16-31, which aren't visible + * in AArch32. + */ + fpsimd_signal_preserve_current_state(); + + /* Place structure header on the stack */ + __put_user_error(magic, &frame->magic, err); + __put_user_error(size, &frame->size, err); + + /* + * Now copy the FP registers. Since the registers are packed, + * we can copy the prefix we want (V0-V15) as it is. + */ + for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) { + union __fpsimd_vreg vreg = { + .raw = fpsimd->vregs[i >> 1], + }; + + __put_user_error(vreg.lo, &frame->ufp.fpregs[i], err); + __put_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err); + } + + /* Create an AArch32 fpscr from the fpsr and the fpcr. */ + fpscr = (fpsimd->fpsr & VFP_FPSCR_STAT_MASK) | + (fpsimd->fpcr & VFP_FPSCR_CTRL_MASK); + __put_user_error(fpscr, &frame->ufp.fpscr, err); + + /* + * The exception register aren't available so we fake up a + * basic FPEXC and zero everything else. + */ + fpexc = (1 << 30); + __put_user_error(fpexc, &frame->ufp_exc.fpexc, err); + __put_user_error(0, &frame->ufp_exc.fpinst, err); + __put_user_error(0, &frame->ufp_exc.fpinst2, err); + + return err ? -EFAULT : 0; +} + +static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame) +{ + struct user_fpsimd_state fpsimd; + compat_ulong_t magic = VFP_MAGIC; + compat_ulong_t size = VFP_STORAGE_SIZE; + compat_ulong_t fpscr; + int i, err = 0; + + __get_user_error(magic, &frame->magic, err); + __get_user_error(size, &frame->size, err); + + if (err) + return -EFAULT; + if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE) + return -EINVAL; + + /* Copy the FP registers into the start of the fpsimd_state. */ + for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) { + union __fpsimd_vreg vreg; + + __get_user_error(vreg.lo, &frame->ufp.fpregs[i], err); + __get_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err); + fpsimd.vregs[i >> 1] = vreg.raw; + } + + /* Extract the fpsr and the fpcr from the fpscr */ + __get_user_error(fpscr, &frame->ufp.fpscr, err); + fpsimd.fpsr = fpscr & VFP_FPSCR_STAT_MASK; + fpsimd.fpcr = fpscr & VFP_FPSCR_CTRL_MASK; + + /* + * We don't need to touch the exception register, so + * reload the hardware state. + */ + if (!err) + fpsimd_update_current_state(&fpsimd); + + return err ? 
-EFAULT : 0; +} + +static int compat_restore_sigframe(struct pt_regs *regs, + struct compat_sigframe __user *sf) +{ + int err; + sigset_t set; + struct compat_aux_sigframe __user *aux; + unsigned long psr; + + err = get_sigset_t(&set, &sf->uc.uc_sigmask); + if (err == 0) + set_current_blocked(&set); + + __get_user_error(regs->regs[0], &sf->uc.uc_mcontext.arm_r0, err); + __get_user_error(regs->regs[1], &sf->uc.uc_mcontext.arm_r1, err); + __get_user_error(regs->regs[2], &sf->uc.uc_mcontext.arm_r2, err); + __get_user_error(regs->regs[3], &sf->uc.uc_mcontext.arm_r3, err); + __get_user_error(regs->regs[4], &sf->uc.uc_mcontext.arm_r4, err); + __get_user_error(regs->regs[5], &sf->uc.uc_mcontext.arm_r5, err); + __get_user_error(regs->regs[6], &sf->uc.uc_mcontext.arm_r6, err); + __get_user_error(regs->regs[7], &sf->uc.uc_mcontext.arm_r7, err); + __get_user_error(regs->regs[8], &sf->uc.uc_mcontext.arm_r8, err); + __get_user_error(regs->regs[9], &sf->uc.uc_mcontext.arm_r9, err); + __get_user_error(regs->regs[10], &sf->uc.uc_mcontext.arm_r10, err); + __get_user_error(regs->regs[11], &sf->uc.uc_mcontext.arm_fp, err); + __get_user_error(regs->regs[12], &sf->uc.uc_mcontext.arm_ip, err); + __get_user_error(regs->compat_sp, &sf->uc.uc_mcontext.arm_sp, err); + __get_user_error(regs->compat_lr, &sf->uc.uc_mcontext.arm_lr, err); + __get_user_error(regs->pc, &sf->uc.uc_mcontext.arm_pc, err); + __get_user_error(psr, &sf->uc.uc_mcontext.arm_cpsr, err); + + regs->pstate = compat_psr_to_pstate(psr); + + /* + * Avoid compat_sys_sigreturn() restarting. + */ + forget_syscall(regs); + + err |= !valid_user_regs(®s->user_regs, current); + + aux = (struct compat_aux_sigframe __user *) sf->uc.uc_regspace; + if (err == 0 && system_supports_fpsimd()) + err |= compat_restore_vfp_context(&aux->vfp); + + return err; +} + +COMPAT_SYSCALL_DEFINE0(sigreturn) +{ + struct pt_regs *regs = current_pt_regs(); + struct compat_sigframe __user *frame; + + /* Always make any pending restarted system calls return -EINTR */ + current->restart_block.fn = do_no_restart_syscall; + + /* + * Since we stacked the signal on a 64-bit boundary, + * then 'sp' should be word aligned here. If it's + * not, then the user is trying to mess with us. + */ + if (regs->compat_sp & 7) + goto badframe; + + frame = (struct compat_sigframe __user *)regs->compat_sp; + + if (!access_ok(frame, sizeof (*frame))) + goto badframe; + + if (compat_restore_sigframe(regs, frame)) + goto badframe; + + return regs->regs[0]; + +badframe: + arm64_notify_segfault(regs->compat_sp); + return 0; +} + +COMPAT_SYSCALL_DEFINE0(rt_sigreturn) +{ + struct pt_regs *regs = current_pt_regs(); + struct compat_rt_sigframe __user *frame; + + /* Always make any pending restarted system calls return -EINTR */ + current->restart_block.fn = do_no_restart_syscall; + + /* + * Since we stacked the signal on a 64-bit boundary, + * then 'sp' should be word aligned here. If it's + * not, then the user is trying to mess with us. 
+ */ + if (regs->compat_sp & 7) + goto badframe; + + frame = (struct compat_rt_sigframe __user *)regs->compat_sp; + + if (!access_ok(frame, sizeof (*frame))) + goto badframe; + + if (compat_restore_sigframe(regs, &frame->sig)) + goto badframe; + + if (compat_restore_altstack(&frame->sig.uc.uc_stack)) + goto badframe; + + return regs->regs[0]; + +badframe: + arm64_notify_segfault(regs->compat_sp); + return 0; +} + +static void __user *compat_get_sigframe(struct ksignal *ksig, + struct pt_regs *regs, + int framesize) +{ + compat_ulong_t sp = sigsp(regs->compat_sp, ksig); + void __user *frame; + + /* + * ATPCS B01 mandates 8-byte alignment + */ + frame = compat_ptr((compat_uptr_t)((sp - framesize) & ~7)); + + /* + * Check that we can actually write to the signal frame. + */ + if (!access_ok(frame, framesize)) + frame = NULL; + + return frame; +} + +static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka, + compat_ulong_t __user *rc, void __user *frame, + int usig) +{ + compat_ulong_t handler = ptr_to_compat(ka->sa.sa_handler); + compat_ulong_t retcode; + compat_ulong_t spsr = regs->pstate & ~(PSR_f | PSR_AA32_E_BIT); + int thumb; + + /* Check if the handler is written for ARM or Thumb */ + thumb = handler & 1; + + if (thumb) + spsr |= PSR_AA32_T_BIT; + else + spsr &= ~PSR_AA32_T_BIT; + + /* The IT state must be cleared for both ARM and Thumb-2 */ + spsr &= ~PSR_AA32_IT_MASK; + + /* Restore the original endianness */ + spsr |= PSR_AA32_ENDSTATE; + + if (ka->sa.sa_flags & SA_RESTORER) { + retcode = ptr_to_compat(ka->sa.sa_restorer); + } else { + /* Set up sigreturn pointer */ + unsigned int idx = thumb << 1; + + if (ka->sa.sa_flags & SA_SIGINFO) + idx += 3; + + retcode = (unsigned long)current->mm->context.sigpage + + (idx << 2) + thumb; + } + + regs->regs[0] = usig; + regs->compat_sp = ptr_to_compat(frame); + regs->compat_lr = retcode; + regs->pc = handler; + regs->pstate = spsr; +} + +static int compat_setup_sigframe(struct compat_sigframe __user *sf, + struct pt_regs *regs, sigset_t *set) +{ + struct compat_aux_sigframe __user *aux; + unsigned long psr = pstate_to_compat_psr(regs->pstate); + int err = 0; + + __put_user_error(regs->regs[0], &sf->uc.uc_mcontext.arm_r0, err); + __put_user_error(regs->regs[1], &sf->uc.uc_mcontext.arm_r1, err); + __put_user_error(regs->regs[2], &sf->uc.uc_mcontext.arm_r2, err); + __put_user_error(regs->regs[3], &sf->uc.uc_mcontext.arm_r3, err); + __put_user_error(regs->regs[4], &sf->uc.uc_mcontext.arm_r4, err); + __put_user_error(regs->regs[5], &sf->uc.uc_mcontext.arm_r5, err); + __put_user_error(regs->regs[6], &sf->uc.uc_mcontext.arm_r6, err); + __put_user_error(regs->regs[7], &sf->uc.uc_mcontext.arm_r7, err); + __put_user_error(regs->regs[8], &sf->uc.uc_mcontext.arm_r8, err); + __put_user_error(regs->regs[9], &sf->uc.uc_mcontext.arm_r9, err); + __put_user_error(regs->regs[10], &sf->uc.uc_mcontext.arm_r10, err); + __put_user_error(regs->regs[11], &sf->uc.uc_mcontext.arm_fp, err); + __put_user_error(regs->regs[12], &sf->uc.uc_mcontext.arm_ip, err); + __put_user_error(regs->compat_sp, &sf->uc.uc_mcontext.arm_sp, err); + __put_user_error(regs->compat_lr, &sf->uc.uc_mcontext.arm_lr, err); + __put_user_error(regs->pc, &sf->uc.uc_mcontext.arm_pc, err); + __put_user_error(psr, &sf->uc.uc_mcontext.arm_cpsr, err); + + __put_user_error((compat_ulong_t)0, &sf->uc.uc_mcontext.trap_no, err); + /* set the compat FSR WnR */ + __put_user_error(!!(current->thread.fault_code & ESR_ELx_WNR) << + FSR_WRITE_SHIFT, &sf->uc.uc_mcontext.error_code, err); + 
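        /*
         * For reference: ESR_ELx_WNR is ESR bit 6 ("write, not read"); the
         * AArch32 DFSR reports the same information in bit 11, hence the
         * shift by FSR_WRITE_SHIFT (11) in the line above.
         */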
__put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err); + __put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err); + + err |= put_sigset_t(&sf->uc.uc_sigmask, set); + + aux = (struct compat_aux_sigframe __user *) sf->uc.uc_regspace; + + if (err == 0 && system_supports_fpsimd()) + err |= compat_preserve_vfp_context(&aux->vfp); + __put_user_error(0, &aux->end_magic, err); + + return err; +} + +/* + * 32-bit signal handling routines called from signal.c + */ +int compat_setup_rt_frame(int usig, struct ksignal *ksig, + sigset_t *set, struct pt_regs *regs) +{ + struct compat_rt_sigframe __user *frame; + int err = 0; + + frame = compat_get_sigframe(ksig, regs, sizeof(*frame)); + + if (!frame) + return 1; + + err |= copy_siginfo_to_user32(&frame->info, &ksig->info); + + __put_user_error(0, &frame->sig.uc.uc_flags, err); + __put_user_error(0, &frame->sig.uc.uc_link, err); + + err |= __compat_save_altstack(&frame->sig.uc.uc_stack, regs->compat_sp); + + err |= compat_setup_sigframe(&frame->sig, regs, set); + + if (err == 0) { + compat_setup_return(regs, &ksig->ka, frame->sig.retcode, frame, usig); + regs->regs[1] = (compat_ulong_t)(unsigned long)&frame->info; + regs->regs[2] = (compat_ulong_t)(unsigned long)&frame->sig.uc; + } + + return err; +} + +int compat_setup_frame(int usig, struct ksignal *ksig, sigset_t *set, + struct pt_regs *regs) +{ + struct compat_sigframe __user *frame; + int err = 0; + + frame = compat_get_sigframe(ksig, regs, sizeof(*frame)); + + if (!frame) + return 1; + + __put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err); + + err |= compat_setup_sigframe(frame, regs, set); + if (err == 0) + compat_setup_return(regs, &ksig->ka, frame->retcode, frame, usig); + + return err; +} + +void compat_setup_restart_syscall(struct pt_regs *regs) +{ + regs->regs[7] = __NR_compat_restart_syscall; +} + +/* + * Compile-time assertions for siginfo_t offsets. Check NSIG* as well, as + * changes likely come with new fields that should be added below. 
+ */ +static_assert(NSIGILL == 11); +static_assert(NSIGFPE == 15); +static_assert(NSIGSEGV == 10); +static_assert(NSIGBUS == 5); +static_assert(NSIGTRAP == 6); +static_assert(NSIGCHLD == 6); +static_assert(NSIGSYS == 2); +static_assert(sizeof(compat_siginfo_t) == 128); +static_assert(__alignof__(compat_siginfo_t) == 4); +static_assert(offsetof(compat_siginfo_t, si_signo) == 0x00); +static_assert(offsetof(compat_siginfo_t, si_errno) == 0x04); +static_assert(offsetof(compat_siginfo_t, si_code) == 0x08); +static_assert(offsetof(compat_siginfo_t, si_pid) == 0x0c); +static_assert(offsetof(compat_siginfo_t, si_uid) == 0x10); +static_assert(offsetof(compat_siginfo_t, si_tid) == 0x0c); +static_assert(offsetof(compat_siginfo_t, si_overrun) == 0x10); +static_assert(offsetof(compat_siginfo_t, si_status) == 0x14); +static_assert(offsetof(compat_siginfo_t, si_utime) == 0x18); +static_assert(offsetof(compat_siginfo_t, si_stime) == 0x1c); +static_assert(offsetof(compat_siginfo_t, si_value) == 0x14); +static_assert(offsetof(compat_siginfo_t, si_int) == 0x14); +static_assert(offsetof(compat_siginfo_t, si_ptr) == 0x14); +static_assert(offsetof(compat_siginfo_t, si_addr) == 0x0c); +static_assert(offsetof(compat_siginfo_t, si_addr_lsb) == 0x10); +static_assert(offsetof(compat_siginfo_t, si_lower) == 0x14); +static_assert(offsetof(compat_siginfo_t, si_upper) == 0x18); +static_assert(offsetof(compat_siginfo_t, si_pkey) == 0x14); +static_assert(offsetof(compat_siginfo_t, si_perf_data) == 0x10); +static_assert(offsetof(compat_siginfo_t, si_perf_type) == 0x14); +static_assert(offsetof(compat_siginfo_t, si_perf_flags) == 0x18); +static_assert(offsetof(compat_siginfo_t, si_band) == 0x0c); +static_assert(offsetof(compat_siginfo_t, si_fd) == 0x10); +static_assert(offsetof(compat_siginfo_t, si_call_addr) == 0x0c); +static_assert(offsetof(compat_siginfo_t, si_syscall) == 0x10); +static_assert(offsetof(compat_siginfo_t, si_arch) == 0x14); diff --git a/arch/arm64/kernel/sigreturn32.S b/arch/arm64/kernel/sigreturn32.S new file mode 100644 index 0000000000..ccbd4aab4b --- /dev/null +++ b/arch/arm64/kernel/sigreturn32.S @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * AArch32 sigreturn code. + * Based on the kuser helpers in arch/arm/kernel/entry-armv.S. + * + * Copyright (C) 2005-2011 Nicolas Pitre + * Copyright (C) 2012-2018 ARM Ltd. + * + * For ARM syscalls, the syscall number has to be loaded into r7. + * We do not support an OABI userspace. + * + * For Thumb syscalls, we also pass the syscall number via r7. We therefore + * need two 16-bit instructions. 
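As an aside for the reader (not new code, just decoding the bytes that follow): AArch32 instructions are stored little-endian, so each .byte sequence below deliberately places the syscall number in the lowest byte of the instruction word or halfword:

        0xe3a070NN      mov   r7, #NN       // ARM
        0xef0000NN      svc   #NN           // ARM (EABI takes the number from r7; the immediate is informational)
        0x27NN          movs  r7, #NN       // Thumb
        0xdfNN          svc   #NN           // Thumb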
+ */ + +#include + + .section .rodata + .globl __aarch32_sigret_code_start +__aarch32_sigret_code_start: + + /* + * ARM Code + */ + .byte __NR_compat_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_sigreturn + .byte __NR_compat_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_sigreturn + + /* + * Thumb code + */ + .byte __NR_compat_sigreturn, 0x27 // svc #__NR_compat_sigreturn + .byte __NR_compat_sigreturn, 0xdf // mov r7, #__NR_compat_sigreturn + + /* + * ARM code + */ + .byte __NR_compat_rt_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_rt_sigreturn + .byte __NR_compat_rt_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_rt_sigreturn + + /* + * Thumb code + */ + .byte __NR_compat_rt_sigreturn, 0x27 // svc #__NR_compat_rt_sigreturn + .byte __NR_compat_rt_sigreturn, 0xdf // mov r7, #__NR_compat_rt_sigreturn + + .globl __aarch32_sigret_code_end +__aarch32_sigret_code_end: diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S new file mode 100644 index 0000000000..2aa5129d82 --- /dev/null +++ b/arch/arm64/kernel/sleep.S @@ -0,0 +1,158 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include +#include +#include +#include +#include + + .text +/* + * Implementation of MPIDR_EL1 hash algorithm through shifting + * and OR'ing. + * + * @dst: register containing hash result + * @rs0: register containing affinity level 0 bit shift + * @rs1: register containing affinity level 1 bit shift + * @rs2: register containing affinity level 2 bit shift + * @rs3: register containing affinity level 3 bit shift + * @mpidr: register containing MPIDR_EL1 value + * @mask: register containing MPIDR mask + * + * Pseudo C-code: + * + *u32 dst; + * + *compute_mpidr_hash(u32 rs0, u32 rs1, u32 rs2, u32 rs3, u64 mpidr, u64 mask) { + * u32 aff0, aff1, aff2, aff3; + * u64 mpidr_masked = mpidr & mask; + * aff0 = mpidr_masked & 0xff; + * aff1 = mpidr_masked & 0xff00; + * aff2 = mpidr_masked & 0xff0000; + * aff3 = mpidr_masked & 0xff00000000; + * dst = (aff0 >> rs0 | aff1 >> rs1 | aff2 >> rs2 | aff3 >> rs3); + *} + * Input registers: rs0, rs1, rs2, rs3, mpidr, mask + * Output register: dst + * Note: input and output registers must be disjoint register sets + (eg: a macro instance with mpidr = x1 and dst = x1 is invalid) + */ + .macro compute_mpidr_hash dst, rs0, rs1, rs2, rs3, mpidr, mask + and \mpidr, \mpidr, \mask // mask out MPIDR bits + and \dst, \mpidr, #0xff // mask=aff0 + lsr \dst ,\dst, \rs0 // dst=aff0>>rs0 + and \mask, \mpidr, #0xff00 // mask = aff1 + lsr \mask ,\mask, \rs1 + orr \dst, \dst, \mask // dst|=(aff1>>rs1) + and \mask, \mpidr, #0xff0000 // mask = aff2 + lsr \mask ,\mask, \rs2 + orr \dst, \dst, \mask // dst|=(aff2>>rs2) + and \mask, \mpidr, #0xff00000000 // mask = aff3 + lsr \mask ,\mask, \rs3 + orr \dst, \dst, \mask // dst|=(aff3>>rs3) + .endm +/* + * Save CPU state in the provided sleep_stack_data area, and publish its + * location for cpu_resume()'s use in sleep_save_stash. + * + * cpu_resume() will restore this saved state, and return. Because the + * link-register is saved and restored, it will appear to return from this + * function. So that the caller can tell the suspend/resume paths apart, + * __cpu_suspend_enter() will always return a non-zero value, whereas the + * path through cpu_resume() will return 0. 
+ * + * x0 = struct sleep_stack_data area + */ +SYM_FUNC_START(__cpu_suspend_enter) + stp x29, lr, [x0, #SLEEP_STACK_DATA_CALLEE_REGS] + stp x19, x20, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+16] + stp x21, x22, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+32] + stp x23, x24, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+48] + stp x25, x26, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+64] + stp x27, x28, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+80] + + /* save the sp in cpu_suspend_ctx */ + mov x2, sp + str x2, [x0, #SLEEP_STACK_DATA_SYSTEM_REGS + CPU_CTX_SP] + + /* find the mpidr_hash */ + ldr_l x1, sleep_save_stash + mrs x7, mpidr_el1 + adr_l x9, mpidr_hash + ldr x10, [x9, #MPIDR_HASH_MASK] + /* + * Following code relies on the struct mpidr_hash + * members size. + */ + ldp w3, w4, [x9, #MPIDR_HASH_SHIFTS] + ldp w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)] + compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10 + add x1, x1, x8, lsl #3 + + str x0, [x1] + add x0, x0, #SLEEP_STACK_DATA_SYSTEM_REGS + stp x29, lr, [sp, #-16]! + bl cpu_do_suspend + ldp x29, lr, [sp], #16 + mov x0, #1 + ret +SYM_FUNC_END(__cpu_suspend_enter) + + .pushsection ".idmap.text", "a" +SYM_CODE_START(cpu_resume) + mov x0, xzr + bl init_kernel_el + mov x19, x0 // preserve boot mode +#if VA_BITS > 48 + ldr_l x0, vabits_actual +#endif + bl __cpu_setup + /* enable the MMU early - so we can access sleep_save_stash by va */ + adrp x1, swapper_pg_dir + adrp x2, idmap_pg_dir + bl __enable_mmu + ldr x8, =_cpu_resume + br x8 +SYM_CODE_END(cpu_resume) + .ltorg + .popsection + +SYM_FUNC_START(_cpu_resume) + mov x0, x19 + bl finalise_el2 + + mrs x1, mpidr_el1 + adr_l x8, mpidr_hash // x8 = struct mpidr_hash virt address + + /* retrieve mpidr_hash members to compute the hash */ + ldr x2, [x8, #MPIDR_HASH_MASK] + ldp w3, w4, [x8, #MPIDR_HASH_SHIFTS] + ldp w5, w6, [x8, #(MPIDR_HASH_SHIFTS + 8)] + compute_mpidr_hash x7, x3, x4, x5, x6, x1, x2 + + /* x7 contains hash index, let's use it to grab context pointer */ + ldr_l x0, sleep_save_stash + ldr x0, [x0, x7, lsl #3] + add x29, x0, #SLEEP_STACK_DATA_CALLEE_REGS + add x0, x0, #SLEEP_STACK_DATA_SYSTEM_REGS + /* load sp from context */ + ldr x2, [x0, #CPU_CTX_SP] + mov sp, x2 + /* + * cpu_do_resume expects x0 to contain context address pointer + */ + bl cpu_do_resume + +#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK) + mov x0, sp + bl kasan_unpoison_task_stack_below +#endif + + ldp x19, x20, [x29, #16] + ldp x21, x22, [x29, #32] + ldp x23, x24, [x29, #48] + ldp x25, x26, [x29, #64] + ldp x27, x28, [x29, #80] + ldp x29, lr, [x29] + mov x0, #0 + ret +SYM_FUNC_END(_cpu_resume) diff --git a/arch/arm64/kernel/smccc-call.S b/arch/arm64/kernel/smccc-call.S new file mode 100644 index 0000000000..487381164f --- /dev/null +++ b/arch/arm64/kernel/smccc-call.S @@ -0,0 +1,131 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2015, Linaro Limited + */ +#include +#include + +#include +#include +#include + +/* + * If we have SMCCC v1.3 and (as is likely) no SVE state in + * the registers then set the SMCCC hint bit to say there's no + * need to preserve it. Do this by directly adjusting the SMCCC + * function value which is already stored in x0 ready to be called. + */ +SYM_FUNC_START(__arm_smccc_sve_check) + + ldr_l x16, smccc_has_sve_hint + cbz x16, 2f + + get_current_task x16 + ldr x16, [x16, #TSK_TI_FLAGS] + tbnz x16, #TIF_FOREIGN_FPSTATE, 1f // Any live FP state? + tbnz x16, #TIF_SVE, 2f // Does that state include SVE? 
+ +1: orr x0, x0, ARM_SMCCC_1_3_SVE_HINT + +2: ret +SYM_FUNC_END(__arm_smccc_sve_check) +EXPORT_SYMBOL(__arm_smccc_sve_check) + + .macro SMCCC instr + stp x29, x30, [sp, #-16]! + mov x29, sp +alternative_if ARM64_SVE + bl __arm_smccc_sve_check +alternative_else_nop_endif + \instr #0 + ldr x4, [sp, #16] + stp x0, x1, [x4, #ARM_SMCCC_RES_X0_OFFS] + stp x2, x3, [x4, #ARM_SMCCC_RES_X2_OFFS] + ldr x4, [sp, #24] + cbz x4, 1f /* no quirk structure */ + ldr x9, [x4, #ARM_SMCCC_QUIRK_ID_OFFS] + cmp x9, #ARM_SMCCC_QUIRK_QCOM_A6 + b.ne 1f + str x6, [x4, ARM_SMCCC_QUIRK_STATE_OFFS] +1: ldp x29, x30, [sp], #16 + ret + .endm + +/* + * void arm_smccc_smc(unsigned long a0, unsigned long a1, unsigned long a2, + * unsigned long a3, unsigned long a4, unsigned long a5, + * unsigned long a6, unsigned long a7, struct arm_smccc_res *res, + * struct arm_smccc_quirk *quirk) + */ +SYM_FUNC_START(__arm_smccc_smc) + SMCCC smc +SYM_FUNC_END(__arm_smccc_smc) +EXPORT_SYMBOL(__arm_smccc_smc) + +/* + * void arm_smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2, + * unsigned long a3, unsigned long a4, unsigned long a5, + * unsigned long a6, unsigned long a7, struct arm_smccc_res *res, + * struct arm_smccc_quirk *quirk) + */ +SYM_FUNC_START(__arm_smccc_hvc) + SMCCC hvc +SYM_FUNC_END(__arm_smccc_hvc) +EXPORT_SYMBOL(__arm_smccc_hvc) + + .macro SMCCC_1_2 instr + /* Save `res` and free a GPR that won't be clobbered */ + stp x1, x19, [sp, #-16]! + + /* Ensure `args` won't be clobbered while loading regs in next step */ + mov x19, x0 + + /* Load the registers x0 - x17 from the struct arm_smccc_1_2_regs */ + ldp x0, x1, [x19, #ARM_SMCCC_1_2_REGS_X0_OFFS] + ldp x2, x3, [x19, #ARM_SMCCC_1_2_REGS_X2_OFFS] + ldp x4, x5, [x19, #ARM_SMCCC_1_2_REGS_X4_OFFS] + ldp x6, x7, [x19, #ARM_SMCCC_1_2_REGS_X6_OFFS] + ldp x8, x9, [x19, #ARM_SMCCC_1_2_REGS_X8_OFFS] + ldp x10, x11, [x19, #ARM_SMCCC_1_2_REGS_X10_OFFS] + ldp x12, x13, [x19, #ARM_SMCCC_1_2_REGS_X12_OFFS] + ldp x14, x15, [x19, #ARM_SMCCC_1_2_REGS_X14_OFFS] + ldp x16, x17, [x19, #ARM_SMCCC_1_2_REGS_X16_OFFS] + + \instr #0 + + /* Load the `res` from the stack */ + ldr x19, [sp] + + /* Store the registers x0 - x17 into the result structure */ + stp x0, x1, [x19, #ARM_SMCCC_1_2_REGS_X0_OFFS] + stp x2, x3, [x19, #ARM_SMCCC_1_2_REGS_X2_OFFS] + stp x4, x5, [x19, #ARM_SMCCC_1_2_REGS_X4_OFFS] + stp x6, x7, [x19, #ARM_SMCCC_1_2_REGS_X6_OFFS] + stp x8, x9, [x19, #ARM_SMCCC_1_2_REGS_X8_OFFS] + stp x10, x11, [x19, #ARM_SMCCC_1_2_REGS_X10_OFFS] + stp x12, x13, [x19, #ARM_SMCCC_1_2_REGS_X12_OFFS] + stp x14, x15, [x19, #ARM_SMCCC_1_2_REGS_X14_OFFS] + stp x16, x17, [x19, #ARM_SMCCC_1_2_REGS_X16_OFFS] + + /* Restore original x19 */ + ldp xzr, x19, [sp], #16 + ret +.endm + +/* + * void arm_smccc_1_2_hvc(const struct arm_smccc_1_2_regs *args, + * struct arm_smccc_1_2_regs *res); + */ +SYM_FUNC_START(arm_smccc_1_2_hvc) + SMCCC_1_2 hvc +SYM_FUNC_END(arm_smccc_1_2_hvc) +EXPORT_SYMBOL(arm_smccc_1_2_hvc) + +/* + * void arm_smccc_1_2_smc(const struct arm_smccc_1_2_regs *args, + * struct arm_smccc_1_2_regs *res); + */ +SYM_FUNC_START(arm_smccc_1_2_smc) + SMCCC_1_2 smc +SYM_FUNC_END(arm_smccc_1_2_smc) +EXPORT_SYMBOL(arm_smccc_1_2_smc) diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c new file mode 100644 index 0000000000..960b98b435 --- /dev/null +++ b/arch/arm64/kernel/smp.c @@ -0,0 +1,1096 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * SMP initialisation and IPI support + * Based on arch/arm/kernel/smp.c + * + * Copyright (C) 2012 ARM Ltd. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number); +EXPORT_PER_CPU_SYMBOL(cpu_number); + +/* + * as from 2.5, kernels no longer have an init_tasks structure + * so we need some other way of telling a new secondary core + * where to place its SVC stack + */ +struct secondary_data secondary_data; +/* Number of CPUs which aren't online, but looping in kernel text. */ +static int cpus_stuck_in_kernel; + +enum ipi_msg_type { + IPI_RESCHEDULE, + IPI_CALL_FUNC, + IPI_CPU_STOP, + IPI_CPU_CRASH_STOP, + IPI_TIMER, + IPI_IRQ_WORK, + IPI_WAKEUP, + NR_IPI +}; + +static int ipi_irq_base __read_mostly; +static int nr_ipi __read_mostly = NR_IPI; +static struct irq_desc *ipi_desc[NR_IPI] __read_mostly; + +static void ipi_setup(int cpu); + +#ifdef CONFIG_HOTPLUG_CPU +static void ipi_teardown(int cpu); +static int op_cpu_kill(unsigned int cpu); +#else +static inline int op_cpu_kill(unsigned int cpu) +{ + return -ENOSYS; +} +#endif + + +/* + * Boot a secondary CPU, and assign it the specified idle task. + * This also gives us the initial stack to use for this CPU. + */ +static int boot_secondary(unsigned int cpu, struct task_struct *idle) +{ + const struct cpu_operations *ops = get_cpu_ops(cpu); + + if (ops->cpu_boot) + return ops->cpu_boot(cpu); + + return -EOPNOTSUPP; +} + +static DECLARE_COMPLETION(cpu_running); + +int __cpu_up(unsigned int cpu, struct task_struct *idle) +{ + int ret; + long status; + + /* + * We need to tell the secondary core where to find its stack and the + * page tables. + */ + secondary_data.task = idle; + update_cpu_boot_status(CPU_MMU_OFF); + + /* Now bring the CPU into our world */ + ret = boot_secondary(cpu, idle); + if (ret) { + pr_err("CPU%u: failed to boot: %d\n", cpu, ret); + return ret; + } + + /* + * CPU was successfully started, wait for it to come online or + * time out. 
+ */ + wait_for_completion_timeout(&cpu_running, + msecs_to_jiffies(5000)); + if (cpu_online(cpu)) + return 0; + + pr_crit("CPU%u: failed to come online\n", cpu); + secondary_data.task = NULL; + status = READ_ONCE(secondary_data.status); + if (status == CPU_MMU_OFF) + status = READ_ONCE(__early_cpu_boot_status); + + switch (status & CPU_BOOT_STATUS_MASK) { + default: + pr_err("CPU%u: failed in unknown state : 0x%lx\n", + cpu, status); + cpus_stuck_in_kernel++; + break; + case CPU_KILL_ME: + if (!op_cpu_kill(cpu)) { + pr_crit("CPU%u: died during early boot\n", cpu); + break; + } + pr_crit("CPU%u: may not have shut down cleanly\n", cpu); + fallthrough; + case CPU_STUCK_IN_KERNEL: + pr_crit("CPU%u: is stuck in kernel\n", cpu); + if (status & CPU_STUCK_REASON_52_BIT_VA) + pr_crit("CPU%u: does not support 52-bit VAs\n", cpu); + if (status & CPU_STUCK_REASON_NO_GRAN) { + pr_crit("CPU%u: does not support %luK granule\n", + cpu, PAGE_SIZE / SZ_1K); + } + cpus_stuck_in_kernel++; + break; + case CPU_PANIC_KERNEL: + panic("CPU%u detected unsupported configuration\n", cpu); + } + + return -EIO; +} + +static void init_gic_priority_masking(void) +{ + u32 cpuflags; + + if (WARN_ON(!gic_enable_sre())) + return; + + cpuflags = read_sysreg(daif); + + WARN_ON(!(cpuflags & PSR_I_BIT)); + WARN_ON(!(cpuflags & PSR_F_BIT)); + + gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET); +} + +/* + * This is the secondary CPU boot entry. We're using this CPUs + * idle thread stack, but a set of temporary page tables. + */ +asmlinkage notrace void secondary_start_kernel(void) +{ + u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK; + struct mm_struct *mm = &init_mm; + const struct cpu_operations *ops; + unsigned int cpu = smp_processor_id(); + + /* + * All kernel threads share the same mm context; grab a + * reference and switch to it. + */ + mmgrab(mm); + current->active_mm = mm; + + /* + * TTBR0 is only used for the identity mapping at this stage. Make it + * point to zero page to avoid speculatively fetching new entries. + */ + cpu_uninstall_idmap(); + + if (system_uses_irq_prio_masking()) + init_gic_priority_masking(); + + rcu_cpu_starting(cpu); + trace_hardirqs_off(); + + /* + * If the system has established the capabilities, make sure + * this CPU ticks all of those. If it doesn't, the CPU will + * fail to come online. + */ + check_local_cpu_capabilities(); + + ops = get_cpu_ops(cpu); + if (ops->cpu_postboot) + ops->cpu_postboot(); + + /* + * Log the CPU info before it is marked online and might get read. + */ + cpuinfo_store_cpu(); + store_cpu_topology(cpu); + + /* + * Enable GIC and timers. + */ + notify_cpu_starting(cpu); + + ipi_setup(cpu); + + numa_add_cpu(cpu); + + /* + * OK, now it's safe to let the boot CPU continue. Wait for + * the CPU migration code to notice that the CPU is online + * before we continue. + */ + pr_info("CPU%u: Booted secondary processor 0x%010lx [0x%08x]\n", + cpu, (unsigned long)mpidr, + read_cpuid_id()); + update_cpu_boot_status(CPU_BOOT_SUCCESS); + set_cpu_online(cpu, true); + complete(&cpu_running); + + local_daif_restore(DAIF_PROCCTX); + + /* + * OK, it's off to the idle thread for us + */ + cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); +} + +#ifdef CONFIG_HOTPLUG_CPU +static int op_cpu_disable(unsigned int cpu) +{ + const struct cpu_operations *ops = get_cpu_ops(cpu); + + /* + * If we don't have a cpu_die method, abort before we reach the point + * of no return. CPU0 may not have an cpu_ops, so test for it. 
+ */ + if (!ops || !ops->cpu_die) + return -EOPNOTSUPP; + + /* + * We may need to abort a hot unplug for some other mechanism-specific + * reason. + */ + if (ops->cpu_disable) + return ops->cpu_disable(cpu); + + return 0; +} + +/* + * __cpu_disable runs on the processor to be shutdown. + */ +int __cpu_disable(void) +{ + unsigned int cpu = smp_processor_id(); + int ret; + + ret = op_cpu_disable(cpu); + if (ret) + return ret; + + remove_cpu_topology(cpu); + numa_remove_cpu(cpu); + + /* + * Take this CPU offline. Once we clear this, we can't return, + * and we must not schedule until we're ready to give up the cpu. + */ + set_cpu_online(cpu, false); + ipi_teardown(cpu); + + /* + * OK - migrate IRQs away from this CPU + */ + irq_migrate_all_off_this_cpu(); + + return 0; +} + +static int op_cpu_kill(unsigned int cpu) +{ + const struct cpu_operations *ops = get_cpu_ops(cpu); + + /* + * If we have no means of synchronising with the dying CPU, then assume + * that it is really dead. We can only wait for an arbitrary length of + * time and hope that it's dead, so let's skip the wait and just hope. + */ + if (!ops->cpu_kill) + return 0; + + return ops->cpu_kill(cpu); +} + +/* + * Called on the thread which is asking for a CPU to be shutdown after the + * shutdown completed. + */ +void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu) +{ + int err; + + pr_debug("CPU%u: shutdown\n", cpu); + + /* + * Now that the dying CPU is beyond the point of no return w.r.t. + * in-kernel synchronisation, try to get the firwmare to help us to + * verify that it has really left the kernel before we consider + * clobbering anything it might still be using. + */ + err = op_cpu_kill(cpu); + if (err) + pr_warn("CPU%d may not have shut down cleanly: %d\n", cpu, err); +} + +/* + * Called from the idle thread for the CPU which has been shutdown. + * + */ +void __noreturn cpu_die(void) +{ + unsigned int cpu = smp_processor_id(); + const struct cpu_operations *ops = get_cpu_ops(cpu); + + idle_task_exit(); + + local_daif_mask(); + + /* Tell cpuhp_bp_sync_dead() that this CPU is now safe to dispose of */ + cpuhp_ap_report_dead(); + + /* + * Actually shutdown the CPU. This must never fail. The specific hotplug + * mechanism must perform all required cache maintenance to ensure that + * no dirty lines are lost in the process of shutting down the CPU. + */ + ops->cpu_die(cpu); + + BUG(); +} +#endif + +static void __cpu_try_die(int cpu) +{ +#ifdef CONFIG_HOTPLUG_CPU + const struct cpu_operations *ops = get_cpu_ops(cpu); + + if (ops && ops->cpu_die) + ops->cpu_die(cpu); +#endif +} + +/* + * Kill the calling secondary CPU, early in bringup before it is turned + * online. 
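As an aside, the cpu_die()/op_cpu_kill() path above is normally reached through the generic hotplug core rather than called directly. A hedged sketch of an in-kernel caller (the CPU number and function name are purely illustrative):

#include <linux/cpu.h>

static int bounce_cpu1(void)
{
        int ret = remove_cpu(1);        /* ends up in __cpu_disable() and cpu_die() */

        if (ret)
                return ret;
        return add_cpu(1);              /* re-enters via ops->cpu_boot() and secondary_start_kernel() */
}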
+ */ +void __noreturn cpu_die_early(void) +{ + int cpu = smp_processor_id(); + + pr_crit("CPU%d: will not boot\n", cpu); + + /* Mark this CPU absent */ + set_cpu_present(cpu, 0); + rcu_report_dead(cpu); + + if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) { + update_cpu_boot_status(CPU_KILL_ME); + __cpu_try_die(cpu); + } + + update_cpu_boot_status(CPU_STUCK_IN_KERNEL); + + cpu_park_loop(); +} + +static void __init hyp_mode_check(void) +{ + if (is_hyp_mode_available()) + pr_info("CPU: All CPU(s) started at EL2\n"); + else if (is_hyp_mode_mismatched()) + WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC, + "CPU: CPUs started in inconsistent modes"); + else + pr_info("CPU: All CPU(s) started at EL1\n"); + if (IS_ENABLED(CONFIG_KVM) && !is_kernel_in_hyp_mode()) { + kvm_compute_layout(); + kvm_apply_hyp_relocations(); + } +} + +void __init smp_cpus_done(unsigned int max_cpus) +{ + pr_info("SMP: Total of %d processors activated.\n", num_online_cpus()); + setup_cpu_features(); + hyp_mode_check(); + apply_alternatives_all(); + mark_linear_text_alias_ro(); +} + +void __init smp_prepare_boot_cpu(void) +{ + /* + * The runtime per-cpu areas have been allocated by + * setup_per_cpu_areas(), and CPU0's boot time per-cpu area will be + * freed shortly, so we must move over to the runtime per-cpu area. + */ + set_my_cpu_offset(per_cpu_offset(smp_processor_id())); + cpuinfo_store_boot_cpu(); + + /* + * We now know enough about the boot CPU to apply the + * alternatives that cannot wait until interrupt handling + * and/or scheduling is enabled. + */ + apply_boot_alternatives(); + + /* Conditionally switch to GIC PMR for interrupt masking */ + if (system_uses_irq_prio_masking()) + init_gic_priority_masking(); + + kasan_init_hw_tags(); +} + +/* + * Duplicate MPIDRs are a recipe for disaster. Scan all initialized + * entries and check for duplicates. If any is found just ignore the + * cpu. cpu_logical_map was initialized to INVALID_HWID to avoid + * matching valid MPIDR values. 
+ */ +static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid) +{ + unsigned int i; + + for (i = 1; (i < cpu) && (i < NR_CPUS); i++) + if (cpu_logical_map(i) == hwid) + return true; + return false; +} + +/* + * Initialize cpu operations for a logical cpu and + * set it in the possible mask on success + */ +static int __init smp_cpu_setup(int cpu) +{ + const struct cpu_operations *ops; + + if (init_cpu_ops(cpu)) + return -ENODEV; + + ops = get_cpu_ops(cpu); + if (ops->cpu_init(cpu)) + return -ENODEV; + + set_cpu_possible(cpu, true); + + return 0; +} + +static bool bootcpu_valid __initdata; +static unsigned int cpu_count = 1; + +#ifdef CONFIG_ACPI +static struct acpi_madt_generic_interrupt cpu_madt_gicc[NR_CPUS]; + +struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu) +{ + return &cpu_madt_gicc[cpu]; +} +EXPORT_SYMBOL_GPL(acpi_cpu_get_madt_gicc); + +/* + * acpi_map_gic_cpu_interface - parse processor MADT entry + * + * Carry out sanity checks on MADT processor entry and initialize + * cpu_logical_map on success + */ +static void __init +acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor) +{ + u64 hwid = processor->arm_mpidr; + + if (!(processor->flags & ACPI_MADT_ENABLED)) { + pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid); + return; + } + + if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) { + pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid); + return; + } + + if (is_mpidr_duplicate(cpu_count, hwid)) { + pr_err("duplicate CPU MPIDR 0x%llx in MADT\n", hwid); + return; + } + + /* Check if GICC structure of boot CPU is available in the MADT */ + if (cpu_logical_map(0) == hwid) { + if (bootcpu_valid) { + pr_err("duplicate boot CPU MPIDR: 0x%llx in MADT\n", + hwid); + return; + } + bootcpu_valid = true; + cpu_madt_gicc[0] = *processor; + return; + } + + if (cpu_count >= NR_CPUS) + return; + + /* map the logical cpu id to cpu MPIDR */ + set_cpu_logical_map(cpu_count, hwid); + + cpu_madt_gicc[cpu_count] = *processor; + + /* + * Set-up the ACPI parking protocol cpu entries + * while initializing the cpu_logical_map to + * avoid parsing MADT entries multiple times for + * nothing (ie a valid cpu_logical_map entry should + * contain a valid parking protocol data set to + * initialize the cpu if the parking protocol is + * the only available enable method). + */ + acpi_set_mailbox_entry(cpu_count, processor); + + cpu_count++; +} + +static int __init +acpi_parse_gic_cpu_interface(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_generic_interrupt *processor; + + processor = (struct acpi_madt_generic_interrupt *)header; + if (BAD_MADT_GICC_ENTRY(processor, end)) + return -EINVAL; + + acpi_table_print_madt_entry(&header->common); + + acpi_map_gic_cpu_interface(processor); + + return 0; +} + +static void __init acpi_parse_and_init_cpus(void) +{ + int i; + + /* + * do a walk of MADT to determine how many CPUs + * we have including disabled CPUs, and get information + * we need for SMP init. + */ + acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, + acpi_parse_gic_cpu_interface, 0); + + /* + * In ACPI, SMP and CPU NUMA information is provided in separate + * static tables, namely the MADT and the SRAT. + * + * Thus, it is simpler to first create the cpu logical map through + * an MADT walk and then map the logical cpus to their node ids + * as separate steps. 
+ */ + acpi_map_cpus_to_nodes(); + + for (i = 0; i < nr_cpu_ids; i++) + early_map_cpu_to_node(i, acpi_numa_get_nid(i)); +} +#else +#define acpi_parse_and_init_cpus(...) do { } while (0) +#endif + +/* + * Enumerate the possible CPU set from the device tree and build the + * cpu logical map array containing MPIDR values related to logical + * cpus. Assumes that cpu_logical_map(0) has already been initialized. + */ +static void __init of_parse_and_init_cpus(void) +{ + struct device_node *dn; + + for_each_of_cpu_node(dn) { + u64 hwid = of_get_cpu_hwid(dn, 0); + + if (hwid & ~MPIDR_HWID_BITMASK) + goto next; + + if (is_mpidr_duplicate(cpu_count, hwid)) { + pr_err("%pOF: duplicate cpu reg properties in the DT\n", + dn); + goto next; + } + + /* + * The numbering scheme requires that the boot CPU + * must be assigned logical id 0. Record it so that + * the logical map built from DT is validated and can + * be used. + */ + if (hwid == cpu_logical_map(0)) { + if (bootcpu_valid) { + pr_err("%pOF: duplicate boot cpu reg property in DT\n", + dn); + goto next; + } + + bootcpu_valid = true; + early_map_cpu_to_node(0, of_node_to_nid(dn)); + + /* + * cpu_logical_map has already been + * initialized and the boot cpu doesn't need + * the enable-method so continue without + * incrementing cpu. + */ + continue; + } + + if (cpu_count >= NR_CPUS) + goto next; + + pr_debug("cpu logical map 0x%llx\n", hwid); + set_cpu_logical_map(cpu_count, hwid); + + early_map_cpu_to_node(cpu_count, of_node_to_nid(dn)); +next: + cpu_count++; + } +} + +/* + * Enumerate the possible CPU set from the device tree or ACPI and build the + * cpu logical map array containing MPIDR values related to logical + * cpus. Assumes that cpu_logical_map(0) has already been initialized. + */ +void __init smp_init_cpus(void) +{ + int i; + + if (acpi_disabled) + of_parse_and_init_cpus(); + else + acpi_parse_and_init_cpus(); + + if (cpu_count > nr_cpu_ids) + pr_warn("Number of cores (%d) exceeds configured maximum of %u - clipping\n", + cpu_count, nr_cpu_ids); + + if (!bootcpu_valid) { + pr_err("missing boot CPU MPIDR, not enabling secondaries\n"); + return; + } + + /* + * We need to set the cpu_logical_map entries before enabling + * the cpus so that cpu processor description entries (DT cpu nodes + * and ACPI MADT entries) can be retrieved by matching the cpu hwid + * with entries in cpu_logical_map while initializing the cpus. + * If the cpu set-up fails, invalidate the cpu_logical_map entry. + */ + for (i = 1; i < nr_cpu_ids; i++) { + if (cpu_logical_map(i) != INVALID_HWID) { + if (smp_cpu_setup(i)) + set_cpu_logical_map(i, INVALID_HWID); + } + } +} + +void __init smp_prepare_cpus(unsigned int max_cpus) +{ + const struct cpu_operations *ops; + int err; + unsigned int cpu; + unsigned int this_cpu; + + init_cpu_topology(); + + this_cpu = smp_processor_id(); + store_cpu_topology(this_cpu); + numa_store_cpu_info(this_cpu); + numa_add_cpu(this_cpu); + + /* + * If UP is mandated by "nosmp" (which implies "maxcpus=0"), don't set + * secondary CPUs present. + */ + if (max_cpus == 0) + return; + + /* + * Initialise the present map (which describes the set of CPUs + * actually populated at the present time) and release the + * secondaries from the bootloader. 
+ */ + for_each_possible_cpu(cpu) { + + per_cpu(cpu_number, cpu) = cpu; + + if (cpu == smp_processor_id()) + continue; + + ops = get_cpu_ops(cpu); + if (!ops) + continue; + + err = ops->cpu_prepare(cpu); + if (err) + continue; + + set_cpu_present(cpu, true); + numa_store_cpu_info(cpu); + } +} + +static const char *ipi_types[NR_IPI] __tracepoint_string = { + [IPI_RESCHEDULE] = "Rescheduling interrupts", + [IPI_CALL_FUNC] = "Function call interrupts", + [IPI_CPU_STOP] = "CPU stop interrupts", + [IPI_CPU_CRASH_STOP] = "CPU stop (for crash dump) interrupts", + [IPI_TIMER] = "Timer broadcast interrupts", + [IPI_IRQ_WORK] = "IRQ work interrupts", + [IPI_WAKEUP] = "CPU wake-up interrupts", +}; + +static void smp_cross_call(const struct cpumask *target, unsigned int ipinr); + +unsigned long irq_err_count; + +int arch_show_interrupts(struct seq_file *p, int prec) +{ + unsigned int cpu, i; + + for (i = 0; i < NR_IPI; i++) { + seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i, + prec >= 4 ? " " : ""); + for_each_online_cpu(cpu) + seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu)); + seq_printf(p, " %s\n", ipi_types[i]); + } + + seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count); + return 0; +} + +void arch_send_call_function_ipi_mask(const struct cpumask *mask) +{ + smp_cross_call(mask, IPI_CALL_FUNC); +} + +void arch_send_call_function_single_ipi(int cpu) +{ + smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC); +} + +#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL +void arch_send_wakeup_ipi_mask(const struct cpumask *mask) +{ + smp_cross_call(mask, IPI_WAKEUP); +} +#endif + +#ifdef CONFIG_IRQ_WORK +void arch_irq_work_raise(void) +{ + smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK); +} +#endif + +static void __noreturn local_cpu_stop(void) +{ + set_cpu_online(smp_processor_id(), false); + + local_daif_mask(); + sdei_mask_local_cpu(); + cpu_park_loop(); +} + +/* + * We need to implement panic_smp_self_stop() for parallel panic() calls, so + * that cpu_online_mask gets correctly updated and smp_send_stop() can skip + * CPUs that have already stopped themselves. 
+ */ +void __noreturn panic_smp_self_stop(void) +{ + local_cpu_stop(); +} + +#ifdef CONFIG_KEXEC_CORE +static atomic_t waiting_for_crash_ipi = ATOMIC_INIT(0); +#endif + +static void __noreturn ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs) +{ +#ifdef CONFIG_KEXEC_CORE + crash_save_cpu(regs, cpu); + + atomic_dec(&waiting_for_crash_ipi); + + local_irq_disable(); + sdei_mask_local_cpu(); + + if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) + __cpu_try_die(cpu); + + /* just in case */ + cpu_park_loop(); +#else + BUG(); +#endif +} + +/* + * Main handler for inter-processor interrupts + */ +static void do_handle_IPI(int ipinr) +{ + unsigned int cpu = smp_processor_id(); + + if ((unsigned)ipinr < NR_IPI) + trace_ipi_entry(ipi_types[ipinr]); + + switch (ipinr) { + case IPI_RESCHEDULE: + scheduler_ipi(); + break; + + case IPI_CALL_FUNC: + generic_smp_call_function_interrupt(); + break; + + case IPI_CPU_STOP: + local_cpu_stop(); + break; + + case IPI_CPU_CRASH_STOP: + if (IS_ENABLED(CONFIG_KEXEC_CORE)) { + ipi_cpu_crash_stop(cpu, get_irq_regs()); + + unreachable(); + } + break; + +#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST + case IPI_TIMER: + tick_receive_broadcast(); + break; +#endif + +#ifdef CONFIG_IRQ_WORK + case IPI_IRQ_WORK: + irq_work_run(); + break; +#endif + +#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL + case IPI_WAKEUP: + WARN_ONCE(!acpi_parking_protocol_valid(cpu), + "CPU%u: Wake-up IPI outside the ACPI parking protocol\n", + cpu); + break; +#endif + + default: + pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr); + break; + } + + if ((unsigned)ipinr < NR_IPI) + trace_ipi_exit(ipi_types[ipinr]); +} + +static irqreturn_t ipi_handler(int irq, void *data) +{ + do_handle_IPI(irq - ipi_irq_base); + return IRQ_HANDLED; +} + +static void smp_cross_call(const struct cpumask *target, unsigned int ipinr) +{ + trace_ipi_raise(target, ipi_types[ipinr]); + __ipi_send_mask(ipi_desc[ipinr], target); +} + +static void ipi_setup(int cpu) +{ + int i; + + if (WARN_ON_ONCE(!ipi_irq_base)) + return; + + for (i = 0; i < nr_ipi; i++) + enable_percpu_irq(ipi_irq_base + i, 0); +} + +#ifdef CONFIG_HOTPLUG_CPU +static void ipi_teardown(int cpu) +{ + int i; + + if (WARN_ON_ONCE(!ipi_irq_base)) + return; + + for (i = 0; i < nr_ipi; i++) + disable_percpu_irq(ipi_irq_base + i); +} +#endif + +void __init set_smp_ipi_range(int ipi_base, int n) +{ + int i; + + WARN_ON(n < NR_IPI); + nr_ipi = min(n, NR_IPI); + + for (i = 0; i < nr_ipi; i++) { + int err; + + err = request_percpu_irq(ipi_base + i, ipi_handler, + "IPI", &cpu_number); + WARN_ON(err); + + ipi_desc[i] = irq_to_desc(ipi_base + i); + irq_set_status_flags(ipi_base + i, IRQ_HIDDEN); + } + + ipi_irq_base = ipi_base; + + /* Setup the boot CPU immediately */ + ipi_setup(smp_processor_id()); +} + +void arch_smp_send_reschedule(int cpu) +{ + smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE); +} + +#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST +void tick_broadcast(const struct cpumask *mask) +{ + smp_cross_call(mask, IPI_TIMER); +} +#endif + +/* + * The number of CPUs online, not counting this CPU (which may not be + * fully online and so not counted in num_online_cpus()). 
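As an aside, the IPI machinery above is what backs the kernel's generic cross-call API; a hedged sketch of a caller (names are illustrative):

#include <linux/printk.h>
#include <linux/smp.h>

static void remote_work(void *info)
{
        pr_info("running on CPU%d\n", smp_processor_id());
}

/* Runs remote_work() on @cpu and waits for it; delivered as IPI_CALL_FUNC. */
static int poke_cpu(int cpu)
{
        return smp_call_function_single(cpu, remote_work, NULL, 1);
}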
+ */ +static inline unsigned int num_other_online_cpus(void) +{ + unsigned int this_cpu_online = cpu_online(smp_processor_id()); + + return num_online_cpus() - this_cpu_online; +} + +void smp_send_stop(void) +{ + unsigned long timeout; + + if (num_other_online_cpus()) { + cpumask_t mask; + + cpumask_copy(&mask, cpu_online_mask); + cpumask_clear_cpu(smp_processor_id(), &mask); + + if (system_state <= SYSTEM_RUNNING) + pr_crit("SMP: stopping secondary CPUs\n"); + smp_cross_call(&mask, IPI_CPU_STOP); + } + + /* Wait up to one second for other CPUs to stop */ + timeout = USEC_PER_SEC; + while (num_other_online_cpus() && timeout--) + udelay(1); + + if (num_other_online_cpus()) + pr_warn("SMP: failed to stop secondary CPUs %*pbl\n", + cpumask_pr_args(cpu_online_mask)); + + sdei_mask_local_cpu(); +} + +#ifdef CONFIG_KEXEC_CORE +void crash_smp_send_stop(void) +{ + static int cpus_stopped; + cpumask_t mask; + unsigned long timeout; + + /* + * This function can be called twice in panic path, but obviously + * we execute this only once. + */ + if (cpus_stopped) + return; + + cpus_stopped = 1; + + /* + * If this cpu is the only one alive at this point in time, online or + * not, there are no stop messages to be sent around, so just back out. + */ + if (num_other_online_cpus() == 0) + goto skip_ipi; + + cpumask_copy(&mask, cpu_online_mask); + cpumask_clear_cpu(smp_processor_id(), &mask); + + atomic_set(&waiting_for_crash_ipi, num_other_online_cpus()); + + pr_crit("SMP: stopping secondary CPUs\n"); + smp_cross_call(&mask, IPI_CPU_CRASH_STOP); + + /* Wait up to one second for other CPUs to stop */ + timeout = USEC_PER_SEC; + while ((atomic_read(&waiting_for_crash_ipi) > 0) && timeout--) + udelay(1); + + if (atomic_read(&waiting_for_crash_ipi) > 0) + pr_warn("SMP: failed to stop secondary CPUs %*pbl\n", + cpumask_pr_args(&mask)); + +skip_ipi: + sdei_mask_local_cpu(); + sdei_handler_abort(); +} + +bool smp_crash_stop_failed(void) +{ + return (atomic_read(&waiting_for_crash_ipi) > 0); +} +#endif + +static bool have_cpu_die(void) +{ +#ifdef CONFIG_HOTPLUG_CPU + int any_cpu = raw_smp_processor_id(); + const struct cpu_operations *ops = get_cpu_ops(any_cpu); + + if (ops && ops->cpu_die) + return true; +#endif + return false; +} + +bool cpus_are_stuck_in_kernel(void) +{ + bool smp_spin_tables = (num_possible_cpus() > 1 && !have_cpu_die()); + + return !!cpus_stuck_in_kernel || smp_spin_tables || + is_protected_kvm_enabled(); +} diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c new file mode 100644 index 0000000000..49029eace3 --- /dev/null +++ b/arch/arm64/kernel/smp_spin_table.c @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Spin Table SMP initialisation + * + * Copyright (C) 2013 ARM Ltd. + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +extern void secondary_holding_pen(void); +volatile unsigned long __section(".mmuoff.data.read") +secondary_holding_pen_release = INVALID_HWID; + +static phys_addr_t cpu_release_addr[NR_CPUS]; + +/* + * Write secondary_holding_pen_release in a way that is guaranteed to be + * visible to all observers, irrespective of whether they're taking part + * in coherency or not. This is necessary for the hotplug code to work + * reliably. 
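For context, a conceptual sketch of the other half of the spin-table contract, i.e. what the parked secondary (firmware/bootloader side, per the arm64 boot protocol) is expected to do; this is illustrative pseudo-firmware, not kernel code:

typedef void (*secondary_entry_t)(void);

/*
 * Poll the zero-initialised address published in the DT "cpu-release-addr"
 * property; once the kernel writes a physical entry point there
 * (secondary_holding_pen, written in smp_spin_table_cpu_prepare() below),
 * jump to it.
 */
static void spin_table_park(volatile unsigned long *release_addr)
{
        unsigned long entry;

        while (!(entry = *release_addr))
                asm volatile("wfe");

        ((secondary_entry_t)entry)();   /* does not return */
}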
+ */ +static void write_pen_release(u64 val) +{ + void *start = (void *)&secondary_holding_pen_release; + unsigned long size = sizeof(secondary_holding_pen_release); + + secondary_holding_pen_release = val; + dcache_clean_inval_poc((unsigned long)start, (unsigned long)start + size); +} + + +static int smp_spin_table_cpu_init(unsigned int cpu) +{ + struct device_node *dn; + int ret; + + dn = of_get_cpu_node(cpu, NULL); + if (!dn) + return -ENODEV; + + /* + * Determine the address from which the CPU is polling. + */ + ret = of_property_read_u64(dn, "cpu-release-addr", + &cpu_release_addr[cpu]); + if (ret) + pr_err("CPU %d: missing or invalid cpu-release-addr property\n", + cpu); + + of_node_put(dn); + + return ret; +} + +static int smp_spin_table_cpu_prepare(unsigned int cpu) +{ + __le64 __iomem *release_addr; + phys_addr_t pa_holding_pen = __pa_symbol(secondary_holding_pen); + + if (!cpu_release_addr[cpu]) + return -ENODEV; + + /* + * The cpu-release-addr may or may not be inside the linear mapping. + * As ioremap_cache will either give us a new mapping or reuse the + * existing linear mapping, we can use it to cover both cases. In + * either case the memory will be MT_NORMAL. + */ + release_addr = ioremap_cache(cpu_release_addr[cpu], + sizeof(*release_addr)); + if (!release_addr) + return -ENOMEM; + + /* + * We write the release address as LE regardless of the native + * endianness of the kernel. Therefore, any boot-loaders that + * read this address need to convert this address to the + * boot-loader's endianness before jumping. This is mandated by + * the boot protocol. + */ + writeq_relaxed(pa_holding_pen, release_addr); + dcache_clean_inval_poc((__force unsigned long)release_addr, + (__force unsigned long)release_addr + + sizeof(*release_addr)); + + /* + * Send an event to wake up the secondary CPU. + */ + sev(); + + iounmap(release_addr); + + return 0; +} + +static int smp_spin_table_cpu_boot(unsigned int cpu) +{ + /* + * Update the pen release flag. + */ + write_pen_release(cpu_logical_map(cpu)); + + /* + * Send an event, causing the secondaries to read pen_release. + */ + sev(); + + return 0; +} + +const struct cpu_operations smp_spin_table_ops = { + .name = "spin-table", + .cpu_init = smp_spin_table_cpu_init, + .cpu_prepare = smp_spin_table_cpu_prepare, + .cpu_boot = smp_spin_table_cpu_boot, +}; diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c new file mode 100644 index 0000000000..17f66a74c7 --- /dev/null +++ b/arch/arm64/kernel/stacktrace.c @@ -0,0 +1,242 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Stack tracing support + * + * Copyright (C) 2012 ARM Ltd. + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +/* + * Start an unwind from a pt_regs. + * + * The unwind will begin at the PC within the regs. + * + * The regs must be on a stack currently owned by the calling task. + */ +static __always_inline void +unwind_init_from_regs(struct unwind_state *state, + struct pt_regs *regs) +{ + unwind_init_common(state, current); + + state->fp = regs->regs[29]; + state->pc = regs->pc; +} + +/* + * Start an unwind from a caller. + * + * The unwind will begin at the caller of whichever function this is inlined + * into. + * + * The function which invokes this must be noinline. 
+ */ +static __always_inline void +unwind_init_from_caller(struct unwind_state *state) +{ + unwind_init_common(state, current); + + state->fp = (unsigned long)__builtin_frame_address(1); + state->pc = (unsigned long)__builtin_return_address(0); +} + +/* + * Start an unwind from a blocked task. + * + * The unwind will begin at the blocked tasks saved PC (i.e. the caller of + * cpu_switch_to()). + * + * The caller should ensure the task is blocked in cpu_switch_to() for the + * duration of the unwind, or the unwind will be bogus. It is never valid to + * call this for the current task. + */ +static __always_inline void +unwind_init_from_task(struct unwind_state *state, + struct task_struct *task) +{ + unwind_init_common(state, task); + + state->fp = thread_saved_fp(task); + state->pc = thread_saved_pc(task); +} + +static __always_inline int +unwind_recover_return_address(struct unwind_state *state) +{ +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + if (state->task->ret_stack && + (state->pc == (unsigned long)return_to_handler)) { + unsigned long orig_pc; + orig_pc = ftrace_graph_ret_addr(state->task, NULL, state->pc, + (void *)state->fp); + if (WARN_ON_ONCE(state->pc == orig_pc)) + return -EINVAL; + state->pc = orig_pc; + } +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ + +#ifdef CONFIG_KRETPROBES + if (is_kretprobe_trampoline(state->pc)) { + state->pc = kretprobe_find_ret_addr(state->task, + (void *)state->fp, + &state->kr_cur); + } +#endif /* CONFIG_KRETPROBES */ + + return 0; +} + +/* + * Unwind from one frame record (A) to the next frame record (B). + * + * We terminate early if the location of B indicates a malformed chain of frame + * records (e.g. a cycle), determined based on the location and fp value of A + * and the location (but not the fp value) of B. + */ +static __always_inline int +unwind_next(struct unwind_state *state) +{ + struct task_struct *tsk = state->task; + unsigned long fp = state->fp; + int err; + + /* Final frame; nothing to unwind */ + if (fp == (unsigned long)task_pt_regs(tsk)->stackframe) + return -ENOENT; + + err = unwind_next_frame_record(state); + if (err) + return err; + + state->pc = ptrauth_strip_kernel_insn_pac(state->pc); + + return unwind_recover_return_address(state); +} + +static __always_inline void +unwind(struct unwind_state *state, stack_trace_consume_fn consume_entry, + void *cookie) +{ + if (unwind_recover_return_address(state)) + return; + + while (1) { + int ret; + + if (!consume_entry(cookie, state->pc)) + break; + ret = unwind_next(state); + if (ret < 0) + break; + } +} + +/* + * Per-cpu stacks are only accessible when unwinding the current task in a + * non-preemptible context. + */ +#define STACKINFO_CPU(name) \ + ({ \ + ((task == current) && !preemptible()) \ + ? stackinfo_get_##name() \ + : stackinfo_get_unknown(); \ + }) + +/* + * SDEI stacks are only accessible when unwinding the current task in an NMI + * context. + */ +#define STACKINFO_SDEI(name) \ + ({ \ + ((task == current) && in_nmi()) \ + ? stackinfo_get_sdei_##name() \ + : stackinfo_get_unknown(); \ + }) + +#define STACKINFO_EFI \ + ({ \ + ((task == current) && current_in_efi()) \ + ? 
stackinfo_get_efi() \ + : stackinfo_get_unknown(); \ + }) + +noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry, + void *cookie, struct task_struct *task, + struct pt_regs *regs) +{ + struct stack_info stacks[] = { + stackinfo_get_task(task), + STACKINFO_CPU(irq), +#if defined(CONFIG_VMAP_STACK) + STACKINFO_CPU(overflow), +#endif +#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_ARM_SDE_INTERFACE) + STACKINFO_SDEI(normal), + STACKINFO_SDEI(critical), +#endif +#ifdef CONFIG_EFI + STACKINFO_EFI, +#endif + }; + struct unwind_state state = { + .stacks = stacks, + .nr_stacks = ARRAY_SIZE(stacks), + }; + + if (regs) { + if (task != current) + return; + unwind_init_from_regs(&state, regs); + } else if (task == current) { + unwind_init_from_caller(&state); + } else { + unwind_init_from_task(&state, task); + } + + unwind(&state, consume_entry, cookie); +} + +static bool dump_backtrace_entry(void *arg, unsigned long where) +{ + char *loglvl = arg; + printk("%s %pSb\n", loglvl, (void *)where); + return true; +} + +void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk, + const char *loglvl) +{ + pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk); + + if (regs && user_mode(regs)) + return; + + if (!tsk) + tsk = current; + + if (!try_get_task_stack(tsk)) + return; + + printk("%sCall trace:\n", loglvl); + arch_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs); + + put_task_stack(tsk); +} + +void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl) +{ + dump_backtrace(NULL, tsk, loglvl); + barrier(); +} diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c new file mode 100644 index 0000000000..0fbdf5fe64 --- /dev/null +++ b/arch/arm64/kernel/suspend.c @@ -0,0 +1,175 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * This is allocated by cpu_suspend_init(), and used to store a pointer to + * the 'struct sleep_stack_data' the contains a particular CPUs state. + */ +unsigned long *sleep_save_stash; + +/* + * This hook is provided so that cpu_suspend code can restore HW + * breakpoints as early as possible in the resume path, before reenabling + * debug exceptions. Code cannot be run from a CPU PM notifier since by the + * time the notifier runs debug exceptions might have been enabled already, + * with HW breakpoints registers content still in an unknown state. + */ +static int (*hw_breakpoint_restore)(unsigned int); +void __init cpu_suspend_set_dbg_restorer(int (*hw_bp_restore)(unsigned int)) +{ + /* Prevent multiple restore hook initializations */ + if (WARN_ON(hw_breakpoint_restore)) + return; + hw_breakpoint_restore = hw_bp_restore; +} + +void notrace __cpu_suspend_exit(void) +{ + unsigned int cpu = smp_processor_id(); + + mte_suspend_exit(); + + /* + * We are resuming from reset with the idmap active in TTBR0_EL1. + * We must uninstall the idmap and restore the expected MMU + * state before we can possibly return to userspace. + */ + cpu_uninstall_idmap(); + + /* Restore CnP bit in TTBR1_EL1 */ + if (system_supports_cnp()) + cpu_replace_ttbr1(lm_alias(swapper_pg_dir), idmap_pg_dir); + + /* + * PSTATE was not saved over suspend/resume, re-enable any detected + * features that might not have been set correctly. 
+ */ + if (cpus_have_const_cap(ARM64_HAS_DIT)) + set_pstate_dit(1); + __uaccess_enable_hw_pan(); + + /* + * Restore HW breakpoint registers to sane values + * before debug exceptions are possibly reenabled + * by cpu_suspend()s local_daif_restore() call. + */ + if (hw_breakpoint_restore) + hw_breakpoint_restore(cpu); + + /* + * On resume, firmware implementing dynamic mitigation will + * have turned the mitigation on. If the user has forcefully + * disabled it, make sure their wishes are obeyed. + */ + spectre_v4_enable_mitigation(NULL); + + /* Restore additional feature-specific configuration */ + ptrauth_suspend_exit(); +} + +/* + * cpu_suspend + * + * arg: argument to pass to the finisher function + * fn: finisher function pointer + * + */ +int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) +{ + int ret = 0; + unsigned long flags; + struct sleep_stack_data state; + struct arm_cpuidle_irq_context context; + + /* Report any MTE async fault before going to suspend */ + mte_suspend_enter(); + + /* + * From this point debug exceptions are disabled to prevent + * updates to mdscr register (saved and restored along with + * general purpose registers) from kernel debuggers. + * + * Strictly speaking the trace_hardirqs_off() here is superfluous, + * hardirqs should be firmly off by now. This really ought to use + * something like raw_local_daif_save(). + */ + flags = local_daif_save(); + + /* + * Function graph tracer state gets inconsistent when the kernel + * calls functions that never return (aka suspend finishers) hence + * disable graph tracing during their execution. + */ + pause_graph_tracing(); + + /* + * Switch to using DAIF.IF instead of PMR in order to reliably + * resume if we're using pseudo-NMIs. + */ + arm_cpuidle_save_irq_context(&context); + + ct_cpuidle_enter(); + + if (__cpu_suspend_enter(&state)) { + /* Call the suspend finisher */ + ret = fn(arg); + + /* + * Never gets here, unless the suspend finisher fails. + * Successful cpu_suspend() should return from cpu_resume(), + * returning through this code path is considered an error + * If the return value is set to 0 force ret = -EOPNOTSUPP + * to make sure a proper error condition is propagated + */ + if (!ret) + ret = -EOPNOTSUPP; + + ct_cpuidle_exit(); + } else { + ct_cpuidle_exit(); + __cpu_suspend_exit(); + } + + arm_cpuidle_restore_irq_context(&context); + + unpause_graph_tracing(); + + /* + * Restore pstate flags. OS lock and mdscr have been already + * restored, so from this point onwards, debugging is fully + * reenabled if it was enabled when core started shutdown. + */ + local_daif_restore(flags); + + return ret; +} + +static int __init cpu_suspend_init(void) +{ + /* ctx_ptr is an array of physical addresses */ + sleep_save_stash = kcalloc(mpidr_hash_size(), sizeof(*sleep_save_stash), + GFP_KERNEL); + + if (WARN_ON(!sleep_save_stash)) + return -ENOMEM; + + return 0; +} +early_initcall(cpu_suspend_init); diff --git a/arch/arm64/kernel/sys.c b/arch/arm64/kernel/sys.c new file mode 100644 index 0000000000..d5ffaaab31 --- /dev/null +++ b/arch/arm64/kernel/sys.c @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * AArch64-specific system calls implementation + * + * Copyright (C) 2012 ARM Ltd. 
+ * Author: Catalin Marinas + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, + unsigned long, prot, unsigned long, flags, + unsigned long, fd, unsigned long, off) +{ + if (offset_in_page(off) != 0) + return -EINVAL; + + return ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); +} + +SYSCALL_DEFINE1(arm64_personality, unsigned int, personality) +{ + if (personality(personality) == PER_LINUX32 && + !system_supports_32bit_el0()) + return -EINVAL; + return ksys_personality(personality); +} + +asmlinkage long sys_ni_syscall(void); + +asmlinkage long __arm64_sys_ni_syscall(const struct pt_regs *__unused) +{ + return sys_ni_syscall(); +} + +/* + * Wrappers to pass the pt_regs argument. + */ +#define __arm64_sys_personality __arm64_sys_arm64_personality + +#undef __SYSCALL +#define __SYSCALL(nr, sym) asmlinkage long __arm64_##sym(const struct pt_regs *); +#include + +#undef __SYSCALL +#define __SYSCALL(nr, sym) [nr] = __arm64_##sym, + +const syscall_fn_t sys_call_table[__NR_syscalls] = { + [0 ... __NR_syscalls - 1] = __arm64_sys_ni_syscall, +#include +}; diff --git a/arch/arm64/kernel/sys32.c b/arch/arm64/kernel/sys32.c new file mode 100644 index 0000000000..fc40386afb --- /dev/null +++ b/arch/arm64/kernel/sys32.c @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * arch/arm64/kernel/sys32.c + * + * Copyright (C) 2015 ARM Ltd. + */ + +/* + * Needed to avoid conflicting __NR_* macros between uapi/asm/unistd.h and + * asm/unistd32.h. + */ +#define __COMPAT_SYSCALL_NR + +#include +#include +#include + +#include + +asmlinkage long compat_sys_sigreturn(void); +asmlinkage long compat_sys_rt_sigreturn(void); + +COMPAT_SYSCALL_DEFINE3(aarch32_statfs64, const char __user *, pathname, + compat_size_t, sz, struct compat_statfs64 __user *, buf) +{ + /* + * 32-bit ARM applies an OABI compatibility fixup to statfs64 and + * fstatfs64 regardless of whether OABI is in use, and therefore + * arbitrary binaries may rely upon it, so we must do the same. + * For more details, see commit: + * + * 713c481519f19df9 ("[ARM] 3108/2: old ABI compat: statfs64 and + * fstatfs64") + */ + if (sz == 88) + sz = 84; + + return kcompat_sys_statfs64(pathname, sz, buf); +} + +COMPAT_SYSCALL_DEFINE3(aarch32_fstatfs64, unsigned int, fd, compat_size_t, sz, + struct compat_statfs64 __user *, buf) +{ + /* see aarch32_statfs64 */ + if (sz == 88) + sz = 84; + + return kcompat_sys_fstatfs64(fd, sz, buf); +} + +/* + * Note: off_4k is always in units of 4K. If we can't do the + * requested offset because it is not page-aligned, we return -EINVAL. 
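The page-offset check and conversion done by aarch32_mmap2() just below can be written out for an arbitrary page size. The helper name off_4k_to_pgoff and the explicit page_shift parameter are illustrative only; the kernel derives the mask and shift from PAGE_MASK and PAGE_SHIFT.

#include <errno.h>

/* Convert an offset given in 4K units into an offset in pages,
 * rejecting values that cannot be expressed in whole pages. */
static long off_4k_to_pgoff(unsigned long off_4k, unsigned int page_shift,
                            unsigned long *pgoff)
{
        unsigned long unaligned_mask = (1UL << (page_shift - 12)) - 1;

        /* With 64K pages (page_shift == 16) the low 4 bits of off_4k
         * must be clear; with 4K pages the mask is 0 and nothing is
         * rejected. */
        if (off_4k & unaligned_mask)
                return -EINVAL;

        *pgoff = off_4k >> (page_shift - 12);
        return 0;
}
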
+ */ +COMPAT_SYSCALL_DEFINE6(aarch32_mmap2, unsigned long, addr, unsigned long, len, + unsigned long, prot, unsigned long, flags, + unsigned long, fd, unsigned long, off_4k) +{ + if (off_4k & (~PAGE_MASK >> 12)) + return -EINVAL; + + off_4k >>= (PAGE_SHIFT - 12); + + return ksys_mmap_pgoff(addr, len, prot, flags, fd, off_4k); +} + +#ifdef CONFIG_CPU_BIG_ENDIAN +#define arg_u32p(name) u32, name##_hi, u32, name##_lo +#else +#define arg_u32p(name) u32, name##_lo, u32, name##_hi +#endif + +#define arg_u64(name) (((u64)name##_hi << 32) | name##_lo) + +COMPAT_SYSCALL_DEFINE6(aarch32_pread64, unsigned int, fd, char __user *, buf, + size_t, count, u32, __pad, arg_u32p(pos)) +{ + return ksys_pread64(fd, buf, count, arg_u64(pos)); +} + +COMPAT_SYSCALL_DEFINE6(aarch32_pwrite64, unsigned int, fd, + const char __user *, buf, size_t, count, u32, __pad, + arg_u32p(pos)) +{ + return ksys_pwrite64(fd, buf, count, arg_u64(pos)); +} + +COMPAT_SYSCALL_DEFINE4(aarch32_truncate64, const char __user *, pathname, + u32, __pad, arg_u32p(length)) +{ + return ksys_truncate(pathname, arg_u64(length)); +} + +COMPAT_SYSCALL_DEFINE4(aarch32_ftruncate64, unsigned int, fd, u32, __pad, + arg_u32p(length)) +{ + return ksys_ftruncate(fd, arg_u64(length)); +} + +COMPAT_SYSCALL_DEFINE5(aarch32_readahead, int, fd, u32, __pad, + arg_u32p(offset), size_t, count) +{ + return ksys_readahead(fd, arg_u64(offset), count); +} + +COMPAT_SYSCALL_DEFINE6(aarch32_fadvise64_64, int, fd, int, advice, + arg_u32p(offset), arg_u32p(len)) +{ + return ksys_fadvise64_64(fd, arg_u64(offset), arg_u64(len), advice); +} + +COMPAT_SYSCALL_DEFINE6(aarch32_sync_file_range2, int, fd, unsigned int, flags, + arg_u32p(offset), arg_u32p(nbytes)) +{ + return ksys_sync_file_range(fd, arg_u64(offset), arg_u64(nbytes), + flags); +} + +COMPAT_SYSCALL_DEFINE6(aarch32_fallocate, int, fd, int, mode, + arg_u32p(offset), arg_u32p(len)) +{ + return ksys_fallocate(fd, mode, arg_u64(offset), arg_u64(len)); +} + +#undef __SYSCALL +#define __SYSCALL(nr, sym) asmlinkage long __arm64_##sym(const struct pt_regs *); +#include + +#undef __SYSCALL +#define __SYSCALL(nr, sym) [nr] = __arm64_##sym, + +const syscall_fn_t compat_sys_call_table[__NR_compat_syscalls] = { + [0 ... __NR_compat_syscalls - 1] = __arm64_sys_ni_syscall, +#include +}; diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c new file mode 100644 index 0000000000..df14336c3a --- /dev/null +++ b/arch/arm64/kernel/sys_compat.c @@ -0,0 +1,118 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Based on arch/arm/kernel/sys_arm.c + * + * Copyright (C) People who wrote linux/arch/i386/kernel/sys_i386.c + * Copyright (C) 1995, 1996 Russell King. + * Copyright (C) 2012 ARM Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +static long +__do_compat_cache_op(unsigned long start, unsigned long end) +{ + long ret; + + do { + unsigned long chunk = min(PAGE_SIZE, end - start); + + if (fatal_signal_pending(current)) + return 0; + + if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) { + /* + * The workaround requires an inner-shareable tlbi. + * We pick the reserved-ASID to minimise the impact. 
+ */ + __tlbi(aside1is, __TLBI_VADDR(0, 0)); + dsb(ish); + } + + ret = caches_clean_inval_user_pou(start, start + chunk); + if (ret) + return ret; + + cond_resched(); + start += chunk; + } while (start < end); + + return 0; +} + +static inline long +do_compat_cache_op(unsigned long start, unsigned long end, int flags) +{ + if (end < start || flags) + return -EINVAL; + + if (!access_ok((const void __user *)start, end - start)) + return -EFAULT; + + return __do_compat_cache_op(start, end); +} +/* + * Handle all unrecognised system calls. + */ +long compat_arm_syscall(struct pt_regs *regs, int scno) +{ + unsigned long addr; + + switch (scno) { + /* + * Flush a region from virtual address 'r0' to virtual address 'r1' + * _exclusive_. There is no alignment requirement on either address; + * user space does not need to know the hardware cache layout. + * + * r2 contains flags. It should ALWAYS be passed as ZERO until it + * is defined to be something else. For now we ignore it, but may + * the fires of hell burn in your belly if you break this rule. ;) + * + * (at a later date, we may want to allow this call to not flush + * various aspects of the cache. Passing '0' will guarantee that + * everything necessary gets flushed to maintain consistency in + * the specified region). + */ + case __ARM_NR_compat_cacheflush: + return do_compat_cache_op(regs->regs[0], regs->regs[1], regs->regs[2]); + + case __ARM_NR_compat_set_tls: + current->thread.uw.tp_value = regs->regs[0]; + + /* + * Protect against register corruption from context switch. + * See comment in tls_thread_flush. + */ + barrier(); + write_sysreg(regs->regs[0], tpidrro_el0); + return 0; + + default: + /* + * Calls 0xf0xxx..0xf07ff are defined to return -ENOSYS + * if not implemented, rather than raising SIGILL. This + * way the calling program can gracefully determine whether + * a feature is supported. + */ + if (scno < __ARM_NR_COMPAT_END) + return -ENOSYS; + break; + } + + addr = instruction_pointer(regs) - (compat_thumb_mode(regs) ? 2 : 4); + + arm64_notify_die("Oops - bad compat syscall(2)", regs, + SIGILL, ILL_ILLTRP, addr, 0); + return 0; +} diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c new file mode 100644 index 0000000000..9a70d9746b --- /dev/null +++ b/arch/arm64/kernel/syscall.c @@ -0,0 +1,164 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +long compat_arm_syscall(struct pt_regs *regs, int scno); +long sys_ni_syscall(void); + +static long do_ni_syscall(struct pt_regs *regs, int scno) +{ +#ifdef CONFIG_COMPAT + long ret; + if (is_compat_task()) { + ret = compat_arm_syscall(regs, scno); + if (ret != -ENOSYS) + return ret; + } +#endif + + return sys_ni_syscall(); +} + +static long __invoke_syscall(struct pt_regs *regs, syscall_fn_t syscall_fn) +{ + return syscall_fn(regs); +} + +static void invoke_syscall(struct pt_regs *regs, unsigned int scno, + unsigned int sc_nr, + const syscall_fn_t syscall_table[]) +{ + long ret; + + add_random_kstack_offset(); + + if (scno < sc_nr) { + syscall_fn_t syscall_fn; + syscall_fn = syscall_table[array_index_nospec(scno, sc_nr)]; + ret = __invoke_syscall(regs, syscall_fn); + } else { + ret = do_ni_syscall(regs, scno); + } + + syscall_set_return_value(current, regs, 0, ret); + + /* + * Ultimately, this value will get limited by KSTACK_OFFSET_MAX(), + * but not enough for arm64 stack utilization comfort. 
To keep + * reasonable stack head room, reduce the maximum offset to 9 bits. + * + * The actual entropy will be further reduced by the compiler when + * applying stack alignment constraints: the AAPCS mandates a + * 16-byte (i.e. 4-bit) aligned SP at function boundaries. + * + * The resulting 5 bits of entropy is seen in SP[8:4]. + */ + choose_random_kstack_offset(get_random_u16() & 0x1FF); +} + +static inline bool has_syscall_work(unsigned long flags) +{ + return unlikely(flags & _TIF_SYSCALL_WORK); +} + +static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr, + const syscall_fn_t syscall_table[]) +{ + unsigned long flags = read_thread_flags(); + + regs->orig_x0 = regs->regs[0]; + regs->syscallno = scno; + + /* + * BTI note: + * The architecture does not guarantee that SPSR.BTYPE is zero + * on taking an SVC, so we could return to userspace with a + * non-zero BTYPE after the syscall. + * + * This shouldn't matter except when userspace is explicitly + * doing something stupid, such as setting PROT_BTI on a page + * that lacks conforming BTI/PACIxSP instructions, falling + * through from one executable page to another with differing + * PROT_BTI, or messing with BTYPE via ptrace: in such cases, + * userspace should not be surprised if a SIGILL occurs on + * syscall return. + * + * So, don't touch regs->pstate & PSR_BTYPE_MASK here. + * (Similarly for HVC and SMC elsewhere.) + */ + + if (flags & _TIF_MTE_ASYNC_FAULT) { + /* + * Process the asynchronous tag check fault before the actual + * syscall. do_notify_resume() will send a signal to userspace + * before the syscall is restarted. + */ + syscall_set_return_value(current, regs, -ERESTARTNOINTR, 0); + return; + } + + if (has_syscall_work(flags)) { + /* + * The de-facto standard way to skip a system call using ptrace + * is to set the system call to -1 (NO_SYSCALL) and set x0 to a + * suitable error code for consumption by userspace. However, + * this cannot be distinguished from a user-issued syscall(-1) + * and so we must set x0 to -ENOSYS here in case the tracer doesn't + * issue the skip and we fall into trace_exit with x0 preserved. + * + * This is slightly odd because it also means that if a tracer + * sets the system call number to -1 but does not initialise x0, + * then x0 will be preserved for all system calls apart from a + * user-issued syscall(-1). However, requesting a skip and not + * setting the return value is unlikely to do anything sensible + * anyway. + */ + if (scno == NO_SYSCALL) + syscall_set_return_value(current, regs, -ENOSYS, 0); + scno = syscall_trace_enter(regs); + if (scno == NO_SYSCALL) + goto trace_exit; + } + + invoke_syscall(regs, scno, sc_nr, syscall_table); + + /* + * The tracing status may have changed under our feet, so we have to + * check again. However, if we were tracing entry, then we always trace + * exit regardless, as the old entry assembly did. 
+ */ + if (!has_syscall_work(flags) && !IS_ENABLED(CONFIG_DEBUG_RSEQ)) { + flags = read_thread_flags(); + if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP)) + return; + } + +trace_exit: + syscall_trace_exit(regs); +} + +void do_el0_svc(struct pt_regs *regs) +{ + el0_svc_common(regs, regs->regs[8], __NR_syscalls, sys_call_table); +} + +#ifdef CONFIG_COMPAT +void do_el0_svc_compat(struct pt_regs *regs) +{ + el0_svc_common(regs, regs->regs[7], __NR_compat_syscalls, + compat_sys_call_table); +} +#endif diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c new file mode 100644 index 0000000000..b5855eb743 --- /dev/null +++ b/arch/arm64/kernel/time.c @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Based on arch/arm/kernel/time.c + * + * Copyright (C) 1991, 1992, 1995 Linus Torvalds + * Modifications for ARM (C) 1994-2001 Russell King + * Copyright (C) 2012 ARM Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +static bool profile_pc_cb(void *arg, unsigned long pc) +{ + unsigned long *prof_pc = arg; + + if (in_lock_functions(pc)) + return true; + *prof_pc = pc; + return false; +} + +unsigned long profile_pc(struct pt_regs *regs) +{ + unsigned long prof_pc = 0; + + arch_stack_walk(profile_pc_cb, &prof_pc, current, regs); + + return prof_pc; +} +EXPORT_SYMBOL(profile_pc); + +void __init time_init(void) +{ + u32 arch_timer_rate; + + of_clk_init(NULL); + timer_probe(); + + tick_setup_hrtimer_broadcast(); + + arch_timer_rate = arch_timer_get_rate(); + if (!arch_timer_rate) + panic("Unable to initialise architected timer.\n"); + + /* Calibrate the delay loop directly */ + lpj_fine = arch_timer_rate / HZ; + + pv_time_init(); +} diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c new file mode 100644 index 0000000000..817d788cd8 --- /dev/null +++ b/arch/arm64/kernel/topology.c @@ -0,0 +1,348 @@ +/* + * arch/arm64/kernel/topology.c + * + * Copyright (C) 2011,2013,2014 Linaro Limited. + * + * Based on the arm32 version written by Vincent Guittot in turn based on + * arch/sh/kernel/topology.c + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#ifdef CONFIG_ACPI +static bool __init acpi_cpu_is_threaded(int cpu) +{ + int is_threaded = acpi_pptt_cpu_is_thread(cpu); + + /* + * if the PPTT doesn't have thread information, assume a homogeneous + * machine and return the current CPU's thread state. + */ + if (is_threaded < 0) + is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK; + + return !!is_threaded; +} + +/* + * Propagate the topology information of the processor_topology_node tree to the + * cpu_topology array. 
+ */ +int __init parse_acpi_topology(void) +{ + int cpu, topology_id; + + if (acpi_disabled) + return 0; + + for_each_possible_cpu(cpu) { + topology_id = find_acpi_cpu_topology(cpu, 0); + if (topology_id < 0) + return topology_id; + + if (acpi_cpu_is_threaded(cpu)) { + cpu_topology[cpu].thread_id = topology_id; + topology_id = find_acpi_cpu_topology(cpu, 1); + cpu_topology[cpu].core_id = topology_id; + } else { + cpu_topology[cpu].thread_id = -1; + cpu_topology[cpu].core_id = topology_id; + } + topology_id = find_acpi_cpu_topology_cluster(cpu); + cpu_topology[cpu].cluster_id = topology_id; + topology_id = find_acpi_cpu_topology_package(cpu); + cpu_topology[cpu].package_id = topology_id; + } + + return 0; +} +#endif + +#ifdef CONFIG_ARM64_AMU_EXTN +#define read_corecnt() read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0) +#define read_constcnt() read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0) +#else +#define read_corecnt() (0UL) +#define read_constcnt() (0UL) +#endif + +#undef pr_fmt +#define pr_fmt(fmt) "AMU: " fmt + +static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, arch_max_freq_scale); +static DEFINE_PER_CPU(u64, arch_const_cycles_prev); +static DEFINE_PER_CPU(u64, arch_core_cycles_prev); +static cpumask_var_t amu_fie_cpus; + +void update_freq_counters_refs(void) +{ + this_cpu_write(arch_core_cycles_prev, read_corecnt()); + this_cpu_write(arch_const_cycles_prev, read_constcnt()); +} + +static inline bool freq_counters_valid(int cpu) +{ + if ((cpu >= nr_cpu_ids) || !cpumask_test_cpu(cpu, cpu_present_mask)) + return false; + + if (!cpu_has_amu_feat(cpu)) { + pr_debug("CPU%d: counters are not supported.\n", cpu); + return false; + } + + if (unlikely(!per_cpu(arch_const_cycles_prev, cpu) || + !per_cpu(arch_core_cycles_prev, cpu))) { + pr_debug("CPU%d: cycle counters are not enabled.\n", cpu); + return false; + } + + return true; +} + +static int freq_inv_set_max_ratio(int cpu, u64 max_rate, u64 ref_rate) +{ + u64 ratio; + + if (unlikely(!max_rate || !ref_rate)) { + pr_debug("CPU%d: invalid maximum or reference frequency.\n", + cpu); + return -EINVAL; + } + + /* + * Pre-compute the fixed ratio between the frequency of the constant + * reference counter and the maximum frequency of the CPU. + * + * ref_rate + * arch_max_freq_scale = ---------- * SCHED_CAPACITY_SCALE² + * max_rate + * + * We use a factor of 2 * SCHED_CAPACITY_SHIFT -> SCHED_CAPACITY_SCALE² + * in order to ensure a good resolution for arch_max_freq_scale for + * very low reference frequencies (down to the KHz range which should + * be unlikely). + */ + ratio = ref_rate << (2 * SCHED_CAPACITY_SHIFT); + ratio = div64_u64(ratio, max_rate); + if (!ratio) { + WARN_ONCE(1, "Reference frequency too low.\n"); + return -EINVAL; + } + + per_cpu(arch_max_freq_scale, cpu) = (unsigned long)ratio; + + return 0; +} + +static void amu_scale_freq_tick(void) +{ + u64 prev_core_cnt, prev_const_cnt; + u64 core_cnt, const_cnt, scale; + + prev_const_cnt = this_cpu_read(arch_const_cycles_prev); + prev_core_cnt = this_cpu_read(arch_core_cycles_prev); + + update_freq_counters_refs(); + + const_cnt = this_cpu_read(arch_const_cycles_prev); + core_cnt = this_cpu_read(arch_core_cycles_prev); + + if (unlikely(core_cnt <= prev_core_cnt || + const_cnt <= prev_const_cnt)) + return; + + /* + * /\core arch_max_freq_scale + * scale = ------- * -------------------- + * /\const SCHED_CAPACITY_SCALE + * + * See validate_cpu_freq_invariance_counters() for details on + * arch_max_freq_scale and the use of SCHED_CAPACITY_SHIFT. 
+ */ + scale = core_cnt - prev_core_cnt; + scale *= this_cpu_read(arch_max_freq_scale); + scale = div64_u64(scale >> SCHED_CAPACITY_SHIFT, + const_cnt - prev_const_cnt); + + scale = min_t(unsigned long, scale, SCHED_CAPACITY_SCALE); + this_cpu_write(arch_freq_scale, (unsigned long)scale); +} + +static struct scale_freq_data amu_sfd = { + .source = SCALE_FREQ_SOURCE_ARCH, + .set_freq_scale = amu_scale_freq_tick, +}; + +static void amu_fie_setup(const struct cpumask *cpus) +{ + int cpu; + + /* We are already set since the last insmod of cpufreq driver */ + if (unlikely(cpumask_subset(cpus, amu_fie_cpus))) + return; + + for_each_cpu(cpu, cpus) { + if (!freq_counters_valid(cpu) || + freq_inv_set_max_ratio(cpu, + cpufreq_get_hw_max_freq(cpu) * 1000ULL, + arch_timer_get_rate())) + return; + } + + cpumask_or(amu_fie_cpus, amu_fie_cpus, cpus); + + topology_set_scale_freq_source(&amu_sfd, amu_fie_cpus); + + pr_debug("CPUs[%*pbl]: counters will be used for FIE.", + cpumask_pr_args(cpus)); +} + +static int init_amu_fie_callback(struct notifier_block *nb, unsigned long val, + void *data) +{ + struct cpufreq_policy *policy = data; + + if (val == CPUFREQ_CREATE_POLICY) + amu_fie_setup(policy->related_cpus); + + /* + * We don't need to handle CPUFREQ_REMOVE_POLICY event as the AMU + * counters don't have any dependency on cpufreq driver once we have + * initialized AMU support and enabled invariance. The AMU counters will + * keep on working just fine in the absence of the cpufreq driver, and + * for the CPUs for which there are no counters available, the last set + * value of arch_freq_scale will remain valid as that is the frequency + * those CPUs are running at. + */ + + return 0; +} + +static struct notifier_block init_amu_fie_notifier = { + .notifier_call = init_amu_fie_callback, +}; + +static int __init init_amu_fie(void) +{ + int ret; + + if (!zalloc_cpumask_var(&amu_fie_cpus, GFP_KERNEL)) + return -ENOMEM; + + ret = cpufreq_register_notifier(&init_amu_fie_notifier, + CPUFREQ_POLICY_NOTIFIER); + if (ret) + free_cpumask_var(amu_fie_cpus); + + return ret; +} +core_initcall(init_amu_fie); + +#ifdef CONFIG_ACPI_CPPC_LIB +#include + +static void cpu_read_corecnt(void *val) +{ + /* + * A value of 0 can be returned if the current CPU does not support AMUs + * or if the counter is disabled for this CPU. A return value of 0 at + * counter read is properly handled as an error case by the users of the + * counter. + */ + *(u64 *)val = read_corecnt(); +} + +static void cpu_read_constcnt(void *val) +{ + /* + * Return 0 if the current CPU is affected by erratum 2457168. A value + * of 0 is also returned if the current CPU does not support AMUs or if + * the counter is disabled. A return value of 0 at counter read is + * properly handled as an error case by the users of the counter. + */ + *(u64 *)val = this_cpu_has_cap(ARM64_WORKAROUND_2457168) ? + 0UL : read_constcnt(); +} + +static inline +int counters_read_on_cpu(int cpu, smp_call_func_t func, u64 *val) +{ + /* + * Abort call on counterless CPU or when interrupts are + * disabled - can lead to deadlock in smp sync call. + */ + if (!cpu_has_amu_feat(cpu)) + return -EOPNOTSUPP; + + if (WARN_ON_ONCE(irqs_disabled())) + return -EPERM; + + smp_call_function_single(cpu, func, val, 1); + + return 0; +} + +/* + * Refer to drivers/acpi/cppc_acpi.c for the description of the functions + * below. + */ +bool cpc_ffh_supported(void) +{ + int cpu = get_cpu_with_amu_feat(); + + /* + * FFH is considered supported if there is at least one present CPU that + * supports AMUs. 
Using FFH to read core and reference counters for CPUs + * that do not support AMUs, have counters disabled or that are affected + * by errata, will result in a return value of 0. + * + * This is done to allow any enabled and valid counters to be read + * through FFH, knowing that potentially returning 0 as counter value is + * properly handled by the users of these counters. + */ + if ((cpu >= nr_cpu_ids) || !cpumask_test_cpu(cpu, cpu_present_mask)) + return false; + + return true; +} + +int cpc_read_ffh(int cpu, struct cpc_reg *reg, u64 *val) +{ + int ret = -EOPNOTSUPP; + + switch ((u64)reg->address) { + case 0x0: + ret = counters_read_on_cpu(cpu, cpu_read_corecnt, val); + break; + case 0x1: + ret = counters_read_on_cpu(cpu, cpu_read_constcnt, val); + break; + } + + if (!ret) { + *val &= GENMASK_ULL(reg->bit_offset + reg->bit_width - 1, + reg->bit_offset); + *val >>= reg->bit_offset; + } + + return ret; +} + +int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val) +{ + return -EOPNOTSUPP; +} +#endif /* CONFIG_ACPI_CPPC_LIB */ diff --git a/arch/arm64/kernel/trace-events-emulation.h b/arch/arm64/kernel/trace-events-emulation.h new file mode 100644 index 0000000000..6c40f58b84 --- /dev/null +++ b/arch/arm64/kernel/trace-events-emulation.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM emulation + +#if !defined(_TRACE_EMULATION_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_EMULATION_H + +#include + +TRACE_EVENT(instruction_emulation, + + TP_PROTO(const char *instr, u64 addr), + TP_ARGS(instr, addr), + + TP_STRUCT__entry( + __string(instr, instr) + __field(u64, addr) + ), + + TP_fast_assign( + __assign_str(instr, instr); + __entry->addr = addr; + ), + + TP_printk("instr=\"%s\" addr=0x%llx", __get_str(instr), __entry->addr) +); + +#endif /* _TRACE_EMULATION_H */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_PATH . + +#define TRACE_INCLUDE_FILE trace-events-emulation +#include diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c new file mode 100644 index 0000000000..8b70759cdb --- /dev/null +++ b/arch/arm64/kernel/traps.c @@ -0,0 +1,1192 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Based on arch/arm/kernel/traps.c + * + * Copyright (C) 1995-2009 Russell King + * Copyright (C) 2012 ARM Ltd. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static bool __kprobes __check_eq(unsigned long pstate) +{ + return (pstate & PSR_Z_BIT) != 0; +} + +static bool __kprobes __check_ne(unsigned long pstate) +{ + return (pstate & PSR_Z_BIT) == 0; +} + +static bool __kprobes __check_cs(unsigned long pstate) +{ + return (pstate & PSR_C_BIT) != 0; +} + +static bool __kprobes __check_cc(unsigned long pstate) +{ + return (pstate & PSR_C_BIT) == 0; +} + +static bool __kprobes __check_mi(unsigned long pstate) +{ + return (pstate & PSR_N_BIT) != 0; +} + +static bool __kprobes __check_pl(unsigned long pstate) +{ + return (pstate & PSR_N_BIT) == 0; +} + +static bool __kprobes __check_vs(unsigned long pstate) +{ + return (pstate & PSR_V_BIT) != 0; +} + +static bool __kprobes __check_vc(unsigned long pstate) +{ + return (pstate & PSR_V_BIT) == 0; +} + +static bool __kprobes __check_hi(unsigned long pstate) +{ + pstate &= ~(pstate >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */ + return (pstate & PSR_C_BIT) != 0; +} + +static bool __kprobes __check_ls(unsigned long pstate) +{ + pstate &= ~(pstate >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */ + return (pstate & PSR_C_BIT) == 0; +} + +static bool __kprobes __check_ge(unsigned long pstate) +{ + pstate ^= (pstate << 3); /* PSR_N_BIT ^= PSR_V_BIT */ + return (pstate & PSR_N_BIT) == 0; +} + +static bool __kprobes __check_lt(unsigned long pstate) +{ + pstate ^= (pstate << 3); /* PSR_N_BIT ^= PSR_V_BIT */ + return (pstate & PSR_N_BIT) != 0; +} + +static bool __kprobes __check_gt(unsigned long pstate) +{ + /*PSR_N_BIT ^= PSR_V_BIT */ + unsigned long temp = pstate ^ (pstate << 3); + + temp |= (pstate << 1); /*PSR_N_BIT |= PSR_Z_BIT */ + return (temp & PSR_N_BIT) == 0; +} + +static bool __kprobes __check_le(unsigned long pstate) +{ + /*PSR_N_BIT ^= PSR_V_BIT */ + unsigned long temp = pstate ^ (pstate << 3); + + temp |= (pstate << 1); /*PSR_N_BIT |= PSR_Z_BIT */ + return (temp & PSR_N_BIT) != 0; +} + +static bool __kprobes __check_al(unsigned long pstate) +{ + return true; +} + +/* + * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that + * it behaves identically to 0b1110 ("al"). + */ +pstate_check_t * const aarch32_opcode_cond_checks[16] = { + __check_eq, __check_ne, __check_cs, __check_cc, + __check_mi, __check_pl, __check_vs, __check_vc, + __check_hi, __check_ls, __check_ge, __check_lt, + __check_gt, __check_le, __check_al, __check_al +}; + +int show_unhandled_signals = 0; + +static void dump_kernel_instr(const char *lvl, struct pt_regs *regs) +{ + unsigned long addr = instruction_pointer(regs); + char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str; + int i; + + if (user_mode(regs)) + return; + + for (i = -4; i < 1; i++) { + unsigned int val, bad; + + bad = aarch64_insn_read(&((u32 *)addr)[i], &val); + + if (!bad) + p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val); + else + p += sprintf(p, i == 0 ? "(????????) " : "???????? 
"); + } + + printk("%sCode: %s\n", lvl, str); +} + +#ifdef CONFIG_PREEMPT +#define S_PREEMPT " PREEMPT" +#elif defined(CONFIG_PREEMPT_RT) +#define S_PREEMPT " PREEMPT_RT" +#else +#define S_PREEMPT "" +#endif + +#define S_SMP " SMP" + +static int __die(const char *str, long err, struct pt_regs *regs) +{ + static int die_counter; + int ret; + + pr_emerg("Internal error: %s: %016lx [#%d]" S_PREEMPT S_SMP "\n", + str, err, ++die_counter); + + /* trap and error numbers are mostly meaningless on ARM */ + ret = notify_die(DIE_OOPS, str, regs, err, 0, SIGSEGV); + if (ret == NOTIFY_STOP) + return ret; + + print_modules(); + show_regs(regs); + + dump_kernel_instr(KERN_EMERG, regs); + + return ret; +} + +static DEFINE_RAW_SPINLOCK(die_lock); + +/* + * This function is protected against re-entrancy. + */ +void die(const char *str, struct pt_regs *regs, long err) +{ + int ret; + unsigned long flags; + + raw_spin_lock_irqsave(&die_lock, flags); + + oops_enter(); + + console_verbose(); + bust_spinlocks(1); + ret = __die(str, err, regs); + + if (regs && kexec_should_crash(current)) + crash_kexec(regs); + + bust_spinlocks(0); + add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); + oops_exit(); + + if (in_interrupt()) + panic("%s: Fatal exception in interrupt", str); + if (panic_on_oops) + panic("%s: Fatal exception", str); + + raw_spin_unlock_irqrestore(&die_lock, flags); + + if (ret != NOTIFY_STOP) + make_task_dead(SIGSEGV); +} + +static void arm64_show_signal(int signo, const char *str) +{ + static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, + DEFAULT_RATELIMIT_BURST); + struct task_struct *tsk = current; + unsigned long esr = tsk->thread.fault_code; + struct pt_regs *regs = task_pt_regs(tsk); + + /* Leave if the signal won't be shown */ + if (!show_unhandled_signals || + !unhandled_signal(tsk, signo) || + !__ratelimit(&rs)) + return; + + pr_info("%s[%d]: unhandled exception: ", tsk->comm, task_pid_nr(tsk)); + if (esr) + pr_cont("%s, ESR 0x%016lx, ", esr_get_class_string(esr), esr); + + pr_cont("%s", str); + print_vma_addr(KERN_CONT " in ", regs->pc); + pr_cont("\n"); + __show_regs(regs); +} + +void arm64_force_sig_fault(int signo, int code, unsigned long far, + const char *str) +{ + arm64_show_signal(signo, str); + if (signo == SIGKILL) + force_sig(SIGKILL); + else + force_sig_fault(signo, code, (void __user *)far); +} + +void arm64_force_sig_mceerr(int code, unsigned long far, short lsb, + const char *str) +{ + arm64_show_signal(SIGBUS, str); + force_sig_mceerr(code, (void __user *)far, lsb); +} + +void arm64_force_sig_ptrace_errno_trap(int errno, unsigned long far, + const char *str) +{ + arm64_show_signal(SIGTRAP, str); + force_sig_ptrace_errno_trap(errno, (void __user *)far); +} + +void arm64_notify_die(const char *str, struct pt_regs *regs, + int signo, int sicode, unsigned long far, + unsigned long err) +{ + if (user_mode(regs)) { + WARN_ON(regs != current_pt_regs()); + current->thread.fault_address = 0; + current->thread.fault_code = err; + + arm64_force_sig_fault(signo, sicode, far, str); + } else { + die(str, regs, err); + } +} + +#ifdef CONFIG_COMPAT +#define PSTATE_IT_1_0_SHIFT 25 +#define PSTATE_IT_1_0_MASK (0x3 << PSTATE_IT_1_0_SHIFT) +#define PSTATE_IT_7_2_SHIFT 10 +#define PSTATE_IT_7_2_MASK (0x3f << PSTATE_IT_7_2_SHIFT) + +static u32 compat_get_it_state(struct pt_regs *regs) +{ + u32 it, pstate = regs->pstate; + + it = (pstate & PSTATE_IT_1_0_MASK) >> PSTATE_IT_1_0_SHIFT; + it |= ((pstate & PSTATE_IT_7_2_MASK) >> PSTATE_IT_7_2_SHIFT) << 2; + + return it; +} + +static void 
compat_set_it_state(struct pt_regs *regs, u32 it) +{ + u32 pstate_it; + + pstate_it = (it << PSTATE_IT_1_0_SHIFT) & PSTATE_IT_1_0_MASK; + pstate_it |= ((it >> 2) << PSTATE_IT_7_2_SHIFT) & PSTATE_IT_7_2_MASK; + + regs->pstate &= ~PSR_AA32_IT_MASK; + regs->pstate |= pstate_it; +} + +static void advance_itstate(struct pt_regs *regs) +{ + u32 it; + + /* ARM mode */ + if (!(regs->pstate & PSR_AA32_T_BIT) || + !(regs->pstate & PSR_AA32_IT_MASK)) + return; + + it = compat_get_it_state(regs); + + /* + * If this is the last instruction of the block, wipe the IT + * state. Otherwise advance it. + */ + if (!(it & 7)) + it = 0; + else + it = (it & 0xe0) | ((it << 1) & 0x1f); + + compat_set_it_state(regs, it); +} +#else +static void advance_itstate(struct pt_regs *regs) +{ +} +#endif + +void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size) +{ + regs->pc += size; + + /* + * If we were single stepping, we want to get the step exception after + * we return from the trap. + */ + if (user_mode(regs)) + user_fastforward_single_step(current); + + if (compat_user_mode(regs)) + advance_itstate(regs); + else + regs->pstate &= ~PSR_BTYPE_MASK; +} + +static int user_insn_read(struct pt_regs *regs, u32 *insnp) +{ + u32 instr; + unsigned long pc = instruction_pointer(regs); + + if (compat_thumb_mode(regs)) { + /* 16-bit Thumb instruction */ + __le16 instr_le; + if (get_user(instr_le, (__le16 __user *)pc)) + return -EFAULT; + instr = le16_to_cpu(instr_le); + if (aarch32_insn_is_wide(instr)) { + u32 instr2; + + if (get_user(instr_le, (__le16 __user *)(pc + 2))) + return -EFAULT; + instr2 = le16_to_cpu(instr_le); + instr = (instr << 16) | instr2; + } + } else { + /* 32-bit ARM instruction */ + __le32 instr_le; + if (get_user(instr_le, (__le32 __user *)pc)) + return -EFAULT; + instr = le32_to_cpu(instr_le); + } + + *insnp = instr; + return 0; +} + +void force_signal_inject(int signal, int code, unsigned long address, unsigned long err) +{ + const char *desc; + struct pt_regs *regs = current_pt_regs(); + + if (WARN_ON(!user_mode(regs))) + return; + + switch (signal) { + case SIGILL: + desc = "undefined instruction"; + break; + case SIGSEGV: + desc = "illegal memory access"; + break; + default: + desc = "unknown or unrecoverable error"; + break; + } + + /* Force signals we don't understand to SIGKILL */ + if (WARN_ON(signal != SIGKILL && + siginfo_layout(signal, code) != SIL_FAULT)) { + signal = SIGKILL; + } + + arm64_notify_die(desc, regs, signal, code, address, err); +} + +/* + * Set up process info to signal segmentation fault - called on access error. 
+ */ +void arm64_notify_segfault(unsigned long addr) +{ + int code; + + mmap_read_lock(current->mm); + if (find_vma(current->mm, untagged_addr(addr)) == NULL) + code = SEGV_MAPERR; + else + code = SEGV_ACCERR; + mmap_read_unlock(current->mm); + + force_signal_inject(SIGSEGV, code, addr, 0); +} + +void do_el0_undef(struct pt_regs *regs, unsigned long esr) +{ + u32 insn; + + /* check for AArch32 breakpoint instructions */ + if (!aarch32_break_handler(regs)) + return; + + if (user_insn_read(regs, &insn)) + goto out_err; + + if (try_emulate_mrs(regs, insn)) + return; + + if (try_emulate_armv8_deprecated(regs, insn)) + return; + +out_err: + force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0); +} + +void do_el1_undef(struct pt_regs *regs, unsigned long esr) +{ + u32 insn; + + if (aarch64_insn_read((void *)regs->pc, &insn)) + goto out_err; + + if (try_emulate_el1_ssbs(regs, insn)) + return; + +out_err: + die("Oops - Undefined instruction", regs, esr); +} + +void do_el0_bti(struct pt_regs *regs) +{ + force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0); +} + +void do_el1_bti(struct pt_regs *regs, unsigned long esr) +{ + if (efi_runtime_fixup_exception(regs, "BTI violation")) { + regs->pstate &= ~PSR_BTYPE_MASK; + return; + } + die("Oops - BTI", regs, esr); +} + +void do_el0_fpac(struct pt_regs *regs, unsigned long esr) +{ + force_signal_inject(SIGILL, ILL_ILLOPN, regs->pc, esr); +} + +void do_el1_fpac(struct pt_regs *regs, unsigned long esr) +{ + /* + * Unexpected FPAC exception in the kernel: kill the task before it + * does any more harm. + */ + die("Oops - FPAC", regs, esr); +} + +void do_el0_mops(struct pt_regs *regs, unsigned long esr) +{ + bool wrong_option = esr & ESR_ELx_MOPS_ISS_WRONG_OPTION; + bool option_a = esr & ESR_ELx_MOPS_ISS_OPTION_A; + int dstreg = ESR_ELx_MOPS_ISS_DESTREG(esr); + int srcreg = ESR_ELx_MOPS_ISS_SRCREG(esr); + int sizereg = ESR_ELx_MOPS_ISS_SIZEREG(esr); + unsigned long dst, src, size; + + dst = pt_regs_read_reg(regs, dstreg); + src = pt_regs_read_reg(regs, srcreg); + size = pt_regs_read_reg(regs, sizereg); + + /* + * Put the registers back in the original format suitable for a + * prologue instruction, using the generic return routine from the + * Arm ARM (DDI 0487I.a) rules CNTMJ and MWFQH. + */ + if (esr & ESR_ELx_MOPS_ISS_MEM_INST) { + /* SET* instruction */ + if (option_a ^ wrong_option) { + /* Format is from Option A; forward set */ + pt_regs_write_reg(regs, dstreg, dst + size); + pt_regs_write_reg(regs, sizereg, -size); + } + } else { + /* CPY* instruction */ + if (!(option_a ^ wrong_option)) { + /* Format is from Option B */ + if (regs->pstate & PSR_N_BIT) { + /* Backward copy */ + pt_regs_write_reg(regs, dstreg, dst - size); + pt_regs_write_reg(regs, srcreg, src - size); + } + } else { + /* Format is from Option A */ + if (size & BIT(63)) { + /* Forward copy */ + pt_regs_write_reg(regs, dstreg, dst + size); + pt_regs_write_reg(regs, srcreg, src + size); + pt_regs_write_reg(regs, sizereg, -size); + } + } + } + + if (esr & ESR_ELx_MOPS_ISS_FROM_EPILOGUE) + regs->pc -= 8; + else + regs->pc -= 4; + + /* + * If single stepping then finish the step before executing the + * prologue instruction. 
+ */ + user_fastforward_single_step(current); +} + +#define __user_cache_maint(insn, address, res) \ + if (address >= TASK_SIZE_MAX) { \ + res = -EFAULT; \ + } else { \ + uaccess_ttbr0_enable(); \ + asm volatile ( \ + "1: " insn ", %1\n" \ + " mov %w0, #0\n" \ + "2:\n" \ + _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0) \ + : "=r" (res) \ + : "r" (address)); \ + uaccess_ttbr0_disable(); \ + } + +static void user_cache_maint_handler(unsigned long esr, struct pt_regs *regs) +{ + unsigned long tagged_address, address; + int rt = ESR_ELx_SYS64_ISS_RT(esr); + int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT; + int ret = 0; + + tagged_address = pt_regs_read_reg(regs, rt); + address = untagged_addr(tagged_address); + + switch (crm) { + case ESR_ELx_SYS64_ISS_CRM_DC_CVAU: /* DC CVAU, gets promoted */ + __user_cache_maint("dc civac", address, ret); + break; + case ESR_ELx_SYS64_ISS_CRM_DC_CVAC: /* DC CVAC, gets promoted */ + __user_cache_maint("dc civac", address, ret); + break; + case ESR_ELx_SYS64_ISS_CRM_DC_CVADP: /* DC CVADP */ + __user_cache_maint("sys 3, c7, c13, 1", address, ret); + break; + case ESR_ELx_SYS64_ISS_CRM_DC_CVAP: /* DC CVAP */ + __user_cache_maint("sys 3, c7, c12, 1", address, ret); + break; + case ESR_ELx_SYS64_ISS_CRM_DC_CIVAC: /* DC CIVAC */ + __user_cache_maint("dc civac", address, ret); + break; + case ESR_ELx_SYS64_ISS_CRM_IC_IVAU: /* IC IVAU */ + __user_cache_maint("ic ivau", address, ret); + break; + default: + force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0); + return; + } + + if (ret) + arm64_notify_segfault(tagged_address); + else + arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); +} + +static void ctr_read_handler(unsigned long esr, struct pt_regs *regs) +{ + int rt = ESR_ELx_SYS64_ISS_RT(esr); + unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0); + + if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) { + /* Hide DIC so that we can trap the unnecessary maintenance...*/ + val &= ~BIT(CTR_EL0_DIC_SHIFT); + + /* ... and fake IminLine to reduce the number of traps. 
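A quick calculation shows why faking IminLine like this reduces the trap rate. The standalone sketch below only illustrates the arithmetic; the 4K page size and 64-byte real line size are example values.

#include <stdio.h>

int main(void)
{
        /* CTR_EL0.IminLine is the log2 of the smallest I-cache line in
         * 4-byte words, so the advertised line size is 4 << IminLine. */
        unsigned int page_shift = 12;                   /* 4K pages */
        unsigned int fake_iminline = page_shift - 2;    /* as set just below */
        unsigned int real_line = 64;                    /* typical real line */

        /* Prints 4096: userspace cache-maintenance loops now step a whole
         * page at a time, so roughly 4096/64 = 64 times fewer trapped
         * IC IVAU instructions reach user_cache_maint_handler() above. */
        printf("advertised line size: %u bytes\n", 4u << fake_iminline);
        printf("traps reduced by about %ux\n", (4u << fake_iminline) / real_line);
        return 0;
}
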
*/ + val &= ~CTR_EL0_IminLine_MASK; + val |= (PAGE_SHIFT - 2) & CTR_EL0_IminLine_MASK; + } + + pt_regs_write_reg(regs, rt, val); + + arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); +} + +static void cntvct_read_handler(unsigned long esr, struct pt_regs *regs) +{ + int rt = ESR_ELx_SYS64_ISS_RT(esr); + + pt_regs_write_reg(regs, rt, arch_timer_read_counter()); + arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); +} + +static void cntfrq_read_handler(unsigned long esr, struct pt_regs *regs) +{ + int rt = ESR_ELx_SYS64_ISS_RT(esr); + + pt_regs_write_reg(regs, rt, arch_timer_get_rate()); + arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); +} + +static void mrs_handler(unsigned long esr, struct pt_regs *regs) +{ + u32 sysreg, rt; + + rt = ESR_ELx_SYS64_ISS_RT(esr); + sysreg = esr_sys64_to_sysreg(esr); + + if (do_emulate_mrs(regs, sysreg, rt) != 0) + force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0); +} + +static void wfi_handler(unsigned long esr, struct pt_regs *regs) +{ + arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); +} + +struct sys64_hook { + unsigned long esr_mask; + unsigned long esr_val; + void (*handler)(unsigned long esr, struct pt_regs *regs); +}; + +static const struct sys64_hook sys64_hooks[] = { + { + .esr_mask = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK, + .esr_val = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL, + .handler = user_cache_maint_handler, + }, + { + /* Trap read access to CTR_EL0 */ + .esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK, + .esr_val = ESR_ELx_SYS64_ISS_SYS_CTR_READ, + .handler = ctr_read_handler, + }, + { + /* Trap read access to CNTVCT_EL0 */ + .esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK, + .esr_val = ESR_ELx_SYS64_ISS_SYS_CNTVCT, + .handler = cntvct_read_handler, + }, + { + /* Trap read access to CNTVCTSS_EL0 */ + .esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK, + .esr_val = ESR_ELx_SYS64_ISS_SYS_CNTVCTSS, + .handler = cntvct_read_handler, + }, + { + /* Trap read access to CNTFRQ_EL0 */ + .esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK, + .esr_val = ESR_ELx_SYS64_ISS_SYS_CNTFRQ, + .handler = cntfrq_read_handler, + }, + { + /* Trap read access to CPUID registers */ + .esr_mask = ESR_ELx_SYS64_ISS_SYS_MRS_OP_MASK, + .esr_val = ESR_ELx_SYS64_ISS_SYS_MRS_OP_VAL, + .handler = mrs_handler, + }, + { + /* Trap WFI instructions executed in userspace */ + .esr_mask = ESR_ELx_WFx_MASK, + .esr_val = ESR_ELx_WFx_WFI_VAL, + .handler = wfi_handler, + }, + {}, +}; + +#ifdef CONFIG_COMPAT +static bool cp15_cond_valid(unsigned long esr, struct pt_regs *regs) +{ + int cond; + + /* Only a T32 instruction can trap without CV being set */ + if (!(esr & ESR_ELx_CV)) { + u32 it; + + it = compat_get_it_state(regs); + if (!it) + return true; + + cond = it >> 4; + } else { + cond = (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT; + } + + return aarch32_opcode_cond_checks[cond](regs->pstate); +} + +static void compat_cntfrq_read_handler(unsigned long esr, struct pt_regs *regs) +{ + int reg = (esr & ESR_ELx_CP15_32_ISS_RT_MASK) >> ESR_ELx_CP15_32_ISS_RT_SHIFT; + + pt_regs_write_reg(regs, reg, arch_timer_get_rate()); + arm64_skip_faulting_instruction(regs, 4); +} + +static const struct sys64_hook cp15_32_hooks[] = { + { + .esr_mask = ESR_ELx_CP15_32_ISS_SYS_MASK, + .esr_val = ESR_ELx_CP15_32_ISS_SYS_CNTFRQ, + .handler = compat_cntfrq_read_handler, + }, + {}, +}; + +static void compat_cntvct_read_handler(unsigned long esr, struct pt_regs *regs) +{ + int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT; + int rt2 = (esr & ESR_ELx_CP15_64_ISS_RT2_MASK) >> 
ESR_ELx_CP15_64_ISS_RT2_SHIFT; + u64 val = arch_timer_read_counter(); + + pt_regs_write_reg(regs, rt, lower_32_bits(val)); + pt_regs_write_reg(regs, rt2, upper_32_bits(val)); + arm64_skip_faulting_instruction(regs, 4); +} + +static const struct sys64_hook cp15_64_hooks[] = { + { + .esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK, + .esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCT, + .handler = compat_cntvct_read_handler, + }, + { + .esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK, + .esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCTSS, + .handler = compat_cntvct_read_handler, + }, + {}, +}; + +void do_el0_cp15(unsigned long esr, struct pt_regs *regs) +{ + const struct sys64_hook *hook, *hook_base; + + if (!cp15_cond_valid(esr, regs)) { + /* + * There is no T16 variant of a CP access, so we + * always advance PC by 4 bytes. + */ + arm64_skip_faulting_instruction(regs, 4); + return; + } + + switch (ESR_ELx_EC(esr)) { + case ESR_ELx_EC_CP15_32: + hook_base = cp15_32_hooks; + break; + case ESR_ELx_EC_CP15_64: + hook_base = cp15_64_hooks; + break; + default: + do_el0_undef(regs, esr); + return; + } + + for (hook = hook_base; hook->handler; hook++) + if ((hook->esr_mask & esr) == hook->esr_val) { + hook->handler(esr, regs); + return; + } + + /* + * New cp15 instructions may previously have been undefined at + * EL0. Fall back to our usual undefined instruction handler + * so that we handle these consistently. + */ + do_el0_undef(regs, esr); +} +#endif + +void do_el0_sys(unsigned long esr, struct pt_regs *regs) +{ + const struct sys64_hook *hook; + + for (hook = sys64_hooks; hook->handler; hook++) + if ((hook->esr_mask & esr) == hook->esr_val) { + hook->handler(esr, regs); + return; + } + + /* + * New SYS instructions may previously have been undefined at EL0. Fall + * back to our usual undefined instruction handler so that we handle + * these consistently. + */ + do_el0_undef(regs, esr); +} + +static const char *esr_class_str[] = { + [0 ... 
ESR_ELx_EC_MAX] = "UNRECOGNIZED EC", + [ESR_ELx_EC_UNKNOWN] = "Unknown/Uncategorized", + [ESR_ELx_EC_WFx] = "WFI/WFE", + [ESR_ELx_EC_CP15_32] = "CP15 MCR/MRC", + [ESR_ELx_EC_CP15_64] = "CP15 MCRR/MRRC", + [ESR_ELx_EC_CP14_MR] = "CP14 MCR/MRC", + [ESR_ELx_EC_CP14_LS] = "CP14 LDC/STC", + [ESR_ELx_EC_FP_ASIMD] = "ASIMD", + [ESR_ELx_EC_CP10_ID] = "CP10 MRC/VMRS", + [ESR_ELx_EC_PAC] = "PAC", + [ESR_ELx_EC_CP14_64] = "CP14 MCRR/MRRC", + [ESR_ELx_EC_BTI] = "BTI", + [ESR_ELx_EC_ILL] = "PSTATE.IL", + [ESR_ELx_EC_SVC32] = "SVC (AArch32)", + [ESR_ELx_EC_HVC32] = "HVC (AArch32)", + [ESR_ELx_EC_SMC32] = "SMC (AArch32)", + [ESR_ELx_EC_SVC64] = "SVC (AArch64)", + [ESR_ELx_EC_HVC64] = "HVC (AArch64)", + [ESR_ELx_EC_SMC64] = "SMC (AArch64)", + [ESR_ELx_EC_SYS64] = "MSR/MRS (AArch64)", + [ESR_ELx_EC_SVE] = "SVE", + [ESR_ELx_EC_ERET] = "ERET/ERETAA/ERETAB", + [ESR_ELx_EC_FPAC] = "FPAC", + [ESR_ELx_EC_SME] = "SME", + [ESR_ELx_EC_IMP_DEF] = "EL3 IMP DEF", + [ESR_ELx_EC_IABT_LOW] = "IABT (lower EL)", + [ESR_ELx_EC_IABT_CUR] = "IABT (current EL)", + [ESR_ELx_EC_PC_ALIGN] = "PC Alignment", + [ESR_ELx_EC_DABT_LOW] = "DABT (lower EL)", + [ESR_ELx_EC_DABT_CUR] = "DABT (current EL)", + [ESR_ELx_EC_SP_ALIGN] = "SP Alignment", + [ESR_ELx_EC_MOPS] = "MOPS", + [ESR_ELx_EC_FP_EXC32] = "FP (AArch32)", + [ESR_ELx_EC_FP_EXC64] = "FP (AArch64)", + [ESR_ELx_EC_SERROR] = "SError", + [ESR_ELx_EC_BREAKPT_LOW] = "Breakpoint (lower EL)", + [ESR_ELx_EC_BREAKPT_CUR] = "Breakpoint (current EL)", + [ESR_ELx_EC_SOFTSTP_LOW] = "Software Step (lower EL)", + [ESR_ELx_EC_SOFTSTP_CUR] = "Software Step (current EL)", + [ESR_ELx_EC_WATCHPT_LOW] = "Watchpoint (lower EL)", + [ESR_ELx_EC_WATCHPT_CUR] = "Watchpoint (current EL)", + [ESR_ELx_EC_BKPT32] = "BKPT (AArch32)", + [ESR_ELx_EC_VECTOR32] = "Vector catch (AArch32)", + [ESR_ELx_EC_BRK64] = "BRK (AArch64)", +}; + +const char *esr_get_class_string(unsigned long esr) +{ + return esr_class_str[ESR_ELx_EC(esr)]; +} + +/* + * bad_el0_sync handles unexpected, but potentially recoverable synchronous + * exceptions taken from EL0. + */ +void bad_el0_sync(struct pt_regs *regs, int reason, unsigned long esr) +{ + unsigned long pc = instruction_pointer(regs); + + current->thread.fault_address = 0; + current->thread.fault_code = esr; + + arm64_force_sig_fault(SIGILL, ILL_ILLOPC, pc, + "Bad EL0 synchronous exception"); +} + +#ifdef CONFIG_VMAP_STACK + +DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack) + __aligned(16); + +void __noreturn panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far) +{ + unsigned long tsk_stk = (unsigned long)current->stack; + unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr); + unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack); + + console_verbose(); + pr_emerg("Insufficient stack space to handle exception!"); + + pr_emerg("ESR: 0x%016lx -- %s\n", esr, esr_get_class_string(esr)); + pr_emerg("FAR: 0x%016lx\n", far); + + pr_emerg("Task stack: [0x%016lx..0x%016lx]\n", + tsk_stk, tsk_stk + THREAD_SIZE); + pr_emerg("IRQ stack: [0x%016lx..0x%016lx]\n", + irq_stk, irq_stk + IRQ_STACK_SIZE); + pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n", + ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE); + + __show_regs(regs); + + /* + * We use nmi_panic to limit the potential for recusive overflows, and + * to get a better stack trace. 
+ */ + nmi_panic(NULL, "kernel stack overflow"); + cpu_park_loop(); +} +#endif + +void __noreturn arm64_serror_panic(struct pt_regs *regs, unsigned long esr) +{ + console_verbose(); + + pr_crit("SError Interrupt on CPU%d, code 0x%016lx -- %s\n", + smp_processor_id(), esr, esr_get_class_string(esr)); + if (regs) + __show_regs(regs); + + nmi_panic(regs, "Asynchronous SError Interrupt"); + + cpu_park_loop(); +} + +bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned long esr) +{ + unsigned long aet = arm64_ras_serror_get_severity(esr); + + switch (aet) { + case ESR_ELx_AET_CE: /* corrected error */ + case ESR_ELx_AET_UEO: /* restartable, not yet consumed */ + /* + * The CPU can make progress. We may take UEO again as + * a more severe error. + */ + return false; + + case ESR_ELx_AET_UEU: /* Uncorrected Unrecoverable */ + case ESR_ELx_AET_UER: /* Uncorrected Recoverable */ + /* + * The CPU can't make progress. The exception may have + * been imprecise. + * + * Neoverse-N1 #1349291 means a non-KVM SError reported as + * Unrecoverable should be treated as Uncontainable. We + * call arm64_serror_panic() in both cases. + */ + return true; + + case ESR_ELx_AET_UC: /* Uncontainable or Uncategorized error */ + default: + /* Error has been silently propagated */ + arm64_serror_panic(regs, esr); + } +} + +void do_serror(struct pt_regs *regs, unsigned long esr) +{ + /* non-RAS errors are not containable */ + if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr)) + arm64_serror_panic(regs, esr); +} + +/* GENERIC_BUG traps */ +#ifdef CONFIG_GENERIC_BUG +int is_valid_bugaddr(unsigned long addr) +{ + /* + * bug_handler() only called for BRK #BUG_BRK_IMM. + * So the answer is trivial -- any spurious instances with no + * bug table entry will be rejected by report_bug() and passed + * back to the debug-monitors code and handled as a fatal + * unexpected debug exception. 
+ */ + return 1; +} +#endif + +static int bug_handler(struct pt_regs *regs, unsigned long esr) +{ + switch (report_bug(regs->pc, regs)) { + case BUG_TRAP_TYPE_BUG: + die("Oops - BUG", regs, esr); + break; + + case BUG_TRAP_TYPE_WARN: + break; + + default: + /* unknown/unrecognised bug trap type */ + return DBG_HOOK_ERROR; + } + + /* If thread survives, skip over the BUG instruction and continue: */ + arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); + return DBG_HOOK_HANDLED; +} + +static struct break_hook bug_break_hook = { + .fn = bug_handler, + .imm = BUG_BRK_IMM, +}; + +#ifdef CONFIG_CFI_CLANG +static int cfi_handler(struct pt_regs *regs, unsigned long esr) +{ + unsigned long target; + u32 type; + + target = pt_regs_read_reg(regs, FIELD_GET(CFI_BRK_IMM_TARGET, esr)); + type = (u32)pt_regs_read_reg(regs, FIELD_GET(CFI_BRK_IMM_TYPE, esr)); + + switch (report_cfi_failure(regs, regs->pc, &target, type)) { + case BUG_TRAP_TYPE_BUG: + die("Oops - CFI", regs, esr); + break; + + case BUG_TRAP_TYPE_WARN: + break; + + default: + return DBG_HOOK_ERROR; + } + + arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); + return DBG_HOOK_HANDLED; +} + +static struct break_hook cfi_break_hook = { + .fn = cfi_handler, + .imm = CFI_BRK_IMM_BASE, + .mask = CFI_BRK_IMM_MASK, +}; +#endif /* CONFIG_CFI_CLANG */ + +static int reserved_fault_handler(struct pt_regs *regs, unsigned long esr) +{ + pr_err("%s generated an invalid instruction at %pS!\n", + "Kernel text patching", + (void *)instruction_pointer(regs)); + + /* We cannot handle this */ + return DBG_HOOK_ERROR; +} + +static struct break_hook fault_break_hook = { + .fn = reserved_fault_handler, + .imm = FAULT_BRK_IMM, +}; + +#ifdef CONFIG_KASAN_SW_TAGS + +#define KASAN_ESR_RECOVER 0x20 +#define KASAN_ESR_WRITE 0x10 +#define KASAN_ESR_SIZE_MASK 0x0f +#define KASAN_ESR_SIZE(esr) (1 << ((esr) & KASAN_ESR_SIZE_MASK)) + +static int kasan_handler(struct pt_regs *regs, unsigned long esr) +{ + bool recover = esr & KASAN_ESR_RECOVER; + bool write = esr & KASAN_ESR_WRITE; + size_t size = KASAN_ESR_SIZE(esr); + void *addr = (void *)regs->regs[0]; + u64 pc = regs->pc; + + kasan_report(addr, size, write, pc); + + /* + * The instrumentation allows to control whether we can proceed after + * a crash was detected. This is done by passing the -recover flag to + * the compiler. Disabling recovery allows to generate more compact + * code. + * + * Unfortunately disabling recovery doesn't work for the kernel right + * now. KASAN reporting is disabled in some contexts (for example when + * the allocator accesses slab object metadata; this is controlled by + * current->kasan_depth). All these accesses are detected by the tool, + * even though the reports for them are not printed. + * + * This is something that might be fixed at some point in the future. 
+ */ + if (!recover) + die("Oops - KASAN", regs, esr); + + /* If thread survives, skip over the brk instruction and continue: */ + arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); + return DBG_HOOK_HANDLED; +} + +static struct break_hook kasan_break_hook = { + .fn = kasan_handler, + .imm = KASAN_BRK_IMM, + .mask = KASAN_BRK_MASK, +}; +#endif + +#ifdef CONFIG_UBSAN_TRAP +static int ubsan_handler(struct pt_regs *regs, unsigned long esr) +{ + die(report_ubsan_failure(regs, esr & UBSAN_BRK_MASK), regs, esr); + return DBG_HOOK_HANDLED; +} + +static struct break_hook ubsan_break_hook = { + .fn = ubsan_handler, + .imm = UBSAN_BRK_IMM, + .mask = UBSAN_BRK_MASK, +}; +#endif + +#define esr_comment(esr) ((esr) & ESR_ELx_BRK64_ISS_COMMENT_MASK) + +/* + * Initial handler for AArch64 BRK exceptions + * This handler only used until debug_traps_init(). + */ +int __init early_brk64(unsigned long addr, unsigned long esr, + struct pt_regs *regs) +{ +#ifdef CONFIG_CFI_CLANG + if ((esr_comment(esr) & ~CFI_BRK_IMM_MASK) == CFI_BRK_IMM_BASE) + return cfi_handler(regs, esr) != DBG_HOOK_HANDLED; +#endif +#ifdef CONFIG_KASAN_SW_TAGS + if ((esr_comment(esr) & ~KASAN_BRK_MASK) == KASAN_BRK_IMM) + return kasan_handler(regs, esr) != DBG_HOOK_HANDLED; +#endif +#ifdef CONFIG_UBSAN_TRAP + if ((esr_comment(esr) & ~UBSAN_BRK_MASK) == UBSAN_BRK_IMM) + return ubsan_handler(regs, esr) != DBG_HOOK_HANDLED; +#endif + return bug_handler(regs, esr) != DBG_HOOK_HANDLED; +} + +void __init trap_init(void) +{ + register_kernel_break_hook(&bug_break_hook); +#ifdef CONFIG_CFI_CLANG + register_kernel_break_hook(&cfi_break_hook); +#endif + register_kernel_break_hook(&fault_break_hook); +#ifdef CONFIG_KASAN_SW_TAGS + register_kernel_break_hook(&kasan_break_hook); +#endif +#ifdef CONFIG_UBSAN_TRAP + register_kernel_break_hook(&ubsan_break_hook); +#endif + debug_traps_init(); +} diff --git a/arch/arm64/kernel/vdso-wrap.S b/arch/arm64/kernel/vdso-wrap.S new file mode 100644 index 0000000000..c4b1990bf2 --- /dev/null +++ b/arch/arm64/kernel/vdso-wrap.S @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2012 ARM Limited + * + * Author: Will Deacon + */ + +#include +#include +#include +#include +#include + + .globl vdso_start, vdso_end + .section .rodata + .balign PAGE_SIZE +vdso_start: + .incbin "arch/arm64/kernel/vdso/vdso.so" + .balign PAGE_SIZE +vdso_end: + + .previous + +emit_aarch64_feature_1_and diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c new file mode 100644 index 0000000000..d9e1355730 --- /dev/null +++ b/arch/arm64/kernel/vdso.c @@ -0,0 +1,451 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * VDSO implementations. 
+ * + * Copyright (C) 2012 ARM Limited + * + * Author: Will Deacon + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +enum vdso_abi { + VDSO_ABI_AA64, + VDSO_ABI_AA32, +}; + +enum vvar_pages { + VVAR_DATA_PAGE_OFFSET, + VVAR_TIMENS_PAGE_OFFSET, + VVAR_NR_PAGES, +}; + +struct vdso_abi_info { + const char *name; + const char *vdso_code_start; + const char *vdso_code_end; + unsigned long vdso_pages; + /* Data Mapping */ + struct vm_special_mapping *dm; + /* Code Mapping */ + struct vm_special_mapping *cm; +}; + +static struct vdso_abi_info vdso_info[] __ro_after_init = { + [VDSO_ABI_AA64] = { + .name = "vdso", + .vdso_code_start = vdso_start, + .vdso_code_end = vdso_end, + }, +#ifdef CONFIG_COMPAT_VDSO + [VDSO_ABI_AA32] = { + .name = "vdso32", + .vdso_code_start = vdso32_start, + .vdso_code_end = vdso32_end, + }, +#endif /* CONFIG_COMPAT_VDSO */ +}; + +/* + * The vDSO data page. + */ +static union { + struct vdso_data data[CS_BASES]; + u8 page[PAGE_SIZE]; +} vdso_data_store __page_aligned_data; +struct vdso_data *vdso_data = vdso_data_store.data; + +static int vdso_mremap(const struct vm_special_mapping *sm, + struct vm_area_struct *new_vma) +{ + current->mm->context.vdso = (void *)new_vma->vm_start; + + return 0; +} + +static int __init __vdso_init(enum vdso_abi abi) +{ + int i; + struct page **vdso_pagelist; + unsigned long pfn; + + if (memcmp(vdso_info[abi].vdso_code_start, "\177ELF", 4)) { + pr_err("vDSO is not a valid ELF object!\n"); + return -EINVAL; + } + + vdso_info[abi].vdso_pages = ( + vdso_info[abi].vdso_code_end - + vdso_info[abi].vdso_code_start) >> + PAGE_SHIFT; + + vdso_pagelist = kcalloc(vdso_info[abi].vdso_pages, + sizeof(struct page *), + GFP_KERNEL); + if (vdso_pagelist == NULL) + return -ENOMEM; + + /* Grab the vDSO code pages. */ + pfn = sym_to_pfn(vdso_info[abi].vdso_code_start); + + for (i = 0; i < vdso_info[abi].vdso_pages; i++) + vdso_pagelist[i] = pfn_to_page(pfn + i); + + vdso_info[abi].cm->pages = vdso_pagelist; + + return 0; +} + +#ifdef CONFIG_TIME_NS +struct vdso_data *arch_get_vdso_data(void *vvar_page) +{ + return (struct vdso_data *)(vvar_page); +} + +/* + * The vvar mapping contains data for a specific time namespace, so when a task + * changes namespace we must unmap its vvar data for the old namespace. + * Subsequent faults will map in data for the new namespace. + * + * For more details see timens_setup_vdso_data(). 
+ */ +int vdso_join_timens(struct task_struct *task, struct time_namespace *ns) +{ + struct mm_struct *mm = task->mm; + struct vm_area_struct *vma; + VMA_ITERATOR(vmi, mm, 0); + + mmap_read_lock(mm); + + for_each_vma(vmi, vma) { + if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA64].dm)) + zap_vma_pages(vma); +#ifdef CONFIG_COMPAT_VDSO + if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA32].dm)) + zap_vma_pages(vma); +#endif + } + + mmap_read_unlock(mm); + return 0; +} +#endif + +static vm_fault_t vvar_fault(const struct vm_special_mapping *sm, + struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct page *timens_page = find_timens_vvar_page(vma); + unsigned long pfn; + + switch (vmf->pgoff) { + case VVAR_DATA_PAGE_OFFSET: + if (timens_page) + pfn = page_to_pfn(timens_page); + else + pfn = sym_to_pfn(vdso_data); + break; +#ifdef CONFIG_TIME_NS + case VVAR_TIMENS_PAGE_OFFSET: + /* + * If a task belongs to a time namespace then a namespace + * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and + * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET + * offset. + * See also the comment near timens_setup_vdso_data(). + */ + if (!timens_page) + return VM_FAULT_SIGBUS; + pfn = sym_to_pfn(vdso_data); + break; +#endif /* CONFIG_TIME_NS */ + default: + return VM_FAULT_SIGBUS; + } + + return vmf_insert_pfn(vma, vmf->address, pfn); +} + +static int __setup_additional_pages(enum vdso_abi abi, + struct mm_struct *mm, + struct linux_binprm *bprm, + int uses_interp) +{ + unsigned long vdso_base, vdso_text_len, vdso_mapping_len; + unsigned long gp_flags = 0; + void *ret; + + BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES); + + vdso_text_len = vdso_info[abi].vdso_pages << PAGE_SHIFT; + /* Be sure to map the data page */ + vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE; + + vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0); + if (IS_ERR_VALUE(vdso_base)) { + ret = ERR_PTR(vdso_base); + goto up_fail; + } + + ret = _install_special_mapping(mm, vdso_base, VVAR_NR_PAGES * PAGE_SIZE, + VM_READ|VM_MAYREAD|VM_PFNMAP, + vdso_info[abi].dm); + if (IS_ERR(ret)) + goto up_fail; + + if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && system_supports_bti()) + gp_flags = VM_ARM64_BTI; + + vdso_base += VVAR_NR_PAGES * PAGE_SIZE; + mm->context.vdso = (void *)vdso_base; + ret = _install_special_mapping(mm, vdso_base, vdso_text_len, + VM_READ|VM_EXEC|gp_flags| + VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, + vdso_info[abi].cm); + if (IS_ERR(ret)) + goto up_fail; + + return 0; + +up_fail: + mm->context.vdso = NULL; + return PTR_ERR(ret); +} + +#ifdef CONFIG_COMPAT +/* + * Create and map the vectors page for AArch32 tasks. 
+ */ +enum aarch32_map { + AA32_MAP_VECTORS, /* kuser helpers */ + AA32_MAP_SIGPAGE, + AA32_MAP_VVAR, + AA32_MAP_VDSO, +}; + +static struct page *aarch32_vectors_page __ro_after_init; +static struct page *aarch32_sig_page __ro_after_init; + +static int aarch32_sigpage_mremap(const struct vm_special_mapping *sm, + struct vm_area_struct *new_vma) +{ + current->mm->context.sigpage = (void *)new_vma->vm_start; + + return 0; +} + +static struct vm_special_mapping aarch32_vdso_maps[] = { + [AA32_MAP_VECTORS] = { + .name = "[vectors]", /* ABI */ + .pages = &aarch32_vectors_page, + }, + [AA32_MAP_SIGPAGE] = { + .name = "[sigpage]", /* ABI */ + .pages = &aarch32_sig_page, + .mremap = aarch32_sigpage_mremap, + }, + [AA32_MAP_VVAR] = { + .name = "[vvar]", + .fault = vvar_fault, + }, + [AA32_MAP_VDSO] = { + .name = "[vdso]", + .mremap = vdso_mremap, + }, +}; + +static int aarch32_alloc_kuser_vdso_page(void) +{ + extern char __kuser_helper_start[], __kuser_helper_end[]; + int kuser_sz = __kuser_helper_end - __kuser_helper_start; + unsigned long vdso_page; + + if (!IS_ENABLED(CONFIG_KUSER_HELPERS)) + return 0; + + vdso_page = get_zeroed_page(GFP_KERNEL); + if (!vdso_page) + return -ENOMEM; + + memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start, + kuser_sz); + aarch32_vectors_page = virt_to_page((void *)vdso_page); + return 0; +} + +#define COMPAT_SIGPAGE_POISON_WORD 0xe7fddef1 +static int aarch32_alloc_sigpage(void) +{ + extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[]; + int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start; + __le32 poison = cpu_to_le32(COMPAT_SIGPAGE_POISON_WORD); + void *sigpage; + + sigpage = (void *)__get_free_page(GFP_KERNEL); + if (!sigpage) + return -ENOMEM; + + memset32(sigpage, (__force u32)poison, PAGE_SIZE / sizeof(poison)); + memcpy(sigpage, __aarch32_sigret_code_start, sigret_sz); + aarch32_sig_page = virt_to_page(sigpage); + return 0; +} + +static int __init __aarch32_alloc_vdso_pages(void) +{ + + if (!IS_ENABLED(CONFIG_COMPAT_VDSO)) + return 0; + + vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR]; + vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO]; + + return __vdso_init(VDSO_ABI_AA32); +} + +static int __init aarch32_alloc_vdso_pages(void) +{ + int ret; + + ret = __aarch32_alloc_vdso_pages(); + if (ret) + return ret; + + ret = aarch32_alloc_sigpage(); + if (ret) + return ret; + + return aarch32_alloc_kuser_vdso_page(); +} +arch_initcall(aarch32_alloc_vdso_pages); + +static int aarch32_kuser_helpers_setup(struct mm_struct *mm) +{ + void *ret; + + if (!IS_ENABLED(CONFIG_KUSER_HELPERS)) + return 0; + + /* + * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's + * not safe to CoW the page containing the CPU exception vectors. + */ + ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE, + VM_READ | VM_EXEC | + VM_MAYREAD | VM_MAYEXEC, + &aarch32_vdso_maps[AA32_MAP_VECTORS]); + + return PTR_ERR_OR_ZERO(ret); +} + +static int aarch32_sigreturn_setup(struct mm_struct *mm) +{ + unsigned long addr; + void *ret; + + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0); + if (IS_ERR_VALUE(addr)) { + ret = ERR_PTR(addr); + goto out; + } + + /* + * VM_MAYWRITE is required to allow gdb to Copy-on-Write and + * set breakpoints. 
+ */ + ret = _install_special_mapping(mm, addr, PAGE_SIZE, + VM_READ | VM_EXEC | VM_MAYREAD | + VM_MAYWRITE | VM_MAYEXEC, + &aarch32_vdso_maps[AA32_MAP_SIGPAGE]); + if (IS_ERR(ret)) + goto out; + + mm->context.sigpage = (void *)addr; + +out: + return PTR_ERR_OR_ZERO(ret); +} + +int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) +{ + struct mm_struct *mm = current->mm; + int ret; + + if (mmap_write_lock_killable(mm)) + return -EINTR; + + ret = aarch32_kuser_helpers_setup(mm); + if (ret) + goto out; + + if (IS_ENABLED(CONFIG_COMPAT_VDSO)) { + ret = __setup_additional_pages(VDSO_ABI_AA32, mm, bprm, + uses_interp); + if (ret) + goto out; + } + + ret = aarch32_sigreturn_setup(mm); +out: + mmap_write_unlock(mm); + return ret; +} +#endif /* CONFIG_COMPAT */ + +enum aarch64_map { + AA64_MAP_VVAR, + AA64_MAP_VDSO, +}; + +static struct vm_special_mapping aarch64_vdso_maps[] __ro_after_init = { + [AA64_MAP_VVAR] = { + .name = "[vvar]", + .fault = vvar_fault, + }, + [AA64_MAP_VDSO] = { + .name = "[vdso]", + .mremap = vdso_mremap, + }, +}; + +static int __init vdso_init(void) +{ + vdso_info[VDSO_ABI_AA64].dm = &aarch64_vdso_maps[AA64_MAP_VVAR]; + vdso_info[VDSO_ABI_AA64].cm = &aarch64_vdso_maps[AA64_MAP_VDSO]; + + return __vdso_init(VDSO_ABI_AA64); +} +arch_initcall(vdso_init); + +int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) +{ + struct mm_struct *mm = current->mm; + int ret; + + if (mmap_write_lock_killable(mm)) + return -EINTR; + + ret = __setup_additional_pages(VDSO_ABI_AA64, mm, bprm, uses_interp); + mmap_write_unlock(mm); + + return ret; +} diff --git a/arch/arm64/kernel/vdso/.gitignore b/arch/arm64/kernel/vdso/.gitignore new file mode 100644 index 0000000000..652e31d825 --- /dev/null +++ b/arch/arm64/kernel/vdso/.gitignore @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +vdso.lds diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile new file mode 100644 index 0000000000..fe7a53c678 --- /dev/null +++ b/arch/arm64/kernel/vdso/Makefile @@ -0,0 +1,90 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Building a vDSO image for AArch64. +# +# Author: Will Deacon +# Heavily based on the vDSO Makefiles for other archs. +# + +# Include the generic Makefile to check the built vdso. +include $(srctree)/lib/vdso/Makefile + +obj-vdso := vgettimeofday.o note.o sigreturn.o + +# Build rules +targets := $(obj-vdso) vdso.so vdso.so.dbg +obj-vdso := $(addprefix $(obj)/, $(obj-vdso)) + +btildflags-$(CONFIG_ARM64_BTI_KERNEL) += -z force-bti + +# -Bsymbolic has been added for consistency with arm, the compat vDSO and +# potential future proofing if we end up with internal calls to the exported +# routines, as x86 does (see 6f121e548f83 ("x86, vdso: Reimplement vdso.so +# preparation in build-time C")). +ldflags-y := -shared -soname=linux-vdso.so.1 --hash-style=sysv \ + -Bsymbolic --build-id=sha1 -n $(btildflags-y) + +ifdef CONFIG_LD_ORPHAN_WARN + ldflags-y += --orphan-handling=$(CONFIG_LD_ORPHAN_WARN_LEVEL) +endif + +ldflags-y += -T + +ccflags-y := -fno-common -fno-builtin -fno-stack-protector -ffixed-x18 +ccflags-y += -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO + +# -Wmissing-prototypes and -Wmissing-declarations are removed from +# the CFLAGS of vgettimeofday.c to make possible to build the +# kernel with CONFIG_WERROR enabled. 
+CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS) \ + $(RANDSTRUCT_CFLAGS) $(GCC_PLUGINS_CFLAGS) \ + $(CC_FLAGS_LTO) $(CC_FLAGS_CFI) \ + -Wmissing-prototypes -Wmissing-declarations +KASAN_SANITIZE := n +KCSAN_SANITIZE := n +UBSAN_SANITIZE := n +OBJECT_FILES_NON_STANDARD := y +KCOV_INSTRUMENT := n + +CFLAGS_vgettimeofday.o = -O2 -mcmodel=tiny -fasynchronous-unwind-tables + +ifneq ($(c-gettimeofday-y),) + CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y) +endif + +# Disable gcov profiling for VDSO code +GCOV_PROFILE := n + +targets += vdso.lds +CPPFLAGS_vdso.lds += -P -C -U$(ARCH) + +# Link rule for the .so file, .lds has to be first +$(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE + $(call if_changed,vdsold_and_vdso_check) + +# Strip rule for the .so file +$(obj)/%.so: OBJCOPYFLAGS := -S +$(obj)/%.so: $(obj)/%.so.dbg FORCE + $(call if_changed,objcopy) + +# Generate VDSO offsets using helper script +gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh +quiet_cmd_vdsosym = VDSOSYM $@ + cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@ + +include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE + $(call if_changed,vdsosym) + +# Actual build commands +quiet_cmd_vdsold_and_vdso_check = LD $@ + cmd_vdsold_and_vdso_check = $(cmd_ld); $(cmd_vdso_check) + +# Install commands for the unstripped file +quiet_cmd_vdso_install = INSTALL $@ + cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@ + +vdso.so: $(obj)/vdso.so.dbg + @mkdir -p $(MODLIB)/vdso + $(call cmd,vdso_install) + +vdso_install: vdso.so diff --git a/arch/arm64/kernel/vdso/gen_vdso_offsets.sh b/arch/arm64/kernel/vdso/gen_vdso_offsets.sh new file mode 100755 index 0000000000..0387209d65 --- /dev/null +++ b/arch/arm64/kernel/vdso/gen_vdso_offsets.sh @@ -0,0 +1,16 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 + +# +# Match symbols in the DSO that look like VDSO_*; produce a header file +# of constant offsets into the shared object. +# +# Doing this inside the Makefile will break the $(filter-out) function, +# causing Kbuild to rebuild the vdso-offsets header file every time. +# +# Author: Will Deacon +# + +LC_ALL=C +sed -n -e 's/^00*/0/' -e \ +'s/^\([0-9a-fA-F]*\) . VDSO_\([a-zA-Z0-9_]*\)$/\#define vdso_offset_\2 0x\1/p' diff --git a/arch/arm64/kernel/vdso/note.S b/arch/arm64/kernel/vdso/note.S new file mode 100644 index 0000000000..3d4e82290c --- /dev/null +++ b/arch/arm64/kernel/vdso/note.S @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2012 ARM Limited + * + * Author: Will Deacon + * + * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text. + * Here we can supply some information useful to userland. + */ + +#include +#include +#include +#include +#include + +ELFNOTE_START(Linux, 0, "a") + .long LINUX_VERSION_CODE +ELFNOTE_END + +BUILD_SALT + +emit_aarch64_feature_1_and diff --git a/arch/arm64/kernel/vdso/sigreturn.S b/arch/arm64/kernel/vdso/sigreturn.S new file mode 100644 index 0000000000..0e18729abc --- /dev/null +++ b/arch/arm64/kernel/vdso/sigreturn.S @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Sigreturn trampoline for returning from a signal when the SA_RESTORER + * flag is not set. It serves primarily as a hall of shame for crappy + * unwinders and features an exciting but mysterious NOP instruction. + * + * It's also fragile as hell, so please think twice before changing anything + * in here. 
+ * + * Copyright (C) 2012 ARM Limited + * + * Author: Will Deacon + */ + +#include +#include +#include + + .text + +/* + * NOTE!!! You may notice that all of the .cfi directives in this file have + * been commented out. This is because they have been shown to trigger segfaults + * in libgcc when unwinding out of a SIGCANCEL handler to invoke pthread + * cleanup handlers during the thread cancellation dance. By omitting the + * directives, we trigger an arm64-specific fallback path in the unwinder which + * recognises the signal frame and restores many of the registers directly from + * the sigcontext. Re-enabling the cfi directives here therefore needs to be + * much more comprehensive to reduce the risk of further regressions. + */ + +/* Ensure that the mysterious NOP can be associated with a function. */ +// .cfi_startproc + +/* + * .cfi_signal_frame causes the corresponding Frame Description Entry (FDE) in + * the .eh_frame section to be annotated as a signal frame. This allows DWARF + * unwinders (e.g. libstdc++) to implement _Unwind_GetIPInfo() and identify + * the next frame using the unmodified return address instead of subtracting 1, + * which may yield the wrong FDE. + */ +// .cfi_signal_frame + +/* + * Tell the unwinder where to locate the frame record linking back to the + * interrupted context. We don't provide unwind info for registers other than + * the frame pointer and the link register here; in practice, this is likely to + * be insufficient for unwinding in C/C++ based runtimes, especially without a + * means to restore the stack pointer. Thankfully, unwinders and debuggers + * already have baked-in strategies for attempting to unwind out of signals. + */ +// .cfi_def_cfa x29, 0 +// .cfi_offset x29, 0 * 8 +// .cfi_offset x30, 1 * 8 + +/* + * This mysterious NOP is required for some unwinders (e.g. libc++) that + * unconditionally subtract one from the result of _Unwind_GetIP() in order to + * identify the calling function. + * Hack borrowed from arch/powerpc/kernel/vdso64/sigtramp.S. + */ + nop // Mysterious NOP + +/* + * GDB, libgcc and libunwind rely on being able to identify the sigreturn + * instruction sequence to unwind from signal handlers. We cannot, therefore, + * use SYM_FUNC_START() here, as it will emit a BTI C instruction and break the + * unwinder. Thankfully, this function is only ever called from a RET and so + * omitting the landing pad is perfectly fine. + */ +SYM_CODE_START(__kernel_rt_sigreturn) +// PLEASE DO NOT MODIFY + mov x8, #__NR_rt_sigreturn +// PLEASE DO NOT MODIFY + svc #0 +// PLEASE DO NOT MODIFY +// .cfi_endproc +SYM_CODE_END(__kernel_rt_sigreturn) + +emit_aarch64_feature_1_and diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S new file mode 100644 index 0000000000..45354f2ddf --- /dev/null +++ b/arch/arm64/kernel/vdso/vdso.lds.S @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * GNU linker script for the VDSO library. +* + * Copyright (C) 2012 ARM Limited + * + * Author: Will Deacon + * Heavily based on the vDSO linker scripts for other archs. + */ + +#include +#include +#include +#include + +OUTPUT_FORMAT("elf64-littleaarch64", "elf64-bigaarch64", "elf64-littleaarch64") +OUTPUT_ARCH(aarch64) + +SECTIONS +{ + PROVIDE(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE); +#ifdef CONFIG_TIME_NS + PROVIDE(_timens_data = _vdso_data + PAGE_SIZE); +#endif + . 
= VDSO_LBASE + SIZEOF_HEADERS; + + .hash : { *(.hash) } :text + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + + /* + * Discard .note.gnu.property sections which are unused and have + * different alignment requirement from vDSO note sections. + */ + /DISCARD/ : { + *(.note.GNU-stack .note.gnu.property) + } + .note : { *(.note.*) } :text :note + + . = ALIGN(16); + + .text : { *(.text*) } :text =0xd503201f + PROVIDE (__etext = .); + PROVIDE (_etext = .); + PROVIDE (etext = .); + + . = ALIGN(4); + .altinstructions : { + *(.altinstructions) + } + + .dynamic : { *(.dynamic) } :text :dynamic + + .rela.dyn : ALIGN(8) { *(.rela .rela*) } + + .rodata : { + *(.rodata*) + *(.got) + *(.got.plt) + *(.plt) + *(.plt.*) + *(.iplt) + *(.igot .igot.plt) + } :text + + _end = .; + PROVIDE(end = .); + + DWARF_DEBUG + ELF_DETAILS + + /DISCARD/ : { + *(.data .data.* .gnu.linkonce.d.* .sdata*) + *(.bss .sbss .dynbss .dynsbss) + *(.eh_frame .eh_frame_hdr) + } +} + +/* + * We must supply the ELF program headers explicitly to get just one + * PT_LOAD segment, and set the flags explicitly to make segments read-only. + */ +PHDRS +{ + text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */ + dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ + note PT_NOTE FLAGS(4); /* PF_R */ +} + +/* + * This controls what symbols we export from the DSO. + */ +VERSION +{ + LINUX_2.6.39 { + global: + __kernel_rt_sigreturn; + __kernel_gettimeofday; + __kernel_clock_gettime; + __kernel_clock_getres; + local: *; + }; +} + +/* + * Make the sigreturn code visible to the kernel. + */ +VDSO_sigtramp = __kernel_rt_sigreturn; diff --git a/arch/arm64/kernel/vdso/vgettimeofday.c b/arch/arm64/kernel/vdso/vgettimeofday.c new file mode 100644 index 0000000000..9941c5b04f --- /dev/null +++ b/arch/arm64/kernel/vdso/vgettimeofday.c @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ARM64 userspace implementations of gettimeofday() and similar. 
+ * + * Copyright (C) 2018 ARM Limited + * + */ + +int __kernel_clock_gettime(clockid_t clock, struct __kernel_timespec *ts); +int __kernel_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz); +int __kernel_clock_getres(clockid_t clock_id, struct __kernel_timespec *res); + +int __kernel_clock_gettime(clockid_t clock, + struct __kernel_timespec *ts) +{ + return __cvdso_clock_gettime(clock, ts); +} + +int __kernel_gettimeofday(struct __kernel_old_timeval *tv, + struct timezone *tz) +{ + return __cvdso_gettimeofday(tv, tz); +} + +int __kernel_clock_getres(clockid_t clock_id, + struct __kernel_timespec *res) +{ + return __cvdso_clock_getres(clock_id, res); +} diff --git a/arch/arm64/kernel/vdso32-wrap.S b/arch/arm64/kernel/vdso32-wrap.S new file mode 100644 index 0000000000..e72ac7bc4c --- /dev/null +++ b/arch/arm64/kernel/vdso32-wrap.S @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2012 ARM Limited + */ + +#include +#include +#include +#include + + .globl vdso32_start, vdso32_end + .section .rodata + .balign PAGE_SIZE +vdso32_start: + .incbin "arch/arm64/kernel/vdso32/vdso.so" + .balign PAGE_SIZE +vdso32_end: + + .previous diff --git a/arch/arm64/kernel/vdso32/.gitignore b/arch/arm64/kernel/vdso32/.gitignore new file mode 100644 index 0000000000..3542fa24e2 --- /dev/null +++ b/arch/arm64/kernel/vdso32/.gitignore @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only +vdso.lds +vdso.so.raw diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile new file mode 100644 index 0000000000..2f73e5bca2 --- /dev/null +++ b/arch/arm64/kernel/vdso32/Makefile @@ -0,0 +1,184 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for vdso32 +# + +include $(srctree)/lib/vdso/Makefile + +# Same as cc-*option, but using CC_COMPAT instead of CC +ifeq ($(CONFIG_CC_IS_CLANG), y) +CC_COMPAT ?= $(CC) +CC_COMPAT += --target=arm-linux-gnueabi +else +CC_COMPAT ?= $(CROSS_COMPILE_COMPAT)gcc +endif + +ifeq ($(CONFIG_LD_IS_LLD), y) +LD_COMPAT ?= $(LD) +else +LD_COMPAT ?= $(CROSS_COMPILE_COMPAT)ld +endif + +cc32-option = $(call try-run,\ + $(CC_COMPAT) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2)) +cc32-disable-warning = $(call try-run,\ + $(CC_COMPAT) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1))) + +# We cannot use the global flags to compile the vDSO files, the main reason +# being that the 32-bit compiler may be older than the main (64-bit) compiler +# and therefore may not understand flags set using $(cc-option ...). Besides, +# arch-specific options should be taken from the arm Makefile instead of the +# arm64 one. +# As a result we set our own flags here. 
+ +# KBUILD_CPPFLAGS and NOSTDINC_FLAGS from top-level Makefile +VDSO_CPPFLAGS := -DBUILD_VDSO -D__KERNEL__ -nostdinc +VDSO_CPPFLAGS += -isystem $(shell $(CC_COMPAT) -print-file-name=include 2>/dev/null) +VDSO_CPPFLAGS += $(LINUXINCLUDE) + +# Common C and assembly flags +# From top-level Makefile +VDSO_CAFLAGS := $(VDSO_CPPFLAGS) +VDSO_CAFLAGS += $(call cc32-option,-fno-PIE) +ifdef CONFIG_DEBUG_INFO +VDSO_CAFLAGS += -g +endif + +# From arm Makefile +VDSO_CAFLAGS += $(call cc32-option,-fno-dwarf2-cfi-asm) +VDSO_CAFLAGS += -mabi=aapcs-linux -mfloat-abi=soft +ifeq ($(CONFIG_CPU_BIG_ENDIAN), y) +VDSO_CAFLAGS += -mbig-endian +else +VDSO_CAFLAGS += -mlittle-endian +endif + +# From arm vDSO Makefile +VDSO_CAFLAGS += -fPIC -fno-builtin -fno-stack-protector +VDSO_CAFLAGS += -DDISABLE_BRANCH_PROFILING +VDSO_CAFLAGS += -march=armv8-a + +VDSO_CFLAGS := $(VDSO_CAFLAGS) +VDSO_CFLAGS += -DENABLE_COMPAT_VDSO=1 +# KBUILD_CFLAGS from top-level Makefile +VDSO_CFLAGS += -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ + -fno-strict-aliasing -fno-common \ + -Werror-implicit-function-declaration \ + -Wno-format-security \ + -std=gnu11 +VDSO_CFLAGS += -O2 +# Some useful compiler-dependent flags from top-level Makefile +VDSO_CFLAGS += $(call cc32-option,-Wno-pointer-sign) +VDSO_CFLAGS += -fno-strict-overflow +VDSO_CFLAGS += $(call cc32-option,-Werror=strict-prototypes) +VDSO_CFLAGS += -Werror=date-time +VDSO_CFLAGS += $(call cc32-option,-Werror=incompatible-pointer-types) + +# The 32-bit compiler does not provide 128-bit integers, which are used in +# some headers that are indirectly included from the vDSO code. +# This hack makes the compiler happy and should trigger a warning/error if +# variables of such type are referenced. +VDSO_CFLAGS += -D__uint128_t='void*' +# Silence some warnings coming from headers that operate on long's +# (on GCC 4.8 or older, there is unfortunately no way to silence this warning) +VDSO_CFLAGS += $(call cc32-disable-warning,shift-count-overflow) +VDSO_CFLAGS += -Wno-int-to-pointer-cast + +# Compile as THUMB2 or ARM. Unwinding via frame-pointers in THUMB2 is +# unreliable. 
+ifeq ($(CONFIG_THUMB2_COMPAT_VDSO), y) +VDSO_CFLAGS += -mthumb -fomit-frame-pointer +else +VDSO_CFLAGS += -marm +endif + +VDSO_AFLAGS := $(VDSO_CAFLAGS) +VDSO_AFLAGS += -D__ASSEMBLY__ + +# From arm vDSO Makefile +VDSO_LDFLAGS += -Bsymbolic --no-undefined -soname=linux-vdso.so.1 +VDSO_LDFLAGS += -z max-page-size=4096 -z common-page-size=4096 +VDSO_LDFLAGS += -shared --hash-style=sysv --build-id=sha1 +VDSO_LDFLAGS += --orphan-handling=$(CONFIG_LD_ORPHAN_WARN_LEVEL) + + +# Borrow vdsomunge.c from the arm vDSO +# We have to use a relative path because scripts/Makefile.host prefixes +# $(hostprogs) with $(obj) +munge := ../../../arm/vdso/vdsomunge +hostprogs := $(munge) + +c-obj-vdso := note.o +c-obj-vdso-gettimeofday := vgettimeofday.o + +ifneq ($(c-gettimeofday-y),) +VDSO_CFLAGS_gettimeofday_o += -include $(c-gettimeofday-y) +endif + +VDSO_CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os + +# Build rules +targets := $(c-obj-vdso) $(c-obj-vdso-gettimeofday) $(asm-obj-vdso) vdso.so vdso.so.dbg vdso.so.raw +c-obj-vdso := $(addprefix $(obj)/, $(c-obj-vdso)) +c-obj-vdso-gettimeofday := $(addprefix $(obj)/, $(c-obj-vdso-gettimeofday)) +asm-obj-vdso := $(addprefix $(obj)/, $(asm-obj-vdso)) +obj-vdso := $(c-obj-vdso) $(c-obj-vdso-gettimeofday) $(asm-obj-vdso) + +targets += vdso.lds +CPPFLAGS_vdso.lds += -P -C -U$(ARCH) + +include/generated/vdso32-offsets.h: $(obj)/vdso.so.dbg FORCE + $(call if_changed,vdsosym) + +# Strip rule for vdso.so +$(obj)/vdso.so: OBJCOPYFLAGS := -S +$(obj)/vdso.so: $(obj)/vdso.so.dbg FORCE + $(call if_changed,objcopy) + +$(obj)/vdso.so.dbg: $(obj)/vdso.so.raw $(obj)/$(munge) FORCE + $(call if_changed,vdsomunge) + +# Link rule for the .so file, .lds has to be first +$(obj)/vdso.so.raw: $(src)/vdso.lds $(obj-vdso) FORCE + $(call if_changed,vdsold_and_vdso_check) + +# Compilation rules for the vDSO sources +$(c-obj-vdso): %.o: %.c FORCE + $(call if_changed_dep,vdsocc) +$(c-obj-vdso-gettimeofday): %.o: %.c FORCE + $(call if_changed_dep,vdsocc_gettimeofday) +$(asm-obj-vdso): %.o: %.S FORCE + $(call if_changed_dep,vdsoas) + +# Actual build commands +quiet_cmd_vdsold_and_vdso_check = LD32 $@ + cmd_vdsold_and_vdso_check = $(cmd_vdsold); $(cmd_vdso_check) + +quiet_cmd_vdsold = LD32 $@ + cmd_vdsold = $(LD_COMPAT) $(VDSO_LDFLAGS) \ + -T $(filter %.lds,$^) $(filter %.o,$^) -o $@ +quiet_cmd_vdsocc = CC32 $@ + cmd_vdsocc = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) -c -o $@ $< +quiet_cmd_vdsocc_gettimeofday = CC32 $@ + cmd_vdsocc_gettimeofday = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) $(VDSO_CFLAGS_gettimeofday_o) -c -o $@ $< +quiet_cmd_vdsoas = AS32 $@ + cmd_vdsoas = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_AFLAGS) -c -o $@ $< + +quiet_cmd_vdsomunge = MUNGE $@ + cmd_vdsomunge = $(obj)/$(munge) $< $@ + +# Generate vDSO offsets using helper script (borrowed from the 64-bit vDSO) +gen-vdsosym := $(srctree)/$(src)/../vdso/gen_vdso_offsets.sh +quiet_cmd_vdsosym = VDSOSYM $@ +# The AArch64 nm should be able to read an AArch32 binary + cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@ + +# Install commands for the unstripped file +quiet_cmd_vdso_install = INSTALL32 $@ + cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/vdso32.so + +vdso.so: $(obj)/vdso.so.dbg + @mkdir -p $(MODLIB)/vdso + $(call cmd,vdso_install) + +vdso_install: vdso.so diff --git a/arch/arm64/kernel/vdso32/note.c b/arch/arm64/kernel/vdso32/note.c new file mode 100644 index 0000000000..eff5bf9efb --- /dev/null +++ b/arch/arm64/kernel/vdso32/note.c @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0 
+/* + * Copyright (C) 2012-2018 ARM Limited + * + * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text. + * Here we can supply some information useful to userland. + */ + +#include +#include +#include +#include + +ELFNOTE32("Linux", 0, LINUX_VERSION_CODE); +BUILD_SALT; diff --git a/arch/arm64/kernel/vdso32/vdso.lds.S b/arch/arm64/kernel/vdso32/vdso.lds.S new file mode 100644 index 0000000000..8d95d7d350 --- /dev/null +++ b/arch/arm64/kernel/vdso32/vdso.lds.S @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Adapted from arm64 version. + * + * GNU linker script for the VDSO library. + * Heavily based on the vDSO linker scripts for other archs. + * + * Copyright (C) 2012-2018 ARM Limited + */ + +#include +#include +#include +#include + +OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm", "elf32-littlearm") +OUTPUT_ARCH(arm) + +SECTIONS +{ + PROVIDE_HIDDEN(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE); +#ifdef CONFIG_TIME_NS + PROVIDE_HIDDEN(_timens_data = _vdso_data + PAGE_SIZE); +#endif + . = VDSO_LBASE + SIZEOF_HEADERS; + + .hash : { *(.hash) } :text + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + + .note : { *(.note.*) } :text :note + + .dynamic : { *(.dynamic) } :text :dynamic + + .rodata : { + *(.rodata*) + *(.got) + *(.got.plt) + *(.plt) + *(.rel.iplt) + *(.iplt) + *(.igot.plt) + } :text + + .text : { + *(.text*) + *(.glue_7) + *(.glue_7t) + *(.vfp11_veneer) + *(.v4_bx) + } :text =0xe7f001f2 + + .rel.dyn : { *(.rel*) } + + .ARM.exidx : { *(.ARM.exidx*) } + DWARF_DEBUG + ELF_DETAILS + .ARM.attributes 0 : { *(.ARM.attributes) } + + /DISCARD/ : { + *(.note.GNU-stack) + *(.data .data.* .gnu.linkonce.d.* .sdata*) + *(.bss .sbss .dynbss .dynsbss) + } +} + +/* + * We must supply the ELF program headers explicitly to get just one + * PT_LOAD segment, and set the flags explicitly to make segments read-only. + */ +PHDRS +{ + text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */ + dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ + note PT_NOTE FLAGS(4); /* PF_R */ +} + +VERSION +{ + LINUX_2.6 { + global: + __vdso_clock_gettime; + __vdso_gettimeofday; + __vdso_clock_getres; + __vdso_clock_gettime64; + local: *; + }; +} diff --git a/arch/arm64/kernel/vdso32/vgettimeofday.c b/arch/arm64/kernel/vdso32/vgettimeofday.c new file mode 100644 index 0000000000..5acff29c59 --- /dev/null +++ b/arch/arm64/kernel/vdso32/vgettimeofday.c @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ARM64 compat userspace implementations of gettimeofday() and similar. 
+ * + * Copyright (C) 2018 ARM Limited + * + */ + +int __vdso_clock_gettime(clockid_t clock, + struct old_timespec32 *ts) +{ + return __cvdso_clock_gettime32(clock, ts); +} + +int __vdso_clock_gettime64(clockid_t clock, + struct __kernel_timespec *ts) +{ + return __cvdso_clock_gettime(clock, ts); +} + +int __vdso_gettimeofday(struct __kernel_old_timeval *tv, + struct timezone *tz) +{ + return __cvdso_gettimeofday(tv, tz); +} + +int __vdso_clock_getres(clockid_t clock_id, + struct old_timespec32 *res) +{ + return __cvdso_clock_getres_time32(clock_id, res); +} + +/* Avoid unresolved references emitted by GCC */ + +void __aeabi_unwind_cpp_pr0(void) +{ +} + +void __aeabi_unwind_cpp_pr1(void) +{ +} + +void __aeabi_unwind_cpp_pr2(void) +{ +} diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S new file mode 100644 index 0000000000..3cd7e76cc5 --- /dev/null +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -0,0 +1,388 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * ld script to make ARM Linux kernel + * taken from the i386 version by Russell King + * Written by Martin Mares + */ + +#include +#ifdef CONFIG_KVM +#define HYPERVISOR_EXTABLE \ + . = ALIGN(SZ_8); \ + __start___kvm_ex_table = .; \ + *(__kvm_ex_table) \ + __stop___kvm_ex_table = .; + +#define HYPERVISOR_DATA_SECTIONS \ + HYP_SECTION_NAME(.rodata) : { \ + . = ALIGN(PAGE_SIZE); \ + __hyp_rodata_start = .; \ + *(HYP_SECTION_NAME(.data..ro_after_init)) \ + *(HYP_SECTION_NAME(.rodata)) \ + . = ALIGN(PAGE_SIZE); \ + __hyp_rodata_end = .; \ + } + +#define HYPERVISOR_PERCPU_SECTION \ + . = ALIGN(PAGE_SIZE); \ + HYP_SECTION_NAME(.data..percpu) : { \ + *(HYP_SECTION_NAME(.data..percpu)) \ + } + +#define HYPERVISOR_RELOC_SECTION \ + .hyp.reloc : ALIGN(4) { \ + __hyp_reloc_begin = .; \ + *(.hyp.reloc) \ + __hyp_reloc_end = .; \ + } + +#define BSS_FIRST_SECTIONS \ + __hyp_bss_start = .; \ + *(HYP_SECTION_NAME(.bss)) \ + . = ALIGN(PAGE_SIZE); \ + __hyp_bss_end = .; + +/* + * We require that __hyp_bss_start and __bss_start are aligned, and enforce it + * with an assertion. But the BSS_SECTION macro places an empty .sbss section + * between them, which can in some cases cause the linker to misalign them. To + * work around the issue, force a page alignment for __bss_start. + */ +#define SBSS_ALIGN PAGE_SIZE +#else /* CONFIG_KVM */ +#define HYPERVISOR_EXTABLE +#define HYPERVISOR_DATA_SECTIONS +#define HYPERVISOR_PERCPU_SECTION +#define HYPERVISOR_RELOC_SECTION +#define SBSS_ALIGN 0 +#endif + +#define RO_EXCEPTION_TABLE_ALIGN 4 +#define RUNTIME_DISCARD_EXIT + +#include +#include +#include +#include +#include +#include + +#include "image.h" + +OUTPUT_ARCH(aarch64) +ENTRY(_text) + +jiffies = jiffies_64; + +#define HYPERVISOR_TEXT \ + . = ALIGN(PAGE_SIZE); \ + __hyp_idmap_text_start = .; \ + *(.hyp.idmap.text) \ + __hyp_idmap_text_end = .; \ + __hyp_text_start = .; \ + *(.hyp.text) \ + HYPERVISOR_EXTABLE \ + . = ALIGN(PAGE_SIZE); \ + __hyp_text_end = .; + +#define IDMAP_TEXT \ + . 
= ALIGN(SZ_4K); \ + __idmap_text_start = .; \ + *(.idmap.text) \ + __idmap_text_end = .; + +#ifdef CONFIG_HIBERNATION +#define HIBERNATE_TEXT \ + ALIGN_FUNCTION(); \ + __hibernate_exit_text_start = .; \ + *(.hibernate_exit.text) \ + __hibernate_exit_text_end = .; +#else +#define HIBERNATE_TEXT +#endif + +#ifdef CONFIG_KEXEC_CORE +#define KEXEC_TEXT \ + ALIGN_FUNCTION(); \ + __relocate_new_kernel_start = .; \ + *(.kexec_relocate.text) \ + __relocate_new_kernel_end = .; +#else +#define KEXEC_TEXT +#endif + +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +#define TRAMP_TEXT \ + . = ALIGN(PAGE_SIZE); \ + __entry_tramp_text_start = .; \ + *(.entry.tramp.text) \ + . = ALIGN(PAGE_SIZE); \ + __entry_tramp_text_end = .; \ + *(.entry.tramp.rodata) +#else +#define TRAMP_TEXT +#endif + +#ifdef CONFIG_UNWIND_TABLES +#define UNWIND_DATA_SECTIONS \ + .eh_frame : { \ + __eh_frame_start = .; \ + *(.eh_frame) \ + __eh_frame_end = .; \ + } +#else +#define UNWIND_DATA_SECTIONS +#endif + +/* + * The size of the PE/COFF section that covers the kernel image, which + * runs from _stext to _edata, must be a round multiple of the PE/COFF + * FileAlignment, which we set to its minimum value of 0x200. '_stext' + * itself is 4 KB aligned, so padding out _edata to a 0x200 aligned + * boundary should be sufficient. + */ +PECOFF_FILE_ALIGNMENT = 0x200; + +#ifdef CONFIG_EFI +#define PECOFF_EDATA_PADDING \ + .pecoff_edata_padding : { BYTE(0); . = ALIGN(PECOFF_FILE_ALIGNMENT); } +#else +#define PECOFF_EDATA_PADDING +#endif + +SECTIONS +{ + /* + * XXX: The linker does not define how output sections are + * assigned to input sections when there are multiple statements + * matching the same input section name. There is no documented + * order of matching. + */ + DISCARDS + /DISCARD/ : { + *(.interp .dynamic) + *(.dynsym .dynstr .hash .gnu.hash) + } + + . = KIMAGE_VADDR; + + .head.text : { + _text = .; + HEAD_TEXT + } + .text : ALIGN(SEGMENT_ALIGN) { /* Real text segment */ + _stext = .; /* Text and read-only data */ + IRQENTRY_TEXT + SOFTIRQENTRY_TEXT + ENTRY_TEXT + TEXT_TEXT + SCHED_TEXT + LOCK_TEXT + KPROBES_TEXT + HYPERVISOR_TEXT + *(.gnu.warning) + } + + . = ALIGN(SEGMENT_ALIGN); + _etext = .; /* End of text section */ + + /* everything from this point to __init_begin will be marked RO NX */ + RO_DATA(PAGE_SIZE) + + HYPERVISOR_DATA_SECTIONS + + .got : { *(.got) } + /* + * Make sure that the .got.plt is either completely empty or it + * contains only the lazy dispatch entries. + */ + .got.plt : { *(.got.plt) } + ASSERT(SIZEOF(.got.plt) == 0 || SIZEOF(.got.plt) == 0x18, + "Unexpected GOT/PLT entries detected!") + + /* code sections that are never executed via the kernel mapping */ + .rodata.text : { + TRAMP_TEXT + HIBERNATE_TEXT + KEXEC_TEXT + IDMAP_TEXT + . = ALIGN(PAGE_SIZE); + } + + idmap_pg_dir = .; + . += PAGE_SIZE; + +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 + tramp_pg_dir = .; + . += PAGE_SIZE; +#endif + + reserved_pg_dir = .; + . += PAGE_SIZE; + + swapper_pg_dir = .; + . += PAGE_SIZE; + + . = ALIGN(SEGMENT_ALIGN); + __init_begin = .; + __inittext_begin = .; + + INIT_TEXT_SECTION(8) + + __exittext_begin = .; + .exit.text : { + EXIT_TEXT + } + __exittext_end = .; + + . = ALIGN(4); + .altinstructions : { + __alt_instructions = .; + *(.altinstructions) + __alt_instructions_end = .; + } + + UNWIND_DATA_SECTIONS + + . = ALIGN(SEGMENT_ALIGN); + __inittext_end = .; + __initdata_begin = .; + + init_idmap_pg_dir = .; + . 
+= INIT_IDMAP_DIR_SIZE; + init_idmap_pg_end = .; + + .init.data : { + INIT_DATA + INIT_SETUP(16) + INIT_CALLS + CON_INITCALL + INIT_RAM_FS + *(.init.altinstructions .init.bss) /* from the EFI stub */ + } + .exit.data : { + EXIT_DATA + } + + PERCPU_SECTION(L1_CACHE_BYTES) + HYPERVISOR_PERCPU_SECTION + + HYPERVISOR_RELOC_SECTION + + .rela.dyn : ALIGN(8) { + __rela_start = .; + *(.rela .rela*) + __rela_end = .; + } + + .relr.dyn : ALIGN(8) { + __relr_start = .; + *(.relr.dyn) + __relr_end = .; + } + + . = ALIGN(SEGMENT_ALIGN); + __initdata_end = .; + __init_end = .; + + _data = .; + _sdata = .; + RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN) + + /* + * Data written with the MMU off but read with the MMU on requires + * cache lines to be invalidated, discarding up to a Cache Writeback + * Granule (CWG) of data from the cache. Keep the section that + * requires this type of maintenance to be in its own Cache Writeback + * Granule (CWG) area so the cache maintenance operations don't + * interfere with adjacent data. + */ + .mmuoff.data.write : ALIGN(SZ_2K) { + __mmuoff_data_start = .; + *(.mmuoff.data.write) + } + . = ALIGN(SZ_2K); + .mmuoff.data.read : { + *(.mmuoff.data.read) + __mmuoff_data_end = .; + } + + PECOFF_EDATA_PADDING + __pecoff_data_rawsize = ABSOLUTE(. - __initdata_begin); + _edata = .; + + BSS_SECTION(SBSS_ALIGN, 0, 0) + + . = ALIGN(PAGE_SIZE); + init_pg_dir = .; + . += INIT_DIR_SIZE; + init_pg_end = .; + + . = ALIGN(SEGMENT_ALIGN); + __pecoff_data_size = ABSOLUTE(. - __initdata_begin); + _end = .; + + STABS_DEBUG + DWARF_DEBUG + ELF_DETAILS + + HEAD_SYMBOLS + + /* + * Sections that should stay zero sized, which is safer to + * explicitly check instead of blindly discarding. + */ + .plt : { + *(.plt) *(.plt.*) *(.iplt) *(.igot .igot.plt) + } + ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!") + + .data.rel.ro : { *(.data.rel.ro) } + ASSERT(SIZEOF(.data.rel.ro) == 0, "Unexpected RELRO detected!") +} + +#include "image-vars.h" + +/* + * The HYP init code and ID map text can't be longer than a page each. The + * former is page-aligned, but the latter may not be with 16K or 64K pages, so + * it should also not cross a page boundary. + */ +ASSERT(__hyp_idmap_text_end - __hyp_idmap_text_start <= PAGE_SIZE, + "HYP init code too big") +ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K, + "ID map text too big or misaligned") +#ifdef CONFIG_HIBERNATION +ASSERT(__hibernate_exit_text_end - __hibernate_exit_text_start <= SZ_4K, + "Hibernate exit text is bigger than 4 KiB") +ASSERT(__hibernate_exit_text_start == swsusp_arch_suspend_exit, + "Hibernate exit text does not start with swsusp_arch_suspend_exit") +#endif +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) <= 3*PAGE_SIZE, + "Entry trampoline text too big") +#endif +#ifdef CONFIG_KVM +ASSERT(__hyp_bss_start == __bss_start, "HYP and Host BSS are misaligned") +#endif +/* + * If padding is applied before .head.text, virt<->phys conversions will fail. 
+ */ +ASSERT(_text == KIMAGE_VADDR, "HEAD is misaligned") + +ASSERT(swapper_pg_dir - reserved_pg_dir == RESERVED_SWAPPER_OFFSET, + "RESERVED_SWAPPER_OFFSET is wrong!") + +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +ASSERT(swapper_pg_dir - tramp_pg_dir == TRAMP_SWAPPER_OFFSET, + "TRAMP_SWAPPER_OFFSET is wrong!") +#endif + +#ifdef CONFIG_KEXEC_CORE +/* kexec relocation code should fit into one KEXEC_CONTROL_PAGE_SIZE */ +ASSERT(__relocate_new_kernel_end - __relocate_new_kernel_start <= SZ_4K, + "kexec relocation code is bigger than 4 KiB") +ASSERT(KEXEC_CONTROL_PAGE_SIZE >= SZ_4K, "KEXEC_CONTROL_PAGE_SIZE is broken") +ASSERT(__relocate_new_kernel_start == arm64_relocate_new_kernel, + "kexec control page does not start with arm64_relocate_new_kernel") +#endif diff --git a/arch/arm64/kernel/watchdog_hld.c b/arch/arm64/kernel/watchdog_hld.c new file mode 100644 index 0000000000..dcd2532212 --- /dev/null +++ b/arch/arm64/kernel/watchdog_hld.c @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include + +/* + * Safe maximum CPU frequency in case a particular platform doesn't implement + * cpufreq driver. Although, architecture doesn't put any restrictions on + * maximum frequency but 5 GHz seems to be safe maximum given the available + * Arm CPUs in the market which are clocked much less than 5 GHz. On the other + * hand, we can't make it much higher as it would lead to a large hard-lockup + * detection timeout on parts which are running slower (eg. 1GHz on + * Developerbox) and doesn't possess a cpufreq driver. + */ +#define SAFE_MAX_CPU_FREQ 5000000000UL // 5 GHz +u64 hw_nmi_get_sample_period(int watchdog_thresh) +{ + unsigned int cpu = smp_processor_id(); + unsigned long max_cpu_freq; + + max_cpu_freq = cpufreq_get_hw_max_freq(cpu) * 1000UL; + if (!max_cpu_freq) + max_cpu_freq = SAFE_MAX_CPU_FREQ; + + return (u64)max_cpu_freq * watchdog_thresh; +} + +bool __init arch_perf_nmi_is_available(void) +{ + /* + * hardlockup_detector_perf_init() will success even if Pseudo-NMI turns off, + * however, the pmu interrupts will act like a normal interrupt instead of + * NMI and the hardlockup detector would be broken. + */ + return arm_pmu_irq_is_nmi(); +} -- cgit v1.2.3
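
For reference on the traps.c hunk above, which emulates EL0 reads of CNTVCT_EL0, CNTVCTSS_EL0 and CNTFRQ_EL0 via cntvct_read_handler()/cntfrq_read_handler(), a minimal userspace sketch of such a read is shown below. This is not part of the patch; the helper names read_cntvct() and read_cntfrq() are invented for illustration, and the direct MRS accesses are arm64-only (they either succeed in hardware or trap and get emulated by the handlers above).

// Standalone sketch (not from the patch): read the virtual counter and its
// frequency directly from EL0. These are exactly the accesses that
// cntvct_read_handler() and cntfrq_read_handler() emulate when trapped.
#include <stdint.h>
#include <stdio.h>

static inline uint64_t read_cntvct(void)
{
	uint64_t val;

	asm volatile("mrs %0, cntvct_el0" : "=r" (val));
	return val;
}

static inline uint64_t read_cntfrq(void)
{
	uint64_t val;

	asm volatile("mrs %0, cntfrq_el0" : "=r" (val));
	return val;
}

int main(void)
{
	uint64_t ticks = read_cntvct();
	uint64_t freq = read_cntfrq();

	printf("CNTVCT_EL0 = %llu, CNTFRQ_EL0 = %llu Hz\n",
	       (unsigned long long)ticks, (unsigned long long)freq);
	return 0;
}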
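
Similarly, the vDSO added above is what lets ordinary libc time calls avoid a syscall: __kernel_clock_gettime(), __kernel_gettimeofday() and __kernel_clock_getres() are exported under the LINUX_2.6.39 version node in vdso.lds.S and mapped into every process by arch_setup_additional_pages(). A minimal caller sketch follows; it is not part of the patch, and whether the call is actually routed through the vDSO depends on the C library in use.

// Standalone sketch (not from the patch): a plain clock_gettime() call. On
// arm64, glibc typically resolves this through the [vdso] mapping and ends
// up in __kernel_clock_gettime() without entering the kernel.
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0) {
		perror("clock_gettime");
		return 1;
	}

	printf("monotonic: %lld.%09ld\n",
	       (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}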