author     Daniel Baumann <daniel.baumann@progress-linux.org>	2024-05-18 17:35:05 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>	2024-05-18 17:39:31 +0000
commit     85c675d0d09a45a135bddd15d7b385f8758c32fb
tree       76267dbc9b9a130337be3640948fe397b04ac629	/arch/arm64/kernel
parent     Adding upstream version 6.6.15.
Adding upstream version 6.7.7. (tag: upstream/6.7.7)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--  arch/arm64/kernel/acpi_parking_protocol.c  |    2
-rw-r--r--  arch/arm64/kernel/armv8_deprecated.c       |    8
-rw-r--r--  arch/arm64/kernel/cpu_errata.c             |   20
-rw-r--r--  arch/arm64/kernel/cpufeature.c             |  322
-rw-r--r--  arch/arm64/kernel/cpuinfo.c                |    3
-rw-r--r--  arch/arm64/kernel/efi.c                    |    7
-rw-r--r--  arch/arm64/kernel/fpsimd.c                 |  169
-rw-r--r--  arch/arm64/kernel/idle.c                   |    4
-rw-r--r--  arch/arm64/kernel/image-vars.h             |    2
-rw-r--r--  arch/arm64/kernel/irq.c                    |    7
-rw-r--r--  arch/arm64/kernel/module-plts.c            |    7
-rw-r--r--  arch/arm64/kernel/mte.c                    |    8
-rw-r--r--  arch/arm64/kernel/process.c                |    3
-rw-r--r--  arch/arm64/kernel/proton-pack.c            |    2
-rw-r--r--  arch/arm64/kernel/signal.c                 |    4
-rw-r--r--  arch/arm64/kernel/smp.c                    |  148
-rw-r--r--  arch/arm64/kernel/suspend.c                |   16
-rw-r--r--  arch/arm64/kernel/sys_compat.c             |    2
-rw-r--r--  arch/arm64/kernel/traps.c                  |   50
-rw-r--r--  arch/arm64/kernel/vdso.c                   |    2
-rw-r--r--  arch/arm64/kernel/vdso/Makefile            |   10
-rw-r--r--  arch/arm64/kernel/vdso32/Makefile          |   10
22 files changed, 409 insertions, 397 deletions
diff --git a/arch/arm64/kernel/acpi_parking_protocol.c b/arch/arm64/kernel/acpi_parking_protocol.c
index b1990e38ae..e1be29e608 100644
--- a/arch/arm64/kernel/acpi_parking_protocol.c
+++ b/arch/arm64/kernel/acpi_parking_protocol.c
@@ -103,7 +103,7 @@ static int acpi_parking_protocol_cpu_boot(unsigned int cpu)
 		       &mailbox->entry_point);
 	writel_relaxed(cpu_entry->gic_cpu_id, &mailbox->cpu_id);
 
-	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
+	arch_send_wakeup_ipi(cpu);
 
 	return 0;
 }
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index e459cfd337..dd6ce86d43 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -52,10 +52,8 @@ struct insn_emulation {
 	int min;
 	int max;
 
-	/*
-	 * sysctl for this emulation + a sentinal entry.
-	 */
-	struct ctl_table sysctl[2];
+	/* sysctl for this emulation */
+	struct ctl_table sysctl;
 };
 
 #define ARM_OPCODE_CONDTEST_FAIL   0
@@ -558,7 +556,7 @@ static void __init register_insn_emulation(struct insn_emulation *insn)
 	update_insn_emulation_mode(insn, INSN_UNDEF);
 
 	if (insn->status != INSN_UNAVAILABLE) {
-		sysctl = &insn->sysctl[0];
+		sysctl = &insn->sysctl;
 
 		sysctl->mode = 0644;
 		sysctl->maxlen = sizeof(int);
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 87787a012b..76b8dd3709 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -121,22 +121,6 @@ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
 	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
 }
 
-static DEFINE_RAW_SPINLOCK(reg_user_mask_modification);
-static void __maybe_unused
-cpu_clear_bf16_from_user_emulation(const struct arm64_cpu_capabilities *__unused)
-{
-	struct arm64_ftr_reg *regp;
-
-	regp = get_arm64_ftr_reg(SYS_ID_AA64ISAR1_EL1);
-	if (!regp)
-		return;
-
-	raw_spin_lock(&reg_user_mask_modification);
-	if (regp->user_mask & ID_AA64ISAR1_EL1_BF16_MASK)
-		regp->user_mask &= ~ID_AA64ISAR1_EL1_BF16_MASK;
-	raw_spin_unlock(&reg_user_mask_modification);
-}
-
 #define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
 	.matches = is_affected_midr_range,			\
 	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)
@@ -390,6 +374,7 @@ static const struct midr_range erratum_1463225[] = {
 static const struct midr_range trbe_overwrite_fill_mode_cpus[] = {
 #ifdef CONFIG_ARM64_ERRATUM_2139208
 	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
+	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_2119858
 	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
@@ -403,6 +388,7 @@ static const struct midr_range trbe_overwrite_fill_mode_cpus[] = {
 static const struct midr_range tsb_flush_fail_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2067961
 	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
+	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_2054223
 	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
@@ -415,6 +401,7 @@ static const struct midr_range tsb_flush_fail_cpus[] = {
 static struct midr_range trbe_write_out_of_range_cpus[] = {
 #ifdef CONFIG_ARM64_ERRATUM_2253138
 	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
+	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_2224489
 	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
@@ -740,7 +727,6 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		/* Cortex-A510 r0p0 - r1p1 */
 		ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
 		MIDR_FIXED(MIDR_CPU_VAR_REV(1,1), BIT(25)),
-		.cpu_enable = cpu_clear_bf16_from_user_emulation,
 	},
 #endif
 #ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 444a73c2e6..91d2d67149 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -279,6 +279,8 @@ static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
 	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
 		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_SHA3_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_B16B16_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
 		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_BF16_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
 		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_BitPerm_SHIFT, 4, 0),
@@ -611,18 +613,6 @@ static const struct arm64_ftr_bits ftr_id_dfr1[] = {
 	ARM64_FTR_END,
 };
 
-static const struct arm64_ftr_bits ftr_zcr[] = {
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE,
-		ZCR_ELx_LEN_SHIFT, ZCR_ELx_LEN_WIDTH, 0),	/* LEN */
-	ARM64_FTR_END,
-};
-
-static const struct arm64_ftr_bits ftr_smcr[] = {
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE,
-		SMCR_ELx_LEN_SHIFT, SMCR_ELx_LEN_WIDTH, 0),	/* LEN */
-	ARM64_FTR_END,
-};
-
 /*
  * Common ftr bits for a 32bit register with all hidden, strict
  * attributes, with 4bit feature fields and a default safe value of
@@ -735,10 +725,6 @@ static const struct __ftr_reg_entry {
 	ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),
 	ARM64_FTR_REG(SYS_ID_AA64MMFR3_EL1, ftr_id_aa64mmfr3),
 
-	/* Op1 = 0, CRn = 1, CRm = 2 */
-	ARM64_FTR_REG(SYS_ZCR_EL1, ftr_zcr),
-	ARM64_FTR_REG(SYS_SMCR_EL1, ftr_smcr),
-
 	/* Op1 = 1, CRn = 0, CRm = 0 */
 	ARM64_FTR_REG(SYS_GMID_EL1, ftr_gmid),
 
@@ -1013,6 +999,37 @@ static void init_32bit_cpu_features(struct cpuinfo_32bit *info)
 	init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
 }
 
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+static bool enable_pseudo_nmi;
+
+static int __init early_enable_pseudo_nmi(char *p)
+{
+	return kstrtobool(p, &enable_pseudo_nmi);
+}
+early_param("irqchip.gicv3_pseudo_nmi", early_enable_pseudo_nmi);
+
+static __init void detect_system_supports_pseudo_nmi(void)
+{
+	struct device_node *np;
+
+	if (!enable_pseudo_nmi)
+		return;
+
+	/*
+	 * Detect broken MediaTek firmware that doesn't properly save and
+	 * restore GIC priorities.
+	 */
+	np = of_find_compatible_node(NULL, NULL, "arm,gic-v3");
+	if (np && of_property_read_bool(np, "mediatek,broken-save-restore-fw")) {
+		pr_info("Pseudo-NMI disabled due to MediaTek Chromebook GICR save problem\n");
+		enable_pseudo_nmi = false;
+	}
+	of_node_put(np);
+}
+#else /* CONFIG_ARM64_PSEUDO_NMI */
+static inline void detect_system_supports_pseudo_nmi(void) { }
+#endif
+
 void __init init_cpu_features(struct cpuinfo_arm64 *info)
 {
 	/* Before we start using the tables, make sure it is sorted */
@@ -1040,22 +1057,26 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
 
 	if (IS_ENABLED(CONFIG_ARM64_SVE) &&
 	    id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1))) {
-		info->reg_zcr = read_zcr_features();
-		init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
+		unsigned long cpacr = cpacr_save_enable_kernel_sve();
+
 		vec_init_vq_map(ARM64_VEC_SVE);
+
+		cpacr_restore(cpacr);
 	}
 
 	if (IS_ENABLED(CONFIG_ARM64_SME) &&
 	    id_aa64pfr1_sme(read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1))) {
-		info->reg_smcr = read_smcr_features();
+		unsigned long cpacr = cpacr_save_enable_kernel_sme();
+
 		/*
 		 * We mask out SMPS since even if the hardware
 		 * supports priorities the kernel does not at present
 		 * and we block access to them.
 		 */
 		info->reg_smidr = read_cpuid(SMIDR_EL1) & ~SMIDR_EL1_SMPS;
-		init_cpu_ftr_reg(SYS_SMCR_EL1, info->reg_smcr);
 		vec_init_vq_map(ARM64_VEC_SME);
+
+		cpacr_restore(cpacr);
 	}
 
 	if (id_aa64pfr1_mte(info->reg_id_aa64pfr1))
@@ -1068,6 +1089,13 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
 	init_cpucap_indirect_list();
 
 	/*
+	 * Detect broken pseudo-NMI. Must be called _before_ the call to
+	 * setup_boot_cpu_capabilities() since it interacts with
+	 * can_use_gic_priorities().
+	 */
+	detect_system_supports_pseudo_nmi();
+
+	/*
 	 * Detect and enable early CPU capabilities based on the boot CPU,
 	 * after we have initialised the CPU feature infrastructure.
 	 */
@@ -1289,32 +1317,34 @@ void update_cpu_features(int cpu,
 		taint |= check_update_ftr_reg(SYS_ID_AA64SMFR0_EL1, cpu,
 					      info->reg_id_aa64smfr0,
 					      boot->reg_id_aa64smfr0);
 
+	/* Probe vector lengths */
 	if (IS_ENABLED(CONFIG_ARM64_SVE) &&
 	    id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1))) {
-		info->reg_zcr = read_zcr_features();
-		taint |= check_update_ftr_reg(SYS_ZCR_EL1, cpu,
-					info->reg_zcr, boot->reg_zcr);
+		if (!system_capabilities_finalized()) {
+			unsigned long cpacr = cpacr_save_enable_kernel_sve();
 
-		/* Probe vector lengths */
-		if (!system_capabilities_finalized())
 			vec_update_vq_map(ARM64_VEC_SVE);
+
+			cpacr_restore(cpacr);
+		}
 	}
 
 	if (IS_ENABLED(CONFIG_ARM64_SME) &&
 	    id_aa64pfr1_sme(read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1))) {
-		info->reg_smcr = read_smcr_features();
+		unsigned long cpacr = cpacr_save_enable_kernel_sme();
+
 		/*
 		 * We mask out SMPS since even if the hardware
 		 * supports priorities the kernel does not at present
 		 * and we block access to them.
 		 */
 		info->reg_smidr = read_cpuid(SMIDR_EL1) & ~SMIDR_EL1_SMPS;
-		taint |= check_update_ftr_reg(SYS_SMCR_EL1, cpu,
-					info->reg_smcr, boot->reg_smcr);
 
 		/* Probe vector lengths */
 		if (!system_capabilities_finalized())
 			vec_update_vq_map(ARM64_VEC_SME);
+
+		cpacr_restore(cpacr);
 	}
 
 	/*
@@ -1564,14 +1594,6 @@ static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int _
 		MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
 }
 
-static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unused)
-{
-	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
-
-	return cpuid_feature_extract_signed_field(pfr0,
-					ID_AA64PFR0_EL1_FP_SHIFT) < 0;
-}
-
 static bool has_cache_idc(const struct arm64_cpu_capabilities *entry,
 			  int scope)
 {
@@ -1621,7 +1643,7 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
 	if (is_kdump_kernel())
 		return false;
 
-	if (cpus_have_const_cap(ARM64_WORKAROUND_NVIDIA_CARMEL_CNP))
+	if (cpus_have_cap(ARM64_WORKAROUND_NVIDIA_CARMEL_CNP))
 		return false;
 
 	return has_cpuid_feature(entry, scope);
@@ -1754,16 +1776,15 @@ void create_kpti_ng_temp_pgd(pgd_t *pgdir, phys_addr_t phys, unsigned long virt,
 		     phys_addr_t size, pgprot_t prot,
 		     phys_addr_t (*pgtable_alloc)(int), int flags);
 
-static phys_addr_t kpti_ng_temp_alloc;
+static phys_addr_t __initdata kpti_ng_temp_alloc;
 
-static phys_addr_t kpti_ng_pgd_alloc(int shift)
+static phys_addr_t __init kpti_ng_pgd_alloc(int shift)
 {
 	kpti_ng_temp_alloc -= PAGE_SIZE;
 	return kpti_ng_temp_alloc;
 }
 
-static void
-kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
+static int __init __kpti_install_ng_mappings(void *__unused)
 {
 	typedef void (kpti_remap_fn)(int, int, phys_addr_t, unsigned long);
 	extern kpti_remap_fn idmap_kpti_install_ng_mappings;
@@ -1776,20 +1797,6 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
 	pgd_t *kpti_ng_temp_pgd;
 	u64 alloc = 0;
 
-	if (__this_cpu_read(this_cpu_vector) == vectors) {
-		const char *v = arm64_get_bp_hardening_vector(EL1_VECTOR_KPTI);
-
-		__this_cpu_write(this_cpu_vector, v);
-	}
-
-	/*
-	 * We don't need to rewrite the page-tables if either we've done
-	 * it already or we have KASLR enabled and therefore have not
-	 * created any global mappings at all.
-	 */
-	if (arm64_use_ng_mappings)
-		return;
-
 	remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
 
 	if (!cpu) {
@@ -1826,14 +1833,43 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
 		free_pages(alloc, order);
 		arm64_use_ng_mappings = true;
 	}
+
+	return 0;
+}
+
+static void __init kpti_install_ng_mappings(void)
+{
+	/* Check whether KPTI is going to be used */
+	if (!cpus_have_cap(ARM64_UNMAP_KERNEL_AT_EL0))
+		return;
+
+	/*
+	 * We don't need to rewrite the page-tables if either we've done
+	 * it already or we have KASLR enabled and therefore have not
+	 * created any global mappings at all.
+	 */
+	if (arm64_use_ng_mappings)
+		return;
+
+	stop_machine(__kpti_install_ng_mappings, NULL, cpu_online_mask);
 }
+
 #else
-static void
-kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
+static inline void kpti_install_ng_mappings(void)
 {
 }
 #endif	/* CONFIG_UNMAP_KERNEL_AT_EL0 */
 
+static void cpu_enable_kpti(struct arm64_cpu_capabilities const *cap)
+{
+	if (__this_cpu_read(this_cpu_vector) == vectors) {
+		const char *v = arm64_get_bp_hardening_vector(EL1_VECTOR_KPTI);
+
+		__this_cpu_write(this_cpu_vector, v);
+	}
+
+}
+
 static int __init parse_kpti(char *str)
 {
 	bool enabled;
@@ -1848,6 +1884,8 @@ static int __init parse_kpti(char *str)
 early_param("kpti", parse_kpti);
 
 #ifdef CONFIG_ARM64_HW_AFDBM
+static struct cpumask dbm_cpus __read_mostly;
+
 static inline void __cpu_enable_hw_dbm(void)
 {
 	u64 tcr = read_sysreg(tcr_el1) | TCR_HD;
@@ -1883,35 +1921,22 @@ static bool cpu_can_use_dbm(const struct arm64_cpu_capabilities *cap)
 
 static void cpu_enable_hw_dbm(struct arm64_cpu_capabilities const *cap)
 {
-	if (cpu_can_use_dbm(cap))
+	if (cpu_can_use_dbm(cap)) {
 		__cpu_enable_hw_dbm();
+		cpumask_set_cpu(smp_processor_id(), &dbm_cpus);
+	}
 }
 
 static bool has_hw_dbm(const struct arm64_cpu_capabilities *cap,
 		       int __unused)
 {
-	static bool detected = false;
 	/*
 	 * DBM is a non-conflicting feature. i.e, the kernel can safely
 	 * run a mix of CPUs with and without the feature. So, we
 	 * unconditionally enable the capability to allow any late CPU
 	 * to use the feature. We only enable the control bits on the
-	 * CPU, if it actually supports.
-	 *
-	 * We have to make sure we print the "feature" detection only
-	 * when at least one CPU actually uses it. So check if this CPU
-	 * can actually use it and print the message exactly once.
-	 *
-	 * This is safe as all CPUs (including secondary CPUs - due to the
-	 * LOCAL_CPU scope - and the hotplugged CPUs - via verification)
-	 * goes through the "matches" check exactly once. Also if a CPU
-	 * matches the criteria, it is guaranteed that the CPU will turn
-	 * the DBM on, as the capability is unconditionally enabled.
+	 * CPU, if it is supported.
 	 */
-	if (!detected && cpu_can_use_dbm(cap)) {
-		detected = true;
-		pr_info("detected: Hardware dirty bit management\n");
-	}
 
 	return true;
 }
@@ -1944,8 +1969,6 @@ int get_cpu_with_amu_feat(void)
 static void cpu_amu_enable(struct arm64_cpu_capabilities const *cap)
 {
 	if (has_cpuid_feature(cap, SCOPE_LOCAL_CPU)) {
-		pr_info("detected CPU%d: Activity Monitors Unit (AMU)\n",
-			smp_processor_id());
 		cpumask_set_cpu(smp_processor_id(), &amu_cpus);
 
 		/* 0 reference values signal broken/disabled counters */
@@ -2104,14 +2127,6 @@ static void cpu_enable_e0pd(struct arm64_cpu_capabilities const *cap)
 #endif /* CONFIG_ARM64_E0PD */
 
 #ifdef CONFIG_ARM64_PSEUDO_NMI
-static bool enable_pseudo_nmi;
-
-static int __init early_enable_pseudo_nmi(char *p)
-{
-	return kstrtobool(p, &enable_pseudo_nmi);
-}
-early_param("irqchip.gicv3_pseudo_nmi", early_enable_pseudo_nmi);
-
 static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry,
 				   int scope)
 {
@@ -2190,12 +2205,23 @@ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
 }
 #endif /* CONFIG_ARM64_MTE */
 
+static void user_feature_fixup(void)
+{
+	if (cpus_have_cap(ARM64_WORKAROUND_2658417)) {
+		struct arm64_ftr_reg *regp;
+
+		regp = get_arm64_ftr_reg(SYS_ID_AA64ISAR1_EL1);
+		if (regp)
+			regp->user_mask &= ~ID_AA64ISAR1_EL1_BF16_MASK;
+	}
+}
+
 static void elf_hwcap_fixup(void)
 {
-#ifdef CONFIG_ARM64_ERRATUM_1742098
-	if (cpus_have_const_cap(ARM64_WORKAROUND_1742098))
+#ifdef CONFIG_COMPAT
+	if (cpus_have_cap(ARM64_WORKAROUND_1742098))
 		compat_elf_hwcap2 &= ~COMPAT_HWCAP2_AES;
-#endif /* ARM64_ERRATUM_1742098 */
+#endif /* CONFIG_COMPAT */
 }
 
 #ifdef CONFIG_KVM
@@ -2351,7 +2377,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.desc = "Kernel page table isolation (KPTI)",
 		.capability = ARM64_UNMAP_KERNEL_AT_EL0,
 		.type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
-		.cpu_enable = kpti_install_ng_mappings,
+		.cpu_enable = cpu_enable_kpti,
 		.matches = unmap_kernel_at_el0,
 		/*
 		 * The ID feature fields below are used to indicate that
@@ -2361,11 +2387,11 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, CSV3, IMP)
 	},
 	{
-		/* FP/SIMD is not implemented */
-		.capability = ARM64_HAS_NO_FPSIMD,
-		.type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
-		.min_field_value = 0,
-		.matches = has_no_fpsimd,
+		.capability = ARM64_HAS_FPSIMD,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.matches = has_cpuid_feature,
+		.cpu_enable = cpu_enable_fpsimd,
+		ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, FP, IMP)
 	},
 #ifdef CONFIG_ARM64_PMEM
 	{
@@ -2388,7 +2414,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.desc = "Scalable Vector Extension",
 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
 		.capability = ARM64_SVE,
-		.cpu_enable = sve_kernel_enable,
+		.cpu_enable = cpu_enable_sve,
 		.matches = has_cpuid_feature,
 		ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, SVE, IMP)
 	},
@@ -2405,16 +2431,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 #endif /* CONFIG_ARM64_RAS_EXTN */
 #ifdef CONFIG_ARM64_AMU_EXTN
 	{
-		/*
-		 * The feature is enabled by default if CONFIG_ARM64_AMU_EXTN=y.
-		 * Therefore, don't provide .desc as we don't want the detection
-		 * message to be shown until at least one CPU is detected to
-		 * support the feature.
-		 */
+		.desc = "Activity Monitors Unit (AMU)",
 		.capability = ARM64_HAS_AMU_EXTN,
 		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
 		.matches = has_amu,
 		.cpu_enable = cpu_amu_enable,
+		.cpus = &amu_cpus,
 		ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, AMU, IMP)
 	},
 #endif /* CONFIG_ARM64_AMU_EXTN */
@@ -2454,18 +2476,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 	},
 #ifdef CONFIG_ARM64_HW_AFDBM
 	{
-		/*
-		 * Since we turn this on always, we don't want the user to
-		 * think that the feature is available when it may not be.
-		 * So hide the description.
-		 *
-		 * .desc = "Hardware pagetable Dirty Bit Management",
-		 *
-		 */
+		.desc = "Hardware dirty bit management",
 		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
 		.capability = ARM64_HW_DBM,
 		.matches = has_hw_dbm,
 		.cpu_enable = cpu_enable_hw_dbm,
+		.cpus = &dbm_cpus,
 		ARM64_CPUID_FIELDS(ID_AA64MMFR1_EL1, HAFDBS, DBM)
 	},
 #endif
@@ -2641,7 +2657,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
 		.capability = ARM64_SME,
 		.matches = has_cpuid_feature,
-		.cpu_enable = sme_kernel_enable,
+		.cpu_enable = cpu_enable_sme,
 		ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, SME, IMP)
 	},
 	/* FA64 should be sorted after the base SME capability */
@@ -2650,7 +2666,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
 		.capability = ARM64_SME_FA64,
 		.matches = has_cpuid_feature,
-		.cpu_enable = fa64_kernel_enable,
+		.cpu_enable = cpu_enable_fa64,
 		ARM64_CPUID_FIELDS(ID_AA64SMFR0_EL1, FA64, IMP)
 	},
 	{
@@ -2658,7 +2674,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
 		.capability = ARM64_SME2,
 		.matches = has_cpuid_feature,
-		.cpu_enable = sme2_kernel_enable,
+		.cpu_enable = cpu_enable_sme2,
 		ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, SME, SME2)
 	},
 #endif /* CONFIG_ARM64_SME */
@@ -2787,6 +2803,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
 	HWCAP_CAP(ID_AA64ISAR0_EL1, SHA2, SHA512, CAP_HWCAP, KERNEL_HWCAP_SHA512),
 	HWCAP_CAP(ID_AA64ISAR0_EL1, CRC32, IMP, CAP_HWCAP, KERNEL_HWCAP_CRC32),
 	HWCAP_CAP(ID_AA64ISAR0_EL1, ATOMIC, IMP, CAP_HWCAP, KERNEL_HWCAP_ATOMICS),
+	HWCAP_CAP(ID_AA64ISAR0_EL1, ATOMIC, FEAT_LSE128, CAP_HWCAP, KERNEL_HWCAP_LSE128),
 	HWCAP_CAP(ID_AA64ISAR0_EL1, RDM, IMP, CAP_HWCAP, KERNEL_HWCAP_ASIMDRDM),
 	HWCAP_CAP(ID_AA64ISAR0_EL1, SHA3, IMP, CAP_HWCAP, KERNEL_HWCAP_SHA3),
 	HWCAP_CAP(ID_AA64ISAR0_EL1, SM3, IMP, CAP_HWCAP, KERNEL_HWCAP_SM3),
@@ -2807,6 +2824,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
 	HWCAP_CAP(ID_AA64ISAR1_EL1, FCMA, IMP, CAP_HWCAP, KERNEL_HWCAP_FCMA),
 	HWCAP_CAP(ID_AA64ISAR1_EL1, LRCPC, IMP, CAP_HWCAP, KERNEL_HWCAP_LRCPC),
 	HWCAP_CAP(ID_AA64ISAR1_EL1, LRCPC, LRCPC2, CAP_HWCAP, KERNEL_HWCAP_ILRCPC),
+	HWCAP_CAP(ID_AA64ISAR1_EL1, LRCPC, LRCPC3, CAP_HWCAP, KERNEL_HWCAP_LRCPC3),
 	HWCAP_CAP(ID_AA64ISAR1_EL1, FRINTTS, IMP, CAP_HWCAP, KERNEL_HWCAP_FRINT),
 	HWCAP_CAP(ID_AA64ISAR1_EL1, SB, IMP, CAP_HWCAP, KERNEL_HWCAP_SB),
 	HWCAP_CAP(ID_AA64ISAR1_EL1, BF16, IMP, CAP_HWCAP, KERNEL_HWCAP_BF16),
@@ -2821,6 +2839,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
 	HWCAP_CAP(ID_AA64ZFR0_EL1, AES, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEAES),
 	HWCAP_CAP(ID_AA64ZFR0_EL1, AES, PMULL128, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL),
 	HWCAP_CAP(ID_AA64ZFR0_EL1, BitPerm, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBITPERM),
+	HWCAP_CAP(ID_AA64ZFR0_EL1, B16B16, IMP, CAP_HWCAP, KERNEL_HWCAP_SVE_B16B16),
 	HWCAP_CAP(ID_AA64ZFR0_EL1, BF16, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBF16),
 	HWCAP_CAP(ID_AA64ZFR0_EL1, BF16, EBF16, CAP_HWCAP, KERNEL_HWCAP_SVE_EBF16),
 	HWCAP_CAP(ID_AA64ZFR0_EL1, SHA3, IMP, CAP_HWCAP, KERNEL_HWCAP_SVESHA3),
@@ -2981,7 +3000,7 @@ static void update_cpu_capabilities(u16 scope_mask)
 		    !caps->matches(caps, cpucap_default_scope(caps)))
 			continue;
 
-		if (caps->desc)
+		if (caps->desc && !caps->cpus)
 			pr_info("detected: %s\n", caps->desc);
 
 		__set_bit(caps->capability, system_cpucaps);
@@ -3153,36 +3172,28 @@ static void verify_local_elf_hwcaps(void)
 
 static void verify_sve_features(void)
 {
-	u64 safe_zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
-	u64 zcr = read_zcr_features();
+	unsigned long cpacr = cpacr_save_enable_kernel_sve();
 
-	unsigned int safe_len = safe_zcr & ZCR_ELx_LEN_MASK;
-	unsigned int len = zcr & ZCR_ELx_LEN_MASK;
-
-	if (len < safe_len || vec_verify_vq_map(ARM64_VEC_SVE)) {
+	if (vec_verify_vq_map(ARM64_VEC_SVE)) {
 		pr_crit("CPU%d: SVE: vector length support mismatch\n",
 			smp_processor_id());
 		cpu_die_early();
 	}
 
-	/* Add checks on other ZCR bits here if necessary */
+	cpacr_restore(cpacr);
 }
 
 static void verify_sme_features(void)
 {
-	u64 safe_smcr = read_sanitised_ftr_reg(SYS_SMCR_EL1);
-	u64 smcr = read_smcr_features();
-
-	unsigned int safe_len = safe_smcr & SMCR_ELx_LEN_MASK;
-	unsigned int len = smcr & SMCR_ELx_LEN_MASK;
+	unsigned long cpacr = cpacr_save_enable_kernel_sme();
 
-	if (len < safe_len || vec_verify_vq_map(ARM64_VEC_SME)) {
+	if (vec_verify_vq_map(ARM64_VEC_SME)) {
 		pr_crit("CPU%d: SME: vector length support mismatch\n",
 			smp_processor_id());
 		cpu_die_early();
 	}
 
-	/* Add checks on other SMCR bits here if necessary */
+	cpacr_restore(cpacr);
 }
 
 static void verify_hyp_capabilities(void)
@@ -3289,7 +3300,6 @@ EXPORT_SYMBOL_GPL(this_cpu_has_cap);
  * This helper function is used in a narrow window when,
  * - The system wide safe registers are set with all the SMP CPUs and,
  * - The SYSTEM_FEATURE system_cpucaps may not have been set.
- * In all other cases cpus_have_{const_}cap() should be used.
  */
 static bool __maybe_unused __system_matches_cap(unsigned int n)
 {
@@ -3328,23 +3338,50 @@ unsigned long cpu_get_elf_hwcap2(void)
 	return elf_hwcap[1];
 }
 
-static void __init setup_system_capabilities(void)
+void __init setup_system_features(void)
 {
+	int i;
 	/*
-	 * We have finalised the system-wide safe feature
-	 * registers, finalise the capabilities that depend
-	 * on it. Also enable all the available capabilities,
-	 * that are not enabled already.
+	 * The system-wide safe feature feature register values have been
+	 * finalized. Finalize and log the available system capabilities.
 	 */
 	update_cpu_capabilities(SCOPE_SYSTEM);
+	if (IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
+	    !cpus_have_cap(ARM64_HAS_PAN))
+		pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
+
+	/*
+	 * Enable all the available capabilities which have not been enabled
+	 * already.
+	 */
 	enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU);
+
+	kpti_install_ng_mappings();
+
+	sve_setup();
+	sme_setup();
+
+	/*
+	 * Check for sane CTR_EL0.CWG value.
+	 */
+	if (!cache_type_cwg())
+		pr_warn("No Cache Writeback Granule information, assuming %d\n",
+			ARCH_DMA_MINALIGN);
+
+	for (i = 0; i < ARM64_NCAPS; i++) {
+		const struct arm64_cpu_capabilities *caps = cpucap_ptrs[i];
+
+		if (caps && caps->cpus && caps->desc &&
+			cpumask_any(caps->cpus) < nr_cpu_ids)
+			pr_info("detected: %s on CPU%*pbl\n",
+				caps->desc, cpumask_pr_args(caps->cpus));
+	}
 }
 
-void __init setup_cpu_features(void)
+void __init setup_user_features(void)
 {
-	u32 cwg;
+	user_feature_fixup();
 
-	setup_system_capabilities();
 	setup_elf_hwcaps(arm64_elf_hwcaps);
 
 	if (system_supports_32bit_el0()) {
@@ -3352,20 +3389,7 @@ void __init setup_cpu_features(void)
 		elf_hwcap_fixup();
 	}
 
-	if (system_uses_ttbr0_pan())
-		pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
-
-	sve_setup();
-	sme_setup();
 	minsigstksz_setup();
-
-	/*
-	 * Check for sane CTR_EL0.CWG value.
-	 */
-	cwg = cache_type_cwg();
-	if (!cwg)
-		pr_warn("No Cache Writeback Granule information, assuming %d\n",
-			ARCH_DMA_MINALIGN);
 }
 
 static int enable_mismatched_32bit_el0(unsigned int cpu)
@@ -3422,7 +3446,7 @@ subsys_initcall_sync(init_32bit_el0_mask);
 
 static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap)
 {
-	cpu_replace_ttbr1(lm_alias(swapper_pg_dir), idmap_pg_dir);
+	cpu_enable_swapper_cnp();
 }
 
 /*
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 98fda85005..a257da7b56 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -127,6 +127,9 @@ static const char *const hwcap_str[] = {
 	[KERNEL_HWCAP_SME_F16F16]	= "smef16f16",
 	[KERNEL_HWCAP_MOPS]		= "mops",
 	[KERNEL_HWCAP_HBC]		= "hbc",
+	[KERNEL_HWCAP_SVE_B16B16]	= "sveb16b16",
+	[KERNEL_HWCAP_LRCPC3]		= "lrcpc3",
+	[KERNEL_HWCAP_LSE128]		= "lse128",
 };
 
 #ifdef CONFIG_COMPAT
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 2b478ca356..0228001347 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -71,10 +71,6 @@ static __init pteval_t create_mapping_protection(efi_memory_desc_t *md)
 	return pgprot_val(PAGE_KERNEL_EXEC);
 }
 
-/* we will fill this structure from the stub, so don't put it in .bss */
-struct screen_info screen_info __section(".data");
-EXPORT_SYMBOL(screen_info);
-
 int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
 {
 	pteval_t prot_val = create_mapping_protection(md);
@@ -113,8 +109,7 @@ static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data)
 		pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
 	if (md->attribute & EFI_MEMORY_XP)
 		pte = set_pte_bit(pte, __pgprot(PTE_PXN));
-	else if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) &&
-		 system_supports_bti() && spd->has_bti)
+	else if (system_supports_bti_kernel() && spd->has_bti)
 		pte = set_pte_bit(pte, __pgprot(PTE_GP));
 	set_pte(ptep, pte);
 	return 0;
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index f9b3adebcb..0898ac9979 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -589,7 +589,6 @@ static struct ctl_table sve_default_vl_table[] = {
 		.proc_handler	= vec_proc_do_default_vl,
 		.extra1		= &vl_info[ARM64_VEC_SVE],
 	},
-	{ }
 };
 
 static int __init sve_sysctl_init(void)
@@ -613,7 +612,6 @@ static struct ctl_table sme_default_vl_table[] = {
 		.proc_handler	= vec_proc_do_default_vl,
 		.extra1		= &vl_info[ARM64_VEC_SME],
 	},
-	{ }
 };
 
 static int __init sme_sysctl_init(void)
@@ -1160,44 +1158,20 @@ fail:
 	panic("Cannot allocate percpu memory for EFI SVE save/restore");
 }
 
-/*
- * Enable SVE for EL1.
- * Intended for use by the cpufeatures code during CPU boot.
- */
-void sve_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
+void cpu_enable_sve(const struct arm64_cpu_capabilities *__always_unused p)
 {
 	write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_ZEN_EL1EN, CPACR_EL1);
 	isb();
 }
 
-/*
- * Read the pseudo-ZCR used by cpufeatures to identify the supported SVE
- * vector length.
- *
- * Use only if SVE is present.
- * This function clobbers the SVE vector length.
- */
-u64 read_zcr_features(void)
-{
-	/*
-	 * Set the maximum possible VL, and write zeroes to all other
-	 * bits to see if they stick.
-	 */
-	sve_kernel_enable(NULL);
-	write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL1);
-
-	/* Return LEN value that would be written to get the maximum VL */
-	return sve_vq_from_vl(sve_get_vl()) - 1;
-}
-
 void __init sve_setup(void)
 {
 	struct vl_info *info = &vl_info[ARM64_VEC_SVE];
-	u64 zcr;
 	DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
 	unsigned long b;
+	int max_bit;
 
-	if (!system_supports_sve())
+	if (!cpus_have_cap(ARM64_SVE))
 		return;
 
 	/*
@@ -1208,17 +1182,8 @@ void __init sve_setup(void)
 	if (WARN_ON(!test_bit(__vq_to_bit(SVE_VQ_MIN), info->vq_map)))
 		set_bit(__vq_to_bit(SVE_VQ_MIN), info->vq_map);
 
-	zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
-	info->max_vl = sve_vl_from_vq((zcr & ZCR_ELx_LEN_MASK) + 1);
-
-	/*
-	 * Sanity-check that the max VL we determined through CPU features
-	 * corresponds properly to sve_vq_map. If not, do our best:
-	 */
-	if (WARN_ON(info->max_vl != find_supported_vector_length(ARM64_VEC_SVE,
-								 info->max_vl)))
-		info->max_vl = find_supported_vector_length(ARM64_VEC_SVE,
-							    info->max_vl);
+	max_bit = find_first_bit(info->vq_map, SVE_VQ_MAX);
+	info->max_vl = sve_vl_from_vq(__bit_to_vq(max_bit));
 
 	/*
 	 * For the default VL, pick the maximum supported value <= 64.
@@ -1298,7 +1263,7 @@ static void sme_free(struct task_struct *task)
 	task->thread.sme_state = NULL;
 }
 
-void sme_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
+void cpu_enable_sme(const struct arm64_cpu_capabilities *__always_unused p)
 {
 	/* Set priority for all PEs to architecturally defined minimum */
 	write_sysreg_s(read_sysreg_s(SYS_SMPRI_EL1) & ~SMPRI_EL1_PRIORITY_MASK,
@@ -1313,80 +1278,48 @@ void cpu_enable_sme(const struct arm64_cpu_capabilities *__always_unused p)
 	isb();
 }
 
-/*
- * This must be called after sme_kernel_enable(), we rely on the
- * feature table being sorted to ensure this.
- */
-void sme2_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
+void cpu_enable_sme2(const struct arm64_cpu_capabilities *__always_unused p)
 {
+	/* This must be enabled after SME */
+	BUILD_BUG_ON(ARM64_SME2 <= ARM64_SME);
+
 	/* Allow use of ZT0 */
 	write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_EZT0_MASK,
		       SYS_SMCR_EL1);
 }
 
-/*
- * This must be called after sme_kernel_enable(), we rely on the
- * feature table being sorted to ensure this.
- */
-void fa64_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
+void cpu_enable_fa64(const struct arm64_cpu_capabilities *__always_unused p)
 {
+	/* This must be enabled after SME */
+	BUILD_BUG_ON(ARM64_SME_FA64 <= ARM64_SME);
+
 	/* Allow use of FA64 */
 	write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_FA64_MASK,
		       SYS_SMCR_EL1);
 }
 
-/*
- * Read the pseudo-SMCR used by cpufeatures to identify the supported
- * vector length.
- *
- * Use only if SME is present.
- * This function clobbers the SME vector length.
- */
-u64 read_smcr_features(void)
-{
-	sme_kernel_enable(NULL);
-
-	/*
-	 * Set the maximum possible VL.
-	 */
-	write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_LEN_MASK,
-		       SYS_SMCR_EL1);
-
-	/* Return LEN value that would be written to get the maximum VL */
-	return sve_vq_from_vl(sme_get_vl()) - 1;
-}
-
 void __init sme_setup(void)
 {
 	struct vl_info *info = &vl_info[ARM64_VEC_SME];
-	u64 smcr;
-	int min_bit;
+	int min_bit, max_bit;
 
-	if (!system_supports_sme())
+	if (!cpus_have_cap(ARM64_SME))
 		return;
 
 	/*
 	 * SME doesn't require any particular vector length be
 	 * supported but it does require at least one. We should have
 	 * disabled the feature entirely while bringing up CPUs but
-	 * let's double check here.
+	 * let's double check here. The bitmap is SVE_VQ_MAP sized for
+	 * sharing with SVE.
 	 */
 	WARN_ON(bitmap_empty(info->vq_map, SVE_VQ_MAX));
 
 	min_bit = find_last_bit(info->vq_map, SVE_VQ_MAX);
 	info->min_vl = sve_vl_from_vq(__bit_to_vq(min_bit));
 
-	smcr = read_sanitised_ftr_reg(SYS_SMCR_EL1);
-	info->max_vl = sve_vl_from_vq((smcr & SMCR_ELx_LEN_MASK) + 1);
-
-	/*
-	 * Sanity-check that the max VL we determined through CPU features
-	 * corresponds properly to sme_vq_map. If not, do our best:
-	 */
-	if (WARN_ON(info->max_vl != find_supported_vector_length(ARM64_VEC_SME,
-								 info->max_vl)))
-		info->max_vl = find_supported_vector_length(ARM64_VEC_SME,
-							    info->max_vl);
+	max_bit = find_first_bit(info->vq_map, SVE_VQ_MAX);
+	info->max_vl = sve_vl_from_vq(__bit_to_vq(max_bit));
 
 	WARN_ON(info->min_vl > info->max_vl);
 
@@ -1406,6 +1339,22 @@ void __init sme_setup(void)
 		get_sme_default_vl());
 }
 
+void sme_suspend_exit(void)
+{
+	u64 smcr = 0;
+
+	if (!system_supports_sme())
+		return;
+
+	if (system_supports_fa64())
+		smcr |= SMCR_ELx_FA64;
+	if (system_supports_sme2())
+		smcr |= SMCR_ELx_EZT0;
+
+	write_sysreg_s(smcr, SYS_SMCR_EL1);
+	write_sysreg_s(0, SYS_SMPRI_EL1);
+}
+
 #endif /* CONFIG_ARM64_SME */
 
 static void sve_init_regs(void)
@@ -1531,8 +1480,17 @@ void do_sme_acc(unsigned long esr, struct pt_regs *regs)
  */
 void do_fpsimd_acc(unsigned long esr, struct pt_regs *regs)
 {
-	/* TODO: implement lazy context saving/restoring */
-	WARN_ON(1);
+	/* Even if we chose not to use FPSIMD, the hardware could still trap: */
+	if (!system_supports_fpsimd()) {
+		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
+		return;
+	}
+
+	/*
+	 * When FPSIMD is enabled, we should never take a trap unless something
+	 * has gone very wrong.
+	 */
+	BUG();
 }
 
 /*
@@ -1686,7 +1644,7 @@ void fpsimd_preserve_current_state(void)
 void fpsimd_signal_preserve_current_state(void)
 {
 	fpsimd_preserve_current_state();
-	if (test_thread_flag(TIF_SVE))
+	if (current->thread.fp_type == FP_STATE_SVE)
 		sve_to_fpsimd(current);
 }
 
@@ -1773,13 +1731,23 @@ void fpsimd_bind_state_to_cpu(struct cpu_fp_state *state)
 void fpsimd_restore_current_state(void)
 {
 	/*
-	 * For the tasks that were created before we detected the absence of
-	 * FP/SIMD, the TIF_FOREIGN_FPSTATE could be set via fpsimd_thread_switch(),
-	 * e.g, init. This could be then inherited by the children processes.
-	 * If we later detect that the system doesn't support FP/SIMD,
-	 * we must clear the flag for all the tasks to indicate that the
-	 * FPSTATE is clean (as we can't have one) to avoid looping for ever in
-	 * do_notify_resume().
+	 * TIF_FOREIGN_FPSTATE is set on the init task and copied by
+	 * arch_dup_task_struct() regardless of whether FP/SIMD is detected.
+	 * Thus user threads can have this set even when FP/SIMD hasn't been
+	 * detected.
+	 *
+	 * When FP/SIMD is detected, begin_new_exec() will set
+	 * TIF_FOREIGN_FPSTATE via flush_thread() -> fpsimd_flush_thread(),
+	 * and fpsimd_thread_switch() will set TIF_FOREIGN_FPSTATE when
+	 * switching tasks. We detect FP/SIMD before we exec the first user
+	 * process, ensuring this has TIF_FOREIGN_FPSTATE set and
+	 * do_notify_resume() will call fpsimd_restore_current_state() to
+	 * install the user FP/SIMD context.
+	 *
+	 * When FP/SIMD is not detected, nothing else will clear or set
+	 * TIF_FOREIGN_FPSTATE prior to the first return to userspace, and
+	 * we must clear TIF_FOREIGN_FPSTATE to avoid do_notify_resume()
+	 * looping forever calling fpsimd_restore_current_state().
 	 */
 	if (!system_supports_fpsimd()) {
 		clear_thread_flag(TIF_FOREIGN_FPSTATE);
@@ -2112,6 +2080,13 @@ static inline void fpsimd_hotplug_init(void)
 static inline void fpsimd_hotplug_init(void) { }
 #endif
 
+void cpu_enable_fpsimd(const struct arm64_cpu_capabilities *__always_unused p)
+{
+	unsigned long enable = CPACR_EL1_FPEN_EL1EN | CPACR_EL1_FPEN_EL0EN;
+	write_sysreg(read_sysreg(CPACR_EL1) | enable, CPACR_EL1);
+	isb();
+}
+
 /*
  * FP/SIMD support code initialisation.
 */
diff --git a/arch/arm64/kernel/idle.c b/arch/arm64/kernel/idle.c
index c1125753fe..05cfb347ec 100644
--- a/arch/arm64/kernel/idle.c
+++ b/arch/arm64/kernel/idle.c
@@ -20,7 +20,7 @@
 * ensure that interrupts are not masked at the PMR (because the core will
 * not wake up if we block the wake up signal in the interrupt controller).
 */
-void noinstr cpu_do_idle(void)
+void __cpuidle cpu_do_idle(void)
 {
	struct arm_cpuidle_irq_context context;
 
@@ -35,7 +35,7 @@ void noinstr cpu_do_idle(void)
 /*
 * This is our default idle handler.
 */
-void noinstr arch_cpu_idle(void)
+void __cpuidle arch_cpu_idle(void)
 {
	/*
	 * This should do all the clock switching and wait for interrupt
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index 35f3c79595..5e4dc72ab1 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -27,7 +27,9 @@ PROVIDE(__efistub__text = _text);
 PROVIDE(__efistub__end			= _end);
 PROVIDE(__efistub___inittext_end	= __inittext_end);
 PROVIDE(__efistub__edata		= _edata);
+#if defined(CONFIG_EFI_EARLYCON) || defined(CONFIG_SYSFB)
 PROVIDE(__efistub_screen_info		= screen_info);
+#endif
 PROVIDE(__efistub__ctype		= _ctype);
 
 PROVIDE(__pi___memcpy			= __pi_memcpy);
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 6ad5c6ef53..85087e2df5 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -22,6 +22,7 @@
 #include <linux/vmalloc.h>
 #include <asm/daifflags.h>
 #include <asm/exception.h>
+#include <asm/numa.h>
 #include <asm/softirq_stack.h>
 #include <asm/stacktrace.h>
 #include <asm/vmap_stack.h>
@@ -47,17 +48,17 @@ static void init_irq_scs(void)
 
 	for_each_possible_cpu(cpu)
 		per_cpu(irq_shadow_call_stack_ptr, cpu) =
-			scs_alloc(cpu_to_node(cpu));
+			scs_alloc(early_cpu_to_node(cpu));
 }
 
 #ifdef CONFIG_VMAP_STACK
-static void init_irq_stacks(void)
+static void __init init_irq_stacks(void)
 {
 	int cpu;
 	unsigned long *p;
 
 	for_each_possible_cpu(cpu) {
-		p = arch_alloc_vmap_stack(IRQ_STACK_SIZE, cpu_to_node(cpu));
+		p = arch_alloc_vmap_stack(IRQ_STACK_SIZE, early_cpu_to_node(cpu));
 		per_cpu(irq_stack_ptr, cpu) = p;
 	}
 }
diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c
index 79200f21e1..bde32979c0 100644
--- a/arch/arm64/kernel/module-plts.c
+++ b/arch/arm64/kernel/module-plts.c
@@ -200,8 +200,7 @@ static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
 			break;
 		case R_AARCH64_ADR_PREL_PG_HI21_NC:
 		case R_AARCH64_ADR_PREL_PG_HI21:
-			if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) ||
-			    !cpus_have_const_cap(ARM64_WORKAROUND_843419))
+			if (!cpus_have_final_cap(ARM64_WORKAROUND_843419))
 				break;
 
 			/*
@@ -236,13 +235,13 @@ static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
 		}
 	}
 
-	if (IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) &&
-	    cpus_have_const_cap(ARM64_WORKAROUND_843419))
+	if (cpus_have_final_cap(ARM64_WORKAROUND_843419)) {
 		/*
 		 * Add some slack so we can skip PLT slots that may trigger
 		 * the erratum due to the placement of the ADRP instruction.
 		 */
 		ret += DIV_ROUND_UP(ret, (SZ_4K / sizeof(struct plt_entry)));
+	}
 
 	return ret;
 }
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index 4edecaac8f..a41ef3213e 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -35,10 +35,10 @@ DEFINE_STATIC_KEY_FALSE(mte_async_or_asymm_mode);
 EXPORT_SYMBOL_GPL(mte_async_or_asymm_mode);
 #endif
 
-void mte_sync_tags(pte_t pte)
+void mte_sync_tags(pte_t pte, unsigned int nr_pages)
 {
 	struct page *page = pte_page(pte);
-	long i, nr_pages = compound_nr(page);
+	unsigned int i;
 
 	/* if PG_mte_tagged is set, tags have already been initialised */
 	for (i = 0; i < nr_pages; i++, page++) {
@@ -411,8 +411,8 @@ static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
 		struct page *page = get_user_page_vma_remote(mm, addr,
 							     gup_flags, &vma);
 
-		if (IS_ERR_OR_NULL(page)) {
-			err = page == NULL ? -EIO : PTR_ERR(page);
+		if (IS_ERR(page)) {
+			err = PTR_ERR(page);
 			break;
 		}
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 0fcc4eb1a7..7387b68c74 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -454,7 +454,7 @@ static void ssbs_thread_switch(struct task_struct *next)
 	 * If all CPUs implement the SSBS extension, then we just need to
 	 * context-switch the PSTATE field.
 	 */
-	if (cpus_have_const_cap(ARM64_SSBS))
+	if (alternative_has_cap_unlikely(ARM64_SSBS))
 		return;
 
 	spectre_v4_enable_task_mitigation(next);
@@ -724,7 +724,6 @@ static struct ctl_table tagged_addr_sysctl_table[] = {
 		.extra1		= SYSCTL_ZERO,
 		.extra2		= SYSCTL_ONE,
 	},
-	{ }
 };
 
 static int __init tagged_addr_init(void)
diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
index 05f40c4e18..6268a13a1d 100644
--- a/arch/arm64/kernel/proton-pack.c
+++ b/arch/arm64/kernel/proton-pack.c
@@ -972,7 +972,7 @@ static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
 	 * When KPTI is in use, the vectors are switched when exiting to
 	 * user-space.
 	 */
-	if (arm64_kernel_unmapped_at_el0())
+	if (cpus_have_cap(ARM64_UNMAP_KERNEL_AT_EL0))
 		return;
 
 	write_sysreg(v, vbar_el1);
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 0e8beb3349..425b1bc17a 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -242,7 +242,7 @@ static int preserve_sve_context(struct sve_context __user *ctx)
 		vl = task_get_sme_vl(current);
 		vq = sve_vq_from_vl(vl);
 		flags |= SVE_SIG_FLAG_SM;
-	} else if (test_thread_flag(TIF_SVE)) {
+	} else if (current->thread.fp_type == FP_STATE_SVE) {
 		vq = sve_vq_from_vl(vl);
 	}
 
@@ -878,7 +878,7 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
 	if (system_supports_sve() || system_supports_sme()) {
 		unsigned int vq = 0;
 
-		if (add_all || test_thread_flag(TIF_SVE) ||
+		if (add_all || current->thread.fp_type == FP_STATE_SVE ||
 		    thread_sm_enabled(&current->thread)) {
 			int vl = max(sve_max_vl(), sme_max_vl());
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 960b98b435..defbab84e9 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -32,7 +32,9 @@
 #include <linux/irq_work.h>
 #include <linux/kernel_stat.h>
 #include <linux/kexec.h>
+#include <linux/kgdb.h>
 #include <linux/kvm_host.h>
+#include <linux/nmi.h>
 
 #include <asm/alternative.h>
 #include <asm/atomic.h>
@@ -72,13 +74,19 @@ enum ipi_msg_type {
 	IPI_CPU_CRASH_STOP,
 	IPI_TIMER,
 	IPI_IRQ_WORK,
-	IPI_WAKEUP,
-	NR_IPI
+	NR_IPI,
+	/*
+	 * Any enum >= NR_IPI and < MAX_IPI is special and not tracable
+	 * with trace_ipi_*
+	 */
+	IPI_CPU_BACKTRACE = NR_IPI,
+	IPI_KGDB_ROUNDUP,
+	MAX_IPI
 };
 
-static int ipi_irq_base __read_mostly;
-static int nr_ipi __read_mostly = NR_IPI;
-static struct irq_desc *ipi_desc[NR_IPI] __read_mostly;
+static int ipi_irq_base __ro_after_init;
+static int nr_ipi __ro_after_init = NR_IPI;
+static struct irq_desc *ipi_desc[MAX_IPI] __ro_after_init;
 
 static void ipi_setup(int cpu);
 
@@ -215,7 +223,7 @@ asmlinkage notrace void secondary_start_kernel(void)
 	if (system_uses_irq_prio_masking())
 		init_gic_priority_masking();
 
-	rcu_cpu_starting(cpu);
+	rcutree_report_cpu_starting(cpu);
 	trace_hardirqs_off();
 
 	/*
@@ -401,7 +409,7 @@ void __noreturn cpu_die_early(void)
 	/* Mark this CPU absent */
 	set_cpu_present(cpu, 0);
-	rcu_report_dead(cpu);
+	rcutree_report_cpu_dead();
 
 	if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
 		update_cpu_boot_status(CPU_KILL_ME);
@@ -431,9 +439,10 @@ static void __init hyp_mode_check(void)
 void __init smp_cpus_done(unsigned int max_cpus)
 {
 	pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
-	setup_cpu_features();
+	setup_system_features();
 	hyp_mode_check();
 	apply_alternatives_all();
+	setup_user_features();
 	mark_linear_text_alias_ro();
 }
 
@@ -520,7 +529,7 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
 {
 	u64 hwid = processor->arm_mpidr;
 
-	if (!(processor->flags & ACPI_MADT_ENABLED)) {
+	if (!acpi_gicc_is_usable(processor)) {
 		pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
 		return;
 	}
@@ -764,7 +773,6 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
 	[IPI_CPU_CRASH_STOP]	= "CPU stop (for crash dump) interrupts",
 	[IPI_TIMER]		= "Timer broadcast interrupts",
 	[IPI_IRQ_WORK]		= "IRQ work interrupts",
-	[IPI_WAKEUP]		= "CPU wake-up interrupts",
 };
 
 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);
@@ -797,13 +805,6 @@ void arch_send_call_function_single_ipi(int cpu)
 	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
 }
 
-#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
-void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
-{
-	smp_cross_call(mask, IPI_WAKEUP);
-}
-#endif
-
 #ifdef CONFIG_IRQ_WORK
 void arch_irq_work_raise(void)
 {
@@ -854,6 +855,38 @@ static void __noreturn ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
 #endif
 }
 
+static void arm64_backtrace_ipi(cpumask_t *mask)
+{
+	__ipi_send_mask(ipi_desc[IPI_CPU_BACKTRACE], mask);
+}
+
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
+{
+	/*
+	 * NOTE: though nmi_trigger_cpumask_backtrace() has "nmi_" in the name,
+	 * nothing about it truly needs to be implemented using an NMI, it's
+	 * just that it's _allowed_ to work with NMIs. If ipi_should_be_nmi()
+	 * returned false our backtrace attempt will just use a regular IPI.
+	 */
+	nmi_trigger_cpumask_backtrace(mask, exclude_cpu, arm64_backtrace_ipi);
+}
+
+#ifdef CONFIG_KGDB
+void kgdb_roundup_cpus(void)
+{
+	int this_cpu = raw_smp_processor_id();
+	int cpu;
+
+	for_each_online_cpu(cpu) {
+		/* No need to roundup ourselves */
+		if (cpu == this_cpu)
+			continue;
+
+		__ipi_send_single(ipi_desc[IPI_KGDB_ROUNDUP], cpu);
+	}
+}
+#endif
+
 /*
  * Main handler for inter-processor interrupts
 */
@@ -897,13 +930,17 @@ static void do_handle_IPI(int ipinr)
 		break;
 #endif
 
-#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
-	case IPI_WAKEUP:
-		WARN_ONCE(!acpi_parking_protocol_valid(cpu),
-			  "CPU%u: Wake-up IPI outside the ACPI parking protocol\n",
-			  cpu);
+	case IPI_CPU_BACKTRACE:
+		/*
+		 * NOTE: in some cases this _won't_ be NMI context. See the
+		 * comment in arch_trigger_cpumask_backtrace().
+		 */
+		nmi_cpu_backtrace(get_irq_regs());
+		break;
+
+	case IPI_KGDB_ROUNDUP:
+		kgdb_nmicallback(cpu, get_irq_regs());
 		break;
-#endif
 
 	default:
 		pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
@@ -926,6 +963,22 @@ static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
 	__ipi_send_mask(ipi_desc[ipinr], target);
 }
 
+static bool ipi_should_be_nmi(enum ipi_msg_type ipi)
+{
+	if (!system_uses_irq_prio_masking())
+		return false;
+
+	switch (ipi) {
+	case IPI_CPU_STOP:
+	case IPI_CPU_CRASH_STOP:
+	case IPI_CPU_BACKTRACE:
+	case IPI_KGDB_ROUNDUP:
+		return true;
+	default:
+		return false;
+	}
+}
+
 static void ipi_setup(int cpu)
 {
 	int i;
@@ -933,8 +986,14 @@ static void ipi_setup(int cpu)
 	if (WARN_ON_ONCE(!ipi_irq_base))
 		return;
 
-	for (i = 0; i < nr_ipi; i++)
-		enable_percpu_irq(ipi_irq_base + i, 0);
+	for (i = 0; i < nr_ipi; i++) {
+		if (ipi_should_be_nmi(i)) {
+			prepare_percpu_nmi(ipi_irq_base + i);
+			enable_percpu_nmi(ipi_irq_base + i, 0);
+		} else {
+			enable_percpu_irq(ipi_irq_base + i, 0);
+		}
+	}
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -945,8 +1004,14 @@ static void ipi_teardown(int cpu)
 	if (WARN_ON_ONCE(!ipi_irq_base))
 		return;
 
-	for (i = 0; i < nr_ipi; i++)
-		disable_percpu_irq(ipi_irq_base + i);
+	for (i = 0; i < nr_ipi; i++) {
+		if (ipi_should_be_nmi(i)) {
+			disable_percpu_nmi(ipi_irq_base + i);
+			teardown_percpu_nmi(ipi_irq_base + i);
+		} else {
+			disable_percpu_irq(ipi_irq_base + i);
+		}
+	}
 }
 #endif
 
@@ -954,15 +1019,23 @@ void __init set_smp_ipi_range(int ipi_base, int n)
 {
 	int i;
 
-	WARN_ON(n < NR_IPI);
-	nr_ipi = min(n, NR_IPI);
+	WARN_ON(n < MAX_IPI);
+	nr_ipi = min(n, MAX_IPI);
 
 	for (i = 0; i < nr_ipi; i++) {
 		int err;
 
-		err = request_percpu_irq(ipi_base + i, ipi_handler,
-					 "IPI", &cpu_number);
-		WARN_ON(err);
+		if (ipi_should_be_nmi(i)) {
+			err = request_percpu_nmi(ipi_base + i, ipi_handler,
+						 "IPI", &cpu_number);
+			WARN(err, "Could not request IPI %d as NMI, err=%d\n",
+			     i, err);
+		} else {
+			err = request_percpu_irq(ipi_base + i, ipi_handler,
+						 "IPI", &cpu_number);
+			WARN(err, "Could not request IPI %d as IRQ, err=%d\n",
+			     i, err);
+		}
 
 		ipi_desc[i] = irq_to_desc(ipi_base + i);
 		irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
@@ -979,6 +1052,17 @@ void arch_smp_send_reschedule(int cpu)
 	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
+#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
+void arch_send_wakeup_ipi(unsigned int cpu)
+{
+	/*
+	 * We use a scheduler IPI to wake the CPU as this avoids the need for a
+	 * dedicated IPI and we can safely handle spurious scheduler IPIs.
+	 */
+	smp_send_reschedule(cpu);
+}
+#endif
+
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 void tick_broadcast(const struct cpumask *mask)
 {
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 0fbdf5fe64..eaaff94329 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -12,6 +12,7 @@
 #include <asm/daifflags.h>
 #include <asm/debug-monitors.h>
 #include <asm/exec.h>
+#include <asm/fpsimd.h>
 #include <asm/mte.h>
 #include <asm/memory.h>
 #include <asm/mmu_context.h>
@@ -55,13 +56,13 @@ void notrace __cpu_suspend_exit(void)
 
 	/* Restore CnP bit in TTBR1_EL1 */
 	if (system_supports_cnp())
-		cpu_replace_ttbr1(lm_alias(swapper_pg_dir), idmap_pg_dir);
+		cpu_enable_swapper_cnp();
 
 	/*
 	 * PSTATE was not saved over suspend/resume, re-enable any detected
 	 * features that might not have been set correctly.
 	 */
-	if (cpus_have_const_cap(ARM64_HAS_DIT))
+	if (alternative_has_cap_unlikely(ARM64_HAS_DIT))
 		set_pstate_dit(1);
 	__uaccess_enable_hw_pan();
@@ -80,6 +81,8 @@ void notrace __cpu_suspend_exit(void)
 	 */
 	spectre_v4_enable_mitigation(NULL);
 
+	sme_suspend_exit();
+
 	/* Restore additional feature-specific configuration */
 	ptrauth_suspend_exit();
 }
@@ -98,6 +101,15 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 	struct sleep_stack_data state;
 	struct arm_cpuidle_irq_context context;
 
+	/*
+	 * Some portions of CPU state (e.g. PSTATE.{PAN,DIT}) are initialized
+	 * before alternatives are patched, but are only restored by
+	 * __cpu_suspend_exit() after alternatives are patched. To avoid
+	 * accidentally losing these bits we must not attempt to suspend until
+	 * after alternatives have been patched.
+	 */
+	WARN_ON(!system_capabilities_finalized());
+
 	/* Report any MTE async fault before going to suspend */
 	mte_suspend_enter();
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index df14336c3a..4a609e9b65 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -31,7 +31,7 @@ __do_compat_cache_op(unsigned long start, unsigned long end)
 		if (fatal_signal_pending(current))
 			return 0;
 
-		if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
+		if (cpus_have_final_cap(ARM64_WORKAROUND_1542419)) {
 			/*
 			 * The workaround requires an inner-shareable tlbi.
 			 * We pick the reserved-ASID to minimise the impact.
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 8b70759cdb..215e6d7f2d 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -516,53 +516,7 @@ void do_el1_fpac(struct pt_regs *regs, unsigned long esr)
 
 void do_el0_mops(struct pt_regs *regs, unsigned long esr)
 {
-	bool wrong_option = esr & ESR_ELx_MOPS_ISS_WRONG_OPTION;
-	bool option_a = esr & ESR_ELx_MOPS_ISS_OPTION_A;
-	int dstreg = ESR_ELx_MOPS_ISS_DESTREG(esr);
-	int srcreg = ESR_ELx_MOPS_ISS_SRCREG(esr);
-	int sizereg = ESR_ELx_MOPS_ISS_SIZEREG(esr);
-	unsigned long dst, src, size;
-
-	dst = pt_regs_read_reg(regs, dstreg);
-	src = pt_regs_read_reg(regs, srcreg);
-	size = pt_regs_read_reg(regs, sizereg);
-
-	/*
-	 * Put the registers back in the original format suitable for a
-	 * prologue instruction, using the generic return routine from the
-	 * Arm ARM (DDI 0487I.a) rules CNTMJ and MWFQH.
-	 */
-	if (esr & ESR_ELx_MOPS_ISS_MEM_INST) {
-		/* SET* instruction */
-		if (option_a ^ wrong_option) {
-			/* Format is from Option A; forward set */
-			pt_regs_write_reg(regs, dstreg, dst + size);
-			pt_regs_write_reg(regs, sizereg, -size);
-		}
-	} else {
-		/* CPY* instruction */
-		if (!(option_a ^ wrong_option)) {
-			/* Format is from Option B */
-			if (regs->pstate & PSR_N_BIT) {
-				/* Backward copy */
-				pt_regs_write_reg(regs, dstreg, dst - size);
-				pt_regs_write_reg(regs, srcreg, src - size);
-			}
-		} else {
-			/* Format is from Option A */
-			if (size & BIT(63)) {
-				/* Forward copy */
-				pt_regs_write_reg(regs, dstreg, dst + size);
-				pt_regs_write_reg(regs, srcreg, src + size);
-				pt_regs_write_reg(regs, sizereg, -size);
-			}
-		}
-	}
-
-	if (esr & ESR_ELx_MOPS_ISS_FROM_EPILOGUE)
-		regs->pc -= 8;
-	else
-		regs->pc -= 4;
+	arm64_mops_reset_regs(&regs->user_regs, esr);
 
 	/*
 	 * If single stepping then finish the step before executing the
@@ -631,7 +585,7 @@ static void ctr_read_handler(unsigned long esr, struct pt_regs *regs)
 	int rt = ESR_ELx_SYS64_ISS_RT(esr);
 	unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);
 
-	if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
+	if (cpus_have_final_cap(ARM64_WORKAROUND_1542419)) {
 		/* Hide DIC so that we can trap the unnecessary maintenance...*/
 		val &= ~BIT(CTR_EL0_DIC_SHIFT);
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index d9e1355730..5562daf38a 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -212,7 +212,7 @@ static int __setup_additional_pages(enum vdso_abi abi,
 	if (IS_ERR(ret))
 		goto up_fail;
 
-	if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && system_supports_bti())
+	if (system_supports_bti_kernel())
 		gp_flags = VM_ARM64_BTI;
 
 	vdso_base += VVAR_NR_PAGES * PAGE_SIZE;
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index fe7a53c678..8818287f10 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -78,13 +78,3 @@ include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
 # Actual build commands
 quiet_cmd_vdsold_and_vdso_check = LD      $@
       cmd_vdsold_and_vdso_check = $(cmd_ld); $(cmd_vdso_check)
-
-# Install commands for the unstripped file
-quiet_cmd_vdso_install = INSTALL $@
-      cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
-
-vdso.so: $(obj)/vdso.so.dbg
-	@mkdir -p $(MODLIB)/vdso
-	$(call cmd,vdso_install)
-
-vdso_install: vdso.so
diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
index 2f73e5bca2..1f911a76c5 100644
--- a/arch/arm64/kernel/vdso32/Makefile
+++ b/arch/arm64/kernel/vdso32/Makefile
@@ -172,13 +172,3 @@ gen-vdsosym := $(srctree)/$(src)/../vdso/gen_vdso_offsets.sh
 quiet_cmd_vdsosym = VDSOSYM $@
 # The AArch64 nm should be able to read an AArch32 binary
       cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
-
-# Install commands for the unstripped file
-quiet_cmd_vdso_install = INSTALL32 $@
-      cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/vdso32.so
-
-vdso.so: $(obj)/vdso.so.dbg
-	@mkdir -p $(MODLIB)/vdso
-	$(call cmd,vdso_install)
-
-vdso_install: vdso.so