path: root/arch/arm64/include
author      Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:39:57 +0000
committer   Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:39:57 +0000
commit      dc50eab76b709d68175a358d6e23a5a3890764d3 (patch)
tree        c754d0390db060af0213ff994f0ac310e4cfd6e9 /arch/arm64/include
parent      Adding debian version 6.6.15-2. (diff)
download    linux-dc50eab76b709d68175a358d6e23a5a3890764d3.tar.xz
            linux-dc50eab76b709d68175a358d6e23a5a3890764d3.zip
Merging upstream version 6.7.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/arm64/include')
-rw-r--r--  arch/arm64/include/asm/Kbuild | 2
-rw-r--r--  arch/arm64/include/asm/alternative-macros.h | 12
-rw-r--r--  arch/arm64/include/asm/arch_gicv3.h | 8
-rw-r--r--  arch/arm64/include/asm/archrandom.h | 2
-rw-r--r--  arch/arm64/include/asm/cacheflush.h | 2
-rw-r--r--  arch/arm64/include/asm/cpu.h | 6
-rw-r--r--  arch/arm64/include/asm/cpucaps.h | 67
-rw-r--r--  arch/arm64/include/asm/cpufeature.h | 98
-rw-r--r--  arch/arm64/include/asm/cputype.h | 4
-rw-r--r--  arch/arm64/include/asm/crash_core.h | 10
-rw-r--r--  arch/arm64/include/asm/fpsimd.h | 38
-rw-r--r--  arch/arm64/include/asm/hwcap.h | 3
-rw-r--r--  arch/arm64/include/asm/irq.h | 3
-rw-r--r--  arch/arm64/include/asm/irq_work.h | 2
-rw-r--r--  arch/arm64/include/asm/irqflags.h | 20
-rw-r--r--  arch/arm64/include/asm/jump_label.h | 4
-rw-r--r--  arch/arm64/include/asm/kprobes.h | 2
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h | 4
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h | 19
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 63
-rw-r--r--  arch/arm64/include/asm/kvm_hyp.h | 7
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h | 47
-rw-r--r--  arch/arm64/include/asm/kvm_nested.h | 3
-rw-r--r--  arch/arm64/include/asm/lse.h | 9
-rw-r--r--  arch/arm64/include/asm/mmu.h | 2
-rw-r--r--  arch/arm64/include/asm/mmu_context.h | 28
-rw-r--r--  arch/arm64/include/asm/module.h | 3
-rw-r--r--  arch/arm64/include/asm/mte.h | 4
-rw-r--r--  arch/arm64/include/asm/pgtable-prot.h | 6
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 34
-rw-r--r--  arch/arm64/include/asm/smp.h | 4
-rw-r--r--  arch/arm64/include/asm/spectre.h | 2
-rw-r--r--  arch/arm64/include/asm/stage2_pgtable.h | 4
-rw-r--r--  arch/arm64/include/asm/syscall_wrapper.h | 1
-rw-r--r--  arch/arm64/include/asm/sysreg.h | 45
-rw-r--r--  arch/arm64/include/asm/tlb.h | 5
-rw-r--r--  arch/arm64/include/asm/tlbflush.h | 15
-rw-r--r--  arch/arm64/include/asm/traps.h | 54
-rw-r--r--  arch/arm64/include/asm/unistd.h | 2
-rw-r--r--  arch/arm64/include/asm/unistd32.h | 12
-rw-r--r--  arch/arm64/include/asm/vectors.h | 2
-rw-r--r--  arch/arm64/include/uapi/asm/hwcap.h | 3
-rw-r--r--  arch/arm64/include/uapi/asm/kvm.h | 32
43 files changed, 494 insertions, 199 deletions
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 5c8ee5a54..4b6d2d520 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -6,5 +6,5 @@ generic-y += qspinlock.h
generic-y += parport.h
generic-y += user.h
-generated-y += cpucaps.h
+generated-y += cpucap-defs.h
generated-y += sysreg-defs.h
diff --git a/arch/arm64/include/asm/alternative-macros.h b/arch/arm64/include/asm/alternative-macros.h
index 94b486192..d328f549b 100644
--- a/arch/arm64/include/asm/alternative-macros.h
+++ b/arch/arm64/include/asm/alternative-macros.h
@@ -226,10 +226,10 @@ alternative_endif
static __always_inline bool
alternative_has_cap_likely(const unsigned long cpucap)
{
- compiletime_assert(cpucap < ARM64_NCAPS,
- "cpucap must be < ARM64_NCAPS");
+ if (!cpucap_is_possible(cpucap))
+ return false;
- asm_volatile_goto(
+ asm goto(
ALTERNATIVE_CB("b %l[l_no]", %[cpucap], alt_cb_patch_nops)
:
: [cpucap] "i" (cpucap)
@@ -244,10 +244,10 @@ l_no:
static __always_inline bool
alternative_has_cap_unlikely(const unsigned long cpucap)
{
- compiletime_assert(cpucap < ARM64_NCAPS,
- "cpucap must be < ARM64_NCAPS");
+ if (!cpucap_is_possible(cpucap))
+ return false;
- asm_volatile_goto(
+ asm goto(
ALTERNATIVE("nop", "b %l[l_yes]", %[cpucap])
:
: [cpucap] "i" (cpucap)
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 01281a533..5f1726116 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -79,6 +79,14 @@ static inline u64 gic_read_iar_cavium_thunderx(void)
return 0x3ff;
}
+static u64 __maybe_unused gic_read_iar(void)
+{
+ if (alternative_has_cap_unlikely(ARM64_WORKAROUND_CAVIUM_23154))
+ return gic_read_iar_cavium_thunderx();
+ else
+ return gic_read_iar_common();
+}
+
static inline void gic_write_ctlr(u32 val)
{
write_sysreg_s(val, SYS_ICC_CTLR_EL1);
diff --git a/arch/arm64/include/asm/archrandom.h b/arch/arm64/include/asm/archrandom.h
index b0abc64f8..ecdb3cfcd 100644
--- a/arch/arm64/include/asm/archrandom.h
+++ b/arch/arm64/include/asm/archrandom.h
@@ -63,7 +63,7 @@ static __always_inline bool __cpu_has_rng(void)
{
if (unlikely(!system_capabilities_finalized() && !preemptible()))
return this_cpu_has_cap(ARM64_HAS_RNG);
- return cpus_have_const_cap(ARM64_HAS_RNG);
+ return alternative_has_cap_unlikely(ARM64_HAS_RNG);
}
static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs)
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index d115451ed..fefac75fa 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -132,7 +132,7 @@ void flush_dcache_folio(struct folio *);
static __always_inline void icache_inval_all_pou(void)
{
- if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
+ if (alternative_has_cap_unlikely(ARM64_HAS_CACHE_DIC))
return;
asm("ic ialluis");
diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h
index e749838b9..f3034099f 100644
--- a/arch/arm64/include/asm/cpu.h
+++ b/arch/arm64/include/asm/cpu.h
@@ -63,12 +63,6 @@ struct cpuinfo_arm64 {
u64 reg_id_aa64smfr0;
struct cpuinfo_32bit aarch32;
-
- /* pseudo-ZCR for recording maximum ZCR_EL1 LEN value: */
- u64 reg_zcr;
-
- /* pseudo-SMCR for recording maximum SMCR_EL1 LEN value: */
- u64 reg_smcr;
};
DECLARE_PER_CPU(struct cpuinfo_arm64, cpu_data);
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
new file mode 100644
index 000000000..270680e2b
--- /dev/null
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_CPUCAPS_H
+#define __ASM_CPUCAPS_H
+
+#include <asm/cpucap-defs.h>
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+/*
+ * Check whether a cpucap is possible at compiletime.
+ */
+static __always_inline bool
+cpucap_is_possible(const unsigned int cap)
+{
+ compiletime_assert(__builtin_constant_p(cap),
+ "cap must be a constant");
+ compiletime_assert(cap < ARM64_NCAPS,
+ "cap must be < ARM64_NCAPS");
+
+ switch (cap) {
+ case ARM64_HAS_PAN:
+ return IS_ENABLED(CONFIG_ARM64_PAN);
+ case ARM64_HAS_EPAN:
+ return IS_ENABLED(CONFIG_ARM64_EPAN);
+ case ARM64_SVE:
+ return IS_ENABLED(CONFIG_ARM64_SVE);
+ case ARM64_SME:
+ case ARM64_SME2:
+ case ARM64_SME_FA64:
+ return IS_ENABLED(CONFIG_ARM64_SME);
+ case ARM64_HAS_CNP:
+ return IS_ENABLED(CONFIG_ARM64_CNP);
+ case ARM64_HAS_ADDRESS_AUTH:
+ case ARM64_HAS_GENERIC_AUTH:
+ return IS_ENABLED(CONFIG_ARM64_PTR_AUTH);
+ case ARM64_HAS_GIC_PRIO_MASKING:
+ return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI);
+ case ARM64_MTE:
+ return IS_ENABLED(CONFIG_ARM64_MTE);
+ case ARM64_BTI:
+ return IS_ENABLED(CONFIG_ARM64_BTI);
+ case ARM64_HAS_TLB_RANGE:
+ return IS_ENABLED(CONFIG_ARM64_TLB_RANGE);
+ case ARM64_UNMAP_KERNEL_AT_EL0:
+ return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0);
+ case ARM64_WORKAROUND_843419:
+ return IS_ENABLED(CONFIG_ARM64_ERRATUM_843419);
+ case ARM64_WORKAROUND_1742098:
+ return IS_ENABLED(CONFIG_ARM64_ERRATUM_1742098);
+ case ARM64_WORKAROUND_2645198:
+ return IS_ENABLED(CONFIG_ARM64_ERRATUM_2645198);
+ case ARM64_WORKAROUND_2658417:
+ return IS_ENABLED(CONFIG_ARM64_ERRATUM_2658417);
+ case ARM64_WORKAROUND_CAVIUM_23154:
+ return IS_ENABLED(CONFIG_CAVIUM_ERRATUM_23154);
+ case ARM64_WORKAROUND_NVIDIA_CARMEL_CNP:
+ return IS_ENABLED(CONFIG_NVIDIA_CARMEL_CNP_ERRATUM);
+ case ARM64_WORKAROUND_REPEAT_TLBI:
+ return IS_ENABLED(CONFIG_ARM64_WORKAROUND_REPEAT_TLBI);
+ }
+
+ return true;
+}
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_CPUCAPS_H */
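For illustration only (not part of the patch): because cpucap_is_possible() is __always_inline and switches on a compile-time-constant cap, a hypothetical caller like the sketch below collapses to a constant when the corresponding Kconfig option is off, so no alternative branch is emitted at all.

static __always_inline bool example_can_use_sve(void)
{
	/*
	 * Hypothetical caller, illustrative name only. With CONFIG_ARM64_SVE=n,
	 * cpucap_is_possible(ARM64_SVE) folds to false inside
	 * alternative_has_cap_unlikely(), so this compiles to "return false"
	 * and the asm goto alternative is never generated.
	 */
	return alternative_has_cap_unlikely(ARM64_SVE);
}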
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 5bba39376..f6d416fe4 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -23,6 +23,7 @@
#include <linux/bug.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
+#include <linux/cpumask.h>
/*
* CPU feature register tracking
@@ -380,6 +381,7 @@ struct arm64_cpu_capabilities {
* method is robust against being called multiple times.
*/
const struct arm64_cpu_capabilities *match_list;
+ const struct cpumask *cpus;
};
static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap)
@@ -438,6 +440,11 @@ unsigned long cpu_get_elf_hwcap2(void);
#define cpu_set_named_feature(name) cpu_set_feature(cpu_feature(name))
#define cpu_have_named_feature(name) cpu_have_feature(cpu_feature(name))
+static __always_inline bool boot_capabilities_finalized(void)
+{
+ return alternative_has_cap_likely(ARM64_ALWAYS_BOOT);
+}
+
static __always_inline bool system_capabilities_finalized(void)
{
return alternative_has_cap_likely(ARM64_ALWAYS_SYSTEM);
@@ -450,6 +457,8 @@ static __always_inline bool system_capabilities_finalized(void)
*/
static __always_inline bool cpus_have_cap(unsigned int num)
{
+ if (__builtin_constant_p(num) && !cpucap_is_possible(num))
+ return false;
if (num >= ARM64_NCAPS)
return false;
return arch_test_bit(num, system_cpucaps);
@@ -458,55 +467,37 @@ static __always_inline bool cpus_have_cap(unsigned int num)
/*
* Test for a capability without a runtime check.
*
- * Before capabilities are finalized, this returns false.
- * After capabilities are finalized, this is patched to avoid a runtime check.
+ * Before boot capabilities are finalized, this will BUG().
+ * After boot capabilities are finalized, this is patched to avoid a runtime
+ * check.
*
* @num must be a compile-time constant.
*/
-static __always_inline bool __cpus_have_const_cap(int num)
+static __always_inline bool cpus_have_final_boot_cap(int num)
{
- if (num >= ARM64_NCAPS)
- return false;
- return alternative_has_cap_unlikely(num);
+ if (boot_capabilities_finalized())
+ return alternative_has_cap_unlikely(num);
+ else
+ BUG();
}
/*
* Test for a capability without a runtime check.
*
- * Before capabilities are finalized, this will BUG().
- * After capabilities are finalized, this is patched to avoid a runtime check.
+ * Before system capabilities are finalized, this will BUG().
+ * After system capabilities are finalized, this is patched to avoid a runtime
+ * check.
*
* @num must be a compile-time constant.
*/
static __always_inline bool cpus_have_final_cap(int num)
{
if (system_capabilities_finalized())
- return __cpus_have_const_cap(num);
+ return alternative_has_cap_unlikely(num);
else
BUG();
}
-/*
- * Test for a capability, possibly with a runtime check for non-hyp code.
- *
- * For hyp code, this behaves the same as cpus_have_final_cap().
- *
- * For non-hyp code:
- * Before capabilities are finalized, this behaves as cpus_have_cap().
- * After capabilities are finalized, this is patched to avoid a runtime check.
- *
- * @num must be a compile-time constant.
- */
-static __always_inline bool cpus_have_const_cap(int num)
-{
- if (is_hyp_code())
- return cpus_have_final_cap(num);
- else if (system_capabilities_finalized())
- return __cpus_have_const_cap(num);
- else
- return cpus_have_cap(num);
-}
-
static inline int __attribute_const__
cpuid_feature_extract_signed_field_width(u64 features, int field, int width)
{
@@ -626,7 +617,9 @@ static inline bool id_aa64pfr1_mte(u64 pfr1)
return val >= ID_AA64PFR1_EL1_MTE_MTE2;
}
-void __init setup_cpu_features(void);
+void __init setup_system_features(void);
+void __init setup_user_features(void);
+
void check_local_cpu_capabilities(void);
u64 read_sanitised_ftr_reg(u32 id);
@@ -735,13 +728,12 @@ static inline bool system_supports_mixed_endian(void)
static __always_inline bool system_supports_fpsimd(void)
{
- return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
+ return alternative_has_cap_likely(ARM64_HAS_FPSIMD);
}
static inline bool system_uses_hw_pan(void)
{
- return IS_ENABLED(CONFIG_ARM64_PAN) &&
- cpus_have_const_cap(ARM64_HAS_PAN);
+ return alternative_has_cap_unlikely(ARM64_HAS_PAN);
}
static inline bool system_uses_ttbr0_pan(void)
@@ -752,26 +744,22 @@ static inline bool system_uses_ttbr0_pan(void)
static __always_inline bool system_supports_sve(void)
{
- return IS_ENABLED(CONFIG_ARM64_SVE) &&
- cpus_have_const_cap(ARM64_SVE);
+ return alternative_has_cap_unlikely(ARM64_SVE);
}
static __always_inline bool system_supports_sme(void)
{
- return IS_ENABLED(CONFIG_ARM64_SME) &&
- cpus_have_const_cap(ARM64_SME);
+ return alternative_has_cap_unlikely(ARM64_SME);
}
static __always_inline bool system_supports_sme2(void)
{
- return IS_ENABLED(CONFIG_ARM64_SME) &&
- cpus_have_const_cap(ARM64_SME2);
+ return alternative_has_cap_unlikely(ARM64_SME2);
}
static __always_inline bool system_supports_fa64(void)
{
- return IS_ENABLED(CONFIG_ARM64_SME) &&
- cpus_have_const_cap(ARM64_SME_FA64);
+ return alternative_has_cap_unlikely(ARM64_SME_FA64);
}
static __always_inline bool system_supports_tpidr2(void)
@@ -781,20 +769,17 @@ static __always_inline bool system_supports_tpidr2(void)
static __always_inline bool system_supports_cnp(void)
{
- return IS_ENABLED(CONFIG_ARM64_CNP) &&
- cpus_have_const_cap(ARM64_HAS_CNP);
+ return alternative_has_cap_unlikely(ARM64_HAS_CNP);
}
static inline bool system_supports_address_auth(void)
{
- return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
- cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH);
+ return cpus_have_final_boot_cap(ARM64_HAS_ADDRESS_AUTH);
}
static inline bool system_supports_generic_auth(void)
{
- return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
- cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH);
+ return alternative_has_cap_unlikely(ARM64_HAS_GENERIC_AUTH);
}
static inline bool system_has_full_ptr_auth(void)
@@ -804,14 +789,12 @@ static inline bool system_has_full_ptr_auth(void)
static __always_inline bool system_uses_irq_prio_masking(void)
{
- return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
- cpus_have_const_cap(ARM64_HAS_GIC_PRIO_MASKING);
+ return alternative_has_cap_unlikely(ARM64_HAS_GIC_PRIO_MASKING);
}
static inline bool system_supports_mte(void)
{
- return IS_ENABLED(CONFIG_ARM64_MTE) &&
- cpus_have_const_cap(ARM64_MTE);
+ return alternative_has_cap_unlikely(ARM64_MTE);
}
static inline bool system_has_prio_mask_debugging(void)
@@ -822,13 +805,18 @@ static inline bool system_has_prio_mask_debugging(void)
static inline bool system_supports_bti(void)
{
- return IS_ENABLED(CONFIG_ARM64_BTI) && cpus_have_const_cap(ARM64_BTI);
+ return cpus_have_final_cap(ARM64_BTI);
+}
+
+static inline bool system_supports_bti_kernel(void)
+{
+ return IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) &&
+ cpus_have_final_boot_cap(ARM64_BTI);
}
static inline bool system_supports_tlb_range(void)
{
- return IS_ENABLED(CONFIG_ARM64_TLB_RANGE) &&
- cpus_have_const_cap(ARM64_HAS_TLB_RANGE);
+ return alternative_has_cap_unlikely(ARM64_HAS_TLB_RANGE);
}
int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 7c7493cb5..52f076afe 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -61,6 +61,7 @@
#define ARM_CPU_IMP_HISI 0x48
#define ARM_CPU_IMP_APPLE 0x61
#define ARM_CPU_IMP_AMPERE 0xC0
+#define ARM_CPU_IMP_MICROSOFT 0x6D
#define ARM_CPU_PART_AEM_V8 0xD0F
#define ARM_CPU_PART_FOUNDATION 0xD00
@@ -135,6 +136,8 @@
#define AMPERE_CPU_PART_AMPERE1 0xAC3
+#define MICROSOFT_CPU_PART_AZURE_COBALT_100 0xD49 /* Based on r0p0 of ARM Neoverse N2 */
+
#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
#define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
@@ -193,6 +196,7 @@
#define MIDR_APPLE_M2_BLIZZARD_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD_MAX)
#define MIDR_APPLE_M2_AVALANCHE_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE_MAX)
#define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1)
+#define MIDR_MICROSOFT_AZURE_COBALT_100 MIDR_CPU_MODEL(ARM_CPU_IMP_MICROSOFT, MICROSOFT_CPU_PART_AZURE_COBALT_100)
/* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */
#define MIDR_FUJITSU_ERRATUM_010001 MIDR_FUJITSU_A64FX
diff --git a/arch/arm64/include/asm/crash_core.h b/arch/arm64/include/asm/crash_core.h
new file mode 100644
index 000000000..9f5c8d339
--- /dev/null
+++ b/arch/arm64/include/asm/crash_core.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ARM64_CRASH_CORE_H
+#define _ARM64_CRASH_CORE_H
+
+/* Current arm64 boot protocol requires 2MB alignment */
+#define CRASH_ALIGN SZ_2M
+
+#define CRASH_ADDR_LOW_MAX arm64_dma_phys_limit
+#define CRASH_ADDR_HIGH_MAX (PHYS_MASK + 1)
+#endif
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index 8df46f186..7780d343e 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -32,6 +32,32 @@
#define VFP_STATE_SIZE ((32 * 8) + 4)
#endif
+static inline unsigned long cpacr_save_enable_kernel_sve(void)
+{
+ unsigned long old = read_sysreg(cpacr_el1);
+ unsigned long set = CPACR_EL1_FPEN_EL1EN | CPACR_EL1_ZEN_EL1EN;
+
+ write_sysreg(old | set, cpacr_el1);
+ isb();
+ return old;
+}
+
+static inline unsigned long cpacr_save_enable_kernel_sme(void)
+{
+ unsigned long old = read_sysreg(cpacr_el1);
+ unsigned long set = CPACR_EL1_FPEN_EL1EN | CPACR_EL1_SMEN_EL1EN;
+
+ write_sysreg(old | set, cpacr_el1);
+ isb();
+ return old;
+}
+
+static inline void cpacr_restore(unsigned long cpacr)
+{
+ write_sysreg(cpacr, cpacr_el1);
+ isb();
+}
+
/*
* When we defined the maximum SVE vector length we defined the ABI so
* that the maximum vector length included all the reserved for future
@@ -123,12 +149,12 @@ extern void sme_save_state(void *state, int zt);
extern void sme_load_state(void const *state, int zt);
struct arm64_cpu_capabilities;
-extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused);
-extern void sme_kernel_enable(const struct arm64_cpu_capabilities *__unused);
-extern void sme2_kernel_enable(const struct arm64_cpu_capabilities *__unused);
-extern void fa64_kernel_enable(const struct arm64_cpu_capabilities *__unused);
+extern void cpu_enable_fpsimd(const struct arm64_cpu_capabilities *__unused);
+extern void cpu_enable_sve(const struct arm64_cpu_capabilities *__unused);
+extern void cpu_enable_sme(const struct arm64_cpu_capabilities *__unused);
+extern void cpu_enable_sme2(const struct arm64_cpu_capabilities *__unused);
+extern void cpu_enable_fa64(const struct arm64_cpu_capabilities *__unused);
-extern u64 read_zcr_features(void);
extern u64 read_smcr_features(void);
/*
@@ -360,6 +386,7 @@ extern void sme_alloc(struct task_struct *task, bool flush);
extern unsigned int sme_get_vl(void);
extern int sme_set_current_vl(unsigned long arg);
extern int sme_get_current_vl(void);
+extern void sme_suspend_exit(void);
/*
* Return how many bytes of memory are required to store the full SME
@@ -395,6 +422,7 @@ static inline int sme_max_vl(void) { return 0; }
static inline int sme_max_virtualisable_vl(void) { return 0; }
static inline int sme_set_current_vl(unsigned long arg) { return -EINVAL; }
static inline int sme_get_current_vl(void) { return -EINVAL; }
+static inline void sme_suspend_exit(void) { }
static inline size_t sme_state_size(struct task_struct const *task)
{
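As an aside, the new CPACR helpers added above are meant to bracket short EL1 code sequences that touch SVE/SME registers; a minimal sketch of the expected call pattern (function name is illustrative, not from the patch):

static void example_probe_sve(void)
{
	unsigned long cpacr = cpacr_save_enable_kernel_sve();

	/* ... access SVE system registers here, with EL1 traps disabled ... */

	cpacr_restore(cpacr);	/* put CPACR_EL1 back exactly as it was */
}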
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index 521267478..cd71e09ea 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -139,6 +139,9 @@
#define KERNEL_HWCAP_SME_F16F16 __khwcap2_feature(SME_F16F16)
#define KERNEL_HWCAP_MOPS __khwcap2_feature(MOPS)
#define KERNEL_HWCAP_HBC __khwcap2_feature(HBC)
+#define KERNEL_HWCAP_SVE_B16B16 __khwcap2_feature(SVE_B16B16)
+#define KERNEL_HWCAP_LRCPC3 __khwcap2_feature(LRCPC3)
+#define KERNEL_HWCAP_LSE128 __khwcap2_feature(LSE128)
/*
* This yields a mask that user programs can use to figure out what
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index fac08e18b..50ce8b697 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -6,6 +6,9 @@
#include <asm-generic/irq.h>
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu);
+#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
+
struct pt_regs;
int set_handle_irq(void (*handle_irq)(struct pt_regs *));
diff --git a/arch/arm64/include/asm/irq_work.h b/arch/arm64/include/asm/irq_work.h
index 81bbfa3a0..a1020285e 100644
--- a/arch/arm64/include/asm/irq_work.h
+++ b/arch/arm64/include/asm/irq_work.h
@@ -2,8 +2,6 @@
#ifndef __ASM_IRQ_WORK_H
#define __ASM_IRQ_WORK_H
-extern void arch_irq_work_raise(void);
-
static inline bool arch_irq_work_has_interrupt(void)
{
return true;
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index 1f31ec146..0a7186a93 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -21,12 +21,6 @@
* exceptions should be unmasked.
*/
-static __always_inline bool __irqflags_uses_pmr(void)
-{
- return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
- alternative_has_cap_unlikely(ARM64_HAS_GIC_PRIO_MASKING);
-}
-
static __always_inline void __daif_local_irq_enable(void)
{
barrier();
@@ -49,7 +43,7 @@ static __always_inline void __pmr_local_irq_enable(void)
static inline void arch_local_irq_enable(void)
{
- if (__irqflags_uses_pmr()) {
+ if (system_uses_irq_prio_masking()) {
__pmr_local_irq_enable();
} else {
__daif_local_irq_enable();
@@ -77,7 +71,7 @@ static __always_inline void __pmr_local_irq_disable(void)
static inline void arch_local_irq_disable(void)
{
- if (__irqflags_uses_pmr()) {
+ if (system_uses_irq_prio_masking()) {
__pmr_local_irq_disable();
} else {
__daif_local_irq_disable();
@@ -99,7 +93,7 @@ static __always_inline unsigned long __pmr_local_save_flags(void)
*/
static inline unsigned long arch_local_save_flags(void)
{
- if (__irqflags_uses_pmr()) {
+ if (system_uses_irq_prio_masking()) {
return __pmr_local_save_flags();
} else {
return __daif_local_save_flags();
@@ -118,7 +112,7 @@ static __always_inline bool __pmr_irqs_disabled_flags(unsigned long flags)
static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
- if (__irqflags_uses_pmr()) {
+ if (system_uses_irq_prio_masking()) {
return __pmr_irqs_disabled_flags(flags);
} else {
return __daif_irqs_disabled_flags(flags);
@@ -137,7 +131,7 @@ static __always_inline bool __pmr_irqs_disabled(void)
static inline bool arch_irqs_disabled(void)
{
- if (__irqflags_uses_pmr()) {
+ if (system_uses_irq_prio_masking()) {
return __pmr_irqs_disabled();
} else {
return __daif_irqs_disabled();
@@ -169,7 +163,7 @@ static __always_inline unsigned long __pmr_local_irq_save(void)
static inline unsigned long arch_local_irq_save(void)
{
- if (__irqflags_uses_pmr()) {
+ if (system_uses_irq_prio_masking()) {
return __pmr_local_irq_save();
} else {
return __daif_local_irq_save();
@@ -196,7 +190,7 @@ static __always_inline void __pmr_local_irq_restore(unsigned long flags)
*/
static inline void arch_local_irq_restore(unsigned long flags)
{
- if (__irqflags_uses_pmr()) {
+ if (system_uses_irq_prio_masking()) {
__pmr_local_irq_restore(flags);
} else {
__daif_local_irq_restore(flags);
diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h
index 48ddc0f45..6aafbb789 100644
--- a/arch/arm64/include/asm/jump_label.h
+++ b/arch/arm64/include/asm/jump_label.h
@@ -18,7 +18,7 @@
static __always_inline bool arch_static_branch(struct static_key * const key,
const bool branch)
{
- asm_volatile_goto(
+ asm goto(
"1: nop \n\t"
" .pushsection __jump_table, \"aw\" \n\t"
" .align 3 \n\t"
@@ -35,7 +35,7 @@ l_yes:
static __always_inline bool arch_static_branch_jump(struct static_key * const key,
const bool branch)
{
- asm_volatile_goto(
+ asm goto(
"1: b %l[l_yes] \n\t"
" .pushsection __jump_table, \"aw\" \n\t"
" .align 3 \n\t"
diff --git a/arch/arm64/include/asm/kprobes.h b/arch/arm64/include/asm/kprobes.h
index 05cd82eec..be7a3680d 100644
--- a/arch/arm64/include/asm/kprobes.h
+++ b/arch/arm64/include/asm/kprobes.h
@@ -37,8 +37,6 @@ struct kprobe_ctlblk {
void arch_remove_kprobe(struct kprobe *);
int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr);
-int kprobe_exceptions_notify(struct notifier_block *self,
- unsigned long val, void *data);
void __kretprobe_trampoline(void);
void __kprobes *trampoline_probe_handler(struct pt_regs *regs);
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 1095c6647..b85f46a73 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -102,7 +102,9 @@
#define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
-#define HCRX_GUEST_FLAGS (HCRX_EL2_SMPME | HCRX_EL2_TCR2En)
+#define HCRX_GUEST_FLAGS \
+ (HCRX_EL2_SMPME | HCRX_EL2_TCR2En | \
+ (cpus_have_final_cap(ARM64_HAS_MOPS) ? (HCRX_EL2_MSCEn | HCRX_EL2_MCE2) : 0))
#define HCRX_HOST_FLAGS (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En)
/* TCR_EL2 Registers bits */
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 3d6725ff0..78a550537 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -54,6 +54,11 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu);
int kvm_inject_nested_sync(struct kvm_vcpu *vcpu, u64 esr_el2);
int kvm_inject_nested_irq(struct kvm_vcpu *vcpu);
+static inline bool vcpu_has_feature(const struct kvm_vcpu *vcpu, int feature)
+{
+ return test_bit(feature, vcpu->kvm->arch.vcpu_features);
+}
+
#if defined(__KVM_VHE_HYPERVISOR__) || defined(__KVM_NVHE_HYPERVISOR__)
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
@@ -62,7 +67,7 @@ static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
#else
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
- return test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features);
+ return vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
}
#endif
@@ -71,14 +76,14 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
if (has_vhe() || has_hvhe())
vcpu->arch.hcr_el2 |= HCR_E2H;
- if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
+ if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
/* route synchronous external abort exceptions to EL2 */
vcpu->arch.hcr_el2 |= HCR_TEA;
/* trap error record accesses */
vcpu->arch.hcr_el2 |= HCR_TERR;
}
- if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
+ if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB)) {
vcpu->arch.hcr_el2 |= HCR_FWB;
} else {
/*
@@ -465,7 +470,7 @@ static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
- return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
+ return __vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
@@ -565,12 +570,6 @@ static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
vcpu_set_flag((v), e); \
} while (0)
-
-static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
-{
- return test_bit(feature, vcpu->arch.features);
-}
-
static __always_inline void kvm_write_cptr_el2(u64 val)
{
if (has_vhe() || has_hvhe())
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index af06ccb7e..824f29f04 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -78,7 +78,7 @@ extern unsigned int __ro_after_init kvm_sve_max_vl;
int __init kvm_arm_init_sve(void);
u32 __attribute_const__ kvm_target_cpu(void);
-int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
+void kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);
struct kvm_hyp_memcache {
@@ -158,6 +158,16 @@ struct kvm_s2_mmu {
phys_addr_t pgd_phys;
struct kvm_pgtable *pgt;
+ /*
+ * VTCR value used on the host. For a non-NV guest (or a NV
+ * guest that runs in a context where its own S2 doesn't
+ * apply), its T0SZ value reflects that of the IPA size.
+ *
+ * For a shadow S2 MMU, T0SZ reflects the PARange exposed to
+ * the guest.
+ */
+ u64 vtcr;
+
/* The last vcpu id that ran on each physical CPU */
int __percpu *last_vcpu_ran;
@@ -202,12 +212,34 @@ struct kvm_protected_vm {
struct kvm_hyp_memcache teardown_mc;
};
+struct kvm_mpidr_data {
+ u64 mpidr_mask;
+ DECLARE_FLEX_ARRAY(u16, cmpidr_to_idx);
+};
+
+static inline u16 kvm_mpidr_index(struct kvm_mpidr_data *data, u64 mpidr)
+{
+ unsigned long mask = data->mpidr_mask;
+ u64 aff = mpidr & MPIDR_HWID_BITMASK;
+ int nbits, bit, bit_idx = 0;
+ u16 index = 0;
+
+ /*
+ * If this looks like RISC-V's BEXT or x86's PEXT
+ * instructions, it isn't by accident.
+ */
+ nbits = fls(mask);
+ for_each_set_bit(bit, &mask, nbits) {
+ index |= (aff & BIT(bit)) >> (bit - bit_idx);
+ bit_idx++;
+ }
+
+ return index;
+}
+
struct kvm_arch {
struct kvm_s2_mmu mmu;
- /* VTCR_EL2 value for this VM */
- u64 vtcr;
-
/* Interrupt controller */
struct vgic_dist vgic;
@@ -239,15 +271,16 @@ struct kvm_arch {
#define KVM_ARCH_FLAG_VM_COUNTER_OFFSET 5
/* Timer PPIs made immutable */
#define KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE 6
- /* SMCCC filter initialized for the VM */
-#define KVM_ARCH_FLAG_SMCCC_FILTER_CONFIGURED 7
/* Initial ID reg values loaded */
-#define KVM_ARCH_FLAG_ID_REGS_INITIALIZED 8
+#define KVM_ARCH_FLAG_ID_REGS_INITIALIZED 7
unsigned long flags;
/* VM-wide vCPU feature set */
DECLARE_BITMAP(vcpu_features, KVM_VCPU_MAX_FEATURES);
+ /* MPIDR to vcpu index mapping, optional */
+ struct kvm_mpidr_data *mpidr_data;
+
/*
* VM-wide PMU filter, implemented as a bitmap and big enough for
* up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+).
@@ -257,6 +290,9 @@ struct kvm_arch {
cpumask_var_t supported_cpus;
+ /* PMCR_EL0.N value for the guest */
+ u8 pmcr_n;
+
/* Hypercall features firmware registers' descriptor */
struct kvm_smccc_features smccc_feat;
struct maple_tree smccc_filter;
@@ -574,9 +610,6 @@ struct kvm_vcpu_arch {
/* Cache some mmu pages needed inside spinlock regions */
struct kvm_mmu_memory_cache mmu_page_cache;
- /* feature flags */
- DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
-
/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
u64 vsesr_el2;
@@ -1025,7 +1058,7 @@ int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
extern unsigned int __ro_after_init kvm_arm_vmid_bits;
int __init kvm_arm_vmid_alloc_init(void);
void __init kvm_arm_vmid_alloc_free(void);
-void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
+bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
void kvm_arm_vmid_clear_active(void);
static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
@@ -1052,7 +1085,7 @@ static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
static inline bool kvm_system_needs_idmapped_vectors(void)
{
- return cpus_have_const_cap(ARM64_SPECTRE_V3A);
+ return cpus_have_final_cap(ARM64_SPECTRE_V3A);
}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
@@ -1078,6 +1111,8 @@ int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
struct kvm_arm_copy_mte_tags *copy_tags);
int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm,
struct kvm_arm_counter_offset *offset);
+int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm,
+ struct reg_mask_range *range);
/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
@@ -1109,8 +1144,8 @@ static inline bool kvm_set_pmuserenr(u64 val)
}
#endif
-void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu);
-void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu);
+void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu);
+void kvm_vcpu_put_vhe(struct kvm_vcpu *vcpu);
int __init kvm_set_ipa_limit(void);
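To make the new MPIDR-to-index mapping concrete, a stand-alone sketch (an illustrative user-space re-implementation, not the kernel helper itself) of the PEXT/BEXT-style bit packing performed by kvm_mpidr_index():

#include <stdint.h>
#include <stdio.h>

static uint16_t pext_like(uint64_t mask, uint64_t aff)
{
	uint16_t index = 0;
	int bit_idx = 0;

	/* Gather the affinity bits selected by mask into a dense index */
	for (int bit = 0; bit < 64; bit++) {
		if (!(mask & (1ULL << bit)))
			continue;
		index |= ((aff >> bit) & 1) << bit_idx;
		bit_idx++;
	}
	return index;
}

int main(void)
{
	/* mask selects Aff0[1:0] and Aff1[1:0]; MPIDR has Aff1=1, Aff0=2 */
	printf("%u\n", (unsigned)pext_like(0x303, 0x0102));	/* prints 6 */
	return 0;
}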
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index 66efd67ea..145ce73fc 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -93,6 +93,8 @@ void __timer_disable_traps(struct kvm_vcpu *vcpu);
void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt);
void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt);
#else
+void __vcpu_load_switch_sysregs(struct kvm_vcpu *vcpu);
+void __vcpu_put_switch_sysregs(struct kvm_vcpu *vcpu);
void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt);
@@ -111,11 +113,6 @@ void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
void __sve_restore_state(void *sve_pffr, u32 *fpsr);
-#ifndef __KVM_NVHE_HYPERVISOR__
-void activate_traps_vhe_load(struct kvm_vcpu *vcpu);
-void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu);
-#endif
-
u64 __guest_enter(struct kvm_vcpu *vcpu);
bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt, u32 func_id);
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 96a80e8f6..49e0d4b36 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -150,9 +150,9 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
*/
#define KVM_PHYS_SHIFT (40)
-#define kvm_phys_shift(kvm) VTCR_EL2_IPA(kvm->arch.vtcr)
-#define kvm_phys_size(kvm) (_AC(1, ULL) << kvm_phys_shift(kvm))
-#define kvm_phys_mask(kvm) (kvm_phys_size(kvm) - _AC(1, ULL))
+#define kvm_phys_shift(mmu) VTCR_EL2_IPA((mmu)->vtcr)
+#define kvm_phys_size(mmu) (_AC(1, ULL) << kvm_phys_shift(mmu))
+#define kvm_phys_mask(mmu) (kvm_phys_size(mmu) - _AC(1, ULL))
#include <asm/kvm_pgtable.h>
#include <asm/stage2_pgtable.h>
@@ -218,22 +218,47 @@ static inline void __clean_dcache_guest_page(void *va, size_t size)
* faulting in pages. Furthermore, FWB implies IDC, so cleaning to
* PoU is not required either in this case.
*/
- if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
+ if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
return;
kvm_flush_dcache_to_poc(va, size);
}
+static inline size_t __invalidate_icache_max_range(void)
+{
+ u8 iminline;
+ u64 ctr;
+
+ asm volatile(ALTERNATIVE_CB("movz %0, #0\n"
+ "movk %0, #0, lsl #16\n"
+ "movk %0, #0, lsl #32\n"
+ "movk %0, #0, lsl #48\n",
+ ARM64_ALWAYS_SYSTEM,
+ kvm_compute_final_ctr_el0)
+ : "=r" (ctr));
+
+ iminline = SYS_FIELD_GET(CTR_EL0, IminLine, ctr) + 2;
+ return MAX_DVM_OPS << iminline;
+}
+
static inline void __invalidate_icache_guest_page(void *va, size_t size)
{
- if (icache_is_aliasing()) {
- /* any kind of VIPT cache */
+ /*
+ * VPIPT I-cache maintenance must be done from EL2. See comment in the
+ * nVHE flavor of __kvm_tlb_flush_vmid_ipa().
+ */
+ if (icache_is_vpipt() && read_sysreg(CurrentEL) != CurrentEL_EL2)
+ return;
+
+ /*
+ * Blow the whole I-cache if it is aliasing (i.e. VIPT) or the
+ * invalidation range exceeds our arbitrary limit on invalidations by
+ * cache line.
+ */
+ if (icache_is_aliasing() || size > __invalidate_icache_max_range())
icache_inval_all_pou();
- } else if (read_sysreg(CurrentEL) != CurrentEL_EL1 ||
- !icache_is_vpipt()) {
- /* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
+ else
icache_inval_pou((unsigned long)va, (unsigned long)va + size);
- }
}
void kvm_set_way_flush(struct kvm_vcpu *vcpu);
@@ -299,7 +324,7 @@ static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu,
struct kvm_arch *arch)
{
- write_sysreg(arch->vtcr, vtcr_el2);
+ write_sysreg(mmu->vtcr, vtcr_el2);
write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);
/*
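For scale, a worked example of the new __invalidate_icache_max_range() cut-off, using typical (illustrative) values rather than anything mandated by the patch:

/*
 * CTR_EL0.IminLine = 4  ->  smallest I-cache line = 2^4 words = 64 bytes,
 *                           so iminline = 4 + 2 = 6 (log2 of line size in bytes)
 * MAX_DVM_OPS = PTRS_PER_PTE = 512 with 4K pages
 *
 *   max range = MAX_DVM_OPS << iminline = 512 << 6 = 32 KiB
 *
 * A by-line invalidation larger than that falls back to
 * icache_inval_all_pou(), bounding the number of DVM operations issued.
 */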
diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/kvm_nested.h
index fa23cc9c2..6cec8e9c6 100644
--- a/arch/arm64/include/asm/kvm_nested.h
+++ b/arch/arm64/include/asm/kvm_nested.h
@@ -2,13 +2,14 @@
#ifndef __ARM64_KVM_NESTED_H
#define __ARM64_KVM_NESTED_H
+#include <asm/kvm_emulate.h>
#include <linux/kvm_host.h>
static inline bool vcpu_has_nv(const struct kvm_vcpu *vcpu)
{
return (!__is_defined(__KVM_NVHE_HYPERVISOR__) &&
cpus_have_final_cap(ARM64_HAS_NESTED_VIRT) &&
- test_bit(KVM_ARM_VCPU_HAS_EL2, vcpu->arch.features));
+ vcpu_has_feature(vcpu, KVM_ARM_VCPU_HAS_EL2));
}
extern bool __check_nv_sr_forward(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
index cbbcdc35c..3129a5819 100644
--- a/arch/arm64/include/asm/lse.h
+++ b/arch/arm64/include/asm/lse.h
@@ -16,14 +16,9 @@
#include <asm/atomic_lse.h>
#include <asm/cpucaps.h>
-static __always_inline bool system_uses_lse_atomics(void)
-{
- return alternative_has_cap_likely(ARM64_HAS_LSE_ATOMICS);
-}
-
#define __lse_ll_sc_body(op, ...) \
({ \
- system_uses_lse_atomics() ? \
+ alternative_has_cap_likely(ARM64_HAS_LSE_ATOMICS) ? \
__lse_##op(__VA_ARGS__) : \
__ll_sc_##op(__VA_ARGS__); \
})
@@ -34,8 +29,6 @@ static __always_inline bool system_uses_lse_atomics(void)
#else /* CONFIG_ARM64_LSE_ATOMICS */
-static inline bool system_uses_lse_atomics(void) { return false; }
-
#define __lse_ll_sc_body(op, ...) __ll_sc_##op(__VA_ARGS__)
#define ARM64_LSE_ATOMIC_INSN(llsc, lse) llsc
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 94b68850c..2fcf51231 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -57,7 +57,7 @@ typedef struct {
static inline bool arm64_kernel_unmapped_at_el0(void)
{
- return cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
+ return alternative_has_cap_unlikely(ARM64_UNMAP_KERNEL_AT_EL0);
}
extern void arm64_memblock_init(void);
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index a6fb32542..9ce420050 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -152,7 +152,7 @@ static inline void cpu_install_ttbr0(phys_addr_t ttbr0, unsigned long t0sz)
* Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
* avoiding the possibility of conflicting TLB entries being allocated.
*/
-static inline void cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
+static inline void __cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap, bool cnp)
{
typedef void (ttbr_replace_func)(phys_addr_t);
extern ttbr_replace_func idmap_cpu_replace_ttbr1;
@@ -162,17 +162,8 @@ static inline void cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
/* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */
phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp));
- if (system_supports_cnp() && !WARN_ON(pgdp != lm_alias(swapper_pg_dir))) {
- /*
- * cpu_replace_ttbr1() is used when there's a boot CPU
- * up (i.e. cpufeature framework is not up yet) and
- * latter only when we enable CNP via cpufeature's
- * enable() callback.
- * Also we rely on the system_cpucaps bit being set before
- * calling the enable() function.
- */
+ if (cnp)
ttbr1 |= TTBR_CNP_BIT;
- }
replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);
@@ -189,6 +180,21 @@ static inline void cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
cpu_uninstall_idmap();
}
+static inline void cpu_enable_swapper_cnp(void)
+{
+ __cpu_replace_ttbr1(lm_alias(swapper_pg_dir), idmap_pg_dir, true);
+}
+
+static inline void cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
+{
+ /*
+ * Only for early TTBR1 replacement before cpucaps are finalized and
+ * before we've decided whether to use CNP.
+ */
+ WARN_ON(system_capabilities_finalized());
+ __cpu_replace_ttbr1(pgdp, idmap, false);
+}
+
/*
* It would be nice to return ASIDs back to the allocator, but unfortunately
* that introduces a race with a generation rollover where we could erroneously
diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h
index bfa6638b4..79550b22b 100644
--- a/arch/arm64/include/asm/module.h
+++ b/arch/arm64/include/asm/module.h
@@ -44,8 +44,7 @@ struct plt_entry {
static inline bool is_forbidden_offset_for_adrp(void *place)
{
- return IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) &&
- cpus_have_const_cap(ARM64_WORKAROUND_843419) &&
+ return cpus_have_final_cap(ARM64_WORKAROUND_843419) &&
((u64)place & 0xfff) >= 0xff8;
}
diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
index 4cedbaa16..91fbd5c8a 100644
--- a/arch/arm64/include/asm/mte.h
+++ b/arch/arm64/include/asm/mte.h
@@ -90,7 +90,7 @@ static inline bool try_page_mte_tagging(struct page *page)
}
void mte_zero_clear_page_tags(void *addr);
-void mte_sync_tags(pte_t pte);
+void mte_sync_tags(pte_t pte, unsigned int nr_pages);
void mte_copy_page_tags(void *kto, const void *kfrom);
void mte_thread_init_user(void);
void mte_thread_switch(struct task_struct *next);
@@ -122,7 +122,7 @@ static inline bool try_page_mte_tagging(struct page *page)
static inline void mte_zero_clear_page_tags(void *addr)
{
}
-static inline void mte_sync_tags(pte_t pte)
+static inline void mte_sync_tags(pte_t pte, unsigned int nr_pages)
{
}
static inline void mte_copy_page_tags(void *kto, const void *kfrom)
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index eed814b00..e9624f632 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -75,11 +75,7 @@ extern bool arm64_use_ng_mappings;
* If we have userspace only BTI we don't want to mark kernel pages
* guarded even if the system does support BTI.
*/
-#ifdef CONFIG_ARM64_BTI_KERNEL
-#define PTE_MAYBE_GP (system_supports_bti() ? PTE_GP : 0)
-#else
-#define PTE_MAYBE_GP 0
-#endif
+#define PTE_MAYBE_GP (system_supports_bti_kernel() ? PTE_GP : 0)
#define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL_RO)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 07bdf5dd8..79ce70fbb 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -325,8 +325,7 @@ static inline void __check_safe_pte_update(struct mm_struct *mm, pte_t *ptep,
__func__, pte_val(old_pte), pte_val(pte));
}
-static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte)
+static inline void __sync_cache_and_tags(pte_t pte, unsigned int nr_pages)
{
if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
__sync_icache_dcache(pte);
@@ -339,24 +338,22 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
*/
if (system_supports_mte() && pte_access_permitted(pte, false) &&
!pte_special(pte) && pte_tagged(pte))
- mte_sync_tags(pte);
-
- __check_safe_pte_update(mm, ptep, pte);
-
- set_pte(ptep, pte);
+ mte_sync_tags(pte, nr_pages);
}
-static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte, unsigned int nr)
+static inline void set_ptes(struct mm_struct *mm,
+ unsigned long __always_unused addr,
+ pte_t *ptep, pte_t pte, unsigned int nr)
{
page_table_check_ptes_set(mm, ptep, pte, nr);
+ __sync_cache_and_tags(pte, nr);
for (;;) {
- __set_pte_at(mm, addr, ptep, pte);
+ __check_safe_pte_update(mm, ptep, pte);
+ set_pte(ptep, pte);
if (--nr == 0)
break;
ptep++;
- addr += PAGE_SIZE;
pte_val(pte) += PAGE_SIZE;
}
}
@@ -531,18 +528,29 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd)
#define pud_pfn(pud) ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
#define pfn_pud(pfn,prot) __pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
+static inline void __set_pte_at(struct mm_struct *mm,
+ unsigned long __always_unused addr,
+ pte_t *ptep, pte_t pte, unsigned int nr)
+{
+ __sync_cache_and_tags(pte, nr);
+ __check_safe_pte_update(mm, ptep, pte);
+ set_pte(ptep, pte);
+}
+
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
page_table_check_pmd_set(mm, pmdp, pmd);
- return __set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd));
+ return __set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd),
+ PMD_SIZE >> PAGE_SHIFT);
}
static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
pud_t *pudp, pud_t pud)
{
page_table_check_pud_set(mm, pudp, pud);
- return __set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud));
+ return __set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud),
+ PUD_SIZE >> PAGE_SHIFT);
}
#define __p4d_to_phys(p4d) __pte_to_phys(p4d_pte(p4d))
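A short note on the reworked set_ptes() above (example values are illustrative):

/*
 * set_ptes(mm, addr, ptep, pte, 4) installs four consecutive entries
 * ptep[0..3]. Each iteration adds PAGE_SIZE to pte_val(pte), i.e. bumps
 * the output PFN by one, so the entries map pfn, pfn+1, pfn+2, pfn+3 with
 * identical permissions, while the cache/MTE-tag maintenance in
 * __sync_cache_and_tags() is now done once up front for all 'nr' pages
 * rather than per entry.
 */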
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 9b31e6d0d..efb13112b 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -89,9 +89,9 @@ extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
-extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);
+extern void arch_send_wakeup_ipi(unsigned int cpu);
#else
-static inline void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
+static inline void arch_send_wakeup_ipi(unsigned int cpu)
{
BUILD_BUG();
}
diff --git a/arch/arm64/include/asm/spectre.h b/arch/arm64/include/asm/spectre.h
index 9cc501450..06c357d83 100644
--- a/arch/arm64/include/asm/spectre.h
+++ b/arch/arm64/include/asm/spectre.h
@@ -73,7 +73,7 @@ static __always_inline void arm64_apply_bp_hardening(void)
{
struct bp_hardening_data *d;
- if (!cpus_have_const_cap(ARM64_SPECTRE_V2))
+ if (!alternative_has_cap_unlikely(ARM64_SPECTRE_V2))
return;
d = this_cpu_ptr(&bp_hardening_data);
diff --git a/arch/arm64/include/asm/stage2_pgtable.h b/arch/arm64/include/asm/stage2_pgtable.h
index c8dca8ae3..23d27623e 100644
--- a/arch/arm64/include/asm/stage2_pgtable.h
+++ b/arch/arm64/include/asm/stage2_pgtable.h
@@ -21,13 +21,13 @@
* (IPA_SHIFT - 4).
*/
#define stage2_pgtable_levels(ipa) ARM64_HW_PGTABLE_LEVELS((ipa) - 4)
-#define kvm_stage2_levels(kvm) VTCR_EL2_LVLS(kvm->arch.vtcr)
+#define kvm_stage2_levels(mmu) VTCR_EL2_LVLS((mmu)->vtcr)
/*
* kvm_mmu_cache_min_pages() is the number of pages required to install
* a stage-2 translation. We pre-allocate the entry level page table at
* the VM creation.
*/
-#define kvm_mmu_cache_min_pages(kvm) (kvm_stage2_levels(kvm) - 1)
+#define kvm_mmu_cache_min_pages(mmu) (kvm_stage2_levels(mmu) - 1)
#endif /* __ARM64_S2_PGTABLE_H_ */
diff --git a/arch/arm64/include/asm/syscall_wrapper.h b/arch/arm64/include/asm/syscall_wrapper.h
index 7a0e7b59b..abb57bc54 100644
--- a/arch/arm64/include/asm/syscall_wrapper.h
+++ b/arch/arm64/include/asm/syscall_wrapper.h
@@ -51,7 +51,6 @@
ALLOW_ERROR_INJECTION(__arm64_sys##name, ERRNO); \
static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
- asmlinkage long __arm64_sys##name(const struct pt_regs *regs); \
asmlinkage long __arm64_sys##name(const struct pt_regs *regs) \
{ \
return __se_sys##name(SC_ARM64_REGS_TO_ARGS(x,__VA_ARGS__)); \
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 38296579a..5e65f51c1 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -270,6 +270,8 @@
/* ETM */
#define SYS_TRCOSLAR sys_reg(2, 1, 1, 0, 4)
+#define SYS_BRBCR_EL2 sys_reg(2, 4, 9, 0, 0)
+
#define SYS_MIDR_EL1 sys_reg(3, 0, 0, 0, 0)
#define SYS_MPIDR_EL1 sys_reg(3, 0, 0, 0, 5)
#define SYS_REVIDR_EL1 sys_reg(3, 0, 0, 0, 6)
@@ -484,6 +486,7 @@
#define SYS_SCTLR_EL2 sys_reg(3, 4, 1, 0, 0)
#define SYS_ACTLR_EL2 sys_reg(3, 4, 1, 0, 1)
+#define SYS_SCTLR2_EL2 sys_reg(3, 4, 1, 0, 3)
#define SYS_HCR_EL2 sys_reg(3, 4, 1, 1, 0)
#define SYS_MDCR_EL2 sys_reg(3, 4, 1, 1, 1)
#define SYS_CPTR_EL2 sys_reg(3, 4, 1, 1, 2)
@@ -497,10 +500,15 @@
#define SYS_VTCR_EL2 sys_reg(3, 4, 2, 1, 2)
#define SYS_TRFCR_EL2 sys_reg(3, 4, 1, 2, 1)
+#define SYS_VNCR_EL2 sys_reg(3, 4, 2, 2, 0)
#define SYS_HAFGRTR_EL2 sys_reg(3, 4, 3, 1, 6)
#define SYS_SPSR_EL2 sys_reg(3, 4, 4, 0, 0)
#define SYS_ELR_EL2 sys_reg(3, 4, 4, 0, 1)
#define SYS_SP_EL1 sys_reg(3, 4, 4, 1, 0)
+#define SYS_SPSR_irq sys_reg(3, 4, 4, 3, 0)
+#define SYS_SPSR_abt sys_reg(3, 4, 4, 3, 1)
+#define SYS_SPSR_und sys_reg(3, 4, 4, 3, 2)
+#define SYS_SPSR_fiq sys_reg(3, 4, 4, 3, 3)
#define SYS_IFSR32_EL2 sys_reg(3, 4, 5, 0, 1)
#define SYS_AFSR0_EL2 sys_reg(3, 4, 5, 1, 0)
#define SYS_AFSR1_EL2 sys_reg(3, 4, 5, 1, 1)
@@ -514,6 +522,18 @@
#define SYS_MAIR_EL2 sys_reg(3, 4, 10, 2, 0)
#define SYS_AMAIR_EL2 sys_reg(3, 4, 10, 3, 0)
+#define SYS_MPAMHCR_EL2 sys_reg(3, 4, 10, 4, 0)
+#define SYS_MPAMVPMV_EL2 sys_reg(3, 4, 10, 4, 1)
+#define SYS_MPAM2_EL2 sys_reg(3, 4, 10, 5, 0)
+#define __SYS__MPAMVPMx_EL2(x) sys_reg(3, 4, 10, 6, x)
+#define SYS_MPAMVPM0_EL2 __SYS__MPAMVPMx_EL2(0)
+#define SYS_MPAMVPM1_EL2 __SYS__MPAMVPMx_EL2(1)
+#define SYS_MPAMVPM2_EL2 __SYS__MPAMVPMx_EL2(2)
+#define SYS_MPAMVPM3_EL2 __SYS__MPAMVPMx_EL2(3)
+#define SYS_MPAMVPM4_EL2 __SYS__MPAMVPMx_EL2(4)
+#define SYS_MPAMVPM5_EL2 __SYS__MPAMVPMx_EL2(5)
+#define SYS_MPAMVPM6_EL2 __SYS__MPAMVPMx_EL2(6)
+#define SYS_MPAMVPM7_EL2 __SYS__MPAMVPMx_EL2(7)
#define SYS_VBAR_EL2 sys_reg(3, 4, 12, 0, 0)
#define SYS_RVBAR_EL2 sys_reg(3, 4, 12, 0, 1)
@@ -562,24 +582,49 @@
#define SYS_CONTEXTIDR_EL2 sys_reg(3, 4, 13, 0, 1)
#define SYS_TPIDR_EL2 sys_reg(3, 4, 13, 0, 2)
+#define SYS_SCXTNUM_EL2 sys_reg(3, 4, 13, 0, 7)
+
+#define __AMEV_op2(m) (m & 0x7)
+#define __AMEV_CRm(n, m) (n | ((m & 0x8) >> 3))
+#define __SYS__AMEVCNTVOFF0n_EL2(m) sys_reg(3, 4, 13, __AMEV_CRm(0x8, m), __AMEV_op2(m))
+#define SYS_AMEVCNTVOFF0n_EL2(m) __SYS__AMEVCNTVOFF0n_EL2(m)
+#define __SYS__AMEVCNTVOFF1n_EL2(m) sys_reg(3, 4, 13, __AMEV_CRm(0xA, m), __AMEV_op2(m))
+#define SYS_AMEVCNTVOFF1n_EL2(m) __SYS__AMEVCNTVOFF1n_EL2(m)
#define SYS_CNTVOFF_EL2 sys_reg(3, 4, 14, 0, 3)
#define SYS_CNTHCTL_EL2 sys_reg(3, 4, 14, 1, 0)
+#define SYS_CNTHP_TVAL_EL2 sys_reg(3, 4, 14, 2, 0)
+#define SYS_CNTHP_CTL_EL2 sys_reg(3, 4, 14, 2, 1)
+#define SYS_CNTHP_CVAL_EL2 sys_reg(3, 4, 14, 2, 2)
+#define SYS_CNTHV_TVAL_EL2 sys_reg(3, 4, 14, 3, 0)
+#define SYS_CNTHV_CTL_EL2 sys_reg(3, 4, 14, 3, 1)
+#define SYS_CNTHV_CVAL_EL2 sys_reg(3, 4, 14, 3, 2)
/* VHE encodings for architectural EL0/1 system registers */
+#define SYS_BRBCR_EL12 sys_reg(2, 5, 9, 0, 0)
#define SYS_SCTLR_EL12 sys_reg(3, 5, 1, 0, 0)
+#define SYS_CPACR_EL12 sys_reg(3, 5, 1, 0, 2)
+#define SYS_SCTLR2_EL12 sys_reg(3, 5, 1, 0, 3)
+#define SYS_ZCR_EL12 sys_reg(3, 5, 1, 2, 0)
+#define SYS_TRFCR_EL12 sys_reg(3, 5, 1, 2, 1)
+#define SYS_SMCR_EL12 sys_reg(3, 5, 1, 2, 6)
#define SYS_TTBR0_EL12 sys_reg(3, 5, 2, 0, 0)
#define SYS_TTBR1_EL12 sys_reg(3, 5, 2, 0, 1)
#define SYS_TCR_EL12 sys_reg(3, 5, 2, 0, 2)
+#define SYS_TCR2_EL12 sys_reg(3, 5, 2, 0, 3)
#define SYS_SPSR_EL12 sys_reg(3, 5, 4, 0, 0)
#define SYS_ELR_EL12 sys_reg(3, 5, 4, 0, 1)
#define SYS_AFSR0_EL12 sys_reg(3, 5, 5, 1, 0)
#define SYS_AFSR1_EL12 sys_reg(3, 5, 5, 1, 1)
#define SYS_ESR_EL12 sys_reg(3, 5, 5, 2, 0)
#define SYS_TFSR_EL12 sys_reg(3, 5, 5, 6, 0)
+#define SYS_FAR_EL12 sys_reg(3, 5, 6, 0, 0)
+#define SYS_PMSCR_EL12 sys_reg(3, 5, 9, 9, 0)
#define SYS_MAIR_EL12 sys_reg(3, 5, 10, 2, 0)
#define SYS_AMAIR_EL12 sys_reg(3, 5, 10, 3, 0)
#define SYS_VBAR_EL12 sys_reg(3, 5, 12, 0, 0)
+#define SYS_CONTEXTIDR_EL12 sys_reg(3, 5, 13, 0, 1)
+#define SYS_SCXTNUM_EL12 sys_reg(3, 5, 13, 0, 7)
#define SYS_CNTKCTL_EL12 sys_reg(3, 5, 14, 1, 0)
#define SYS_CNTP_TVAL_EL02 sys_reg(3, 5, 14, 2, 0)
#define SYS_CNTP_CTL_EL02 sys_reg(3, 5, 14, 2, 1)
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 2c29239d0..846c56368 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -96,7 +96,10 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
unsigned long addr)
{
- tlb_remove_ptdesc(tlb, virt_to_ptdesc(pudp));
+ struct ptdesc *ptdesc = virt_to_ptdesc(pudp);
+
+ pagetable_pud_dtor(ptdesc);
+ tlb_remove_ptdesc(tlb, ptdesc);
}
#endif
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index b149cf9f9..bb2c2833a 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -105,7 +105,7 @@ static inline unsigned long get_trans_granule(void)
#define __tlbi_level(op, addr, level) do { \
u64 arg = addr; \
\
- if (cpus_have_const_cap(ARM64_HAS_ARMv8_4_TTL) && \
+ if (alternative_has_cap_unlikely(ARM64_HAS_ARMv8_4_TTL) && \
level) { \
u64 ttl = level & 3; \
ttl |= get_trans_granule() << 2; \
@@ -284,16 +284,15 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
{
-#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
/*
* TLB flush deferral is not required on systems which are affected by
* ARM64_WORKAROUND_REPEAT_TLBI, as __tlbi()/__tlbi_user() implementation
* will have two consecutive TLBI instructions with a dsb(ish) in between
* defeating the purpose (i.e save overall 'dsb ish' cost).
*/
- if (unlikely(cpus_have_const_cap(ARM64_WORKAROUND_REPEAT_TLBI)))
+ if (alternative_has_cap_unlikely(ARM64_WORKAROUND_REPEAT_TLBI))
return false;
-#endif
+
return true;
}
@@ -333,7 +332,7 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
* This is meant to avoid soft lock-ups on large TLB flushing ranges and not
* necessarily a performance improvement.
*/
-#define MAX_TLBI_OPS PTRS_PER_PTE
+#define MAX_DVM_OPS PTRS_PER_PTE
/*
* __flush_tlb_range_op - Perform TLBI operation upon a range
@@ -413,12 +412,12 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
/*
* When not using TLB range ops, we can handle up to
- * (MAX_TLBI_OPS - 1) pages;
+ * (MAX_DVM_OPS - 1) pages;
* When using TLB range ops, we can handle up to
* (MAX_TLBI_RANGE_PAGES - 1) pages.
*/
if ((!system_supports_tlb_range() &&
- (end - start) >= (MAX_TLBI_OPS * stride)) ||
+ (end - start) >= (MAX_DVM_OPS * stride)) ||
pages >= MAX_TLBI_RANGE_PAGES) {
flush_tlb_mm(vma->vm_mm);
return;
@@ -451,7 +450,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
{
unsigned long addr;
- if ((end - start) > (MAX_TLBI_OPS * PAGE_SIZE)) {
+ if ((end - start) > (MAX_DVM_OPS * PAGE_SIZE)) {
flush_tlb_all();
return;
}
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
index d66dfb3a7..eefe766d6 100644
--- a/arch/arm64/include/asm/traps.h
+++ b/arch/arm64/include/asm/traps.h
@@ -9,10 +9,9 @@
#include <linux/list.h>
#include <asm/esr.h>
+#include <asm/ptrace.h>
#include <asm/sections.h>
-struct pt_regs;
-
#ifdef CONFIG_ARMV8_DEPRECATED
bool try_emulate_armv8_deprecated(struct pt_regs *regs, u32 insn);
#else
@@ -101,4 +100,55 @@ static inline unsigned long arm64_ras_serror_get_severity(unsigned long esr)
bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned long esr);
void __noreturn arm64_serror_panic(struct pt_regs *regs, unsigned long esr);
+
+static inline void arm64_mops_reset_regs(struct user_pt_regs *regs, unsigned long esr)
+{
+ bool wrong_option = esr & ESR_ELx_MOPS_ISS_WRONG_OPTION;
+ bool option_a = esr & ESR_ELx_MOPS_ISS_OPTION_A;
+ int dstreg = ESR_ELx_MOPS_ISS_DESTREG(esr);
+ int srcreg = ESR_ELx_MOPS_ISS_SRCREG(esr);
+ int sizereg = ESR_ELx_MOPS_ISS_SIZEREG(esr);
+ unsigned long dst, src, size;
+
+ dst = regs->regs[dstreg];
+ src = regs->regs[srcreg];
+ size = regs->regs[sizereg];
+
+ /*
+ * Put the registers back in the original format suitable for a
+ * prologue instruction, using the generic return routine from the
+ * Arm ARM (DDI 0487I.a) rules CNTMJ and MWFQH.
+ */
+ if (esr & ESR_ELx_MOPS_ISS_MEM_INST) {
+ /* SET* instruction */
+ if (option_a ^ wrong_option) {
+ /* Format is from Option A; forward set */
+ regs->regs[dstreg] = dst + size;
+ regs->regs[sizereg] = -size;
+ }
+ } else {
+ /* CPY* instruction */
+ if (!(option_a ^ wrong_option)) {
+ /* Format is from Option B */
+ if (regs->pstate & PSR_N_BIT) {
+ /* Backward copy */
+ regs->regs[dstreg] = dst - size;
+ regs->regs[srcreg] = src - size;
+ }
+ } else {
+ /* Format is from Option A */
+ if (size & BIT(63)) {
+ /* Forward copy */
+ regs->regs[dstreg] = dst + size;
+ regs->regs[srcreg] = src + size;
+ regs->regs[sizereg] = -size;
+ }
+ }
+ }
+
+ if (esr & ESR_ELx_MOPS_ISS_FROM_EPILOGUE)
+ regs->pc -= 8;
+ else
+ regs->pc -= 4;
+}
#endif
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index bd77253b6..531effca5 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -39,7 +39,7 @@
#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE + 5)
#define __ARM_NR_COMPAT_END (__ARM_NR_COMPAT_BASE + 0x800)
-#define __NR_compat_syscalls 453
+#define __NR_compat_syscalls 457
#endif
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 78b68311e..9f7c1bf99 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -508,8 +508,8 @@ __SYSCALL(__NR_io_submit, compat_sys_io_submit)
__SYSCALL(__NR_io_cancel, sys_io_cancel)
#define __NR_exit_group 248
__SYSCALL(__NR_exit_group, sys_exit_group)
-#define __NR_lookup_dcookie 249
-__SYSCALL(__NR_lookup_dcookie, compat_sys_lookup_dcookie)
+ /* 249 was lookup_dcookie */
+__SYSCALL(249, sys_ni_syscall)
#define __NR_epoll_create 250
__SYSCALL(__NR_epoll_create, sys_epoll_create)
#define __NR_epoll_ctl 251
@@ -911,6 +911,14 @@ __SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node)
__SYSCALL(__NR_cachestat, sys_cachestat)
#define __NR_fchmodat2 452
__SYSCALL(__NR_fchmodat2, sys_fchmodat2)
+#define __NR_map_shadow_stack 453
+__SYSCALL(__NR_map_shadow_stack, sys_map_shadow_stack)
+#define __NR_futex_wake 454
+__SYSCALL(__NR_futex_wake, sys_futex_wake)
+#define __NR_futex_wait 455
+__SYSCALL(__NR_futex_wait, sys_futex_wait)
+#define __NR_futex_requeue 456
+__SYSCALL(__NR_futex_requeue, sys_futex_requeue)
/*
* Please add new compat syscalls above this comment and update
diff --git a/arch/arm64/include/asm/vectors.h b/arch/arm64/include/asm/vectors.h
index bc9a2145f..b815d8f2c 100644
--- a/arch/arm64/include/asm/vectors.h
+++ b/arch/arm64/include/asm/vectors.h
@@ -62,7 +62,7 @@ DECLARE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector);
static inline const char *
arm64_get_bp_hardening_vector(enum arm64_bp_harden_el1_vectors slot)
{
- if (arm64_kernel_unmapped_at_el0())
+ if (cpus_have_cap(ARM64_UNMAP_KERNEL_AT_EL0))
return (char *)(TRAMP_VALIAS + SZ_2K * slot);
WARN_ON_ONCE(slot == EL1_VECTOR_KPTI);
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index 53026f45a..5023599fa 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -104,5 +104,8 @@
#define HWCAP2_SME_F16F16 (1UL << 42)
#define HWCAP2_MOPS (1UL << 43)
#define HWCAP2_HBC (1UL << 44)
+#define HWCAP2_SVE_B16B16 (1UL << 45)
+#define HWCAP2_LRCPC3 (1UL << 46)
+#define HWCAP2_LSE128 (1UL << 47)
#endif /* _UAPI__ASM_HWCAP_H */
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index f7ddd73a8..89d2fc872 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -505,6 +505,38 @@ struct kvm_smccc_filter {
#define KVM_HYPERCALL_EXIT_SMC (1U << 0)
#define KVM_HYPERCALL_EXIT_16BIT (1U << 1)
+/*
+ * Get feature ID registers userspace writable mask.
+ *
+ * From DDI0487J.a, D19.2.66 ("ID_AA64MMFR2_EL1, AArch64 Memory Model
+ * Feature Register 2"):
+ *
+ * "The Feature ID space is defined as the System register space in
+ * AArch64 with op0==3, op1=={0, 1, 3}, CRn==0, CRm=={0-7},
+ * op2=={0-7}."
+ *
+ * This covers all currently known R/O registers that indicate
+ * anything useful feature wise, including the ID registers.
+ *
+ * If we ever need to introduce a new range, it will be described as
+ * such in the range field.
+ */
+#define KVM_ARM_FEATURE_ID_RANGE_IDX(op0, op1, crn, crm, op2) \
+ ({ \
+ __u64 __op1 = (op1) & 3; \
+ __op1 -= (__op1 == 3); \
+ (__op1 << 6 | ((crm) & 7) << 3 | (op2)); \
+ })
+
+#define KVM_ARM_FEATURE_ID_RANGE 0
+#define KVM_ARM_FEATURE_ID_RANGE_SIZE (3 * 8 * 8)
+
+struct reg_mask_range {
+ __u64 addr; /* Pointer to mask array */
+ __u32 range; /* Requested range */
+ __u32 reserved[13];
+};
+
#endif
#endif /* __ARM_KVM_H__ */
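To illustrate the index encoding defined in the uapi header above (register choices are just examples, worked by hand from the macro):

/*
 * ID_AA64MMFR2_EL1 is op0=3, op1=0, CRn=0, CRm=7, op2=2:
 *   KVM_ARM_FEATURE_ID_RANGE_IDX(3, 0, 0, 7, 2) = (0 << 6) | (7 << 3) | 2 = 58
 *
 * An op1==3 register such as CTR_EL0 (op0=3, op1=3, CRn=0, CRm=0, op2=1):
 *   op1 is remapped 3 -> 2, so the index is (2 << 6) | (0 << 3) | 1 = 129
 *
 * All indices fall inside KVM_ARM_FEATURE_ID_RANGE_SIZE (3 * 8 * 8 = 192),
 * i.e. one entry per register in the mask array passed via struct
 * reg_mask_range.
 */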