author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:35:05 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:39:31 +0000
commit     85c675d0d09a45a135bddd15d7b385f8758c32fb (patch)
tree       76267dbc9b9a130337be3640948fe397b04ac629 /arch/arm64/kvm/hyp
parent     Adding upstream version 6.6.15. (diff)
Adding upstream version 6.7.7. (upstream/6.7.7)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/arm64/kvm/hyp')
-rw-r--r--  arch/arm64/kvm/hyp/include/hyp/switch.h         | 17
-rw-r--r--  arch/arm64/kvm/hyp/include/nvhe/fixed_config.h  |  3
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/ffa.c                   | 10
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/mem_protect.c           |  8
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/pkvm.c                  |  4
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/switch.c                |  2
-rw-r--r--  arch/arm64/kvm/hyp/pgtable.c                    |  8
-rw-r--r--  arch/arm64/kvm/hyp/vhe/switch.c                 | 34
-rw-r--r--  arch/arm64/kvm/hyp/vhe/sysreg-sr.c              | 11
-rw-r--r--  arch/arm64/kvm/hyp/vhe/tlb.c                    | 18
10 files changed, 78 insertions(+), 37 deletions(-)
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 9cfe6bd1db..f99d8af0b9 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -30,6 +30,7 @@
 #include <asm/fpsimd.h>
 #include <asm/debug-monitors.h>
 #include <asm/processor.h>
+#include <asm/traps.h>
 
 struct kvm_exception_table_entry {
 	int insn, fixup;
@@ -265,6 +266,22 @@ static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
 	return __get_fault_info(vcpu->arch.fault.esr_el2, &vcpu->arch.fault);
 }
 
+static bool kvm_hyp_handle_mops(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
+	arm64_mops_reset_regs(vcpu_gp_regs(vcpu), vcpu->arch.fault.esr_el2);
+	write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
+
+	/*
+	 * Finish potential single step before executing the prologue
+	 * instruction.
+	 */
+	*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
+	write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);
+
+	return true;
+}
+
 static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
 {
 	sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
index 37440e1dda..e91922daa8 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
@@ -197,7 +197,8 @@
 #define PVM_ID_AA64ISAR2_ALLOW (\
 	ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3) | \
-	ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) \
+	ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) | \
+	ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_MOPS) \
 	)
 
 u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id);
diff --git a/arch/arm64/kvm/hyp/nvhe/ffa.c b/arch/arm64/kvm/hyp/nvhe/ffa.c
index 6e4dba9ead..320f2eaa14 100644
--- a/arch/arm64/kvm/hyp/nvhe/ffa.c
+++ b/arch/arm64/kvm/hyp/nvhe/ffa.c
@@ -423,6 +423,7 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id,
 	DECLARE_REG(u32, fraglen, ctxt, 2);
 	DECLARE_REG(u64, addr_mbz, ctxt, 3);
 	DECLARE_REG(u32, npages_mbz, ctxt, 4);
+	struct ffa_mem_region_attributes *ep_mem_access;
 	struct ffa_composite_mem_region *reg;
 	struct ffa_mem_region *buf;
 	u32 offset, nr_ranges;
@@ -452,7 +453,9 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id,
 	buf = hyp_buffers.tx;
 	memcpy(buf, host_buffers.tx, fraglen);
 
-	offset = buf->ep_mem_access[0].composite_off;
+	ep_mem_access = (void *)buf +
+			ffa_mem_desc_offset(buf, 0, FFA_VERSION_1_0);
+	offset = ep_mem_access->composite_off;
 	if (!offset || buf->ep_count != 1 || buf->sender_id != HOST_FFA_ID) {
 		ret = FFA_RET_INVALID_PARAMETERS;
 		goto out_unlock;
@@ -504,6 +507,7 @@ static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
 	DECLARE_REG(u32, handle_lo, ctxt, 1);
 	DECLARE_REG(u32, handle_hi, ctxt, 2);
 	DECLARE_REG(u32, flags, ctxt, 3);
+	struct ffa_mem_region_attributes *ep_mem_access;
 	struct ffa_composite_mem_region *reg;
 	u32 offset, len, fraglen, fragoff;
 	struct ffa_mem_region *buf;
@@ -528,7 +532,9 @@ static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
 	len = res->a1;
 	fraglen = res->a2;
 
-	offset = buf->ep_mem_access[0].composite_off;
+	ep_mem_access = (void *)buf +
+			ffa_mem_desc_offset(buf, 0, FFA_VERSION_1_0);
+	offset = ep_mem_access->composite_off;
 	/*
 	 * We can trust the SPMD to get this right, but let's at least
 	 * check that we end up with something that doesn't look _completely_
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index 9d70344127..8d0a5834e8 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -129,8 +129,8 @@ static void prepare_host_vtcr(void)
 	parange = kvm_get_parange(id_aa64mmfr0_el1_sys_val);
 	phys_shift = id_aa64mmfr0_parange_to_phys_shift(parange);
 
-	host_mmu.arch.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val,
-					  id_aa64mmfr1_el1_sys_val, phys_shift);
+	host_mmu.arch.mmu.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val,
+					      id_aa64mmfr1_el1_sys_val, phys_shift);
 }
 
 static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot prot);
@@ -235,7 +235,7 @@ int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd)
 	unsigned long nr_pages;
 	int ret;
 
-	nr_pages = kvm_pgtable_stage2_pgd_size(vm->kvm.arch.vtcr) >> PAGE_SHIFT;
+	nr_pages = kvm_pgtable_stage2_pgd_size(mmu->vtcr) >> PAGE_SHIFT;
 	ret = hyp_pool_init(&vm->pool, hyp_virt_to_pfn(pgd), nr_pages, 0);
 	if (ret)
 		return ret;
@@ -295,7 +295,7 @@ int __pkvm_prot_finalize(void)
 		return -EPERM;
 
 	params->vttbr = kvm_get_vttbr(mmu);
-	params->vtcr = host_mmu.arch.vtcr;
+	params->vtcr = mmu->vtcr;
 	params->hcr_el2 |= HCR_VM;
 
 	/*
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index 8033ef353a..9d23a51d7f 100644
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -303,7 +303,7 @@ static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
 {
 	hyp_vm->host_kvm = host_kvm;
 	hyp_vm->kvm.created_vcpus = nr_vcpus;
-	hyp_vm->kvm.arch.vtcr = host_mmu.arch.vtcr;
+	hyp_vm->kvm.arch.mmu.vtcr = host_mmu.arch.mmu.vtcr;
 }
 
 static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
@@ -483,7 +483,7 @@ int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
 	}
 
 	vm_size = pkvm_get_hyp_vm_size(nr_vcpus);
-	pgd_size = kvm_pgtable_stage2_pgd_size(host_mmu.arch.vtcr);
+	pgd_size = kvm_pgtable_stage2_pgd_size(host_mmu.arch.mmu.vtcr);
 
 	ret = -ENOMEM;
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index c353a06ee7..c50f8459e4 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -192,6 +192,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
 	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
 	[ESR_ELx_EC_WATCHPT_LOW]	= kvm_hyp_handle_watchpt_low,
 	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
+	[ESR_ELx_EC_MOPS]		= kvm_hyp_handle_mops,
 };
 
 static const exit_handler_fn pvm_exit_handlers[] = {
@@ -203,6 +204,7 @@ static const exit_handler_fn pvm_exit_handlers[] = {
 	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
 	[ESR_ELx_EC_WATCHPT_LOW]	= kvm_hyp_handle_watchpt_low,
 	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
+	[ESR_ELx_EC_MOPS]		= kvm_hyp_handle_mops,
 };
 
 static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index f155b8c9e9..1966fdee74 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -401,7 +401,7 @@ static int hyp_set_prot_attr(enum kvm_pgtable_prot prot, kvm_pte_t *ptep)
 		if (device)
 			return -EINVAL;
 
-		if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && system_supports_bti())
+		if (system_supports_bti_kernel())
 			attr |= KVM_PTE_LEAF_ATTR_HI_S1_GP;
 	} else {
 		attr |= KVM_PTE_LEAF_ATTR_HI_S1_XN;
@@ -664,7 +664,7 @@ u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
 
 static bool stage2_has_fwb(struct kvm_pgtable *pgt)
 {
-	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
+	if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
 		return false;
 
 	return !(pgt->flags & KVM_PGTABLE_S2_NOFWB);
@@ -1314,7 +1314,7 @@ int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
 	ret = stage2_update_leaf_attrs(pgt, addr, 1, set, clr, NULL, &level,
 				       KVM_PGTABLE_WALK_HANDLE_FAULT |
 				       KVM_PGTABLE_WALK_SHARED);
-	if (!ret)
+	if (!ret || ret == -EAGAIN)
 		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa_nsh, pgt->mmu, addr, level);
 	return ret;
 }
@@ -1511,7 +1511,7 @@ int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
 			      kvm_pgtable_force_pte_cb_t force_pte_cb)
 {
 	size_t pgd_sz;
-	u64 vtcr = mmu->arch->vtcr;
+	u64 vtcr = mmu->vtcr;
 	u32 ia_bits = VTCR_EL2_IPA(vtcr);
 	u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
 	u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 448b17080d..1581df6aec 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -137,12 +137,12 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
 NOKPROBE_SYMBOL(__deactivate_traps);
 
 /*
- * Disable IRQs in {activate,deactivate}_traps_vhe_{load,put}() to
+ * Disable IRQs in __vcpu_{load,put}_{activate,deactivate}_traps() to
  * prevent a race condition between context switching of PMUSERENR_EL0
  * in __{activate,deactivate}_traps_common() and IPIs that attempts to
  * update PMUSERENR_EL0. See also kvm_set_pmuserenr().
  */
-void activate_traps_vhe_load(struct kvm_vcpu *vcpu)
+static void __vcpu_load_activate_traps(struct kvm_vcpu *vcpu)
 {
 	unsigned long flags;
 
@@ -151,7 +151,7 @@ void activate_traps_vhe_load(struct kvm_vcpu *vcpu)
 	local_irq_restore(flags);
 }
 
-void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu)
+static void __vcpu_put_deactivate_traps(struct kvm_vcpu *vcpu)
 {
 	unsigned long flags;
 
@@ -160,6 +160,19 @@ void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu)
 	local_irq_restore(flags);
 }
 
+void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu)
+{
+	__vcpu_load_switch_sysregs(vcpu);
+	__vcpu_load_activate_traps(vcpu);
+	__load_stage2(vcpu->arch.hw_mmu, vcpu->arch.hw_mmu->arch);
+}
+
+void kvm_vcpu_put_vhe(struct kvm_vcpu *vcpu)
+{
+	__vcpu_put_deactivate_traps(vcpu);
+	__vcpu_put_switch_sysregs(vcpu);
+}
+
 static const exit_handler_fn hyp_exit_handlers[] = {
 	[0 ... ESR_ELx_EC_MAX]		= NULL,
 	[ESR_ELx_EC_CP15_32]		= kvm_hyp_handle_cp15_32,
@@ -170,6 +183,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
 	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
 	[ESR_ELx_EC_WATCHPT_LOW]	= kvm_hyp_handle_watchpt_low,
 	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
+	[ESR_ELx_EC_MOPS]		= kvm_hyp_handle_mops,
 };
 
 static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
@@ -214,17 +228,11 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 	sysreg_save_host_state_vhe(host_ctxt);
 
 	/*
-	 * ARM erratum 1165522 requires us to configure both stage 1 and
-	 * stage 2 translation for the guest context before we clear
-	 * HCR_EL2.TGE.
-	 *
-	 * We have already configured the guest's stage 1 translation in
-	 * kvm_vcpu_load_sysregs_vhe above. We must now call
-	 * __load_stage2 before __activate_traps, because
-	 * __load_stage2 configures stage 2 translation, and
-	 * __activate_traps clear HCR_EL2.TGE (among other things).
+	 * Note that ARM erratum 1165522 requires us to configure both stage 1
+	 * and stage 2 translation for the guest context before we clear
+	 * HCR_EL2.TGE. The stage 1 and stage 2 guest context has already been
+	 * loaded on the CPU in kvm_vcpu_load_vhe().
	 */
-	__load_stage2(vcpu->arch.hw_mmu, vcpu->arch.hw_mmu->arch);
 	__activate_traps(vcpu);
 
 	__kvm_adjust_pc(vcpu);
diff --git a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
index b35a178e7e..8e1e0d5033 100644
--- a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
@@ -52,7 +52,7 @@ void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt)
 NOKPROBE_SYMBOL(sysreg_restore_guest_state_vhe);
 
 /**
- * kvm_vcpu_load_sysregs_vhe - Load guest system registers to the physical CPU
+ * __vcpu_load_switch_sysregs - Load guest system registers to the physical CPU
  *
  * @vcpu: The VCPU pointer
  *
@@ -62,7 +62,7 @@ NOKPROBE_SYMBOL(sysreg_restore_guest_state_vhe);
  * and loading system register state early avoids having to load them on
  * every entry to the VM.
  */
-void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu)
+void __vcpu_load_switch_sysregs(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
 	struct kvm_cpu_context *host_ctxt;
@@ -92,12 +92,10 @@ void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu)
 	__sysreg_restore_el1_state(guest_ctxt);
 
 	vcpu_set_flag(vcpu, SYSREGS_ON_CPU);
-
-	activate_traps_vhe_load(vcpu);
 }
 
 /**
- * kvm_vcpu_put_sysregs_vhe - Restore host system registers to the physical CPU
+ * __vcpu_put_switch_syregs - Restore host system registers to the physical CPU
  *
  * @vcpu: The VCPU pointer
  *
@@ -107,13 +105,12 @@ void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu)
  * and deferring saving system register state until we're no longer running the
 * VCPU avoids having to save them on every exit from the VM.
 */
-void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu)
+void __vcpu_put_switch_sysregs(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
 	struct kvm_cpu_context *host_ctxt;
 
 	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
-	deactivate_traps_vhe_put(vcpu);
 
 	__sysreg_save_el1_state(guest_ctxt);
 	__sysreg_save_user_state(guest_ctxt);
diff --git a/arch/arm64/kvm/hyp/vhe/tlb.c b/arch/arm64/kvm/hyp/vhe/tlb.c
index 46bd43f61d..b636b4111d 100644
--- a/arch/arm64/kvm/hyp/vhe/tlb.c
+++ b/arch/arm64/kvm/hyp/vhe/tlb.c
@@ -11,18 +11,25 @@
 #include <asm/tlbflush.h>
 
 struct tlb_inv_context {
-	unsigned long flags;
-	u64 tcr;
-	u64 sctlr;
+	struct kvm_s2_mmu	*mmu;
+	unsigned long		flags;
+	u64			tcr;
+	u64			sctlr;
 };
 
 static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
 				  struct tlb_inv_context *cxt)
 {
+	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
 	u64 val;
 
 	local_irq_save(cxt->flags);
 
+	if (vcpu && mmu != vcpu->arch.hw_mmu)
+		cxt->mmu = vcpu->arch.hw_mmu;
+	else
+		cxt->mmu = NULL;
+
 	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
 		/*
		 * For CPUs that are affected by ARM errata 1165522 or 1530923,
@@ -66,10 +73,13 @@ static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
	 * We're done with the TLB operation, let's restore the host's
	 * view of HCR_EL2.
	 */
-	write_sysreg(0, vttbr_el2);
 	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
 	isb();
 
+	/* ... and the stage-2 MMU context that we switched away from */
+	if (cxt->mmu)
+		__load_stage2(cxt->mmu, cxt->mmu->arch);
+
 	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
 		/* Restore the registers to what they were */
 		write_sysreg_el1(cxt->tcr, SYS_TCR);
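
For orientation: both the nVHE and VHE exit paths above resolve a guest exit by indexing a sparse table of handler function pointers with the ESR exception class, which is why FEAT_MOPS support lands as a single new [ESR_ELx_EC_MOPS] = kvm_hyp_handle_mops entry per table. Below is a minimal, self-contained sketch of that dispatch pattern only; the toy_vcpu type, the handler body, and the EC values are illustrative assumptions, not the kernel's definitions (the real encodings are the ESR_ELx_EC_* constants in arch/arm64/include/asm/esr.h).

/* toy_exit_dispatch.c - illustrative sketch, not kernel code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical exception-class numbers for the sketch. */
enum {
	EC_SYS64 = 0x18,
	EC_MOPS  = 0x27,
	EC_MAX   = 0x3f,
};

struct toy_vcpu {
	uint64_t pc;
};

typedef bool (*exit_handler_fn)(struct toy_vcpu *vcpu, uint64_t *exit_code);

/* Stand-in for kvm_hyp_handle_mops(): pretend to fix up the guest state. */
static bool handle_mops(struct toy_vcpu *vcpu, uint64_t *exit_code)
{
	printf("MOPS exit handled at pc=%#llx\n", (unsigned long long)vcpu->pc);
	return true;	/* handled, resume the guest directly */
}

/* Sparse table: one slot per exception class, NULL means "not handled here". */
static const exit_handler_fn exit_handlers[EC_MAX + 1] = {
	[EC_MOPS] = handle_mops,
};

static bool dispatch_exit(struct toy_vcpu *vcpu, unsigned int ec, uint64_t *exit_code)
{
	exit_handler_fn fn = (ec <= EC_MAX) ? exit_handlers[ec] : NULL;

	return fn ? fn(vcpu, exit_code) : false;
}

int main(void)
{
	struct toy_vcpu vcpu = { .pc = 0x80080000 };
	uint64_t exit_code = 0;

	printf("EC_MOPS handled: %d\n", dispatch_exit(&vcpu, EC_MOPS, &exit_code));
	printf("EC_SYS64 handled: %d\n", dispatch_exit(&vcpu, EC_SYS64, &exit_code));
	return 0;
}

The design choice mirrored here is that an unhandled class simply falls through (returns false), so anything without a table entry is forwarded to the host instead of being fixed up in the hypervisor.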