author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:40:19 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:40:19 +0000
commit    9f0fc191371843c4fc000a226b0a26b6c059aacd (patch)
tree      35f8be3ef04506ac891ad001e8c41e535ae8d01d /arch/powerpc/kvm
parent    Releasing progress-linux version 6.6.15-2~progress7.99u1. (diff)
Merging upstream version 6.7.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to '')
22 files changed, 2318 insertions(+), 173 deletions(-)
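A recurring pattern in this merge: direct loads and stores of vcpu->arch register fields are replaced by kvmppc_get_*()/kvmppc_set_*() accessors. Under the new nestedv2 (PAPR) guest API the authoritative register state lives with the L0 hypervisor, so each accessor also records which guest-state element IDs (GSIDs) are dirty or stale. As a sketch of what the KVMPPC_BOOK3S_HV_VCPU_ACCESSOR macros added to book3s_hv.h below expand to (illustrative, not a verbatim expansion):

	static inline void kvmppc_set_fscr_hv(struct kvm_vcpu *vcpu, u64 val)
	{
		vcpu->arch.fscr = val;
		/* queue this element to be re-sent to the L0 hypervisor */
		kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_FSCR);
	}

	static inline u64 kvmppc_get_fscr_hv(struct kvm_vcpu *vcpu)
	{
		/* refetch from the L0 unless the cached copy is still valid */
		kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_FSCR);
		return vcpu->arch.fscr;
	}

The array variants behave the same way except that the GSID is indexed, e.g. KVMPPC_GSID_MMCR(i). On hosts that are not nestedv2 guests these hooks should reduce to cheap no-ops behind the __kvmhv_is_nestedv2 static key introduced below, leaving bare-metal and nestedv1 behaviour unchanged.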
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 5319d889b1..4bd9d12308 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -87,8 +87,12 @@ kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
 	book3s_hv_ras.o \
 	book3s_hv_builtin.o \
 	book3s_hv_p9_perf.o \
+	book3s_hv_nestedv2.o \
+	guest-state-buffer.o \
 	$(kvm-book3s_64-builtin-tm-objs-y) \
 	$(kvm-book3s_64-builtin-xics-objs-y)
+
+obj-$(CONFIG_GUEST_STATE_BUFFER_TEST) += test-guest-state-buffer.o
 endif
 kvm-book3s_64-objs-$(CONFIG_KVM_XICS) += \
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 686d8d9eda..6cd20ab9e9 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -565,7 +565,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	regs->msr = kvmppc_get_msr(vcpu);
 	regs->srr0 = kvmppc_get_srr0(vcpu);
 	regs->srr1 = kvmppc_get_srr1(vcpu);
-	regs->pid = vcpu->arch.pid;
+	regs->pid = kvmppc_get_pid(vcpu);
 	regs->sprg0 = kvmppc_get_sprg0(vcpu);
 	regs->sprg1 = kvmppc_get_sprg1(vcpu);
 	regs->sprg2 = kvmppc_get_sprg2(vcpu);
@@ -636,17 +636,17 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
 		break;
 	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
 		i = id - KVM_REG_PPC_FPR0;
-		*val = get_reg_val(id, VCPU_FPR(vcpu, i));
+		*val = get_reg_val(id, kvmppc_get_fpr(vcpu, i));
 		break;
 	case KVM_REG_PPC_FPSCR:
-		*val = get_reg_val(id, vcpu->arch.fp.fpscr);
+		*val = get_reg_val(id, kvmppc_get_fpscr(vcpu));
 		break;
 #ifdef CONFIG_VSX
 	case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
 		if (cpu_has_feature(CPU_FTR_VSX)) {
 			i = id - KVM_REG_PPC_VSR0;
-			val->vsxval[0] = vcpu->arch.fp.fpr[i][0];
-			val->vsxval[1] = vcpu->arch.fp.fpr[i][1];
+			val->vsxval[0] = kvmppc_get_vsx_fpr(vcpu, i, 0);
+			val->vsxval[1] = kvmppc_get_vsx_fpr(vcpu, i, 1);
 		} else {
 			r = -ENXIO;
 		}
@@ -683,19 +683,19 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
 		*val = get_reg_val(id, vcpu->arch.fscr);
 		break;
 	case KVM_REG_PPC_TAR:
-		*val = get_reg_val(id, vcpu->arch.tar);
+		*val = get_reg_val(id, kvmppc_get_tar(vcpu));
 		break;
 	case KVM_REG_PPC_EBBHR:
-		*val = get_reg_val(id, vcpu->arch.ebbhr);
+		*val = get_reg_val(id, kvmppc_get_ebbhr(vcpu));
 		break;
 	case KVM_REG_PPC_EBBRR:
-		*val = get_reg_val(id, vcpu->arch.ebbrr);
+		*val = get_reg_val(id, kvmppc_get_ebbrr(vcpu));
 		break;
 	case KVM_REG_PPC_BESCR:
-		*val = get_reg_val(id, vcpu->arch.bescr);
+		*val = get_reg_val(id, kvmppc_get_bescr(vcpu));
 		break;
 	case KVM_REG_PPC_IC:
-		*val = get_reg_val(id, vcpu->arch.ic);
+		*val = get_reg_val(id, kvmppc_get_ic(vcpu));
 		break;
 	default:
 		r = -EINVAL;
@@ -724,7 +724,7 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
 		break;
 	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
 		i = id - KVM_REG_PPC_FPR0;
-		VCPU_FPR(vcpu, i) = set_reg_val(id, *val);
+		kvmppc_set_fpr(vcpu, i, set_reg_val(id, *val));
 		break;
 	case KVM_REG_PPC_FPSCR:
 		vcpu->arch.fp.fpscr = set_reg_val(id, *val);
@@ -733,8 +733,8 @@
 	case KVM_REG_PPC_VSR0 ...
KVM_REG_PPC_VSR31: if (cpu_has_feature(CPU_FTR_VSX)) { i = id - KVM_REG_PPC_VSR0; - vcpu->arch.fp.fpr[i][0] = val->vsxval[0]; - vcpu->arch.fp.fpr[i][1] = val->vsxval[1]; + kvmppc_set_vsx_fpr(vcpu, i, 0, val->vsxval[0]); + kvmppc_set_vsx_fpr(vcpu, i, 1, val->vsxval[1]); } else { r = -ENXIO; } @@ -765,22 +765,22 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, break; #endif /* CONFIG_KVM_XIVE */ case KVM_REG_PPC_FSCR: - vcpu->arch.fscr = set_reg_val(id, *val); + kvmppc_set_fpscr(vcpu, set_reg_val(id, *val)); break; case KVM_REG_PPC_TAR: - vcpu->arch.tar = set_reg_val(id, *val); + kvmppc_set_tar(vcpu, set_reg_val(id, *val)); break; case KVM_REG_PPC_EBBHR: - vcpu->arch.ebbhr = set_reg_val(id, *val); + kvmppc_set_ebbhr(vcpu, set_reg_val(id, *val)); break; case KVM_REG_PPC_EBBRR: - vcpu->arch.ebbrr = set_reg_val(id, *val); + kvmppc_set_ebbrr(vcpu, set_reg_val(id, *val)); break; case KVM_REG_PPC_BESCR: - vcpu->arch.bescr = set_reg_val(id, *val); + kvmppc_set_bescr(vcpu, set_reg_val(id, *val)); break; case KVM_REG_PPC_IC: - vcpu->arch.ic = set_reg_val(id, *val); + kvmppc_set_ic(vcpu, set_reg_val(id, *val)); break; default: r = -EINVAL; diff --git a/arch/powerpc/kvm/book3s_64_entry.S b/arch/powerpc/kvm/book3s_64_entry.S index 3b361af873..a9ab92abff 100644 --- a/arch/powerpc/kvm/book3s_64_entry.S +++ b/arch/powerpc/kvm/book3s_64_entry.S @@ -19,7 +19,7 @@ /* * This is a hcall, so register convention is as - * Documentation/powerpc/papr_hcalls.rst. + * Documentation/arch/powerpc/papr_hcalls.rst. * * This may also be a syscall from PR-KVM userspace that is to be * reflected to the PR guest kernel, so registers may be set up for diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index fdfc2a62dd..2b1f0cdd8c 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -121,7 +121,7 @@ void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info) kvm->arch.hpt = *info; kvm->arch.sdr1 = __pa(info->virt) | (info->order - 18); - pr_debug("KVM guest htab at %lx (order %ld), LPID %x\n", + pr_debug("KVM guest htab at %lx (order %ld), LPID %llx\n", info->virt, (long)info->order, kvm->arch.lpid); } diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index 10aacbf924..175a8eb268 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -97,7 +97,7 @@ static long kvmhv_copy_tofrom_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *to, void *from, unsigned long n) { int lpid = vcpu->kvm->arch.lpid; - int pid = vcpu->arch.pid; + int pid = kvmppc_get_pid(vcpu); /* This would cause a data segment intr so don't allow the access */ if (eaddr & (0x3FFUL << 52)) @@ -271,7 +271,7 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, /* Work out effective PID */ switch (eaddr >> 62) { case 0: - pid = vcpu->arch.pid; + pid = kvmppc_get_pid(vcpu); break; case 3: pid = 0; @@ -308,7 +308,7 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, } void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr, - unsigned int pshift, unsigned int lpid) + unsigned int pshift, u64 lpid) { unsigned long psize = PAGE_SIZE; int psi; @@ -345,7 +345,7 @@ void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr, pr_err("KVM: TLB page invalidation hcall failed, rc=%ld\n", rc); } -static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid) +static void kvmppc_radix_flush_pwc(struct kvm *kvm, u64 lpid) { long rc; @@ -418,7 +418,7 @@ 
static void kvmppc_pmd_free(pmd_t *pmdp) void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa, unsigned int shift, const struct kvm_memory_slot *memslot, - unsigned int lpid) + u64 lpid) { unsigned long old; @@ -469,7 +469,7 @@ void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa, * (or 4kB) mappings (of sub-pages of the same 2MB page). */ static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full, - unsigned int lpid) + u64 lpid) { if (full) { memset(pte, 0, sizeof(long) << RADIX_PTE_INDEX_SIZE); @@ -490,7 +490,7 @@ static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full, } static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full, - unsigned int lpid) + u64 lpid) { unsigned long im; pmd_t *p = pmd; @@ -519,7 +519,7 @@ static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full, } static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud, - unsigned int lpid) + u64 lpid) { unsigned long iu; pud_t *p = pud; @@ -540,7 +540,7 @@ static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud, pud_free(kvm->mm, pud); } -void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, unsigned int lpid) +void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, u64 lpid) { unsigned long ig; @@ -567,7 +567,7 @@ void kvmppc_free_radix(struct kvm *kvm) } static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd, - unsigned long gpa, unsigned int lpid) + unsigned long gpa, u64 lpid) { pte_t *pte = pte_offset_kernel(pmd, 0); @@ -583,7 +583,7 @@ static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd, } static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud, - unsigned long gpa, unsigned int lpid) + unsigned long gpa, u64 lpid) { pmd_t *pmd = pmd_offset(pud, 0); @@ -609,7 +609,7 @@ static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud, int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, unsigned long gpa, unsigned int level, - unsigned long mmu_seq, unsigned int lpid, + unsigned long mmu_seq, u64 lpid, unsigned long *rmapp, struct rmap_nested **n_rmap) { pgd_t *pgd; @@ -786,7 +786,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, } bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested, bool writing, - unsigned long gpa, unsigned int lpid) + unsigned long gpa, u64 lpid) { unsigned long pgflags; unsigned int shift; diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index 93b695b289..14c6d7e318 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c @@ -77,8 +77,8 @@ static void kvm_spapr_tce_liobn_put(struct kref *kref) call_rcu(&stit->rcu, kvm_spapr_tce_iommu_table_free); } -extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm, - struct iommu_group *grp) +void kvm_spapr_tce_release_iommu_group(struct kvm *kvm, + struct iommu_group *grp) { int i; struct kvmppc_spapr_tce_table *stt; @@ -105,8 +105,8 @@ extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm, rcu_read_unlock(); } -extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd, - struct iommu_group *grp) +long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd, + struct iommu_group *grp) { struct kvmppc_spapr_tce_table *stt = NULL; bool found = false; @@ -786,12 +786,12 @@ long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn, idx = (ioba >> stt->page_shift) - stt->offset; page = stt->pages[idx / TCES_PER_PAGE]; if (!page) { - 
vcpu->arch.regs.gpr[4] = 0; + kvmppc_set_gpr(vcpu, 4, 0); return H_SUCCESS; } tbl = (u64 *)page_address(page); - vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE]; + kvmppc_set_gpr(vcpu, 4, tbl[idx % TCES_PER_PAGE]); return H_SUCCESS; } diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 0429488ba1..b5c6af0bef 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -391,9 +391,27 @@ static void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr) /* Dummy value used in computing PCR value below */ #define PCR_ARCH_31 (PCR_ARCH_300 << 1) +static inline unsigned long map_pcr_to_cap(unsigned long pcr) +{ + unsigned long cap = 0; + + switch (pcr) { + case PCR_ARCH_300: + cap = H_GUEST_CAP_POWER9; + break; + case PCR_ARCH_31: + cap = H_GUEST_CAP_POWER10; + break; + default: + break; + } + + return cap; +} + static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat) { - unsigned long host_pcr_bit = 0, guest_pcr_bit = 0; + unsigned long host_pcr_bit = 0, guest_pcr_bit = 0, cap = 0; struct kvmppc_vcore *vc = vcpu->arch.vcore; /* We can (emulate) our own architecture version and anything older */ @@ -437,8 +455,20 @@ static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat) if (guest_pcr_bit > host_pcr_bit) return -EINVAL; + if (kvmhv_on_pseries() && kvmhv_is_nestedv2()) { + /* + * 'arch_compat == 0' would mean the guest should default to + * L1's compatibility. In this case, the guest would pick + * host's PCR and evaluate the corresponding capabilities. + */ + cap = map_pcr_to_cap(guest_pcr_bit); + if (!(cap & nested_capabilities)) + return -EINVAL; + } + spin_lock(&vc->lock); vc->arch_compat = arch_compat; + kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LOGICAL_PVR); /* * Set all PCR bits for which guest_pcr_bit <= bit < host_pcr_bit * Also set all reserved PCR bits @@ -794,7 +824,7 @@ static void kvmppc_update_vpa_dispatch(struct kvm_vcpu *vcpu, vpa->enqueue_dispatch_tb = cpu_to_be64(be64_to_cpu(vpa->enqueue_dispatch_tb) + stolen); - __kvmppc_create_dtl_entry(vcpu, vpa, vc->pcpu, now + vc->tb_offset, stolen); + __kvmppc_create_dtl_entry(vcpu, vpa, vc->pcpu, now + kvmppc_get_tb_offset(vcpu), stolen); vcpu->arch.vpa.dirty = true; } @@ -845,9 +875,9 @@ static bool kvmppc_doorbell_pending(struct kvm_vcpu *vcpu) static bool kvmppc_power8_compatible(struct kvm_vcpu *vcpu) { - if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207) + if (kvmppc_get_arch_compat(vcpu) >= PVR_ARCH_207) return true; - if ((!vcpu->arch.vcore->arch_compat) && + if ((!kvmppc_get_arch_compat(vcpu)) && cpu_has_feature(CPU_FTR_ARCH_207S)) return true; return false; @@ -1267,10 +1297,14 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) return RESUME_HOST; break; #endif - case H_RANDOM: - if (!arch_get_random_seed_longs(&vcpu->arch.regs.gpr[4], 1)) + case H_RANDOM: { + unsigned long rand; + + if (!arch_get_random_seed_longs(&rand, 1)) ret = H_HARDWARE; + kvmppc_set_gpr(vcpu, 4, rand); break; + } case H_RPT_INVALIDATE: ret = kvmppc_h_rpt_invalidate(vcpu, kvmppc_get_gpr(vcpu, 4), kvmppc_get_gpr(vcpu, 5), @@ -2183,6 +2217,7 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, } vc->lpcr = new_lpcr; + kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LPCR); spin_unlock(&vc->lock); } @@ -2279,7 +2314,7 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, *val = get_reg_val(id, vcpu->arch.vcore->dpdes); break; case KVM_REG_PPC_VTB: - *val = get_reg_val(id, vcpu->arch.vcore->vtb); + *val = get_reg_val(id, kvmppc_get_vtb(vcpu)); break; case 
KVM_REG_PPC_DAWR: *val = get_reg_val(id, kvmppc_get_dawr0_hv(vcpu)); @@ -2306,7 +2341,7 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, *val = get_reg_val(id, vcpu->arch.tcscr); break; case KVM_REG_PPC_PID: - *val = get_reg_val(id, vcpu->arch.pid); + *val = get_reg_val(id, kvmppc_get_pid(vcpu)); break; case KVM_REG_PPC_ACOP: *val = get_reg_val(id, vcpu->arch.acop); @@ -2338,11 +2373,11 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, spin_unlock(&vcpu->arch.vpa_update_lock); break; case KVM_REG_PPC_TB_OFFSET: - *val = get_reg_val(id, vcpu->arch.vcore->tb_offset); + *val = get_reg_val(id, kvmppc_get_tb_offset(vcpu)); break; case KVM_REG_PPC_LPCR: case KVM_REG_PPC_LPCR_64: - *val = get_reg_val(id, vcpu->arch.vcore->lpcr); + *val = get_reg_val(id, kvmppc_get_lpcr(vcpu)); break; case KVM_REG_PPC_PPR: *val = get_reg_val(id, kvmppc_get_ppr_hv(vcpu)); @@ -2414,10 +2449,10 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, break; #endif case KVM_REG_PPC_ARCH_COMPAT: - *val = get_reg_val(id, vcpu->arch.vcore->arch_compat); + *val = get_reg_val(id, kvmppc_get_arch_compat(vcpu)); break; case KVM_REG_PPC_DEC_EXPIRY: - *val = get_reg_val(id, vcpu->arch.dec_expires); + *val = get_reg_val(id, kvmppc_get_dec_expires(vcpu)); break; case KVM_REG_PPC_ONLINE: *val = get_reg_val(id, vcpu->arch.online); @@ -2522,7 +2557,7 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, vcpu->arch.vcore->dpdes = set_reg_val(id, *val); break; case KVM_REG_PPC_VTB: - vcpu->arch.vcore->vtb = set_reg_val(id, *val); + kvmppc_set_vtb(vcpu, set_reg_val(id, *val)); break; case KVM_REG_PPC_DAWR: kvmppc_set_dawr0_hv(vcpu, set_reg_val(id, *val)); @@ -2552,7 +2587,7 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, vcpu->arch.tcscr = set_reg_val(id, *val); break; case KVM_REG_PPC_PID: - vcpu->arch.pid = set_reg_val(id, *val); + kvmppc_set_pid(vcpu, set_reg_val(id, *val)); break; case KVM_REG_PPC_ACOP: vcpu->arch.acop = set_reg_val(id, *val); @@ -2605,10 +2640,11 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, * decrementer, which is better than a large one that * causes a hang. 
*/ - if (!vcpu->arch.dec_expires && tb_offset) - vcpu->arch.dec_expires = get_tb() + tb_offset; + kvmppc_set_tb_offset(vcpu, tb_offset); + if (!kvmppc_get_dec_expires(vcpu) && tb_offset) + kvmppc_set_dec_expires(vcpu, get_tb() + tb_offset); - vcpu->arch.vcore->tb_offset = tb_offset; + kvmppc_set_tb_offset(vcpu, tb_offset); break; } case KVM_REG_PPC_LPCR: @@ -2689,7 +2725,7 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val)); break; case KVM_REG_PPC_DEC_EXPIRY: - vcpu->arch.dec_expires = set_reg_val(id, *val); + kvmppc_set_dec_expires(vcpu, set_reg_val(id, *val)); break; case KVM_REG_PPC_ONLINE: i = set_reg_val(id, *val); @@ -2922,8 +2958,14 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu) vcpu->arch.shared_big_endian = false; #endif #endif - kvmppc_set_mmcr_hv(vcpu, 0, MMCR0_FC); + if (kvmhv_is_nestedv2()) { + err = kvmhv_nestedv2_vcpu_create(vcpu, &vcpu->arch.nestedv2_io); + if (err < 0) + return err; + } + + kvmppc_set_mmcr_hv(vcpu, 0, MMCR0_FC); if (cpu_has_feature(CPU_FTR_ARCH_31)) { kvmppc_set_mmcr_hv(vcpu, 0, kvmppc_get_mmcr_hv(vcpu, 0) | MMCR0_PMCCEXT); kvmppc_set_mmcra_hv(vcpu, MMCRA_BHRB_DISABLE); @@ -3079,6 +3121,8 @@ static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu) unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow); unpin_vpa(vcpu->kvm, &vcpu->arch.vpa); spin_unlock(&vcpu->arch.vpa_update_lock); + if (kvmhv_is_nestedv2()) + kvmhv_nestedv2_vcpu_free(vcpu, &vcpu->arch.nestedv2_io); } static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu) @@ -4043,10 +4087,58 @@ static void vcpu_vpa_increment_dispatch(struct kvm_vcpu *vcpu) } } +static int kvmhv_vcpu_entry_nestedv2(struct kvm_vcpu *vcpu, u64 time_limit, + unsigned long lpcr, u64 *tb) +{ + struct kvmhv_nestedv2_io *io; + unsigned long msr, i; + int trap; + long rc; + + io = &vcpu->arch.nestedv2_io; + + msr = mfmsr(); + kvmppc_msr_hard_disable_set_facilities(vcpu, msr); + if (lazy_irq_pending()) + return 0; + + rc = kvmhv_nestedv2_flush_vcpu(vcpu, time_limit); + if (rc < 0) + return -EINVAL; + + accumulate_time(vcpu, &vcpu->arch.in_guest); + rc = plpar_guest_run_vcpu(0, vcpu->kvm->arch.lpid, vcpu->vcpu_id, + &trap, &i); + + if (rc != H_SUCCESS) { + pr_err("KVM Guest Run VCPU hcall failed\n"); + if (rc == H_INVALID_ELEMENT_ID) + pr_err("KVM: Guest Run VCPU invalid element id at %ld\n", i); + else if (rc == H_INVALID_ELEMENT_SIZE) + pr_err("KVM: Guest Run VCPU invalid element size at %ld\n", i); + else if (rc == H_INVALID_ELEMENT_VALUE) + pr_err("KVM: Guest Run VCPU invalid element value at %ld\n", i); + return -EINVAL; + } + accumulate_time(vcpu, &vcpu->arch.guest_exit); + + *tb = mftb(); + kvmppc_gsm_reset(io->vcpu_message); + kvmppc_gsm_reset(io->vcore_message); + kvmppc_gsbm_zero(&io->valids); + + rc = kvmhv_nestedv2_parse_output(vcpu); + if (rc < 0) + return -EINVAL; + + timer_rearm_host_dec(*tb); + + return trap; +} + /* call our hypervisor to load up HV regs and go */ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u64 *tb) { - struct kvmppc_vcore *vc = vcpu->arch.vcore; unsigned long host_psscr; unsigned long msr; struct hv_guest_state hvregs; @@ -4126,7 +4218,7 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */ dec = (s32) dec; *tb = mftb(); - vcpu->arch.dec_expires = dec + (*tb + vc->tb_offset); + vcpu->arch.dec_expires = dec + (*tb + kvmppc_get_tb_offset(vcpu)); 
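/*
 * Worked example for the conversion just above (illustrative numbers):
 * the guest timebase runs at host_tb + tb_offset and dec_expires is
 * kept in guest-timebase units, so with *tb = 0x1000, tb_offset = 0x100
 * and dec = 0x50 remaining, the guest decrementer expires at
 * 0x50 + (0x1000 + 0x100) = 0x1150. The nestedv2 fill_info path applies
 * the inverse and transmits dec_expires - tb_offset as
 * KVMPPC_GSID_DEC_EXPIRY_TB.
 */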
timer_rearm_host_dec(*tb); @@ -4161,7 +4253,10 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit, vcpu_vpa_increment_dispatch(vcpu); if (kvmhv_on_pseries()) { - trap = kvmhv_vcpu_entry_p9_nested(vcpu, time_limit, lpcr, tb); + if (kvmhv_is_nestedv1()) + trap = kvmhv_vcpu_entry_p9_nested(vcpu, time_limit, lpcr, tb); + else + trap = kvmhv_vcpu_entry_nestedv2(vcpu, time_limit, lpcr, tb); /* H_CEDE has to be handled now, not later */ if (trap == BOOK3S_INTERRUPT_SYSCALL && !nested && @@ -4691,7 +4786,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit, tb = mftb(); - kvmppc_update_vpa_dispatch_p9(vcpu, vc, tb + vc->tb_offset); + kvmppc_update_vpa_dispatch_p9(vcpu, vc, tb + kvmppc_get_tb_offset(vcpu)); trace_kvm_guest_enter(vcpu); @@ -5147,6 +5242,14 @@ void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask) if (++cores_done >= kvm->arch.online_vcores) break; } + + if (kvmhv_is_nestedv2()) { + struct kvm_vcpu *vcpu; + + kvm_for_each_vcpu(i, vcpu, kvm) { + kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LPCR); + } + } } void kvmppc_setup_partition_table(struct kvm *kvm) @@ -5413,15 +5516,43 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm) /* Allocate the guest's logical partition ID */ - lpid = kvmppc_alloc_lpid(); - if ((long)lpid < 0) - return -ENOMEM; - kvm->arch.lpid = lpid; + if (!kvmhv_is_nestedv2()) { + lpid = kvmppc_alloc_lpid(); + if ((long)lpid < 0) + return -ENOMEM; + kvm->arch.lpid = lpid; + } kvmppc_alloc_host_rm_ops(); kvmhv_vm_nested_init(kvm); + if (kvmhv_is_nestedv2()) { + long rc; + unsigned long guest_id; + + rc = plpar_guest_create(0, &guest_id); + + if (rc != H_SUCCESS) + pr_err("KVM: Create Guest hcall failed, rc=%ld\n", rc); + + switch (rc) { + case H_PARAMETER: + case H_FUNCTION: + case H_STATE: + return -EINVAL; + case H_NOT_ENOUGH_RESOURCES: + case H_ABORTED: + return -ENOMEM; + case H_AUTHORITY: + return -EPERM; + case H_NOT_AVAILABLE: + return -EBUSY; + } + kvm->arch.lpid = guest_id; + } + + /* * Since we don't flush the TLB when tearing down a VM, * and this lpid might have previously been used, @@ -5491,7 +5622,10 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm) lpcr |= LPCR_HAIL; ret = kvmppc_init_vm_radix(kvm); if (ret) { - kvmppc_free_lpid(kvm->arch.lpid); + if (kvmhv_is_nestedv2()) + plpar_guest_delete(0, kvm->arch.lpid); + else + kvmppc_free_lpid(kvm->arch.lpid); return ret; } kvmppc_setup_partition_table(kvm); @@ -5581,10 +5715,14 @@ static void kvmppc_core_destroy_vm_hv(struct kvm *kvm) kvm->arch.process_table = 0; if (kvm->arch.secure_guest) uv_svm_terminate(kvm->arch.lpid); - kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0); + if (!kvmhv_is_nestedv2()) + kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0); } - kvmppc_free_lpid(kvm->arch.lpid); + if (kvmhv_is_nestedv2()) + plpar_guest_delete(0, kvm->arch.lpid); + else + kvmppc_free_lpid(kvm->arch.lpid); kvmppc_free_pimap(kvm); } @@ -5996,6 +6134,8 @@ static int kvmhv_enable_nested(struct kvm *kvm) return -ENODEV; if (!radix_enabled()) return -ENODEV; + if (kvmhv_is_nestedv2()) + return -ENODEV; /* kvm == NULL means the caller is testing if the capability exists */ if (kvm) diff --git a/arch/powerpc/kvm/book3s_hv.h b/arch/powerpc/kvm/book3s_hv.h index 95241764df..47b2c81564 100644 --- a/arch/powerpc/kvm/book3s_hv.h +++ b/arch/powerpc/kvm/book3s_hv.h @@ -3,6 +3,8 @@ /* * Privileged (non-hypervisor) host registers to save. 
 */
+#include "asm/guest-state-buffer.h"
+
 struct p9_host_os_sprs {
 	unsigned long iamr;
 	unsigned long amr;
@@ -54,67 +56,73 @@ void accumulate_time(struct kvm_vcpu *vcpu, struct kvmhv_tb_accumulator *next);
 static inline void __kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 val)
 {
 	vcpu->arch.shregs.msr = val;
+	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_MSR);
 }
 
 static inline u64 __kvmppc_get_msr_hv(struct kvm_vcpu *vcpu)
 {
+	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_MSR) < 0);
 	return vcpu->arch.shregs.msr;
 }
 
-#define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_SET(reg, size)			\
+#define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_SET(reg, size, iden)		\
 static inline void kvmppc_set_##reg ##_hv(struct kvm_vcpu *vcpu, u##size val)	\
 {									\
 	vcpu->arch.reg = val;						\
+	kvmhv_nestedv2_mark_dirty(vcpu, iden);				\
 }
 
-#define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_GET(reg, size)			\
+#define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_GET(reg, size, iden)		\
 static inline u##size kvmppc_get_##reg ##_hv(struct kvm_vcpu *vcpu)	\
 {									\
+	kvmhv_nestedv2_cached_reload(vcpu, iden);			\
 	return vcpu->arch.reg;						\
 }
 
-#define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(reg, size)			\
-	KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_SET(reg, size)			\
-	KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_GET(reg, size)			\
+#define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(reg, size, iden)			\
+	KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_SET(reg, size, iden)		\
+	KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_GET(reg, size, iden)		\
 
-#define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_SET(reg, size)		\
+#define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_SET(reg, size, iden)	\
 static inline void kvmppc_set_##reg ##_hv(struct kvm_vcpu *vcpu, int i, u##size val)	\
 {									\
 	vcpu->arch.reg[i] = val;					\
+	kvmhv_nestedv2_mark_dirty(vcpu, iden(i));			\
 }
 
-#define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_GET(reg, size)		\
+#define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_GET(reg, size, iden)	\
 static inline u##size kvmppc_get_##reg ##_hv(struct kvm_vcpu *vcpu, int i)	\
 {									\
+	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden(i)) < 0);	\
 	return vcpu->arch.reg[i];					\
 }
 
-#define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(reg, size)			\
-	KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_SET(reg, size)		\
-	KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_GET(reg, size)		\
-
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(mmcra, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(hfscr, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(fscr, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dscr, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(purr, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(spurr, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(amr, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(uamor, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(siar, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(sdar, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(iamr, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawr0, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawr1, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawrx0, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawrx1, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ciabr, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(wort, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ppr, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ctrl, 64)
-
-KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(mmcr, 64)
-KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(sier, 64)
-KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(pmc, 32)
-
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(pspb, 32)
+#define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(reg, size, iden)		\
+	KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_SET(reg, size, iden)	\
+	KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_GET(reg, size, iden)	\
+
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(mmcra, 64, KVMPPC_GSID_MMCRA)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(hfscr, 64, KVMPPC_GSID_HFSCR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(fscr, 64, KVMPPC_GSID_FSCR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dscr, 64, KVMPPC_GSID_DSCR) +KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(purr, 64, KVMPPC_GSID_PURR) +KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(spurr, 64, KVMPPC_GSID_SPURR) +KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(amr, 64, KVMPPC_GSID_AMR) +KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(uamor, 64, KVMPPC_GSID_UAMOR) +KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(siar, 64, KVMPPC_GSID_SIAR) +KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(sdar, 64, KVMPPC_GSID_SDAR) +KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(iamr, 64, KVMPPC_GSID_IAMR) +KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawr0, 64, KVMPPC_GSID_DAWR0) +KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawr1, 64, KVMPPC_GSID_DAWR1) +KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawrx0, 64, KVMPPC_GSID_DAWRX0) +KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawrx1, 64, KVMPPC_GSID_DAWRX1) +KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ciabr, 64, KVMPPC_GSID_CIABR) +KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(wort, 64, KVMPPC_GSID_WORT) +KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ppr, 64, KVMPPC_GSID_PPR) +KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ctrl, 64, KVMPPC_GSID_CTRL); + +KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(mmcr, 64, KVMPPC_GSID_MMCR) +KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(sier, 64, KVMPPC_GSID_SIER) +KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(pmc, 32, KVMPPC_GSID_PMC) + +KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(pspb, 32, KVMPPC_GSID_PSPB) diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c index 663f5222f3..fa0e3a22ca 100644 --- a/arch/powerpc/kvm/book3s_hv_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_builtin.c @@ -183,9 +183,13 @@ EXPORT_SYMBOL_GPL(kvmppc_hwrng_present); long kvmppc_rm_h_random(struct kvm_vcpu *vcpu) { + unsigned long rand; + if (ppc_md.get_random_seed && - ppc_md.get_random_seed(&vcpu->arch.regs.gpr[4])) + ppc_md.get_random_seed(&rand)) { + kvmppc_set_gpr(vcpu, 4, rand); return H_SUCCESS; + } return H_HARDWARE; } diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c index 377d0b4a05..3b658b8696 100644 --- a/arch/powerpc/kvm/book3s_hv_nested.c +++ b/arch/powerpc/kvm/book3s_hv_nested.c @@ -428,10 +428,12 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu) return vcpu->arch.trap; } +unsigned long nested_capabilities; + long kvmhv_nested_init(void) { long int ptb_order; - unsigned long ptcr; + unsigned long ptcr, host_capabilities; long rc; if (!kvmhv_on_pseries()) @@ -439,6 +441,29 @@ long kvmhv_nested_init(void) if (!radix_enabled()) return -ENODEV; + rc = plpar_guest_get_capabilities(0, &host_capabilities); + if (rc == H_SUCCESS) { + unsigned long capabilities = 0; + + if (cpu_has_feature(CPU_FTR_ARCH_31)) + capabilities |= H_GUEST_CAP_POWER10; + if (cpu_has_feature(CPU_FTR_ARCH_300)) + capabilities |= H_GUEST_CAP_POWER9; + + nested_capabilities = capabilities & host_capabilities; + rc = plpar_guest_set_capabilities(0, nested_capabilities); + if (rc != H_SUCCESS) { + pr_err("kvm-hv: Could not configure parent hypervisor capabilities (rc=%ld)", + rc); + return -ENODEV; + } + + static_branch_enable(&__kvmhv_is_nestedv2); + return 0; + } + + pr_info("kvm-hv: nestedv2 get capabilities hcall failed, falling back to nestedv1 (rc=%ld)\n", + rc); /* Partition table entry is 1<<4 bytes in size, hence the 4. 
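 * (Each entry is therefore 16 bytes, the patb0/patb1 doubleword pair
 * that kvmhv_set_ptbl_entry() writes below, giving a table of order
 * KVM_MAX_NESTED_GUESTS_SHIFT + 4.)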
*/ ptb_order = KVM_MAX_NESTED_GUESTS_SHIFT + 4; /* Minimum partition table size is 1<<12 bytes */ @@ -478,7 +503,7 @@ void kvmhv_nested_exit(void) } } -static void kvmhv_flush_lpid(unsigned int lpid) +static void kvmhv_flush_lpid(u64 lpid) { long rc; @@ -500,17 +525,22 @@ static void kvmhv_flush_lpid(unsigned int lpid) pr_err("KVM: TLB LPID invalidation hcall failed, rc=%ld\n", rc); } -void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1) +void kvmhv_set_ptbl_entry(u64 lpid, u64 dw0, u64 dw1) { if (!kvmhv_on_pseries()) { mmu_partition_table_set_entry(lpid, dw0, dw1, true); return; } - pseries_partition_tb[lpid].patb0 = cpu_to_be64(dw0); - pseries_partition_tb[lpid].patb1 = cpu_to_be64(dw1); - /* L0 will do the necessary barriers */ - kvmhv_flush_lpid(lpid); + if (kvmhv_is_nestedv1()) { + pseries_partition_tb[lpid].patb0 = cpu_to_be64(dw0); + pseries_partition_tb[lpid].patb1 = cpu_to_be64(dw1); + /* L0 will do the necessary barriers */ + kvmhv_flush_lpid(lpid); + } + + if (kvmhv_is_nestedv2()) + kvmhv_nestedv2_set_ptbl_entry(lpid, dw0, dw1); } static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp) diff --git a/arch/powerpc/kvm/book3s_hv_nestedv2.c b/arch/powerpc/kvm/book3s_hv_nestedv2.c new file mode 100644 index 0000000000..f354af7e85 --- /dev/null +++ b/arch/powerpc/kvm/book3s_hv_nestedv2.c @@ -0,0 +1,1010 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2023 Jordan Niethe, IBM Corp. <jniethe5@gmail.com> + * + * Authors: + * Jordan Niethe <jniethe5@gmail.com> + * + * Description: KVM functions specific to running on Book 3S + * processors as a NESTEDv2 guest. + * + */ + +#include "linux/blk-mq.h" +#include "linux/console.h" +#include "linux/gfp_types.h" +#include "linux/signal.h" +#include <linux/kernel.h> +#include <linux/kvm_host.h> +#include <linux/pgtable.h> + +#include <asm/kvm_ppc.h> +#include <asm/kvm_book3s.h> +#include <asm/hvcall.h> +#include <asm/pgalloc.h> +#include <asm/reg.h> +#include <asm/plpar_wrappers.h> +#include <asm/guest-state-buffer.h> +#include "trace_hv.h" + +struct static_key_false __kvmhv_is_nestedv2 __read_mostly; +EXPORT_SYMBOL_GPL(__kvmhv_is_nestedv2); + + +static size_t +gs_msg_ops_kvmhv_nestedv2_config_get_size(struct kvmppc_gs_msg *gsm) +{ + u16 ids[] = { + KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE, + KVMPPC_GSID_RUN_INPUT, + KVMPPC_GSID_RUN_OUTPUT, + + }; + size_t size = 0; + + for (int i = 0; i < ARRAY_SIZE(ids); i++) + size += kvmppc_gse_total_size(kvmppc_gsid_size(ids[i])); + return size; +} + +static int +gs_msg_ops_kvmhv_nestedv2_config_fill_info(struct kvmppc_gs_buff *gsb, + struct kvmppc_gs_msg *gsm) +{ + struct kvmhv_nestedv2_config *cfg; + int rc; + + cfg = gsm->data; + + if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE)) { + rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE, + cfg->vcpu_run_output_size); + if (rc < 0) + return rc; + } + + if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_INPUT)) { + rc = kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_INPUT, + cfg->vcpu_run_input_cfg); + if (rc < 0) + return rc; + } + + if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_OUTPUT)) { + kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_OUTPUT, + cfg->vcpu_run_output_cfg); + if (rc < 0) + return rc; + } + + return 0; +} + +static int +gs_msg_ops_kvmhv_nestedv2_config_refresh_info(struct kvmppc_gs_msg *gsm, + struct kvmppc_gs_buff *gsb) +{ + struct kvmhv_nestedv2_config *cfg; + struct kvmppc_gs_parser gsp = { 0 }; + struct kvmppc_gs_elem *gse; + int rc; + + cfg = gsm->data; + + rc = kvmppc_gse_parse(&gsp, gsb); + if (rc < 0) 
+ return rc; + + gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE); + if (gse) + cfg->vcpu_run_output_size = kvmppc_gse_get_u64(gse); + return 0; +} + +static struct kvmppc_gs_msg_ops config_msg_ops = { + .get_size = gs_msg_ops_kvmhv_nestedv2_config_get_size, + .fill_info = gs_msg_ops_kvmhv_nestedv2_config_fill_info, + .refresh_info = gs_msg_ops_kvmhv_nestedv2_config_refresh_info, +}; + +static size_t gs_msg_ops_vcpu_get_size(struct kvmppc_gs_msg *gsm) +{ + struct kvmppc_gs_bitmap gsbm = { 0 }; + size_t size = 0; + u16 iden; + + kvmppc_gsbm_fill(&gsbm); + kvmppc_gsbm_for_each(&gsbm, iden) + { + switch (iden) { + case KVMPPC_GSID_HOST_STATE_SIZE: + case KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE: + case KVMPPC_GSID_PARTITION_TABLE: + case KVMPPC_GSID_PROCESS_TABLE: + case KVMPPC_GSID_RUN_INPUT: + case KVMPPC_GSID_RUN_OUTPUT: + break; + default: + size += kvmppc_gse_total_size(kvmppc_gsid_size(iden)); + } + } + return size; +} + +static int gs_msg_ops_vcpu_fill_info(struct kvmppc_gs_buff *gsb, + struct kvmppc_gs_msg *gsm) +{ + struct kvm_vcpu *vcpu; + vector128 v; + int rc, i; + u16 iden; + u32 arch_compat = 0; + + vcpu = gsm->data; + + kvmppc_gsm_for_each(gsm, iden) + { + rc = 0; + + if ((gsm->flags & KVMPPC_GS_FLAGS_WIDE) != + (kvmppc_gsid_flags(iden) & KVMPPC_GS_FLAGS_WIDE)) + continue; + + switch (iden) { + case KVMPPC_GSID_DSCR: + rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dscr); + break; + case KVMPPC_GSID_MMCRA: + rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.mmcra); + break; + case KVMPPC_GSID_HFSCR: + rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.hfscr); + break; + case KVMPPC_GSID_PURR: + rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.purr); + break; + case KVMPPC_GSID_SPURR: + rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.spurr); + break; + case KVMPPC_GSID_AMR: + rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.amr); + break; + case KVMPPC_GSID_UAMOR: + rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.uamor); + break; + case KVMPPC_GSID_SIAR: + rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.siar); + break; + case KVMPPC_GSID_SDAR: + rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.sdar); + break; + case KVMPPC_GSID_IAMR: + rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.iamr); + break; + case KVMPPC_GSID_DAWR0: + rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dawr0); + break; + case KVMPPC_GSID_DAWR1: + rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dawr1); + break; + case KVMPPC_GSID_DAWRX0: + rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.dawrx0); + break; + case KVMPPC_GSID_DAWRX1: + rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.dawrx1); + break; + case KVMPPC_GSID_CIABR: + rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ciabr); + break; + case KVMPPC_GSID_WORT: + rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.wort); + break; + case KVMPPC_GSID_PPR: + rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ppr); + break; + case KVMPPC_GSID_PSPB: + rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pspb); + break; + case KVMPPC_GSID_TAR: + rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.tar); + break; + case KVMPPC_GSID_FSCR: + rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.fscr); + break; + case KVMPPC_GSID_EBBHR: + rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ebbhr); + break; + case KVMPPC_GSID_EBBRR: + rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ebbrr); + break; + case KVMPPC_GSID_BESCR: + rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.bescr); + break; + case KVMPPC_GSID_IC: + rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ic); + break; + case KVMPPC_GSID_CTRL: + rc = kvmppc_gse_put_u64(gsb, iden, 
vcpu->arch.ctrl); + break; + case KVMPPC_GSID_PIDR: + rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pid); + break; + case KVMPPC_GSID_AMOR: { + u64 amor = ~0; + + rc = kvmppc_gse_put_u64(gsb, iden, amor); + break; + } + case KVMPPC_GSID_VRSAVE: + rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.vrsave); + break; + case KVMPPC_GSID_MMCR(0)... KVMPPC_GSID_MMCR(3): + i = iden - KVMPPC_GSID_MMCR(0); + rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.mmcr[i]); + break; + case KVMPPC_GSID_SIER(0)... KVMPPC_GSID_SIER(2): + i = iden - KVMPPC_GSID_SIER(0); + rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.sier[i]); + break; + case KVMPPC_GSID_PMC(0)... KVMPPC_GSID_PMC(5): + i = iden - KVMPPC_GSID_PMC(0); + rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pmc[i]); + break; + case KVMPPC_GSID_GPR(0)... KVMPPC_GSID_GPR(31): + i = iden - KVMPPC_GSID_GPR(0); + rc = kvmppc_gse_put_u64(gsb, iden, + vcpu->arch.regs.gpr[i]); + break; + case KVMPPC_GSID_CR: + rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.regs.ccr); + break; + case KVMPPC_GSID_XER: + rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.xer); + break; + case KVMPPC_GSID_CTR: + rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.ctr); + break; + case KVMPPC_GSID_LR: + rc = kvmppc_gse_put_u64(gsb, iden, + vcpu->arch.regs.link); + break; + case KVMPPC_GSID_NIA: + rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.nip); + break; + case KVMPPC_GSID_SRR0: + rc = kvmppc_gse_put_u64(gsb, iden, + vcpu->arch.shregs.srr0); + break; + case KVMPPC_GSID_SRR1: + rc = kvmppc_gse_put_u64(gsb, iden, + vcpu->arch.shregs.srr1); + break; + case KVMPPC_GSID_SPRG0: + rc = kvmppc_gse_put_u64(gsb, iden, + vcpu->arch.shregs.sprg0); + break; + case KVMPPC_GSID_SPRG1: + rc = kvmppc_gse_put_u64(gsb, iden, + vcpu->arch.shregs.sprg1); + break; + case KVMPPC_GSID_SPRG2: + rc = kvmppc_gse_put_u64(gsb, iden, + vcpu->arch.shregs.sprg2); + break; + case KVMPPC_GSID_SPRG3: + rc = kvmppc_gse_put_u64(gsb, iden, + vcpu->arch.shregs.sprg3); + break; + case KVMPPC_GSID_DAR: + rc = kvmppc_gse_put_u64(gsb, iden, + vcpu->arch.shregs.dar); + break; + case KVMPPC_GSID_DSISR: + rc = kvmppc_gse_put_u32(gsb, iden, + vcpu->arch.shregs.dsisr); + break; + case KVMPPC_GSID_MSR: + rc = kvmppc_gse_put_u64(gsb, iden, + vcpu->arch.shregs.msr); + break; + case KVMPPC_GSID_VTB: + rc = kvmppc_gse_put_u64(gsb, iden, + vcpu->arch.vcore->vtb); + break; + case KVMPPC_GSID_LPCR: + rc = kvmppc_gse_put_u64(gsb, iden, + vcpu->arch.vcore->lpcr); + break; + case KVMPPC_GSID_TB_OFFSET: + rc = kvmppc_gse_put_u64(gsb, iden, + vcpu->arch.vcore->tb_offset); + break; + case KVMPPC_GSID_FPSCR: + rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.fp.fpscr); + break; + case KVMPPC_GSID_VSRS(0)... KVMPPC_GSID_VSRS(31): + i = iden - KVMPPC_GSID_VSRS(0); + memcpy(&v, &vcpu->arch.fp.fpr[i], + sizeof(vcpu->arch.fp.fpr[i])); + rc = kvmppc_gse_put_vector128(gsb, iden, &v); + break; +#ifdef CONFIG_VSX + case KVMPPC_GSID_VSCR: + rc = kvmppc_gse_put_u32(gsb, iden, + vcpu->arch.vr.vscr.u[3]); + break; + case KVMPPC_GSID_VSRS(32)... 
KVMPPC_GSID_VSRS(63): + i = iden - KVMPPC_GSID_VSRS(32); + rc = kvmppc_gse_put_vector128(gsb, iden, + &vcpu->arch.vr.vr[i]); + break; +#endif + case KVMPPC_GSID_DEC_EXPIRY_TB: { + u64 dw; + + dw = vcpu->arch.dec_expires - + vcpu->arch.vcore->tb_offset; + rc = kvmppc_gse_put_u64(gsb, iden, dw); + break; + } + case KVMPPC_GSID_LOGICAL_PVR: + /* + * Though 'arch_compat == 0' would mean the default + * compatibility, arch_compat, being a Guest Wide + * Element, cannot be filled with a value of 0 in GSB + * as this would result into a kernel trap. + * Hence, when `arch_compat == 0`, arch_compat should + * default to L1's PVR. + */ + if (!vcpu->arch.vcore->arch_compat) { + if (cpu_has_feature(CPU_FTR_ARCH_31)) + arch_compat = PVR_ARCH_31; + else if (cpu_has_feature(CPU_FTR_ARCH_300)) + arch_compat = PVR_ARCH_300; + } else { + arch_compat = vcpu->arch.vcore->arch_compat; + } + rc = kvmppc_gse_put_u32(gsb, iden, arch_compat); + break; + } + + if (rc < 0) + return rc; + } + + return 0; +} + +static int gs_msg_ops_vcpu_refresh_info(struct kvmppc_gs_msg *gsm, + struct kvmppc_gs_buff *gsb) +{ + struct kvmppc_gs_parser gsp = { 0 }; + struct kvmhv_nestedv2_io *io; + struct kvmppc_gs_bitmap *valids; + struct kvm_vcpu *vcpu; + struct kvmppc_gs_elem *gse; + vector128 v; + int rc, i; + u16 iden; + + vcpu = gsm->data; + + rc = kvmppc_gse_parse(&gsp, gsb); + if (rc < 0) + return rc; + + io = &vcpu->arch.nestedv2_io; + valids = &io->valids; + + kvmppc_gsp_for_each(&gsp, iden, gse) + { + switch (iden) { + case KVMPPC_GSID_DSCR: + vcpu->arch.dscr = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_MMCRA: + vcpu->arch.mmcra = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_HFSCR: + vcpu->arch.hfscr = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_PURR: + vcpu->arch.purr = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_SPURR: + vcpu->arch.spurr = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_AMR: + vcpu->arch.amr = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_UAMOR: + vcpu->arch.uamor = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_SIAR: + vcpu->arch.siar = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_SDAR: + vcpu->arch.sdar = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_IAMR: + vcpu->arch.iamr = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_DAWR0: + vcpu->arch.dawr0 = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_DAWR1: + vcpu->arch.dawr1 = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_DAWRX0: + vcpu->arch.dawrx0 = kvmppc_gse_get_u32(gse); + break; + case KVMPPC_GSID_DAWRX1: + vcpu->arch.dawrx1 = kvmppc_gse_get_u32(gse); + break; + case KVMPPC_GSID_CIABR: + vcpu->arch.ciabr = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_WORT: + vcpu->arch.wort = kvmppc_gse_get_u32(gse); + break; + case KVMPPC_GSID_PPR: + vcpu->arch.ppr = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_PSPB: + vcpu->arch.pspb = kvmppc_gse_get_u32(gse); + break; + case KVMPPC_GSID_TAR: + vcpu->arch.tar = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_FSCR: + vcpu->arch.fscr = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_EBBHR: + vcpu->arch.ebbhr = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_EBBRR: + vcpu->arch.ebbrr = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_BESCR: + vcpu->arch.bescr = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_IC: + vcpu->arch.ic = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_CTRL: + vcpu->arch.ctrl = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_PIDR: + vcpu->arch.pid = 
kvmppc_gse_get_u32(gse); + break; + case KVMPPC_GSID_AMOR: + break; + case KVMPPC_GSID_VRSAVE: + vcpu->arch.vrsave = kvmppc_gse_get_u32(gse); + break; + case KVMPPC_GSID_MMCR(0)... KVMPPC_GSID_MMCR(3): + i = iden - KVMPPC_GSID_MMCR(0); + vcpu->arch.mmcr[i] = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_SIER(0)... KVMPPC_GSID_SIER(2): + i = iden - KVMPPC_GSID_SIER(0); + vcpu->arch.sier[i] = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_PMC(0)... KVMPPC_GSID_PMC(5): + i = iden - KVMPPC_GSID_PMC(0); + vcpu->arch.pmc[i] = kvmppc_gse_get_u32(gse); + break; + case KVMPPC_GSID_GPR(0)... KVMPPC_GSID_GPR(31): + i = iden - KVMPPC_GSID_GPR(0); + vcpu->arch.regs.gpr[i] = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_CR: + vcpu->arch.regs.ccr = kvmppc_gse_get_u32(gse); + break; + case KVMPPC_GSID_XER: + vcpu->arch.regs.xer = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_CTR: + vcpu->arch.regs.ctr = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_LR: + vcpu->arch.regs.link = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_NIA: + vcpu->arch.regs.nip = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_SRR0: + vcpu->arch.shregs.srr0 = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_SRR1: + vcpu->arch.shregs.srr1 = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_SPRG0: + vcpu->arch.shregs.sprg0 = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_SPRG1: + vcpu->arch.shregs.sprg1 = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_SPRG2: + vcpu->arch.shregs.sprg2 = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_SPRG3: + vcpu->arch.shregs.sprg3 = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_DAR: + vcpu->arch.shregs.dar = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_DSISR: + vcpu->arch.shregs.dsisr = kvmppc_gse_get_u32(gse); + break; + case KVMPPC_GSID_MSR: + vcpu->arch.shregs.msr = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_VTB: + vcpu->arch.vcore->vtb = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_LPCR: + vcpu->arch.vcore->lpcr = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_TB_OFFSET: + vcpu->arch.vcore->tb_offset = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_FPSCR: + vcpu->arch.fp.fpscr = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_VSRS(0)... KVMPPC_GSID_VSRS(31): + kvmppc_gse_get_vector128(gse, &v); + i = iden - KVMPPC_GSID_VSRS(0); + memcpy(&vcpu->arch.fp.fpr[i], &v, + sizeof(vcpu->arch.fp.fpr[i])); + break; +#ifdef CONFIG_VSX + case KVMPPC_GSID_VSCR: + vcpu->arch.vr.vscr.u[3] = kvmppc_gse_get_u32(gse); + break; + case KVMPPC_GSID_VSRS(32)... 
KVMPPC_GSID_VSRS(63): + i = iden - KVMPPC_GSID_VSRS(32); + kvmppc_gse_get_vector128(gse, &vcpu->arch.vr.vr[i]); + break; +#endif + case KVMPPC_GSID_HDAR: + vcpu->arch.fault_dar = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_HDSISR: + vcpu->arch.fault_dsisr = kvmppc_gse_get_u32(gse); + break; + case KVMPPC_GSID_ASDR: + vcpu->arch.fault_gpa = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_HEIR: + vcpu->arch.emul_inst = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_DEC_EXPIRY_TB: { + u64 dw; + + dw = kvmppc_gse_get_u64(gse); + vcpu->arch.dec_expires = + dw + vcpu->arch.vcore->tb_offset; + break; + } + case KVMPPC_GSID_LOGICAL_PVR: + vcpu->arch.vcore->arch_compat = kvmppc_gse_get_u32(gse); + break; + default: + continue; + } + kvmppc_gsbm_set(valids, iden); + } + + return 0; +} + +static struct kvmppc_gs_msg_ops vcpu_message_ops = { + .get_size = gs_msg_ops_vcpu_get_size, + .fill_info = gs_msg_ops_vcpu_fill_info, + .refresh_info = gs_msg_ops_vcpu_refresh_info, +}; + +static int kvmhv_nestedv2_host_create(struct kvm_vcpu *vcpu, + struct kvmhv_nestedv2_io *io) +{ + struct kvmhv_nestedv2_config *cfg; + struct kvmppc_gs_buff *gsb, *vcpu_run_output, *vcpu_run_input; + unsigned long guest_id, vcpu_id; + struct kvmppc_gs_msg *gsm, *vcpu_message, *vcore_message; + int rc; + + cfg = &io->cfg; + guest_id = vcpu->kvm->arch.lpid; + vcpu_id = vcpu->vcpu_id; + + gsm = kvmppc_gsm_new(&config_msg_ops, cfg, KVMPPC_GS_FLAGS_WIDE, + GFP_KERNEL); + if (!gsm) { + rc = -ENOMEM; + goto err; + } + + gsb = kvmppc_gsb_new(kvmppc_gsm_size(gsm), guest_id, vcpu_id, + GFP_KERNEL); + if (!gsb) { + rc = -ENOMEM; + goto free_gsm; + } + + rc = kvmppc_gsb_receive_datum(gsb, gsm, + KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE); + if (rc < 0) { + pr_err("KVM-NESTEDv2: couldn't get vcpu run output buffer minimum size\n"); + goto free_gsb; + } + + vcpu_run_output = kvmppc_gsb_new(cfg->vcpu_run_output_size, guest_id, + vcpu_id, GFP_KERNEL); + if (!vcpu_run_output) { + rc = -ENOMEM; + goto free_gsb; + } + + cfg->vcpu_run_output_cfg.address = kvmppc_gsb_paddress(vcpu_run_output); + cfg->vcpu_run_output_cfg.size = kvmppc_gsb_capacity(vcpu_run_output); + io->vcpu_run_output = vcpu_run_output; + + gsm->flags = 0; + rc = kvmppc_gsb_send_datum(gsb, gsm, KVMPPC_GSID_RUN_OUTPUT); + if (rc < 0) { + pr_err("KVM-NESTEDv2: couldn't set vcpu run output buffer\n"); + goto free_gs_out; + } + + vcpu_message = kvmppc_gsm_new(&vcpu_message_ops, vcpu, 0, GFP_KERNEL); + if (!vcpu_message) { + rc = -ENOMEM; + goto free_gs_out; + } + kvmppc_gsm_include_all(vcpu_message); + + io->vcpu_message = vcpu_message; + + vcpu_run_input = kvmppc_gsb_new(kvmppc_gsm_size(vcpu_message), guest_id, + vcpu_id, GFP_KERNEL); + if (!vcpu_run_input) { + rc = -ENOMEM; + goto free_vcpu_message; + } + + io->vcpu_run_input = vcpu_run_input; + cfg->vcpu_run_input_cfg.address = kvmppc_gsb_paddress(vcpu_run_input); + cfg->vcpu_run_input_cfg.size = kvmppc_gsb_capacity(vcpu_run_input); + rc = kvmppc_gsb_send_datum(gsb, gsm, KVMPPC_GSID_RUN_INPUT); + if (rc < 0) { + pr_err("KVM-NESTEDv2: couldn't set vcpu run input buffer\n"); + goto free_vcpu_run_input; + } + + vcore_message = kvmppc_gsm_new(&vcpu_message_ops, vcpu, + KVMPPC_GS_FLAGS_WIDE, GFP_KERNEL); + if (!vcore_message) { + rc = -ENOMEM; + goto free_vcpu_run_input; + } + + kvmppc_gsm_include_all(vcore_message); + kvmppc_gsbm_clear(&vcore_message->bitmap, KVMPPC_GSID_LOGICAL_PVR); + io->vcore_message = vcore_message; + + kvmppc_gsbm_fill(&io->valids); + kvmppc_gsm_free(gsm); + kvmppc_gsb_free(gsb); + return 0; + 
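/*
 * Recap of the buffer handshake completed above: the L0's minimum
 * run-output size is read via KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE, the
 * run-output and run-input buffers are allocated and published with
 * KVMPPC_GSID_RUN_OUTPUT and KVMPPC_GSID_RUN_INPUT, and separate
 * per-vcpu and guest-wide ("vcore") messages are prepared.
 * KVMPPC_GSID_LOGICAL_PVR is cleared from the vcore message,
 * presumably because (as the fill_info() comment notes) that element
 * must never be sent as zero.
 */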
+free_vcpu_run_input: + kvmppc_gsb_free(vcpu_run_input); +free_vcpu_message: + kvmppc_gsm_free(vcpu_message); +free_gs_out: + kvmppc_gsb_free(vcpu_run_output); +free_gsb: + kvmppc_gsb_free(gsb); +free_gsm: + kvmppc_gsm_free(gsm); +err: + return rc; +} + +/** + * __kvmhv_nestedv2_mark_dirty() - mark a Guest State ID to be sent to the host + * @vcpu: vcpu + * @iden: guest state ID + * + * Mark a guest state ID as having been changed by the L1 host and thus + * the new value must be sent to the L0 hypervisor. See kvmhv_nestedv2_flush_vcpu() + */ +int __kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden) +{ + struct kvmhv_nestedv2_io *io; + struct kvmppc_gs_bitmap *valids; + struct kvmppc_gs_msg *gsm; + + if (!iden) + return 0; + + io = &vcpu->arch.nestedv2_io; + valids = &io->valids; + gsm = io->vcpu_message; + kvmppc_gsm_include(gsm, iden); + gsm = io->vcore_message; + kvmppc_gsm_include(gsm, iden); + kvmppc_gsbm_set(valids, iden); + return 0; +} +EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_mark_dirty); + +/** + * __kvmhv_nestedv2_cached_reload() - reload a Guest State ID from the host + * @vcpu: vcpu + * @iden: guest state ID + * + * Reload the value for the guest state ID from the L0 host into the L1 host. + * This is cached so that going out to the L0 host only happens if necessary. + */ +int __kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden) +{ + struct kvmhv_nestedv2_io *io; + struct kvmppc_gs_bitmap *valids; + struct kvmppc_gs_buff *gsb; + struct kvmppc_gs_msg gsm; + int rc; + + if (!iden) + return 0; + + io = &vcpu->arch.nestedv2_io; + valids = &io->valids; + if (kvmppc_gsbm_test(valids, iden)) + return 0; + + gsb = io->vcpu_run_input; + kvmppc_gsm_init(&gsm, &vcpu_message_ops, vcpu, kvmppc_gsid_flags(iden)); + rc = kvmppc_gsb_receive_datum(gsb, &gsm, iden); + if (rc < 0) { + pr_err("KVM-NESTEDv2: couldn't get GSID: 0x%x\n", iden); + return rc; + } + return 0; +} +EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_cached_reload); + +/** + * kvmhv_nestedv2_flush_vcpu() - send modified Guest State IDs to the host + * @vcpu: vcpu + * @time_limit: hdec expiry tb + * + * Send the values marked by __kvmhv_nestedv2_mark_dirty() to the L0 host. + * Thread wide values are copied to the H_GUEST_RUN_VCPU input buffer. Guest + * wide values need to be sent with H_GUEST_SET first. + * + * The hdec tb offset is always sent to L0 host. 
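 *
 * Rough call pattern on guest entry, cf. kvmhv_vcpu_entry_nestedv2()
 * in book3s_hv.c above (names abbreviated):
 *
 *	kvmhv_nestedv2_flush_vcpu(vcpu, time_limit);   // push dirty state
 *	plpar_guest_run_vcpu(0, lpid, vcpu_id, &trap, &fail_iden);
 *	kvmhv_nestedv2_parse_output(vcpu);             // pull exit state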
+ */ +int kvmhv_nestedv2_flush_vcpu(struct kvm_vcpu *vcpu, u64 time_limit) +{ + struct kvmhv_nestedv2_io *io; + struct kvmppc_gs_buff *gsb; + struct kvmppc_gs_msg *gsm; + int rc; + + io = &vcpu->arch.nestedv2_io; + gsb = io->vcpu_run_input; + gsm = io->vcore_message; + rc = kvmppc_gsb_send_data(gsb, gsm); + if (rc < 0) { + pr_err("KVM-NESTEDv2: couldn't set guest wide elements\n"); + return rc; + } + + gsm = io->vcpu_message; + kvmppc_gsb_reset(gsb); + rc = kvmppc_gsm_fill_info(gsm, gsb); + if (rc < 0) { + pr_err("KVM-NESTEDv2: couldn't fill vcpu run input buffer\n"); + return rc; + } + + rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_HDEC_EXPIRY_TB, time_limit); + if (rc < 0) + return rc; + return 0; +} +EXPORT_SYMBOL_GPL(kvmhv_nestedv2_flush_vcpu); + +/** + * kvmhv_nestedv2_set_ptbl_entry() - send partition and process table state to + * L0 host + * @lpid: guest id + * @dw0: partition table double word + * @dw1: process table double word + */ +int kvmhv_nestedv2_set_ptbl_entry(unsigned long lpid, u64 dw0, u64 dw1) +{ + struct kvmppc_gs_part_table patbl; + struct kvmppc_gs_proc_table prtbl; + struct kvmppc_gs_buff *gsb; + size_t size; + int rc; + + size = kvmppc_gse_total_size( + kvmppc_gsid_size(KVMPPC_GSID_PARTITION_TABLE)) + + kvmppc_gse_total_size( + kvmppc_gsid_size(KVMPPC_GSID_PROCESS_TABLE)) + + sizeof(struct kvmppc_gs_header); + gsb = kvmppc_gsb_new(size, lpid, 0, GFP_KERNEL); + if (!gsb) + return -ENOMEM; + + patbl.address = dw0 & RPDB_MASK; + patbl.ea_bits = ((((dw0 & RTS1_MASK) >> (RTS1_SHIFT - 3)) | + ((dw0 & RTS2_MASK) >> RTS2_SHIFT)) + + 31); + patbl.gpd_size = 1ul << ((dw0 & RPDS_MASK) + 3); + rc = kvmppc_gse_put_part_table(gsb, KVMPPC_GSID_PARTITION_TABLE, patbl); + if (rc < 0) + goto free_gsb; + + prtbl.address = dw1 & PRTB_MASK; + prtbl.gpd_size = 1ul << ((dw1 & PRTS_MASK) + 12); + rc = kvmppc_gse_put_proc_table(gsb, KVMPPC_GSID_PROCESS_TABLE, prtbl); + if (rc < 0) + goto free_gsb; + + rc = kvmppc_gsb_send(gsb, KVMPPC_GS_FLAGS_WIDE); + if (rc < 0) { + pr_err("KVM-NESTEDv2: couldn't set the PATE\n"); + goto free_gsb; + } + + kvmppc_gsb_free(gsb); + return 0; + +free_gsb: + kvmppc_gsb_free(gsb); + return rc; +} +EXPORT_SYMBOL_GPL(kvmhv_nestedv2_set_ptbl_entry); + +/** + * kvmhv_nestedv2_parse_output() - receive values from H_GUEST_RUN_VCPU output + * @vcpu: vcpu + * + * Parse the output buffer from H_GUEST_RUN_VCPU to update vcpu. 
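 * The fault state is cleared first; the refresh then pulls, among
 * others, KVMPPC_GSID_HDAR into vcpu->arch.fault_dar, KVMPPC_GSID_HDSISR
 * into vcpu->arch.fault_dsisr, KVMPPC_GSID_ASDR into
 * vcpu->arch.fault_gpa and KVMPPC_GSID_HEIR into vcpu->arch.emul_inst,
 * matching gs_msg_ops_vcpu_refresh_info().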
+ */ +int kvmhv_nestedv2_parse_output(struct kvm_vcpu *vcpu) +{ + struct kvmhv_nestedv2_io *io; + struct kvmppc_gs_buff *gsb; + struct kvmppc_gs_msg gsm; + + io = &vcpu->arch.nestedv2_io; + gsb = io->vcpu_run_output; + + vcpu->arch.fault_dar = 0; + vcpu->arch.fault_dsisr = 0; + vcpu->arch.fault_gpa = 0; + vcpu->arch.emul_inst = KVM_INST_FETCH_FAILED; + + kvmppc_gsm_init(&gsm, &vcpu_message_ops, vcpu, 0); + return kvmppc_gsm_refresh_info(&gsm, gsb); +} +EXPORT_SYMBOL_GPL(kvmhv_nestedv2_parse_output); + +static void kvmhv_nestedv2_host_free(struct kvm_vcpu *vcpu, + struct kvmhv_nestedv2_io *io) +{ + kvmppc_gsm_free(io->vcpu_message); + kvmppc_gsm_free(io->vcore_message); + kvmppc_gsb_free(io->vcpu_run_input); + kvmppc_gsb_free(io->vcpu_run_output); +} + +int __kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu, struct pt_regs *regs) +{ + struct kvmhv_nestedv2_io *io; + struct kvmppc_gs_bitmap *valids; + struct kvmppc_gs_buff *gsb; + struct kvmppc_gs_msg gsm; + int rc = 0; + + + io = &vcpu->arch.nestedv2_io; + valids = &io->valids; + + gsb = io->vcpu_run_input; + kvmppc_gsm_init(&gsm, &vcpu_message_ops, vcpu, 0); + + for (int i = 0; i < 32; i++) { + if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_GPR(i))) + kvmppc_gsm_include(&gsm, KVMPPC_GSID_GPR(i)); + } + + if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_CR)) + kvmppc_gsm_include(&gsm, KVMPPC_GSID_CR); + + if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_XER)) + kvmppc_gsm_include(&gsm, KVMPPC_GSID_XER); + + if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_CTR)) + kvmppc_gsm_include(&gsm, KVMPPC_GSID_CTR); + + if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_LR)) + kvmppc_gsm_include(&gsm, KVMPPC_GSID_LR); + + if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_NIA)) + kvmppc_gsm_include(&gsm, KVMPPC_GSID_NIA); + + rc = kvmppc_gsb_receive_data(gsb, &gsm); + if (rc < 0) + pr_err("KVM-NESTEDv2: couldn't reload ptregs\n"); + + return rc; +} +EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_reload_ptregs); + +int __kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu, + struct pt_regs *regs) +{ + for (int i = 0; i < 32; i++) + kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_GPR(i)); + + kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CR); + kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_XER); + kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CTR); + kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LR); + kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_NIA); + + return 0; +} +EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_mark_dirty_ptregs); + +/** + * kvmhv_nestedv2_vcpu_create() - create nested vcpu for the NESTEDv2 API + * @vcpu: vcpu + * @io: NESTEDv2 nested io state + * + * Parse the output buffer from H_GUEST_RUN_VCPU to update vcpu. 
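 * In practice this issues the create-vcpu hypercall through
 * plpar_guest_create_vcpu() and then sets up the shared run
 * input/output buffers via kvmhv_nestedv2_host_create().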
+ */ +int kvmhv_nestedv2_vcpu_create(struct kvm_vcpu *vcpu, + struct kvmhv_nestedv2_io *io) +{ + long rc; + + rc = plpar_guest_create_vcpu(0, vcpu->kvm->arch.lpid, vcpu->vcpu_id); + + if (rc != H_SUCCESS) { + pr_err("KVM: Create Guest vcpu hcall failed, rc=%ld\n", rc); + switch (rc) { + case H_NOT_ENOUGH_RESOURCES: + case H_ABORTED: + return -ENOMEM; + case H_AUTHORITY: + return -EPERM; + default: + return -EINVAL; + } + } + + rc = kvmhv_nestedv2_host_create(vcpu, io); + + return rc; +} +EXPORT_SYMBOL_GPL(kvmhv_nestedv2_vcpu_create); + +/** + * kvmhv_nestedv2_vcpu_free() - free the NESTEDv2 state + * @vcpu: vcpu + * @io: NESTEDv2 nested io state + */ +void kvmhv_nestedv2_vcpu_free(struct kvm_vcpu *vcpu, + struct kvmhv_nestedv2_io *io) +{ + kvmhv_nestedv2_host_free(vcpu, io); +} +EXPORT_SYMBOL_GPL(kvmhv_nestedv2_vcpu_free); diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c index 34f1db2128..34bc0a8a12 100644 --- a/arch/powerpc/kvm/book3s_hv_p9_entry.c +++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c @@ -305,7 +305,7 @@ static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u6 u32 pid; lpid = nested ? nested->shadow_lpid : kvm->arch.lpid; - pid = vcpu->arch.pid; + pid = kvmppc_get_pid(vcpu); /* * Prior memory accesses to host PID Q3 must be completed before we @@ -330,7 +330,7 @@ static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 int i; lpid = kvm->arch.lpid; - pid = vcpu->arch.pid; + pid = kvmppc_get_pid(vcpu); /* * See switch_mmu_to_guest_radix. ptesync should not be required here diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c index 82be6d8751..9012acadbc 100644 --- a/arch/powerpc/kvm/book3s_hv_ras.c +++ b/arch/powerpc/kvm/book3s_hv_ras.c @@ -174,14 +174,14 @@ long kvmppc_p9_realmode_hmi_handler(struct kvm_vcpu *vcpu) ppc_md.hmi_exception_early(NULL); out: - if (vc->tb_offset) { + if (kvmppc_get_tb_offset(vcpu)) { u64 new_tb = mftb() + vc->tb_offset; mtspr(SPRN_TBU40, new_tb); if ((mftb() & 0xffffff) < (new_tb & 0xffffff)) { new_tb += 0x1000000; mtspr(SPRN_TBU40, new_tb); } - vc->tb_offset_applied = vc->tb_offset; + vc->tb_offset_applied = kvmppc_get_tb_offset(vcpu); } return ret; diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index 9182324dbe..17cb75a127 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c @@ -776,8 +776,8 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags, r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C)); r &= ~HPTE_GR_RESERVED; } - vcpu->arch.regs.gpr[4 + i * 2] = v; - vcpu->arch.regs.gpr[5 + i * 2] = r; + kvmppc_set_gpr(vcpu, 4 + i * 2, v); + kvmppc_set_gpr(vcpu, 5 + i * 2, r); } return H_SUCCESS; } @@ -824,7 +824,7 @@ long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags, } } } - vcpu->arch.regs.gpr[4] = gr; + kvmppc_set_gpr(vcpu, 4, gr); ret = H_SUCCESS; out: unlock_hpte(hpte, v & ~HPTE_V_HVLOCK); @@ -872,7 +872,7 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags, kvmppc_set_dirty_from_hpte(kvm, v, gr); } } - vcpu->arch.regs.gpr[4] = gr; + kvmppc_set_gpr(vcpu, 4, gr); ret = H_SUCCESS; out: unlock_hpte(hpte, v & ~HPTE_V_HVLOCK); diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c index e165bfa842..e429848785 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_xics.c +++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c @@ -481,7 +481,7 @@ static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct 
kvmppc_icp *icp, unsigned long xics_rm_h_xirr_x(struct kvm_vcpu *vcpu) { - vcpu->arch.regs.gpr[5] = get_tb(); + kvmppc_set_gpr(vcpu, 5, get_tb()); return xics_rm_h_xirr(vcpu); } @@ -518,7 +518,7 @@ unsigned long xics_rm_h_xirr(struct kvm_vcpu *vcpu) } while (!icp_rm_try_update(icp, old_state, new_state)); /* Return the result in GPR4 */ - vcpu->arch.regs.gpr[4] = xirr; + kvmppc_set_gpr(vcpu, 4, xirr); return check_too_hard(xics, icp); } diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c index e2d6f9327f..92f3311514 100644 --- a/arch/powerpc/kvm/book3s_hv_uvmem.c +++ b/arch/powerpc/kvm/book3s_hv_uvmem.c @@ -858,7 +858,7 @@ unsigned long kvmppc_h_svm_init_done(struct kvm *kvm) } kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE; - pr_info("LPID %d went secure\n", kvm->arch.lpid); + pr_info("LPID %lld went secure\n", kvm->arch.lpid); out: srcu_read_unlock(&kvm->srcu, srcu_idx); diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index f4115819e7..29a3822497 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c @@ -328,7 +328,7 @@ static unsigned long xive_vm_h_xirr(struct kvm_vcpu *vcpu) */ /* Return interrupt and old CPPR in GPR4 */ - vcpu->arch.regs.gpr[4] = hirq | (old_cppr << 24); + kvmppc_set_gpr(vcpu, 4, hirq | (old_cppr << 24)); return H_SUCCESS; } @@ -364,7 +364,7 @@ static unsigned long xive_vm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server hirq = xive_vm_scan_interrupts(xc, pending, scan_poll); /* Return interrupt and old CPPR in GPR4 */ - vcpu->arch.regs.gpr[4] = hirq | (xc->cppr << 24); + kvmppc_set_gpr(vcpu, 4, hirq | (xc->cppr << 24)); return H_SUCCESS; } @@ -884,10 +884,10 @@ int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio, } if (single_escalation) - name = kasprintf(GFP_KERNEL, "kvm-%d-%d", + name = kasprintf(GFP_KERNEL, "kvm-%lld-%d", vcpu->kvm->arch.lpid, xc->server_num); else - name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d", + name = kasprintf(GFP_KERNEL, "kvm-%lld-%d-%d", vcpu->kvm->arch.lpid, xc->server_num, prio); if (!name) { pr_err("Failed to allocate escalation irq name for queue %d of VCPU %d\n", @@ -2779,8 +2779,6 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type) int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req) { - struct kvmppc_vcore *vc = vcpu->arch.vcore; - /* The VM should have configured XICS mode before doing XICS hcalls. 
*/ if (!kvmppc_xics_enabled(vcpu)) return H_TOO_HARD; @@ -2799,7 +2797,7 @@ int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req) return xive_vm_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4)); case H_XIRR_X: xive_vm_h_xirr(vcpu); - kvmppc_set_gpr(vcpu, 5, get_tb() + vc->tb_offset); + kvmppc_set_gpr(vcpu, 5, get_tb() + kvmppc_get_tb_offset(vcpu)); return H_SUCCESS; } diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c index 712ab91ced..6e2ebbd8aa 100644 --- a/arch/powerpc/kvm/book3s_xive_native.c +++ b/arch/powerpc/kvm/book3s_xive_native.c @@ -567,7 +567,7 @@ static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive, u8 priority; struct kvm_ppc_xive_eq kvm_eq; int rc; - __be32 *qaddr = 0; + __be32 *qaddr = NULL; struct page *page; struct xive_q *q; gfn_t gfn; diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c index 059c08ae03..077fd88a0b 100644 --- a/arch/powerpc/kvm/emulate_loadstore.c +++ b/arch/powerpc/kvm/emulate_loadstore.c @@ -92,7 +92,8 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) vcpu->arch.mmio_host_swabbed = 0; emulated = EMULATE_FAIL; - vcpu->arch.regs.msr = vcpu->arch.shared->msr; + vcpu->arch.regs.msr = kvmppc_get_msr(vcpu); + kvmhv_nestedv2_reload_ptregs(vcpu, &vcpu->arch.regs); if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) { int type = op.type & INSTR_TYPE_MASK; int size = GETSIZE(op.type); @@ -250,7 +251,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) vcpu->arch.mmio_sp64_extend = 1; emulated = kvmppc_handle_store(vcpu, - VCPU_FPR(vcpu, op.reg), size, 1); + kvmppc_get_fpr(vcpu, op.reg), size, 1); if ((op.type & UPDATE) && (emulated != EMULATE_FAIL)) kvmppc_set_gpr(vcpu, op.update_reg, op.ea); @@ -357,6 +358,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) } trace_kvm_ppc_instr(ppc_inst_val(inst), kvmppc_get_pc(vcpu), emulated); + kvmhv_nestedv2_mark_dirty_ptregs(vcpu, &vcpu->arch.regs); /* Advance past emulated instruction. */ if (emulated != EMULATE_FAIL) diff --git a/arch/powerpc/kvm/guest-state-buffer.c b/arch/powerpc/kvm/guest-state-buffer.c new file mode 100644 index 0000000000..b80dbc5862 --- /dev/null +++ b/arch/powerpc/kvm/guest-state-buffer.c @@ -0,0 +1,621 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include "asm/hvcall.h" +#include <linux/log2.h> +#include <asm/pgalloc.h> +#include <asm/guest-state-buffer.h> + +static const u16 kvmppc_gse_iden_len[__KVMPPC_GSE_TYPE_MAX] = { + [KVMPPC_GSE_BE32] = sizeof(__be32), + [KVMPPC_GSE_BE64] = sizeof(__be64), + [KVMPPC_GSE_VEC128] = sizeof(vector128), + [KVMPPC_GSE_PARTITION_TABLE] = sizeof(struct kvmppc_gs_part_table), + [KVMPPC_GSE_PROCESS_TABLE] = sizeof(struct kvmppc_gs_proc_table), + [KVMPPC_GSE_BUFFER] = sizeof(struct kvmppc_gs_buff_info), +}; + +/** + * kvmppc_gsb_new() - create a new guest state buffer + * @size: total size of the guest state buffer (includes header) + * @guest_id: guest_id + * @vcpu_id: vcpu_id + * @flags: GFP flags + * + * Returns a guest state buffer. 
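+ * + * A hypothetical usage sketch (illustrative only, not part of this patch; note the requested size is rounded up to a power of two): + * + * gsb = kvmppc_gsb_new(SZ_4K, guest_id, vcpu_id, GFP_KERNEL); + * if (!gsb) + * return -ENOMEM; + * rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_GPR(0), val); + * ... + * kvmppc_gsb_free(gsb);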
+ */ +struct kvmppc_gs_buff *kvmppc_gsb_new(size_t size, unsigned long guest_id, + unsigned long vcpu_id, gfp_t flags) +{ + struct kvmppc_gs_buff *gsb; + + gsb = kzalloc(sizeof(*gsb), flags); + if (!gsb) + return NULL; + + size = roundup_pow_of_two(size); + gsb->hdr = kzalloc(size, flags); + if (!gsb->hdr) + goto free; + + gsb->capacity = size; + gsb->len = sizeof(struct kvmppc_gs_header); + gsb->vcpu_id = vcpu_id; + gsb->guest_id = guest_id; + + gsb->hdr->nelems = cpu_to_be32(0); + + return gsb; + +free: + kfree(gsb); + return NULL; +} +EXPORT_SYMBOL_GPL(kvmppc_gsb_new); + +/** + * kvmppc_gsb_free() - free a guest state buffer + * @gsb: guest state buffer + */ +void kvmppc_gsb_free(struct kvmppc_gs_buff *gsb) +{ + kfree(gsb->hdr); + kfree(gsb); +} +EXPORT_SYMBOL_GPL(kvmppc_gsb_free); + +/** + * kvmppc_gsb_put() - allocate space in a guest state buffer + * @gsb: buffer to allocate in + * @size: amount of space to allocate + * + * Returns a pointer to the requested space within the buffer and + * increments the count of elements in the buffer. + * + * Does not check if there is enough space in the buffer. + */ +void *kvmppc_gsb_put(struct kvmppc_gs_buff *gsb, size_t size) +{ + u32 nelems = kvmppc_gsb_nelems(gsb); + void *p; + + p = (void *)kvmppc_gsb_header(gsb) + kvmppc_gsb_len(gsb); + gsb->len += size; + + kvmppc_gsb_header(gsb)->nelems = cpu_to_be32(nelems + 1); + return p; +} +EXPORT_SYMBOL_GPL(kvmppc_gsb_put); + +static int kvmppc_gsid_class(u16 iden) +{ + if ((iden >= KVMPPC_GSE_GUESTWIDE_START) && + (iden <= KVMPPC_GSE_GUESTWIDE_END)) + return KVMPPC_GS_CLASS_GUESTWIDE; + + if ((iden >= KVMPPC_GSE_META_START) && (iden <= KVMPPC_GSE_META_END)) + return KVMPPC_GS_CLASS_META; + + if ((iden >= KVMPPC_GSE_DW_REGS_START) && + (iden <= KVMPPC_GSE_DW_REGS_END)) + return KVMPPC_GS_CLASS_DWORD_REG; + + if ((iden >= KVMPPC_GSE_W_REGS_START) && + (iden <= KVMPPC_GSE_W_REGS_END)) + return KVMPPC_GS_CLASS_WORD_REG; + + if ((iden >= KVMPPC_GSE_VSRS_START) && (iden <= KVMPPC_GSE_VSRS_END)) + return KVMPPC_GS_CLASS_VECTOR; + + if ((iden >= KVMPPC_GSE_INTR_REGS_START) && + (iden <= KVMPPC_GSE_INTR_REGS_END)) + return KVMPPC_GS_CLASS_INTR; + + return -1; +} + +static int kvmppc_gsid_type(u16 iden) +{ + int type = -1; + + switch (kvmppc_gsid_class(iden)) { + case KVMPPC_GS_CLASS_GUESTWIDE: + switch (iden) { + case KVMPPC_GSID_HOST_STATE_SIZE: + case KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE: + case KVMPPC_GSID_TB_OFFSET: + type = KVMPPC_GSE_BE64; + break; + case KVMPPC_GSID_PARTITION_TABLE: + type = KVMPPC_GSE_PARTITION_TABLE; + break; + case KVMPPC_GSID_PROCESS_TABLE: + type = KVMPPC_GSE_PROCESS_TABLE; + break; + case KVMPPC_GSID_LOGICAL_PVR: + type = KVMPPC_GSE_BE32; + break; + } + break; + case KVMPPC_GS_CLASS_META: + switch (iden) { + case KVMPPC_GSID_RUN_INPUT: + case KVMPPC_GSID_RUN_OUTPUT: + type = KVMPPC_GSE_BUFFER; + break; + case KVMPPC_GSID_VPA: + type = KVMPPC_GSE_BE64; + break; + } + break; + case KVMPPC_GS_CLASS_DWORD_REG: + type = KVMPPC_GSE_BE64; + break; + case KVMPPC_GS_CLASS_WORD_REG: + type = KVMPPC_GSE_BE32; + break; + case KVMPPC_GS_CLASS_VECTOR: + type = KVMPPC_GSE_VEC128; + break; + case KVMPPC_GS_CLASS_INTR: + switch (iden) { + case KVMPPC_GSID_HDAR: + case KVMPPC_GSID_ASDR: + case KVMPPC_GSID_HEIR: + type = KVMPPC_GSE_BE64; + break; + case KVMPPC_GSID_HDSISR: + type = KVMPPC_GSE_BE32; + break; + } + break; + } + + return type; +} + +/** + * kvmppc_gsid_flags() - the flags for a guest state ID + * @iden: guest state ID + * + * Returns any flags for the guest state ID.
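+ * + * For example, KVMPPC_GSID_TB_OFFSET belongs to the guest-wide class, so kvmppc_gsid_flags() returns KVMPPC_GS_FLAGS_WIDE for it, while a per-vcpu ID such as KVMPPC_GSID_GPR(0) has no flags set.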
+ */ +unsigned long kvmppc_gsid_flags(u16 iden) +{ + unsigned long flags = 0; + + switch (kvmppc_gsid_class(iden)) { + case KVMPPC_GS_CLASS_GUESTWIDE: + flags = KVMPPC_GS_FLAGS_WIDE; + break; + case KVMPPC_GS_CLASS_META: + case KVMPPC_GS_CLASS_DWORD_REG: + case KVMPPC_GS_CLASS_WORD_REG: + case KVMPPC_GS_CLASS_VECTOR: + case KVMPPC_GS_CLASS_INTR: + break; + } + + return flags; +} +EXPORT_SYMBOL_GPL(kvmppc_gsid_flags); + +/** + * kvmppc_gsid_size() - the size of a guest state ID + * @iden: guest state ID + * + * Returns the size of the guest state ID. + */ +u16 kvmppc_gsid_size(u16 iden) +{ + int type; + + type = kvmppc_gsid_type(iden); + if (type == -1) + return 0; + + if (type >= __KVMPPC_GSE_TYPE_MAX) + return 0; + + return kvmppc_gse_iden_len[type]; +} +EXPORT_SYMBOL_GPL(kvmppc_gsid_size); + +/** + * kvmppc_gsid_mask() - the settable bits of a guest state ID + * @iden: guest state ID + * + * Returns a mask of settable bits for a guest state ID. + */ +u64 kvmppc_gsid_mask(u16 iden) +{ + u64 mask = ~0ull; + + switch (iden) { + case KVMPPC_GSID_LPCR: + mask = LPCR_DPFD | LPCR_ILE | LPCR_AIL | LPCR_LD | LPCR_MER | + LPCR_GTSE; + break; + case KVMPPC_GSID_MSR: + mask = ~(MSR_HV | MSR_S | MSR_ME); + break; + } + + return mask; +} +EXPORT_SYMBOL_GPL(kvmppc_gsid_mask); + +/** + * __kvmppc_gse_put() - add a guest state element to a buffer + * @gsb: buffer to add the element to + * @iden: guest state ID + * @size: length of data + * @data: pointer to data + */ +int __kvmppc_gse_put(struct kvmppc_gs_buff *gsb, u16 iden, u16 size, + const void *data) +{ + struct kvmppc_gs_elem *gse; + u16 total_size; + + total_size = sizeof(*gse) + size; + if (total_size + kvmppc_gsb_len(gsb) > kvmppc_gsb_capacity(gsb)) + return -ENOMEM; + + if (kvmppc_gsid_size(iden) != size) + return -EINVAL; + + gse = kvmppc_gsb_put(gsb, total_size); + gse->iden = cpu_to_be16(iden); + gse->len = cpu_to_be16(size); + memcpy(gse->data, data, size); + + return 0; +} +EXPORT_SYMBOL_GPL(__kvmppc_gse_put); + +/** + * kvmppc_gse_parse() - create a parse map from a guest state buffer + * @gsp: guest state parser + * @gsb: guest state buffer + */ +int kvmppc_gse_parse(struct kvmppc_gs_parser *gsp, struct kvmppc_gs_buff *gsb) +{ + struct kvmppc_gs_elem *curr; + int rem, i; + + kvmppc_gsb_for_each_elem(i, curr, gsb, rem) { + if (kvmppc_gse_len(curr) != + kvmppc_gsid_size(kvmppc_gse_iden(curr))) + return -EINVAL; + kvmppc_gsp_insert(gsp, kvmppc_gse_iden(curr), curr); + } + + if (kvmppc_gsb_nelems(gsb) != i) + return -EINVAL; + return 0; +} +EXPORT_SYMBOL_GPL(kvmppc_gse_parse); + +static inline int kvmppc_gse_flatten_iden(u16 iden) +{ + int bit = 0; + int class; + + class = kvmppc_gsid_class(iden); + + if (class == KVMPPC_GS_CLASS_GUESTWIDE) { + bit += iden - KVMPPC_GSE_GUESTWIDE_START; + return bit; + } + + bit += KVMPPC_GSE_GUESTWIDE_COUNT; + + if (class == KVMPPC_GS_CLASS_META) { + bit += iden - KVMPPC_GSE_META_START; + return bit; + } + + bit += KVMPPC_GSE_META_COUNT; + + if (class == KVMPPC_GS_CLASS_DWORD_REG) { + bit += iden - KVMPPC_GSE_DW_REGS_START; + return bit; + } + + bit += KVMPPC_GSE_DW_REGS_COUNT; + + if (class == KVMPPC_GS_CLASS_WORD_REG) { + bit += iden - KVMPPC_GSE_W_REGS_START; + return bit; + } + + bit += KVMPPC_GSE_W_REGS_COUNT; + + if (class == KVMPPC_GS_CLASS_VECTOR) { + bit += iden - KVMPPC_GSE_VSRS_START; + return bit; + } + + bit += KVMPPC_GSE_VSRS_COUNT; + + if (class == KVMPPC_GS_CLASS_INTR) { + bit += iden - KVMPPC_GSE_INTR_REGS_START; + return bit; + } + + return 0; +} + +static inline u16 kvmppc_gse_unflatten_iden(int
bit) +{ + u16 iden; + + if (bit < KVMPPC_GSE_GUESTWIDE_COUNT) { + iden = KVMPPC_GSE_GUESTWIDE_START + bit; + return iden; + } + bit -= KVMPPC_GSE_GUESTWIDE_COUNT; + + if (bit < KVMPPC_GSE_META_COUNT) { + iden = KVMPPC_GSE_META_START + bit; + return iden; + } + bit -= KVMPPC_GSE_META_COUNT; + + if (bit < KVMPPC_GSE_DW_REGS_COUNT) { + iden = KVMPPC_GSE_DW_REGS_START + bit; + return iden; + } + bit -= KVMPPC_GSE_DW_REGS_COUNT; + + if (bit < KVMPPC_GSE_W_REGS_COUNT) { + iden = KVMPPC_GSE_W_REGS_START + bit; + return iden; + } + bit -= KVMPPC_GSE_W_REGS_COUNT; + + if (bit < KVMPPC_GSE_VSRS_COUNT) { + iden = KVMPPC_GSE_VSRS_START + bit; + return iden; + } + bit -= KVMPPC_GSE_VSRS_COUNT; + + if (bit < KVMPPC_GSE_IDEN_COUNT) { + iden = KVMPPC_GSE_INTR_REGS_START + bit; + return iden; + } + + return 0; +} + +/** + * kvmppc_gsp_insert() - add a mapping from a guest state ID to an element + * @gsp: guest state parser + * @iden: guest state id (key) + * @gse: guest state element (value) + */ +void kvmppc_gsp_insert(struct kvmppc_gs_parser *gsp, u16 iden, + struct kvmppc_gs_elem *gse) +{ + int i; + + i = kvmppc_gse_flatten_iden(iden); + kvmppc_gsbm_set(&gsp->iterator, iden); + gsp->gses[i] = gse; +} +EXPORT_SYMBOL_GPL(kvmppc_gsp_insert); + +/** + * kvmppc_gsp_lookup() - lookup an element from a guest state ID + * @gsp: guest state parser + * @iden: guest state ID (key) + * + * Returns the guest state element if present. + */ +struct kvmppc_gs_elem *kvmppc_gsp_lookup(struct kvmppc_gs_parser *gsp, u16 iden) +{ + int i; + + i = kvmppc_gse_flatten_iden(iden); + return gsp->gses[i]; +} +EXPORT_SYMBOL_GPL(kvmppc_gsp_lookup); + +/** + * kvmppc_gsbm_set() - set the guest state ID + * @gsbm: guest state bitmap + * @iden: guest state ID + */ +void kvmppc_gsbm_set(struct kvmppc_gs_bitmap *gsbm, u16 iden) +{ + set_bit(kvmppc_gse_flatten_iden(iden), gsbm->bitmap); +} +EXPORT_SYMBOL_GPL(kvmppc_gsbm_set); + +/** + * kvmppc_gsbm_clear() - clear the guest state ID + * @gsbm: guest state bitmap + * @iden: guest state ID + */ +void kvmppc_gsbm_clear(struct kvmppc_gs_bitmap *gsbm, u16 iden) +{ + clear_bit(kvmppc_gse_flatten_iden(iden), gsbm->bitmap); +} +EXPORT_SYMBOL_GPL(kvmppc_gsbm_clear); + +/** + * kvmppc_gsbm_test() - test the guest state ID + * @gsbm: guest state bitmap + * @iden: guest state ID + */ +bool kvmppc_gsbm_test(struct kvmppc_gs_bitmap *gsbm, u16 iden) +{ + return test_bit(kvmppc_gse_flatten_iden(iden), gsbm->bitmap); +} +EXPORT_SYMBOL_GPL(kvmppc_gsbm_test); + +/** + * kvmppc_gsbm_next() - return the next set guest state ID + * @gsbm: guest state bitmap + * @prev: last guest state ID + */ +u16 kvmppc_gsbm_next(struct kvmppc_gs_bitmap *gsbm, u16 prev) +{ + int bit, pbit; + + pbit = prev ?
kvmppc_gse_flatten_iden(prev) + 1 : 0; + bit = find_next_bit(gsbm->bitmap, KVMPPC_GSE_IDEN_COUNT, pbit); + + if (bit < KVMPPC_GSE_IDEN_COUNT) + return kvmppc_gse_unflatten_iden(bit); + return 0; +} +EXPORT_SYMBOL_GPL(kvmppc_gsbm_next); + +/** + * kvmppc_gsm_init() - initialize a guest state message + * @gsm: guest state message + * @ops: callbacks + * @data: private data + * @flags: guest wide or thread wide + */ +int kvmppc_gsm_init(struct kvmppc_gs_msg *gsm, struct kvmppc_gs_msg_ops *ops, + void *data, unsigned long flags) +{ + memset(gsm, 0, sizeof(*gsm)); + gsm->ops = ops; + gsm->data = data; + gsm->flags = flags; + + return 0; +} +EXPORT_SYMBOL_GPL(kvmppc_gsm_init); + +/** + * kvmppc_gsm_new() - creates a new guest state message + * @ops: callbacks + * @data: private data + * @flags: guest wide or thread wide + * @gfp_flags: GFP allocation flags + * + * Returns an initialized guest state message. + */ +struct kvmppc_gs_msg *kvmppc_gsm_new(struct kvmppc_gs_msg_ops *ops, void *data, + unsigned long flags, gfp_t gfp_flags) +{ + struct kvmppc_gs_msg *gsm; + + gsm = kzalloc(sizeof(*gsm), gfp_flags); + if (!gsm) + return NULL; + + kvmppc_gsm_init(gsm, ops, data, flags); + + return gsm; +} +EXPORT_SYMBOL_GPL(kvmppc_gsm_new); + +/** + * kvmppc_gsm_size() - get the size required for the guest state message + * @gsm: self + * + * Returns the size required for the message. + */ +size_t kvmppc_gsm_size(struct kvmppc_gs_msg *gsm) +{ + if (gsm->ops->get_size) + return gsm->ops->get_size(gsm); + return 0; +} +EXPORT_SYMBOL_GPL(kvmppc_gsm_size); + +/** + * kvmppc_gsm_free() - free guest state message + * @gsm: guest state message + */ +void kvmppc_gsm_free(struct kvmppc_gs_msg *gsm) +{ + kfree(gsm); +} +EXPORT_SYMBOL_GPL(kvmppc_gsm_free); + +/** + * kvmppc_gsm_fill_info() - serialises message to guest state buffer format + * @gsm: self + * @gsb: buffer to serialise into + */ +int kvmppc_gsm_fill_info(struct kvmppc_gs_msg *gsm, struct kvmppc_gs_buff *gsb) +{ + if (!gsm->ops->fill_info) + return -EINVAL; + + return gsm->ops->fill_info(gsb, gsm); +} +EXPORT_SYMBOL_GPL(kvmppc_gsm_fill_info); + +/** + * kvmppc_gsm_refresh_info() - deserialises from guest state buffer + * @gsm: self + * @gsb: buffer to deserialise from + */ +int kvmppc_gsm_refresh_info(struct kvmppc_gs_msg *gsm, + struct kvmppc_gs_buff *gsb) +{ + if (!gsm->ops->refresh_info) + return -EINVAL; + + return gsm->ops->refresh_info(gsm, gsb); +} +EXPORT_SYMBOL_GPL(kvmppc_gsm_refresh_info); + +/** + * kvmppc_gsb_send() - send all elements in the buffer to the hypervisor. + * @gsb: guest state buffer + * @flags: guest wide or thread wide + * + * Performs the H_GUEST_SET_STATE hcall for the guest state buffer. + */ +int kvmppc_gsb_send(struct kvmppc_gs_buff *gsb, unsigned long flags) +{ + unsigned long hflags = 0; + unsigned long i; + int rc; + + if (kvmppc_gsb_nelems(gsb) == 0) + return 0; + + if (flags & KVMPPC_GS_FLAGS_WIDE) + hflags |= H_GUEST_FLAGS_WIDE; + + rc = plpar_guest_set_state(hflags, gsb->guest_id, gsb->vcpu_id, + __pa(gsb->hdr), gsb->capacity, &i); + return rc; +} +EXPORT_SYMBOL_GPL(kvmppc_gsb_send); + +/** + * kvmppc_gsb_recv() - request all elements in the buffer have their value + * updated. + * @gsb: guest state buffer + * @flags: guest wide or thread wide + * + * Performs the H_GUEST_GET_STATE hcall for the guest state buffer. + * After returning from the hcall the guest state elements that were + * present in the buffer will have updated values from the hypervisor.
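+ * + * A hypothetical round trip (illustrative only, not part of this patch): + * + * kvmppc_gse_put_u64(gsb, KVMPPC_GSID_GPR(0), 0); + * rc = kvmppc_gsb_recv(gsb, 0); + * kvmppc_gse_parse(&gsp, gsb); + * gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_GPR(0)); + * val = kvmppc_gse_get_u64(gse);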
+ */ +int kvmppc_gsb_recv(struct kvmppc_gs_buff *gsb, unsigned long flags) +{ + unsigned long hflags = 0; + unsigned long i; + int rc; + + if (flags & KVMPPC_GS_FLAGS_WIDE) + hflags |= H_GUEST_FLAGS_WIDE; + + rc = plpar_guest_get_state(hflags, gsb->guest_id, gsb->vcpu_id, + __pa(gsb->hdr), gsb->capacity, &i); + return rc; +} +EXPORT_SYMBOL_GPL(kvmppc_gsb_recv); diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 7197c82566..f6af752698 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -934,11 +934,11 @@ static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu, return; if (index >= 32) { - val.vval = VCPU_VSX_VR(vcpu, index - 32); + kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval); val.vsxval[offset] = gpr; - VCPU_VSX_VR(vcpu, index - 32) = val.vval; + kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval); } else { - VCPU_VSX_FPR(vcpu, index, offset) = gpr; + kvmppc_set_vsx_fpr(vcpu, index, offset, gpr); } } @@ -949,13 +949,13 @@ static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu, int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; if (index >= 32) { - val.vval = VCPU_VSX_VR(vcpu, index - 32); + kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval); val.vsxval[0] = gpr; val.vsxval[1] = gpr; - VCPU_VSX_VR(vcpu, index - 32) = val.vval; + kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval); } else { - VCPU_VSX_FPR(vcpu, index, 0) = gpr; - VCPU_VSX_FPR(vcpu, index, 1) = gpr; + kvmppc_set_vsx_fpr(vcpu, index, 0, gpr); + kvmppc_set_vsx_fpr(vcpu, index, 1, gpr); } } @@ -970,12 +970,12 @@ static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu, val.vsx32val[1] = gpr; val.vsx32val[2] = gpr; val.vsx32val[3] = gpr; - VCPU_VSX_VR(vcpu, index - 32) = val.vval; + kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval); } else { val.vsx32val[0] = gpr; val.vsx32val[1] = gpr; - VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0]; - VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0]; + kvmppc_set_vsx_fpr(vcpu, index, 0, val.vsxval[0]); + kvmppc_set_vsx_fpr(vcpu, index, 1, val.vsxval[0]); } } @@ -991,15 +991,15 @@ static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu, return; if (index >= 32) { - val.vval = VCPU_VSX_VR(vcpu, index - 32); + kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval); val.vsx32val[offset] = gpr32; - VCPU_VSX_VR(vcpu, index - 32) = val.vval; + kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval); } else { dword_offset = offset / 2; word_offset = offset % 2; - val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset); + val.vsxval[0] = kvmppc_get_vsx_fpr(vcpu, index, dword_offset); val.vsx32val[word_offset] = gpr32; - VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0]; + kvmppc_set_vsx_fpr(vcpu, index, dword_offset, val.vsxval[0]); } } #endif /* CONFIG_VSX */ @@ -1058,9 +1058,9 @@ static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu, if (offset == -1) return; - val.vval = VCPU_VSX_VR(vcpu, index); + kvmppc_get_vsx_vr(vcpu, index, &val.vval); val.vsxval[offset] = gpr; - VCPU_VSX_VR(vcpu, index) = val.vval; + kvmppc_set_vsx_vr(vcpu, index, &val.vval); } static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu, @@ -1074,9 +1074,9 @@ static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu, if (offset == -1) return; - val.vval = VCPU_VSX_VR(vcpu, index); + kvmppc_get_vsx_vr(vcpu, index, &val.vval); val.vsx32val[offset] = gpr32; - VCPU_VSX_VR(vcpu, index) = val.vval; + kvmppc_set_vsx_vr(vcpu, index, &val.vval); } static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu, @@ -1090,9 +1090,9 @@ static inline void kvmppc_set_vmx_hword(struct 
kvm_vcpu *vcpu, if (offset == -1) return; - val.vval = VCPU_VSX_VR(vcpu, index); + kvmppc_get_vsx_vr(vcpu, index, &val.vval); val.vsx16val[offset] = gpr16; - VCPU_VSX_VR(vcpu, index) = val.vval; + kvmppc_set_vsx_vr(vcpu, index, &val.vval); } static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu, @@ -1106,9 +1106,9 @@ static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu, if (offset == -1) return; - val.vval = VCPU_VSX_VR(vcpu, index); + kvmppc_get_vsx_vr(vcpu, index, &val.vval); val.vsx8val[offset] = gpr8; - VCPU_VSX_VR(vcpu, index) = val.vval; + kvmppc_set_vsx_vr(vcpu, index, &val.vval); } #endif /* CONFIG_ALTIVEC */ @@ -1194,14 +1194,14 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu) if (vcpu->kvm->arch.kvm_ops->giveup_ext) vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP); - VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; + kvmppc_set_fpr(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK, gpr); break; #ifdef CONFIG_PPC_BOOK3S case KVM_MMIO_REG_QPR: vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; break; case KVM_MMIO_REG_FQPR: - VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; + kvmppc_set_fpr(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK, gpr); vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; break; #endif @@ -1419,9 +1419,9 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val) } if (rs < 32) { - *val = VCPU_VSX_FPR(vcpu, rs, vsx_offset); + *val = kvmppc_get_vsx_fpr(vcpu, rs, vsx_offset); } else { - reg.vval = VCPU_VSX_VR(vcpu, rs - 32); + kvmppc_get_vsx_vr(vcpu, rs - 32, ®.vval); *val = reg.vsxval[vsx_offset]; } break; @@ -1438,10 +1438,10 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val) if (rs < 32) { dword_offset = vsx_offset / 2; word_offset = vsx_offset % 2; - reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset); + reg.vsxval[0] = kvmppc_get_vsx_fpr(vcpu, rs, dword_offset); *val = reg.vsx32val[word_offset]; } else { - reg.vval = VCPU_VSX_VR(vcpu, rs - 32); + kvmppc_get_vsx_vr(vcpu, rs - 32, ®.vval); *val = reg.vsx32val[vsx_offset]; } break; @@ -1556,7 +1556,7 @@ static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val) if (vmx_offset == -1) return -1; - reg.vval = VCPU_VSX_VR(vcpu, index); + kvmppc_get_vsx_vr(vcpu, index, ®.vval); *val = reg.vsxval[vmx_offset]; return result; @@ -1574,7 +1574,7 @@ static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val) if (vmx_offset == -1) return -1; - reg.vval = VCPU_VSX_VR(vcpu, index); + kvmppc_get_vsx_vr(vcpu, index, ®.vval); *val = reg.vsx32val[vmx_offset]; return result; @@ -1592,7 +1592,7 @@ static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val) if (vmx_offset == -1) return -1; - reg.vval = VCPU_VSX_VR(vcpu, index); + kvmppc_get_vsx_vr(vcpu, index, ®.vval); *val = reg.vsx16val[vmx_offset]; return result; @@ -1610,7 +1610,7 @@ static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val) if (vmx_offset == -1) return -1; - reg.vval = VCPU_VSX_VR(vcpu, index); + kvmppc_get_vsx_vr(vcpu, index, ®.vval); *val = reg.vsx8val[vmx_offset]; return result; @@ -1719,17 +1719,17 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) r = -ENXIO; break; } - val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0]; + kvmppc_get_vsx_vr(vcpu, reg->id - KVM_REG_PPC_VR0, &val.vval); break; case KVM_REG_PPC_VSCR: if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { r = -ENXIO; break; } - val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]); + val = 
get_reg_val(reg->id, kvmppc_get_vscr(vcpu)); break; case KVM_REG_PPC_VRSAVE: - val = get_reg_val(reg->id, vcpu->arch.vrsave); + val = get_reg_val(reg->id, kvmppc_get_vrsave(vcpu)); break; #endif /* CONFIG_ALTIVEC */ default: @@ -1770,21 +1770,21 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) r = -ENXIO; break; } - vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval; + kvmppc_set_vsx_vr(vcpu, reg->id - KVM_REG_PPC_VR0, &val.vval); break; case KVM_REG_PPC_VSCR: if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { r = -ENXIO; break; } - vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val); + kvmppc_set_vscr(vcpu, set_reg_val(reg->id, val)); break; case KVM_REG_PPC_VRSAVE: if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { r = -ENXIO; break; } - vcpu->arch.vrsave = set_reg_val(reg->id, val); + kvmppc_set_vrsave(vcpu, set_reg_val(reg->id, val)); break; #endif /* CONFIG_ALTIVEC */ default: diff --git a/arch/powerpc/kvm/test-guest-state-buffer.c b/arch/powerpc/kvm/test-guest-state-buffer.c new file mode 100644 index 0000000000..4720b8dc88 --- /dev/null +++ b/arch/powerpc/kvm/test-guest-state-buffer.c @@ -0,0 +1,328 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include <linux/init.h> +#include <linux/log2.h> +#include <kunit/test.h> + +#include <asm/guest-state-buffer.h> + +static void test_creating_buffer(struct kunit *test) +{ + struct kvmppc_gs_buff *gsb; + size_t size = 0x100; + + gsb = kvmppc_gsb_new(size, 0, 0, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb); + + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb->hdr); + + KUNIT_EXPECT_EQ(test, gsb->capacity, roundup_pow_of_two(size)); + KUNIT_EXPECT_EQ(test, gsb->len, sizeof(__be32)); + + kvmppc_gsb_free(gsb); +} + +static void test_adding_element(struct kunit *test) +{ + const struct kvmppc_gs_elem *head, *curr; + union { + __vector128 v; + u64 dw[2]; + } u; + int rem; + struct kvmppc_gs_buff *gsb; + size_t size = 0x1000; + int i, rc; + u64 data; + + gsb = kvmppc_gsb_new(size, 0, 0, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb); + + /* Single elements, direct use of __kvmppc_gse_put() */ + data = 0xdeadbeef; + rc = __kvmppc_gse_put(gsb, KVMPPC_GSID_GPR(0), 8, &data); + KUNIT_EXPECT_GE(test, rc, 0); + + head = kvmppc_gsb_data(gsb); + KUNIT_EXPECT_EQ(test, kvmppc_gse_iden(head), KVMPPC_GSID_GPR(0)); + KUNIT_EXPECT_EQ(test, kvmppc_gse_len(head), 8); + data = 0; + memcpy(&data, kvmppc_gse_data(head), 8); + KUNIT_EXPECT_EQ(test, data, 0xdeadbeef); + + /* Multiple elements, simple wrapper */ + rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_GPR(1), 0xcafef00d); + KUNIT_EXPECT_GE(test, rc, 0); + + u.dw[0] = 0x1; + u.dw[1] = 0x2; + rc = kvmppc_gse_put_vector128(gsb, KVMPPC_GSID_VSRS(0), &u.v); + KUNIT_EXPECT_GE(test, rc, 0); + u.dw[0] = 0x0; + u.dw[1] = 0x0; + + kvmppc_gsb_for_each_elem(i, curr, gsb, rem) { + switch (i) { + case 0: + KUNIT_EXPECT_EQ(test, kvmppc_gse_iden(curr), + KVMPPC_GSID_GPR(0)); + KUNIT_EXPECT_EQ(test, kvmppc_gse_len(curr), 8); + KUNIT_EXPECT_EQ(test, kvmppc_gse_get_be64(curr), + 0xdeadbeef); + break; + case 1: + KUNIT_EXPECT_EQ(test, kvmppc_gse_iden(curr), + KVMPPC_GSID_GPR(1)); + KUNIT_EXPECT_EQ(test, kvmppc_gse_len(curr), 8); + KUNIT_EXPECT_EQ(test, kvmppc_gse_get_u64(curr), + 0xcafef00d); + break; + case 2: + KUNIT_EXPECT_EQ(test, kvmppc_gse_iden(curr), + KVMPPC_GSID_VSRS(0)); + KUNIT_EXPECT_EQ(test, kvmppc_gse_len(curr), 16); + kvmppc_gse_get_vector128(curr, &u.v); + KUNIT_EXPECT_EQ(test, u.dw[0], 0x1); + KUNIT_EXPECT_EQ(test, u.dw[1], 0x2); + break; + } + } + KUNIT_EXPECT_EQ(test, i, 3); + + 
kvmppc_gsb_reset(gsb); + KUNIT_EXPECT_EQ(test, kvmppc_gsb_nelems(gsb), 0); + KUNIT_EXPECT_EQ(test, kvmppc_gsb_len(gsb), + sizeof(struct kvmppc_gs_header)); + + kvmppc_gsb_free(gsb); +} + +static void test_gs_parsing(struct kunit *test) +{ + struct kvmppc_gs_elem *gse; + struct kvmppc_gs_parser gsp = { 0 }; + struct kvmppc_gs_buff *gsb; + size_t size = 0x1000; + u64 tmp1, tmp2; + + gsb = kvmppc_gsb_new(size, 0, 0, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb); + + tmp1 = 0xdeadbeefull; + kvmppc_gse_put_u64(gsb, KVMPPC_GSID_GPR(0), tmp1); + + KUNIT_EXPECT_GE(test, kvmppc_gse_parse(&gsp, gsb), 0); + + gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_GPR(0)); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gse); + + tmp2 = kvmppc_gse_get_u64(gse); + KUNIT_EXPECT_EQ(test, tmp2, 0xdeadbeefull); + + kvmppc_gsb_free(gsb); +} + +static void test_gs_bitmap(struct kunit *test) +{ + struct kvmppc_gs_bitmap gsbm = { 0 }; + struct kvmppc_gs_bitmap gsbm1 = { 0 }; + struct kvmppc_gs_bitmap gsbm2 = { 0 }; + u16 iden; + int i, j; + + i = 0; + for (u16 iden = KVMPPC_GSID_HOST_STATE_SIZE; + iden <= KVMPPC_GSID_PROCESS_TABLE; iden++) { + kvmppc_gsbm_set(&gsbm, iden); + kvmppc_gsbm_set(&gsbm1, iden); + KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden)); + kvmppc_gsbm_clear(&gsbm, iden); + KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden)); + i++; + } + + for (u16 iden = KVMPPC_GSID_RUN_INPUT; iden <= KVMPPC_GSID_VPA; + iden++) { + kvmppc_gsbm_set(&gsbm, iden); + kvmppc_gsbm_set(&gsbm1, iden); + KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden)); + kvmppc_gsbm_clear(&gsbm, iden); + KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden)); + i++; + } + + for (u16 iden = KVMPPC_GSID_GPR(0); iden <= KVMPPC_GSID_CTRL; iden++) { + kvmppc_gsbm_set(&gsbm, iden); + kvmppc_gsbm_set(&gsbm1, iden); + KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden)); + kvmppc_gsbm_clear(&gsbm, iden); + KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden)); + i++; + } + + for (u16 iden = KVMPPC_GSID_CR; iden <= KVMPPC_GSID_PSPB; iden++) { + kvmppc_gsbm_set(&gsbm, iden); + kvmppc_gsbm_set(&gsbm1, iden); + KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden)); + kvmppc_gsbm_clear(&gsbm, iden); + KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden)); + i++; + } + + for (u16 iden = KVMPPC_GSID_VSRS(0); iden <= KVMPPC_GSID_VSRS(63); + iden++) { + kvmppc_gsbm_set(&gsbm, iden); + kvmppc_gsbm_set(&gsbm1, iden); + KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden)); + kvmppc_gsbm_clear(&gsbm, iden); + KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden)); + i++; + } + + for (u16 iden = KVMPPC_GSID_HDAR; iden <= KVMPPC_GSID_ASDR; iden++) { + kvmppc_gsbm_set(&gsbm, iden); + kvmppc_gsbm_set(&gsbm1, iden); + KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden)); + kvmppc_gsbm_clear(&gsbm, iden); + KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden)); + i++; + } + + j = 0; + kvmppc_gsbm_for_each(&gsbm1, iden) + { + kvmppc_gsbm_set(&gsbm2, iden); + j++; + } + KUNIT_EXPECT_EQ(test, i, j); + KUNIT_EXPECT_MEMEQ(test, &gsbm1, &gsbm2, sizeof(gsbm1)); +} + +struct kvmppc_gs_msg_test1_data { + u64 a; + u32 b; + struct kvmppc_gs_part_table c; + struct kvmppc_gs_proc_table d; + struct kvmppc_gs_buff_info e; +}; + +static size_t test1_get_size(struct kvmppc_gs_msg *gsm) +{ + size_t size = 0; + u16 ids[] = { + KVMPPC_GSID_PARTITION_TABLE, + KVMPPC_GSID_PROCESS_TABLE, + KVMPPC_GSID_RUN_INPUT, + KVMPPC_GSID_GPR(0), + KVMPPC_GSID_CR, + }; + + for (int i = 0; i < ARRAY_SIZE(ids); i++) + size += kvmppc_gse_total_size(kvmppc_gsid_size(ids[i])); + 
return size; +} + +static int test1_fill_info(struct kvmppc_gs_buff *gsb, + struct kvmppc_gs_msg *gsm) +{ + struct kvmppc_gs_msg_test1_data *data = gsm->data; + + if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_GPR(0))) + kvmppc_gse_put_u64(gsb, KVMPPC_GSID_GPR(0), data->a); + + if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_CR)) + kvmppc_gse_put_u32(gsb, KVMPPC_GSID_CR, data->b); + + if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_PARTITION_TABLE)) + kvmppc_gse_put_part_table(gsb, KVMPPC_GSID_PARTITION_TABLE, + data->c); + + if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_PROCESS_TABLE)) + kvmppc_gse_put_proc_table(gsb, KVMPPC_GSID_PROCESS_TABLE, + data->d); + + if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_INPUT)) + kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_INPUT, data->e); + + return 0; +} + +static int test1_refresh_info(struct kvmppc_gs_msg *gsm, + struct kvmppc_gs_buff *gsb) +{ + struct kvmppc_gs_parser gsp = { 0 }; + struct kvmppc_gs_msg_test1_data *data = gsm->data; + struct kvmppc_gs_elem *gse; + int rc; + + rc = kvmppc_gse_parse(&gsp, gsb); + if (rc < 0) + return rc; + + gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_GPR(0)); + if (gse) + data->a = kvmppc_gse_get_u64(gse); + + gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_CR); + if (gse) + data->b = kvmppc_gse_get_u32(gse); + + return 0; +} + +static struct kvmppc_gs_msg_ops gs_msg_test1_ops = { + .get_size = test1_get_size, + .fill_info = test1_fill_info, + .refresh_info = test1_refresh_info, +}; + +static void test_gs_msg(struct kunit *test) +{ + struct kvmppc_gs_msg_test1_data test1_data = { + .a = 0xdeadbeef, + .b = 0x1, + }; + struct kvmppc_gs_msg *gsm; + struct kvmppc_gs_buff *gsb; + + gsm = kvmppc_gsm_new(&gs_msg_test1_ops, &test1_data, GSM_SEND, + GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsm); + + gsb = kvmppc_gsb_new(kvmppc_gsm_size(gsm), 0, 0, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb); + + kvmppc_gsm_include(gsm, KVMPPC_GSID_PARTITION_TABLE); + kvmppc_gsm_include(gsm, KVMPPC_GSID_PROCESS_TABLE); + kvmppc_gsm_include(gsm, KVMPPC_GSID_RUN_INPUT); + kvmppc_gsm_include(gsm, KVMPPC_GSID_GPR(0)); + kvmppc_gsm_include(gsm, KVMPPC_GSID_CR); + + kvmppc_gsm_fill_info(gsm, gsb); + + memset(&test1_data, 0, sizeof(test1_data)); + + kvmppc_gsm_refresh_info(gsm, gsb); + KUNIT_EXPECT_EQ(test, test1_data.a, 0xdeadbeef); + KUNIT_EXPECT_EQ(test, test1_data.b, 0x1); + + kvmppc_gsb_free(gsb); + kvmppc_gsm_free(gsm); +} + +static struct kunit_case guest_state_buffer_testcases[] = { + KUNIT_CASE(test_creating_buffer), + KUNIT_CASE(test_adding_element), + KUNIT_CASE(test_gs_bitmap), + KUNIT_CASE(test_gs_parsing), + KUNIT_CASE(test_gs_msg), + {} +}; + +static struct kunit_suite guest_state_buffer_test_suite = { + .name = "guest_state_buffer_test", + .test_cases = guest_state_buffer_testcases, +}; + +kunit_test_suites(&guest_state_buffer_test_suite); + +MODULE_LICENSE("GPL");
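The KUnit suite above also serves as a reference for the intended calling convention. The following is a minimal sketch (illustrative only, not part of this patch): it strings together the kvmppc_gsm_*()/kvmppc_gsb_*() APIs introduced in guest-state-buffer.c with error handling trimmed, and demo_gsm_roundtrip() is a hypothetical helper.

static int demo_gsm_roundtrip(struct kvmppc_gs_msg_ops *ops, void *data)
{
	struct kvmppc_gs_msg *gsm;
	struct kvmppc_gs_buff *gsb;
	int rc;

	/* Wrap the private data in a thread-wide message */
	gsm = kvmppc_gsm_new(ops, data, 0, GFP_KERNEL);
	if (!gsm)
		return -ENOMEM;

	/* Size the buffer using the message's get_size() callback */
	gsb = kvmppc_gsb_new(kvmppc_gsm_size(gsm), 0, 0, GFP_KERNEL);
	if (!gsb) {
		kvmppc_gsm_free(gsm);
		return -ENOMEM;
	}

	/* Select an ID to transfer, then serialise and deserialise */
	kvmppc_gsm_include(gsm, KVMPPC_GSID_GPR(0));
	rc = kvmppc_gsm_fill_info(gsm, gsb);		/* data -> buffer */
	if (rc >= 0)
		rc = kvmppc_gsm_refresh_info(gsm, gsb);	/* buffer -> data */

	kvmppc_gsb_free(gsb);
	kvmppc_gsm_free(gsm);
	return rc;
}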