author	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-05-18 17:35:05 +0000
committer	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-05-18 17:39:31 +0000
commit	85c675d0d09a45a135bddd15d7b385f8758c32fb (patch)
tree	76267dbc9b9a130337be3640948fe397b04ac629 /arch/x86/kvm/mmu/mmu.c
parent	Adding upstream version 6.6.15. (diff)
Adding upstream version 6.7.7. (upstream/6.7.7)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/x86/kvm/mmu/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu/mmu.c	55
1 file changed, 36 insertions(+), 19 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index f7901cb4d2..c57e181bba 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3425,8 +3425,8 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
	struct kvm_mmu_page *sp;
	int ret = RET_PF_INVALID;
-	u64 spte = 0ull;
-	u64 *sptep = NULL;
+	u64 spte;
+	u64 *sptep;
	uint retry_count = 0;

	if (!page_fault_can_be_fast(fault))
@@ -3442,6 +3442,14 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
		else
			sptep = fast_pf_get_last_sptep(vcpu, fault->addr, &spte);

+		/*
+		 * It's entirely possible for the mapping to have been zapped
+		 * by a different task, but the root page should always be
+		 * available as the vCPU holds a reference to its root(s).
+		 */
+		if (WARN_ON_ONCE(!sptep))
+			spte = REMOVED_SPTE;
+
		if (!is_shadow_present_pte(spte))
			break;
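
Note: the added fallback works because REMOVED_SPTE is never a shadow-present SPTE, so the pre-existing !is_shadow_present_pte() check immediately exits the retry loop and the fault falls through to the slow path. A condensed sketch of that control flow follows; the helper names are taken from the hunk above, while the loop bound and the elided steps are approximations, not the exact upstream code:

	do {
		sptep = fast_pf_get_last_sptep(vcpu, fault->addr, &spte);

		/* New in this commit: the mapping was zapped by another task. */
		if (WARN_ON_ONCE(!sptep))
			spte = REMOVED_SPTE;

		/* REMOVED_SPTE fails this test, so the fast path bails out. */
		if (!is_shadow_present_pte(spte))
			break;

		/* ... attempt the lockless fix-up, retrying on races ... */
	} while (++retry_count < 4);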
@@ -4479,21 +4487,28 @@ out_unlock:
 }
 #endif

-int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
+bool __kvm_mmu_honors_guest_mtrrs(bool vm_has_noncoherent_dma)
 {
	/*
-	 * If the guest's MTRRs may be used to compute the "real" memtype,
-	 * restrict the mapping level to ensure KVM uses a consistent memtype
-	 * across the entire mapping. If the host MTRRs are ignored by TDP
-	 * (shadow_memtype_mask is non-zero), and the VM has non-coherent DMA
-	 * (DMA doesn't snoop CPU caches), KVM's ABI is to honor the memtype
-	 * from the guest's MTRRs so that guest accesses to memory that is
-	 * DMA'd aren't cached against the guest's wishes.
+	 * If host MTRRs are ignored (shadow_memtype_mask is non-zero), and the
+	 * VM has non-coherent DMA (DMA doesn't snoop CPU caches), KVM's ABI is
+	 * to honor the memtype from the guest's MTRRs so that guest accesses
+	 * to memory that is DMA'd aren't cached against the guest's wishes.
	 *
	 * Note, KVM may still ultimately ignore guest MTRRs for certain PFNs,
	 * e.g. KVM will force UC memtype for host MMIO.
	 */
-	if (shadow_memtype_mask && kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
+	return vm_has_noncoherent_dma && shadow_memtype_mask;
+}
+
+int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
+{
+	/*
+	 * If the guest's MTRRs may be used to compute the "real" memtype,
+	 * restrict the mapping level to ensure KVM uses a consistent memtype
+	 * across the entire mapping.
+	 */
+	if (kvm_mmu_honors_guest_mtrrs(vcpu->kvm)) {
		for ( ; fault->max_level > PG_LEVEL_4K; --fault->max_level) {
			int page_num = KVM_PAGES_PER_HPAGE(fault->max_level);
			gfn_t base = gfn_round_for_level(fault->gfn,
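
Note: splitting the predicate out of kvm_tdp_page_fault() lets callers that have no vCPU at hand (e.g. MTRR update paths) ask the same question. __kvm_mmu_honors_guest_mtrrs() takes the non-coherent-DMA state as a plain bool; the kvm_mmu_honors_guest_mtrrs() wrapper called above is not part of this hunk, but presumably it is a one-line inline (likely in mmu.h) that feeds in kvm_arch_has_noncoherent_dma(), along these lines:

	/* Sketch of the wrapper assumed by the hunk above; not shown in this diff. */
	static inline bool kvm_mmu_honors_guest_mtrrs(struct kvm *kvm)
	{
		return __kvm_mmu_honors_guest_mtrrs(kvm_arch_has_noncoherent_dma(kvm));
	}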
@@ -6785,11 +6800,7 @@ static unsigned long mmu_shrink_count(struct shrinker *shrink,
	return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
 }

-static struct shrinker mmu_shrinker = {
-	.count_objects = mmu_shrink_count,
-	.scan_objects = mmu_shrink_scan,
-	.seeks = DEFAULT_SEEKS * 10,
-};
+static struct shrinker *mmu_shrinker;

 static void mmu_destroy_caches(void)
 {
@@ -6922,10 +6933,16 @@ int kvm_mmu_vendor_module_init(void)
	if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
		goto out;

-	ret = register_shrinker(&mmu_shrinker, "x86-mmu");
-	if (ret)
+	mmu_shrinker = shrinker_alloc(0, "x86-mmu");
+	if (!mmu_shrinker)
		goto out_shrinker;

+	mmu_shrinker->count_objects = mmu_shrink_count;
+	mmu_shrinker->scan_objects = mmu_shrink_scan;
+	mmu_shrinker->seeks = DEFAULT_SEEKS * 10;
+
+	shrinker_register(mmu_shrinker);
+
	return 0;

 out_shrinker:
@@ -6947,7 +6964,7 @@ void kvm_mmu_vendor_module_exit(void)
 {
	mmu_destroy_caches();
	percpu_counter_destroy(&kvm_total_used_mmu_pages);
-	unregister_shrinker(&mmu_shrinker);
+	shrinker_free(mmu_shrinker);
 }

 /*
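
Note: the last three hunks convert the MMU shrinker from a statically defined struct registered with register_shrinker() to the dynamic shrinker API introduced around Linux 6.7: shrinker_alloc() allocates and names the object, the callbacks and seeks are filled in before shrinker_register() publishes it, and shrinker_free() both unregisters and frees it on module exit. A minimal sketch of the same pattern; the demo_* names are illustrative, only the shrinker_* calls and DEFAULT_SEEKS come from the diff:

	#include <linux/shrinker.h>

	static unsigned long demo_count(struct shrinker *shrink,
					struct shrink_control *sc)
	{
		return 0;	/* nothing reclaimable in this sketch */
	}

	static unsigned long demo_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
	{
		return SHRINK_STOP;
	}

	static struct shrinker *demo_shrinker;

	static int demo_shrinker_init(void)
	{
		/* Allocate first, then set callbacks, then publish. */
		demo_shrinker = shrinker_alloc(0, "demo");
		if (!demo_shrinker)
			return -ENOMEM;

		demo_shrinker->count_objects = demo_count;
		demo_shrinker->scan_objects = demo_scan;
		demo_shrinker->seeks = DEFAULT_SEEKS;

		shrinker_register(demo_shrinker);
		return 0;
	}

	static void demo_shrinker_exit(void)
	{
		/* shrinker_free() unregisters a registered shrinker before freeing it. */
		shrinker_free(demo_shrinker);
	}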