path: root/virt/kvm
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-04 10:15:43 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-04 10:15:43 +0000
commit     4a1c948bc688e5ab5e51d12d2f6dd6746e3ac2d9 (patch)
tree       7ee50e6ccf13f75f02fd343cf90750cbd9950194 /virt/kvm
parent     Releasing progress-linux version 6.1.82-1~progress6.99u1. (diff)
Merging upstream version 6.1.85.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'virt/kvm')
-rw-r--r--  virt/kvm/async_pf.c  31
1 file changed, 26 insertions, 5 deletions
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 9bfe1d6f6..adaf6f141 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -88,7 +88,27 @@ static void async_pf_execute(struct work_struct *work)
 	__kvm_vcpu_wake_up(vcpu);
 
 	mmput(mm);
-	kvm_put_kvm(vcpu->kvm);
+}
+
+static void kvm_flush_and_free_async_pf_work(struct kvm_async_pf *work)
+{
+	/*
+	 * The async #PF is "done", but KVM must wait for the work item itself,
+	 * i.e. async_pf_execute(), to run to completion. If KVM is a module,
+	 * KVM must ensure *no* code owned by the KVM (the module) can be run
+	 * after the last call to module_put(). Note, flushing the work item
+	 * is always required when the item is taken off the completion queue.
+	 * E.g. even if the vCPU handles the item in the "normal" path, the VM
+	 * could be terminated before async_pf_execute() completes.
+	 *
+	 * Wake all events skip the queue and go straight done, i.e. don't
+	 * need to be flushed (but sanity check that the work wasn't queued).
+	 */
+	if (work->wakeup_all)
+		WARN_ON_ONCE(work->work.func);
+	else
+		flush_work(&work->work);
+	kmem_cache_free(async_pf_cache, work);
 }
 
 void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
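
For readers unfamiliar with the pattern: kvm_flush_and_free_async_pf_work() is the generic workqueue "flush before free" idiom; a work item's memory (and, for a module, its code) may only be released once the callback is known to have finished. Below is a minimal, illustrative sketch of that idiom, not taken from the patch, using hypothetical names (my_item, my_worker, my_teardown).

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Hypothetical item; loosely mirrors struct kvm_async_pf. */
struct my_item {
	struct list_head link;		/* membership on a completion list */
	struct work_struct work;	/* deferred callback */
};

static void my_worker(struct work_struct *work)
{
	struct my_item *item = container_of(work, struct my_item, work);

	/* Use item here; the owner, not the worker, frees it. */
	(void)item;
}

static void my_teardown(struct my_item *item)
{
	/*
	 * Wait for my_worker() to finish (or observe that it never started)
	 * before freeing the memory it operates on. Freeing without the
	 * flush would permit a use-after-free while the worker still runs,
	 * and would let the worker's code execute after its owner is gone.
	 */
	flush_work(&item->work);
	kfree(item);
}
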
@@ -115,7 +135,6 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
 #else
 		if (cancel_work_sync(&work->work)) {
 			mmput(work->mm);
-			kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
 			kmem_cache_free(async_pf_cache, work);
 		}
 #endif
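
An aside, not part of the patch: this branch hinges on cancel_work_sync() returning true only when the work item was still pending and its callback never ran, in which case the canceller must release the resources async_pf_execute() would otherwise have dropped. A small sketch, reusing the hypothetical struct my_item from above:

static void my_cancel(struct my_item *item)
{
	if (cancel_work_sync(&item->work)) {
		/* The worker never ran; release its resources here. */
		kfree(item);
	}
	/*
	 * Otherwise the worker has already run to completion
	 * (cancel_work_sync() waits for a callback that is executing),
	 * and the item is reclaimed on the normal completion path.
	 */
}
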
@@ -127,7 +146,10 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
 			list_first_entry(&vcpu->async_pf.done,
 					 typeof(*work), link);
 		list_del(&work->link);
-		kmem_cache_free(async_pf_cache, work);
+
+		spin_unlock(&vcpu->async_pf.lock);
+		kvm_flush_and_free_async_pf_work(work);
+		spin_lock(&vcpu->async_pf.lock);
 	}
 	spin_unlock(&vcpu->async_pf.lock);
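
Another aside, not from the patch: flush_work() can sleep while waiting for the callback, and sleeping is not allowed under a spinlock, which is why the hunk above drops vcpu->async_pf.lock around kvm_flush_and_free_async_pf_work() and retakes it before touching the list again. A minimal sketch of that unlock-flush-relock shape, again using the hypothetical struct my_item:

#include <linux/spinlock.h>

static void my_drain(spinlock_t *lock, struct list_head *done)
{
	spin_lock(lock);
	while (!list_empty(done)) {
		struct my_item *item =
			list_first_entry(done, struct my_item, link);

		/* Take the item off the list while still protected. */
		list_del(&item->link);

		/*
		 * flush_work() may sleep, so the spinlock must be released
		 * for the duration of the flush and reacquired before the
		 * list is examined again.
		 */
		spin_unlock(lock);
		flush_work(&item->work);
		kfree(item);
		spin_lock(lock);
	}
	spin_unlock(lock);
}
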
@@ -152,7 +174,7 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
 
 		list_del(&work->queue);
 		vcpu->async_pf.queued--;
-		kmem_cache_free(async_pf_cache, work);
+		kvm_flush_and_free_async_pf_work(work);
 	}
 }
@@ -187,7 +209,6 @@ bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 	work->arch = *arch;
 	work->mm = current->mm;
 	mmget(work->mm);
-	kvm_get_kvm(work->vcpu->kvm);
 
 	INIT_WORK(&work->work, async_pf_execute);