Diffstat (limited to 'virt/kvm/async_pf.c')
-rw-r--r--  virt/kvm/async_pf.c  31
1 file changed, 26 insertions(+), 5 deletions(-)
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index dd777688d..952afb1bc 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -88,7 +88,27 @@ static void async_pf_execute(struct work_struct *work)
rcuwait_wake_up(&vcpu->wait);
mmput(mm);
- kvm_put_kvm(vcpu->kvm);
+}
+
+static void kvm_flush_and_free_async_pf_work(struct kvm_async_pf *work)
+{
+ /*
+ * The async #PF is "done", but KVM must wait for the work item itself,
+ * i.e. async_pf_execute(), to run to completion. If KVM is a module,
+ * KVM must ensure *no* code owned by the KVM (the module) can be run
+ * after the last call to module_put(). Note, flushing the work item
+ * is always required when the item is taken off the completion queue.
+ * E.g. even if the vCPU handles the item in the "normal" path, the VM
+ * could be terminated before async_pf_execute() completes.
+ *
+ * Wake all events skip the queue and go straight to completion, i.e. don't
+ * need to be flushed (but sanity check that the work wasn't queued).
+ */
+ if (work->wakeup_all)
+ WARN_ON_ONCE(work->work.func);
+ else
+ flush_work(&work->work);
+ kmem_cache_free(async_pf_cache, work);
}
void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
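The rule the new helper encodes is the generic workqueue one: an object that embeds a work_struct must not be freed until the work item has finished executing. Below is a minimal, self-contained sketch of that rule, independent of this patch; the demo_item type, demo_cache, demo_fn() and demo_free() names are hypothetical and exist only for illustration.

#include <linux/slab.h>
#include <linux/workqueue.h>

/* Hypothetical container standing in for struct kvm_async_pf. */
struct demo_item {
	struct work_struct work;
	int payload;
};

static struct kmem_cache *demo_cache;	/* stands in for async_pf_cache */

static void demo_fn(struct work_struct *work)
{
	struct demo_item *item = container_of(work, struct demo_item, work);

	pr_info("payload=%d\n", item->payload);
}

/*
 * Freeing 'item' while demo_fn() may still be running would be a
 * use-after-free.  flush_work() waits for the handler to finish, which is
 * the same guarantee kvm_flush_and_free_async_pf_work() provides for async
 * #PF work items before returning them to the slab cache.
 */
static void demo_free(struct demo_item *item)
{
	flush_work(&item->work);
	kmem_cache_free(demo_cache, item);
}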
@@ -115,7 +135,6 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
#else
if (cancel_work_sync(&work->work)) {
mmput(work->mm);
- kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
kmem_cache_free(async_pf_cache, work);
}
#endif
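The #else branch above relies on the cancel_work_sync() contract: it returns true only when the work item was still pending and has now been cancelled, meaning async_pf_execute() never ran and the caller must drop the references the handler would have dropped. A short, hypothetical illustration of that contract, reusing the demo_item/demo_cache names from the earlier sketch:

static void demo_cancel(struct demo_item *item)
{
	/*
	 * true  -> the handler never ran; undo its bookkeeping here.
	 * false -> the handler already ran (or is running) and is expected
	 *          to do its own cleanup, so freeing here could double-free.
	 */
	if (cancel_work_sync(&item->work))
		kmem_cache_free(demo_cache, item);
}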
@@ -127,7 +146,10 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
list_first_entry(&vcpu->async_pf.done,
typeof(*work), link);
list_del(&work->link);
- kmem_cache_free(async_pf_cache, work);
+
+ spin_unlock(&vcpu->async_pf.lock);
+ kvm_flush_and_free_async_pf_work(work);
+ spin_lock(&vcpu->async_pf.lock);
}
spin_unlock(&vcpu->async_pf.lock);
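The unlock/flush/relock sequence added above is needed because flush_work() can sleep and vcpu->async_pf.lock is a spinlock; the entry is unlinked first so the list stays consistent while the lock is dropped. A condensed sketch of the resulting loop, with lock and done standing in for the vcpu->async_pf fields:

spin_lock(&lock);
while (!list_empty(&done)) {
	struct kvm_async_pf *work =
		list_first_entry(&done, typeof(*work), link);

	list_del(&work->link);

	/* flush_work() may sleep, so it cannot run under the spinlock. */
	spin_unlock(&lock);
	kvm_flush_and_free_async_pf_work(work);
	spin_lock(&lock);
}
spin_unlock(&lock);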
@@ -152,7 +174,7 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
list_del(&work->queue);
vcpu->async_pf.queued--;
- kmem_cache_free(async_pf_cache, work);
+ kvm_flush_and_free_async_pf_work(work);
}
}
@@ -187,7 +209,6 @@ bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
work->arch = *arch;
work->mm = current->mm;
mmget(work->mm);
- kvm_get_kvm(work->vcpu->kvm);
INIT_WORK(&work->work, async_pf_execute);