path: root/virt
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-08 17:45:29 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-08 17:45:29 +0000
commit     83506c85f8d4332b3edfdc8f1fd07aa691415350 (patch)
tree       316b9630e093bb3b80e5d6e1c304151b5597901e /virt
parent     Adding upstream version 5.10.209. (diff)
download   linux-upstream/5.10.216.tar.xz
           linux-upstream/5.10.216.zip

Adding upstream version 5.10.216. (upstream/5.10.216)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/async_pf.c   31
-rw-r--r--  virt/kvm/kvm_main.c    4
2 files changed, 28 insertions, 7 deletions
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index dd777688d..952afb1bc 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -88,7 +88,27 @@ static void async_pf_execute(struct work_struct *work)
rcuwait_wake_up(&vcpu->wait);
mmput(mm);
- kvm_put_kvm(vcpu->kvm);
+}
+
+static void kvm_flush_and_free_async_pf_work(struct kvm_async_pf *work)
+{
+ /*
+ * The async #PF is "done", but KVM must wait for the work item itself,
+ * i.e. async_pf_execute(), to run to completion. If KVM is a module,
+ * KVM must ensure *no* code owned by the KVM (the module) can be run
+ * after the last call to module_put(). Note, flushing the work item
+ * is always required when the item is taken off the completion queue.
+ * E.g. even if the vCPU handles the item in the "normal" path, the VM
+ * could be terminated before async_pf_execute() completes.
+ *
+ * Wake all events skip the queue and go straight done, i.e. don't
+ * need to be flushed (but sanity check that the work wasn't queued).
+ */
+ if (work->wakeup_all)
+ WARN_ON_ONCE(work->work.func);
+ else
+ flush_work(&work->work);
+ kmem_cache_free(async_pf_cache, work);
}
void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
@@ -115,7 +135,6 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
#else
if (cancel_work_sync(&work->work)) {
mmput(work->mm);
- kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
kmem_cache_free(async_pf_cache, work);
}
#endif
@@ -127,7 +146,10 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
list_first_entry(&vcpu->async_pf.done,
typeof(*work), link);
list_del(&work->link);
- kmem_cache_free(async_pf_cache, work);
+
+ spin_unlock(&vcpu->async_pf.lock);
+ kvm_flush_and_free_async_pf_work(work);
+ spin_lock(&vcpu->async_pf.lock);
}
spin_unlock(&vcpu->async_pf.lock);
@@ -152,7 +174,7 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
list_del(&work->queue);
vcpu->async_pf.queued--;
- kmem_cache_free(async_pf_cache, work);
+ kvm_flush_and_free_async_pf_work(work);
}
}
@@ -187,7 +209,6 @@ bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
work->arch = *arch;
work->mm = current->mm;
mmget(work->mm);
- kvm_get_kvm(work->vcpu->kvm);
INIT_WORK(&work->work, async_pf_execute);
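For context on the ordering that the new kvm_flush_and_free_async_pf_work() helper enforces, the following is a minimal, self-contained sketch of the general kernel pattern, not code from the patch: a work item embedded in a heap object must be flushed before the object is freed, otherwise the handler can still run after the memory (or, for a module, the code itself) is gone. The names my_item, my_work_fn and flush_demo_* are hypothetical.

	/*
	 * Illustrative sketch only -- assumed names, not part of the patch.
	 * Shows the flush-before-free ordering that
	 * kvm_flush_and_free_async_pf_work() applies to async #PF work items.
	 */
	#include <linux/module.h>
	#include <linux/slab.h>
	#include <linux/workqueue.h>

	struct my_item {
		struct work_struct work;
		int payload;
	};

	static void my_work_fn(struct work_struct *work)
	{
		struct my_item *item = container_of(work, struct my_item, work);

		pr_info("handled payload %d\n", item->payload);
	}

	static int __init flush_demo_init(void)
	{
		struct my_item *item = kzalloc(sizeof(*item), GFP_KERNEL);

		if (!item)
			return -ENOMEM;

		item->payload = 42;
		INIT_WORK(&item->work, my_work_fn);
		schedule_work(&item->work);

		/*
		 * Freeing 'item' immediately would race with my_work_fn(): the
		 * handler may still be pending or running. flush_work() waits
		 * for it to complete, so the kfree() below cannot become a
		 * use-after-free.
		 */
		flush_work(&item->work);
		kfree(item);
		return 0;
	}

	static void __exit flush_demo_exit(void)
	{
	}

	module_init(flush_demo_init);
	module_exit(flush_demo_exit);
	MODULE_LICENSE("GPL");

The patch applies the same idea at the point where an item leaves the completion queue, since once a VM is torn down no further code owned by the KVM module may run after its last module_put().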
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 356fd5d1a..b7638c3c9 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1008,9 +1008,9 @@ static int kvm_vm_release(struct inode *inode, struct file *filp)
*/
static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
{
- unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);
+ unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(memslot);
- memslot->dirty_bitmap = kvzalloc(dirty_bytes, GFP_KERNEL_ACCOUNT);
+ memslot->dirty_bitmap = __vcalloc(2, dirty_bytes, GFP_KERNEL_ACCOUNT);
if (!memslot->dirty_bitmap)
return -ENOMEM;
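The kvm_main.c hunk switches the dirty-bitmap allocation from an open-coded kvzalloc(2 * size) to the calloc-style __vcalloc(2, size, ...), which zeroes the memory and checks the count-times-size multiplication for overflow before allocating. As a rough illustration, and not code from the patch, the hypothetical helper below open-codes an equivalent overflow check with check_mul_overflow() from <linux/overflow.h>.

	/*
	 * Illustrative sketch only -- not part of the patch. Open-codes the
	 * overflow-checked "two zeroed bitmaps back to back" allocation that
	 * __vcalloc(2, dirty_bytes, GFP_KERNEL_ACCOUNT) provides.
	 */
	#include <linux/mm.h>
	#include <linux/overflow.h>
	#include <linux/slab.h>

	static void *alloc_two_bitmaps(size_t dirty_bytes)
	{
		size_t total;

		/* check_mul_overflow() returns true if 2 * dirty_bytes overflows. */
		if (check_mul_overflow((size_t)2, dirty_bytes, &total))
			return NULL;

		return kvzalloc(total, GFP_KERNEL_ACCOUNT);
	}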