 virt/kvm/pfncache.c | 41 ++++++++++++++++++++++++++++++++++-------
 1 file changed, 34 insertions(+), 7 deletions(-)
diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
index 08f97cf972643..346e47f155724 100644
--- a/virt/kvm/pfncache.c
+++ b/virt/kvm/pfncache.c
@@ -81,6 +81,9 @@ bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
{
struct kvm_memslots *slots = kvm_memslots(kvm);
+ if (!gpc->active)
+ return false;
+
if ((gpa & ~PAGE_MASK) + len > PAGE_SIZE)
return false;
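This new early exit pairs with how consumers use the cache: check() is
called under read_lock(&gpc->lock) to decide whether gpc->khva may be
dereferenced, so a deactivated cache must fail the check rather than
report a stale mapping as usable. Roughly, consumers follow the
check/refresh loop below (a simplified sketch of the pattern used by
KVM's Xen code; the gpa and length arguments vary by call site):

	unsigned long flags;

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, PAGE_SIZE)) {
		read_unlock_irqrestore(&gpc->lock, flags);

		/* refresh() can sleep, it must not be called under the lock */
		if (kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpc->gpa, PAGE_SIZE))
			return;

		read_lock_irqsave(&gpc->lock, flags);
	}

	/* gpc->khva is stable until the read lock is dropped */
	read_unlock_irqrestore(&gpc->lock, flags);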
@@ -240,10 +243,11 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
{
struct kvm_memslots *slots = kvm_memslots(kvm);
unsigned long page_offset = gpa & ~PAGE_MASK;
- kvm_pfn_t old_pfn, new_pfn;
+ bool unmap_old = false;
unsigned long old_uhva;
+ kvm_pfn_t old_pfn;
void *old_khva;
- int ret = 0;
+ int ret;
/*
* It must fit within a single page. The 'len' argument is
@@ -261,6 +265,11 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
write_lock_irq(&gpc->lock);
+ if (!gpc->active) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
old_pfn = gpc->pfn;
old_khva = gpc->khva - offset_in_page(gpc->khva);
old_uhva = gpc->uhva;
@@ -291,6 +300,7 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
/* If the HVA→PFN mapping was already valid, don't unmap it. */
old_pfn = KVM_PFN_ERR_FAULT;
old_khva = NULL;
+ ret = 0;
}
out:
@@ -305,14 +315,15 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
gpc->khva = NULL;
}
- /* Snapshot the new pfn before dropping the lock! */
- new_pfn = gpc->pfn;
+ /* Detect a pfn change before dropping the lock! */
+ unmap_old = (old_pfn != gpc->pfn);
+out_unlock:
write_unlock_irq(&gpc->lock);
mutex_unlock(&gpc->refresh_lock);
- if (old_pfn != new_pfn)
+ if (unmap_old)
gpc_unmap_khva(kvm, old_pfn, old_khva);
return ret;
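Two details change together in refresh(). First, 'ret' loses its
default of 0: with the new -EINVAL bail-out, every path has to set it
explicitly, which is why the already-valid path above gains an explicit
'ret = 0'. Second, snapshotting gpc->pfn into new_pfn no longer works,
because the out_unlock path can be reached before any new pfn is
installed, and the comparison outside the lock would then read an
uninitialized value. Evaluating 'unmap_old' while gpc->lock is still
held records the unmap decision at the only point where gpc->pfn
cannot change underneath it.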
@@ -366,11 +377,19 @@ int kvm_gpc_activate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
gpc->vcpu = vcpu;
gpc->usage = usage;
gpc->valid = false;
- gpc->active = true;
spin_lock(&kvm->gpc_lock);
list_add(&gpc->list, &kvm->gpc_list);
spin_unlock(&kvm->gpc_lock);
+
+ /*
+ * Activate the cache after adding it to the list; a concurrent
+ * refresh must not establish a mapping until the cache is
+ * reachable by mmu_notifier events.
+ */
+ write_lock_irq(&gpc->lock);
+ gpc->active = true;
+ write_unlock_irq(&gpc->lock);
}
return kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, len);
}
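The ordering here pairs with the mmu_notifier side: invalidation walks
kvm->gpc_list under kvm->gpc_lock and takes each cache's gpc->lock, so
setting gpc->active only after the list insertion guarantees that any
mapping a concurrent refresh establishes is reachable by the walker.
For context, the invalidation walk looks roughly like this (condensed
from gfn_to_pfn_cache_invalidate_start() in this file; the pfn sanity
checks and the wake-up of in-guest-mode vCPUs are omitted):

	spin_lock(&kvm->gpc_lock);
	list_for_each_entry(gpc, &kvm->gpc_list, list) {
		write_lock_irq(&gpc->lock);

		/* only a valid mapping overlapping the range is zapped */
		if (gpc->valid && gpc->uhva >= start && gpc->uhva < end) {
			gpc->valid = false;
			gpc->pfn = KVM_PFN_ERR_FAULT;
		}

		write_unlock_irq(&gpc->lock);
	}
	spin_unlock(&kvm->gpc_lock);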
@@ -379,12 +398,20 @@ EXPORT_SYMBOL_GPL(kvm_gpc_activate);
void kvm_gpc_deactivate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
{
if (gpc->active) {
+ /*
+ * Deactivate the cache before removing it from the list; KVM
+ * must stall mmu_notifier events until all users go away, i.e.
+ * until gpc->lock is dropped and refresh is guaranteed to fail.
+ */
+ write_lock_irq(&gpc->lock);
+ gpc->active = false;
+ write_unlock_irq(&gpc->lock);
+
spin_lock(&kvm->gpc_lock);
list_del(&gpc->list);
spin_unlock(&kvm->gpc_lock);
kvm_gfn_to_pfn_cache_unmap(kvm, gpc);
- gpc->active = false;
}
}
EXPORT_SYMBOL_GPL(kvm_gpc_deactivate);
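Taken together, activate and deactivate now flip gpc->active under
gpc->lock on opposite sides of the list manipulation, so a racing
refresh either observes an inactive cache and fails with -EINVAL, or
observes an active cache that mmu_notifier events can also find. For
reference, the deactivation path as it reads with this patch applied
(reassembled from the hunk above):

	void kvm_gpc_deactivate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
	{
		if (gpc->active) {
			/* make refresh() fail before tearing down the mapping */
			write_lock_irq(&gpc->lock);
			gpc->active = false;
			write_unlock_irq(&gpc->lock);

			spin_lock(&kvm->gpc_lock);
			list_del(&gpc->list);
			spin_unlock(&kvm->gpc_lock);

			kvm_gfn_to_pfn_cache_unmap(kvm, gpc);
		}
	}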