author		Paolo Bonzini <pbonzini@redhat.com>	2015-05-19 16:29:22 +0200
committer	Paolo Bonzini <pbonzini@redhat.com>	2015-05-20 12:32:07 +0200
commit		02f4051c1d58d6ee085c68967eb79db5c10558a8 (patch)
tree		93e8b38f98383b10672b563e9aec7cb2278e859c
parent		2c851bdfbfdf4a22f5899f3ef84cbe5780a8b464 (diff)
KVM: x86: pass struct kvm_mmu_page to account/unaccount_shadowed
Prepare for multiple address spaces this way, since a VCPU is not available
where unaccount_shadowed is called.  We will get to the right kvm_memslots
struct through the role field in struct kvm_mmu_page.

Reviewed-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--	arch/x86/kvm/mmu.c	12
1 file changed, 8 insertions(+), 4 deletions(-)
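For context, the commit message says the right kvm_memslots will later be reached through sp->role. Below is a rough sketch of what that could look like in a follow-up patch; the helper kvm_memslots_for_spte_role, __kvm_memslots, and the role.smm bit are assumptions used for illustration here, not something this commit introduces.

/*
 * Illustrative sketch only: once per-address-space memslots exist, the
 * role field of the shadow page could select which set of slots to use.
 */
static struct kvm_memslots *kvm_memslots_for_spte_role(struct kvm *kvm,
							union kvm_mmu_page_role role)
{
	/* Hypothetical: role.smm picks the SMM vs. normal address space. */
	return __kvm_memslots(kvm, role.smm);
}

static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	struct kvm_memslots *slots = kvm_memslots_for_spte_role(kvm, sp->role);
	struct kvm_memory_slot *slot = __gfn_to_memslot(slots, sp->gfn);

	/* ... walk the large-page levels as before ... */
}

With the slot looked up this way, neither function needs a VCPU, which is why this commit switches the parameter from gfn_t to struct kvm_mmu_page *.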
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ceed1c591bc555..6ea24812007a8d 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -804,12 +804,14 @@ static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
 	return &slot->arch.lpage_info[level - 2][idx];
 }
 
-static void account_shadowed(struct kvm *kvm, gfn_t gfn)
+static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	struct kvm_memory_slot *slot;
 	struct kvm_lpage_info *linfo;
+	gfn_t gfn;
 	int i;
 
+	gfn = sp->gfn;
 	slot = gfn_to_memslot(kvm, gfn);
 	for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
 		linfo = lpage_info_slot(gfn, slot, i);
@@ -818,12 +820,14 @@ static void account_shadowed(struct kvm *kvm, gfn_t gfn)
 	kvm->arch.indirect_shadow_pages++;
 }
 
-static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
+static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	struct kvm_memory_slot *slot;
 	struct kvm_lpage_info *linfo;
+	gfn_t gfn;
 	int i;
 
+	gfn = sp->gfn;
 	slot = gfn_to_memslot(kvm, gfn);
 	for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
 		linfo = lpage_info_slot(gfn, slot, i);
@@ -2131,7 +2135,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		if (level > PT_PAGE_TABLE_LEVEL && need_sync)
 			kvm_sync_pages(vcpu, gfn);
 
-		account_shadowed(vcpu->kvm, gfn);
+		account_shadowed(vcpu->kvm, sp);
 	}
 	sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
 	init_shadow_page_table(sp);
@@ -2312,7 +2316,7 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
 	kvm_mmu_unlink_parents(kvm, sp);
 
 	if (!sp->role.invalid && !sp->role.direct)
-		unaccount_shadowed(kvm, sp->gfn);
+		unaccount_shadowed(kvm, sp);
 
 	if (sp->unsync)
 		kvm_unlink_unsync_page(kvm, sp);