author     Suraj Jitindar Singh <sjitindarsingh@gmail.com>  2018-09-05 15:54:24 +1000
committer  Paul Mackerras <paulus@ozlabs.org>               2018-10-09 12:02:57 +1100
commit     2351bc9a5e9ed804d248732327b4c6d1bb02d86f (patch)
tree       3a6519a81c527c0db9b995b95f5b1e11884a85b3
parent     e36381ec81cb094bc730cf86a17a14ae057da8a4 (diff)
KVM: PPC: Book3S HV: Invalidate TLB when nested vcpu moves physical cpu
This is only done at level 0, since only level 0 knows which physical CPU a vcpu is running on. This does for nested guests what L0 already did for its own guests, which is to flush the TLB on a pCPU when it goes to run a vCPU there, and there is another vCPU in the same VM which previously ran on this pCPU and has now started to run on another pCPU. This is to handle the situation where the other vCPU touched a mapping, moved to another pCPU and did a tlbiel (local-only tlbie) on that new pCPU and thus left behind a stale TLB entry on this pCPU.

This introduces a limit on the vcpu_token values used in the H_ENTER_NESTED hcall -- they must now be less than NR_CPUS.

[paulus@ozlabs.org - made prev_cpu array be short[] to reduce memory consumption.]

Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
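To make the scenario concrete, here is a minimal user-space C sketch of the bookkeeping the patch adds: per-vCPU tracking of the last physical core, marking the old core as needing a flush when a vCPU moves, and a test-and-clear check on guest entry. The struct and function names (guest, prepare_vcpu, check_need_tlb_flush, core_of) are illustrative stand-ins, not the kernel's; the kernel uses cpumasks, cpu_first_thread_sibling() and radix__local_flush_tlb_lpid_guest(), as the diff below shows.

    #include <stdbool.h>
    #include <string.h>

    #define NR_CPUS 2048            /* illustrative; matches the prev_cpu[] bound */
    #define THREADS_PER_CORE 4      /* POWER9 SMT4; the TLB is shared per core */

    struct guest {
            short prev_cpu[NR_CPUS];        /* last pcpu per vcpu_token, -1 = never ran */
            bool need_tlb_flush[NR_CPUS];   /* stand-in for the per-core cpumask bit */
    };

    static void guest_init(struct guest *g)
    {
            memset(g->prev_cpu, -1, sizeof(g->prev_cpu));  /* as kvmhv_alloc_nested does */
            memset(g->need_tlb_flush, 0, sizeof(g->need_tlb_flush));
    }

    /* First thread of the core, like cpu_first_thread_sibling() / pcpu &= ~0x3UL. */
    static int core_of(int cpu)
    {
            return cpu & ~(THREADS_PER_CORE - 1);
    }

    /* Before running vcpu_token on pcpu: if the vCPU moved to a new core, the old
     * core may hold TLB entries that the vCPU later invalidated with a tlbiel
     * elsewhere, so mark the old core as needing a flush. */
    static void prepare_vcpu(struct guest *g, int vcpu_token, int pcpu)
    {
            int prev = g->prev_cpu[vcpu_token];

            if (prev >= 0 && core_of(prev) != core_of(pcpu))
                    g->need_tlb_flush[core_of(prev)] = true;
            g->prev_cpu[vcpu_token] = pcpu;
    }

    /* On guest entry: flush the guest TLB if some vCPU previously moved away. */
    static void check_need_tlb_flush(struct guest *g, int pcpu)
    {
            if (g->need_tlb_flush[core_of(pcpu)]) {
                    /* kernel: radix__local_flush_tlb_lpid_guest(lpid) */
                    g->need_tlb_flush[core_of(pcpu)] = false;
            }
    }

Indexing prev_cpu[] by vcpu_token is also why the patch rejects H_ENTER_NESTED calls with vcpu_token >= NR_CPUS, and why the array is initialised to -1 ("never ran") on allocation.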
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_64.h  |   3
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c              | 101
-rw-r--r--  arch/powerpc/kvm/book3s_hv_nested.c       |   5
3 files changed, 71 insertions(+), 38 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 719b317235671c..83d4def5f1ed37 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -52,6 +52,9 @@ struct kvm_nested_guest {
long refcnt; /* number of pointers to this struct */
struct mutex tlb_lock; /* serialize page faults and tlbies */
struct kvm_nested_guest *next;
+ cpumask_t need_tlb_flush;
+ cpumask_t cpu_in_guest;
+ short prev_cpu[NR_CPUS];
};
/*
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 49f07de504940e..24a668388a6ee9 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2397,10 +2397,18 @@ static void kvmppc_release_hwthread(int cpu)
static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
{
+ struct kvm_nested_guest *nested = vcpu->arch.nested;
+ cpumask_t *cpu_in_guest;
int i;
cpu = cpu_first_thread_sibling(cpu);
- cpumask_set_cpu(cpu, &kvm->arch.need_tlb_flush);
+ if (nested) {
+ cpumask_set_cpu(cpu, &nested->need_tlb_flush);
+ cpu_in_guest = &nested->cpu_in_guest;
+ } else {
+ cpumask_set_cpu(cpu, &kvm->arch.need_tlb_flush);
+ cpu_in_guest = &kvm->arch.cpu_in_guest;
+ }
/*
* Make sure setting of bit in need_tlb_flush precedes
* testing of cpu_in_guest bits. The matching barrier on
@@ -2408,13 +2416,23 @@ static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
*/
smp_mb();
for (i = 0; i < threads_per_core; ++i)
- if (cpumask_test_cpu(cpu + i, &kvm->arch.cpu_in_guest))
+ if (cpumask_test_cpu(cpu + i, cpu_in_guest))
smp_call_function_single(cpu + i, do_nothing, NULL, 1);
}
static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
{
+ struct kvm_nested_guest *nested = vcpu->arch.nested;
struct kvm *kvm = vcpu->kvm;
+ int prev_cpu;
+
+ if (!cpu_has_feature(CPU_FTR_HVMODE))
+ return;
+
+ if (nested)
+ prev_cpu = nested->prev_cpu[vcpu->arch.nested_vcpu_id];
+ else
+ prev_cpu = vcpu->arch.prev_cpu;
/*
* With radix, the guest can do TLB invalidations itself,
@@ -2428,12 +2446,46 @@ static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
* ran to flush the TLB. The TLB is shared between threads,
* so we use a single bit in .need_tlb_flush for all 4 threads.
*/
- if (vcpu->arch.prev_cpu != pcpu) {
- if (vcpu->arch.prev_cpu >= 0 &&
- cpu_first_thread_sibling(vcpu->arch.prev_cpu) !=
+ if (prev_cpu != pcpu) {
+ if (prev_cpu >= 0 &&
+ cpu_first_thread_sibling(prev_cpu) !=
cpu_first_thread_sibling(pcpu))
- radix_flush_cpu(kvm, vcpu->arch.prev_cpu, vcpu);
- vcpu->arch.prev_cpu = pcpu;
+ radix_flush_cpu(kvm, prev_cpu, vcpu);
+ if (nested)
+ nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu;
+ else
+ vcpu->arch.prev_cpu = pcpu;
+ }
+}
+
+static void kvmppc_radix_check_need_tlb_flush(struct kvm *kvm, int pcpu,
+ struct kvm_nested_guest *nested)
+{
+ cpumask_t *need_tlb_flush;
+ int lpid;
+
+ if (!cpu_has_feature(CPU_FTR_HVMODE))
+ return;
+
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ pcpu &= ~0x3UL;
+
+ if (nested) {
+ lpid = nested->shadow_lpid;
+ need_tlb_flush = &nested->need_tlb_flush;
+ } else {
+ lpid = kvm->arch.lpid;
+ need_tlb_flush = &kvm->arch.need_tlb_flush;
+ }
+
+ mtspr(SPRN_LPID, lpid);
+ isync();
+ smp_mb();
+
+ if (cpumask_test_cpu(pcpu, need_tlb_flush)) {
+ radix__local_flush_tlb_lpid_guest(lpid);
+ /* Clear the bit after the TLB flush */
+ cpumask_clear_cpu(pcpu, need_tlb_flush);
}
}
@@ -3127,8 +3179,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
spin_unlock(&core_info.vc[sub]->lock);
if (kvm_is_radix(vc->kvm)) {
- int tmp = pcpu;
-
/*
* Do we need to flush the process scoped TLB for the LPAR?
*
@@ -3139,17 +3189,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
*
* Hash must be flushed in realmode in order to use tlbiel.
*/
- mtspr(SPRN_LPID, vc->kvm->arch.lpid);
- isync();
-
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- tmp &= ~0x3UL;
-
- if (cpumask_test_cpu(tmp, &vc->kvm->arch.need_tlb_flush)) {
- radix__local_flush_tlb_lpid_guest(vc->kvm->arch.lpid);
- /* Clear the bit after the TLB flush */
- cpumask_clear_cpu(tmp, &vc->kvm->arch.need_tlb_flush);
- }
+ kvmppc_radix_check_need_tlb_flush(vc->kvm, pcpu, NULL);
}
/*
@@ -3868,12 +3908,11 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
struct kvm_vcpu *vcpu, u64 time_limit,
unsigned long lpcr)
{
- int trap, r, pcpu, pcpu0;
+ int trap, r, pcpu;
int srcu_idx;
struct kvmppc_vcore *vc;
struct kvm *kvm = vcpu->kvm;
struct kvm_nested_guest *nested = vcpu->arch.nested;
- unsigned long lpid;
trace_kvmppc_run_vcpu_enter(vcpu);
@@ -3946,22 +3985,8 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
vc->vcore_state = VCORE_RUNNING;
trace_kvmppc_run_core(vc, 0);
- lpid = vc->kvm->arch.lpid;
- if (nested)
- lpid = nested->shadow_lpid;
- mtspr(SPRN_LPID, lpid);
- isync();
-
- /* See comment above in kvmppc_run_core() about this */
- pcpu0 = pcpu;
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- pcpu0 &= ~0x3UL;
-
- if (cpumask_test_cpu(pcpu0, &kvm->arch.need_tlb_flush)) {
- radix__local_flush_tlb_lpid_guest(lpid);
- /* Clear the bit after the TLB flush */
- cpumask_clear_cpu(pcpu0, &kvm->arch.need_tlb_flush);
- }
+ if (cpu_has_feature(CPU_FTR_HVMODE))
+ kvmppc_radix_check_need_tlb_flush(kvm, pcpu, nested);
trace_hardirqs_on();
guest_enter_irqoff();
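The radix_flush_cpu() / kvmppc_radix_check_need_tlb_flush() pair above depends on the smp_mb() pairing described in the comment: either the flusher sees the target CPU in cpu_in_guest and kicks it with an IPI, or the entering CPU sees the need_tlb_flush bit and flushes before running guest code. A rough single-bit illustration of that store-buffering protocol, using C11 atomics in place of the kernel's cpumask and smp_mb() primitives (function names hypothetical):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool need_tlb_flush;  /* stand-in for the per-core cpumask bit */
    static atomic_bool cpu_in_guest;    /* set while the target CPU runs guest context */

    /* Flusher side (cf. radix_flush_cpu): publish the flush request, full
     * barrier, then check whether the target CPU must be interrupted. */
    static bool flusher_needs_ipi(void)
    {
            atomic_store_explicit(&need_tlb_flush, true, memory_order_relaxed);
            atomic_thread_fence(memory_order_seq_cst);          /* smp_mb() */
            return atomic_load_explicit(&cpu_in_guest, memory_order_relaxed);
            /* true: send an IPI (the kernel calls do_nothing) so the CPU
             * leaves the guest and re-checks need_tlb_flush on re-entry. */
    }

    /* Entry side (cf. kvmppc_radix_check_need_tlb_flush): announce we are
     * entering the guest, full barrier, then test-and-clear the request. */
    static bool entry_must_flush(void)
    {
            atomic_store_explicit(&cpu_in_guest, true, memory_order_relaxed);
            atomic_thread_fence(memory_order_seq_cst);          /* smp_mb() */
            if (atomic_load_explicit(&need_tlb_flush, memory_order_relaxed)) {
                    /* kernel: radix__local_flush_tlb_lpid_guest(lpid), then clear */
                    atomic_store_explicit(&need_tlb_flush, false, memory_order_relaxed);
                    return true;
            }
            return false;
    }

Whichever side's store lands first, at least one side observes the other's write, so a stale TLB entry cannot survive into guest execution unnoticed.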
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index 486d9007c288c1..a876dc3c8e1ffb 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -168,6 +168,9 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
if (err)
return H_PARAMETER;
+ if (l2_hv.vcpu_token >= NR_CPUS)
+ return H_PARAMETER;
+
/* translate lpid */
l2 = kvmhv_get_nested(vcpu->kvm, l2_hv.lpid, true);
if (!l2)
@@ -412,6 +415,8 @@ struct kvm_nested_guest *kvmhv_alloc_nested(struct kvm *kvm, unsigned int lpid)
goto out_free2;
gp->shadow_lpid = shadow_lpid;
+ memset(gp->prev_cpu, -1, sizeof(gp->prev_cpu));
+
return gp;
out_free2: