about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author    Paul Mackerras <paulus@au1.ibm.com>  2014-02-03 14:18:14 +1100
committer Crístian Viana <vianac@linux.vnet.ibm.com>  2014-02-04 21:33:31 -0200
commit    6f7a6dcaeea1ff9957a24ffc8efc4396079421f9 (patch)
tree      c8f5c27b8a6e82f5836c1124ee556f9bbc7a188e
parent    f29bb69a882f6aaf147c7e7ab0425bddfaa8c31a (diff)
download  powerkvm-6f7a6dcaeea1ff9957a24ffc8efc4396079421f9.tar.gz
KVM: PPC: Book3S HV: Fix register usage when loading/saving VRSAVE
Commit 595e4f7e697e ("KVM: PPC: Book3S HV: Use load/store_fp_state functions in HV guest entry/exit") changed the register usage in kvmppc_save_fp() and kvmppc_load_fp() but omitted changing the instructions that load and save VRSAVE. The result is that the VRSAVE value was loaded from a constant address, and saved to a location past the end of the vcpu struct, causing host kernel memory corruption and various kinds of host kernel crashes. This fixes the problem by using register r31, which contains the vcpu pointer, instead of r3 and r4. This should help resolve several bugzillas involving guest or host crashes and hangs, including 98456, 102775, 103534, 100504, and possibly others. Signed-off-by: Paul Mackerras <paulus@samba.org>
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 8
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 2bb60b28344e26..a2b306f6179726 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -2423,6 +2423,8 @@ kvmppc_read_intr:
/*
* Save away FP, VMX and VSX registers.
* r3 = vcpu pointer
+ * N.B. r30 and r31 are volatile across this function,
+ * thus it is not callable from C.
*/
kvmppc_save_fp:
mflr r30
@@ -2449,13 +2451,15 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
mfspr r6,SPRN_VRSAVE
- stw r6,VCPU_VRSAVE(r3)
+ stw r6,VCPU_VRSAVE(r31)
mtlr r30
blr
/*
* Load up FP, VMX and VSX registers
* r4 = vcpu pointer
+ * N.B. r30 and r31 are volatile across this function,
+ * thus it is not callable from C.
*/
kvmppc_load_fp:
mflr r30
@@ -2481,7 +2485,7 @@ BEGIN_FTR_SECTION
bl .load_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
- lwz r7,VCPU_VRSAVE(r4)
+ lwz r7,VCPU_VRSAVE(r31)
mtspr SPRN_VRSAVE,r7
mtlr r30
mr r4,r31