Diffstat (limited to 'queue-5.15/kvm-x86-snapshot-if-a-vcpu-s-vendor-model-is-amd-vs.-intel-compatible.patch')
-rw-r--r--  queue-5.15/kvm-x86-snapshot-if-a-vcpu-s-vendor-model-is-amd-vs.-intel-compatible.patch  112
1 files changed, 112 insertions, 0 deletions
diff --git a/queue-5.15/kvm-x86-snapshot-if-a-vcpu-s-vendor-model-is-amd-vs.-intel-compatible.patch b/queue-5.15/kvm-x86-snapshot-if-a-vcpu-s-vendor-model-is-amd-vs.-intel-compatible.patch
new file mode 100644
index 0000000000..4744093c8d
--- /dev/null
+++ b/queue-5.15/kvm-x86-snapshot-if-a-vcpu-s-vendor-model-is-amd-vs.-intel-compatible.patch
@@ -0,0 +1,112 @@
+From fd706c9b1674e2858766bfbf7430534c2b26fbef Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Fri, 5 Apr 2024 16:55:54 -0700
+Subject: KVM: x86: Snapshot if a vCPU's vendor model is AMD vs. Intel compatible
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit fd706c9b1674e2858766bfbf7430534c2b26fbef upstream.
+
+Add kvm_vcpu_arch.is_amd_compatible to cache if a vCPU's vendor model is
+compatible with AMD, i.e. if the vCPU vendor is AMD or Hygon, along with
+helpers to check if a vCPU is compatible AMD vs. Intel. To handle Intel
+vs. AMD behavior related to masking the LVTPC entry, KVM will need to
+check for vendor compatibility on every PMI injection, i.e. querying for
+AMD will soon be a moderately hot path.
+
+Note! This subtly (or maybe not-so-subtly) makes "Intel compatible" KVM's
+default behavior, both if userspace omits (or never sets) CPUID 0x0 and if
+userspace sets a completely unknown vendor. One could argue that KVM
+should treat such vCPUs as not being compatible with Intel *or* AMD, but
+that would add useless complexity to KVM.
+
+KVM needs to do *something* in the face of vendor specific behavior, and
+so unless KVM conjured up a magic third option, choosing to treat unknown
+vendors as neither Intel nor AMD means that checks on AMD compatibility
+would yield Intel behavior, and checks for Intel compatibility would yield
+AMD behavior. And that's far worse as it would effectively yield random
+behavior depending on whether KVM checked for AMD vs. Intel vs. !AMD vs.
+!Intel. And practically speaking, all x86 CPUs follow either Intel or AMD
+architecture, i.e. "supporting" an unknown third architecture adds no
+value.
+
+Deliberately don't convert any of the existing guest_cpuid_is_intel()
+checks, as the Intel side of things is messier due to some flows explicitly
+checking for exactly vendor==Intel, versus some flows assuming anything
+that isn't "AMD compatible" gets Intel behavior. The Intel code will be
+cleaned up in the future.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-ID: <20240405235603.1173076-2-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/kvm_host.h | 1 +
+ arch/x86/kvm/cpuid.c | 1 +
+ arch/x86/kvm/cpuid.h | 10 ++++++++++
+ arch/x86/kvm/mmu/mmu.c | 2 +-
+ arch/x86/kvm/x86.c | 2 +-
+ 5 files changed, 14 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -732,6 +732,7 @@ struct kvm_vcpu_arch {
+
+ int cpuid_nent;
+ struct kvm_cpuid_entry2 *cpuid_entries;
++ bool is_amd_compatible;
+
+ u64 reserved_gpa_bits;
+ int maxphyaddr;
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -189,6 +189,7 @@ static void kvm_vcpu_after_set_cpuid(str
+
+ kvm_update_pv_runtime(vcpu);
+
++ vcpu->arch.is_amd_compatible = guest_cpuid_is_amd_or_hygon(vcpu);
+ vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
+ vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);
+
+--- a/arch/x86/kvm/cpuid.h
++++ b/arch/x86/kvm/cpuid.h
+@@ -121,6 +121,16 @@ static inline bool guest_cpuid_is_intel(
+ return best && is_guest_vendor_intel(best->ebx, best->ecx, best->edx);
+ }
+
++static inline bool guest_cpuid_is_amd_compatible(struct kvm_vcpu *vcpu)
++{
++ return vcpu->arch.is_amd_compatible;
++}
++
++static inline bool guest_cpuid_is_intel_compatible(struct kvm_vcpu *vcpu)
++{
++ return !guest_cpuid_is_amd_compatible(vcpu);
++}
++
+ static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
+ {
+ struct kvm_cpuid_entry2 *best;
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -4351,7 +4351,7 @@ static void reset_rsvds_bits_mask(struct
+ context->root_level, is_efer_nx(context),
+ guest_can_use_gbpages(vcpu),
+ is_cr4_pse(context),
+- guest_cpuid_is_amd_or_hygon(vcpu));
++ guest_cpuid_is_amd_compatible(vcpu));
+ }
+
+ static void
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3107,7 +3107,7 @@ static void kvmclock_sync_fn(struct work
+ static bool can_set_mci_status(struct kvm_vcpu *vcpu)
+ {
+ /* McStatusWrEn enabled? */
+- if (guest_cpuid_is_amd_or_hygon(vcpu))
++ if (guest_cpuid_is_amd_compatible(vcpu))
+ return !!(vcpu->arch.msr_hwcr & BIT_ULL(18));
+
+ return false;
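
The first paragraph of the commit message above boils down to a common caching pattern: derive the vendor-compatibility answer once, when guest CPUID is (re)set, store it in a bool on the vCPU, and let hot paths (such as the upcoming per-PMI LVTPC check) read the cached bool instead of re-walking CPUID entries. Below is a minimal userspace C sketch of that pattern only; the struct layout, the vendor-string check, and helper names such as vcpu_after_set_cpuid() and vcpu_is_amd_compatible() are simplified stand-ins chosen for illustration, not the real KVM structures or API.

/*
 * Illustrative userspace model of the caching pattern from this patch:
 * compute "is the guest AMD/Hygon-compatible?" once, when CPUID is set,
 * and answer later queries from a cached bool.  All names and types are
 * simplified stand-ins, not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct cpuid_entry {            /* stand-in for struct kvm_cpuid_entry2 */
	unsigned int function;
	char vendor[13];        /* CPUID.0 vendor string, NUL-terminated */
};

struct vcpu {                   /* stand-in for struct kvm_vcpu_arch */
	struct cpuid_entry cpuid0;
	bool is_amd_compatible; /* cached when CPUID is set */
};

/* The "expensive" check: done once per CPUID update, not per query. */
static bool cpuid_vendor_is_amd_or_hygon(const struct cpuid_entry *e)
{
	return strcmp(e->vendor, "AuthenticAMD") == 0 ||
	       strcmp(e->vendor, "HygonGenuine") == 0;
}

/* Analogue of refreshing the cache in kvm_vcpu_after_set_cpuid(). */
static void vcpu_after_set_cpuid(struct vcpu *v)
{
	v->is_amd_compatible = cpuid_vendor_is_amd_or_hygon(&v->cpuid0);
}

/* Analogues of the new helpers: cheap enough for a hot path. */
static bool vcpu_is_amd_compatible(const struct vcpu *v)
{
	return v->is_amd_compatible;
}

static bool vcpu_is_intel_compatible(const struct vcpu *v)
{
	/* Anything not AMD/Hygon-compatible gets Intel behavior. */
	return !vcpu_is_amd_compatible(v);
}

int main(void)
{
	struct vcpu v = { .cpuid0 = { .function = 0, .vendor = "HygonGenuine" } };

	vcpu_after_set_cpuid(&v);
	printf("AMD-compatible:   %d\n", vcpu_is_amd_compatible(&v));
	printf("Intel-compatible: %d\n", vcpu_is_intel_compatible(&v));

	/* An unknown vendor falls back to Intel-compatible, as the commit message argues. */
	strcpy(v.cpuid0.vendor, "SomeOtherCPU");
	vcpu_after_set_cpuid(&v);
	printf("Intel-compatible: %d\n", vcpu_is_intel_compatible(&v));
	return 0;
}

Running the sketch prints the AMD/Intel answers for a Hygon vendor string and then, after switching to an unrecognized vendor, shows the Intel-compatible default behavior the commit message describes.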