about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorJim Mattson <jmattson@google.com>2020-05-08 13:39:38 -0700
committerPaolo Bonzini <pbonzini@redhat.com>2020-05-09 09:26:31 -0400
commitb49a1a6d4e234a60211805b7cb0db06c9b293209 (patch)
tree083a9c88bfacbdc95ff8c0645955bba23a873496
parent9c8389546e1fbb6e25cc5bc22bc2f2765e9dbfbe (diff)
downloadkvm-unit-tests-b49a1a6d4e234a60211805b7cb0db06c9b293209.tar.gz
x86: VMX: Add a VMX-preemption timer expiration test
When the VMX-preemption timer is activated, code executing in VMX non-root operation should never be able to record a TSC value beyond the deadline imposed by adding the scaled VMX-preemption timer value to the first TSC value observed by the guest after VM-entry.

Signed-off-by: Jim Mattson <jmattson@google.com>
Reviewed-by: Peter Shier <pshier@google.com>
Message-Id: <20200508203938.88508-1-jmattson@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--lib/x86/processor.h23
-rw-r--r--x86/vmx.h21
-rw-r--r--x86/vmx_tests.c81
3 files changed, 125 insertions, 0 deletions
diff --git a/lib/x86/processor.h b/lib/x86/processor.h
index 804673b..6e0811e 100644
--- a/lib/x86/processor.h
+++ b/lib/x86/processor.h
@@ -479,6 +479,29 @@ static inline unsigned long long rdtsc(void)
return r;
}
+/*
+ * Per the advice in the SDM, volume 2, the sequence "mfence; lfence"
+ * executed immediately before rdtsc ensures that rdtsc will be
+ * executed only after all previous instructions have executed and all
+ * previous loads and stores are globally visible. In addition, the
+ * lfence immediately after rdtsc ensures that rdtsc will be executed
+ * prior to the execution of any subsequent instruction.
+ */
+static inline unsigned long long fenced_rdtsc(void)
+{
+	unsigned long long tsc;
+
+#ifdef __x86_64__
+	unsigned int eax, edx;
+
+	asm volatile ("mfence; lfence; rdtsc; lfence" : "=a"(eax), "=d"(edx));
+	/* RDTSC returns the low half in eax and the high half in edx. */
+	tsc = eax | ((unsigned long long)edx << 32);
+#else
+	/* On 32-bit, the "=A" constraint binds the 64-bit result to edx:eax. */
+	asm volatile ("mfence; lfence; rdtsc; lfence" : "=A"(tsc));
+#endif
+	return tsc;
+}
+
static inline unsigned long long rdtscp(u32 *aux)
{
long long r;
diff --git a/x86/vmx.h b/x86/vmx.h
index 08b354d..71fdaa0 100644
--- a/x86/vmx.h
+++ b/x86/vmx.h
@@ -118,6 +118,27 @@ union vmx_ctrl_msr {
};
};
+/*
+ * Layout of the IA32_VMX_MISC MSR (SDM vol. 3, appendix A.6).  The
+ * preemption timer test below reads this MSR into .val and uses
+ * .pt_bit, bits 4:0 -- the position of the TSC bit whose toggling
+ * decrements the VMX-preemption timer.  The remaining field names
+ * follow the SDM bit assignments.
+ */
+union vmx_misc {
+	u64 val;
+	struct {
+		u32 pt_bit:5,	/* 4:0 - preemption timer TSC rate bit */
+			stores_lma:1,
+			act_hlt:1,
+			act_shutdown:1,
+			act_wfsipi:1,
+			:5,
+			vmx_pt:1,
+			smm_smbase:1,
+			cr3_targets:9,
+			msr_list_size:3,
+			smm_mon_ctl:1,
+			vmwrite_any:1,
+			inject_len0:1,
+			:1;
+		u32 mseg_revision;
+	};
+};
+
union vmx_ept_vpid {
u64 val;
struct {
diff --git a/x86/vmx_tests.c b/x86/vmx_tests.c
index aa94a34..68f93d3 100644
--- a/x86/vmx_tests.c
+++ b/x86/vmx_tests.c
@@ -8673,6 +8673,86 @@ static void vmx_preemption_timer_tf_test(void)
handle_exception(DB_VECTOR, old_db);
}
+/* Desired timer expiry, expressed in TSC cycles (scaled down by pt_bit). */
+#define VMX_PREEMPTION_TIMER_EXPIRY_CYCLES 1000000
+
+/* TSC values recorded by the guest; checked on the host side below. */
+static u64 vmx_preemption_timer_expiry_start;
+static u64 vmx_preemption_timer_expiry_finish;
+
+static void vmx_preemption_timer_expiry_test_guest(void)
+{
+	/* Let the host arm the preemption timer before timing starts. */
+	vmcall();
+	vmx_preemption_timer_expiry_start = fenced_rdtsc();
+
+	/* Keep storing TSC readings until the host advances the stage. */
+	while (vmx_get_test_stage() == 0)
+		vmx_preemption_timer_expiry_finish = fenced_rdtsc();
+}
+
+/*
+ * Test that the VMX-preemption timer is not excessively delayed.
+ *
+ * Per the SDM, volume 3, VM-entry starts the VMX-preemption timer
+ * with the unsigned value in the VMX-preemption timer-value field,
+ * and the VMX-preemption timer counts down by 1 every time bit X in
+ * the TSC changes due to a TSC increment (where X is
+ * IA32_VMX_MISC[4:0]). If the timer counts down to zero in any state
+ * other than the wait-for-SIPI state, the logical processor
+ * transitions to the C0 C-state and causes a VM-exit.
+ *
+ * The guest code above reads the starting TSC after VM-entry. At this
+ * point, the VMX-preemption timer has already been activated. Next,
+ * the guest code reads the current TSC in a loop, storing the value
+ * read to memory.
+ *
+ * If the RDTSC in the loop reads a value past the VMX-preemption
+ * timer deadline, then the VMX-preemption timer VM-exit must be
+ * delivered before the next instruction retires. Even if a higher
+ * priority SMI is delivered first, the VMX-preemption timer VM-exit
+ * must be delivered before the next instruction retires. Hence, a TSC
+ * value past the VMX-preemption timer deadline might be read, but it
+ * cannot be stored. If a TSC value past the deadline *is* stored,
+ * then the architectural specification has been violated.
+ */
+static void vmx_preemption_timer_expiry_test(void)
+{
+	u32 preemption_timer_value;
+	union vmx_misc misc;
+	u64 tsc_deadline;
+	u32 reason;
+
+	if (!(ctrl_pin_rev.clr & PIN_PREEMPT)) {
+		report_skip("'Activate VMX-preemption timer' not supported");
+		return;
+	}
+
+	test_set_guest(vmx_preemption_timer_expiry_test_guest);
+
+	/* Run the guest to its initial vmcall, then step past it. */
+	enter_guest();
+	skip_exit_vmcall();
+
+	/* Scale the desired TSC-cycle expiry down to timer ticks. */
+	misc.val = rdmsr(MSR_IA32_VMX_MISC);
+	preemption_timer_value =
+		VMX_PREEMPTION_TIMER_EXPIRY_CYCLES >> misc.pt_bit;
+
+	/* Arm the timer; the guest spins at stage 0 until it fires. */
+	vmcs_set_bits(PIN_CONTROLS, PIN_PREEMPT);
+	vmcs_write(PREEMPT_TIMER_VALUE, preemption_timer_value);
+	vmx_set_test_stage(0);
+
+	enter_guest();
+	reason = (u32)vmcs_read(EXI_REASON);
+	TEST_ASSERT(reason == VMX_PREEMPT);
+
+	/* Disarm the timer and let the guest see stage 1 and finish. */
+	vmcs_clear_bits(PIN_CONTROLS, PIN_PREEMPT);
+	vmx_set_test_stage(1);
+	enter_guest();
+
+	/*
+	 * Deadline = the start TSC truncated to timer granularity
+	 * (bits below X cleared) plus the timer value scaled back up
+	 * to TSC cycles.
+	 */
+	tsc_deadline = ((vmx_preemption_timer_expiry_start >> misc.pt_bit) <<
+			misc.pt_bit) + (preemption_timer_value << misc.pt_bit);
+
+	report(vmx_preemption_timer_expiry_finish < tsc_deadline,
+	       "Last stored guest TSC (%lu) < TSC deadline (%lu)",
+	       vmx_preemption_timer_expiry_finish, tsc_deadline);
+}
+
static void vmx_db_test_guest(void)
{
/*
@@ -9981,6 +10061,7 @@ struct vmx_test vmx_tests[] = {
TEST(vmx_store_tsc_test),
TEST(vmx_preemption_timer_zero_test),
TEST(vmx_preemption_timer_tf_test),
+ TEST(vmx_preemption_timer_expiry_test),
/* EPT access tests. */
TEST(ept_access_test_not_present),
TEST(ept_access_test_read_only),