From 1cd2b08f7cc4a57cc1b04f62b6349970d13456c3 Mon Sep 17 00:00:00 2001
From: Jing Zhang
Date: Mon, 15 Jan 2024 14:02:09 -0800
Subject: KVM: arm64: selftests: Handle feature fields with nonzero minimum value correctly

There are some feature fields with a nonzero minimum valid value. Make sure
get_safe_value() won't return invalid field values for them.

Also fix a bug that wrongly used the feature bits type as the feature bits
sign, causing all fields to be treated as signed in get_safe_value() and
get_invalid_value().

Fixes: 54a9ea73527d ("KVM: arm64: selftests: Test for setting ID register from usersapce")
Reported-by: Zenghui Yu
Reported-by: Itaru Kitayama
Tested-by: Itaru Kitayama
Signed-off-by: Jing Zhang
Link: https://lore.kernel.org/r/20240115220210.3966064-2-jingzhangos@google.com
Signed-off-by: Oliver Upton
---
 tools/testing/selftests/kvm/aarch64/set_id_regs.c | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)
(limited to 'tools')

diff --git a/tools/testing/selftests/kvm/aarch64/set_id_regs.c b/tools/testing/selftests/kvm/aarch64/set_id_regs.c
index bac05210b5392..16e2338686c17 100644
--- a/tools/testing/selftests/kvm/aarch64/set_id_regs.c
+++ b/tools/testing/selftests/kvm/aarch64/set_id_regs.c
@@ -32,6 +32,10 @@ struct reg_ftr_bits {
 	enum ftr_type type;
 	uint8_t shift;
 	uint64_t mask;
+	/*
+	 * For FTR_EXACT, safe_val is used as the exact safe value.
+	 * For FTR_LOWER_SAFE, safe_val is used as the minimal safe value.
+	 */
 	int64_t safe_val;
 };
 
@@ -65,13 +69,13 @@ struct test_feature_reg {
 
 static const struct reg_ftr_bits ftr_id_aa64dfr0_el1[] = {
 	S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, PMUVer, 0),
-	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, DebugVer, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, DebugVer, ID_AA64DFR0_EL1_DebugVer_IMP),
 	REG_FTR_END,
 };
 
 static const struct reg_ftr_bits ftr_id_dfr0_el1[] = {
-	S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_DFR0_EL1, PerfMon, 0),
-	REG_FTR_BITS(FTR_LOWER_SAFE, ID_DFR0_EL1, CopDbg, 0),
+	S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_DFR0_EL1, PerfMon, ID_DFR0_EL1_PerfMon_PMUv3),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_DFR0_EL1, CopDbg, ID_DFR0_EL1_CopDbg_Armv8),
 	REG_FTR_END,
 };
 
@@ -224,13 +228,13 @@ uint64_t get_safe_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
 {
 	uint64_t ftr_max = GENMASK_ULL(ARM64_FEATURE_FIELD_BITS - 1, 0);
 
-	if (ftr_bits->type == FTR_UNSIGNED) {
+	if (ftr_bits->sign == FTR_UNSIGNED) {
 		switch (ftr_bits->type) {
 		case FTR_EXACT:
 			ftr = ftr_bits->safe_val;
 			break;
 		case FTR_LOWER_SAFE:
-			if (ftr > 0)
+			if (ftr > ftr_bits->safe_val)
 				ftr--;
 			break;
 		case FTR_HIGHER_SAFE:
@@ -252,7 +256,7 @@ uint64_t get_safe_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
 			ftr = ftr_bits->safe_val;
 			break;
 		case FTR_LOWER_SAFE:
-			if (ftr > 0)
+			if (ftr > ftr_bits->safe_val)
 				ftr--;
 			break;
 		case FTR_HIGHER_SAFE:
@@ -276,7 +280,7 @@ uint64_t get_invalid_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
 {
 	uint64_t ftr_max = GENMASK_ULL(ARM64_FEATURE_FIELD_BITS - 1, 0);
 
-	if (ftr_bits->type == FTR_UNSIGNED) {
+	if (ftr_bits->sign == FTR_UNSIGNED) {
 		switch (ftr_bits->type) {
 		case FTR_EXACT:
 			ftr = max((uint64_t)ftr_bits->safe_val + 1, ftr + 1);
-- cgit 1.2.3-korg


From d7e68738e1aae940b54bfc03313e0ef5720e2e71 Mon Sep 17 00:00:00 2001
From: Jinrong Liang
Date: Tue, 9 Jan 2024 15:02:32 -0800
Subject: KVM: selftests: Add vcpu_set_cpuid_property() to set properties

Add vcpu_set_cpuid_property() helper function for setting properties, and
use it instead of open coding an equivalent for MAX_PHY_ADDR.
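As a purely illustrative sketch (not part of the patch itself), the
conversion for an existing caller looks roughly like this:

	/* Before: one-off helper, hardcoded to the MAXPHYADDR CPUID field. */
	vcpu_set_cpuid_maxphyaddr(vcpu, MAXPHYADDR);

	/* After: works for any struct kvm_x86_cpu_property. */
	vcpu_set_cpuid_property(vcpu, X86_PROPERTY_MAX_PHY_ADDR, MAXPHYADDR);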
Future vPMU testcases will also need to stuff various CPUID properties. Reviewed-by: Jim Mattson Signed-off-by: Jinrong Liang Co-developed-by: Sean Christopherson Tested-by: Dapeng Mi Link: https://lore.kernel.org/r/20240109230250.424295-13-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/include/x86_64/processor.h | 4 +++- tools/testing/selftests/kvm/lib/x86_64/processor.c | 15 ++++++++++++--- .../kvm/x86_64/smaller_maxphyaddr_emulation_test.c | 2 +- 3 files changed, 16 insertions(+), 5 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h index a84863503fcb4..932944c4ea01e 100644 --- a/tools/testing/selftests/kvm/include/x86_64/processor.h +++ b/tools/testing/selftests/kvm/include/x86_64/processor.h @@ -995,7 +995,9 @@ static inline void vcpu_set_cpuid(struct kvm_vcpu *vcpu) vcpu_ioctl(vcpu, KVM_GET_CPUID2, vcpu->cpuid); } -void vcpu_set_cpuid_maxphyaddr(struct kvm_vcpu *vcpu, uint8_t maxphyaddr); +void vcpu_set_cpuid_property(struct kvm_vcpu *vcpu, + struct kvm_x86_cpu_property property, + uint32_t value); void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, uint32_t function); void vcpu_set_or_clear_cpuid_feature(struct kvm_vcpu *vcpu, diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c index d8288374078e4..67eb82a6c7543 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/processor.c +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c @@ -752,12 +752,21 @@ void vcpu_init_cpuid(struct kvm_vcpu *vcpu, const struct kvm_cpuid2 *cpuid) vcpu_set_cpuid(vcpu); } -void vcpu_set_cpuid_maxphyaddr(struct kvm_vcpu *vcpu, uint8_t maxphyaddr) +void vcpu_set_cpuid_property(struct kvm_vcpu *vcpu, + struct kvm_x86_cpu_property property, + uint32_t value) { - struct kvm_cpuid_entry2 *entry = vcpu_get_cpuid_entry(vcpu, 0x80000008); + struct kvm_cpuid_entry2 *entry; + + entry = __vcpu_get_cpuid_entry(vcpu, property.function, property.index); + + (&entry->eax)[property.reg] &= ~GENMASK(property.hi_bit, property.lo_bit); + (&entry->eax)[property.reg] |= value << property.lo_bit; - entry->eax = (entry->eax & ~0xff) | maxphyaddr; vcpu_set_cpuid(vcpu); + + /* Sanity check that @value doesn't exceed the bounds in any way. */ + TEST_ASSERT_EQ(kvm_cpuid_property(vcpu->cpuid, property), value); } void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, uint32_t function) diff --git a/tools/testing/selftests/kvm/x86_64/smaller_maxphyaddr_emulation_test.c b/tools/testing/selftests/kvm/x86_64/smaller_maxphyaddr_emulation_test.c index 06edf00a97d61..9b89440dff195 100644 --- a/tools/testing/selftests/kvm/x86_64/smaller_maxphyaddr_emulation_test.c +++ b/tools/testing/selftests/kvm/x86_64/smaller_maxphyaddr_emulation_test.c @@ -63,7 +63,7 @@ int main(int argc, char *argv[]) vm_init_descriptor_tables(vm); vcpu_init_descriptor_tables(vcpu); - vcpu_set_cpuid_maxphyaddr(vcpu, MAXPHYADDR); + vcpu_set_cpuid_property(vcpu, X86_PROPERTY_MAX_PHY_ADDR, MAXPHYADDR); rc = kvm_check_cap(KVM_CAP_EXIT_ON_EMULATION_FAILURE); TEST_ASSERT(rc, "KVM_CAP_EXIT_ON_EMULATION_FAILURE is unavailable"); -- cgit 1.2.3-korg From ff76d771251003b28aeac2051cfe72384dff232f Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 9 Jan 2024 15:02:33 -0800 Subject: KVM: selftests: Drop the "name" param from KVM_X86_PMU_FEATURE() Drop the "name" parameter from KVM_X86_PMU_FEATURE(), it's unused and the name is redundant with the macro, i.e. 
it's truly useless.

Reviewed-by: Jim Mattson
Tested-by: Dapeng Mi
Link: https://lore.kernel.org/r/20240109230250.424295-14-seanjc@google.com
Signed-off-by: Sean Christopherson
---
 tools/testing/selftests/kvm/include/x86_64/processor.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
(limited to 'tools')

diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index 932944c4ea01e..4f737d3b893cb 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -290,7 +290,7 @@ struct kvm_x86_cpu_property {
 struct kvm_x86_pmu_feature {
 	struct kvm_x86_cpu_feature anti_feature;
 };
-#define KVM_X86_PMU_FEATURE(name, __bit) \
+#define KVM_X86_PMU_FEATURE(__bit) \
 ({ \
 	struct kvm_x86_pmu_feature feature = { \
 		.anti_feature = KVM_X86_CPU_FEATURE(0xa, 0, EBX, __bit), \
 	}; \
 \
 	feature; \
 })
 
@@ -299,7 +299,7 @@ struct kvm_x86_pmu_feature {
-#define X86_PMU_FEATURE_BRANCH_INSNS_RETIRED KVM_X86_PMU_FEATURE(BRANCH_INSNS_RETIRED, 5)
+#define X86_PMU_FEATURE_BRANCH_INSNS_RETIRED KVM_X86_PMU_FEATURE(5)
 
 static inline unsigned int x86_family(unsigned int eax)
 {
-- cgit 1.2.3-korg


From 370d536322896cc9ad59eb8849ad8e1f9e985953 Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Tue, 9 Jan 2024 15:02:34 -0800
Subject: KVM: selftests: Extend {kvm,this}_pmu_has() to support fixed counters

Extend the kvm_x86_pmu_feature framework to allow querying for fixed
counters via {kvm,this}_pmu_has(). Like architectural events, checking for
a fixed counter annoyingly requires checking multiple CPUID fields, as a
fixed counter exists if:

	FxCtr[i]_is_supported := ECX[i] || (EDX[4:0] > i);

Note, KVM currently doesn't actually support exposing fixed counters via
the bitmask, but that will hopefully change sooner than later, and Intel's
SDM explicitly "recommends" checking both the number of counters and the
mask.

Rename the intermediate "anti_feature" field to simply 'f' since the fixed
counter bitmask (thankfully) doesn't have reversed polarity like the
architectural events bitmask.

Note, ideally the helpers would use BUILD_BUG_ON() to assert on the
incoming register, but the expected usage in PMU tests can't guarantee the
inputs are compile-time constants.

Opportunistically define macros for all of the known architectural events
and fixed counters.

Tested-by: Dapeng Mi
Link: https://lore.kernel.org/r/20240109230250.424295-15-seanjc@google.com
Signed-off-by: Sean Christopherson
---
 .../selftests/kvm/include/x86_64/processor.h | 65 ++++++++++++++++------
 1 file changed, 47 insertions(+), 18 deletions(-)
(limited to 'tools')

diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index 4f737d3b893cb..92d4f8ecc7308 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -282,24 +282,41 @@ struct kvm_x86_cpu_property {
  * that indicates the feature is _not_ supported, and a property that states
  * the length of the bit mask of unsupported features. A feature is supported
  * if the size of the bit mask is larger than the "unavailable" bit, and said
- * bit is not set.
+ * bit is not set. Fixed counters also have bizarre enumeration, but inverted
+ * from arch events for general purpose counters. Fixed counters are supported
+ * if a feature flag is set **OR** the total number of fixed counters is
+ * greater than the index of the counter.
* - * Wrap the "unavailable" feature to simplify checking whether or not a given - * architectural event is supported. + * Wrap the events for general purpose and fixed counters to simplify checking + * whether or not a given architectural event is supported. */ struct kvm_x86_pmu_feature { - struct kvm_x86_cpu_feature anti_feature; + struct kvm_x86_cpu_feature f; }; -#define KVM_X86_PMU_FEATURE(__bit) \ -({ \ - struct kvm_x86_pmu_feature feature = { \ - .anti_feature = KVM_X86_CPU_FEATURE(0xa, 0, EBX, __bit), \ - }; \ - \ - feature; \ +#define KVM_X86_PMU_FEATURE(__reg, __bit) \ +({ \ + struct kvm_x86_pmu_feature feature = { \ + .f = KVM_X86_CPU_FEATURE(0xa, 0, __reg, __bit), \ + }; \ + \ + kvm_static_assert(KVM_CPUID_##__reg == KVM_CPUID_EBX || \ + KVM_CPUID_##__reg == KVM_CPUID_ECX); \ + feature; \ }) -#define X86_PMU_FEATURE_BRANCH_INSNS_RETIRED KVM_X86_PMU_FEATURE(5) +#define X86_PMU_FEATURE_CPU_CYCLES KVM_X86_PMU_FEATURE(EBX, 0) +#define X86_PMU_FEATURE_INSNS_RETIRED KVM_X86_PMU_FEATURE(EBX, 1) +#define X86_PMU_FEATURE_REFERENCE_CYCLES KVM_X86_PMU_FEATURE(EBX, 2) +#define X86_PMU_FEATURE_LLC_REFERENCES KVM_X86_PMU_FEATURE(EBX, 3) +#define X86_PMU_FEATURE_LLC_MISSES KVM_X86_PMU_FEATURE(EBX, 4) +#define X86_PMU_FEATURE_BRANCH_INSNS_RETIRED KVM_X86_PMU_FEATURE(EBX, 5) +#define X86_PMU_FEATURE_BRANCHES_MISPREDICTED KVM_X86_PMU_FEATURE(EBX, 6) +#define X86_PMU_FEATURE_TOPDOWN_SLOTS KVM_X86_PMU_FEATURE(EBX, 7) + +#define X86_PMU_FEATURE_INSNS_RETIRED_FIXED KVM_X86_PMU_FEATURE(ECX, 0) +#define X86_PMU_FEATURE_CPU_CYCLES_FIXED KVM_X86_PMU_FEATURE(ECX, 1) +#define X86_PMU_FEATURE_REFERENCE_TSC_CYCLES_FIXED KVM_X86_PMU_FEATURE(ECX, 2) +#define X86_PMU_FEATURE_TOPDOWN_SLOTS_FIXED KVM_X86_PMU_FEATURE(ECX, 3) static inline unsigned int x86_family(unsigned int eax) { @@ -698,10 +715,16 @@ static __always_inline bool this_cpu_has_p(struct kvm_x86_cpu_property property) static inline bool this_pmu_has(struct kvm_x86_pmu_feature feature) { - uint32_t nr_bits = this_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH); + uint32_t nr_bits; + + if (feature.f.reg == KVM_CPUID_EBX) { + nr_bits = this_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH); + return nr_bits > feature.f.bit && !this_cpu_has(feature.f); + } - return nr_bits > feature.anti_feature.bit && - !this_cpu_has(feature.anti_feature); + GUEST_ASSERT(feature.f.reg == KVM_CPUID_ECX); + nr_bits = this_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS); + return nr_bits > feature.f.bit || this_cpu_has(feature.f); } static __always_inline uint64_t this_cpu_supported_xcr0(void) @@ -917,10 +940,16 @@ static __always_inline bool kvm_cpu_has_p(struct kvm_x86_cpu_property property) static inline bool kvm_pmu_has(struct kvm_x86_pmu_feature feature) { - uint32_t nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH); + uint32_t nr_bits; + + if (feature.f.reg == KVM_CPUID_EBX) { + nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH); + return nr_bits > feature.f.bit && !kvm_cpu_has(feature.f); + } - return nr_bits > feature.anti_feature.bit && - !kvm_cpu_has(feature.anti_feature); + TEST_ASSERT_EQ(feature.f.reg, KVM_CPUID_ECX); + nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS); + return nr_bits > feature.f.bit || kvm_cpu_has(feature.f); } static __always_inline uint64_t kvm_cpu_supported_xcr0(void) -- cgit 1.2.3-korg From e6faa04970575622243d3a20782dde2d5813772d Mon Sep 17 00:00:00 2001 From: Jinrong Liang Date: Tue, 9 Jan 2024 15:02:35 -0800 Subject: KVM: selftests: Add pmu.h and lib/pmu.c for common PMU assets 
Add a PMU library for x86 selftests to help eliminate open-coded event
encodings, and to reduce the amount of copy+paste between PMU selftests.

Use the new common macro definitions in the existing PMU event filter test.

Cc: Aaron Lewis
Suggested-by: Sean Christopherson
Signed-off-by: Jinrong Liang
Co-developed-by: Sean Christopherson
Tested-by: Dapeng Mi
Link: https://lore.kernel.org/r/20240109230250.424295-16-seanjc@google.com
Signed-off-by: Sean Christopherson
---
 tools/testing/selftests/kvm/Makefile | 1 +
 tools/testing/selftests/kvm/include/x86_64/pmu.h | 97 ++++++++++++++
 tools/testing/selftests/kvm/lib/x86_64/pmu.c | 31 +++++
 .../selftests/kvm/x86_64/pmu_event_filter_test.c | 141 +++++++--------------
 4 files changed, 173 insertions(+), 97 deletions(-)
 create mode 100644 tools/testing/selftests/kvm/include/x86_64/pmu.h
 create mode 100644 tools/testing/selftests/kvm/lib/x86_64/pmu.c
(limited to 'tools')

diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 492e937fab006..0f07ce8bd2979 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -36,6 +36,7 @@ LIBKVM_x86_64 += lib/x86_64/apic.c
 LIBKVM_x86_64 += lib/x86_64/handlers.S
 LIBKVM_x86_64 += lib/x86_64/hyperv.c
 LIBKVM_x86_64 += lib/x86_64/memstress.c
+LIBKVM_x86_64 += lib/x86_64/pmu.c
 LIBKVM_x86_64 += lib/x86_64/processor.c
 LIBKVM_x86_64 += lib/x86_64/svm.c
 LIBKVM_x86_64 += lib/x86_64/ucall.c

diff --git a/tools/testing/selftests/kvm/include/x86_64/pmu.h b/tools/testing/selftests/kvm/include/x86_64/pmu.h
new file mode 100644
index 0000000000000..3c10c4dc0ae8f
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/x86_64/pmu.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023, Tencent, Inc.
+ */
+#ifndef SELFTEST_KVM_PMU_H
+#define SELFTEST_KVM_PMU_H
+
+#include <stdint.h>
+
+#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300
+
+/*
+ * Encode an eventsel+umask pair into event-select MSR format. Note, this is
+ * technically AMD's format, as Intel's format only supports 8 bits for the
+ * event selector, i.e. doesn't use bits 24:16 for the selector. But, OR-ing
+ * in '0' is a nop and won't clobber the CMASK.
+ */
+#define RAW_EVENT(eventsel, umask) (((eventsel & 0xf00UL) << 24) | \
+				    ((eventsel) & 0xff) | \
+				    ((umask) & 0xff) << 8)
+
+/*
+ * These are technically Intel's definitions, but except for CMASK (see above),
+ * AMD's layout is compatible with Intel's.
+ */
+#define ARCH_PERFMON_EVENTSEL_EVENT		GENMASK_ULL(7, 0)
+#define ARCH_PERFMON_EVENTSEL_UMASK		GENMASK_ULL(15, 8)
+#define ARCH_PERFMON_EVENTSEL_USR		BIT_ULL(16)
+#define ARCH_PERFMON_EVENTSEL_OS		BIT_ULL(17)
+#define ARCH_PERFMON_EVENTSEL_EDGE		BIT_ULL(18)
+#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL	BIT_ULL(19)
+#define ARCH_PERFMON_EVENTSEL_INT		BIT_ULL(20)
+#define ARCH_PERFMON_EVENTSEL_ANY		BIT_ULL(21)
+#define ARCH_PERFMON_EVENTSEL_ENABLE		BIT_ULL(22)
+#define ARCH_PERFMON_EVENTSEL_INV		BIT_ULL(23)
+#define ARCH_PERFMON_EVENTSEL_CMASK		GENMASK_ULL(31, 24)
+
+/* RDPMC control flags, Intel only. */
+#define INTEL_RDPMC_METRICS	BIT_ULL(29)
+#define INTEL_RDPMC_FIXED	BIT_ULL(30)
+#define INTEL_RDPMC_FAST	BIT_ULL(31)
+
+/* Fixed PMC controls, Intel only. */
+#define FIXED_PMC_GLOBAL_CTRL_ENABLE(_idx)	BIT_ULL((32 + (_idx)))
+
+#define FIXED_PMC_KERNEL	BIT_ULL(0)
+#define FIXED_PMC_USER		BIT_ULL(1)
+#define FIXED_PMC_ANYTHREAD	BIT_ULL(2)
+#define FIXED_PMC_ENABLE_PMI	BIT_ULL(3)
+#define FIXED_PMC_NR_BITS	4
+#define FIXED_PMC_CTRL(_idx, _val)	((_val) << ((_idx) * FIXED_PMC_NR_BITS))
+
+#define PMU_CAP_FW_WRITES	BIT_ULL(13)
+#define PMU_CAP_LBR_FMT		0x3f
+
+#define INTEL_ARCH_CPU_CYCLES			RAW_EVENT(0x3c, 0x00)
+#define INTEL_ARCH_INSTRUCTIONS_RETIRED		RAW_EVENT(0xc0, 0x00)
+#define INTEL_ARCH_REFERENCE_CYCLES		RAW_EVENT(0x3c, 0x01)
+#define INTEL_ARCH_LLC_REFERENCES		RAW_EVENT(0x2e, 0x4f)
+#define INTEL_ARCH_LLC_MISSES			RAW_EVENT(0x2e, 0x41)
+#define INTEL_ARCH_BRANCHES_RETIRED		RAW_EVENT(0xc4, 0x00)
+#define INTEL_ARCH_BRANCHES_MISPREDICTED	RAW_EVENT(0xc5, 0x00)
+#define INTEL_ARCH_TOPDOWN_SLOTS		RAW_EVENT(0xa4, 0x01)
+
+#define AMD_ZEN_CORE_CYCLES			RAW_EVENT(0x76, 0x00)
+#define AMD_ZEN_INSTRUCTIONS_RETIRED		RAW_EVENT(0xc0, 0x00)
+#define AMD_ZEN_BRANCHES_RETIRED		RAW_EVENT(0xc2, 0x00)
+#define AMD_ZEN_BRANCHES_MISPREDICTED		RAW_EVENT(0xc3, 0x00)
+
+/*
+ * Note! The order and thus the index of the architectural events matters as
+ * support for each event is enumerated via CPUID using the index of the event.
+ */
+enum intel_pmu_architectural_events {
+	INTEL_ARCH_CPU_CYCLES_INDEX,
+	INTEL_ARCH_INSTRUCTIONS_RETIRED_INDEX,
+	INTEL_ARCH_REFERENCE_CYCLES_INDEX,
+	INTEL_ARCH_LLC_REFERENCES_INDEX,
+	INTEL_ARCH_LLC_MISSES_INDEX,
+	INTEL_ARCH_BRANCHES_RETIRED_INDEX,
+	INTEL_ARCH_BRANCHES_MISPREDICTED_INDEX,
+	INTEL_ARCH_TOPDOWN_SLOTS_INDEX,
+	NR_INTEL_ARCH_EVENTS,
+};
+
+enum amd_pmu_zen_events {
+	AMD_ZEN_CORE_CYCLES_INDEX,
+	AMD_ZEN_INSTRUCTIONS_INDEX,
+	AMD_ZEN_BRANCHES_INDEX,
+	AMD_ZEN_BRANCH_MISSES_INDEX,
+	NR_AMD_ZEN_EVENTS,
+};
+
+extern const uint64_t intel_pmu_arch_events[];
+extern const uint64_t amd_pmu_zen_events[];
+
+#endif /* SELFTEST_KVM_PMU_H */

diff --git a/tools/testing/selftests/kvm/lib/x86_64/pmu.c b/tools/testing/selftests/kvm/lib/x86_64/pmu.c
new file mode 100644
index 0000000000000..f31f0427c17cb
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/x86_64/pmu.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023, Tencent, Inc.
+ */
+
+#include <stdint.h>
+
+#include <linux/kernel.h>
+
+#include "kvm_util.h"
+#include "pmu.h"
+
+const uint64_t intel_pmu_arch_events[] = {
+	INTEL_ARCH_CPU_CYCLES,
+	INTEL_ARCH_INSTRUCTIONS_RETIRED,
+	INTEL_ARCH_REFERENCE_CYCLES,
+	INTEL_ARCH_LLC_REFERENCES,
+	INTEL_ARCH_LLC_MISSES,
+	INTEL_ARCH_BRANCHES_RETIRED,
+	INTEL_ARCH_BRANCHES_MISPREDICTED,
+	INTEL_ARCH_TOPDOWN_SLOTS,
+};
+kvm_static_assert(ARRAY_SIZE(intel_pmu_arch_events) == NR_INTEL_ARCH_EVENTS);
+
+const uint64_t amd_pmu_zen_events[] = {
+	AMD_ZEN_CORE_CYCLES,
+	AMD_ZEN_INSTRUCTIONS_RETIRED,
+	AMD_ZEN_BRANCHES_RETIRED,
+	AMD_ZEN_BRANCHES_MISPREDICTED,
+};
+kvm_static_assert(ARRAY_SIZE(amd_pmu_zen_events) == NR_AMD_ZEN_EVENTS);

diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
index 283cc55597a4f..7ec9fbed92e07 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
@@ -11,72 +11,18 @@
  */
 #define _GNU_SOURCE /* for program_invocation_short_name */
-#include "test_util.h"
+
 #include "kvm_util.h"
+#include "pmu.h"
 #include "processor.h"
-
-/*
- * In lieu of copying perf_event.h into tools...
- */ -#define ARCH_PERFMON_EVENTSEL_OS (1ULL << 17) -#define ARCH_PERFMON_EVENTSEL_ENABLE (1ULL << 22) - -/* End of stuff taken from perf_event.h. */ - -/* Oddly, this isn't in perf_event.h. */ -#define ARCH_PERFMON_BRANCHES_RETIRED 5 +#include "test_util.h" #define NUM_BRANCHES 42 -#define INTEL_PMC_IDX_FIXED 32 - -/* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */ -#define MAX_FILTER_EVENTS 300 #define MAX_TEST_EVENTS 10 #define PMU_EVENT_FILTER_INVALID_ACTION (KVM_PMU_EVENT_DENY + 1) #define PMU_EVENT_FILTER_INVALID_FLAGS (KVM_PMU_EVENT_FLAGS_VALID_MASK << 1) -#define PMU_EVENT_FILTER_INVALID_NEVENTS (MAX_FILTER_EVENTS + 1) - -/* - * This is how the event selector and unit mask are stored in an AMD - * core performance event-select register. Intel's format is similar, - * but the event selector is only 8 bits. - */ -#define EVENT(select, umask) ((select & 0xf00UL) << 24 | (select & 0xff) | \ - (umask & 0xff) << 8) - -/* - * "Branch instructions retired", from the Intel SDM, volume 3, - * "Pre-defined Architectural Performance Events." - */ - -#define INTEL_BR_RETIRED EVENT(0xc4, 0) - -/* - * "Retired branch instructions", from Processor Programming Reference - * (PPR) for AMD Family 17h Model 01h, Revision B1 Processors, - * Preliminary Processor Programming Reference (PPR) for AMD Family - * 17h Model 31h, Revision B0 Processors, and Preliminary Processor - * Programming Reference (PPR) for AMD Family 19h Model 01h, Revision - * B1 Processors Volume 1 of 2. - */ - -#define AMD_ZEN_BR_RETIRED EVENT(0xc2, 0) - - -/* - * "Retired instructions", from Processor Programming Reference - * (PPR) for AMD Family 17h Model 01h, Revision B1 Processors, - * Preliminary Processor Programming Reference (PPR) for AMD Family - * 17h Model 31h, Revision B0 Processors, and Preliminary Processor - * Programming Reference (PPR) for AMD Family 19h Model 01h, Revision - * B1 Processors Volume 1 of 2. - * --- and --- - * "Instructions retired", from the Intel SDM, volume 3, - * "Pre-defined Architectural Performance Events." - */ - -#define INST_RETIRED EVENT(0xc0, 0) +#define PMU_EVENT_FILTER_INVALID_NEVENTS (KVM_PMU_EVENT_FILTER_MAX_EVENTS + 1) struct __kvm_pmu_event_filter { __u32 action; @@ -84,26 +30,28 @@ struct __kvm_pmu_event_filter { __u32 fixed_counter_bitmap; __u32 flags; __u32 pad[4]; - __u64 events[MAX_FILTER_EVENTS]; + __u64 events[KVM_PMU_EVENT_FILTER_MAX_EVENTS]; }; /* - * This event list comprises Intel's eight architectural events plus - * AMD's "retired branch instructions" for Zen[123] (and possibly - * other AMD CPUs). + * This event list comprises Intel's known architectural events, plus AMD's + * "retired branch instructions" for Zen1-Zen3 (and* possibly other AMD CPUs). + * Note, AMD and Intel use the same encoding for instructions retired. 
*/ +kvm_static_assert(INTEL_ARCH_INSTRUCTIONS_RETIRED == AMD_ZEN_INSTRUCTIONS_RETIRED); + static const struct __kvm_pmu_event_filter base_event_filter = { .nevents = ARRAY_SIZE(base_event_filter.events), .events = { - EVENT(0x3c, 0), - INST_RETIRED, - EVENT(0x3c, 1), - EVENT(0x2e, 0x4f), - EVENT(0x2e, 0x41), - EVENT(0xc4, 0), - EVENT(0xc5, 0), - EVENT(0xa4, 1), - AMD_ZEN_BR_RETIRED, + INTEL_ARCH_CPU_CYCLES, + INTEL_ARCH_INSTRUCTIONS_RETIRED, + INTEL_ARCH_REFERENCE_CYCLES, + INTEL_ARCH_LLC_REFERENCES, + INTEL_ARCH_LLC_MISSES, + INTEL_ARCH_BRANCHES_RETIRED, + INTEL_ARCH_BRANCHES_MISPREDICTED, + INTEL_ARCH_TOPDOWN_SLOTS, + AMD_ZEN_BRANCHES_RETIRED, }, }; @@ -165,9 +113,9 @@ static void intel_guest_code(void) for (;;) { wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0); wrmsr(MSR_P6_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE | - ARCH_PERFMON_EVENTSEL_OS | INTEL_BR_RETIRED); + ARCH_PERFMON_EVENTSEL_OS | INTEL_ARCH_BRANCHES_RETIRED); wrmsr(MSR_P6_EVNTSEL1, ARCH_PERFMON_EVENTSEL_ENABLE | - ARCH_PERFMON_EVENTSEL_OS | INST_RETIRED); + ARCH_PERFMON_EVENTSEL_OS | INTEL_ARCH_INSTRUCTIONS_RETIRED); wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0x3); run_and_measure_loop(MSR_IA32_PMC0); @@ -189,9 +137,9 @@ static void amd_guest_code(void) for (;;) { wrmsr(MSR_K7_EVNTSEL0, 0); wrmsr(MSR_K7_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE | - ARCH_PERFMON_EVENTSEL_OS | AMD_ZEN_BR_RETIRED); + ARCH_PERFMON_EVENTSEL_OS | AMD_ZEN_BRANCHES_RETIRED); wrmsr(MSR_K7_EVNTSEL1, ARCH_PERFMON_EVENTSEL_ENABLE | - ARCH_PERFMON_EVENTSEL_OS | INST_RETIRED); + ARCH_PERFMON_EVENTSEL_OS | AMD_ZEN_INSTRUCTIONS_RETIRED); run_and_measure_loop(MSR_K7_PERFCTR0); GUEST_SYNC(0); @@ -312,7 +260,7 @@ static void test_amd_deny_list(struct kvm_vcpu *vcpu) .action = KVM_PMU_EVENT_DENY, .nevents = 1, .events = { - EVENT(0x1C2, 0), + RAW_EVENT(0x1C2, 0), }, }; @@ -347,9 +295,9 @@ static void test_not_member_deny_list(struct kvm_vcpu *vcpu) f.action = KVM_PMU_EVENT_DENY; - remove_event(&f, INST_RETIRED); - remove_event(&f, INTEL_BR_RETIRED); - remove_event(&f, AMD_ZEN_BR_RETIRED); + remove_event(&f, INTEL_ARCH_INSTRUCTIONS_RETIRED); + remove_event(&f, INTEL_ARCH_BRANCHES_RETIRED); + remove_event(&f, AMD_ZEN_BRANCHES_RETIRED); test_with_filter(vcpu, &f); ASSERT_PMC_COUNTING_INSTRUCTIONS(); @@ -361,9 +309,9 @@ static void test_not_member_allow_list(struct kvm_vcpu *vcpu) f.action = KVM_PMU_EVENT_ALLOW; - remove_event(&f, INST_RETIRED); - remove_event(&f, INTEL_BR_RETIRED); - remove_event(&f, AMD_ZEN_BR_RETIRED); + remove_event(&f, INTEL_ARCH_INSTRUCTIONS_RETIRED); + remove_event(&f, INTEL_ARCH_BRANCHES_RETIRED); + remove_event(&f, AMD_ZEN_BRANCHES_RETIRED); test_with_filter(vcpu, &f); ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS(); @@ -452,9 +400,9 @@ static bool use_amd_pmu(void) * - Sapphire Rapids, Ice Lake, Cascade Lake, Skylake. */ #define MEM_INST_RETIRED 0xD0 -#define MEM_INST_RETIRED_LOAD EVENT(MEM_INST_RETIRED, 0x81) -#define MEM_INST_RETIRED_STORE EVENT(MEM_INST_RETIRED, 0x82) -#define MEM_INST_RETIRED_LOAD_STORE EVENT(MEM_INST_RETIRED, 0x83) +#define MEM_INST_RETIRED_LOAD RAW_EVENT(MEM_INST_RETIRED, 0x81) +#define MEM_INST_RETIRED_STORE RAW_EVENT(MEM_INST_RETIRED, 0x82) +#define MEM_INST_RETIRED_LOAD_STORE RAW_EVENT(MEM_INST_RETIRED, 0x83) static bool supports_event_mem_inst_retired(void) { @@ -486,9 +434,9 @@ static bool supports_event_mem_inst_retired(void) * B1 Processors Volume 1 of 2. 
*/ #define LS_DISPATCH 0x29 -#define LS_DISPATCH_LOAD EVENT(LS_DISPATCH, BIT(0)) -#define LS_DISPATCH_STORE EVENT(LS_DISPATCH, BIT(1)) -#define LS_DISPATCH_LOAD_STORE EVENT(LS_DISPATCH, BIT(2)) +#define LS_DISPATCH_LOAD RAW_EVENT(LS_DISPATCH, BIT(0)) +#define LS_DISPATCH_STORE RAW_EVENT(LS_DISPATCH, BIT(1)) +#define LS_DISPATCH_LOAD_STORE RAW_EVENT(LS_DISPATCH, BIT(2)) #define INCLUDE_MASKED_ENTRY(event_select, mask, match) \ KVM_PMU_ENCODE_MASKED_ENTRY(event_select, mask, match, false) @@ -729,14 +677,14 @@ static void add_dummy_events(uint64_t *events, int nevents) static void test_masked_events(struct kvm_vcpu *vcpu) { - int nevents = MAX_FILTER_EVENTS - MAX_TEST_EVENTS; - uint64_t events[MAX_FILTER_EVENTS]; + int nevents = KVM_PMU_EVENT_FILTER_MAX_EVENTS - MAX_TEST_EVENTS; + uint64_t events[KVM_PMU_EVENT_FILTER_MAX_EVENTS]; /* Run the test cases against a sparse PMU event filter. */ run_masked_events_tests(vcpu, events, 0); /* Run the test cases against a dense PMU event filter. */ - add_dummy_events(events, MAX_FILTER_EVENTS); + add_dummy_events(events, KVM_PMU_EVENT_FILTER_MAX_EVENTS); run_masked_events_tests(vcpu, events, nevents); } @@ -809,20 +757,19 @@ static void test_filter_ioctl(struct kvm_vcpu *vcpu) TEST_ASSERT(!r, "Masking non-existent fixed counters should be allowed"); } -static void intel_run_fixed_counter_guest_code(uint8_t fixed_ctr_idx) +static void intel_run_fixed_counter_guest_code(uint8_t idx) { for (;;) { wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0); - wrmsr(MSR_CORE_PERF_FIXED_CTR0 + fixed_ctr_idx, 0); + wrmsr(MSR_CORE_PERF_FIXED_CTR0 + idx, 0); /* Only OS_EN bit is enabled for fixed counter[idx]. */ - wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, BIT_ULL(4 * fixed_ctr_idx)); - wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, - BIT_ULL(INTEL_PMC_IDX_FIXED + fixed_ctr_idx)); + wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, FIXED_PMC_CTRL(idx, FIXED_PMC_KERNEL)); + wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, FIXED_PMC_GLOBAL_CTRL_ENABLE(idx)); __asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES})); wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0); - GUEST_SYNC(rdmsr(MSR_CORE_PERF_FIXED_CTR0 + fixed_ctr_idx)); + GUEST_SYNC(rdmsr(MSR_CORE_PERF_FIXED_CTR0 + idx)); } } -- cgit 1.2.3-korg From 4f1bd6b16074aa5f9a0633a77fc87f471888a588 Mon Sep 17 00:00:00 2001 From: Jinrong Liang Date: Tue, 9 Jan 2024 15:02:36 -0800 Subject: KVM: selftests: Test Intel PMU architectural events on gp counters Add test cases to verify that Intel's Architectural PMU events work as expected when they are available according to guest CPUID. Iterate over a range of sane PMU versions, with and without full-width writes enabled, and over interesting combinations of lengths/masks for the bit vector that enumerates unavailable events. Test up to vPMU version 5, i.e. the current architectural max. KVM only officially supports up to version 2, but the behavior of the counters is backwards compatible, i.e. KVM shouldn't do something completely different for a higher, architecturally-defined vPMU version. Verify KVM behavior against the effective vPMU version, e.g. advertising vPMU 5 when KVM only supports vPMU 2 shouldn't magically unlock vPMU 5 features. According to Intel SDM, the number of architectural events is reported through CPUID.0AH:EAX[31:24] and the architectural event x is supported if EBX[x]=0 && EAX[31:24]>x. Handcode the entirety of the measured section so that the test can precisely assert on the number of instructions and branches retired. 
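For reference, a simplified sketch of the handcoded measured section (the
asm blob in the diff below is the authoritative version; "lo", "hi" and
"msr" are placeholder names for the WRMSR inputs):

	__asm__ __volatile__("wrmsr\n\t"		/* enable the PMC */
			     "mov $10, %%ecx\n\t"	/* NUM_BRANCHES */
			     "loop .\n\t"		/* 10 insns, 10 branches */
			     "mov %%edi, %%ecx\n\t"
			     "xor %%eax, %%eax\n\t"
			     "xor %%edx, %%edx\n\t"
			     "wrmsr"			/* disable the PMC */
			     :: "a"(lo), "d"(hi), "c"(msr), "D"(msr));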
Co-developed-by: Like Xu
Signed-off-by: Like Xu
Signed-off-by: Jinrong Liang
Co-developed-by: Sean Christopherson
Tested-by: Dapeng Mi
Link: https://lore.kernel.org/r/20240109230250.424295-17-seanjc@google.com
Signed-off-by: Sean Christopherson
---
 tools/testing/selftests/kvm/Makefile | 1 +
 .../selftests/kvm/x86_64/pmu_counters_test.c | 321 +++++++++++++++++++++
 2 files changed, 322 insertions(+)
 create mode 100644 tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
(limited to 'tools')

diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 0f07ce8bd2979..ce58098d80fda 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -81,6 +81,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/kvm_pv_test
 TEST_GEN_PROGS_x86_64 += x86_64/monitor_mwait_test
 TEST_GEN_PROGS_x86_64 += x86_64/nested_exceptions_test
 TEST_GEN_PROGS_x86_64 += x86_64/platform_info_test
+TEST_GEN_PROGS_x86_64 += x86_64/pmu_counters_test
 TEST_GEN_PROGS_x86_64 += x86_64/pmu_event_filter_test
 TEST_GEN_PROGS_x86_64 += x86_64/private_mem_conversions_test
 TEST_GEN_PROGS_x86_64 += x86_64/private_mem_kvm_exits_test

diff --git a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
new file mode 100644
index 0000000000000..5b8687bb46398
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023, Tencent, Inc.
+ */
+
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <x86intrin.h>
+
+#include "pmu.h"
+#include "processor.h"
+
+/* Number of LOOP instructions for the guest measurement payload. */
+#define NUM_BRANCHES 10
+/*
+ * Number of "extra" instructions that will be counted, i.e. the number of
+ * instructions that are needed to set up the loop and then disable the
+ * counter. 2 MOV, 2 XOR, 1 WRMSR.
+ */
+#define NUM_EXTRA_INSNS 5
+#define NUM_INSNS_RETIRED (NUM_BRANCHES + NUM_EXTRA_INSNS)
+
+static uint8_t kvm_pmu_version;
+static bool kvm_has_perf_caps;
+
+static struct kvm_vm *pmu_vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
+						  void *guest_code,
+						  uint8_t pmu_version,
+						  uint64_t perf_capabilities)
+{
+	struct kvm_vm *vm;
+
+	vm = vm_create_with_one_vcpu(vcpu, guest_code);
+	vm_init_descriptor_tables(vm);
+	vcpu_init_descriptor_tables(*vcpu);
+
+	sync_global_to_guest(vm, kvm_pmu_version);
+
+	/*
+	 * Set PERF_CAPABILITIES before PMU version as KVM disallows enabling
+	 * features via PERF_CAPABILITIES if the guest doesn't have a vPMU.
+	 */
+	if (kvm_has_perf_caps)
+		vcpu_set_msr(*vcpu, MSR_IA32_PERF_CAPABILITIES, perf_capabilities);
+
+	vcpu_set_cpuid_property(*vcpu, X86_PROPERTY_PMU_VERSION, pmu_version);
+	return vm;
+}
+
+static void run_vcpu(struct kvm_vcpu *vcpu)
+{
+	struct ucall uc;
+
+	do {
+		vcpu_run(vcpu);
+		switch (get_ucall(vcpu, &uc)) {
+		case UCALL_SYNC:
+			break;
+		case UCALL_ABORT:
+			REPORT_GUEST_ASSERT(uc);
+			break;
+		case UCALL_PRINTF:
+			pr_info("%s", uc.buffer);
+			break;
+		case UCALL_DONE:
+			break;
+		default:
+			TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
+		}
+	} while (uc.cmd != UCALL_DONE);
+}
+
+static uint8_t guest_get_pmu_version(void)
+{
+	/*
+	 * Return the effective PMU version, i.e. the minimum between what KVM
+	 * supports and what is enumerated to the guest.
+	 * The host deliberately advertises a PMU version to the guest beyond
+	 * what is actually supported by KVM to verify KVM doesn't freak out
+	 * and do something bizarre with an architecturally valid, but
+	 * unsupported, version.
+	 */
+	return min_t(uint8_t, kvm_pmu_version, this_cpu_property(X86_PROPERTY_PMU_VERSION));
+}
+
+/*
+ * If an architectural event is supported and guaranteed to generate at least
+ * one "hit", assert that its count is non-zero. If an event isn't supported or
+ * the test can't guarantee the associated action will occur, then all bets are
+ * off regarding the count, i.e. no checks can be done.
+ *
+ * Sanity check that in all cases, the event doesn't count when it's disabled,
+ * and that KVM correctly emulates the write of an arbitrary value.
+ */
+static void guest_assert_event_count(uint8_t idx,
+				     struct kvm_x86_pmu_feature event,
+				     uint32_t pmc, uint32_t pmc_msr)
+{
+	uint64_t count;
+
+	count = _rdpmc(pmc);
+	if (!this_pmu_has(event))
+		goto sanity_checks;
+
+	switch (idx) {
+	case INTEL_ARCH_INSTRUCTIONS_RETIRED_INDEX:
+		GUEST_ASSERT_EQ(count, NUM_INSNS_RETIRED);
+		break;
+	case INTEL_ARCH_BRANCHES_RETIRED_INDEX:
+		GUEST_ASSERT_EQ(count, NUM_BRANCHES);
+		break;
+	case INTEL_ARCH_CPU_CYCLES_INDEX:
+	case INTEL_ARCH_REFERENCE_CYCLES_INDEX:
+		GUEST_ASSERT_NE(count, 0);
+		break;
+	default:
+		break;
+	}
+
+sanity_checks:
+	__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
+	GUEST_ASSERT_EQ(_rdpmc(pmc), count);
+
+	wrmsr(pmc_msr, 0xdead);
+	GUEST_ASSERT_EQ(_rdpmc(pmc), 0xdead);
+}
+
+static void __guest_test_arch_event(uint8_t idx, struct kvm_x86_pmu_feature event,
+				    uint32_t pmc, uint32_t pmc_msr,
+				    uint32_t ctrl_msr, uint64_t ctrl_msr_value)
+{
+	wrmsr(pmc_msr, 0);
+
+	/*
+	 * Enable and disable the PMC in a monolithic asm blob to ensure that
+	 * the compiler can't insert _any_ code into the measured sequence.
+	 * Note, ECX doesn't need to be clobbered as the input value, @pmc_msr,
+	 * is restored before the end of the sequence.
+	 */
+	__asm__ __volatile__("wrmsr\n\t"
+			     "mov $" __stringify(NUM_BRANCHES) ", %%ecx\n\t"
+			     "loop .\n\t"
+			     "mov %%edi, %%ecx\n\t"
+			     "xor %%eax, %%eax\n\t"
+			     "xor %%edx, %%edx\n\t"
+			     "wrmsr\n\t"
+			     :: "a"((uint32_t)ctrl_msr_value),
+				"d"(ctrl_msr_value >> 32),
+				"c"(ctrl_msr), "D"(ctrl_msr)
+	);
+
+	guest_assert_event_count(idx, event, pmc, pmc_msr);
+}
+
+static void guest_test_arch_event(uint8_t idx)
+{
+	const struct {
+		struct kvm_x86_pmu_feature gp_event;
+	} intel_event_to_feature[] = {
+		[INTEL_ARCH_CPU_CYCLES_INDEX]		 = { X86_PMU_FEATURE_CPU_CYCLES },
+		[INTEL_ARCH_INSTRUCTIONS_RETIRED_INDEX]	 = { X86_PMU_FEATURE_INSNS_RETIRED },
+		[INTEL_ARCH_REFERENCE_CYCLES_INDEX]	 = { X86_PMU_FEATURE_REFERENCE_CYCLES },
+		[INTEL_ARCH_LLC_REFERENCES_INDEX]	 = { X86_PMU_FEATURE_LLC_REFERENCES },
+		[INTEL_ARCH_LLC_MISSES_INDEX]		 = { X86_PMU_FEATURE_LLC_MISSES },
+		[INTEL_ARCH_BRANCHES_RETIRED_INDEX]	 = { X86_PMU_FEATURE_BRANCH_INSNS_RETIRED },
+		[INTEL_ARCH_BRANCHES_MISPREDICTED_INDEX] = { X86_PMU_FEATURE_BRANCHES_MISPREDICTED },
+		[INTEL_ARCH_TOPDOWN_SLOTS_INDEX]	 = { X86_PMU_FEATURE_TOPDOWN_SLOTS },
+	};
+
+	uint32_t nr_gp_counters = this_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS);
+	uint32_t pmu_version = guest_get_pmu_version();
+	/* PERF_GLOBAL_CTRL exists only for Architectural PMU Version 2+. */
+	bool guest_has_perf_global_ctrl = pmu_version >= 2;
+	struct kvm_x86_pmu_feature gp_event;
+	uint32_t base_pmc_msr;
+	unsigned int i;
+
+	/* The host side shouldn't invoke this without a guest PMU. */
+	GUEST_ASSERT(pmu_version);
+
+	if (this_cpu_has(X86_FEATURE_PDCM) &&
+	    rdmsr(MSR_IA32_PERF_CAPABILITIES) & PMU_CAP_FW_WRITES)
+		base_pmc_msr = MSR_IA32_PMC0;
+	else
+		base_pmc_msr = MSR_IA32_PERFCTR0;
+
+	gp_event = intel_event_to_feature[idx].gp_event;
+	GUEST_ASSERT_EQ(idx, gp_event.f.bit);
+
+	GUEST_ASSERT(nr_gp_counters);
+
+	for (i = 0; i < nr_gp_counters; i++) {
+		uint64_t eventsel = ARCH_PERFMON_EVENTSEL_OS |
+				    ARCH_PERFMON_EVENTSEL_ENABLE |
+				    intel_pmu_arch_events[idx];
+
+		wrmsr(MSR_P6_EVNTSEL0 + i, 0);
+		if (guest_has_perf_global_ctrl)
+			wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, BIT_ULL(i));
+
+		__guest_test_arch_event(idx, gp_event, i, base_pmc_msr + i,
+					MSR_P6_EVNTSEL0 + i, eventsel);
+	}
+}
+
+static void guest_test_arch_events(void)
+{
+	uint8_t i;
+
+	for (i = 0; i < NR_INTEL_ARCH_EVENTS; i++)
+		guest_test_arch_event(i);
+
+	GUEST_DONE();
+}
+
+static void test_arch_events(uint8_t pmu_version, uint64_t perf_capabilities,
+			     uint8_t length, uint8_t unavailable_mask)
+{
+	struct kvm_vcpu *vcpu;
+	struct kvm_vm *vm;
+
+	/* Testing arch events requires a vPMU (there are no negative tests). */
+	if (!pmu_version)
+		return;
+
+	vm = pmu_vm_create_with_one_vcpu(&vcpu, guest_test_arch_events,
+					 pmu_version, perf_capabilities);
+
+	vcpu_set_cpuid_property(vcpu, X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH,
+				length);
+	vcpu_set_cpuid_property(vcpu, X86_PROPERTY_PMU_EVENTS_MASK,
+				unavailable_mask);
+
+	run_vcpu(vcpu);
+
+	kvm_vm_free(vm);
+}
+
+static void test_intel_counters(void)
+{
+	uint8_t nr_arch_events = kvm_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
+	uint8_t pmu_version = kvm_cpu_property(X86_PROPERTY_PMU_VERSION);
+	unsigned int i;
+	uint8_t v, j;
+	uint32_t k;
+
+	const uint64_t perf_caps[] = {
+		0,
+		PMU_CAP_FW_WRITES,
+	};
+
+	/*
+	 * Test up to PMU v5, which is the current maximum version defined by
+	 * Intel, i.e. is the last version that is guaranteed to be backwards
+	 * compatible with KVM's existing behavior.
+	 */
+	uint8_t max_pmu_version = max_t(typeof(pmu_version), pmu_version, 5);
+
+	/*
+	 * Detect the existence of events that aren't supported by selftests.
+	 * This will (obviously) fail any time the kernel adds support for a
+	 * new event, but it's worth paying that price to keep the test fresh.
+	 */
+	TEST_ASSERT(nr_arch_events <= NR_INTEL_ARCH_EVENTS,
+		    "New architectural event(s) detected; please update this test (length = %u, mask = %x)",
+		    nr_arch_events, kvm_cpu_property(X86_PROPERTY_PMU_EVENTS_MASK));
+
+	/*
+	 * Force iterating over known arch events regardless of whether or not
+	 * KVM/hardware supports a given event.
+	 */
+	nr_arch_events = max_t(typeof(nr_arch_events), nr_arch_events, NR_INTEL_ARCH_EVENTS);
+
+	for (v = 0; v <= max_pmu_version; v++) {
+		for (i = 0; i < ARRAY_SIZE(perf_caps); i++) {
+			if (!kvm_has_perf_caps && perf_caps[i])
+				continue;
+
+			pr_info("Testing arch events, PMU version %u, perf_caps = %lx\n",
+				v, perf_caps[i]);
+			/*
+			 * To keep the total runtime reasonable, test every
+			 * possible non-zero, non-reserved bitmap combination
+			 * only with the native PMU version and the full bit
+			 * vector length.
+			 */
+			if (v == pmu_version) {
+				for (k = 1; k < (BIT(nr_arch_events) - 1); k++)
+					test_arch_events(v, perf_caps[i], nr_arch_events, k);
+			}
+			/*
+			 * Test single bits for all PMU versions and lengths up
+			 * to the number of events + 1 (to verify KVM doesn't do
+			 * weird things if the guest length is greater than the
+			 * host length). Explicitly test a mask of '0' and all
+			 * ones, i.e. all events being available and unavailable.
+ */ + for (j = 0; j <= nr_arch_events + 1; j++) { + test_arch_events(v, perf_caps[i], j, 0); + test_arch_events(v, perf_caps[i], j, 0xff); + + for (k = 0; k < nr_arch_events; k++) + test_arch_events(v, perf_caps[i], j, BIT(k)); + } + } + } +} + +int main(int argc, char *argv[]) +{ + TEST_REQUIRE(get_kvm_param_bool("enable_pmu")); + + TEST_REQUIRE(host_cpu_is_intel); + TEST_REQUIRE(kvm_cpu_has_p(X86_PROPERTY_PMU_VERSION)); + TEST_REQUIRE(kvm_cpu_property(X86_PROPERTY_PMU_VERSION) > 0); + + kvm_pmu_version = kvm_cpu_property(X86_PROPERTY_PMU_VERSION); + kvm_has_perf_caps = kvm_cpu_has(X86_FEATURE_PDCM); + + test_intel_counters(); + + return 0; +} -- cgit 1.2.3-korg From 3e26b825f87ddfa610e069ff3f668b942d8642bb Mon Sep 17 00:00:00 2001 From: Jinrong Liang Date: Tue, 9 Jan 2024 15:02:37 -0800 Subject: KVM: selftests: Test Intel PMU architectural events on fixed counters Extend the PMU counters test to validate architectural events using fixed counters. The core logic is largely the same, the biggest difference being that if a fixed counter exists, its associated event is available (the SDM doesn't explicitly state this to be true, but it's KVM's ABI and letting software program a fixed counter that doesn't actually count would be quite bizarre). Note, fixed counters rely on PERF_GLOBAL_CTRL. Reviewed-by: Jim Mattson Reviewed-by: Dapeng Mi Co-developed-by: Like Xu Signed-off-by: Like Xu Signed-off-by: Jinrong Liang Co-developed-by: Sean Christopherson Tested-by: Dapeng Mi Link: https://lore.kernel.org/r/20240109230250.424295-18-seanjc@google.com Signed-off-by: Sean Christopherson --- .../selftests/kvm/x86_64/pmu_counters_test.c | 54 ++++++++++++++++++---- 1 file changed, 45 insertions(+), 9 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c index 5b8687bb46398..663e8fbe7ff8f 100644 --- a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c +++ b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c @@ -150,26 +150,46 @@ static void __guest_test_arch_event(uint8_t idx, struct kvm_x86_pmu_feature even guest_assert_event_count(idx, event, pmc, pmc_msr); } +#define X86_PMU_FEATURE_NULL \ +({ \ + struct kvm_x86_pmu_feature feature = {}; \ + \ + feature; \ +}) + +static bool pmu_is_null_feature(struct kvm_x86_pmu_feature event) +{ + return !(*(u64 *)&event); +} + static void guest_test_arch_event(uint8_t idx) { const struct { struct kvm_x86_pmu_feature gp_event; + struct kvm_x86_pmu_feature fixed_event; } intel_event_to_feature[] = { - [INTEL_ARCH_CPU_CYCLES_INDEX] = { X86_PMU_FEATURE_CPU_CYCLES }, - [INTEL_ARCH_INSTRUCTIONS_RETIRED_INDEX] = { X86_PMU_FEATURE_INSNS_RETIRED }, - [INTEL_ARCH_REFERENCE_CYCLES_INDEX] = { X86_PMU_FEATURE_REFERENCE_CYCLES }, - [INTEL_ARCH_LLC_REFERENCES_INDEX] = { X86_PMU_FEATURE_LLC_REFERENCES }, - [INTEL_ARCH_LLC_MISSES_INDEX] = { X86_PMU_FEATURE_LLC_MISSES }, - [INTEL_ARCH_BRANCHES_RETIRED_INDEX] = { X86_PMU_FEATURE_BRANCH_INSNS_RETIRED }, - [INTEL_ARCH_BRANCHES_MISPREDICTED_INDEX] = { X86_PMU_FEATURE_BRANCHES_MISPREDICTED }, - [INTEL_ARCH_TOPDOWN_SLOTS_INDEX] = { X86_PMU_FEATURE_TOPDOWN_SLOTS }, + [INTEL_ARCH_CPU_CYCLES_INDEX] = { X86_PMU_FEATURE_CPU_CYCLES, X86_PMU_FEATURE_CPU_CYCLES_FIXED }, + [INTEL_ARCH_INSTRUCTIONS_RETIRED_INDEX] = { X86_PMU_FEATURE_INSNS_RETIRED, X86_PMU_FEATURE_INSNS_RETIRED_FIXED }, + /* + * Note, the fixed counter for reference cycles is NOT the same + * as the general purpose architectural event. 
The fixed counter + * explicitly counts at the same frequency as the TSC, whereas + * the GP event counts at a fixed, but uarch specific, frequency. + * Bundle them here for simplicity. + */ + [INTEL_ARCH_REFERENCE_CYCLES_INDEX] = { X86_PMU_FEATURE_REFERENCE_CYCLES, X86_PMU_FEATURE_REFERENCE_TSC_CYCLES_FIXED }, + [INTEL_ARCH_LLC_REFERENCES_INDEX] = { X86_PMU_FEATURE_LLC_REFERENCES, X86_PMU_FEATURE_NULL }, + [INTEL_ARCH_LLC_MISSES_INDEX] = { X86_PMU_FEATURE_LLC_MISSES, X86_PMU_FEATURE_NULL }, + [INTEL_ARCH_BRANCHES_RETIRED_INDEX] = { X86_PMU_FEATURE_BRANCH_INSNS_RETIRED, X86_PMU_FEATURE_NULL }, + [INTEL_ARCH_BRANCHES_MISPREDICTED_INDEX] = { X86_PMU_FEATURE_BRANCHES_MISPREDICTED, X86_PMU_FEATURE_NULL }, + [INTEL_ARCH_TOPDOWN_SLOTS_INDEX] = { X86_PMU_FEATURE_TOPDOWN_SLOTS, X86_PMU_FEATURE_TOPDOWN_SLOTS_FIXED }, }; uint32_t nr_gp_counters = this_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS); uint32_t pmu_version = guest_get_pmu_version(); /* PERF_GLOBAL_CTRL exists only for Architectural PMU Version 2+. */ bool guest_has_perf_global_ctrl = pmu_version >= 2; - struct kvm_x86_pmu_feature gp_event; + struct kvm_x86_pmu_feature gp_event, fixed_event; uint32_t base_pmc_msr; unsigned int i; @@ -199,6 +219,22 @@ static void guest_test_arch_event(uint8_t idx) __guest_test_arch_event(idx, gp_event, i, base_pmc_msr + i, MSR_P6_EVNTSEL0 + i, eventsel); } + + if (!guest_has_perf_global_ctrl) + return; + + fixed_event = intel_event_to_feature[idx].fixed_event; + if (pmu_is_null_feature(fixed_event) || !this_pmu_has(fixed_event)) + return; + + i = fixed_event.f.bit; + + wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, FIXED_PMC_CTRL(i, FIXED_PMC_KERNEL)); + + __guest_test_arch_event(idx, fixed_event, i | INTEL_RDPMC_FIXED, + MSR_CORE_PERF_FIXED_CTR0 + i, + MSR_CORE_PERF_GLOBAL_CTRL, + FIXED_PMC_GLOBAL_CTRL_ENABLE(i)); } static void guest_test_arch_events(void) -- cgit 1.2.3-korg From 7137cf751b9b14662adad61e35c236e141430aed Mon Sep 17 00:00:00 2001 From: Jinrong Liang Date: Tue, 9 Jan 2024 15:02:38 -0800 Subject: KVM: selftests: Test consistency of CPUID with num of gp counters Add a test to verify that KVM correctly emulates MSR-based accesses to general purpose counters based on guest CPUID, e.g. that accesses to non-existent counters #GP and accesses to existent counters succeed. Note, for compatibility reasons, KVM does not emulate #GP when MSR_P6_PERFCTR[0|1] is not present (writes should be dropped). Co-developed-by: Like Xu Signed-off-by: Like Xu Signed-off-by: Jinrong Liang Co-developed-by: Sean Christopherson Tested-by: Dapeng Mi Link: https://lore.kernel.org/r/20240109230250.424295-19-seanjc@google.com Signed-off-by: Sean Christopherson --- .../selftests/kvm/x86_64/pmu_counters_test.c | 99 ++++++++++++++++++++++ 1 file changed, 99 insertions(+) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c index 663e8fbe7ff8f..863418842ef85 100644 --- a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c +++ b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c @@ -270,9 +270,103 @@ static void test_arch_events(uint8_t pmu_version, uint64_t perf_capabilities, kvm_vm_free(vm); } +/* + * Limit testing to MSRs that are actually defined by Intel (in the SDM). MSRs + * that aren't defined counter MSRs *probably* don't exist, but there's no + * guarantee that currently undefined MSR indices won't be used for something + * other than PMCs in the future. 
+ */ +#define MAX_NR_GP_COUNTERS 8 +#define MAX_NR_FIXED_COUNTERS 3 + +#define GUEST_ASSERT_PMC_MSR_ACCESS(insn, msr, expect_gp, vector) \ +__GUEST_ASSERT(expect_gp ? vector == GP_VECTOR : !vector, \ + "Expected %s on " #insn "(0x%x), got vector %u", \ + expect_gp ? "#GP" : "no fault", msr, vector) \ + +#define GUEST_ASSERT_PMC_VALUE(insn, msr, val, expected) \ + __GUEST_ASSERT(val == expected_val, \ + "Expected " #insn "(0x%x) to yield 0x%lx, got 0x%lx", \ + msr, expected_val, val); + +static void guest_rd_wr_counters(uint32_t base_msr, uint8_t nr_possible_counters, + uint8_t nr_counters) +{ + uint8_t i; + + for (i = 0; i < nr_possible_counters; i++) { + /* + * TODO: Test a value that validates full-width writes and the + * width of the counters. + */ + const uint64_t test_val = 0xffff; + const uint32_t msr = base_msr + i; + const bool expect_success = i < nr_counters; + + /* + * KVM drops writes to MSR_P6_PERFCTR[0|1] if the counters are + * unsupported, i.e. doesn't #GP and reads back '0'. + */ + const uint64_t expected_val = expect_success ? test_val : 0; + const bool expect_gp = !expect_success && msr != MSR_P6_PERFCTR0 && + msr != MSR_P6_PERFCTR1; + uint8_t vector; + uint64_t val; + + vector = wrmsr_safe(msr, test_val); + GUEST_ASSERT_PMC_MSR_ACCESS(WRMSR, msr, expect_gp, vector); + + vector = rdmsr_safe(msr, &val); + GUEST_ASSERT_PMC_MSR_ACCESS(RDMSR, msr, expect_gp, vector); + + /* On #GP, the result of RDMSR is undefined. */ + if (!expect_gp) + GUEST_ASSERT_PMC_VALUE(RDMSR, msr, val, expected_val); + + vector = wrmsr_safe(msr, 0); + GUEST_ASSERT_PMC_MSR_ACCESS(WRMSR, msr, expect_gp, vector); + } + GUEST_DONE(); +} + +static void guest_test_gp_counters(void) +{ + uint8_t nr_gp_counters = 0; + uint32_t base_msr; + + if (guest_get_pmu_version()) + nr_gp_counters = this_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS); + + if (this_cpu_has(X86_FEATURE_PDCM) && + rdmsr(MSR_IA32_PERF_CAPABILITIES) & PMU_CAP_FW_WRITES) + base_msr = MSR_IA32_PMC0; + else + base_msr = MSR_IA32_PERFCTR0; + + guest_rd_wr_counters(base_msr, MAX_NR_GP_COUNTERS, nr_gp_counters); +} + +static void test_gp_counters(uint8_t pmu_version, uint64_t perf_capabilities, + uint8_t nr_gp_counters) +{ + struct kvm_vcpu *vcpu; + struct kvm_vm *vm; + + vm = pmu_vm_create_with_one_vcpu(&vcpu, guest_test_gp_counters, + pmu_version, perf_capabilities); + + vcpu_set_cpuid_property(vcpu, X86_PROPERTY_PMU_NR_GP_COUNTERS, + nr_gp_counters); + + run_vcpu(vcpu); + + kvm_vm_free(vm); +} + static void test_intel_counters(void) { uint8_t nr_arch_events = kvm_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH); + uint8_t nr_gp_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS); uint8_t pmu_version = kvm_cpu_property(X86_PROPERTY_PMU_VERSION); unsigned int i; uint8_t v, j; @@ -336,6 +430,11 @@ static void test_intel_counters(void) for (k = 0; k < nr_arch_events; k++) test_arch_events(v, perf_caps[i], j, BIT(k)); } + + pr_info("Testing GP counters, PMU version %u, perf_caps = %lx\n", + v, perf_caps[i]); + for (j = 0; j <= nr_gp_counters; j++) + test_gp_counters(v, perf_caps[i], j); } } } -- cgit 1.2.3-korg From c7d7c76ecf78f93b2bd7d40bccabc813c1ed0a49 Mon Sep 17 00:00:00 2001 From: Jinrong Liang Date: Tue, 9 Jan 2024 15:02:39 -0800 Subject: KVM: selftests: Test consistency of CPUID with num of fixed counters Extend the PMU counters test to verify KVM emulation of fixed counters in addition to general purpose counters. Fixed counters add an extra wrinkle in the form of an extra supported bitmask. 
Thus quoth the SDM: fixed-function performance counter 'i' is supported if ECX[i] || (EDX[4:0] > i) Test that KVM handles a counter being available through either method. Reviewed-by: Dapeng Mi Co-developed-by: Like Xu Signed-off-by: Like Xu Signed-off-by: Jinrong Liang Co-developed-by: Sean Christopherson Tested-by: Dapeng Mi Link: https://lore.kernel.org/r/20240109230250.424295-20-seanjc@google.com Signed-off-by: Sean Christopherson --- .../selftests/kvm/x86_64/pmu_counters_test.c | 60 ++++++++++++++++++++-- 1 file changed, 57 insertions(+), 3 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c index 863418842ef85..b07294af71a3b 100644 --- a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c +++ b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c @@ -290,7 +290,7 @@ __GUEST_ASSERT(expect_gp ? vector == GP_VECTOR : !vector, \ msr, expected_val, val); static void guest_rd_wr_counters(uint32_t base_msr, uint8_t nr_possible_counters, - uint8_t nr_counters) + uint8_t nr_counters, uint32_t or_mask) { uint8_t i; @@ -301,7 +301,13 @@ static void guest_rd_wr_counters(uint32_t base_msr, uint8_t nr_possible_counters */ const uint64_t test_val = 0xffff; const uint32_t msr = base_msr + i; - const bool expect_success = i < nr_counters; + + /* + * Fixed counters are supported if the counter is less than the + * number of enumerated contiguous counters *or* the counter is + * explicitly enumerated in the supported counters mask. + */ + const bool expect_success = i < nr_counters || (or_mask & BIT(i)); /* * KVM drops writes to MSR_P6_PERFCTR[0|1] if the counters are @@ -343,7 +349,7 @@ static void guest_test_gp_counters(void) else base_msr = MSR_IA32_PERFCTR0; - guest_rd_wr_counters(base_msr, MAX_NR_GP_COUNTERS, nr_gp_counters); + guest_rd_wr_counters(base_msr, MAX_NR_GP_COUNTERS, nr_gp_counters, 0); } static void test_gp_counters(uint8_t pmu_version, uint64_t perf_capabilities, @@ -363,9 +369,50 @@ static void test_gp_counters(uint8_t pmu_version, uint64_t perf_capabilities, kvm_vm_free(vm); } +static void guest_test_fixed_counters(void) +{ + uint64_t supported_bitmask = 0; + uint8_t nr_fixed_counters = 0; + + /* Fixed counters require Architectural vPMU Version 2+. */ + if (guest_get_pmu_version() >= 2) + nr_fixed_counters = this_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS); + + /* + * The supported bitmask for fixed counters was introduced in PMU + * version 5. 
+ */ + if (guest_get_pmu_version() >= 5) + supported_bitmask = this_cpu_property(X86_PROPERTY_PMU_FIXED_COUNTERS_BITMASK); + + guest_rd_wr_counters(MSR_CORE_PERF_FIXED_CTR0, MAX_NR_FIXED_COUNTERS, + nr_fixed_counters, supported_bitmask); +} + +static void test_fixed_counters(uint8_t pmu_version, uint64_t perf_capabilities, + uint8_t nr_fixed_counters, + uint32_t supported_bitmask) +{ + struct kvm_vcpu *vcpu; + struct kvm_vm *vm; + + vm = pmu_vm_create_with_one_vcpu(&vcpu, guest_test_fixed_counters, + pmu_version, perf_capabilities); + + vcpu_set_cpuid_property(vcpu, X86_PROPERTY_PMU_FIXED_COUNTERS_BITMASK, + supported_bitmask); + vcpu_set_cpuid_property(vcpu, X86_PROPERTY_PMU_NR_FIXED_COUNTERS, + nr_fixed_counters); + + run_vcpu(vcpu); + + kvm_vm_free(vm); +} + static void test_intel_counters(void) { uint8_t nr_arch_events = kvm_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH); + uint8_t nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS); uint8_t nr_gp_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS); uint8_t pmu_version = kvm_cpu_property(X86_PROPERTY_PMU_VERSION); unsigned int i; @@ -435,6 +482,13 @@ static void test_intel_counters(void) v, perf_caps[i]); for (j = 0; j <= nr_gp_counters; j++) test_gp_counters(v, perf_caps[i], j); + + pr_info("Testing fixed counters, PMU version %u, perf_caps = %lx\n", + v, perf_caps[i]); + for (j = 0; j <= nr_fixed_counters; j++) { + for (k = 0; k <= (BIT(nr_fixed_counters) - 1); k++) + test_fixed_counters(v, perf_caps[i], j, k); + } } } } -- cgit 1.2.3-korg From 787071fd02625151b94a2f93a47cdc9a25884633 Mon Sep 17 00:00:00 2001 From: Jinrong Liang Date: Tue, 9 Jan 2024 15:02:40 -0800 Subject: KVM: selftests: Add functional test for Intel's fixed PMU counters Extend the fixed counters test to verify that supported counters can actually be enabled in the control MSRs, that unsupported counters cannot, and that enabled counters actually count. Co-developed-by: Like Xu Signed-off-by: Like Xu Signed-off-by: Jinrong Liang [sean: fold into the rd/wr access test, massage changelog] Reviewed-by: Dapeng Mi Tested-by: Dapeng Mi Link: https://lore.kernel.org/r/20240109230250.424295-21-seanjc@google.com Signed-off-by: Sean Christopherson --- .../selftests/kvm/x86_64/pmu_counters_test.c | 31 +++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c index b07294af71a3b..f5dedd1124714 100644 --- a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c +++ b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c @@ -332,7 +332,6 @@ static void guest_rd_wr_counters(uint32_t base_msr, uint8_t nr_possible_counters vector = wrmsr_safe(msr, 0); GUEST_ASSERT_PMC_MSR_ACCESS(WRMSR, msr, expect_gp, vector); } - GUEST_DONE(); } static void guest_test_gp_counters(void) @@ -350,6 +349,7 @@ static void guest_test_gp_counters(void) base_msr = MSR_IA32_PERFCTR0; guest_rd_wr_counters(base_msr, MAX_NR_GP_COUNTERS, nr_gp_counters, 0); + GUEST_DONE(); } static void test_gp_counters(uint8_t pmu_version, uint64_t perf_capabilities, @@ -373,6 +373,7 @@ static void guest_test_fixed_counters(void) { uint64_t supported_bitmask = 0; uint8_t nr_fixed_counters = 0; + uint8_t i; /* Fixed counters require Architectural vPMU Version 2+. 
*/ if (guest_get_pmu_version() >= 2) @@ -387,6 +388,34 @@ static void guest_test_fixed_counters(void) guest_rd_wr_counters(MSR_CORE_PERF_FIXED_CTR0, MAX_NR_FIXED_COUNTERS, nr_fixed_counters, supported_bitmask); + + for (i = 0; i < MAX_NR_FIXED_COUNTERS; i++) { + uint8_t vector; + uint64_t val; + + if (i >= nr_fixed_counters && !(supported_bitmask & BIT_ULL(i))) { + vector = wrmsr_safe(MSR_CORE_PERF_FIXED_CTR_CTRL, + FIXED_PMC_CTRL(i, FIXED_PMC_KERNEL)); + __GUEST_ASSERT(vector == GP_VECTOR, + "Expected #GP for counter %u in FIXED_CTR_CTRL", i); + + vector = wrmsr_safe(MSR_CORE_PERF_GLOBAL_CTRL, + FIXED_PMC_GLOBAL_CTRL_ENABLE(i)); + __GUEST_ASSERT(vector == GP_VECTOR, + "Expected #GP for counter %u in PERF_GLOBAL_CTRL", i); + continue; + } + + wrmsr(MSR_CORE_PERF_FIXED_CTR0 + i, 0); + wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, FIXED_PMC_CTRL(i, FIXED_PMC_KERNEL)); + wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, FIXED_PMC_GLOBAL_CTRL_ENABLE(i)); + __asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES})); + wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0); + val = rdmsr(MSR_CORE_PERF_FIXED_CTR0 + i); + + GUEST_ASSERT_NE(val, 0); + } + GUEST_DONE(); } static void test_fixed_counters(uint8_t pmu_version, uint64_t perf_capabilities, -- cgit 1.2.3-korg From b55e7adf633a8a13d5ede57b77509ea8fa157a56 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 9 Jan 2024 15:02:41 -0800 Subject: KVM: selftests: Expand PMU counters test to verify LLC events Expand the PMU counters test to verify that LLC references and misses have non-zero counts when the code being executed while the LLC event(s) is active is evicted via CFLUSH{,OPT}. Note, CLFLUSH{,OPT} requires a fence of some kind to ensure the cache lines are flushed before execution continues. Use MFENCE for simplicity (performance is not a concern). Suggested-by: Jim Mattson Reviewed-by: Dapeng Mi Tested-by: Dapeng Mi Link: https://lore.kernel.org/r/20240109230250.424295-22-seanjc@google.com Signed-off-by: Sean Christopherson --- .../selftests/kvm/x86_64/pmu_counters_test.c | 59 +++++++++++++++------- 1 file changed, 40 insertions(+), 19 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c index f5dedd1124714..4c7133ddcda8d 100644 --- a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c +++ b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c @@ -14,9 +14,9 @@ /* * Number of "extra" instructions that will be counted, i.e. the number of * instructions that are needed to set up the loop and then disabled the - * counter. 2 MOV, 2 XOR, 1 WRMSR. + * counter. 1 CLFLUSH/CLFLUSHOPT/NOP, 1 MFENCE, 2 MOV, 2 XOR, 1 WRMSR. */ -#define NUM_EXTRA_INSNS 5 +#define NUM_EXTRA_INSNS 7 #define NUM_INSNS_RETIRED (NUM_BRANCHES + NUM_EXTRA_INSNS) static uint8_t kvm_pmu_version; @@ -107,6 +107,12 @@ static void guest_assert_event_count(uint8_t idx, case INTEL_ARCH_BRANCHES_RETIRED_INDEX: GUEST_ASSERT_EQ(count, NUM_BRANCHES); break; + case INTEL_ARCH_LLC_REFERENCES_INDEX: + case INTEL_ARCH_LLC_MISSES_INDEX: + if (!this_cpu_has(X86_FEATURE_CLFLUSHOPT) && + !this_cpu_has(X86_FEATURE_CLFLUSH)) + break; + fallthrough; case INTEL_ARCH_CPU_CYCLES_INDEX: case INTEL_ARCH_REFERENCE_CYCLES_INDEX: GUEST_ASSERT_NE(count, 0); @@ -123,29 +129,44 @@ sanity_checks: GUEST_ASSERT_EQ(_rdpmc(pmc), 0xdead); } +/* + * Enable and disable the PMC in a monolithic asm blob to ensure that the + * compiler can't insert _any_ code into the measured sequence. 
Note, ECX + * doesn't need to be clobbered as the input value, @pmc_msr, is restored + * before the end of the sequence. + * + * If CLFUSH{,OPT} is supported, flush the cacheline containing (at least) the + * start of the loop to force LLC references and misses, i.e. to allow testing + * that those events actually count. + */ +#define GUEST_MEASURE_EVENT(_msr, _value, clflush) \ +do { \ + __asm__ __volatile__("wrmsr\n\t" \ + clflush "\n\t" \ + "mfence\n\t" \ + "1: mov $" __stringify(NUM_BRANCHES) ", %%ecx\n\t" \ + "loop .\n\t" \ + "mov %%edi, %%ecx\n\t" \ + "xor %%eax, %%eax\n\t" \ + "xor %%edx, %%edx\n\t" \ + "wrmsr\n\t" \ + :: "a"((uint32_t)_value), "d"(_value >> 32), \ + "c"(_msr), "D"(_msr) \ + ); \ +} while (0) + static void __guest_test_arch_event(uint8_t idx, struct kvm_x86_pmu_feature event, uint32_t pmc, uint32_t pmc_msr, uint32_t ctrl_msr, uint64_t ctrl_msr_value) { wrmsr(pmc_msr, 0); - /* - * Enable and disable the PMC in a monolithic asm blob to ensure that - * the compiler can't insert _any_ code into the measured sequence. - * Note, ECX doesn't need to be clobbered as the input value, @pmc_msr, - * is restored before the end of the sequence. - */ - __asm__ __volatile__("wrmsr\n\t" - "mov $" __stringify(NUM_BRANCHES) ", %%ecx\n\t" - "loop .\n\t" - "mov %%edi, %%ecx\n\t" - "xor %%eax, %%eax\n\t" - "xor %%edx, %%edx\n\t" - "wrmsr\n\t" - :: "a"((uint32_t)ctrl_msr_value), - "d"(ctrl_msr_value >> 32), - "c"(ctrl_msr), "D"(ctrl_msr) - ); + if (this_cpu_has(X86_FEATURE_CLFLUSHOPT)) + GUEST_MEASURE_EVENT(ctrl_msr, ctrl_msr_value, "clflushopt 1f"); + else if (this_cpu_has(X86_FEATURE_CLFLUSH)) + GUEST_MEASURE_EVENT(ctrl_msr, ctrl_msr_value, "clflush 1f"); + else + GUEST_MEASURE_EVENT(ctrl_msr, ctrl_msr_value, "nop"); guest_assert_event_count(idx, event, pmc, pmc_msr); } -- cgit 1.2.3-korg From c85e986716b03bccfec2e5a577cc1aab4f172ffd Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 9 Jan 2024 15:02:42 -0800 Subject: KVM: selftests: Add a helper to query if the PMU module param is enabled Add a helper to probe KVM's "enable_pmu" param, open coding strings in multiple places is just asking for false negatives and/or runtime errors due to typos. 
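For illustration, a minimal caller might look like this (the helper is from this patch; the surrounding test skeleton is hypothetical):

	/* Gate an entire PMU-dependent selftest on the kvm.enable_pmu param. */
	int main(int argc, char *argv[])
	{
		TEST_REQUIRE(kvm_is_pmu_enabled());

		/* ... PMU-dependent test body ... */
		return 0;
	}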
Reviewed-by: Dapeng Mi Tested-by: Dapeng Mi Link: https://lore.kernel.org/r/20240109230250.424295-23-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/include/x86_64/processor.h | 5 +++++ tools/testing/selftests/kvm/x86_64/pmu_counters_test.c | 2 +- tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c | 2 +- tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c | 2 +- 4 files changed, 8 insertions(+), 3 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h index 92d4f8ecc7308..ee082ae58f404 100644 --- a/tools/testing/selftests/kvm/include/x86_64/processor.h +++ b/tools/testing/selftests/kvm/include/x86_64/processor.h @@ -1217,6 +1217,11 @@ static inline uint8_t xsetbv_safe(uint32_t index, uint64_t value) bool kvm_is_tdp_enabled(void); +static inline bool kvm_is_pmu_enabled(void) +{ + return get_kvm_param_bool("enable_pmu"); +} + uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr, int *level); uint64_t *vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr); diff --git a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c index 4c7133ddcda8d..9e9dc4084c0d9 100644 --- a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c +++ b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c @@ -545,7 +545,7 @@ static void test_intel_counters(void) int main(int argc, char *argv[]) { - TEST_REQUIRE(get_kvm_param_bool("enable_pmu")); + TEST_REQUIRE(kvm_is_pmu_enabled()); TEST_REQUIRE(host_cpu_is_intel); TEST_REQUIRE(kvm_cpu_has_p(X86_PROPERTY_PMU_VERSION)); diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c index 7ec9fbed92e07..fa407e2ccb2f7 100644 --- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c +++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c @@ -867,7 +867,7 @@ int main(int argc, char *argv[]) struct kvm_vcpu *vcpu, *vcpu2 = NULL; struct kvm_vm *vm; - TEST_REQUIRE(get_kvm_param_bool("enable_pmu")); + TEST_REQUIRE(kvm_is_pmu_enabled()); TEST_REQUIRE(kvm_has_cap(KVM_CAP_PMU_EVENT_FILTER)); TEST_REQUIRE(kvm_has_cap(KVM_CAP_PMU_EVENT_MASKED_EVENTS)); diff --git a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c index 2a8d4ac2f0204..8ded194c5a6d2 100644 --- a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c +++ b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c @@ -237,7 +237,7 @@ int main(int argc, char *argv[]) { union perf_capabilities host_cap; - TEST_REQUIRE(get_kvm_param_bool("enable_pmu")); + TEST_REQUIRE(kvm_is_pmu_enabled()); TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_PDCM)); TEST_REQUIRE(kvm_cpu_has_p(X86_PROPERTY_PMU_VERSION)); -- cgit 1.2.3-korg From 45e4755c39fc2df55f58ea893615a7d45a0fbcc8 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 9 Jan 2024 15:02:43 -0800 Subject: KVM: selftests: Add helpers to read integer module params Add helpers to read integer module params, which is painfully non-trivial because the pain of dealing with strings in C is exacerbated by the kernel inserting a newline. Don't bother differentiating between int, uint, short, etc. They all fit in an int, and KVM (thankfully) doesn't have any integer params larger than an int. 
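A hedged usage sketch (the helpers are from this patch; kvm.force_emulation_prefix is one known integer param, and the caller is illustrative):

	/*
	 * force_emulation_prefix is an integer param, so a boolean-style
	 * check needs an explicit conversion.
	 */
	bool fep_enabled = !!get_kvm_param_integer("force_emulation_prefix");

The vendor variants work identically against /sys/module/kvm_intel/ and /sys/module/kvm_amd/ parameters.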
Tested-by: Dapeng Mi Link: https://lore.kernel.org/r/20240109230250.424295-24-seanjc@google.com Signed-off-by: Sean Christopherson --- .../testing/selftests/kvm/include/kvm_util_base.h | 4 ++ tools/testing/selftests/kvm/lib/kvm_util.c | 62 +++++++++++++++++++--- 2 files changed, 60 insertions(+), 6 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h index 9e5afc472c142..070f250036fcb 100644 --- a/tools/testing/selftests/kvm/include/kvm_util_base.h +++ b/tools/testing/selftests/kvm/include/kvm_util_base.h @@ -259,6 +259,10 @@ bool get_kvm_param_bool(const char *param); bool get_kvm_intel_param_bool(const char *param); bool get_kvm_amd_param_bool(const char *param); +int get_kvm_param_integer(const char *param); +int get_kvm_intel_param_integer(const char *param); +int get_kvm_amd_param_integer(const char *param); + unsigned int kvm_check_cap(long cap); static inline bool kvm_has_cap(long cap) diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index e066d584c6561..9bafe44cb9786 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -51,13 +51,13 @@ int open_kvm_dev_path_or_exit(void) return _open_kvm_dev_path_or_exit(O_RDONLY); } -static bool get_module_param_bool(const char *module_name, const char *param) +static ssize_t get_module_param(const char *module_name, const char *param, + void *buffer, size_t buffer_size) { const int path_size = 128; char path[path_size]; - char value; - ssize_t r; - int fd; + ssize_t bytes_read; + int fd, r; r = snprintf(path, path_size, "/sys/module/%s/parameters/%s", module_name, param); @@ -66,11 +66,46 @@ static bool get_module_param_bool(const char *module_name, const char *param) fd = open_path_or_exit(path, O_RDONLY); - r = read(fd, &value, 1); - TEST_ASSERT(r == 1, "read(%s) failed", path); + bytes_read = read(fd, buffer, buffer_size); + TEST_ASSERT(bytes_read > 0, "read(%s) returned %ld, wanted %ld bytes", + path, bytes_read, buffer_size); r = close(fd); TEST_ASSERT(!r, "close(%s) failed", path); + return bytes_read; +} + +static int get_module_param_integer(const char *module_name, const char *param) +{ + /* + * 16 bytes to hold a 64-bit value (1 byte per char), 1 byte for the + * NUL char, and 1 byte because the kernel sucks and inserts a newline + * at the end. + */ + char value[16 + 1 + 1]; + ssize_t r; + + memset(value, '\0', sizeof(value)); + + r = get_module_param(module_name, param, value, sizeof(value)); + TEST_ASSERT(value[r - 1] == '\n', + "Expected trailing newline, got char '%c'", value[r - 1]); + + /* + * Squash the newline, otherwise atoi_paranoid() will complain about + * trailing non-NUL characters in the string. 
+ */ + value[r - 1] = '\0'; + return atoi_paranoid(value); +} + +static bool get_module_param_bool(const char *module_name, const char *param) +{ + char value; + ssize_t r; + + r = get_module_param(module_name, param, &value, sizeof(value)); + TEST_ASSERT_EQ(r, 1); if (value == 'Y') return true; @@ -95,6 +130,21 @@ bool get_kvm_amd_param_bool(const char *param) return get_module_param_bool("kvm_amd", param); } +int get_kvm_param_integer(const char *param) +{ + return get_module_param_integer("kvm", param); +} + +int get_kvm_intel_param_integer(const char *param) +{ + return get_module_param_integer("kvm_intel", param); +} + +int get_kvm_amd_param_integer(const char *param) +{ + return get_module_param_integer("kvm_amd", param); +} + /* * Capability * -- cgit 1.2.3-korg From 0326cc6b02c8e099bb6631168791924be097e1d9 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 9 Jan 2024 15:02:44 -0800 Subject: KVM: selftests: Query module param to detect FEP in MSR filtering test Add a helper to detect KVM support for forced emulation by querying the module param, and use the helper to detect support for the MSR filtering test instead of throwing a noodle/NOP at KVM to see if it sticks. Cc: Aaron Lewis Tested-by: Dapeng Mi Link: https://lore.kernel.org/r/20240109230250.424295-25-seanjc@google.com Signed-off-by: Sean Christopherson --- .../selftests/kvm/include/x86_64/processor.h | 5 ++++ .../selftests/kvm/x86_64/userspace_msr_exit_test.c | 27 ++++++++-------------- 2 files changed, 14 insertions(+), 18 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h index ee082ae58f404..d211cea188bee 100644 --- a/tools/testing/selftests/kvm/include/x86_64/processor.h +++ b/tools/testing/selftests/kvm/include/x86_64/processor.h @@ -1222,6 +1222,11 @@ static inline bool kvm_is_pmu_enabled(void) return get_kvm_param_bool("enable_pmu"); } +static inline bool kvm_is_forced_emulation_enabled(void) +{ + return !!get_kvm_param_integer("force_emulation_prefix"); +} + uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr, int *level); uint64_t *vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr); diff --git a/tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c b/tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c index 3533dc2fbfeeb..9e12dbc47a72f 100644 --- a/tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c +++ b/tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c @@ -14,8 +14,7 @@ /* Forced emulation prefix, used to invoke the emulator unconditionally. */ #define KVM_FEP "ud2; .byte 'k', 'v', 'm';" -#define KVM_FEP_LENGTH 5 -static int fep_available = 1; +static bool fep_available; #define MSR_NON_EXISTENT 0x474f4f00 @@ -260,13 +259,6 @@ static void guest_code_filter_allow(void) GUEST_ASSERT(data == 2); GUEST_ASSERT(guest_exception_count == 0); - /* - * Test to see if the instruction emulator is available (ie: the module - * parameter 'kvm.force_emulation_prefix=1' is set). This instruction - * will #UD if it isn't available. - */ - __asm__ __volatile__(KVM_FEP "nop"); - if (fep_available) { /* Let userspace know we aren't done. 
*/ GUEST_SYNC(0); @@ -388,12 +380,6 @@ static void guest_fep_gp_handler(struct ex_regs *regs) &em_wrmsr_start, &em_wrmsr_end); } -static void guest_ud_handler(struct ex_regs *regs) -{ - fep_available = 0; - regs->rip += KVM_FEP_LENGTH; -} - static void check_for_guest_assert(struct kvm_vcpu *vcpu) { struct ucall uc; @@ -531,9 +517,11 @@ static void test_msr_filter_allow(void) { struct kvm_vcpu *vcpu; struct kvm_vm *vm; + uint64_t cmd; int rc; vm = vm_create_with_one_vcpu(&vcpu, guest_code_filter_allow); + sync_global_to_guest(vm, fep_available); rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR); TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available"); @@ -561,11 +549,11 @@ static void test_msr_filter_allow(void) run_guest_then_process_wrmsr(vcpu, MSR_NON_EXISTENT); run_guest_then_process_rdmsr(vcpu, MSR_NON_EXISTENT); - vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler); vcpu_run(vcpu); - vm_install_exception_handler(vm, UD_VECTOR, NULL); + cmd = process_ucall(vcpu); - if (process_ucall(vcpu) != UCALL_DONE) { + if (fep_available) { + TEST_ASSERT_EQ(cmd, UCALL_SYNC); vm_install_exception_handler(vm, GP_VECTOR, guest_fep_gp_handler); /* Process emulated rdmsr and wrmsr instructions. */ @@ -583,6 +571,7 @@ static void test_msr_filter_allow(void) /* Confirm the guest completed without issues. */ run_guest_then_process_ucall_done(vcpu); } else { + TEST_ASSERT_EQ(cmd, UCALL_DONE); printf("To run the instruction emulated tests set the module parameter 'kvm.force_emulation_prefix=1'\n"); } @@ -804,6 +793,8 @@ static void test_user_exit_msr_flags(void) int main(int argc, char *argv[]) { + fep_available = kvm_is_forced_emulation_enabled(); + test_msr_filter_allow(); test_msr_filter_deny(); -- cgit 1.2.3-korg From 00856e17da730299205270234cda30628ae53b92 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 9 Jan 2024 15:02:45 -0800 Subject: KVM: selftests: Move KVM_FEP macro into common library header Move the KVM_FEP definition, a.k.a. the KVM force emulation prefix, into processor.h so that it can be used for other tests besides the MSR filter test. Tested-by: Dapeng Mi Link: https://lore.kernel.org/r/20240109230250.424295-26-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/include/x86_64/processor.h | 3 +++ tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c | 2 -- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h index d211cea188bee..6be365ac2a85b 100644 --- a/tools/testing/selftests/kvm/include/x86_64/processor.h +++ b/tools/testing/selftests/kvm/include/x86_64/processor.h @@ -23,6 +23,9 @@ extern bool host_cpu_is_intel; extern bool host_cpu_is_amd; +/* Forced emulation prefix, used to invoke the emulator unconditionally. */ +#define KVM_FEP "ud2; .byte 'k', 'v', 'm';" + #define NMI_VECTOR 0x02 #define X86_EFLAGS_FIXED (1u << 1) diff --git a/tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c b/tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c index 9e12dbc47a72f..ab3a8c4f0b864 100644 --- a/tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c +++ b/tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c @@ -12,8 +12,6 @@ #include "kvm_util.h" #include "vmx.h" -/* Forced emulation prefix, used to invoke the emulator unconditionally. 
*/ -#define KVM_FEP "ud2; .byte 'k', 'v', 'm';" static bool fep_available; #define MSR_NON_EXISTENT 0x474f4f00 -- cgit 1.2.3-korg From cd34fd8c758e968aae15944b1679c974af719648 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 9 Jan 2024 15:02:46 -0800 Subject: KVM: selftests: Test PMC virtualization with forced emulation Extend the PMC counters test to use forced emulation to verify that KVM emulates counter events for instructions retired and branches retired. Force emulation for only a subset of the measured code to test that KVM does the right thing when mixing perf events with emulated events. Reviewed-by: Dapeng Mi Tested-by: Dapeng Mi Link: https://lore.kernel.org/r/20240109230250.424295-27-seanjc@google.com Signed-off-by: Sean Christopherson --- .../selftests/kvm/x86_64/pmu_counters_test.c | 44 +++++++++++++++------- 1 file changed, 30 insertions(+), 14 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c index 9e9dc4084c0d9..cb808ac827ba8 100644 --- a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c +++ b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c @@ -21,6 +21,7 @@ static uint8_t kvm_pmu_version; static bool kvm_has_perf_caps; +static bool is_forced_emulation_enabled; static struct kvm_vm *pmu_vm_create_with_one_vcpu(struct kvm_vcpu **vcpu, void *guest_code, @@ -34,6 +35,7 @@ static struct kvm_vm *pmu_vm_create_with_one_vcpu(struct kvm_vcpu **vcpu, vcpu_init_descriptor_tables(*vcpu); sync_global_to_guest(vm, kvm_pmu_version); + sync_global_to_guest(vm, is_forced_emulation_enabled); /* * Set PERF_CAPABILITIES before PMU version as KVM disallows enabling @@ -138,37 +140,50 @@ sanity_checks: * If CLFUSH{,OPT} is supported, flush the cacheline containing (at least) the * start of the loop to force LLC references and misses, i.e. to allow testing * that those events actually count. + * + * If forced emulation is enabled (and specified), force emulation on a subset + * of the measured code to verify that KVM correctly emulates instructions and + * branches retired events in conjunction with hardware also counting said + * events. 
*/ -#define GUEST_MEASURE_EVENT(_msr, _value, clflush) \ +#define GUEST_MEASURE_EVENT(_msr, _value, clflush, FEP) \ do { \ __asm__ __volatile__("wrmsr\n\t" \ clflush "\n\t" \ "mfence\n\t" \ "1: mov $" __stringify(NUM_BRANCHES) ", %%ecx\n\t" \ - "loop .\n\t" \ - "mov %%edi, %%ecx\n\t" \ - "xor %%eax, %%eax\n\t" \ - "xor %%edx, %%edx\n\t" \ + FEP "loop .\n\t" \ + FEP "mov %%edi, %%ecx\n\t" \ + FEP "xor %%eax, %%eax\n\t" \ + FEP "xor %%edx, %%edx\n\t" \ "wrmsr\n\t" \ :: "a"((uint32_t)_value), "d"(_value >> 32), \ "c"(_msr), "D"(_msr) \ ); \ } while (0) +#define GUEST_TEST_EVENT(_idx, _event, _pmc, _pmc_msr, _ctrl_msr, _value, FEP) \ +do { \ + wrmsr(pmc_msr, 0); \ + \ + if (this_cpu_has(X86_FEATURE_CLFLUSHOPT)) \ + GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflushopt 1f", FEP); \ + else if (this_cpu_has(X86_FEATURE_CLFLUSH)) \ + GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflush 1f", FEP); \ + else \ + GUEST_MEASURE_EVENT(_ctrl_msr, _value, "nop", FEP); \ + \ + guest_assert_event_count(_idx, _event, _pmc, _pmc_msr); \ +} while (0) + static void __guest_test_arch_event(uint8_t idx, struct kvm_x86_pmu_feature event, uint32_t pmc, uint32_t pmc_msr, uint32_t ctrl_msr, uint64_t ctrl_msr_value) { - wrmsr(pmc_msr, 0); - - if (this_cpu_has(X86_FEATURE_CLFLUSHOPT)) - GUEST_MEASURE_EVENT(ctrl_msr, ctrl_msr_value, "clflushopt 1f"); - else if (this_cpu_has(X86_FEATURE_CLFLUSH)) - GUEST_MEASURE_EVENT(ctrl_msr, ctrl_msr_value, "clflush 1f"); - else - GUEST_MEASURE_EVENT(ctrl_msr, ctrl_msr_value, "nop"); + GUEST_TEST_EVENT(idx, event, pmc, pmc_msr, ctrl_msr, ctrl_msr_value, ""); - guest_assert_event_count(idx, event, pmc, pmc_msr); + if (is_forced_emulation_enabled) + GUEST_TEST_EVENT(idx, event, pmc, pmc_msr, ctrl_msr, ctrl_msr_value, KVM_FEP); } #define X86_PMU_FEATURE_NULL \ @@ -553,6 +568,7 @@ int main(int argc, char *argv[]) kvm_pmu_version = kvm_cpu_property(X86_PROPERTY_PMU_VERSION); kvm_has_perf_caps = kvm_cpu_has(X86_FEATURE_PDCM); + is_forced_emulation_enabled = kvm_is_forced_emulation_enabled(); test_intel_counters(); -- cgit 1.2.3-korg From ab3b6a7de8df00b380fb66a523c79e3b387aa877 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 9 Jan 2024 15:02:47 -0800 Subject: KVM: selftests: Add a forced emulation variation of KVM_ASM_SAFE() Add KVM_ASM_SAFE_FEP() to allow forcing emulation on an instruction that might fault. Note, KVM skips RIP past the FEP prefix before injecting an exception, i.e. the fixup needs to be on the instruction itself. Do not check for FEP support, that is firmly the responsibility of whatever code wants to use KVM_ASM_SAFE_FEP(). Sadly, chaining variadic arguments that contain commas doesn't work, thus the unfortunate amount of copy+paste. 
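A usage sketch (the macro is from this patch; the MSR index is a hypothetical nonexistent MSR, and the snippet assumes forced emulation is enabled so the prefix doesn't #UD):

	uint8_t vector;

	/* Force emulation of a WRMSR that is expected to fault. */
	vector = kvm_asm_safe_fep("wrmsr", "c"(0xdeadc0de), "a"(0), "d"(0));
	GUEST_ASSERT(vector == GP_VECTOR);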
Tested-by: Dapeng Mi Link: https://lore.kernel.org/r/20240109230250.424295-28-seanjc@google.com Signed-off-by: Sean Christopherson --- .../selftests/kvm/include/x86_64/processor.h | 30 ++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h index 6be365ac2a85b..fe891424ff55d 100644 --- a/tools/testing/selftests/kvm/include/x86_64/processor.h +++ b/tools/testing/selftests/kvm/include/x86_64/processor.h @@ -1154,16 +1154,19 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector, * r9 = exception vector (non-zero) * r10 = error code */ -#define KVM_ASM_SAFE(insn) \ +#define __KVM_ASM_SAFE(insn, fep) \ "mov $" __stringify(KVM_EXCEPTION_MAGIC) ", %%r9\n\t" \ "lea 1f(%%rip), %%r10\n\t" \ "lea 2f(%%rip), %%r11\n\t" \ - "1: " insn "\n\t" \ + fep "1: " insn "\n\t" \ "xor %%r9, %%r9\n\t" \ "2:\n\t" \ "mov %%r9b, %[vector]\n\t" \ "mov %%r10, %[error_code]\n\t" +#define KVM_ASM_SAFE(insn) __KVM_ASM_SAFE(insn, "") +#define KVM_ASM_SAFE_FEP(insn) __KVM_ASM_SAFE(insn, KVM_FEP) + #define KVM_ASM_SAFE_OUTPUTS(v, ec) [vector] "=qm"(v), [error_code] "=rm"(ec) #define KVM_ASM_SAFE_CLOBBERS "r9", "r10", "r11" @@ -1190,6 +1193,29 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector, vector; \ }) +#define kvm_asm_safe_fep(insn, inputs...) \ +({ \ + uint64_t ign_error_code; \ + uint8_t vector; \ + \ + asm volatile(KVM_ASM_SAFE_FEP(insn) \ + : KVM_ASM_SAFE_OUTPUTS(vector, ign_error_code) \ + : inputs \ + : KVM_ASM_SAFE_CLOBBERS); \ + vector; \ +}) + +#define kvm_asm_safe_ec_fep(insn, error_code, inputs...) \ +({ \ + uint8_t vector; \ + \ + asm volatile(KVM_ASM_SAFE_FEP(insn) \ + : KVM_ASM_SAFE_OUTPUTS(vector, error_code) \ + : inputs \ + : KVM_ASM_SAFE_CLOBBERS); \ + vector; \ +}) + static inline uint8_t rdmsr_safe(uint32_t msr, uint64_t *val) { uint64_t error_code; -- cgit 1.2.3-korg From b5e66df34cb0de716859595ce7a4d9d1d015a695 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 9 Jan 2024 15:02:48 -0800 Subject: KVM: selftests: Add helpers for safe and safe+forced RDMSR, RDPMC, and XGETBV Add helpers for safe and safe-with-forced-emulation versions of RDMSR, RDPMC, and XGETBV. Use macro shenanigans to eliminate the rather large amount of boilerplate needed to get values in and out of registers.
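For example (a sketch; the generated rdmsr_safe() keeps the old signature, and the _fep variant assumes forced emulation is enabled):

	uint64_t val;
	uint8_t vector;

	/* Generated helper, identical behavior to the old open-coded version. */
	vector = rdmsr_safe(MSR_IA32_TSC, &val);
	GUEST_ASSERT(!vector);

	/* Same read, but forced through KVM's instruction emulator. */
	vector = rdmsr_safe_fep(MSR_IA32_TSC, &val);
	GUEST_ASSERT(!vector);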
Tested-by: Dapeng Mi Link: https://lore.kernel.org/r/20240109230250.424295-29-seanjc@google.com Signed-off-by: Sean Christopherson --- .../selftests/kvm/include/x86_64/processor.h | 38 +++++++++++++++------- 1 file changed, 26 insertions(+), 12 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h index fe891424ff55d..abac816f6594c 100644 --- a/tools/testing/selftests/kvm/include/x86_64/processor.h +++ b/tools/testing/selftests/kvm/include/x86_64/processor.h @@ -1216,20 +1216,34 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector, vector; \ }) -static inline uint8_t rdmsr_safe(uint32_t msr, uint64_t *val) -{ - uint64_t error_code; - uint8_t vector; - uint32_t a, d; +#define BUILD_READ_U64_SAFE_HELPER(insn, _fep, _FEP) \ +static inline uint8_t insn##_safe ##_fep(uint32_t idx, uint64_t *val) \ +{ \ + uint64_t error_code; \ + uint8_t vector; \ + uint32_t a, d; \ + \ + asm volatile(KVM_ASM_SAFE##_FEP(#insn) \ + : "=a"(a), "=d"(d), \ + KVM_ASM_SAFE_OUTPUTS(vector, error_code) \ + : "c"(idx) \ + : KVM_ASM_SAFE_CLOBBERS); \ + \ + *val = (uint64_t)a | ((uint64_t)d << 32); \ + return vector; \ +} - asm volatile(KVM_ASM_SAFE("rdmsr") - : "=a"(a), "=d"(d), KVM_ASM_SAFE_OUTPUTS(vector, error_code) - : "c"(msr) - : KVM_ASM_SAFE_CLOBBERS); +/* + * Generate {insn}_safe() and {insn}_safe_fep() helpers for instructions that + * use ECX as an input index, and EDX:EAX as a 64-bit output. + */ +#define BUILD_READ_U64_SAFE_HELPERS(insn) \ + BUILD_READ_U64_SAFE_HELPER(insn, , ) \ + BUILD_READ_U64_SAFE_HELPER(insn, _fep, _FEP) \ - *val = (uint64_t)a | ((uint64_t)d << 32); - return vector; -} +BUILD_READ_U64_SAFE_HELPERS(rdmsr) +BUILD_READ_U64_SAFE_HELPERS(rdpmc) +BUILD_READ_U64_SAFE_HELPERS(xgetbv) static inline uint8_t wrmsr_safe(uint32_t msr, uint64_t val) { -- cgit 1.2.3-korg From a8a37f5556845b13be2df3d545e7245b574e0ff5 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 9 Jan 2024 15:02:49 -0800 Subject: KVM: selftests: Extend PMU counters test to validate RDPMC after WRMSR Extend the read/write PMU counters subtest to verify that RDPMC also reads back the written value. Opportunistically verify that attempting to use the "fast" mode of RDPMC fails, as the "fast" flag is only supported by non-architectural PMUs, which KVM doesn't virtualize. Tested-by: Dapeng Mi Link: https://lore.kernel.org/r/20240109230250.424295-30-seanjc@google.com Signed-off-by: Sean Christopherson --- .../selftests/kvm/x86_64/pmu_counters_test.c | 41 ++++++++++++++++++++++ 1 file changed, 41 insertions(+) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c index cb808ac827ba8..ae5f6042f1e89 100644 --- a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c +++ b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c @@ -325,9 +325,30 @@ __GUEST_ASSERT(expect_gp ?
vector == GP_VECTOR : !vector, \ "Expected " #insn "(0x%x) to yield 0x%lx, got 0x%lx", \ msr, expected_val, val); +static void guest_test_rdpmc(uint32_t rdpmc_idx, bool expect_success, + uint64_t expected_val) +{ + uint8_t vector; + uint64_t val; + + vector = rdpmc_safe(rdpmc_idx, &val); + GUEST_ASSERT_PMC_MSR_ACCESS(RDPMC, rdpmc_idx, !expect_success, vector); + if (expect_success) + GUEST_ASSERT_PMC_VALUE(RDPMC, rdpmc_idx, val, expected_val); + + if (!is_forced_emulation_enabled) + return; + + vector = rdpmc_safe_fep(rdpmc_idx, &val); + GUEST_ASSERT_PMC_MSR_ACCESS(RDPMC, rdpmc_idx, !expect_success, vector); + if (expect_success) + GUEST_ASSERT_PMC_VALUE(RDPMC, rdpmc_idx, val, expected_val); +} + static void guest_rd_wr_counters(uint32_t base_msr, uint8_t nr_possible_counters, uint8_t nr_counters, uint32_t or_mask) { + const bool pmu_has_fast_mode = !guest_get_pmu_version(); uint8_t i; for (i = 0; i < nr_possible_counters; i++) { @@ -352,6 +373,7 @@ static void guest_rd_wr_counters(uint32_t base_msr, uint8_t nr_possible_counters const uint64_t expected_val = expect_success ? test_val : 0; const bool expect_gp = !expect_success && msr != MSR_P6_PERFCTR0 && msr != MSR_P6_PERFCTR1; + uint32_t rdpmc_idx; uint8_t vector; uint64_t val; @@ -365,6 +387,25 @@ static void guest_rd_wr_counters(uint32_t base_msr, uint8_t nr_possible_counters if (!expect_gp) GUEST_ASSERT_PMC_VALUE(RDMSR, msr, val, expected_val); + /* + * Redo the read tests with RDPMC, which has different indexing + * semantics and additional capabilities. + */ + rdpmc_idx = i; + if (base_msr == MSR_CORE_PERF_FIXED_CTR0) + rdpmc_idx |= INTEL_RDPMC_FIXED; + + guest_test_rdpmc(rdpmc_idx, expect_success, expected_val); + + /* + * KVM doesn't support non-architectural PMUs, i.e. it should + * impossible to have fast mode RDPMC. Verify that attempting + * to use fast RDPMC always #GPs. + */ + GUEST_ASSERT(!expect_success || !pmu_has_fast_mode); + rdpmc_idx |= INTEL_RDPMC_FAST; + guest_test_rdpmc(rdpmc_idx, false, -1ull); + vector = wrmsr_safe(msr, 0); GUEST_ASSERT_PMC_MSR_ACCESS(WRMSR, msr, expect_gp, vector); } -- cgit 1.2.3-korg From dcf0926e9b899eca754a07c4064de69815b85a38 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Thu, 4 Jan 2024 15:15:43 -0500 Subject: x86: replace CONFIG_HAVE_KVM with IS_ENABLED(CONFIG_KVM) It is more accurate to check if KVM is enabled, instead of having the architecture say so. Architectures always "have" KVM, so for example checking CONFIG_HAVE_KVM in x86 code is pointless, but if KVM is disabled in a specific build, there is no need for support code. Alternatively, many of the #ifdefs could simply be deleted. However, this would add completely dead code. For example, when KVM is disabled, there should not be any posted interrupts, i.e. NOT wiring up the "dummy" handlers and treating IRQs on those vectors as spurious is the right thing to do. 
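For reference, a simplified sketch of the IS_ENABLED() machinery this relies on (see include/linux/kconfig.h for the real implementation):

	/*
	 * CONFIG_KVM=y defines CONFIG_KVM to 1, CONFIG_KVM=m defines
	 * CONFIG_KVM_MODULE to 1, and IS_ENABLED() is true in either case,
	 * whereas a plain #ifdef CONFIG_KVM would miss the =m case.
	 */
	#define IS_ENABLED(option) (IS_BUILTIN(option) || IS_MODULE(option))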
Cc: x86@kernel.org Cc: kbingham@kernel.org Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/hardirq.h | 2 +- arch/x86/include/asm/idtentry.h | 2 +- arch/x86/include/asm/irq.h | 2 +- arch/x86/include/asm/irq_vectors.h | 2 +- arch/x86/kernel/idt.c | 2 +- arch/x86/kernel/irq.c | 4 ++-- scripts/gdb/linux/constants.py.in | 6 +++++- scripts/gdb/linux/interrupts.py | 2 +- tools/arch/x86/include/asm/irq_vectors.h | 2 +- 9 files changed, 14 insertions(+), 10 deletions(-) (limited to 'tools') diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h index 66837b8c67f1a..fbc7722b87d1f 100644 --- a/arch/x86/include/asm/hardirq.h +++ b/arch/x86/include/asm/hardirq.h @@ -15,7 +15,7 @@ typedef struct { unsigned int irq_spurious_count; unsigned int icr_read_retry_count; #endif -#ifdef CONFIG_HAVE_KVM +#if IS_ENABLED(CONFIG_KVM) unsigned int kvm_posted_intr_ipis; unsigned int kvm_posted_intr_wakeup_ipis; unsigned int kvm_posted_intr_nested_ipis; diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h index 13639e57e1f8a..d9c86733d0dbc 100644 --- a/arch/x86/include/asm/idtentry.h +++ b/arch/x86/include/asm/idtentry.h @@ -675,7 +675,7 @@ DECLARE_IDTENTRY_SYSVEC(IRQ_WORK_VECTOR, sysvec_irq_work); # endif #endif -#ifdef CONFIG_HAVE_KVM +#if IS_ENABLED(CONFIG_KVM) DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_VECTOR, sysvec_kvm_posted_intr_ipi); DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_WAKEUP_VECTOR, sysvec_kvm_posted_intr_wakeup_ipi); DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_NESTED_VECTOR, sysvec_kvm_posted_intr_nested_ipi); diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h index 836c170d30875..194dfff84cb11 100644 --- a/arch/x86/include/asm/irq.h +++ b/arch/x86/include/asm/irq.h @@ -29,7 +29,7 @@ struct irq_desc; extern void fixup_irqs(void); -#ifdef CONFIG_HAVE_KVM +#if IS_ENABLED(CONFIG_KVM) extern void kvm_set_posted_intr_wakeup_handler(void (*handler)(void)); #endif diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index 3a19904c2db69..3f73ac3ed3a07 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h @@ -84,7 +84,7 @@ #define HYPERVISOR_CALLBACK_VECTOR 0xf3 /* Vector for KVM to deliver posted interrupt IPI */ -#ifdef CONFIG_HAVE_KVM +#if IS_ENABLED(CONFIG_KVM) #define POSTED_INTR_VECTOR 0xf2 #define POSTED_INTR_WAKEUP_VECTOR 0xf1 #define POSTED_INTR_NESTED_VECTOR 0xf0 diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c index 660b601f1d6c3..d2bc67cbaf920 100644 --- a/arch/x86/kernel/idt.c +++ b/arch/x86/kernel/idt.c @@ -153,7 +153,7 @@ static const __initconst struct idt_data apic_idts[] = { #ifdef CONFIG_X86_LOCAL_APIC INTG(LOCAL_TIMER_VECTOR, asm_sysvec_apic_timer_interrupt), INTG(X86_PLATFORM_IPI_VECTOR, asm_sysvec_x86_platform_ipi), -# ifdef CONFIG_HAVE_KVM +# if IS_ENABLED(CONFIG_KVM) INTG(POSTED_INTR_VECTOR, asm_sysvec_kvm_posted_intr_ipi), INTG(POSTED_INTR_WAKEUP_VECTOR, asm_sysvec_kvm_posted_intr_wakeup_ipi), INTG(POSTED_INTR_NESTED_VECTOR, asm_sysvec_kvm_posted_intr_nested_ipi), diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 11761c1245453..35fde0107901d 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -164,7 +164,7 @@ int arch_show_interrupts(struct seq_file *p, int prec) #if defined(CONFIG_X86_IO_APIC) seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count)); #endif -#ifdef CONFIG_HAVE_KVM +#if IS_ENABLED(CONFIG_KVM) seq_printf(p, "%*s: ", prec, "PIN"); for_each_online_cpu(j) seq_printf(p, "%10u ", 
irq_stats(j)->kvm_posted_intr_ipis); @@ -290,7 +290,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_x86_platform_ipi) } #endif -#ifdef CONFIG_HAVE_KVM +#if IS_ENABLED(CONFIG_KVM) static void dummy_handler(void) {} static void (*kvm_posted_intr_wakeup_handler)(void) = dummy_handler; diff --git a/scripts/gdb/linux/constants.py.in b/scripts/gdb/linux/constants.py.in index e810e0c27ff18..5cace7588e243 100644 --- a/scripts/gdb/linux/constants.py.in +++ b/scripts/gdb/linux/constants.py.in @@ -130,7 +130,11 @@ LX_CONFIG(CONFIG_X86_MCE_THRESHOLD) LX_CONFIG(CONFIG_X86_MCE_AMD) LX_CONFIG(CONFIG_X86_MCE) LX_CONFIG(CONFIG_X86_IO_APIC) -LX_CONFIG(CONFIG_HAVE_KVM) +/* + * CONFIG_KVM can be "m" but it affects common code too. Use CONFIG_KVM_COMMON + * as a proxy for IS_ENABLED(CONFIG_KVM). + */ +LX_CONFIG_KVM = IS_BUILTIN(CONFIG_KVM_COMMON) LX_CONFIG(CONFIG_NUMA) LX_CONFIG(CONFIG_ARM64) LX_CONFIG(CONFIG_ARM64_4K_PAGES) diff --git a/scripts/gdb/linux/interrupts.py b/scripts/gdb/linux/interrupts.py index ef478e273791f..66ae5c7690cf1 100644 --- a/scripts/gdb/linux/interrupts.py +++ b/scripts/gdb/linux/interrupts.py @@ -151,7 +151,7 @@ def x86_show_interupts(prec): if cnt is not None: text += "%*s: %10u\n" % (prec, "MIS", cnt['counter']) - if constants.LX_CONFIG_HAVE_KVM: + if constants.LX_CONFIG_KVM: text += x86_show_irqstat(prec, "PIN", 'kvm_posted_intr_ipis', 'Posted-interrupt notification event') text += x86_show_irqstat(prec, "NPI", 'kvm_posted_intr_nested_ipis', 'Nested posted-interrupt event') text += x86_show_irqstat(prec, "PIW", 'kvm_posted_intr_wakeup_ipis', 'Posted-interrupt wakeup event') diff --git a/tools/arch/x86/include/asm/irq_vectors.h b/tools/arch/x86/include/asm/irq_vectors.h index 3a19904c2db69..3f73ac3ed3a07 100644 --- a/tools/arch/x86/include/asm/irq_vectors.h +++ b/tools/arch/x86/include/asm/irq_vectors.h @@ -84,7 +84,7 @@ #define HYPERVISOR_CALLBACK_VECTOR 0xf3 /* Vector for KVM to deliver posted interrupt IPI */ -#ifdef CONFIG_HAVE_KVM +#if IS_ENABLED(CONFIG_KVM) #define POSTED_INTR_VECTOR 0xf2 #define POSTED_INTR_WAKEUP_VECTOR 0xf1 #define POSTED_INTR_NESTED_VECTOR 0xf0 -- cgit 1.2.3-korg From 06fdd894b473c6cc29c9b39b82e0941cefec4e51 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 2 Feb 2024 15:46:03 -0800 Subject: KVM: selftests: Fix GUEST_PRINTF() format warnings in ARM code Fix a pile of -Wformat warnings in the KVM ARM selftests code, almost all of which are benign "long" versus "long long" issues (selftests are 64-bit only, and the guest printf code treats "ll" the same as "l"). The code itself isn't problematic, but the warnings make it impossible to build ARM selftests with -Werror, which does detect real issues from time to time. Opportunistically have GUEST_ASSERT_BITMAP_REG() interpret set_expected, which is a bool, as an unsigned decimal value, i.e. have it print '0' or '1' instead of '0x0' or '0x1'. 
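To illustrate the class of warning being fixed (hypothetical snippet; selftests are 64-bit only, so uint64_t is "unsigned long" and a "%llx" specifier trips -Wformat):

	uint64_t xcnt = read_sysreg(cntvct_el0);

	__GUEST_ASSERT(xcnt, "xcnt = 0x%llx", xcnt);	/* warns: wants long long */
	__GUEST_ASSERT(xcnt, "xcnt = 0x%lx", xcnt);	/* clean */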
Signed-off-by: Sean Christopherson Tested-by: Zenghui Yu Link: https://lore.kernel.org/r/20240202234603.366925-1-seanjc@google.com Signed-off-by: Oliver Upton --- tools/testing/selftests/kvm/aarch64/arch_timer.c | 4 ++-- tools/testing/selftests/kvm/aarch64/debug-exceptions.c | 2 +- tools/testing/selftests/kvm/aarch64/hypercalls.c | 4 ++-- tools/testing/selftests/kvm/aarch64/page_fault_test.c | 2 +- tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c | 12 ++++++------ 5 files changed, 12 insertions(+), 12 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/aarch64/arch_timer.c b/tools/testing/selftests/kvm/aarch64/arch_timer.c index 274b8465b42a5..d5e8f365aa011 100644 --- a/tools/testing/selftests/kvm/aarch64/arch_timer.c +++ b/tools/testing/selftests/kvm/aarch64/arch_timer.c @@ -158,9 +158,9 @@ static void guest_validate_irq(unsigned int intid, /* Basic 'timer condition met' check */ __GUEST_ASSERT(xcnt >= cval, - "xcnt = 0x%llx, cval = 0x%llx, xcnt_diff_us = 0x%llx", + "xcnt = 0x%lx, cval = 0x%lx, xcnt_diff_us = 0x%lx", xcnt, cval, xcnt_diff_us); - __GUEST_ASSERT(xctl & CTL_ISTATUS, "xcnt = 0x%llx", xcnt); + __GUEST_ASSERT(xctl & CTL_ISTATUS, "xcnt = 0x%lx", xcnt); WRITE_ONCE(shared_data->nr_iter, shared_data->nr_iter + 1); } diff --git a/tools/testing/selftests/kvm/aarch64/debug-exceptions.c b/tools/testing/selftests/kvm/aarch64/debug-exceptions.c index 866002917441c..2582c49e525ad 100644 --- a/tools/testing/selftests/kvm/aarch64/debug-exceptions.c +++ b/tools/testing/selftests/kvm/aarch64/debug-exceptions.c @@ -365,7 +365,7 @@ static void guest_wp_handler(struct ex_regs *regs) static void guest_ss_handler(struct ex_regs *regs) { - __GUEST_ASSERT(ss_idx < 4, "Expected index < 4, got '%u'", ss_idx); + __GUEST_ASSERT(ss_idx < 4, "Expected index < 4, got '%lu'", ss_idx); ss_addr[ss_idx++] = regs->pc; regs->pstate |= SPSR_SS; } diff --git a/tools/testing/selftests/kvm/aarch64/hypercalls.c b/tools/testing/selftests/kvm/aarch64/hypercalls.c index 31f66ba97228b..c62739d897d61 100644 --- a/tools/testing/selftests/kvm/aarch64/hypercalls.c +++ b/tools/testing/selftests/kvm/aarch64/hypercalls.c @@ -105,12 +105,12 @@ static void guest_test_hvc(const struct test_hvc_info *hc_info) case TEST_STAGE_HVC_IFACE_FEAT_DISABLED: case TEST_STAGE_HVC_IFACE_FALSE_INFO: __GUEST_ASSERT(res.a0 == SMCCC_RET_NOT_SUPPORTED, - "a0 = 0x%lx, func_id = 0x%x, arg1 = 0x%llx, stage = %u", + "a0 = 0x%lx, func_id = 0x%x, arg1 = 0x%lx, stage = %u", res.a0, hc_info->func_id, hc_info->arg1, stage); break; case TEST_STAGE_HVC_IFACE_FEAT_ENABLED: __GUEST_ASSERT(res.a0 != SMCCC_RET_NOT_SUPPORTED, - "a0 = 0x%lx, func_id = 0x%x, arg1 = 0x%llx, stage = %u", + "a0 = 0x%lx, func_id = 0x%x, arg1 = 0x%lx, stage = %u", res.a0, hc_info->func_id, hc_info->arg1, stage); break; default: diff --git a/tools/testing/selftests/kvm/aarch64/page_fault_test.c b/tools/testing/selftests/kvm/aarch64/page_fault_test.c index 08a5ca5bed56a..7bbd9fb5c8d67 100644 --- a/tools/testing/selftests/kvm/aarch64/page_fault_test.c +++ b/tools/testing/selftests/kvm/aarch64/page_fault_test.c @@ -292,7 +292,7 @@ static void guest_code(struct test_desc *test) static void no_dabt_handler(struct ex_regs *regs) { - GUEST_FAIL("Unexpected dabt, far_el1 = 0x%llx", read_sysreg(far_el1)); + GUEST_FAIL("Unexpected dabt, far_el1 = 0x%lx", read_sysreg(far_el1)); } static void no_iabt_handler(struct ex_regs *regs) diff --git a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c index 
9d51b56913496..f8f0c655c7232 100644 --- a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c +++ b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c @@ -195,11 +195,11 @@ struct pmc_accessor pmc_accessors[] = { \ if (set_expected) \ __GUEST_ASSERT((_tval & mask), \ - "tval: 0x%lx; mask: 0x%lx; set_expected: 0x%lx", \ + "tval: 0x%lx; mask: 0x%lx; set_expected: %u", \ _tval, mask, set_expected); \ else \ __GUEST_ASSERT(!(_tval & mask), \ - "tval: 0x%lx; mask: 0x%lx; set_expected: 0x%lx", \ + "tval: 0x%lx; mask: 0x%lx; set_expected: %u", \ _tval, mask, set_expected); \ } @@ -286,7 +286,7 @@ static void test_access_pmc_regs(struct pmc_accessor *acc, int pmc_idx) acc->write_typer(pmc_idx, write_data); read_data = acc->read_typer(pmc_idx); __GUEST_ASSERT(read_data == write_data, - "pmc_idx: 0x%lx; acc_idx: 0x%lx; read_data: 0x%lx; write_data: 0x%lx", + "pmc_idx: 0x%x; acc_idx: 0x%lx; read_data: 0x%lx; write_data: 0x%lx", pmc_idx, PMC_ACC_TO_IDX(acc), read_data, write_data); /* @@ -297,14 +297,14 @@ static void test_access_pmc_regs(struct pmc_accessor *acc, int pmc_idx) /* The count value must be 0, as it is disabled and reset */ __GUEST_ASSERT(read_data == 0, - "pmc_idx: 0x%lx; acc_idx: 0x%lx; read_data: 0x%lx", + "pmc_idx: 0x%x; acc_idx: 0x%lx; read_data: 0x%lx", pmc_idx, PMC_ACC_TO_IDX(acc), read_data); write_data = read_data + pmc_idx + 0x12345; acc->write_cntr(pmc_idx, write_data); read_data = acc->read_cntr(pmc_idx); __GUEST_ASSERT(read_data == write_data, - "pmc_idx: 0x%lx; acc_idx: 0x%lx; read_data: 0x%lx; write_data: 0x%lx", + "pmc_idx: 0x%x; acc_idx: 0x%lx; read_data: 0x%lx; write_data: 0x%lx", pmc_idx, PMC_ACC_TO_IDX(acc), read_data, write_data); } @@ -379,7 +379,7 @@ static void guest_code(uint64_t expected_pmcr_n) int i, pmc; __GUEST_ASSERT(expected_pmcr_n <= ARMV8_PMU_MAX_GENERAL_COUNTERS, - "Expected PMCR.N: 0x%lx; ARMv8 general counters: 0x%lx", + "Expected PMCR.N: 0x%lx; ARMv8 general counters: 0x%x", expected_pmcr_n, ARMV8_PMU_MAX_GENERAL_COUNTERS); pmcr = read_sysreg(pmcr_el0); -- cgit 1.2.3-korg From 8cdc71fbf65567dca6f52aac206d91754ad55147 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Mon, 12 Feb 2024 21:09:33 +0000 Subject: KVM: selftests: Print timer ctl register in ISTATUS assertion Zenghui noted that the test assertion for the ISTATUS bit is printing the current timer value instead of the control register in the case of failure. While the assertion is sound, printing CNT isn't informative. Change things around to actually print the CTL register value instead. 
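For context, the CTL bits being asserted (architectural values; the macro names follow the selftests' arch_timer header):

	#define CTL_ENABLE	(1 << 0)
	#define CTL_IMASK	(1 << 1)
	#define CTL_ISTATUS	(1 << 2)

Printing xctl on failure therefore shows at a glance whether the timer was disabled, masked, or simply not yet pending, which the count value cannot.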
Reported-by: Zenghui Yu Closes: https://lore.kernel.org/kvmarm/3188e6f1-f150-f7d0-6c2b-5b7608b0b012@huawei.com/ Reviewed-by: Zenghui Yu Link: https://lore.kernel.org/r/20240212210932.3095265-2-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- tools/testing/selftests/kvm/aarch64/arch_timer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/aarch64/arch_timer.c b/tools/testing/selftests/kvm/aarch64/arch_timer.c index d5e8f365aa011..ab4b604d8ec07 100644 --- a/tools/testing/selftests/kvm/aarch64/arch_timer.c +++ b/tools/testing/selftests/kvm/aarch64/arch_timer.c @@ -160,7 +160,7 @@ static void guest_validate_irq(unsigned int intid, __GUEST_ASSERT(xcnt >= cval, "xcnt = 0x%lx, cval = 0x%lx, xcnt_diff_us = 0x%lx", xcnt, cval, xcnt_diff_us); - __GUEST_ASSERT(xctl & CTL_ISTATUS, "xcnt = 0x%lx", xcnt); + __GUEST_ASSERT(xctl & CTL_ISTATUS, "xctl = 0x%lx", xctl); WRITE_ONCE(shared_data->nr_iter, shared_data->nr_iter + 1); } -- cgit 1.2.3-korg From 4a447b135e45b49101417d54079df25b520299d9 Mon Sep 17 00:00:00 2001 From: Dapeng Mi Date: Sun, 18 Feb 2024 12:30:03 +0800 Subject: KVM: selftests: Test top-down slots event in x86's pmu_counters_test Although fixed counter 3 and its exclusive pseudo slots event are not supported by KVM yet, the architectural slots event is supported by KVM and can be programmed on any GP counter. Thus add validation for this architectural slots event. The top-down slots event "counts the total number of available slots for an unhalted logical processor, and increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method." A slot is an abstract concept which indicates how many uops (decoded from instructions) can be processed simultaneously (per cycle) on HW. In the Top-down Microarchitecture Analysis (TMA) method, the processor is divided into two parts, front-end and back-end. Assume there is a processor with a classic 5-stage pipeline: fetch, decode, execute, memory access and register writeback. The former 2 stages (fetch/decode) are classified as front-end and the latter 3 stages as back-end. In modern Intel processors, a complicated instruction is decoded into several uops (micro-operations), and these uops can be processed simultaneously to improve performance. Thus, assume a processor can decode and dispatch 4 uops in the front-end and execute 4 uops in the back-end simultaneously (per cycle): the machine-width of this processor is 4 and it has 4 top-down slots per cycle. If a slot is spare and can be used to process a new upcoming uop, the slot is available; but if a uop occupies a slot for several cycles and can't be retired (maybe blocked by memory access), the slot is stalled and unavailable. Considering the testing instruction sequence can't be macro-fused on x86 platforms, the measured slots count should not be less than NUM_INSNS_RETIRED. Thus assert the slots count against NUM_INSNS_RETIRED. pmu_counters_test passed with this patch on Intel Sapphire Rapids. For more information about the TMA method, refer to the link below.
https://www.intel.com/content/www/us/en/docs/vtune-profiler/cookbook/2023-0/top-down-microarchitecture-analysis-method.html Signed-off-by: Dapeng Mi Link: https://lore.kernel.org/r/20240218043003.2424683-1-dapeng1.mi@linux.intel.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/x86_64/pmu_counters_test.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c index ae5f6042f1e89..29609b52f8fa0 100644 --- a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c +++ b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c @@ -119,6 +119,9 @@ static void guest_assert_event_count(uint8_t idx, case INTEL_ARCH_REFERENCE_CYCLES_INDEX: GUEST_ASSERT_NE(count, 0); break; + case INTEL_ARCH_TOPDOWN_SLOTS_INDEX: + GUEST_ASSERT(count >= NUM_INSNS_RETIRED); + break; default: break; } -- cgit 1.2.3-korg From 9397b5334af1f4af37cf35b5c647f84a948e3110 Mon Sep 17 00:00:00 2001 From: Paul Durrant Date: Thu, 15 Feb 2024 15:29:09 +0000 Subject: KVM: selftests: map Xen's shared_info page using HVA rather than GFN Using the HVA of the shared_info page is more efficient, so if the capability (KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA) is present use that method to do the mapping. NOTE: Have the juggle_shinfo_state() thread map and unmap using both GFN and HVA, to make sure the older mechanism is not broken. Signed-off-by: Paul Durrant Reviewed-by: David Woodhouse Link: https://lore.kernel.org/r/20240215152916.1158-15-paul@xen.org Signed-off-by: Sean Christopherson --- .../testing/selftests/kvm/x86_64/xen_shinfo_test.c | 44 +++++++++++++++++----- 1 file changed, 35 insertions(+), 9 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c index 9ec9ab60b63ee..a61500ff0822b 100644 --- a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c +++ b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c @@ -389,6 +389,7 @@ static int cmp_timespec(struct timespec *a, struct timespec *b) return 0; } +static struct shared_info *shinfo; static struct vcpu_info *vinfo; static struct kvm_vcpu *vcpu; @@ -404,20 +405,38 @@ static void *juggle_shinfo_state(void *arg) { struct kvm_vm *vm = (struct kvm_vm *)arg; - struct kvm_xen_hvm_attr cache_activate = { + struct kvm_xen_hvm_attr cache_activate_gfn = { .type = KVM_XEN_ATTR_TYPE_SHARED_INFO, .u.shared_info.gfn = SHINFO_REGION_GPA / PAGE_SIZE }; - struct kvm_xen_hvm_attr cache_deactivate = { + struct kvm_xen_hvm_attr cache_deactivate_gfn = { .type = KVM_XEN_ATTR_TYPE_SHARED_INFO, .u.shared_info.gfn = KVM_XEN_INVALID_GFN }; + struct kvm_xen_hvm_attr cache_activate_hva = { + .type = KVM_XEN_ATTR_TYPE_SHARED_INFO_HVA, + .u.shared_info.hva = (unsigned long)shinfo + }; + + struct kvm_xen_hvm_attr cache_deactivate_hva = { + .type = KVM_XEN_ATTR_TYPE_SHARED_INFO, + .u.shared_info.hva = 0 + }; + + int xen_caps = kvm_check_cap(KVM_CAP_XEN_HVM); + for (;;) { - __vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_activate); - __vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_deactivate); + __vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_activate_gfn); pthread_testcancel(); + __vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_deactivate_gfn); + + if (xen_caps & KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA) { + __vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_activate_hva); + pthread_testcancel(); + __vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_deactivate_hva); + } } return NULL; @@ -442,6 +461,7 @@ int 
main(int argc, char *argv[]) bool do_runstate_flag = !!(xen_caps & KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG); bool do_eventfd_tests = !!(xen_caps & KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL); bool do_evtchn_tests = do_eventfd_tests && !!(xen_caps & KVM_XEN_HVM_CONFIG_EVTCHN_SEND); + bool has_shinfo_hva = !!(xen_caps & KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA); clock_gettime(CLOCK_REALTIME, &min_ts); @@ -452,7 +472,7 @@ int main(int argc, char *argv[]) SHINFO_REGION_GPA, SHINFO_REGION_SLOT, 3, 0); virt_map(vm, SHINFO_REGION_GVA, SHINFO_REGION_GPA, 3); - struct shared_info *shinfo = addr_gpa2hva(vm, SHINFO_VADDR); + shinfo = addr_gpa2hva(vm, SHINFO_VADDR); int zero_fd = open("/dev/zero", O_RDONLY); TEST_ASSERT(zero_fd != -1, "Failed to open /dev/zero"); @@ -488,10 +508,16 @@ int main(int argc, char *argv[]) "Failed to read back RUNSTATE_UPDATE_FLAG attr"); } - struct kvm_xen_hvm_attr ha = { - .type = KVM_XEN_ATTR_TYPE_SHARED_INFO, - .u.shared_info.gfn = SHINFO_REGION_GPA / PAGE_SIZE, - }; + struct kvm_xen_hvm_attr ha = {}; + + if (has_shinfo_hva) { + ha.type = KVM_XEN_ATTR_TYPE_SHARED_INFO_HVA; + ha.u.shared_info.hva = (unsigned long)shinfo; + } else { + ha.type = KVM_XEN_ATTR_TYPE_SHARED_INFO; + ha.u.shared_info.gfn = SHINFO_ADDR / PAGE_SIZE; + } + vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &ha); /* -- cgit 1.2.3-korg From b4dfbfdc95387eeb8c900cf80116088a7a663be5 Mon Sep 17 00:00:00 2001 From: Paul Durrant Date: Thu, 15 Feb 2024 15:29:10 +0000 Subject: KVM: selftests: re-map Xen's vcpu_info using HVA rather than GPA If the relevant capability (KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA) is present then re-map vcpu_info using the HVA part way through the tests to make sure then there is no functional change. Signed-off-by: Paul Durrant Reviewed-by: David Woodhouse Link: https://lore.kernel.org/r/20240215152916.1158-16-paul@xen.org Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c index a61500ff0822b..d2ea0435f4f76 100644 --- a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c +++ b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c @@ -62,6 +62,7 @@ enum { TEST_POLL_TIMEOUT, TEST_POLL_MASKED, TEST_POLL_WAKE, + SET_VCPU_INFO, TEST_TIMER_PAST, TEST_LOCKING_SEND_RACE, TEST_LOCKING_POLL_RACE, @@ -321,6 +322,10 @@ static void guest_code(void) GUEST_SYNC(TEST_POLL_WAKE); + /* Set the vcpu_info to point at exactly the place it already is to + * make sure the attribute is functional. */ + GUEST_SYNC(SET_VCPU_INFO); + /* A timer wake an *unmasked* port which should wake us with an * actual interrupt, while we're polling on a different port. */ ports[0]++; @@ -888,6 +893,16 @@ int main(int argc, char *argv[]) alarm(1); break; + case SET_VCPU_INFO: + if (has_shinfo_hva) { + struct kvm_xen_vcpu_attr vih = { + .type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO_HVA, + .u.hva = (unsigned long)vinfo + }; + vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &vih); + } + break; + case TEST_TIMER_PAST: TEST_ASSERT(!evtchn_irq_expected, "Expected event channel IRQ but it didn't happen"); -- cgit 1.2.3-korg From 00de073e2420df02ac0f1a19dbfb60ff8eb198be Mon Sep 17 00:00:00 2001 From: Nina Schoetterl-Glausch Date: Thu, 11 Jan 2024 10:48:05 +0100 Subject: KVM: s390: selftest: memop: Fix undefined behavior If an integer's type has x bits, shifting the integer left by x or more is undefined behavior. 
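A minimal illustration (values hypothetical, mirroring the test's rotate() helper with a 16-byte size):

	static __uint128_t rotate_by_zero(__uint128_t val)
	{
		int bits = 16 * 8;	/* 128, the full width of __uint128_t */
		int amount = 0;

		/* val << (bits - amount) shifts a 128-bit type by 128 bits: UB. */
		return (val << (bits - amount)) | (val >> amount);
	}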
This can happen in the rotate function when attempting to do a rotation of the whole value by 0. Fixes: 0dd714bfd200 ("KVM: s390: selftest: memop: Add cmpxchg tests") Signed-off-by: Nina Schoetterl-Glausch Link: https://lore.kernel.org/r/20240111094805.363047-1-nsg@linux.ibm.com Acked-by: Janosch Frank Signed-off-by: Janosch Frank Message-Id: <20240111094805.363047-1-nsg@linux.ibm.com> --- tools/testing/selftests/kvm/s390x/memop.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/s390x/memop.c b/tools/testing/selftests/kvm/s390x/memop.c index bb3ca9a5d7318..4ec8d0181e8db 100644 --- a/tools/testing/selftests/kvm/s390x/memop.c +++ b/tools/testing/selftests/kvm/s390x/memop.c @@ -489,6 +489,8 @@ static __uint128_t rotate(int size, __uint128_t val, int amount) amount = (amount + bits) % bits; val = cut_to_size(size, val); + if (!amount) + return val; return (val << (bits - amount)) | (val >> amount); } -- cgit 1.2.3-korg From 2c5af1c8460376751d57c50af88a053a3b869926 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Mon, 22 Jan 2024 17:58:31 +0800 Subject: selftests/kvm: Fix issues with $(SPLIT_TESTS) The introduction of $(SPLIT_TESTS) also introduced a warning when building selftests on architectures that include get-reg-lists: make: Entering directory '/root/kvm/tools/testing/selftests/kvm' Makefile:272: warning: overriding recipe for target '/root/kvm/tools/testing/selftests/kvm/get-reg-list' Makefile:267: warning: ignoring old recipe for target '/root/kvm/tools/testing/selftests/kvm/get-reg-list' make: Leaving directory '/root/kvm/tools/testing/selftests/kvm' In addition, the rule for $(SPLIT_TESTS_TARGETS) includes _all_ the $(SPLIT_TESTS_OBJS), which only works because there is just one. So fix both by adjusting the rules: - remove $(SPLIT_TESTS_TARGETS) from the $(TEST_GEN_PROGS) rules, and rename it to $(SPLIT_TEST_GEN_PROGS) - fix $(SPLIT_TESTS_OBJS) so that it plays well with $(OUTPUT), rename it to $(SPLIT_TEST_GEN_OBJ), and list the object file explicitly in the $(SPLIT_TEST_GEN_PROGS) link rule Fixes: 17da79e009c3 ("KVM: arm64: selftests: Split get-reg-list test code", 2023-08-09) Signed-off-by: Paolo Bonzini Tested-by: Andrew Jones Reviewed-by: Marc Zyngier Signed-off-by: Anup Patel --- tools/testing/selftests/kvm/Makefile | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 492e937fab006..157309da38e26 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -260,32 +260,36 @@ LIBKVM_C_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM_C)) LIBKVM_S_OBJ := $(patsubst %.S, $(OUTPUT)/%.o, $(LIBKVM_S)) LIBKVM_STRING_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM_STRING)) LIBKVM_OBJS = $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ) $(LIBKVM_STRING_OBJ) -SPLIT_TESTS_TARGETS := $(patsubst %, $(OUTPUT)/%, $(SPLIT_TESTS)) -SPLIT_TESTS_OBJS := $(patsubst %, $(ARCH_DIR)/%.o, $(SPLIT_TESTS)) +SPLIT_TEST_GEN_PROGS := $(patsubst %, $(OUTPUT)/%, $(SPLIT_TESTS)) +SPLIT_TEST_GEN_OBJ := $(patsubst %, $(OUTPUT)/$(ARCH_DIR)/%.o, $(SPLIT_TESTS)) TEST_GEN_OBJ = $(patsubst %, %.o, $(TEST_GEN_PROGS)) TEST_GEN_OBJ += $(patsubst %, %.o, $(TEST_GEN_PROGS_EXTENDED)) TEST_DEP_FILES = $(patsubst %.o, %.d, $(TEST_GEN_OBJ)) TEST_DEP_FILES += $(patsubst %.o, %.d, $(LIBKVM_OBJS)) -TEST_DEP_FILES += $(patsubst %.o, %.d, $(SPLIT_TESTS_OBJS)) +TEST_DEP_FILES += $(patsubst %.o, %.d, $(SPLIT_TEST_GEN_OBJ)) 
-include $(TEST_DEP_FILES) -$(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): %: %.o +x := $(shell mkdir -p $(sort $(OUTPUT)/$(ARCH_DIR) $(dir $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ)))) + +$(filter-out $(SPLIT_TEST_GEN_PROGS), $(TEST_GEN_PROGS)) \ +$(TEST_GEN_PROGS_EXTENDED): %: %.o $(CC) $(CFLAGS) $(CPPFLAGS) $(LDFLAGS) $(TARGET_ARCH) $< $(LIBKVM_OBJS) $(LDLIBS) -o $@ $(TEST_GEN_OBJ): $(OUTPUT)/%.o: %.c $(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@ -$(SPLIT_TESTS_TARGETS): %: %.o $(SPLIT_TESTS_OBJS) +$(SPLIT_TEST_GEN_PROGS): $(OUTPUT)/%: $(OUTPUT)/%.o $(OUTPUT)/$(ARCH_DIR)/%.o $(CC) $(CFLAGS) $(CPPFLAGS) $(LDFLAGS) $(TARGET_ARCH) $^ $(LDLIBS) -o $@ +$(SPLIT_TEST_GEN_OBJ): $(OUTPUT)/$(ARCH_DIR)/%.o: $(ARCH_DIR)/%.c + $(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@ EXTRA_CLEAN += $(GEN_HDRS) \ $(LIBKVM_OBJS) \ - $(SPLIT_TESTS_OBJS) \ + $(SPLIT_TEST_GEN_OBJ) \ $(TEST_DEP_FILES) \ $(TEST_GEN_OBJ) \ cscope.* -x := $(shell mkdir -p $(sort $(dir $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ)))) $(LIBKVM_C_OBJ): $(OUTPUT)/%.o: %.c $(GEN_HDRS) $(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@ @@ -299,7 +303,7 @@ $(LIBKVM_STRING_OBJ): $(OUTPUT)/%.o: %.c $(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c -ffreestanding $< -o $@ x := $(shell mkdir -p $(sort $(dir $(TEST_GEN_PROGS)))) -$(SPLIT_TESTS_OBJS): $(GEN_HDRS) +$(SPLIT_TEST_GEN_OBJ): $(GEN_HDRS) $(TEST_GEN_PROGS): $(LIBKVM_OBJS) $(TEST_GEN_PROGS_EXTENDED): $(LIBKVM_OBJS) $(TEST_GEN_OBJ): $(GEN_HDRS) -- cgit 1.2.3-korg From f0617e4ac2b2fb69369486e09d0ce7653cd94985 Mon Sep 17 00:00:00 2001 From: Haibo Xu Date: Mon, 22 Jan 2024 17:58:32 +0800 Subject: KVM: arm64: selftests: Data type cleanup for arch_timer test Change the signed types in the test_args struct to unsigned for the fields that only make sense as unsigned values. Suggested-by: Andrew Jones Signed-off-by: Haibo Xu Reviewed-by: Andrew Jones Reviewed-by: Marc Zyngier Signed-off-by: Anup Patel --- tools/testing/selftests/kvm/aarch64/arch_timer.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/aarch64/arch_timer.c b/tools/testing/selftests/kvm/aarch64/arch_timer.c index 2cb8dd1f8275f..27191217f117f 100644 --- a/tools/testing/selftests/kvm/aarch64/arch_timer.c +++ b/tools/testing/selftests/kvm/aarch64/arch_timer.c @@ -42,10 +42,10 @@ #define TIMER_TEST_MIGRATION_FREQ_MS 2 struct test_args { - int nr_vcpus; - int nr_iter; - int timer_period_ms; - int migration_freq_ms; + uint32_t nr_vcpus; + uint32_t nr_iter; + uint32_t timer_period_ms; + uint32_t migration_freq_ms; struct kvm_arm_counter_offset offset; }; @@ -57,7 +57,7 @@ static struct test_args test_args = { .offset = { .reserved = 1 }, }; -#define msecs_to_usecs(msec) ((msec) * 1000LL) +#define msecs_to_usecs(msec) ((msec) * 1000ULL) #define GICD_BASE_GPA 0x8000000ULL #define GICR_BASE_GPA 0x80A0000ULL @@ -72,7 +72,7 @@ enum guest_stage { /* Shared variables between host and guest */ struct test_vcpu_shared_data { - int nr_iter; + uint32_t nr_iter; enum guest_stage guest_stage; uint64_t xcnt; }; -- cgit 1.2.3-korg From d1dafd065a23e47a559f30573bffa600e527ff2a Mon Sep 17 00:00:00 2001 From: Haibo Xu Date: Mon, 22 Jan 2024 17:58:33 +0800 Subject: KVM: arm64: selftests: Enable tuning of error margin in arch_timer test There are intermittent failures when stressing the arch-timer test in a Qemu VM: Guest assert failed, vcpu 0; stage; 4; iter: 3 ==== Test Assertion Failure ==== aarch64/arch_timer.c:196: config_iter + 1 == irq_iter pid=4048 tid=4049 errno=4 - Interrupted system call 1
0x000000000040253b: test_vcpu_run at arch_timer.c:248 2 0x0000ffffb60dd5c7: ?? ??:0 3 0x0000ffffb6145d1b: ?? ??:0 0x3 != 0x2 (config_iter + 1 != irq_iter) Further testing and debugging show that the time for an interrupt to arrive can fluctuate significantly, especially when testing in a virtual environment. To alleviate this issue, expose the timeout value as user configurable and print a hint message to increase the value when the failure is hit. Signed-off-by: Haibo Xu Reviewed-by: Andrew Jones Reviewed-by: Marc Zyngier Signed-off-by: Anup Patel --- tools/testing/selftests/kvm/aarch64/arch_timer.c | 32 +++++++++++++++++------- 1 file changed, 23 insertions(+), 9 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/aarch64/arch_timer.c b/tools/testing/selftests/kvm/aarch64/arch_timer.c index 27191217f117f..f794fd1152f55 100644 --- a/tools/testing/selftests/kvm/aarch64/arch_timer.c +++ b/tools/testing/selftests/kvm/aarch64/arch_timer.c @@ -6,16 +6,18 @@ * CVAL and TVAL registers. This consitutes the four stages in the test. * The guest's main thread configures the timer interrupt for a stage * and waits for it to fire, with a timeout equal to the timer period. - * It asserts that the timeout doesn't exceed the timer period. + * It asserts that the timeout doesn't exceed the timer period plus + * a user configurable error margin(default to 100us). * * On the other hand, upon receipt of an interrupt, the guest's interrupt * handler validates the interrupt by checking if the architectural state * is in compliance with the specifications. * * The test provides command-line options to configure the timer's - * period (-p), number of vCPUs (-n), and iterations per stage (-i). - * To stress-test the timer stack even more, an option to migrate the - * vCPUs across pCPUs (-m), at a particular rate, is also provided. + * period (-p), number of vCPUs (-n), iterations per stage (-i) and timer + * interrupt arrival error margin (-e). To stress-test the timer stack + * even more, an option to migrate the vCPUs across pCPUs (-m), at a + * particular rate, is also provided. * * Copyright (c) 2021, Google LLC.
*/ @@ -46,6 +48,7 @@ struct test_args { uint32_t nr_iter; uint32_t timer_period_ms; uint32_t migration_freq_ms; + uint32_t timer_err_margin_us; struct kvm_arm_counter_offset offset; }; @@ -54,6 +57,7 @@ static struct test_args test_args = { .nr_iter = NR_TEST_ITERS_DEF, .timer_period_ms = TIMER_TEST_PERIOD_MS_DEF, .migration_freq_ms = TIMER_TEST_MIGRATION_FREQ_MS, + .timer_err_margin_us = TIMER_TEST_ERR_MARGIN_US, .offset = { .reserved = 1 }, }; @@ -190,10 +194,14 @@ static void guest_run_stage(struct test_vcpu_shared_data *shared_data, /* Setup a timeout for the interrupt to arrive */ udelay(msecs_to_usecs(test_args.timer_period_ms) + - TIMER_TEST_ERR_MARGIN_US); + test_args.timer_err_margin_us); irq_iter = READ_ONCE(shared_data->nr_iter); - GUEST_ASSERT_EQ(config_iter + 1, irq_iter); + __GUEST_ASSERT(config_iter + 1 == irq_iter, + "config_iter + 1 = 0x%lx, irq_iter = 0x%lx.\n" + " Guest timer interrupt was not triggered within the specified\n" + " interval, try to increase the error margin with the [-e] option.\n", + config_iter + 1, irq_iter); } } @@ -408,8 +416,9 @@ static void test_vm_cleanup(struct kvm_vm *vm) static void test_print_help(char *name) { - pr_info("Usage: %s [-h] [-n nr_vcpus] [-i iterations] [-p timer_period_ms]\n", - name); + pr_info("Usage: %s [-h] [-n nr_vcpus] [-i iterations] [-p timer_period_ms]\n" + "\t\t [-m migration_freq_ms] [-o counter_offset]\n" + "\t\t [-e timer_err_margin_us]\n", name); pr_info("\t-n: Number of vCPUs to configure (default: %u; max: %u)\n", NR_VCPUS_DEF, KVM_MAX_VCPUS); pr_info("\t-i: Number of iterations per stage (default: %u)\n", @@ -419,6 +428,8 @@ static void test_print_help(char *name) pr_info("\t-m: Frequency (in ms) of vCPUs to migrate to different pCPU. 0 to turn off (default: %u)\n", TIMER_TEST_MIGRATION_FREQ_MS); pr_info("\t-o: Counter offset (in counter cycles, default: 0)\n"); + pr_info("\t-e: Interrupt arrival error margin (in us) of the guest timer (default: %u)\n", + TIMER_TEST_ERR_MARGIN_US); pr_info("\t-h: print this help screen\n"); } @@ -426,7 +437,7 @@ static bool parse_args(int argc, char *argv[]) { int opt; - while ((opt = getopt(argc, argv, "hn:i:p:m:o:")) != -1) { + while ((opt = getopt(argc, argv, "hn:i:p:m:o:e:")) != -1) { switch (opt) { case 'n': test_args.nr_vcpus = atoi_positive("Number of vCPUs", optarg); @@ -445,6 +456,9 @@ static bool parse_args(int argc, char *argv[]) case 'm': test_args.migration_freq_ms = atoi_non_negative("Frequency", optarg); break; + case 'e': + test_args.timer_err_margin_us = atoi_non_negative("Error Margin", optarg); + break; case 'o': test_args.offset.counter_offset = strtol(optarg, NULL, 0); test_args.offset.reserved = 0; -- cgit 1.2.3-korg From c20dd9e0695fc1e4da4ff01a111b0392862cfed6 Mon Sep 17 00:00:00 2001 From: Haibo Xu Date: Mon, 22 Jan 2024 17:58:34 +0800 Subject: KVM: arm64: selftests: Split arch_timer test code Split the arch-neutral test code out of aarch64/arch_timer.c and put it into a common arch_timer.c. This is a preparation for sharing the timer test code with riscv.
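The resulting arch boundary can be sketched as follows (a hedged outline; the real prototypes live in the new include/timer_test.h added below):

	/* Hooks each architecture provides (aarch64 today, riscv later) */
	struct kvm_vm *test_vm_create(void);
	void test_vm_cleanup(struct kvm_vm *vm);

main(), test_run() and the vCPU run/migration threads stay in the common arch_timer.c, so an architecture port reduces to these two hooks plus its own guest code.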
Suggested-by: Andrew Jones Signed-off-by: Haibo Xu Reviewed-by: Andrew Jones Signed-off-by: Anup Patel --- tools/testing/selftests/kvm/Makefile | 3 +- tools/testing/selftests/kvm/aarch64/arch_timer.c | 285 +---------------------- tools/testing/selftests/kvm/arch_timer.c | 257 ++++++++++++++++++++ tools/testing/selftests/kvm/include/test_util.h | 2 + tools/testing/selftests/kvm/include/timer_test.h | 44 ++++ 5 files changed, 311 insertions(+), 280 deletions(-) create mode 100644 tools/testing/selftests/kvm/arch_timer.c create mode 100644 tools/testing/selftests/kvm/include/timer_test.h (limited to 'tools') diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 157309da38e26..78960fe74480d 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -143,7 +143,6 @@ TEST_GEN_PROGS_x86_64 += system_counter_offset_test TEST_GEN_PROGS_EXTENDED_x86_64 += x86_64/nx_huge_pages_test TEST_GEN_PROGS_aarch64 += aarch64/aarch32_id_regs -TEST_GEN_PROGS_aarch64 += aarch64/arch_timer TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions TEST_GEN_PROGS_aarch64 += aarch64/hypercalls TEST_GEN_PROGS_aarch64 += aarch64/page_fault_test @@ -155,6 +154,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/vgic_init TEST_GEN_PROGS_aarch64 += aarch64/vgic_irq TEST_GEN_PROGS_aarch64 += aarch64/vpmu_counter_access TEST_GEN_PROGS_aarch64 += access_tracking_perf_test +TEST_GEN_PROGS_aarch64 += arch_timer TEST_GEN_PROGS_aarch64 += demand_paging_test TEST_GEN_PROGS_aarch64 += dirty_log_test TEST_GEN_PROGS_aarch64 += dirty_log_perf_test @@ -194,6 +194,7 @@ TEST_GEN_PROGS_riscv += kvm_page_table_test TEST_GEN_PROGS_riscv += set_memory_region_test TEST_GEN_PROGS_riscv += steal_time +SPLIT_TESTS += arch_timer SPLIT_TESTS += get-reg-list TEST_PROGS += $(TEST_PROGS_$(ARCH_DIR)) diff --git a/tools/testing/selftests/kvm/aarch64/arch_timer.c b/tools/testing/selftests/kvm/aarch64/arch_timer.c index f794fd1152f55..b9ce8c6455db6 100644 --- a/tools/testing/selftests/kvm/aarch64/arch_timer.c +++ b/tools/testing/selftests/kvm/aarch64/arch_timer.c @@ -1,68 +1,19 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * arch_timer.c - Tests the aarch64 timer IRQ functionality - * * The test validates both the virtual and physical timer IRQs using - * CVAL and TVAL registers. This consitutes the four stages in the test. - * The guest's main thread configures the timer interrupt for a stage - * and waits for it to fire, with a timeout equal to the timer period. - * It asserts that the timeout doesn't exceed the timer period plus - * a user configurable error margin(default to 100us). - * - * On the other hand, upon receipt of an interrupt, the guest's interrupt - * handler validates the interrupt by checking if the architectural state - * is in compliance with the specifications. - * - * The test provides command-line options to configure the timer's - * period (-p), number of vCPUs (-n), iterations per stage (-i) and timer - * interrupt arrival error margin (-e). To stress-test the timer stack - * even more, an option to migrate the vCPUs across pCPUs (-m), at a - * particular rate, is also provided. + * CVAL and TVAL registers. * * Copyright (c) 2021, Google LLC. 
*/ #define _GNU_SOURCE -#include -#include -#include -#include -#include -#include - -#include "kvm_util.h" -#include "processor.h" -#include "delay.h" #include "arch_timer.h" +#include "delay.h" #include "gic.h" +#include "processor.h" +#include "timer_test.h" #include "vgic.h" -#define NR_VCPUS_DEF 4 -#define NR_TEST_ITERS_DEF 5 -#define TIMER_TEST_PERIOD_MS_DEF 10 -#define TIMER_TEST_ERR_MARGIN_US 100 -#define TIMER_TEST_MIGRATION_FREQ_MS 2 - -struct test_args { - uint32_t nr_vcpus; - uint32_t nr_iter; - uint32_t timer_period_ms; - uint32_t migration_freq_ms; - uint32_t timer_err_margin_us; - struct kvm_arm_counter_offset offset; -}; - -static struct test_args test_args = { - .nr_vcpus = NR_VCPUS_DEF, - .nr_iter = NR_TEST_ITERS_DEF, - .timer_period_ms = TIMER_TEST_PERIOD_MS_DEF, - .migration_freq_ms = TIMER_TEST_MIGRATION_FREQ_MS, - .timer_err_margin_us = TIMER_TEST_ERR_MARGIN_US, - .offset = { .reserved = 1 }, -}; - -#define msecs_to_usecs(msec) ((msec) * 1000ULL) - #define GICD_BASE_GPA 0x8000000ULL #define GICR_BASE_GPA 0x80A0000ULL @@ -74,22 +25,8 @@ enum guest_stage { GUEST_STAGE_MAX, }; -/* Shared variables between host and guest */ -struct test_vcpu_shared_data { - uint32_t nr_iter; - enum guest_stage guest_stage; - uint64_t xcnt; -}; - -static struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; -static pthread_t pt_vcpu_run[KVM_MAX_VCPUS]; -static struct test_vcpu_shared_data vcpu_shared_data[KVM_MAX_VCPUS]; - static int vtimer_irq, ptimer_irq; -static unsigned long *vcpu_done_map; -static pthread_mutex_t vcpu_done_map_lock; - static void guest_configure_timer_action(struct test_vcpu_shared_data *shared_data) { @@ -230,137 +167,6 @@ static void guest_code(void) GUEST_DONE(); } -static void *test_vcpu_run(void *arg) -{ - unsigned int vcpu_idx = (unsigned long)arg; - struct ucall uc; - struct kvm_vcpu *vcpu = vcpus[vcpu_idx]; - struct kvm_vm *vm = vcpu->vm; - struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[vcpu_idx]; - - vcpu_run(vcpu); - - /* Currently, any exit from guest is an indication of completion */ - pthread_mutex_lock(&vcpu_done_map_lock); - __set_bit(vcpu_idx, vcpu_done_map); - pthread_mutex_unlock(&vcpu_done_map_lock); - - switch (get_ucall(vcpu, &uc)) { - case UCALL_SYNC: - case UCALL_DONE: - break; - case UCALL_ABORT: - sync_global_from_guest(vm, *shared_data); - fprintf(stderr, "Guest assert failed, vcpu %u; stage; %u; iter: %u\n", - vcpu_idx, shared_data->guest_stage, shared_data->nr_iter); - REPORT_GUEST_ASSERT(uc); - break; - default: - TEST_FAIL("Unexpected guest exit"); - } - - return NULL; -} - -static uint32_t test_get_pcpu(void) -{ - uint32_t pcpu; - unsigned int nproc_conf; - cpu_set_t online_cpuset; - - nproc_conf = get_nprocs_conf(); - sched_getaffinity(0, sizeof(cpu_set_t), &online_cpuset); - - /* Randomly find an available pCPU to place a vCPU on */ - do { - pcpu = rand() % nproc_conf; - } while (!CPU_ISSET(pcpu, &online_cpuset)); - - return pcpu; -} - -static int test_migrate_vcpu(unsigned int vcpu_idx) -{ - int ret; - cpu_set_t cpuset; - uint32_t new_pcpu = test_get_pcpu(); - - CPU_ZERO(&cpuset); - CPU_SET(new_pcpu, &cpuset); - - pr_debug("Migrating vCPU: %u to pCPU: %u\n", vcpu_idx, new_pcpu); - - ret = pthread_setaffinity_np(pt_vcpu_run[vcpu_idx], - sizeof(cpuset), &cpuset); - - /* Allow the error where the vCPU thread is already finished */ - TEST_ASSERT(ret == 0 || ret == ESRCH, - "Failed to migrate the vCPU:%u to pCPU: %u; ret: %d", - vcpu_idx, new_pcpu, ret); - - return ret; -} - -static void *test_vcpu_migration(void *arg) -{ - unsigned int i, 
n_done; - bool vcpu_done; - - do { - usleep(msecs_to_usecs(test_args.migration_freq_ms)); - - for (n_done = 0, i = 0; i < test_args.nr_vcpus; i++) { - pthread_mutex_lock(&vcpu_done_map_lock); - vcpu_done = test_bit(i, vcpu_done_map); - pthread_mutex_unlock(&vcpu_done_map_lock); - - if (vcpu_done) { - n_done++; - continue; - } - - test_migrate_vcpu(i); - } - } while (test_args.nr_vcpus != n_done); - - return NULL; -} - -static void test_run(struct kvm_vm *vm) -{ - pthread_t pt_vcpu_migration; - unsigned int i; - int ret; - - pthread_mutex_init(&vcpu_done_map_lock, NULL); - vcpu_done_map = bitmap_zalloc(test_args.nr_vcpus); - TEST_ASSERT(vcpu_done_map, "Failed to allocate vcpu done bitmap"); - - for (i = 0; i < (unsigned long)test_args.nr_vcpus; i++) { - ret = pthread_create(&pt_vcpu_run[i], NULL, test_vcpu_run, - (void *)(unsigned long)i); - TEST_ASSERT(!ret, "Failed to create vCPU-%d pthread", i); - } - - /* Spawn a thread to control the vCPU migrations */ - if (test_args.migration_freq_ms) { - srand(time(NULL)); - - ret = pthread_create(&pt_vcpu_migration, NULL, - test_vcpu_migration, NULL); - TEST_ASSERT(!ret, "Failed to create the migration pthread"); - } - - - for (i = 0; i < test_args.nr_vcpus; i++) - pthread_join(pt_vcpu_run[i], NULL); - - if (test_args.migration_freq_ms) - pthread_join(pt_vcpu_migration, NULL); - - bitmap_free(vcpu_done_map); -} - static void test_init_timer_irq(struct kvm_vm *vm) { /* Timer initid should be same for all the vCPUs, so query only vCPU-0 */ @@ -377,7 +183,7 @@ static void test_init_timer_irq(struct kvm_vm *vm) static int gic_fd; -static struct kvm_vm *test_vm_create(void) +struct kvm_vm *test_vm_create(void) { struct kvm_vm *vm; unsigned int i; @@ -408,87 +214,8 @@ static struct kvm_vm *test_vm_create(void) return vm; } -static void test_vm_cleanup(struct kvm_vm *vm) +void test_vm_cleanup(struct kvm_vm *vm) { close(gic_fd); kvm_vm_free(vm); } - -static void test_print_help(char *name) -{ - pr_info("Usage: %s [-h] [-n nr_vcpus] [-i iterations] [-p timer_period_ms]\n" - "\t\t [-m migration_freq_ms] [-o counter_offset]\n" - "\t\t [-e timer_err_margin_us]\n", name); - pr_info("\t-n: Number of vCPUs to configure (default: %u; max: %u)\n", - NR_VCPUS_DEF, KVM_MAX_VCPUS); - pr_info("\t-i: Number of iterations per stage (default: %u)\n", - NR_TEST_ITERS_DEF); - pr_info("\t-p: Periodicity (in ms) of the guest timer (default: %u)\n", - TIMER_TEST_PERIOD_MS_DEF); - pr_info("\t-m: Frequency (in ms) of vCPUs to migrate to different pCPU. 
0 to turn off (default: %u)\n", - TIMER_TEST_MIGRATION_FREQ_MS); - pr_info("\t-o: Counter offset (in counter cycles, default: 0)\n"); - pr_info("\t-e: Interrupt arrival error margin (in us) of the guest timer (default: %u)\n", - TIMER_TEST_ERR_MARGIN_US); - pr_info("\t-h: print this help screen\n"); -} - -static bool parse_args(int argc, char *argv[]) -{ - int opt; - - while ((opt = getopt(argc, argv, "hn:i:p:m:o:e:")) != -1) { - switch (opt) { - case 'n': - test_args.nr_vcpus = atoi_positive("Number of vCPUs", optarg); - if (test_args.nr_vcpus > KVM_MAX_VCPUS) { - pr_info("Max allowed vCPUs: %u\n", - KVM_MAX_VCPUS); - goto err; - } - break; - case 'i': - test_args.nr_iter = atoi_positive("Number of iterations", optarg); - break; - case 'p': - test_args.timer_period_ms = atoi_positive("Periodicity", optarg); - break; - case 'm': - test_args.migration_freq_ms = atoi_non_negative("Frequency", optarg); - break; - case 'e': - test_args.timer_err_margin_us = atoi_non_negative("Error Margin", optarg); - break; - case 'o': - test_args.offset.counter_offset = strtol(optarg, NULL, 0); - test_args.offset.reserved = 0; - break; - case 'h': - default: - goto err; - } - } - - return true; - -err: - test_print_help(argv[0]); - return false; -} - -int main(int argc, char *argv[]) -{ - struct kvm_vm *vm; - - if (!parse_args(argc, argv)) - exit(KSFT_SKIP); - - __TEST_REQUIRE(!test_args.migration_freq_ms || get_nprocs() >= 2, - "At least two physical CPUs needed for vCPU migration"); - - vm = test_vm_create(); - test_run(vm); - test_vm_cleanup(vm); - - return 0; -} diff --git a/tools/testing/selftests/kvm/arch_timer.c b/tools/testing/selftests/kvm/arch_timer.c new file mode 100644 index 0000000000000..f7e4cee8cb349 --- /dev/null +++ b/tools/testing/selftests/kvm/arch_timer.c @@ -0,0 +1,257 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * arch_timer.c - Tests the arch timer IRQ functionality + * + * The guest's main thread configures the timer interrupt and waits + * for it to fire, with a timeout equal to the timer period. + * It asserts that the timeout doesn't exceed the timer period plus + * a user configurable error margin(default to 100us) + * + * On the other hand, upon receipt of an interrupt, the guest's interrupt + * handler validates the interrupt by checking if the architectural state + * is in compliance with the specifications. + * + * The test provides command-line options to configure the timer's + * period (-p), number of vCPUs (-n), iterations per stage (-i) and timer + * interrupt arrival error margin (-e). To stress-test the timer stack + * even more, an option to migrate the vCPUs across pCPUs (-m), at a + * particular rate, is also provided. + * + * Copyright (c) 2021, Google LLC. 
+ */ + +#define _GNU_SOURCE + +#include +#include +#include +#include +#include + +#include "timer_test.h" + +struct test_args test_args = { + .nr_vcpus = NR_VCPUS_DEF, + .nr_iter = NR_TEST_ITERS_DEF, + .timer_period_ms = TIMER_TEST_PERIOD_MS_DEF, + .migration_freq_ms = TIMER_TEST_MIGRATION_FREQ_MS, + .timer_err_margin_us = TIMER_TEST_ERR_MARGIN_US, + .offset = { .reserved = 1 }, +}; + +struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; +struct test_vcpu_shared_data vcpu_shared_data[KVM_MAX_VCPUS]; + +static pthread_t pt_vcpu_run[KVM_MAX_VCPUS]; +static unsigned long *vcpu_done_map; +static pthread_mutex_t vcpu_done_map_lock; + +static void *test_vcpu_run(void *arg) +{ + unsigned int vcpu_idx = (unsigned long)arg; + struct ucall uc; + struct kvm_vcpu *vcpu = vcpus[vcpu_idx]; + struct kvm_vm *vm = vcpu->vm; + struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[vcpu_idx]; + + vcpu_run(vcpu); + + /* Currently, any exit from guest is an indication of completion */ + pthread_mutex_lock(&vcpu_done_map_lock); + __set_bit(vcpu_idx, vcpu_done_map); + pthread_mutex_unlock(&vcpu_done_map_lock); + + switch (get_ucall(vcpu, &uc)) { + case UCALL_SYNC: + case UCALL_DONE: + break; + case UCALL_ABORT: + sync_global_from_guest(vm, *shared_data); + fprintf(stderr, "Guest assert failed, vcpu %u; stage; %u; iter: %u\n", + vcpu_idx, shared_data->guest_stage, shared_data->nr_iter); + REPORT_GUEST_ASSERT(uc); + break; + default: + TEST_FAIL("Unexpected guest exit"); + } + + return NULL; +} + +static uint32_t test_get_pcpu(void) +{ + uint32_t pcpu; + unsigned int nproc_conf; + cpu_set_t online_cpuset; + + nproc_conf = get_nprocs_conf(); + sched_getaffinity(0, sizeof(cpu_set_t), &online_cpuset); + + /* Randomly find an available pCPU to place a vCPU on */ + do { + pcpu = rand() % nproc_conf; + } while (!CPU_ISSET(pcpu, &online_cpuset)); + + return pcpu; +} + +static int test_migrate_vcpu(unsigned int vcpu_idx) +{ + int ret; + cpu_set_t cpuset; + uint32_t new_pcpu = test_get_pcpu(); + + CPU_ZERO(&cpuset); + CPU_SET(new_pcpu, &cpuset); + + pr_debug("Migrating vCPU: %u to pCPU: %u\n", vcpu_idx, new_pcpu); + + ret = pthread_setaffinity_np(pt_vcpu_run[vcpu_idx], + sizeof(cpuset), &cpuset); + + /* Allow the error where the vCPU thread is already finished */ + TEST_ASSERT(ret == 0 || ret == ESRCH, + "Failed to migrate the vCPU:%u to pCPU: %u; ret: %d", + vcpu_idx, new_pcpu, ret); + + return ret; +} + +static void *test_vcpu_migration(void *arg) +{ + unsigned int i, n_done; + bool vcpu_done; + + do { + usleep(msecs_to_usecs(test_args.migration_freq_ms)); + + for (n_done = 0, i = 0; i < test_args.nr_vcpus; i++) { + pthread_mutex_lock(&vcpu_done_map_lock); + vcpu_done = test_bit(i, vcpu_done_map); + pthread_mutex_unlock(&vcpu_done_map_lock); + + if (vcpu_done) { + n_done++; + continue; + } + + test_migrate_vcpu(i); + } + } while (test_args.nr_vcpus != n_done); + + return NULL; +} + +static void test_run(struct kvm_vm *vm) +{ + pthread_t pt_vcpu_migration; + unsigned int i; + int ret; + + pthread_mutex_init(&vcpu_done_map_lock, NULL); + vcpu_done_map = bitmap_zalloc(test_args.nr_vcpus); + TEST_ASSERT(vcpu_done_map, "Failed to allocate vcpu done bitmap"); + + for (i = 0; i < (unsigned long)test_args.nr_vcpus; i++) { + ret = pthread_create(&pt_vcpu_run[i], NULL, test_vcpu_run, + (void *)(unsigned long)i); + TEST_ASSERT(!ret, "Failed to create vCPU-%d pthread", i); + } + + /* Spawn a thread to control the vCPU migrations */ + if (test_args.migration_freq_ms) { + srand(time(NULL)); + + ret = pthread_create(&pt_vcpu_migration, NULL, 
+ test_vcpu_migration, NULL); + TEST_ASSERT(!ret, "Failed to create the migration pthread"); + } + + + for (i = 0; i < test_args.nr_vcpus; i++) + pthread_join(pt_vcpu_run[i], NULL); + + if (test_args.migration_freq_ms) + pthread_join(pt_vcpu_migration, NULL); + + bitmap_free(vcpu_done_map); +} + +static void test_print_help(char *name) +{ + pr_info("Usage: %s [-h] [-n nr_vcpus] [-i iterations] [-p timer_period_ms]\n" + "\t\t [-m migration_freq_ms] [-o counter_offset]\n" + "\t\t [-e timer_err_margin_us]\n", name); + pr_info("\t-n: Number of vCPUs to configure (default: %u; max: %u)\n", + NR_VCPUS_DEF, KVM_MAX_VCPUS); + pr_info("\t-i: Number of iterations per stage (default: %u)\n", + NR_TEST_ITERS_DEF); + pr_info("\t-p: Periodicity (in ms) of the guest timer (default: %u)\n", + TIMER_TEST_PERIOD_MS_DEF); + pr_info("\t-m: Frequency (in ms) of vCPUs to migrate to different pCPU. 0 to turn off (default: %u)\n", + TIMER_TEST_MIGRATION_FREQ_MS); + pr_info("\t-o: Counter offset (in counter cycles, default: 0)\n"); + pr_info("\t-e: Interrupt arrival error margin (in us) of the guest timer (default: %u)\n", + TIMER_TEST_ERR_MARGIN_US); + pr_info("\t-h: print this help screen\n"); +} + +static bool parse_args(int argc, char *argv[]) +{ + int opt; + + while ((opt = getopt(argc, argv, "hn:i:p:m:o:e:")) != -1) { + switch (opt) { + case 'n': + test_args.nr_vcpus = atoi_positive("Number of vCPUs", optarg); + if (test_args.nr_vcpus > KVM_MAX_VCPUS) { + pr_info("Max allowed vCPUs: %u\n", + KVM_MAX_VCPUS); + goto err; + } + break; + case 'i': + test_args.nr_iter = atoi_positive("Number of iterations", optarg); + break; + case 'p': + test_args.timer_period_ms = atoi_positive("Periodicity", optarg); + break; + case 'm': + test_args.migration_freq_ms = atoi_non_negative("Frequency", optarg); + break; + case 'e': + test_args.timer_err_margin_us = atoi_non_negative("Error Margin", optarg); + break; + case 'o': + test_args.offset.counter_offset = strtol(optarg, NULL, 0); + test_args.offset.reserved = 0; + break; + case 'h': + default: + goto err; + } + } + + return true; + +err: + test_print_help(argv[0]); + return false; +} + +int main(int argc, char *argv[]) +{ + struct kvm_vm *vm; + + if (!parse_args(argc, argv)) + exit(KSFT_SKIP); + + __TEST_REQUIRE(!test_args.migration_freq_ms || get_nprocs() >= 2, + "At least two physical CPUs needed for vCPU migration"); + + vm = test_vm_create(); + test_run(vm); + test_vm_cleanup(vm); + + return 0; +} diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h index 50a5e31ba8da1..8a6e30612c862 100644 --- a/tools/testing/selftests/kvm/include/test_util.h +++ b/tools/testing/selftests/kvm/include/test_util.h @@ -20,6 +20,8 @@ #include #include "kselftest.h" +#define msecs_to_usecs(msec) ((msec) * 1000ULL) + static inline int _no_printf(const char *format, ...) 
{ return 0; } #ifdef DEBUG diff --git a/tools/testing/selftests/kvm/include/timer_test.h b/tools/testing/selftests/kvm/include/timer_test.h new file mode 100644 index 0000000000000..256e2d2137cf1 --- /dev/null +++ b/tools/testing/selftests/kvm/include/timer_test.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * timer test specific header + * + * Copyright (C) 2018, Google LLC + */ + +#ifndef SELFTEST_KVM_TIMER_TEST_H +#define SELFTEST_KVM_TIMER_TEST_H + +#include "kvm_util.h" + +#define NR_VCPUS_DEF 4 +#define NR_TEST_ITERS_DEF 5 +#define TIMER_TEST_PERIOD_MS_DEF 10 +#define TIMER_TEST_ERR_MARGIN_US 100 +#define TIMER_TEST_MIGRATION_FREQ_MS 2 + +/* Timer test cmdline parameters */ +struct test_args { + uint32_t nr_vcpus; + uint32_t nr_iter; + uint32_t timer_period_ms; + uint32_t migration_freq_ms; + uint32_t timer_err_margin_us; + /* TODO: Change arm specific type to a common one */ + struct kvm_arm_counter_offset offset; +}; + +/* Shared variables between host and guest */ +struct test_vcpu_shared_data { + uint32_t nr_iter; + int guest_stage; + uint64_t xcnt; +}; + +extern struct test_args test_args; +extern struct kvm_vcpu *vcpus[]; +extern struct test_vcpu_shared_data vcpu_shared_data[]; + +struct kvm_vm *test_vm_create(void); +void test_vm_cleanup(struct kvm_vm *vm); + +#endif /* SELFTEST_KVM_TIMER_TEST_H */ -- cgit 1.2.3-korg From b4b12469c5c3fbd9b9f8e5070e6e47a4337b87c6 Mon Sep 17 00:00:00 2001 From: Haibo Xu Date: Mon, 22 Jan 2024 17:58:35 +0800 Subject: KVM: selftests: Add CONFIG_64BIT definition for the build Since only 64bit KVM selftests are supported on all architectures, add the CONFIG_64BIT definition in kvm/Makefile to ensure only the 64bit definitions are available in the corresponding included files. Suggested-by: Andrew Jones Signed-off-by: Haibo Xu Reviewed-by: Andrew Jones Signed-off-by: Anup Patel --- tools/testing/selftests/kvm/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 78960fe74480d..63592045720f4 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -218,7 +218,7 @@ else LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/$(ARCH)/include endif CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \ - -Wno-gnu-variable-sized-type-not-at-end -MD -MP \ + -Wno-gnu-variable-sized-type-not-at-end -MD -MP -DCONFIG_64BIT \ -fno-builtin-memcmp -fno-builtin-memcpy -fno-builtin-memset \ -fno-builtin-strnlen \ -fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) \ -- cgit 1.2.3-korg From a69459d579df9200bc6f58ff04e1a8a4984016a7 Mon Sep 17 00:00:00 2001 From: Haibo Xu Date: Mon, 22 Jan 2024 17:58:36 +0800 Subject: tools: riscv: Add header file csr.h Borrow the csr definitions and operations from kernel's arch/riscv/include/asm/csr.h to tools/ for riscv.
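As a usage sketch (hedged; the wrapper function here is made up, but csr_read() and CSR_TIME are provided by the header below), guest code in the selftests can then access CSRs directly:

	#include <asm/csr.h>

	/* Illustrative only: read the time CSR from riscv guest code. */
	static inline unsigned long guest_read_time(void)
	{
		return csr_read(CSR_TIME);
	}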
Signed-off-by: Haibo Xu Reviewed-by: Andrew Jones Signed-off-by: Anup Patel --- tools/arch/riscv/include/asm/csr.h | 541 +++++++++++++++++++++++++++++++++++++ 1 file changed, 541 insertions(+) create mode 100644 tools/arch/riscv/include/asm/csr.h (limited to 'tools') diff --git a/tools/arch/riscv/include/asm/csr.h b/tools/arch/riscv/include/asm/csr.h new file mode 100644 index 0000000000000..0dfc09254f99a --- /dev/null +++ b/tools/arch/riscv/include/asm/csr.h @@ -0,0 +1,541 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2015 Regents of the University of California + */ + +#ifndef _ASM_RISCV_CSR_H +#define _ASM_RISCV_CSR_H + +#include + +/* Status register flags */ +#define SR_SIE _AC(0x00000002, UL) /* Supervisor Interrupt Enable */ +#define SR_MIE _AC(0x00000008, UL) /* Machine Interrupt Enable */ +#define SR_SPIE _AC(0x00000020, UL) /* Previous Supervisor IE */ +#define SR_MPIE _AC(0x00000080, UL) /* Previous Machine IE */ +#define SR_SPP _AC(0x00000100, UL) /* Previously Supervisor */ +#define SR_MPP _AC(0x00001800, UL) /* Previously Machine */ +#define SR_SUM _AC(0x00040000, UL) /* Supervisor User Memory Access */ + +#define SR_FS _AC(0x00006000, UL) /* Floating-point Status */ +#define SR_FS_OFF _AC(0x00000000, UL) +#define SR_FS_INITIAL _AC(0x00002000, UL) +#define SR_FS_CLEAN _AC(0x00004000, UL) +#define SR_FS_DIRTY _AC(0x00006000, UL) + +#define SR_VS _AC(0x00000600, UL) /* Vector Status */ +#define SR_VS_OFF _AC(0x00000000, UL) +#define SR_VS_INITIAL _AC(0x00000200, UL) +#define SR_VS_CLEAN _AC(0x00000400, UL) +#define SR_VS_DIRTY _AC(0x00000600, UL) + +#define SR_XS _AC(0x00018000, UL) /* Extension Status */ +#define SR_XS_OFF _AC(0x00000000, UL) +#define SR_XS_INITIAL _AC(0x00008000, UL) +#define SR_XS_CLEAN _AC(0x00010000, UL) +#define SR_XS_DIRTY _AC(0x00018000, UL) + +#define SR_FS_VS (SR_FS | SR_VS) /* Vector and Floating-Point Unit */ + +#ifndef CONFIG_64BIT +#define SR_SD _AC(0x80000000, UL) /* FS/VS/XS dirty */ +#else +#define SR_SD _AC(0x8000000000000000, UL) /* FS/VS/XS dirty */ +#endif + +#ifdef CONFIG_64BIT +#define SR_UXL _AC(0x300000000, UL) /* XLEN mask for U-mode */ +#define SR_UXL_32 _AC(0x100000000, UL) /* XLEN = 32 for U-mode */ +#define SR_UXL_64 _AC(0x200000000, UL) /* XLEN = 64 for U-mode */ +#endif + +/* SATP flags */ +#ifndef CONFIG_64BIT +#define SATP_PPN _AC(0x003FFFFF, UL) +#define SATP_MODE_32 _AC(0x80000000, UL) +#define SATP_MODE_SHIFT 31 +#define SATP_ASID_BITS 9 +#define SATP_ASID_SHIFT 22 +#define SATP_ASID_MASK _AC(0x1FF, UL) +#else +#define SATP_PPN _AC(0x00000FFFFFFFFFFF, UL) +#define SATP_MODE_39 _AC(0x8000000000000000, UL) +#define SATP_MODE_48 _AC(0x9000000000000000, UL) +#define SATP_MODE_57 _AC(0xa000000000000000, UL) +#define SATP_MODE_SHIFT 60 +#define SATP_ASID_BITS 16 +#define SATP_ASID_SHIFT 44 +#define SATP_ASID_MASK _AC(0xFFFF, UL) +#endif + +/* Exception cause high bit - is an interrupt if set */ +#define CAUSE_IRQ_FLAG (_AC(1, UL) << (__riscv_xlen - 1)) + +/* Interrupt causes (minus the high bit) */ +#define IRQ_S_SOFT 1 +#define IRQ_VS_SOFT 2 +#define IRQ_M_SOFT 3 +#define IRQ_S_TIMER 5 +#define IRQ_VS_TIMER 6 +#define IRQ_M_TIMER 7 +#define IRQ_S_EXT 9 +#define IRQ_VS_EXT 10 +#define IRQ_M_EXT 11 +#define IRQ_S_GEXT 12 +#define IRQ_PMU_OVF 13 +#define IRQ_LOCAL_MAX (IRQ_PMU_OVF + 1) +#define IRQ_LOCAL_MASK GENMASK((IRQ_LOCAL_MAX - 1), 0) + +/* Exception causes */ +#define EXC_INST_MISALIGNED 0 +#define EXC_INST_ACCESS 1 +#define EXC_INST_ILLEGAL 2 +#define EXC_BREAKPOINT 3 +#define EXC_LOAD_MISALIGNED 4 
+#define EXC_LOAD_ACCESS 5 +#define EXC_STORE_MISALIGNED 6 +#define EXC_STORE_ACCESS 7 +#define EXC_SYSCALL 8 +#define EXC_HYPERVISOR_SYSCALL 9 +#define EXC_SUPERVISOR_SYSCALL 10 +#define EXC_INST_PAGE_FAULT 12 +#define EXC_LOAD_PAGE_FAULT 13 +#define EXC_STORE_PAGE_FAULT 15 +#define EXC_INST_GUEST_PAGE_FAULT 20 +#define EXC_LOAD_GUEST_PAGE_FAULT 21 +#define EXC_VIRTUAL_INST_FAULT 22 +#define EXC_STORE_GUEST_PAGE_FAULT 23 + +/* PMP configuration */ +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_A_TOR 0x08 +#define PMP_A_NA4 0x10 +#define PMP_A_NAPOT 0x18 +#define PMP_L 0x80 + +/* HSTATUS flags */ +#ifdef CONFIG_64BIT +#define HSTATUS_VSXL _AC(0x300000000, UL) +#define HSTATUS_VSXL_SHIFT 32 +#endif +#define HSTATUS_VTSR _AC(0x00400000, UL) +#define HSTATUS_VTW _AC(0x00200000, UL) +#define HSTATUS_VTVM _AC(0x00100000, UL) +#define HSTATUS_VGEIN _AC(0x0003f000, UL) +#define HSTATUS_VGEIN_SHIFT 12 +#define HSTATUS_HU _AC(0x00000200, UL) +#define HSTATUS_SPVP _AC(0x00000100, UL) +#define HSTATUS_SPV _AC(0x00000080, UL) +#define HSTATUS_GVA _AC(0x00000040, UL) +#define HSTATUS_VSBE _AC(0x00000020, UL) + +/* HGATP flags */ +#define HGATP_MODE_OFF _AC(0, UL) +#define HGATP_MODE_SV32X4 _AC(1, UL) +#define HGATP_MODE_SV39X4 _AC(8, UL) +#define HGATP_MODE_SV48X4 _AC(9, UL) +#define HGATP_MODE_SV57X4 _AC(10, UL) + +#define HGATP32_MODE_SHIFT 31 +#define HGATP32_VMID_SHIFT 22 +#define HGATP32_VMID GENMASK(28, 22) +#define HGATP32_PPN GENMASK(21, 0) + +#define HGATP64_MODE_SHIFT 60 +#define HGATP64_VMID_SHIFT 44 +#define HGATP64_VMID GENMASK(57, 44) +#define HGATP64_PPN GENMASK(43, 0) + +#define HGATP_PAGE_SHIFT 12 + +#ifdef CONFIG_64BIT +#define HGATP_PPN HGATP64_PPN +#define HGATP_VMID_SHIFT HGATP64_VMID_SHIFT +#define HGATP_VMID HGATP64_VMID +#define HGATP_MODE_SHIFT HGATP64_MODE_SHIFT +#else +#define HGATP_PPN HGATP32_PPN +#define HGATP_VMID_SHIFT HGATP32_VMID_SHIFT +#define HGATP_VMID HGATP32_VMID +#define HGATP_MODE_SHIFT HGATP32_MODE_SHIFT +#endif + +/* VSIP & HVIP relation */ +#define VSIP_TO_HVIP_SHIFT (IRQ_VS_SOFT - IRQ_S_SOFT) +#define VSIP_VALID_MASK ((_AC(1, UL) << IRQ_S_SOFT) | \ + (_AC(1, UL) << IRQ_S_TIMER) | \ + (_AC(1, UL) << IRQ_S_EXT)) + +/* AIA CSR bits */ +#define TOPI_IID_SHIFT 16 +#define TOPI_IID_MASK GENMASK(11, 0) +#define TOPI_IPRIO_MASK GENMASK(7, 0) +#define TOPI_IPRIO_BITS 8 + +#define TOPEI_ID_SHIFT 16 +#define TOPEI_ID_MASK GENMASK(10, 0) +#define TOPEI_PRIO_MASK GENMASK(10, 0) + +#define ISELECT_IPRIO0 0x30 +#define ISELECT_IPRIO15 0x3f +#define ISELECT_MASK GENMASK(8, 0) + +#define HVICTL_VTI BIT(30) +#define HVICTL_IID GENMASK(27, 16) +#define HVICTL_IID_SHIFT 16 +#define HVICTL_DPR BIT(9) +#define HVICTL_IPRIOM BIT(8) +#define HVICTL_IPRIO GENMASK(7, 0) + +/* xENVCFG flags */ +#define ENVCFG_STCE (_AC(1, ULL) << 63) +#define ENVCFG_PBMTE (_AC(1, ULL) << 62) +#define ENVCFG_CBZE (_AC(1, UL) << 7) +#define ENVCFG_CBCFE (_AC(1, UL) << 6) +#define ENVCFG_CBIE_SHIFT 4 +#define ENVCFG_CBIE (_AC(0x3, UL) << ENVCFG_CBIE_SHIFT) +#define ENVCFG_CBIE_ILL _AC(0x0, UL) +#define ENVCFG_CBIE_FLUSH _AC(0x1, UL) +#define ENVCFG_CBIE_INV _AC(0x3, UL) +#define ENVCFG_FIOM _AC(0x1, UL) + +/* Smstateen bits */ +#define SMSTATEEN0_AIA_IMSIC_SHIFT 58 +#define SMSTATEEN0_AIA_IMSIC (_ULL(1) << SMSTATEEN0_AIA_IMSIC_SHIFT) +#define SMSTATEEN0_AIA_SHIFT 59 +#define SMSTATEEN0_AIA (_ULL(1) << SMSTATEEN0_AIA_SHIFT) +#define SMSTATEEN0_AIA_ISEL_SHIFT 60 +#define SMSTATEEN0_AIA_ISEL (_ULL(1) << SMSTATEEN0_AIA_ISEL_SHIFT) +#define 
SMSTATEEN0_HSENVCFG_SHIFT 62 +#define SMSTATEEN0_HSENVCFG (_ULL(1) << SMSTATEEN0_HSENVCFG_SHIFT) +#define SMSTATEEN0_SSTATEEN0_SHIFT 63 +#define SMSTATEEN0_SSTATEEN0 (_ULL(1) << SMSTATEEN0_SSTATEEN0_SHIFT) + +/* symbolic CSR names: */ +#define CSR_CYCLE 0xc00 +#define CSR_TIME 0xc01 +#define CSR_INSTRET 0xc02 +#define CSR_HPMCOUNTER3 0xc03 +#define CSR_HPMCOUNTER4 0xc04 +#define CSR_HPMCOUNTER5 0xc05 +#define CSR_HPMCOUNTER6 0xc06 +#define CSR_HPMCOUNTER7 0xc07 +#define CSR_HPMCOUNTER8 0xc08 +#define CSR_HPMCOUNTER9 0xc09 +#define CSR_HPMCOUNTER10 0xc0a +#define CSR_HPMCOUNTER11 0xc0b +#define CSR_HPMCOUNTER12 0xc0c +#define CSR_HPMCOUNTER13 0xc0d +#define CSR_HPMCOUNTER14 0xc0e +#define CSR_HPMCOUNTER15 0xc0f +#define CSR_HPMCOUNTER16 0xc10 +#define CSR_HPMCOUNTER17 0xc11 +#define CSR_HPMCOUNTER18 0xc12 +#define CSR_HPMCOUNTER19 0xc13 +#define CSR_HPMCOUNTER20 0xc14 +#define CSR_HPMCOUNTER21 0xc15 +#define CSR_HPMCOUNTER22 0xc16 +#define CSR_HPMCOUNTER23 0xc17 +#define CSR_HPMCOUNTER24 0xc18 +#define CSR_HPMCOUNTER25 0xc19 +#define CSR_HPMCOUNTER26 0xc1a +#define CSR_HPMCOUNTER27 0xc1b +#define CSR_HPMCOUNTER28 0xc1c +#define CSR_HPMCOUNTER29 0xc1d +#define CSR_HPMCOUNTER30 0xc1e +#define CSR_HPMCOUNTER31 0xc1f +#define CSR_CYCLEH 0xc80 +#define CSR_TIMEH 0xc81 +#define CSR_INSTRETH 0xc82 +#define CSR_HPMCOUNTER3H 0xc83 +#define CSR_HPMCOUNTER4H 0xc84 +#define CSR_HPMCOUNTER5H 0xc85 +#define CSR_HPMCOUNTER6H 0xc86 +#define CSR_HPMCOUNTER7H 0xc87 +#define CSR_HPMCOUNTER8H 0xc88 +#define CSR_HPMCOUNTER9H 0xc89 +#define CSR_HPMCOUNTER10H 0xc8a +#define CSR_HPMCOUNTER11H 0xc8b +#define CSR_HPMCOUNTER12H 0xc8c +#define CSR_HPMCOUNTER13H 0xc8d +#define CSR_HPMCOUNTER14H 0xc8e +#define CSR_HPMCOUNTER15H 0xc8f +#define CSR_HPMCOUNTER16H 0xc90 +#define CSR_HPMCOUNTER17H 0xc91 +#define CSR_HPMCOUNTER18H 0xc92 +#define CSR_HPMCOUNTER19H 0xc93 +#define CSR_HPMCOUNTER20H 0xc94 +#define CSR_HPMCOUNTER21H 0xc95 +#define CSR_HPMCOUNTER22H 0xc96 +#define CSR_HPMCOUNTER23H 0xc97 +#define CSR_HPMCOUNTER24H 0xc98 +#define CSR_HPMCOUNTER25H 0xc99 +#define CSR_HPMCOUNTER26H 0xc9a +#define CSR_HPMCOUNTER27H 0xc9b +#define CSR_HPMCOUNTER28H 0xc9c +#define CSR_HPMCOUNTER29H 0xc9d +#define CSR_HPMCOUNTER30H 0xc9e +#define CSR_HPMCOUNTER31H 0xc9f + +#define CSR_SSCOUNTOVF 0xda0 + +#define CSR_SSTATUS 0x100 +#define CSR_SIE 0x104 +#define CSR_STVEC 0x105 +#define CSR_SCOUNTEREN 0x106 +#define CSR_SENVCFG 0x10a +#define CSR_SSTATEEN0 0x10c +#define CSR_SSCRATCH 0x140 +#define CSR_SEPC 0x141 +#define CSR_SCAUSE 0x142 +#define CSR_STVAL 0x143 +#define CSR_SIP 0x144 +#define CSR_SATP 0x180 + +#define CSR_STIMECMP 0x14D +#define CSR_STIMECMPH 0x15D + +/* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */ +#define CSR_SISELECT 0x150 +#define CSR_SIREG 0x151 + +/* Supervisor-Level Interrupts (AIA) */ +#define CSR_STOPEI 0x15c +#define CSR_STOPI 0xdb0 + +/* Supervisor-Level High-Half CSRs (AIA) */ +#define CSR_SIEH 0x114 +#define CSR_SIPH 0x154 + +#define CSR_VSSTATUS 0x200 +#define CSR_VSIE 0x204 +#define CSR_VSTVEC 0x205 +#define CSR_VSSCRATCH 0x240 +#define CSR_VSEPC 0x241 +#define CSR_VSCAUSE 0x242 +#define CSR_VSTVAL 0x243 +#define CSR_VSIP 0x244 +#define CSR_VSATP 0x280 +#define CSR_VSTIMECMP 0x24D +#define CSR_VSTIMECMPH 0x25D + +#define CSR_HSTATUS 0x600 +#define CSR_HEDELEG 0x602 +#define CSR_HIDELEG 0x603 +#define CSR_HIE 0x604 +#define CSR_HTIMEDELTA 0x605 +#define CSR_HCOUNTEREN 0x606 +#define CSR_HGEIE 0x607 +#define CSR_HENVCFG 0x60a +#define CSR_HTIMEDELTAH 0x615 +#define CSR_HENVCFGH 0x61a 
+#define CSR_HTVAL 0x643 +#define CSR_HIP 0x644 +#define CSR_HVIP 0x645 +#define CSR_HTINST 0x64a +#define CSR_HGATP 0x680 +#define CSR_HGEIP 0xe12 + +/* Virtual Interrupts and Interrupt Priorities (H-extension with AIA) */ +#define CSR_HVIEN 0x608 +#define CSR_HVICTL 0x609 +#define CSR_HVIPRIO1 0x646 +#define CSR_HVIPRIO2 0x647 + +/* VS-Level Window to Indirectly Accessed Registers (H-extension with AIA) */ +#define CSR_VSISELECT 0x250 +#define CSR_VSIREG 0x251 + +/* VS-Level Interrupts (H-extension with AIA) */ +#define CSR_VSTOPEI 0x25c +#define CSR_VSTOPI 0xeb0 + +/* Hypervisor and VS-Level High-Half CSRs (H-extension with AIA) */ +#define CSR_HIDELEGH 0x613 +#define CSR_HVIENH 0x618 +#define CSR_HVIPH 0x655 +#define CSR_HVIPRIO1H 0x656 +#define CSR_HVIPRIO2H 0x657 +#define CSR_VSIEH 0x214 +#define CSR_VSIPH 0x254 + +/* Hypervisor stateen CSRs */ +#define CSR_HSTATEEN0 0x60c +#define CSR_HSTATEEN0H 0x61c + +#define CSR_MSTATUS 0x300 +#define CSR_MISA 0x301 +#define CSR_MIDELEG 0x303 +#define CSR_MIE 0x304 +#define CSR_MTVEC 0x305 +#define CSR_MENVCFG 0x30a +#define CSR_MENVCFGH 0x31a +#define CSR_MSCRATCH 0x340 +#define CSR_MEPC 0x341 +#define CSR_MCAUSE 0x342 +#define CSR_MTVAL 0x343 +#define CSR_MIP 0x344 +#define CSR_PMPCFG0 0x3a0 +#define CSR_PMPADDR0 0x3b0 +#define CSR_MVENDORID 0xf11 +#define CSR_MARCHID 0xf12 +#define CSR_MIMPID 0xf13 +#define CSR_MHARTID 0xf14 + +/* Machine-Level Window to Indirectly Accessed Registers (AIA) */ +#define CSR_MISELECT 0x350 +#define CSR_MIREG 0x351 + +/* Machine-Level Interrupts (AIA) */ +#define CSR_MTOPEI 0x35c +#define CSR_MTOPI 0xfb0 + +/* Virtual Interrupts for Supervisor Level (AIA) */ +#define CSR_MVIEN 0x308 +#define CSR_MVIP 0x309 + +/* Machine-Level High-Half CSRs (AIA) */ +#define CSR_MIDELEGH 0x313 +#define CSR_MIEH 0x314 +#define CSR_MVIENH 0x318 +#define CSR_MVIPH 0x319 +#define CSR_MIPH 0x354 + +#define CSR_VSTART 0x8 +#define CSR_VCSR 0xf +#define CSR_VL 0xc20 +#define CSR_VTYPE 0xc21 +#define CSR_VLENB 0xc22 + +#ifdef CONFIG_RISCV_M_MODE +# define CSR_STATUS CSR_MSTATUS +# define CSR_IE CSR_MIE +# define CSR_TVEC CSR_MTVEC +# define CSR_SCRATCH CSR_MSCRATCH +# define CSR_EPC CSR_MEPC +# define CSR_CAUSE CSR_MCAUSE +# define CSR_TVAL CSR_MTVAL +# define CSR_IP CSR_MIP + +# define CSR_IEH CSR_MIEH +# define CSR_ISELECT CSR_MISELECT +# define CSR_IREG CSR_MIREG +# define CSR_IPH CSR_MIPH +# define CSR_TOPEI CSR_MTOPEI +# define CSR_TOPI CSR_MTOPI + +# define SR_IE SR_MIE +# define SR_PIE SR_MPIE +# define SR_PP SR_MPP + +# define RV_IRQ_SOFT IRQ_M_SOFT +# define RV_IRQ_TIMER IRQ_M_TIMER +# define RV_IRQ_EXT IRQ_M_EXT +#else /* CONFIG_RISCV_M_MODE */ +# define CSR_STATUS CSR_SSTATUS +# define CSR_IE CSR_SIE +# define CSR_TVEC CSR_STVEC +# define CSR_SCRATCH CSR_SSCRATCH +# define CSR_EPC CSR_SEPC +# define CSR_CAUSE CSR_SCAUSE +# define CSR_TVAL CSR_STVAL +# define CSR_IP CSR_SIP + +# define CSR_IEH CSR_SIEH +# define CSR_ISELECT CSR_SISELECT +# define CSR_IREG CSR_SIREG +# define CSR_IPH CSR_SIPH +# define CSR_TOPEI CSR_STOPEI +# define CSR_TOPI CSR_STOPI + +# define SR_IE SR_SIE +# define SR_PIE SR_SPIE +# define SR_PP SR_SPP + +# define RV_IRQ_SOFT IRQ_S_SOFT +# define RV_IRQ_TIMER IRQ_S_TIMER +# define RV_IRQ_EXT IRQ_S_EXT +# define RV_IRQ_PMU IRQ_PMU_OVF +# define SIP_LCOFIP (_AC(0x1, UL) << IRQ_PMU_OVF) + +#endif /* !CONFIG_RISCV_M_MODE */ + +/* IE/IP (Supervisor/Machine Interrupt Enable/Pending) flags */ +#define IE_SIE (_AC(0x1, UL) << RV_IRQ_SOFT) +#define IE_TIE (_AC(0x1, UL) << RV_IRQ_TIMER) +#define IE_EIE (_AC(0x1, UL) 
<< RV_IRQ_EXT) + +#ifdef __ASSEMBLY__ +#define __ASM_STR(x) x +#else +#define __ASM_STR(x) #x +#endif + +#ifndef __ASSEMBLY__ + +#define csr_swap(csr, val) \ +({ \ + unsigned long __v = (unsigned long)(val); \ + __asm__ __volatile__ ("csrrw %0, " __ASM_STR(csr) ", %1"\ + : "=r" (__v) : "rK" (__v) \ + : "memory"); \ + __v; \ +}) + +#define csr_read(csr) \ +({ \ + register unsigned long __v; \ + __asm__ __volatile__ ("csrr %0, " __ASM_STR(csr) \ + : "=r" (__v) : \ + : "memory"); \ + __v; \ +}) + +#define csr_write(csr, val) \ +({ \ + unsigned long __v = (unsigned long)(val); \ + __asm__ __volatile__ ("csrw " __ASM_STR(csr) ", %0" \ + : : "rK" (__v) \ + : "memory"); \ +}) + +#define csr_read_set(csr, val) \ +({ \ + unsigned long __v = (unsigned long)(val); \ + __asm__ __volatile__ ("csrrs %0, " __ASM_STR(csr) ", %1"\ + : "=r" (__v) : "rK" (__v) \ + : "memory"); \ + __v; \ +}) + +#define csr_set(csr, val) \ +({ \ + unsigned long __v = (unsigned long)(val); \ + __asm__ __volatile__ ("csrs " __ASM_STR(csr) ", %0" \ + : : "rK" (__v) \ + : "memory"); \ +}) + +#define csr_read_clear(csr, val) \ +({ \ + unsigned long __v = (unsigned long)(val); \ + __asm__ __volatile__ ("csrrc %0, " __ASM_STR(csr) ", %1"\ + : "=r" (__v) : "rK" (__v) \ + : "memory"); \ + __v; \ +}) + +#define csr_clear(csr, val) \ +({ \ + unsigned long __v = (unsigned long)(val); \ + __asm__ __volatile__ ("csrc " __ASM_STR(csr) ", %0" \ + : : "rK" (__v) \ + : "memory"); \ +}) + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_RISCV_CSR_H */ -- cgit 1.2.3-korg From 1d50c77208933fd1c18be8359633913d9c482e5a Mon Sep 17 00:00:00 2001 From: Haibo Xu Date: Mon, 22 Jan 2024 17:58:37 +0800 Subject: tools: riscv: Add header file vdso/processor.h Borrow the cpu_relax() definitions from kernel's arch/riscv/include/asm/vdso/processor.h to tools/ for riscv. Signed-off-by: Haibo Xu Reviewed-by: Andrew Jones Signed-off-by: Anup Patel --- tools/arch/riscv/include/asm/vdso/processor.h | 32 +++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 tools/arch/riscv/include/asm/vdso/processor.h (limited to 'tools') diff --git a/tools/arch/riscv/include/asm/vdso/processor.h b/tools/arch/riscv/include/asm/vdso/processor.h new file mode 100644 index 0000000000000..662aca0398481 --- /dev/null +++ b/tools/arch/riscv/include/asm/vdso/processor.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __ASM_VDSO_PROCESSOR_H +#define __ASM_VDSO_PROCESSOR_H + +#ifndef __ASSEMBLY__ + +#include + +static inline void cpu_relax(void) +{ +#ifdef __riscv_muldiv + int dummy; + /* In lieu of a halt instruction, induce a long-latency stall. */ + __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy)); +#endif + +#ifdef CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE + /* + * Reduce instruction retirement. + * This assumes the PC changes. 
+ */ + __asm__ __volatile__ ("pause"); +#else + /* Encoding of the pause instruction */ + __asm__ __volatile__ (".4byte 0x100000F"); +#endif + barrier(); +} + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_VDSO_PROCESSOR_H */ -- cgit 1.2.3-korg From feb2c8fae3b8703408f01a8db822dd05b1a576ee Mon Sep 17 00:00:00 2001 From: Haibo Xu Date: Mon, 22 Jan 2024 17:58:38 +0800 Subject: KVM: riscv: selftests: Switch to use macro from csr.h Signed-off-by: Haibo Xu Reviewed-by: Andrew Jones Signed-off-by: Anup Patel --- tools/testing/selftests/kvm/include/riscv/processor.h | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/include/riscv/processor.h b/tools/testing/selftests/kvm/include/riscv/processor.h index a0f9efe5a2a8d..b6b95e747688e 100644 --- a/tools/testing/selftests/kvm/include/riscv/processor.h +++ b/tools/testing/selftests/kvm/include/riscv/processor.h @@ -7,8 +7,9 @@ #ifndef SELFTEST_KVM_PROCESSOR_H #define SELFTEST_KVM_PROCESSOR_H -#include "kvm_util.h" #include +#include +#include "kvm_util.h" static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t subtype, uint64_t idx, uint64_t size) @@ -101,13 +102,6 @@ static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t subtype, #define PGTBL_PAGE_SIZE PGTBL_L0_BLOCK_SIZE #define PGTBL_PAGE_SIZE_SHIFT PGTBL_L0_BLOCK_SHIFT -#define SATP_PPN _AC(0x00000FFFFFFFFFFF, UL) -#define SATP_MODE_39 _AC(0x8000000000000000, UL) -#define SATP_MODE_48 _AC(0x9000000000000000, UL) -#define SATP_ASID_BITS 16 -#define SATP_ASID_SHIFT 44 -#define SATP_ASID_MASK _AC(0xFFFF, UL) - /* SBI return error codes */ #define SBI_SUCCESS 0 #define SBI_ERR_FAILURE -1 -- cgit 1.2.3-korg From e10086285659bb7ecc5819e5c7e47f5bdc02668d Mon Sep 17 00:00:00 2001 From: Thomas Huth Date: Thu, 8 Feb 2024 21:48:37 +0100 Subject: KVM: selftests: x86: sync_regs_test: Use vcpu_run() where appropriate In the spots where we are expecting a successful run, we should use vcpu_run() instead of _vcpu_run() to make sure that the run did not fail. Suggested-by: Sean Christopherson Signed-off-by: Thomas Huth Link: https://lore.kernel.org/r/20240208204844.119326-2-thuth@redhat.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/x86_64/sync_regs_test.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/x86_64/sync_regs_test.c b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c index 00965ba33f730..8c3898cf79b31 100644 --- a/tools/testing/selftests/kvm/x86_64/sync_regs_test.c +++ b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c @@ -259,7 +259,7 @@ int main(int argc, char *argv[]) /* Request and verify all valid register sets. 
*/ /* TODO: BUILD TIME CHECK: TEST_ASSERT(KVM_SYNC_X86_NUM_FIELDS != 3); */ run->kvm_valid_regs = TEST_SYNC_FIELDS; - rv = _vcpu_run(vcpu); + vcpu_run(vcpu); TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); vcpu_regs_get(vcpu, &regs); @@ -278,7 +278,7 @@ int main(int argc, char *argv[]) run->kvm_valid_regs = TEST_SYNC_FIELDS; run->kvm_dirty_regs = KVM_SYNC_X86_REGS | KVM_SYNC_X86_SREGS; - rv = _vcpu_run(vcpu); + vcpu_run(vcpu); TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); TEST_ASSERT(run->s.regs.regs.rbx == 0xBAD1DEA + 1, "rbx sync regs value incorrect 0x%llx.", @@ -302,7 +302,7 @@ int main(int argc, char *argv[]) run->kvm_valid_regs = TEST_SYNC_FIELDS; run->kvm_dirty_regs = 0; run->s.regs.regs.rbx = 0xDEADBEEF; - rv = _vcpu_run(vcpu); + vcpu_run(vcpu); TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); TEST_ASSERT(run->s.regs.regs.rbx != 0xDEADBEEF, "rbx sync regs value incorrect 0x%llx.", @@ -317,7 +317,7 @@ int main(int argc, char *argv[]) run->s.regs.regs.rbx = 0xAAAA; regs.rbx = 0xBAC0; vcpu_regs_set(vcpu, &regs); - rv = _vcpu_run(vcpu); + vcpu_run(vcpu); TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); TEST_ASSERT(run->s.regs.regs.rbx == 0xAAAA, "rbx sync regs value incorrect 0x%llx.", @@ -334,7 +334,7 @@ int main(int argc, char *argv[]) run->kvm_valid_regs = 0; run->kvm_dirty_regs = TEST_SYNC_FIELDS; run->s.regs.regs.rbx = 0xBBBB; - rv = _vcpu_run(vcpu); + vcpu_run(vcpu); TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); TEST_ASSERT(run->s.regs.regs.rbx == 0xBBBB, "rbx sync regs value incorrect 0x%llx.", -- cgit 1.2.3-korg From 221d65449453846bbf6801d0ecf7dfdf4f413ad9 Mon Sep 17 00:00:00 2001 From: Thomas Huth Date: Thu, 8 Feb 2024 21:48:38 +0100 Subject: KVM: selftests: x86: sync_regs_test: Get regs structure before modifying it The regs structure just accidentally contains the right values from the previous test in the spot where we want to change rbx. It's cleaner if we properly initialize the structure here before using it. Suggested-by: Sean Christopherson Signed-off-by: Thomas Huth Link: https://lore.kernel.org/r/20240208204844.119326-3-thuth@redhat.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/x86_64/sync_regs_test.c | 1 + 1 file changed, 1 insertion(+) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/x86_64/sync_regs_test.c b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c index 8c3898cf79b31..1cd19dfa0046c 100644 --- a/tools/testing/selftests/kvm/x86_64/sync_regs_test.c +++ b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c @@ -315,6 +315,7 @@ int main(int argc, char *argv[]) run->kvm_valid_regs = 0; run->kvm_dirty_regs = 0; run->s.regs.regs.rbx = 0xAAAA; + vcpu_regs_get(vcpu, &regs); regs.rbx = 0xBAC0; vcpu_regs_set(vcpu, &regs); vcpu_run(vcpu); -- cgit 1.2.3-korg From 53a43dd48f8e5e9cc046f14506a11250efc46bf6 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 8 Feb 2024 21:48:39 +0100 Subject: KVM: selftests: Move setting a vCPU's entry point to a dedicated API Extract the code to set a vCPU's entry point out of vm_arch_vcpu_add() and into a new API, vcpu_arch_set_entry_point(). Providing a separate API will allow creating a KVM selftests harness that can handle tests that use different entry points for sub-tests, whereas *requiring* the entry point to be specified at vCPU creation makes it difficult to create a generic harness, e.g. the boilerplate setup/teardown can't easily create and destroy the VM and vCPUs.
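As a hedged sketch of the flow this enables (the helper name is hypothetical, not part of the patch), a harness can now defer the entry point to each sub-test:

	static void run_subtest(struct kvm_vm *vm, void *guest_code)
	{
		/* vCPU creation no longer commits to an entry point... */
		struct kvm_vcpu *vcpu = vm_arch_vcpu_add(vm, 0);

		/* ...so each sub-test can set its own before running. */
		vcpu_arch_set_entry_point(vcpu, guest_code);
		vcpu_run(vcpu);
	}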
Signed-off-by: Thomas Huth Link: https://lore.kernel.org/r/20240208204844.119326-4-thuth@redhat.com Signed-off-by: Sean Christopherson --- .../testing/selftests/kvm/include/kvm_util_base.h | 11 ++++++---- .../testing/selftests/kvm/lib/aarch64/processor.c | 24 ++++++++++++++++------ tools/testing/selftests/kvm/lib/riscv/processor.c | 9 +++++--- tools/testing/selftests/kvm/lib/s390x/processor.c | 13 ++++++------ tools/testing/selftests/kvm/lib/x86_64/processor.c | 13 +++++++++--- 5 files changed, 48 insertions(+), 22 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h index 9e5afc472c142..a6e7738a8db73 100644 --- a/tools/testing/selftests/kvm/include/kvm_util_base.h +++ b/tools/testing/selftests/kvm/include/kvm_util_base.h @@ -969,15 +969,18 @@ static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu, * Input Args: * vm - Virtual Machine * vcpu_id - The id of the VCPU to add to the VM. - * guest_code - The vCPU's entry point */ -struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, - void *guest_code); +struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id); +void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code); static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, void *guest_code) { - return vm_arch_vcpu_add(vm, vcpu_id, guest_code); + struct kvm_vcpu *vcpu = vm_arch_vcpu_add(vm, vcpu_id); + + vcpu_arch_set_entry_point(vcpu, guest_code); + + return vcpu; } /* Re-create a vCPU after restarting a VM, e.g. for state save/restore tests. */ diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c index 41c776b642c0c..c83616e19bad4 100644 --- a/tools/testing/selftests/kvm/lib/aarch64/processor.c +++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c @@ -365,8 +365,13 @@ void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent) indent, "", pstate, pc); } -struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, - struct kvm_vcpu_init *init, void *guest_code) +void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code) +{ + vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code); +} + +static struct kvm_vcpu *__aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, + struct kvm_vcpu_init *init) { size_t stack_size; uint64_t stack_vaddr; @@ -381,15 +386,22 @@ struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, aarch64_vcpu_setup(vcpu, init); vcpu_set_reg(vcpu, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size); - vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code); + return vcpu; +} + +struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, + struct kvm_vcpu_init *init, void *guest_code) +{ + struct kvm_vcpu *vcpu = __aarch64_vcpu_add(vm, vcpu_id, init); + + vcpu_arch_set_entry_point(vcpu, guest_code); return vcpu; } -struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, - void *guest_code) +struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) { - return aarch64_vcpu_add(vm, vcpu_id, NULL, guest_code); + return __aarch64_vcpu_add(vm, vcpu_id, NULL); } void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...) 
diff --git a/tools/testing/selftests/kvm/lib/riscv/processor.c b/tools/testing/selftests/kvm/lib/riscv/processor.c index 7ca736fb41940..c993947f07823 100644 --- a/tools/testing/selftests/kvm/lib/riscv/processor.c +++ b/tools/testing/selftests/kvm/lib/riscv/processor.c @@ -277,8 +277,12 @@ static void __aligned(16) guest_unexp_trap(void) 0, 0, 0, 0, 0, 0); } -struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, - void *guest_code) +void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code) +{ + vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.pc), (unsigned long)guest_code); +} + +struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) { int r; size_t stack_size; @@ -312,7 +316,6 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, /* Setup stack pointer and program counter of guest */ vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.sp), stack_vaddr + stack_size); - vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.pc), (unsigned long)guest_code); /* Setup default exception vector of guest */ vcpu_set_reg(vcpu, RISCV_GENERAL_CSR_REG(stvec), (unsigned long)guest_unexp_trap); diff --git a/tools/testing/selftests/kvm/lib/s390x/processor.c b/tools/testing/selftests/kvm/lib/s390x/processor.c index 15945121daf17..cd5301cc9788a 100644 --- a/tools/testing/selftests/kvm/lib/s390x/processor.c +++ b/tools/testing/selftests/kvm/lib/s390x/processor.c @@ -155,15 +155,18 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) virt_dump_region(stream, vm, indent, vm->pgd); } -struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, - void *guest_code) +void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code) +{ + vcpu->run->psw_addr = (uintptr_t)guest_code; +} + +struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) { size_t stack_size = DEFAULT_STACK_PGS * getpagesize(); uint64_t stack_vaddr; struct kvm_regs regs; struct kvm_sregs sregs; struct kvm_vcpu *vcpu; - struct kvm_run *run; TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x", vm->page_size); @@ -184,9 +187,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, sregs.crs[1] = vm->pgd | 0xf; /* Primary region table */ vcpu_sregs_set(vcpu, &sregs); - run = vcpu->run; - run->psw_mask = 0x0400000180000000ULL; /* DAT enabled + 64 bit mode */ - run->psw_addr = (uintptr_t)guest_code; + vcpu->run->psw_mask = 0x0400000180000000ULL; /* DAT enabled + 64 bit mode */ return vcpu; } diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c index d8288374078e4..b9b6cb730a088 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/processor.c +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c @@ -562,8 +562,16 @@ void kvm_arch_vm_post_create(struct kvm_vm *vm) sync_global_to_guest(vm, host_cpu_is_amd); } -struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, - void *guest_code) +void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code) +{ + struct kvm_regs regs; + + vcpu_regs_get(vcpu, &regs); + regs.rip = (unsigned long) guest_code; + vcpu_regs_set(vcpu, &regs); +} + +struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) { struct kvm_mp_state mp_state; struct kvm_regs regs; @@ -597,7 +605,6 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, vcpu_regs_get(vcpu, &regs); regs.rflags = regs.rflags | 0x2; regs.rsp = stack_vaddr; - regs.rip = (unsigned long) guest_code; vcpu_regs_set(vcpu, &regs); /* Setup the MP state
*/ -- cgit 1.2.3-korg From 55f2cf88486cdc504176d4c5ebccdb65ea4be161 Mon Sep 17 00:00:00 2001 From: Thomas Huth Date: Thu, 8 Feb 2024 21:48:40 +0100 Subject: KVM: selftests: Add a macro to define a test with one vcpu Most tests are currently not giving any proper output for the user to see how many sub-tests have already been run, or whether new sub-tests are part of a binary or not. So it would be good to support TAP output in the KVM selftests. There is already a nice framework for this in the kselftest_harness.h header which we can use. But since we also need a vcpu in most KVM selftests, it also makes sense to introduce our own wrapper around this which takes care of creating a VM with one vcpu, so we don't have to repeat this boilerplate in each and every test. Thus let's introduce a KVM_ONE_VCPU_TEST() macro here which takes care of this. Suggested-by: Sean Christopherson Link: https://lore.kernel.org/all/Y2v+B3xxYKJSM%2FfH@google.com/ Signed-off-by: Thomas Huth Link: https://lore.kernel.org/r/20240208204844.119326-5-thuth@redhat.com Signed-off-by: Sean Christopherson --- .../selftests/kvm/include/kvm_test_harness.h | 36 ++++++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 tools/testing/selftests/kvm/include/kvm_test_harness.h (limited to 'tools') diff --git a/tools/testing/selftests/kvm/include/kvm_test_harness.h b/tools/testing/selftests/kvm/include/kvm_test_harness.h new file mode 100644 index 0000000000000..8f7c6858e8e2d --- /dev/null +++ b/tools/testing/selftests/kvm/include/kvm_test_harness.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Macros for defining a KVM test + * + * Copyright (C) 2022, Google LLC. + */ + +#ifndef SELFTEST_KVM_TEST_HARNESS_H +#define SELFTEST_KVM_TEST_HARNESS_H + +#include "kselftest_harness.h" + +#define KVM_ONE_VCPU_TEST_SUITE(name) \ + FIXTURE(name) { \ + struct kvm_vcpu *vcpu; \ + }; \ + \ + FIXTURE_SETUP(name) { \ + (void)vm_create_with_one_vcpu(&self->vcpu, NULL); \ + } \ + \ + FIXTURE_TEARDOWN(name) { \ + kvm_vm_free(self->vcpu->vm); \ + } + +#define KVM_ONE_VCPU_TEST(suite, test, guestcode) \ +static void __suite##_##test(struct kvm_vcpu *vcpu); \ + \ +TEST_F(suite, test) \ +{ \ + vcpu_arch_set_entry_point(self->vcpu, guestcode); \ + __suite##_##test(self->vcpu); \ +} \ +static void __suite##_##test(struct kvm_vcpu *vcpu) + +#endif /* SELFTEST_KVM_TEST_HARNESS_H */ -- cgit 1.2.3-korg From ba97ed0af6fe22dd718535ce663297ccd0ff52c5 Mon Sep 17 00:00:00 2001 From: Thomas Huth Date: Thu, 8 Feb 2024 21:48:41 +0100 Subject: KVM: selftests: x86: Use TAP interface in the sync_regs test The sync_regs test currently does not have any output (unless one of the TEST_ASSERT statements fails), so it's hard for a user to tell whether a certain new sub-test has been included in the binary or not. Let's make this a little bit more user-friendly and include some TAP output via the kselftest_harness.h / kvm_test_harness.h interface. To be able to use the interface, we have to break up the huge main() function here into more fine-grained parts - then we can use the new KVM_ONE_VCPU_TEST() macro to define the individual tests. Since these are now run with a separate VM, we also have to make sure to create the expected state at the beginning of each test, so some parts grow a little bit - which should be OK considering that the individual tests are more self-contained now. 
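As a rough illustration (not part of this patch, and with made-up suite/test names), a converted sub-test boils down to the following sketch:

  KVM_ONE_VCPU_TEST_SUITE(example_suite);

  KVM_ONE_VCPU_TEST(example_suite, smoke, guest_code)
  {
          /*
           * Each test case gets a fresh VM/vcpu from the fixture, so any
           * state the old monolithic main() had accumulated must be
           * re-created here, e.g. run the guest once to populate the
           * sync_regs data before poking at it.
           */
          vcpu->run->kvm_valid_regs = TEST_SYNC_FIELDS;
          vcpu_run(vcpu);
          TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
  }

  int main(int argc, char *argv[])
  {
          return test_harness_run(argc, argv);
  }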
Suggested-by: David Matlack Suggested-by: Sean Christopherson Signed-off-by: Thomas Huth Link: https://lore.kernel.org/r/20240208204844.119326-6-thuth@redhat.com Signed-off-by: Sean Christopherson --- .../testing/selftests/kvm/x86_64/sync_regs_test.c | 110 ++++++++++++++++----- 1 file changed, 84 insertions(+), 26 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/x86_64/sync_regs_test.c b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c index 1cd19dfa0046c..67f78c0a58a51 100644 --- a/tools/testing/selftests/kvm/x86_64/sync_regs_test.c +++ b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c @@ -17,6 +17,7 @@ #include #include +#include "kvm_test_harness.h" #include "test_util.h" #include "kvm_util.h" #include "processor.h" @@ -41,6 +42,8 @@ void guest_code(void) : "rax", "rbx"); } +KVM_ONE_VCPU_TEST_SUITE(sync_regs_test); + static void compare_regs(struct kvm_regs *left, struct kvm_regs *right) { #define REG_COMPARE(reg) \ @@ -152,18 +155,15 @@ static noinline void *race_sregs_cr4(void *arg) return NULL; } -static void race_sync_regs(void *racer) +static void race_sync_regs(struct kvm_vcpu *vcpu, void *racer) { const time_t TIMEOUT = 2; /* seconds, roughly */ struct kvm_x86_state *state; struct kvm_translation tr; - struct kvm_vcpu *vcpu; struct kvm_run *run; - struct kvm_vm *vm; pthread_t thread; time_t t; - vm = vm_create_with_one_vcpu(&vcpu, guest_code); run = vcpu->run; run->kvm_valid_regs = KVM_SYNC_X86_SREGS; @@ -205,26 +205,12 @@ static void race_sync_regs(void *racer) TEST_ASSERT_EQ(pthread_join(thread, NULL), 0); kvm_x86_state_cleanup(state); - kvm_vm_free(vm); } -int main(int argc, char *argv[]) +KVM_ONE_VCPU_TEST(sync_regs_test, read_invalid, guest_code) { - struct kvm_vcpu *vcpu; - struct kvm_vm *vm; - struct kvm_run *run; - struct kvm_regs regs; - struct kvm_sregs sregs; - struct kvm_vcpu_events events; - int rv, cap; - - cap = kvm_check_cap(KVM_CAP_SYNC_REGS); - TEST_REQUIRE((cap & TEST_SYNC_FIELDS) == TEST_SYNC_FIELDS); - TEST_REQUIRE(!(cap & INVALID_SYNC_FIELD)); - - vm = vm_create_with_one_vcpu(&vcpu, guest_code); - - run = vcpu->run; + struct kvm_run *run = vcpu->run; + int rv; /* Request reading invalid register set from VCPU. */ run->kvm_valid_regs = INVALID_SYNC_FIELD; @@ -240,6 +226,12 @@ int main(int argc, char *argv[]) "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n", rv); run->kvm_valid_regs = 0; +} + +KVM_ONE_VCPU_TEST(sync_regs_test, set_invalid, guest_code) +{ + struct kvm_run *run = vcpu->run; + int rv; /* Request setting invalid register set into VCPU. */ run->kvm_dirty_regs = INVALID_SYNC_FIELD; @@ -255,6 +247,14 @@ int main(int argc, char *argv[]) "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n", rv); run->kvm_dirty_regs = 0; +} + +KVM_ONE_VCPU_TEST(sync_regs_test, req_and_verify_all_valid, guest_code) +{ + struct kvm_run *run = vcpu->run; + struct kvm_vcpu_events events; + struct kvm_sregs sregs; + struct kvm_regs regs; /* Request and verify all valid register sets. 
*/ /* TODO: BUILD TIME CHECK: TEST_ASSERT(KVM_SYNC_X86_NUM_FIELDS != 3); */ @@ -270,6 +270,19 @@ int main(int argc, char *argv[]) vcpu_events_get(vcpu, &events); compare_vcpu_events(&events, &run->s.regs.events); +} + +KVM_ONE_VCPU_TEST(sync_regs_test, set_and_verify_various, guest_code) +{ + struct kvm_run *run = vcpu->run; + struct kvm_vcpu_events events; + struct kvm_sregs sregs; + struct kvm_regs regs; + + /* Run once to get register set */ + run->kvm_valid_regs = TEST_SYNC_FIELDS; + vcpu_run(vcpu); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); /* Set and verify various register values. */ run->s.regs.regs.rbx = 0xBAD1DEA; @@ -295,6 +308,11 @@ int main(int argc, char *argv[]) vcpu_events_get(vcpu, &events); compare_vcpu_events(&events, &run->s.regs.events); +} + +KVM_ONE_VCPU_TEST(sync_regs_test, clear_kvm_dirty_regs_bits, guest_code) +{ + struct kvm_run *run = vcpu->run; /* Clear kvm_dirty_regs bits, verify new s.regs values are * overwritten with existing guest values. @@ -307,6 +325,17 @@ int main(int argc, char *argv[]) TEST_ASSERT(run->s.regs.regs.rbx != 0xDEADBEEF, "rbx sync regs value incorrect 0x%llx.", run->s.regs.regs.rbx); +} + +KVM_ONE_VCPU_TEST(sync_regs_test, clear_kvm_valid_and_dirty_regs, guest_code) +{ + struct kvm_run *run = vcpu->run; + struct kvm_regs regs; + + /* Run once to get register set */ + run->kvm_valid_regs = TEST_SYNC_FIELDS; + vcpu_run(vcpu); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); /* Clear kvm_valid_regs bits and kvm_dirty_bits. * Verify s.regs values are not overwritten with existing guest values @@ -327,6 +356,17 @@ int main(int argc, char *argv[]) TEST_ASSERT(regs.rbx == 0xBAC0 + 1, "rbx guest value incorrect 0x%llx.", regs.rbx); +} + +KVM_ONE_VCPU_TEST(sync_regs_test, clear_kvm_valid_regs_bits, guest_code) +{ + struct kvm_run *run = vcpu->run; + struct kvm_regs regs; + + /* Run once to get register set */ + run->kvm_valid_regs = TEST_SYNC_FIELDS; + vcpu_run(vcpu); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); /* Clear kvm_valid_regs bits. Verify s.regs values are not overwritten * with existing guest values but that guest values are overwritten @@ -344,12 +384,30 @@ int main(int argc, char *argv[]) TEST_ASSERT(regs.rbx == 0xBBBB + 1, "rbx guest value incorrect 0x%llx.", regs.rbx); +} + +KVM_ONE_VCPU_TEST(sync_regs_test, race_cr4, guest_code) +{ + race_sync_regs(vcpu, race_sregs_cr4); +} + +KVM_ONE_VCPU_TEST(sync_regs_test, race_exc, guest_code) +{ + race_sync_regs(vcpu, race_events_exc); +} - kvm_vm_free(vm); +KVM_ONE_VCPU_TEST(sync_regs_test, race_inj_pen, guest_code) +{ + race_sync_regs(vcpu, race_events_inj_pen); +} + +int main(int argc, char *argv[]) +{ + int cap; - race_sync_regs(race_sregs_cr4); - race_sync_regs(race_events_exc); - race_sync_regs(race_events_inj_pen); + cap = kvm_check_cap(KVM_CAP_SYNC_REGS); + TEST_REQUIRE((cap & TEST_SYNC_FIELDS) == TEST_SYNC_FIELDS); + TEST_REQUIRE(!(cap & INVALID_SYNC_FIELD)); - return 0; + return test_harness_run(argc, argv); } -- cgit 1.2.3-korg From a6983e8f5fabb249ad06328bc6452583f25b9c76 Mon Sep 17 00:00:00 2001 From: Thomas Huth Date: Thu, 8 Feb 2024 21:48:42 +0100 Subject: KVM: selftests: x86: Use TAP interface in the fix_hypercall test Use the kvm_test_harness.h interface in this test to get TAP output, so that it is easier for the user to see what the test is doing. 
Signed-off-by: Thomas Huth Link: https://lore.kernel.org/r/20240208204844.119326-7-thuth@redhat.com Signed-off-by: Sean Christopherson --- .../selftests/kvm/x86_64/fix_hypercall_test.c | 27 ++++++++++++++-------- 1 file changed, 18 insertions(+), 9 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c b/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c index 0f728f05ea82f..f3c2239228b10 100644 --- a/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c +++ b/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c @@ -9,6 +9,7 @@ #include #include +#include "kvm_test_harness.h" #include "apic.h" #include "test_util.h" #include "kvm_util.h" @@ -83,6 +84,8 @@ static void guest_main(void) GUEST_DONE(); } +KVM_ONE_VCPU_TEST_SUITE(fix_hypercall); + static void enter_guest(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; @@ -103,14 +106,11 @@ static void enter_guest(struct kvm_vcpu *vcpu) } } -static void test_fix_hypercall(bool disable_quirk) +static void test_fix_hypercall(struct kvm_vcpu *vcpu, bool disable_quirk) { - struct kvm_vcpu *vcpu; - struct kvm_vm *vm; - - vm = vm_create_with_one_vcpu(&vcpu, guest_main); + struct kvm_vm *vm = vcpu->vm; - vm_init_descriptor_tables(vcpu->vm); + vm_init_descriptor_tables(vm); vcpu_init_descriptor_tables(vcpu); vm_install_exception_handler(vcpu->vm, UD_VECTOR, guest_ud_handler); @@ -126,10 +126,19 @@ static void test_fix_hypercall(bool disable_quirk) enter_guest(vcpu); } -int main(void) +KVM_ONE_VCPU_TEST(fix_hypercall, enable_quirk, guest_main) +{ + test_fix_hypercall(vcpu, false); +} + +KVM_ONE_VCPU_TEST(fix_hypercall, disable_quirk, guest_main) +{ + test_fix_hypercall(vcpu, true); +} + +int main(int argc, char *argv[]) { TEST_REQUIRE(kvm_check_cap(KVM_CAP_DISABLE_QUIRKS2) & KVM_X86_QUIRK_FIX_HYPERCALL_INSN); - test_fix_hypercall(false); - test_fix_hypercall(true); + return test_harness_run(argc, argv); } -- cgit 1.2.3-korg From de1b03f25f3b9e723e7dd8db70639abaa557d527 Mon Sep 17 00:00:00 2001 From: Thomas Huth Date: Thu, 8 Feb 2024 21:48:43 +0100 Subject: KVM: selftests: x86: Use TAP interface in the vmx_pmu_caps test Use the kvm_test_harness.h interface in this test to get TAP output, so that it is easier for the user to see what the test is doing. 
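One wrinkle in the conversion: fixture-based tests cannot take parameters the way the old helper functions took host_cap, so shared state becomes a file-scope static that main() fills in before handing control to the harness. A minimal sketch of that pattern (illustrative only; MSR_EXAMPLE is a placeholder, not a real MSR):

  KVM_ONE_VCPU_TEST_SUITE(example);

  static uint64_t host_val;

  KVM_ONE_VCPU_TEST(example, check_val, guest_code)
  {
          /* Consumes the value that main() captured before the harness ran. */
          vcpu_set_msr(vcpu, MSR_EXAMPLE, host_val);
  }

  int main(int argc, char *argv[])
  {
          host_val = kvm_get_feature_msr(MSR_EXAMPLE);
          return test_harness_run(argc, argv);
  }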
Signed-off-by: Thomas Huth Link: https://lore.kernel.org/r/20240208204844.119326-8-thuth@redhat.com [sean: make host_cap static] Signed-off-by: Sean Christopherson --- .../selftests/kvm/x86_64/vmx_pmu_caps_test.c | 52 +++++----------------- 1 file changed, 12 insertions(+), 40 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c index 2a8d4ac2f0204..876442fadada8 100644 --- a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c +++ b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c @@ -15,10 +15,11 @@ #include +#include "kvm_test_harness.h" #include "kvm_util.h" #include "vmx.h" -union perf_capabilities { +static union perf_capabilities { struct { u64 lbr_format:6; u64 pebs_trap:1; @@ -32,7 +33,7 @@ union perf_capabilities { u64 anythread_deprecated:1; }; u64 capabilities; -}; +} host_cap; /* * The LBR format and most PEBS features are immutable, all other features are @@ -73,19 +74,19 @@ static void guest_code(uint64_t current_val) GUEST_DONE(); } +KVM_ONE_VCPU_TEST_SUITE(vmx_pmu_caps); + /* * Verify that guest WRMSRs to PERF_CAPABILITIES #GP regardless of the value * written, that the guest always sees the userspace controlled value, and that * PERF_CAPABILITIES is immutable after KVM_RUN. */ -static void test_guest_wrmsr_perf_capabilities(union perf_capabilities host_cap) +KVM_ONE_VCPU_TEST(vmx_pmu_caps, guest_wrmsr_perf_capabilities, guest_code) { - struct kvm_vcpu *vcpu; - struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, guest_code); struct ucall uc; int r, i; - vm_init_descriptor_tables(vm); + vm_init_descriptor_tables(vcpu->vm); vcpu_init_descriptor_tables(vcpu); vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities); @@ -117,31 +118,21 @@ static void test_guest_wrmsr_perf_capabilities(union perf_capabilities host_cap) TEST_ASSERT(!r, "Post-KVM_RUN write '0x%llx'didn't fail", host_cap.capabilities ^ BIT_ULL(i)); } - - kvm_vm_free(vm); } /* * Verify KVM allows writing PERF_CAPABILITIES with all KVM-supported features * enabled, as well as '0' (to disable all features). */ -static void test_basic_perf_capabilities(union perf_capabilities host_cap) +KVM_ONE_VCPU_TEST(vmx_pmu_caps, basic_perf_capabilities, guest_code) { - struct kvm_vcpu *vcpu; - struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, NULL); - vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, 0); vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities); - - kvm_vm_free(vm); } -static void test_fungible_perf_capabilities(union perf_capabilities host_cap) +KVM_ONE_VCPU_TEST(vmx_pmu_caps, fungible_perf_capabilities, guest_code) { const uint64_t fungible_caps = host_cap.capabilities & ~immutable_caps.capabilities; - - struct kvm_vcpu *vcpu; - struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, NULL); int bit; for_each_set_bit(bit, &fungible_caps, 64) { @@ -150,8 +141,6 @@ static void test_fungible_perf_capabilities(union perf_capabilities host_cap) host_cap.capabilities & ~BIT_ULL(bit)); } vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities); - - kvm_vm_free(vm); } /* @@ -160,14 +149,11 @@ static void test_fungible_perf_capabilities(union perf_capabilities host_cap) * separately as they are multi-bit values, e.g. toggling or setting a single * bit can generate a false positive without dedicated safeguards. 
*/ -static void test_immutable_perf_capabilities(union perf_capabilities host_cap) +KVM_ONE_VCPU_TEST(vmx_pmu_caps, immutable_perf_capabilities, guest_code) { const uint64_t reserved_caps = (~host_cap.capabilities | immutable_caps.capabilities) & ~format_caps.capabilities; - - struct kvm_vcpu *vcpu; - struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, NULL); union perf_capabilities val = host_cap; int r, bit; @@ -201,8 +187,6 @@ static void test_immutable_perf_capabilities(union perf_capabilities host_cap) TEST_ASSERT(!r, "Bad PEBS FMT = 0x%x didn't fail, host = 0x%x", val.pebs_format, host_cap.pebs_format); } - - kvm_vm_free(vm); } /* @@ -211,17 +195,13 @@ static void test_immutable_perf_capabilities(union perf_capabilities host_cap) * LBR_TOS as those bits are writable across all uarch implementations (arch * LBRs will need to poke a different MSR). */ -static void test_lbr_perf_capabilities(union perf_capabilities host_cap) +KVM_ONE_VCPU_TEST(vmx_pmu_caps, lbr_perf_capabilities, guest_code) { - struct kvm_vcpu *vcpu; - struct kvm_vm *vm; int r; if (!host_cap.lbr_format) return; - vm = vm_create_with_one_vcpu(&vcpu, NULL); - vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities); vcpu_set_msr(vcpu, MSR_LBR_TOS, 7); @@ -229,14 +209,10 @@ static void test_lbr_perf_capabilities(union perf_capabilities host_cap) r = _vcpu_set_msr(vcpu, MSR_LBR_TOS, 7); TEST_ASSERT(!r, "Writing LBR_TOS should fail after disabling vPMU"); - - kvm_vm_free(vm); } int main(int argc, char *argv[]) { - union perf_capabilities host_cap; - TEST_REQUIRE(get_kvm_param_bool("enable_pmu")); TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_PDCM)); @@ -248,9 +224,5 @@ int main(int argc, char *argv[]) TEST_ASSERT(host_cap.full_width_write, "Full-width writes should always be supported"); - test_basic_perf_capabilities(host_cap); - test_fungible_perf_capabilities(host_cap); - test_immutable_perf_capabilities(host_cap); - test_guest_wrmsr_perf_capabilities(host_cap); - test_lbr_perf_capabilities(host_cap); + return test_harness_run(argc, argv); } -- cgit 1.2.3-korg From 8d251856d4258307ef27a5a8f9020aad560a4057 Mon Sep 17 00:00:00 2001 From: Thomas Huth Date: Thu, 8 Feb 2024 21:48:44 +0100 Subject: KVM: selftests: x86: Use TAP interface in the userspace_msr_exit test Use the kselftest_harness.h interface in this test to get TAP output, so that it is easier for the user to see what the test is doing. 
Signed-off-by: Thomas Huth Link: https://lore.kernel.org/r/20240208204844.119326-9-thuth@redhat.com Signed-off-by: Sean Christopherson --- .../selftests/kvm/x86_64/userspace_msr_exit_test.c | 52 ++++++---------------- 1 file changed, 13 insertions(+), 39 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c b/tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c index 3533dc2fbfeeb..9591a5fd54d7c 100644 --- a/tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c +++ b/tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c @@ -8,6 +8,7 @@ #define _GNU_SOURCE /* for program_invocation_short_name */ #include +#include "kvm_test_harness.h" #include "test_util.h" #include "kvm_util.h" #include "vmx.h" @@ -527,14 +528,13 @@ static void run_guest_then_process_ucall_done(struct kvm_vcpu *vcpu) process_ucall_done(vcpu); } -static void test_msr_filter_allow(void) +KVM_ONE_VCPU_TEST_SUITE(user_msr); + +KVM_ONE_VCPU_TEST(user_msr, msr_filter_allow, guest_code_filter_allow) { - struct kvm_vcpu *vcpu; - struct kvm_vm *vm; + struct kvm_vm *vm = vcpu->vm; int rc; - vm = vm_create_with_one_vcpu(&vcpu, guest_code_filter_allow); - rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR); TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available"); vm_enable_cap(vm, KVM_CAP_X86_USER_SPACE_MSR, KVM_MSR_EXIT_REASON_FILTER); @@ -585,8 +585,6 @@ static void test_msr_filter_allow(void) } else { printf("To run the instruction emulated tests set the module parameter 'kvm.force_emulation_prefix=1'\n"); } - - kvm_vm_free(vm); } static int handle_ucall(struct kvm_vcpu *vcpu) @@ -646,16 +644,12 @@ static void handle_wrmsr(struct kvm_run *run) } } -static void test_msr_filter_deny(void) +KVM_ONE_VCPU_TEST(user_msr, msr_filter_deny, guest_code_filter_deny) { - struct kvm_vcpu *vcpu; - struct kvm_vm *vm; - struct kvm_run *run; + struct kvm_vm *vm = vcpu->vm; + struct kvm_run *run = vcpu->run; int rc; - vm = vm_create_with_one_vcpu(&vcpu, guest_code_filter_deny); - run = vcpu->run; - rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR); TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available"); vm_enable_cap(vm, KVM_CAP_X86_USER_SPACE_MSR, KVM_MSR_EXIT_REASON_INVAL | @@ -689,18 +683,13 @@ static void test_msr_filter_deny(void) done: TEST_ASSERT(msr_reads == 4, "Handled 4 rdmsr in user space"); TEST_ASSERT(msr_writes == 3, "Handled 3 wrmsr in user space"); - - kvm_vm_free(vm); } -static void test_msr_permission_bitmap(void) +KVM_ONE_VCPU_TEST(user_msr, msr_permission_bitmap, guest_code_permission_bitmap) { - struct kvm_vcpu *vcpu; - struct kvm_vm *vm; + struct kvm_vm *vm = vcpu->vm; int rc; - vm = vm_create_with_one_vcpu(&vcpu, guest_code_permission_bitmap); - rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR); TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available"); vm_enable_cap(vm, KVM_CAP_X86_USER_SPACE_MSR, KVM_MSR_EXIT_REASON_FILTER); @@ -715,8 +704,6 @@ static void test_msr_permission_bitmap(void) vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_gs); run_guest_then_process_rdmsr(vcpu, MSR_GS_BASE); run_guest_then_process_ucall_done(vcpu); - - kvm_vm_free(vm); } #define test_user_exit_msr_ioctl(vm, cmd, arg, flag, valid_mask) \ @@ -786,31 +773,18 @@ static void run_msr_filter_flag_test(struct kvm_vm *vm) } /* Test that attempts to write to the unused bits in a flag fails. 
*/ -static void test_user_exit_msr_flags(void) +KVM_ONE_VCPU_TEST(user_msr, user_exit_msr_flags, NULL) { - struct kvm_vcpu *vcpu; - struct kvm_vm *vm; - - vm = vm_create_with_one_vcpu(&vcpu, NULL); + struct kvm_vm *vm = vcpu->vm; /* Test flags for KVM_CAP_X86_USER_SPACE_MSR. */ run_user_space_msr_flag_test(vm); /* Test flags and range flags for KVM_X86_SET_MSR_FILTER. */ run_msr_filter_flag_test(vm); - - kvm_vm_free(vm); } int main(int argc, char *argv[]) { - test_msr_filter_allow(); - - test_msr_filter_deny(); - - test_msr_permission_bitmap(); - - test_user_exit_msr_flags(); - - return 0; + return test_harness_run(argc, argv); } -- cgit 1.2.3-korg From 126190379c57b7947dc7af278bee848cb3976e10 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 22 Feb 2024 16:42:48 -0800 Subject: KVM: selftests: Extend VM creation's @shape to allow control of VM subtype Carve out space in the @shape passed to the various VM creation helpers to allow using the shape to control the subtype of VM, e.g. to identify x86's SEV VMs (which are "regular" VMs as far as KVM is concerned). Cc: Paolo Bonzini Cc: Sean Christopherson Cc: Vishal Annapurve Cc: Ackerley Tng Cc: Andrew Jones Cc: Tom Lendacky Cc: Michael Roth Tested-by: Carlos Bilbao Link: https://lore.kernel.org/r/20240223004258.3104051-2-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/include/kvm_util_base.h | 9 +++++++-- tools/testing/selftests/kvm/lib/kvm_util.c | 1 + 2 files changed, 8 insertions(+), 2 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h index a6e7738a8db73..7ade281682c1f 100644 --- a/tools/testing/selftests/kvm/include/kvm_util_base.h +++ b/tools/testing/selftests/kvm/include/kvm_util_base.h @@ -90,6 +90,7 @@ enum kvm_mem_region_type { struct kvm_vm { int mode; unsigned long type; + uint8_t subtype; int kvm_fd; int fd; unsigned int pgtable_levels; @@ -191,10 +192,14 @@ enum vm_guest_mode { }; struct vm_shape { - enum vm_guest_mode mode; - unsigned int type; + uint32_t type; + uint8_t mode; + uint8_t subtype; + uint16_t padding; }; +kvm_static_assert(sizeof(struct vm_shape) == sizeof(uint64_t)); + #define VM_TYPE_DEFAULT 0 #define VM_SHAPE(__mode) \ diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index e066d584c6561..22da6a10b41f2 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -225,6 +225,7 @@ struct kvm_vm *____vm_create(struct vm_shape shape) vm->mode = shape.mode; vm->type = shape.type; + vm->subtype = shape.subtype; vm->pa_bits = vm_guest_mode_params[vm->mode].pa_bits; vm->va_bits = vm_guest_mode_params[vm->mode].va_bits; -- cgit 1.2.3-korg From 35f50c91c43e4447803c632adba4ebceaa0b692b Mon Sep 17 00:00:00 2001 From: Michael Roth Date: Thu, 22 Feb 2024 16:42:49 -0800 Subject: KVM: selftests: Make sparsebit structs const where appropriate Make all sparsebit struct pointers "const" where appropriate. This will allow adding a bitmap to track protected/encrypted physical memory that tests can access in a read-only fashion. 
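As a quick sketch of what the constified API enables (the helper below is illustrative and not part of this patch), a function can accept a read-only view of a bitmap and still query it, with the compiler rejecting any accidental mutation:

  /* Count the set bits in [lo, hi] without being able to modify @s. */
  static sparsebit_num_t count_set_in_range(const struct sparsebit *s,
                                            sparsebit_idx_t lo,
                                            sparsebit_idx_t hi)
  {
          sparsebit_num_t n = 0;
          sparsebit_idx_t i;

          for (i = lo; i <= hi; i++)
                  if (sparsebit_is_set(s, i))
                          n++;
          return n;
  }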
Cc: Paolo Bonzini Cc: Sean Christopherson Cc: Vishal Annapurve Cc: Ackerley Tng Cc: Andrew Jones Cc: Tom Lendacky Cc: Michael Roth Tested-by: Carlos Bilbao Signed-off-by: Michael Roth Signed-off-by: Peter Gonda [sean: massage changelog] Link: https://lore.kernel.org/r/20240223004258.3104051-3-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/include/sparsebit.h | 36 +++++++++---------- tools/testing/selftests/kvm/lib/sparsebit.c | 48 ++++++++++++------------- 2 files changed, 42 insertions(+), 42 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/include/sparsebit.h b/tools/testing/selftests/kvm/include/sparsebit.h index 12a9a4b9ceadc..fb5170d57fcbc 100644 --- a/tools/testing/selftests/kvm/include/sparsebit.h +++ b/tools/testing/selftests/kvm/include/sparsebit.h @@ -30,26 +30,26 @@ typedef uint64_t sparsebit_num_t; struct sparsebit *sparsebit_alloc(void); void sparsebit_free(struct sparsebit **sbitp); -void sparsebit_copy(struct sparsebit *dstp, struct sparsebit *src); +void sparsebit_copy(struct sparsebit *dstp, const struct sparsebit *src); -bool sparsebit_is_set(struct sparsebit *sbit, sparsebit_idx_t idx); -bool sparsebit_is_set_num(struct sparsebit *sbit, +bool sparsebit_is_set(const struct sparsebit *sbit, sparsebit_idx_t idx); +bool sparsebit_is_set_num(const struct sparsebit *sbit, sparsebit_idx_t idx, sparsebit_num_t num); -bool sparsebit_is_clear(struct sparsebit *sbit, sparsebit_idx_t idx); -bool sparsebit_is_clear_num(struct sparsebit *sbit, +bool sparsebit_is_clear(const struct sparsebit *sbit, sparsebit_idx_t idx); +bool sparsebit_is_clear_num(const struct sparsebit *sbit, sparsebit_idx_t idx, sparsebit_num_t num); -sparsebit_num_t sparsebit_num_set(struct sparsebit *sbit); -bool sparsebit_any_set(struct sparsebit *sbit); -bool sparsebit_any_clear(struct sparsebit *sbit); -bool sparsebit_all_set(struct sparsebit *sbit); -bool sparsebit_all_clear(struct sparsebit *sbit); -sparsebit_idx_t sparsebit_first_set(struct sparsebit *sbit); -sparsebit_idx_t sparsebit_first_clear(struct sparsebit *sbit); -sparsebit_idx_t sparsebit_next_set(struct sparsebit *sbit, sparsebit_idx_t prev); -sparsebit_idx_t sparsebit_next_clear(struct sparsebit *sbit, sparsebit_idx_t prev); -sparsebit_idx_t sparsebit_next_set_num(struct sparsebit *sbit, +sparsebit_num_t sparsebit_num_set(const struct sparsebit *sbit); +bool sparsebit_any_set(const struct sparsebit *sbit); +bool sparsebit_any_clear(const struct sparsebit *sbit); +bool sparsebit_all_set(const struct sparsebit *sbit); +bool sparsebit_all_clear(const struct sparsebit *sbit); +sparsebit_idx_t sparsebit_first_set(const struct sparsebit *sbit); +sparsebit_idx_t sparsebit_first_clear(const struct sparsebit *sbit); +sparsebit_idx_t sparsebit_next_set(const struct sparsebit *sbit, sparsebit_idx_t prev); +sparsebit_idx_t sparsebit_next_clear(const struct sparsebit *sbit, sparsebit_idx_t prev); +sparsebit_idx_t sparsebit_next_set_num(const struct sparsebit *sbit, sparsebit_idx_t start, sparsebit_num_t num); -sparsebit_idx_t sparsebit_next_clear_num(struct sparsebit *sbit, +sparsebit_idx_t sparsebit_next_clear_num(const struct sparsebit *sbit, sparsebit_idx_t start, sparsebit_num_t num); void sparsebit_set(struct sparsebit *sbitp, sparsebit_idx_t idx); @@ -62,9 +62,9 @@ void sparsebit_clear_num(struct sparsebit *sbitp, sparsebit_idx_t start, sparsebit_num_t num); void sparsebit_clear_all(struct sparsebit *sbitp); -void sparsebit_dump(FILE *stream, struct sparsebit *sbit, +void sparsebit_dump(FILE 
*stream, const struct sparsebit *sbit, unsigned int indent); -void sparsebit_validate_internal(struct sparsebit *sbit); +void sparsebit_validate_internal(const struct sparsebit *sbit); #ifdef __cplusplus } diff --git a/tools/testing/selftests/kvm/lib/sparsebit.c b/tools/testing/selftests/kvm/lib/sparsebit.c index 88cb6b84e6f31..cfed9d26cc71b 100644 --- a/tools/testing/selftests/kvm/lib/sparsebit.c +++ b/tools/testing/selftests/kvm/lib/sparsebit.c @@ -202,7 +202,7 @@ static sparsebit_num_t node_num_set(struct node *nodep) /* Returns a pointer to the node that describes the * lowest bit index. */ -static struct node *node_first(struct sparsebit *s) +static struct node *node_first(const struct sparsebit *s) { struct node *nodep; @@ -216,7 +216,7 @@ static struct node *node_first(struct sparsebit *s) * lowest bit index > the index of the node pointed to by np. * Returns NULL if no node with a higher index exists. */ -static struct node *node_next(struct sparsebit *s, struct node *np) +static struct node *node_next(const struct sparsebit *s, struct node *np) { struct node *nodep = np; @@ -244,7 +244,7 @@ static struct node *node_next(struct sparsebit *s, struct node *np) * highest index < the index of the node pointed to by np. * Returns NULL if no node with a lower index exists. */ -static struct node *node_prev(struct sparsebit *s, struct node *np) +static struct node *node_prev(const struct sparsebit *s, struct node *np) { struct node *nodep = np; @@ -273,7 +273,7 @@ static struct node *node_prev(struct sparsebit *s, struct node *np) * subtree and duplicates the bit settings to the newly allocated nodes. * Returns the newly allocated copy of subtree. */ -static struct node *node_copy_subtree(struct node *subtree) +static struct node *node_copy_subtree(const struct node *subtree) { struct node *root; @@ -307,7 +307,7 @@ static struct node *node_copy_subtree(struct node *subtree) * index is within the bits described by the mask bits or the number of * contiguous bits set after the mask. Returns NULL if there is no such node. */ -static struct node *node_find(struct sparsebit *s, sparsebit_idx_t idx) +static struct node *node_find(const struct sparsebit *s, sparsebit_idx_t idx) { struct node *nodep; @@ -393,7 +393,7 @@ static struct node *node_add(struct sparsebit *s, sparsebit_idx_t idx) } /* Returns whether all the bits in the sparsebit array are set. */ -bool sparsebit_all_set(struct sparsebit *s) +bool sparsebit_all_set(const struct sparsebit *s) { /* * If any nodes there must be at least one bit set. Only case @@ -775,7 +775,7 @@ static void node_reduce(struct sparsebit *s, struct node *nodep) /* Returns whether the bit at the index given by idx, within the * sparsebit array is set or not. */ -bool sparsebit_is_set(struct sparsebit *s, sparsebit_idx_t idx) +bool sparsebit_is_set(const struct sparsebit *s, sparsebit_idx_t idx) { struct node *nodep; @@ -921,7 +921,7 @@ static inline sparsebit_idx_t node_first_clear(struct node *nodep, int start) * used by test cases after they detect an unexpected condition, as a means * to capture diagnostic information. */ -static void sparsebit_dump_internal(FILE *stream, struct sparsebit *s, +static void sparsebit_dump_internal(FILE *stream, const struct sparsebit *s, unsigned int indent) { /* Dump the contents of s */ @@ -969,7 +969,7 @@ void sparsebit_free(struct sparsebit **sbitp) * sparsebit_alloc(). It can though already have bits set, which * if different from src will be cleared. 
*/ -void sparsebit_copy(struct sparsebit *d, struct sparsebit *s) +void sparsebit_copy(struct sparsebit *d, const struct sparsebit *s) { /* First clear any bits already set in the destination */ sparsebit_clear_all(d); @@ -981,7 +981,7 @@ void sparsebit_copy(struct sparsebit *d, struct sparsebit *s) } /* Returns whether num consecutive bits starting at idx are all set. */ -bool sparsebit_is_set_num(struct sparsebit *s, +bool sparsebit_is_set_num(const struct sparsebit *s, sparsebit_idx_t idx, sparsebit_num_t num) { sparsebit_idx_t next_cleared; @@ -1005,14 +1005,14 @@ bool sparsebit_is_set_num(struct sparsebit *s, } /* Returns whether the bit at the index given by idx. */ -bool sparsebit_is_clear(struct sparsebit *s, +bool sparsebit_is_clear(const struct sparsebit *s, sparsebit_idx_t idx) { return !sparsebit_is_set(s, idx); } /* Returns whether num consecutive bits starting at idx are all cleared. */ -bool sparsebit_is_clear_num(struct sparsebit *s, +bool sparsebit_is_clear_num(const struct sparsebit *s, sparsebit_idx_t idx, sparsebit_num_t num) { sparsebit_idx_t next_set; @@ -1041,13 +1041,13 @@ bool sparsebit_is_clear_num(struct sparsebit *s, * value. Use sparsebit_any_set(), instead of sparsebit_num_set() > 0, * to determine if the sparsebit array has any bits set. */ -sparsebit_num_t sparsebit_num_set(struct sparsebit *s) +sparsebit_num_t sparsebit_num_set(const struct sparsebit *s) { return s->num_set; } /* Returns whether any bit is set in the sparsebit array. */ -bool sparsebit_any_set(struct sparsebit *s) +bool sparsebit_any_set(const struct sparsebit *s) { /* * Nodes only describe set bits. If any nodes then there @@ -1070,20 +1070,20 @@ bool sparsebit_any_set(struct sparsebit *s) } /* Returns whether all the bits in the sparsebit array are cleared. */ -bool sparsebit_all_clear(struct sparsebit *s) +bool sparsebit_all_clear(const struct sparsebit *s) { return !sparsebit_any_set(s); } /* Returns whether all the bits in the sparsebit array are set. */ -bool sparsebit_any_clear(struct sparsebit *s) +bool sparsebit_any_clear(const struct sparsebit *s) { return !sparsebit_all_set(s); } /* Returns the index of the first set bit. Abort if no bits are set. */ -sparsebit_idx_t sparsebit_first_set(struct sparsebit *s) +sparsebit_idx_t sparsebit_first_set(const struct sparsebit *s) { struct node *nodep; @@ -1097,7 +1097,7 @@ sparsebit_idx_t sparsebit_first_set(struct sparsebit *s) /* Returns the index of the first cleared bit. Abort if * no bits are cleared. */ -sparsebit_idx_t sparsebit_first_clear(struct sparsebit *s) +sparsebit_idx_t sparsebit_first_clear(const struct sparsebit *s) { struct node *nodep1, *nodep2; @@ -1151,7 +1151,7 @@ sparsebit_idx_t sparsebit_first_clear(struct sparsebit *s) /* Returns index of next bit set within s after the index given by prev. * Returns 0 if there are no bits after prev that are set. */ -sparsebit_idx_t sparsebit_next_set(struct sparsebit *s, +sparsebit_idx_t sparsebit_next_set(const struct sparsebit *s, sparsebit_idx_t prev) { sparsebit_idx_t lowest_possible = prev + 1; @@ -1244,7 +1244,7 @@ sparsebit_idx_t sparsebit_next_set(struct sparsebit *s, /* Returns index of next bit cleared within s after the index given by prev. * Returns 0 if there are no bits after prev that are cleared. 
*/ -sparsebit_idx_t sparsebit_next_clear(struct sparsebit *s, +sparsebit_idx_t sparsebit_next_clear(const struct sparsebit *s, sparsebit_idx_t prev) { sparsebit_idx_t lowest_possible = prev + 1; @@ -1300,7 +1300,7 @@ sparsebit_idx_t sparsebit_next_clear(struct sparsebit *s, * and returns the index of the first sequence of num consecutively set * bits. Returns a value of 0 of no such sequence exists. */ -sparsebit_idx_t sparsebit_next_set_num(struct sparsebit *s, +sparsebit_idx_t sparsebit_next_set_num(const struct sparsebit *s, sparsebit_idx_t start, sparsebit_num_t num) { sparsebit_idx_t idx; @@ -1335,7 +1335,7 @@ sparsebit_idx_t sparsebit_next_set_num(struct sparsebit *s, * and returns the index of the first sequence of num consecutively cleared * bits. Returns a value of 0 of no such sequence exists. */ -sparsebit_idx_t sparsebit_next_clear_num(struct sparsebit *s, +sparsebit_idx_t sparsebit_next_clear_num(const struct sparsebit *s, sparsebit_idx_t start, sparsebit_num_t num) { sparsebit_idx_t idx; @@ -1583,7 +1583,7 @@ static size_t display_range(FILE *stream, sparsebit_idx_t low, * contiguous bits. This is done because '-' is used to specify command-line * options, and sometimes ranges are specified as command-line arguments. */ -void sparsebit_dump(FILE *stream, struct sparsebit *s, +void sparsebit_dump(FILE *stream, const struct sparsebit *s, unsigned int indent) { size_t current_line_len = 0; @@ -1681,7 +1681,7 @@ void sparsebit_dump(FILE *stream, struct sparsebit *s, * s. On error, diagnostic information is printed to stderr and * abort is called. */ -void sparsebit_validate_internal(struct sparsebit *s) +void sparsebit_validate_internal(const struct sparsebit *s) { bool error_detected = false; struct node *nodep, *prev = NULL; -- cgit 1.2.3-korg From 57e19f05775847d9d8565dad2cee6bbec03cdb06 Mon Sep 17 00:00:00 2001 From: Ackerley Tng Date: Thu, 22 Feb 2024 16:42:50 -0800 Subject: KVM: selftests: Add a macro to iterate over a sparsebit range Add sparsebit_for_each_set_range() to allow iterator over a range of set bits in a range. This will be used by x86 SEV guests to process protected physical pages (each such page needs to be encrypted _after_ being "added" to the VM). Tested-by: Carlos Bilbao Signed-off-by: Ackerley Tng [sean: split to separate patch] Link: https://lore.kernel.org/r/20240223004258.3104051-4-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/include/sparsebit.h | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/include/sparsebit.h b/tools/testing/selftests/kvm/include/sparsebit.h index fb5170d57fcbc..bc760761e1a32 100644 --- a/tools/testing/selftests/kvm/include/sparsebit.h +++ b/tools/testing/selftests/kvm/include/sparsebit.h @@ -66,6 +66,26 @@ void sparsebit_dump(FILE *stream, const struct sparsebit *sbit, unsigned int indent); void sparsebit_validate_internal(const struct sparsebit *sbit); +/* + * Iterate over an inclusive ranges within sparsebit @s. In each iteration, + * @range_begin and @range_end will take the beginning and end of the set + * range, which are of type sparsebit_idx_t. + * + * For example, if the range [3, 7] (inclusive) is set, within the + * iteration,@range_begin will take the value 3 and @range_end will take + * the value 7. + * + * Ensure that there is at least one bit set before using this macro with + * sparsebit_any_set(), because sparsebit_first_set() will abort if none + * are set. 
+ */ +#define sparsebit_for_each_set_range(s, range_begin, range_end) \ + for (range_begin = sparsebit_first_set(s), \ + range_end = sparsebit_next_clear(s, range_begin) - 1; \ + range_begin && range_end; \ + range_begin = sparsebit_next_set(s, range_end), \ + range_end = sparsebit_next_clear(s, range_begin) - 1) + #ifdef __cplusplus } #endif -- cgit 1.2.3-korg From cd8eb2913205e5a13ec807061c8f72d6fee624c7 Mon Sep 17 00:00:00 2001 From: Peter Gonda Date: Thu, 22 Feb 2024 16:42:51 -0800 Subject: KVM: selftests: Add support for allocating/managing protected guest memory Add support for differentiating between protected (a.k.a. private, a.k.a. encrypted) memory and normal (a.k.a. shared) memory for VMs that support protected guest memory, e.g. x86's SEV. Provide and manage a common bitmap for tracking whether a given physical page resides in protected memory, as support for protected memory isn't x86 specific, i.e. adding a arch hook would be a net negative now, and in the future. Cc: Paolo Bonzini Cc: Sean Christopherson Cc: Vishal Annapurve Cc: Ackerley Tng cc: Andrew Jones Cc: Tom Lendacky Cc: Michael Roth Reviewed-by: Itaru Kitayama Tested-by: Carlos Bilbao Originally-by: Michael Roth Signed-off-by: Peter Gonda Co-developed-by: Sean Christopherson Link: https://lore.kernel.org/r/20240223004258.3104051-5-seanjc@google.com Signed-off-by: Sean Christopherson --- .../testing/selftests/kvm/include/kvm_util_base.h | 25 ++++++++++++++++++++-- tools/testing/selftests/kvm/lib/kvm_util.c | 22 +++++++++++++++---- 2 files changed, 41 insertions(+), 6 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h index 7ade281682c1f..746cc13b5d6d5 100644 --- a/tools/testing/selftests/kvm/include/kvm_util_base.h +++ b/tools/testing/selftests/kvm/include/kvm_util_base.h @@ -46,6 +46,7 @@ typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */ struct userspace_mem_region { struct kvm_userspace_memory_region2 region; struct sparsebit *unused_phy_pages; + struct sparsebit *protected_phy_pages; int fd; off_t offset; enum vm_mem_backing_src_type backing_src_type; @@ -569,6 +570,13 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type, uint64_t guest_paddr, uint32_t slot, uint64_t npages, uint32_t flags, int guest_memfd_fd, uint64_t guest_memfd_offset); +#ifndef vm_arch_has_protected_memory +static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm) +{ + return false; +} +#endif + void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags); void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa); void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot); @@ -832,10 +840,23 @@ const char *exit_reason_str(unsigned int exit_reason); vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, uint32_t memslot); -vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, - vm_paddr_t paddr_min, uint32_t memslot); +vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, + vm_paddr_t paddr_min, uint32_t memslot, + bool protected); vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm); +static inline vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, + vm_paddr_t paddr_min, uint32_t memslot) +{ + /* + * By default, allocate memory as protected for VMs that support + * protected memory, as the majority of memory for such VMs is + * protected, i.e. using shared memory is effectively opt-in. 
+ */ + return __vm_phy_pages_alloc(vm, num, paddr_min, memslot, + vm_arch_has_protected_memory(vm)); +} + /* * ____vm_create() does KVM_CREATE_VM and little else. __vm_create() also * loads the test binary into guest memory and creates an IRQ chip (x86 only). diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index 22da6a10b41f2..2845349a98ca5 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -666,6 +666,7 @@ static void __vm_mem_region_delete(struct kvm_vm *vm, vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, ®ion->region); sparsebit_free(®ion->unused_phy_pages); + sparsebit_free(®ion->protected_phy_pages); ret = munmap(region->mmap_start, region->mmap_size); TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret)); if (region->fd >= 0) { @@ -1047,6 +1048,8 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type, } region->unused_phy_pages = sparsebit_alloc(); + if (vm_arch_has_protected_memory(vm)) + region->protected_phy_pages = sparsebit_alloc(); sparsebit_set_num(region->unused_phy_pages, guest_paddr >> vm->page_shift, npages); region->region.slot = slot; @@ -1873,6 +1876,10 @@ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) region->host_mem); fprintf(stream, "%*sunused_phy_pages: ", indent + 2, ""); sparsebit_dump(stream, region->unused_phy_pages, 0); + if (region->protected_phy_pages) { + fprintf(stream, "%*sprotected_phy_pages: ", indent + 2, ""); + sparsebit_dump(stream, region->protected_phy_pages, 0); + } } fprintf(stream, "%*sMapped Virtual Pages:\n", indent, ""); sparsebit_dump(stream, vm->vpages_mapped, indent + 2); @@ -1974,6 +1981,7 @@ const char *exit_reason_str(unsigned int exit_reason) * num - number of pages * paddr_min - Physical address minimum * memslot - Memory region to allocate page from + * protected - True if the pages will be used as protected/private memory * * Output Args: None * @@ -1985,8 +1993,9 @@ const char *exit_reason_str(unsigned int exit_reason) * and their base address is returned. A TEST_ASSERT failure occurs if * not enough pages are available at or above paddr_min. */ -vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, - vm_paddr_t paddr_min, uint32_t memslot) +vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, + vm_paddr_t paddr_min, uint32_t memslot, + bool protected) { struct userspace_mem_region *region; sparsebit_idx_t pg, base; @@ -1999,8 +2008,10 @@ vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, paddr_min, vm->page_size); region = memslot2region(vm, memslot); - base = pg = paddr_min >> vm->page_shift; + TEST_ASSERT(!protected || region->protected_phy_pages, + "Region doesn't support protected memory"); + base = pg = paddr_min >> vm->page_shift; do { for (; pg < base + num; ++pg) { if (!sparsebit_is_set(region->unused_phy_pages, pg)) { @@ -2019,8 +2030,11 @@ vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, abort(); } - for (pg = base; pg < base + num; ++pg) + for (pg = base; pg < base + num; ++pg) { sparsebit_clear(region->unused_phy_pages, pg); + if (protected) + sparsebit_set(region->protected_phy_pages, pg); + } return base * vm->page_size; } -- cgit 1.2.3-korg From d210eebb51a23ce45b16c493a51c17b664e81de7 Mon Sep 17 00:00:00 2001 From: Michael Roth Date: Thu, 22 Feb 2024 16:42:52 -0800 Subject: KVM: selftests: Add support for protected vm_vaddr_* allocations Test programs may wish to allocate shared vaddrs for things like sharing memory with the guest. 
Since protected vms will have their memory encrypted by default an interface is needed to explicitly request shared pages. Implement this by splitting the common code out from vm_vaddr_alloc() and introducing a new vm_vaddr_alloc_shared(). Cc: Paolo Bonzini Cc: Sean Christopherson Cc: Vishal Annapurve Cc: Ackerly Tng cc: Andrew Jones Cc: Tom Lendacky Cc: Michael Roth Reviewed-by: Itaru Kitayama Tested-by: Carlos Bilbao Signed-off-by: Michael Roth Signed-off-by: Peter Gonda Link: https://lore.kernel.org/r/20240223004258.3104051-6-seanjc@google.com Signed-off-by: Sean Christopherson --- .../testing/selftests/kvm/include/kvm_util_base.h | 3 +++ tools/testing/selftests/kvm/lib/kvm_util.c | 26 +++++++++++++++++----- 2 files changed, 24 insertions(+), 5 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h index 746cc13b5d6d5..ba90eb7edc5ec 100644 --- a/tools/testing/selftests/kvm/include/kvm_util_base.h +++ b/tools/testing/selftests/kvm/include/kvm_util_base.h @@ -586,6 +586,9 @@ vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_mi vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min); vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, enum kvm_mem_region_type type); +vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz, + vm_vaddr_t vaddr_min, + enum kvm_mem_region_type type); vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages); vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type); diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index 2845349a98ca5..a4ef5185bb036 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -1380,15 +1380,17 @@ va_found: return pgidx_start * vm->page_size; } -vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, - enum kvm_mem_region_type type) +static vm_vaddr_t ____vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, + vm_vaddr_t vaddr_min, + enum kvm_mem_region_type type, + bool protected) { uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0); virt_pgd_alloc(vm); - vm_paddr_t paddr = vm_phy_pages_alloc(vm, pages, - KVM_UTIL_MIN_PFN * vm->page_size, - vm->memslots[type]); + vm_paddr_t paddr = __vm_phy_pages_alloc(vm, pages, + KVM_UTIL_MIN_PFN * vm->page_size, + vm->memslots[type], protected); /* * Find an unused range of virtual page addresses of at least @@ -1408,6 +1410,20 @@ vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, return vaddr_start; } +vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, + enum kvm_mem_region_type type) +{ + return ____vm_vaddr_alloc(vm, sz, vaddr_min, type, + vm_arch_has_protected_memory(vm)); +} + +vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz, + vm_vaddr_t vaddr_min, + enum kvm_mem_region_type type) +{ + return ____vm_vaddr_alloc(vm, sz, vaddr_min, type, false); +} + /* * VM Virtual Address Allocate * -- cgit 1.2.3-korg From 31e00dae72fda939a084cda86b068ac9c302a2d3 Mon Sep 17 00:00:00 2001 From: Peter Gonda Date: Thu, 22 Feb 2024 16:42:53 -0800 Subject: KVM: selftests: Explicitly ucall pool from shared memory Allocate the common ucall pool using vm_vaddr_alloc_shared() so that the ucall structures will be placed in shared (unencrypted) memory for VMs with support for protected (encrypted) 
memory, e.g. x86's SEV. Cc: Paolo Bonzini Cc: Sean Christopherson Cc: Vishal Annapurve Cc: Ackerly Tng cc: Andrew Jones Cc: Tom Lendacky Cc: Michael Roth Tested-by: Carlos Bilbao Signed-off-by: Peter Gonda [sean: massage changelog] Link: https://lore.kernel.org/r/20240223004258.3104051-7-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/lib/ucall_common.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/lib/ucall_common.c b/tools/testing/selftests/kvm/lib/ucall_common.c index 816a3fa109bfb..f5af65a41c296 100644 --- a/tools/testing/selftests/kvm/lib/ucall_common.c +++ b/tools/testing/selftests/kvm/lib/ucall_common.c @@ -29,7 +29,8 @@ void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) vm_vaddr_t vaddr; int i; - vaddr = __vm_vaddr_alloc(vm, sizeof(*hdr), KVM_UTIL_MIN_VADDR, MEM_REGION_DATA); + vaddr = vm_vaddr_alloc_shared(vm, sizeof(*hdr), KVM_UTIL_MIN_VADDR, + MEM_REGION_DATA); hdr = (struct ucall_header *)addr_gva2hva(vm, vaddr); memset(hdr, 0, sizeof(*hdr)); -- cgit 1.2.3-korg From be1bd4c5394ff7eb6f14aaf8005824ed1946bb82 Mon Sep 17 00:00:00 2001 From: Peter Gonda Date: Thu, 22 Feb 2024 16:42:54 -0800 Subject: KVM: selftests: Allow tagging protected memory in guest page tables Add support for tagging and untagging guest physical address, e.g. to allow x86's SEV and TDX guests to embed shared vs. private information in the GPA. SEV (encryption, a.k.a. C-bit) and TDX (shared, a.k.a. S-bit) steal bits from the guest's physical address space that is consumed by the CPU metadata, i.e. effectively aliases the "real" GPA. Implement generic "tagging" so that the shared vs. private metadata can be managed by x86 without bleeding too many details into common code. 
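As a rough sketch of the intended usage (the helper below is made up for illustration; vm_untag_gpa() and vm->gpa_tag_mask are what this patch introduces), consumers strip the tag before treating a GPA as a "real" physical address:

  static inline uint64_t gpa_to_untagged_gfn(struct kvm_vm *vm, vm_paddr_t gpa)
  {
          /* Drop the C-bit/S-bit style tag before deriving the frame number. */
          return vm_untag_gpa(vm, gpa) >> vm->page_shift;
  }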
Cc: Paolo Bonzini Cc: Sean Christopherson Cc: Vishal Annapurve Cc: Ackerly Tng cc: Andrew Jones Cc: Tom Lendacky Cc: Michael Roth Tested-by: Carlos Bilbao Originally-by: Michael Roth Signed-off-by: Peter Gonda Link: https://lore.kernel.org/r/20240223004258.3104051-8-seanjc@google.com Signed-off-by: Sean Christopherson --- .../selftests/kvm/include/aarch64/kvm_util_arch.h | 7 +++++++ tools/testing/selftests/kvm/include/kvm_util_base.h | 13 +++++++++++++ .../selftests/kvm/include/riscv/kvm_util_arch.h | 7 +++++++ .../selftests/kvm/include/s390x/kvm_util_arch.h | 7 +++++++ .../selftests/kvm/include/x86_64/kvm_util_arch.h | 21 +++++++++++++++++++++ tools/testing/selftests/kvm/lib/kvm_util.c | 17 +++++++++++++++++ tools/testing/selftests/kvm/lib/x86_64/processor.c | 15 ++++++++++++++- 7 files changed, 86 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/kvm/include/aarch64/kvm_util_arch.h create mode 100644 tools/testing/selftests/kvm/include/riscv/kvm_util_arch.h create mode 100644 tools/testing/selftests/kvm/include/s390x/kvm_util_arch.h create mode 100644 tools/testing/selftests/kvm/include/x86_64/kvm_util_arch.h (limited to 'tools') diff --git a/tools/testing/selftests/kvm/include/aarch64/kvm_util_arch.h b/tools/testing/selftests/kvm/include/aarch64/kvm_util_arch.h new file mode 100644 index 0000000000000..e43a57d99b56c --- /dev/null +++ b/tools/testing/selftests/kvm/include/aarch64/kvm_util_arch.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef SELFTEST_KVM_UTIL_ARCH_H +#define SELFTEST_KVM_UTIL_ARCH_H + +struct kvm_vm_arch {}; + +#endif // SELFTEST_KVM_UTIL_ARCH_H diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h index ba90eb7edc5ec..4a40b332115d3 100644 --- a/tools/testing/selftests/kvm/include/kvm_util_base.h +++ b/tools/testing/selftests/kvm/include/kvm_util_base.h @@ -18,9 +18,11 @@ #include #include +#include #include +#include "kvm_util_arch.h" #include "sparsebit.h" /* @@ -113,6 +115,9 @@ struct kvm_vm { vm_vaddr_t idt; vm_vaddr_t handlers; uint32_t dirty_ring_size; + uint64_t gpa_tag_mask; + + struct kvm_vm_arch arch; /* Cache of information for binary stats interface */ int stats_fd; @@ -601,6 +606,12 @@ void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva); vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva); void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa); + +static inline vm_paddr_t vm_untag_gpa(struct kvm_vm *vm, vm_paddr_t gpa) +{ + return gpa & ~vm->gpa_tag_mask; +} + void vcpu_run(struct kvm_vcpu *vcpu); int _vcpu_run(struct kvm_vcpu *vcpu); @@ -1113,4 +1124,6 @@ void kvm_selftest_arch_init(void); void kvm_arch_vm_post_create(struct kvm_vm *vm); +bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr); + #endif /* SELFTEST_KVM_UTIL_BASE_H */ diff --git a/tools/testing/selftests/kvm/include/riscv/kvm_util_arch.h b/tools/testing/selftests/kvm/include/riscv/kvm_util_arch.h new file mode 100644 index 0000000000000..e43a57d99b56c --- /dev/null +++ b/tools/testing/selftests/kvm/include/riscv/kvm_util_arch.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef SELFTEST_KVM_UTIL_ARCH_H +#define SELFTEST_KVM_UTIL_ARCH_H + +struct kvm_vm_arch {}; + +#endif // SELFTEST_KVM_UTIL_ARCH_H diff --git a/tools/testing/selftests/kvm/include/s390x/kvm_util_arch.h b/tools/testing/selftests/kvm/include/s390x/kvm_util_arch.h new file mode 100644 index 0000000000000..e43a57d99b56c --- /dev/null +++ 
b/tools/testing/selftests/kvm/include/s390x/kvm_util_arch.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef SELFTEST_KVM_UTIL_ARCH_H +#define SELFTEST_KVM_UTIL_ARCH_H + +struct kvm_vm_arch {}; + +#endif // SELFTEST_KVM_UTIL_ARCH_H diff --git a/tools/testing/selftests/kvm/include/x86_64/kvm_util_arch.h b/tools/testing/selftests/kvm/include/x86_64/kvm_util_arch.h new file mode 100644 index 0000000000000..cfdf8c5e26717 --- /dev/null +++ b/tools/testing/selftests/kvm/include/x86_64/kvm_util_arch.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef SELFTEST_KVM_UTIL_ARCH_H +#define SELFTEST_KVM_UTIL_ARCH_H + +#include +#include + +struct kvm_vm_arch { + uint64_t c_bit; + uint64_t s_bit; +}; + +static inline bool __vm_arch_has_protected_memory(struct kvm_vm_arch *arch) +{ + return arch->c_bit || arch->s_bit; +} + +#define vm_arch_has_protected_memory(vm) \ + __vm_arch_has_protected_memory(&(vm)->arch) + +#endif // SELFTEST_KVM_UTIL_ARCH_H diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index a4ef5185bb036..5d562f9698e7c 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -1546,6 +1546,8 @@ void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa) { struct userspace_mem_region *region; + gpa = vm_untag_gpa(vm, gpa); + region = userspace_mem_region_find(vm, gpa, gpa); if (!region) { TEST_FAIL("No vm physical memory at 0x%lx", gpa); @@ -2254,3 +2256,18 @@ void __attribute((constructor)) kvm_selftest_init(void) kvm_selftest_arch_init(); } + +bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr) +{ + sparsebit_idx_t pg = 0; + struct userspace_mem_region *region; + + if (!vm_arch_has_protected_memory(vm)) + return false; + + region = userspace_mem_region_find(vm, paddr, paddr); + TEST_ASSERT(region, "No vm physical memory at 0x%lx", paddr); + + pg = paddr >> vm->page_shift; + return sparsebit_is_set(region->protected_phy_pages, pg); +} diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c index b9b6cb730a088..534f36a8a5e80 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/processor.c +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c @@ -157,6 +157,8 @@ static uint64_t *virt_create_upper_pte(struct kvm_vm *vm, { uint64_t *pte = virt_get_pte(vm, parent_pte, vaddr, current_level); + paddr = vm_untag_gpa(vm, paddr); + if (!(*pte & PTE_PRESENT_MASK)) { *pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK; if (current_level == target_level) @@ -200,6 +202,8 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level) "Physical address beyond maximum supported,\n" " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", paddr, vm->max_gfn, vm->page_size); + TEST_ASSERT(vm_untag_gpa(vm, paddr) == paddr, + "Unexpected bits in paddr: %lx", paddr); /* * Allocate upper level page tables, if not already present. Return @@ -222,6 +226,15 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level) TEST_ASSERT(!(*pte & PTE_PRESENT_MASK), "PTE already present for 4k page at vaddr: 0x%lx\n", vaddr); *pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK | (paddr & PHYSICAL_PAGE_MASK); + + /* + * Neither SEV nor TDX supports shared page tables, so only the final + * leaf PTE needs manually set the C/S-bit. 
+ */ + if (vm_is_gpa_protected(vm, paddr)) + *pte |= vm->arch.c_bit; + else + *pte |= vm->arch.s_bit; } void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr) @@ -496,7 +509,7 @@ vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) * No need for a hugepage mask on the PTE, x86-64 requires the "unused" * address bits to be zero. */ - return PTE_GET_PA(*pte) | (gva & ~HUGEPAGE_MASK(level)); + return vm_untag_gpa(vm, PTE_GET_PA(*pte)) | (gva & ~HUGEPAGE_MASK(level)); } static void kvm_setup_gdt(struct kvm_vm *vm, struct kvm_dtable *dt) -- cgit 1.2.3-korg From ae20eef5323cce441e8d6c64c8d10493bf3ee232 Mon Sep 17 00:00:00 2001 From: Peter Gonda Date: Thu, 22 Feb 2024 16:42:55 -0800 Subject: KVM: selftests: Add library for creating and interacting with SEV guests Add a library/APIs for creating and interfacing with SEV guests, all of which need some amount of common functionality, e.g. an open file handle for the SEV driver (/dev/sev), ioctl() wrappers to pass said file handle to KVM, tracking of the C-bit, etc. Add an x86-specific hook to initialize address properties, a.k.a. the location of the C-bit. An arch specific hook is rather gross, but x86 already has a dedicated #ifdef-protected kvm_get_cpu_address_width() hook, i.e. the ugliest code already exists. Cc: Paolo Bonzini Cc: Sean Christopherson Cc: Vishal Annapurve Cc: Ackerly Tng cc: Andrew Jones Cc: Tom Lendacky Cc: Michael Roth Tested-by: Carlos Bilbao Originally-by: Michael Roth Signed-off-by: Peter Gonda Co-developed-by: Sean Christopherson Link: https://lore.kernel.org/r/20240223004258.3104051-9-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/Makefile | 1 + .../selftests/kvm/include/x86_64/kvm_util_arch.h | 2 + .../selftests/kvm/include/x86_64/processor.h | 8 ++ tools/testing/selftests/kvm/include/x86_64/sev.h | 105 ++++++++++++++++++++ tools/testing/selftests/kvm/lib/kvm_util.c | 1 + tools/testing/selftests/kvm/lib/x86_64/processor.c | 17 ++++ tools/testing/selftests/kvm/lib/x86_64/sev.c | 110 +++++++++++++++++++++ 7 files changed, 244 insertions(+) create mode 100644 tools/testing/selftests/kvm/include/x86_64/sev.h create mode 100644 tools/testing/selftests/kvm/lib/x86_64/sev.c (limited to 'tools') diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 492e937fab006..4ec6f3bcb7de0 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -37,6 +37,7 @@ LIBKVM_x86_64 += lib/x86_64/handlers.S LIBKVM_x86_64 += lib/x86_64/hyperv.c LIBKVM_x86_64 += lib/x86_64/memstress.c LIBKVM_x86_64 += lib/x86_64/processor.c +LIBKVM_x86_64 += lib/x86_64/sev.c LIBKVM_x86_64 += lib/x86_64/svm.c LIBKVM_x86_64 += lib/x86_64/ucall.c LIBKVM_x86_64 += lib/x86_64/vmx.c diff --git a/tools/testing/selftests/kvm/include/x86_64/kvm_util_arch.h b/tools/testing/selftests/kvm/include/x86_64/kvm_util_arch.h index cfdf8c5e26717..9f1725192aa22 100644 --- a/tools/testing/selftests/kvm/include/x86_64/kvm_util_arch.h +++ b/tools/testing/selftests/kvm/include/x86_64/kvm_util_arch.h @@ -8,6 +8,8 @@ struct kvm_vm_arch { uint64_t c_bit; uint64_t s_bit; + int sev_fd; + bool is_pt_protected; }; static inline bool __vm_arch_has_protected_memory(struct kvm_vm_arch *arch) diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h index a84863503fcb4..20c9e3b33b07c 100644 --- a/tools/testing/selftests/kvm/include/x86_64/processor.h +++ 
b/tools/testing/selftests/kvm/include/x86_64/processor.h @@ -23,6 +23,12 @@ extern bool host_cpu_is_intel; extern bool host_cpu_is_amd; +enum vm_guest_x86_subtype { + VM_SUBTYPE_NONE = 0, + VM_SUBTYPE_SEV, + VM_SUBTYPE_SEV_ES, +}; + #define NMI_VECTOR 0x02 #define X86_EFLAGS_FIXED (1u << 1) @@ -273,6 +279,7 @@ struct kvm_x86_cpu_property { #define X86_PROPERTY_MAX_EXT_LEAF KVM_X86_CPU_PROPERTY(0x80000000, 0, EAX, 0, 31) #define X86_PROPERTY_MAX_PHY_ADDR KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 0, 7) #define X86_PROPERTY_MAX_VIRT_ADDR KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 8, 15) +#define X86_PROPERTY_SEV_C_BIT KVM_X86_CPU_PROPERTY(0x8000001F, 0, EBX, 0, 5) #define X86_PROPERTY_PHYS_ADDR_REDUCTION KVM_X86_CPU_PROPERTY(0x8000001F, 0, EBX, 6, 11) #define X86_PROPERTY_MAX_CENTAUR_LEAF KVM_X86_CPU_PROPERTY(0xC0000000, 0, EAX, 0, 31) @@ -1059,6 +1066,7 @@ do { \ } while (0) void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits); +void kvm_init_vm_address_properties(struct kvm_vm *vm); bool vm_is_unrestricted_guest(struct kvm_vm *vm); struct ex_regs { diff --git a/tools/testing/selftests/kvm/include/x86_64/sev.h b/tools/testing/selftests/kvm/include/x86_64/sev.h new file mode 100644 index 0000000000000..de5283bef7527 --- /dev/null +++ b/tools/testing/selftests/kvm/include/x86_64/sev.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Helpers used for SEV guests + * + */ +#ifndef SELFTEST_KVM_SEV_H +#define SELFTEST_KVM_SEV_H + +#include +#include + +#include "linux/psp-sev.h" + +#include "kvm_util.h" +#include "svm_util.h" +#include "processor.h" + +enum sev_guest_state { + SEV_GUEST_STATE_UNINITIALIZED = 0, + SEV_GUEST_STATE_LAUNCH_UPDATE, + SEV_GUEST_STATE_LAUNCH_SECRET, + SEV_GUEST_STATE_RUNNING, +}; + +#define SEV_POLICY_NO_DBG (1UL << 0) +#define SEV_POLICY_ES (1UL << 2) + +void sev_vm_launch(struct kvm_vm *vm, uint32_t policy); +void sev_vm_launch_measure(struct kvm_vm *vm, uint8_t *measurement); +void sev_vm_launch_finish(struct kvm_vm *vm); + +struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t policy, void *guest_code, + struct kvm_vcpu **cpu); + +kvm_static_assert(SEV_RET_SUCCESS == 0); + +/* + * The KVM_MEMORY_ENCRYPT_OP uAPI is utter garbage and takes an "unsigned long" + * instead of a proper struct. The size of the parameter is embedded in the + * ioctl number, i.e. is ABI and thus immutable. Hack around the mess by + * creating an overlay to pass in an "unsigned long" without a cast (casting + * will make the compiler unhappy due to dereferencing an aliased pointer). 
+ */ +#define __vm_sev_ioctl(vm, cmd, arg) \ +({ \ + int r; \ + \ + union { \ + struct kvm_sev_cmd c; \ + unsigned long raw; \ + } sev_cmd = { .c = { \ + .id = (cmd), \ + .data = (uint64_t)(arg), \ + .sev_fd = (vm)->arch.sev_fd, \ + } }; \ + \ + r = __vm_ioctl(vm, KVM_MEMORY_ENCRYPT_OP, &sev_cmd.raw); \ + r ?: sev_cmd.c.error; \ +}) + +#define vm_sev_ioctl(vm, cmd, arg) \ +({ \ + int ret = __vm_sev_ioctl(vm, cmd, arg); \ + \ + __TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, vm); \ +}) + +static inline void sev_vm_init(struct kvm_vm *vm) +{ + vm->arch.sev_fd = open_sev_dev_path_or_exit(); + + vm_sev_ioctl(vm, KVM_SEV_INIT, NULL); +} + + +static inline void sev_es_vm_init(struct kvm_vm *vm) +{ + vm->arch.sev_fd = open_sev_dev_path_or_exit(); + + vm_sev_ioctl(vm, KVM_SEV_ES_INIT, NULL); +} + +static inline void sev_register_encrypted_memory(struct kvm_vm *vm, + struct userspace_mem_region *region) +{ + struct kvm_enc_region range = { + .addr = region->region.userspace_addr, + .size = region->region.memory_size, + }; + + vm_ioctl(vm, KVM_MEMORY_ENCRYPT_REG_REGION, &range); +} + +static inline void sev_launch_update_data(struct kvm_vm *vm, vm_paddr_t gpa, + uint64_t size) +{ + struct kvm_sev_launch_update_data update_data = { + .uaddr = (unsigned long)addr_gpa2hva(vm, gpa), + .len = size, + }; + + vm_sev_ioctl(vm, KVM_SEV_LAUNCH_UPDATE_DATA, &update_data); +} + +#endif /* SELFTEST_KVM_SEV_H */ diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index 5d562f9698e7c..adc51b0712cae 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -266,6 +266,7 @@ struct kvm_vm *____vm_create(struct vm_shape shape) case VM_MODE_PXXV48_4K: #ifdef __x86_64__ kvm_get_cpu_address_width(&vm->pa_bits, &vm->va_bits); + kvm_init_vm_address_properties(vm); /* * Ignore KVM support for 5-level paging (vm->va_bits == 57), * it doesn't take effect unless a CR4.LA57 is set, which it diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c index 534f36a8a5e80..f1139ba351128 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/processor.c +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c @@ -9,6 +9,7 @@ #include "test_util.h" #include "kvm_util.h" #include "processor.h" +#include "sev.h" #ifndef NUM_INTERRUPTS #define NUM_INTERRUPTS 256 @@ -278,6 +279,9 @@ uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr, { uint64_t *pml4e, *pdpe, *pde; + TEST_ASSERT(!vm->arch.is_pt_protected, + "Walking page tables of protected guests is impossible"); + TEST_ASSERT(*level >= PG_LEVEL_NONE && *level < PG_LEVEL_NUM, "Invalid PG_LEVEL_* '%d'", *level); @@ -573,6 +577,11 @@ void kvm_arch_vm_post_create(struct kvm_vm *vm) vm_create_irqchip(vm); sync_global_to_guest(vm, host_cpu_is_intel); sync_global_to_guest(vm, host_cpu_is_amd); + + if (vm->subtype == VM_SUBTYPE_SEV) + sev_vm_init(vm); + else if (vm->subtype == VM_SUBTYPE_SEV_ES) + sev_es_vm_init(vm); } void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code) @@ -1061,6 +1070,14 @@ void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits) } } +void kvm_init_vm_address_properties(struct kvm_vm *vm) +{ + if (vm->subtype == VM_SUBTYPE_SEV) { + vm->arch.c_bit = BIT_ULL(this_cpu_property(X86_PROPERTY_SEV_C_BIT)); + vm->gpa_tag_mask = vm->arch.c_bit; + } +} + static void set_idt_entry(struct kvm_vm *vm, int vector, unsigned long addr, int dpl, unsigned short selector) { diff --git 
a/tools/testing/selftests/kvm/lib/x86_64/sev.c b/tools/testing/selftests/kvm/lib/x86_64/sev.c new file mode 100644 index 0000000000000..9f5a3dbb5e65b --- /dev/null +++ b/tools/testing/selftests/kvm/lib/x86_64/sev.c @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: GPL-2.0-only +#define _GNU_SOURCE /* for program_invocation_short_name */ +#include +#include + +#include "sev.h" + +/* + * sparsebit_next_clear() can return 0 if [x, 2**64-1] are all set, and the + * -1 would then cause an underflow back to 2**64 - 1. This is expected and + * correct. + * + * If the last range in the sparsebit is [x, y] and we try to iterate, + * sparsebit_next_set() will return 0, and sparsebit_next_clear() will try + * and find the first range, but that's correct because the condition + * expression would cause us to quit the loop. + */ +static void encrypt_region(struct kvm_vm *vm, struct userspace_mem_region *region) +{ + const struct sparsebit *protected_phy_pages = region->protected_phy_pages; + const vm_paddr_t gpa_base = region->region.guest_phys_addr; + const sparsebit_idx_t lowest_page_in_region = gpa_base >> vm->page_shift; + sparsebit_idx_t i, j; + + if (!sparsebit_any_set(protected_phy_pages)) + return; + + sev_register_encrypted_memory(vm, region); + + sparsebit_for_each_set_range(protected_phy_pages, i, j) { + const uint64_t size = (j - i + 1) * vm->page_size; + const uint64_t offset = (i - lowest_page_in_region) * vm->page_size; + + sev_launch_update_data(vm, gpa_base + offset, size); + } +} + +void sev_vm_launch(struct kvm_vm *vm, uint32_t policy) +{ + struct kvm_sev_launch_start launch_start = { + .policy = policy, + }; + struct userspace_mem_region *region; + struct kvm_sev_guest_status status; + int ctr; + + vm_sev_ioctl(vm, KVM_SEV_LAUNCH_START, &launch_start); + vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &status); + + TEST_ASSERT_EQ(status.policy, policy); + TEST_ASSERT_EQ(status.state, SEV_GUEST_STATE_LAUNCH_UPDATE); + + hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) + encrypt_region(vm, region); + + vm->arch.is_pt_protected = true; +} + +void sev_vm_launch_measure(struct kvm_vm *vm, uint8_t *measurement) +{ + struct kvm_sev_launch_measure launch_measure; + struct kvm_sev_guest_status guest_status; + + launch_measure.len = 256; + launch_measure.uaddr = (__u64)measurement; + vm_sev_ioctl(vm, KVM_SEV_LAUNCH_MEASURE, &launch_measure); + + vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &guest_status); + TEST_ASSERT_EQ(guest_status.state, SEV_GUEST_STATE_LAUNCH_SECRET); +} + +void sev_vm_launch_finish(struct kvm_vm *vm) +{ + struct kvm_sev_guest_status status; + + vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &status); + TEST_ASSERT(status.state == SEV_GUEST_STATE_LAUNCH_UPDATE || + status.state == SEV_GUEST_STATE_LAUNCH_SECRET, + "Unexpected guest state: %d", status.state); + + vm_sev_ioctl(vm, KVM_SEV_LAUNCH_FINISH, NULL); + + vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &status); + TEST_ASSERT_EQ(status.state, SEV_GUEST_STATE_RUNNING); +} + +struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t policy, void *guest_code, + struct kvm_vcpu **cpu) +{ + struct vm_shape shape = { + .type = VM_TYPE_DEFAULT, + .mode = VM_MODE_DEFAULT, + .subtype = VM_SUBTYPE_SEV, + }; + struct kvm_vm *vm; + struct kvm_vcpu *cpus[1]; + uint8_t measurement[512]; + + vm = __vm_create_with_vcpus(shape, 1, 0, guest_code, cpus); + *cpu = cpus[0]; + + sev_vm_launch(vm, policy); + + /* TODO: Validate the measurement is as expected. 
*/ + sev_vm_launch_measure(vm, measurement); + + sev_vm_launch_finish(vm); + + return vm; +} -- cgit 1.2.3-korg From 69f8e15ab61f2f011612dad5f0522ea4d56971d9 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 22 Feb 2024 16:42:56 -0800 Subject: KVM: selftests: Use the SEV library APIs in the intra-host migration test Port the existing intra-host SEV(-ES) migration test to the recently added SEV library, which handles much of the boilerplate needed to create and configure SEV guests. Tested-by: Carlos Bilbao Link: https://lore.kernel.org/r/20240223004258.3104051-10-seanjc@google.com Signed-off-by: Sean Christopherson --- .../selftests/kvm/x86_64/sev_migrate_tests.c | 60 +++++++--------------- 1 file changed, 18 insertions(+), 42 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c b/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c index c7ef97561038e..ec3709e1c6847 100644 --- a/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c +++ b/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c @@ -10,11 +10,9 @@ #include "test_util.h" #include "kvm_util.h" #include "processor.h" -#include "svm_util.h" +#include "sev.h" #include "kselftest.h" -#define SEV_POLICY_ES 0b100 - #define NR_MIGRATE_TEST_VCPUS 4 #define NR_MIGRATE_TEST_VMS 3 #define NR_LOCK_TESTING_THREADS 3 @@ -22,46 +20,24 @@ bool have_sev_es; -static int __sev_ioctl(int vm_fd, int cmd_id, void *data, __u32 *fw_error) -{ - struct kvm_sev_cmd cmd = { - .id = cmd_id, - .data = (uint64_t)data, - .sev_fd = open_sev_dev_path_or_exit(), - }; - int ret; - - ret = ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd); - *fw_error = cmd.error; - return ret; -} - -static void sev_ioctl(int vm_fd, int cmd_id, void *data) -{ - int ret; - __u32 fw_error; - - ret = __sev_ioctl(vm_fd, cmd_id, data, &fw_error); - TEST_ASSERT(ret == 0 && fw_error == SEV_RET_SUCCESS, - "%d failed: return code: %d, errno: %d, fw error: %d", - cmd_id, ret, errno, fw_error); -} - static struct kvm_vm *sev_vm_create(bool es) { struct kvm_vm *vm; - struct kvm_sev_launch_start start = { 0 }; int i; vm = vm_create_barebones(); - sev_ioctl(vm->fd, es ? KVM_SEV_ES_INIT : KVM_SEV_INIT, NULL); + if (!es) + sev_vm_init(vm); + else + sev_es_vm_init(vm); + for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i) __vm_vcpu_add(vm, i); + + sev_vm_launch(vm, es ? 
SEV_POLICY_ES : 0); + if (es) - start.policy |= SEV_POLICY_ES; - sev_ioctl(vm->fd, KVM_SEV_LAUNCH_START, &start); - if (es) - sev_ioctl(vm->fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL); + vm_sev_ioctl(vm, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL); return vm; } @@ -181,7 +157,7 @@ static void test_sev_migrate_parameters(void) sev_vm = sev_vm_create(/* es= */ false); sev_es_vm = sev_vm_create(/* es= */ true); sev_es_vm_no_vmsa = vm_create_barebones(); - sev_ioctl(sev_es_vm_no_vmsa->fd, KVM_SEV_ES_INIT, NULL); + sev_es_vm_init(sev_es_vm_no_vmsa); __vm_vcpu_add(sev_es_vm_no_vmsa, 1); ret = __sev_migrate_from(sev_vm, sev_es_vm); @@ -230,13 +206,13 @@ static void sev_mirror_create(struct kvm_vm *dst, struct kvm_vm *src) TEST_ASSERT(!ret, "Copying context failed, ret: %d, errno: %d\n", ret, errno); } -static void verify_mirror_allowed_cmds(int vm_fd) +static void verify_mirror_allowed_cmds(struct kvm_vm *vm) { struct kvm_sev_guest_status status; + int cmd_id; - for (int cmd_id = KVM_SEV_INIT; cmd_id < KVM_SEV_NR_MAX; ++cmd_id) { + for (cmd_id = KVM_SEV_INIT; cmd_id < KVM_SEV_NR_MAX; ++cmd_id) { int ret; - __u32 fw_error; /* * These commands are allowed for mirror VMs, all others are @@ -256,14 +232,14 @@ static void verify_mirror_allowed_cmds(int vm_fd) * These commands should be disallowed before the data * parameter is examined so NULL is OK here. */ - ret = __sev_ioctl(vm_fd, cmd_id, NULL, &fw_error); + ret = __vm_sev_ioctl(vm, cmd_id, NULL); TEST_ASSERT( ret == -1 && errno == EINVAL, "Should not be able call command: %d. ret: %d, errno: %d\n", cmd_id, ret, errno); } - sev_ioctl(vm_fd, KVM_SEV_GUEST_STATUS, &status); + vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &status); } static void test_sev_mirror(bool es) @@ -281,9 +257,9 @@ static void test_sev_mirror(bool es) __vm_vcpu_add(dst_vm, i); if (es) - sev_ioctl(dst_vm->fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL); + vm_sev_ioctl(dst_vm, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL); - verify_mirror_allowed_cmds(dst_vm->fd); + verify_mirror_allowed_cmds(dst_vm); kvm_vm_free(src_vm); kvm_vm_free(dst_vm); -- cgit 1.2.3-korg From be250ff437fa260550113a361ff0b31ccd34e9e5 Mon Sep 17 00:00:00 2001 From: Peter Gonda Date: Thu, 22 Feb 2024 16:42:57 -0800 Subject: KVM: selftests: Add a basic SEV smoke test Add a basic smoke test for SEV guests to verify that KVM can launch an SEV guest and run a few instructions without exploding. To verify that SEV is indeed enabled, assert that SEV is reported as enabled in MSR_AMD64_SEV, a.k.a. SEV_STATUS, which cannot be intercepted by KVM (architecturally enforced). 
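For reference, the reason the MSR read is authoritative: MSR_AMD64_SEV (SEV_STATUS, 0xc0010131) is architecturally readable by the guest and cannot be intercepted, so a guest-side RDMSR proves what the hardware actually enabled. A minimal sketch of the guest check (guest_sev_is_enabled() is an illustrative helper name, not part of the patch):

	/* Bit 0 of MSR_AMD64_SEV reports SEV enabled, bit 1 reports SEV-ES. */
	static inline bool guest_sev_is_enabled(void)
	{
		return rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ENABLED;
	}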
Cc: Paolo Bonzini Cc: Sean Christopherson Cc: Vishal Annapurve Cc: Ackerly Tng cc: Andrew Jones Cc: Tom Lendacky Cc: Michael Roth Suggested-by: Michael Roth Tested-by: Carlos Bilbao Signed-off-by: Peter Gonda [sean: rename to "sev_smoke_test"] Link: https://lore.kernel.org/r/20240223004258.3104051-11-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/Makefile | 1 + .../testing/selftests/kvm/x86_64/sev_smoke_test.c | 58 ++++++++++++++++++++++ 2 files changed, 59 insertions(+) create mode 100644 tools/testing/selftests/kvm/x86_64/sev_smoke_test.c (limited to 'tools') diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 4ec6f3bcb7de0..c75251d5c97c1 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -118,6 +118,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/vmx_pmu_caps_test TEST_GEN_PROGS_x86_64 += x86_64/xen_shinfo_test TEST_GEN_PROGS_x86_64 += x86_64/xen_vmcall_test TEST_GEN_PROGS_x86_64 += x86_64/sev_migrate_tests +TEST_GEN_PROGS_x86_64 += x86_64/sev_smoke_test TEST_GEN_PROGS_x86_64 += x86_64/amx_test TEST_GEN_PROGS_x86_64 += x86_64/max_vcpuid_cap_test TEST_GEN_PROGS_x86_64 += x86_64/triple_fault_event_test diff --git a/tools/testing/selftests/kvm/x86_64/sev_smoke_test.c b/tools/testing/selftests/kvm/x86_64/sev_smoke_test.c new file mode 100644 index 0000000000000..54d72efd9b4d9 --- /dev/null +++ b/tools/testing/selftests/kvm/x86_64/sev_smoke_test.c @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include +#include +#include +#include +#include + +#include "test_util.h" +#include "kvm_util.h" +#include "processor.h" +#include "svm_util.h" +#include "linux/psp-sev.h" +#include "sev.h" + +static void guest_sev_code(void) +{ + GUEST_ASSERT(this_cpu_has(X86_FEATURE_SEV)); + GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ENABLED); + + GUEST_DONE(); +} + +static void test_sev(void *guest_code, uint64_t policy) +{ + struct kvm_vcpu *vcpu; + struct kvm_vm *vm; + struct ucall uc; + + vm = vm_sev_create_with_one_vcpu(policy, guest_code, &vcpu); + + for (;;) { + vcpu_run(vcpu); + + switch (get_ucall(vcpu, &uc)) { + case UCALL_SYNC: + continue; + case UCALL_DONE: + return; + case UCALL_ABORT: + REPORT_GUEST_ASSERT(uc); + default: + TEST_FAIL("Unexpected exit: %s", + exit_reason_str(vcpu->run->exit_reason)); + } + } + + kvm_vm_free(vm); +} + +int main(int argc, char *argv[]) +{ + TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SEV)); + + test_sev(guest_sev_code, SEV_POLICY_NO_DBG); + test_sev(guest_sev_code, 0); + + return 0; +} -- cgit 1.2.3-korg From 40e09b3ccfacc640d58e1e3d6b8f29b2db0a9848 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 22 Feb 2024 16:42:58 -0800 Subject: KVM: selftests: Add a basic SEV-ES smoke test Extend sev_smoke_test to also run a minimal SEV-ES smoke test so that it's possible to test KVM's unique VMRUN=>#VMEXIT path for SEV-ES guests without needing a full blown SEV-ES capable VM, which requires a rather absurd amount of properly configured collateral. Punt on proper GHCB and ucall support, and instead use the GHCB MSR protocol to signal test completion. The most important thing at this point is to have _any_ kind of testing of KVM's __svm_sev_es_vcpu_run(). 
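As context for the handshake described above: in the GHCB MSR protocol the guest stuffs a request code into the GHCB MSR and issues VMGEXIT (encoded as a REP-prefixed VMMCALL); request 0x100 is the termination request, which KVM surfaces to userspace as a KVM_EXIT_SYSTEM_EVENT of type KVM_SYSTEM_EVENT_SEV_TERM with the MSR value in data[0]. A sketch of the guest side, mirroring the diff below:

	/* GHCB MSR protocol: 0x100 = terminate; VMGEXIT == "rep; vmmcall". */
	wrmsr(MSR_AMD64_SEV_ES_GHCB, GHCB_MSR_TERM_REQ);
	__asm__ __volatile__("rep; vmmcall");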
Cc: Tom Lendacky Cc: Michael Roth Cc: Peter Gonda Cc: Carlos Bilbao Tested-by: Carlos Bilbao Link: https://lore.kernel.org/r/20240223004258.3104051-12-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/include/x86_64/sev.h | 2 ++ tools/testing/selftests/kvm/lib/x86_64/processor.c | 2 +- tools/testing/selftests/kvm/lib/x86_64/sev.c | 6 ++++- .../testing/selftests/kvm/x86_64/sev_smoke_test.c | 30 ++++++++++++++++++++++ 4 files changed, 38 insertions(+), 2 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/include/x86_64/sev.h b/tools/testing/selftests/kvm/include/x86_64/sev.h index de5283bef7527..8a1bf88474c92 100644 --- a/tools/testing/selftests/kvm/include/x86_64/sev.h +++ b/tools/testing/selftests/kvm/include/x86_64/sev.h @@ -25,6 +25,8 @@ enum sev_guest_state { #define SEV_POLICY_NO_DBG (1UL << 0) #define SEV_POLICY_ES (1UL << 2) +#define GHCB_MSR_TERM_REQ 0x100 + void sev_vm_launch(struct kvm_vm *vm, uint32_t policy); void sev_vm_launch_measure(struct kvm_vm *vm, uint8_t *measurement); void sev_vm_launch_finish(struct kvm_vm *vm); diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c index f1139ba351128..49288fe10cd34 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/processor.c +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c @@ -1072,7 +1072,7 @@ void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits) void kvm_init_vm_address_properties(struct kvm_vm *vm) { - if (vm->subtype == VM_SUBTYPE_SEV) { + if (vm->subtype == VM_SUBTYPE_SEV || vm->subtype == VM_SUBTYPE_SEV_ES) { vm->arch.c_bit = BIT_ULL(this_cpu_property(X86_PROPERTY_SEV_C_BIT)); vm->gpa_tag_mask = vm->arch.c_bit; } diff --git a/tools/testing/selftests/kvm/lib/x86_64/sev.c b/tools/testing/selftests/kvm/lib/x86_64/sev.c index 9f5a3dbb5e65b..e248d3364b9c3 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/sev.c +++ b/tools/testing/selftests/kvm/lib/x86_64/sev.c @@ -53,6 +53,9 @@ void sev_vm_launch(struct kvm_vm *vm, uint32_t policy) hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) encrypt_region(vm, region); + if (policy & SEV_POLICY_ES) + vm_sev_ioctl(vm, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL); + vm->arch.is_pt_protected = true; } @@ -90,7 +93,8 @@ struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t policy, void *guest_code, struct vm_shape shape = { .type = VM_TYPE_DEFAULT, .mode = VM_MODE_DEFAULT, - .subtype = VM_SUBTYPE_SEV, + .subtype = policy & SEV_POLICY_ES ? VM_SUBTYPE_SEV_ES : + VM_SUBTYPE_SEV, }; struct kvm_vm *vm; struct kvm_vcpu *cpus[1]; diff --git a/tools/testing/selftests/kvm/x86_64/sev_smoke_test.c b/tools/testing/selftests/kvm/x86_64/sev_smoke_test.c index 54d72efd9b4d9..026779f3ed06d 100644 --- a/tools/testing/selftests/kvm/x86_64/sev_smoke_test.c +++ b/tools/testing/selftests/kvm/x86_64/sev_smoke_test.c @@ -12,6 +12,21 @@ #include "linux/psp-sev.h" #include "sev.h" + +static void guest_sev_es_code(void) +{ + /* TODO: Check CPUID after GHCB-based hypercall support is added. */ + GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ENABLED); + GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ES_ENABLED); + + /* + * TODO: Add GHCB and ucall support for SEV-ES guests. For now, simply + * force "termination" to signal "done" via the GHCB MSR protocol. 
+ */ + wrmsr(MSR_AMD64_SEV_ES_GHCB, GHCB_MSR_TERM_REQ); + __asm__ __volatile__("rep; vmmcall"); +} + static void guest_sev_code(void) { GUEST_ASSERT(this_cpu_has(X86_FEATURE_SEV)); @@ -31,6 +46,16 @@ static void test_sev(void *guest_code, uint64_t policy) for (;;) { vcpu_run(vcpu); + if (policy & SEV_POLICY_ES) { + TEST_ASSERT(vcpu->run->exit_reason == KVM_EXIT_SYSTEM_EVENT, + "Wanted SYSTEM_EVENT, got %s", + exit_reason_str(vcpu->run->exit_reason)); + TEST_ASSERT_EQ(vcpu->run->system_event.type, KVM_SYSTEM_EVENT_SEV_TERM); + TEST_ASSERT_EQ(vcpu->run->system_event.ndata, 1); + TEST_ASSERT_EQ(vcpu->run->system_event.data[0], GHCB_MSR_TERM_REQ); + break; + } + switch (get_ucall(vcpu, &uc)) { case UCALL_SYNC: continue; @@ -54,5 +79,10 @@ int main(int argc, char *argv[]) test_sev(guest_sev_code, SEV_POLICY_NO_DBG); test_sev(guest_sev_code, 0); + if (kvm_cpu_has(X86_FEATURE_SEV_ES)) { + test_sev(guest_sev_es_code, SEV_POLICY_ES | SEV_POLICY_NO_DBG); + test_sev(guest_sev_es_code, SEV_POLICY_ES); + } + return 0; } -- cgit 1.2.3-korg From 43b3bedb7cc4348f2885a30e960b63b94d1be381 Mon Sep 17 00:00:00 2001 From: Raghavendra Rao Ananta Date: Wed, 22 Nov 2023 22:15:26 +0000 Subject: KVM: selftests: aarch64: Remove unused functions from vpmu test vpmu_counter_access's disable_counter() carries a bug that disables all the counters that are enabled, instead of just the requested one. Fortunately, it's not an issue as there are no callers of it. Hence, instead of fixing it, remove the definition entirely. Remove enable_counter() as it's unused as well. Signed-off-by: Raghavendra Rao Ananta Reviewed-by: Zenghui Yu Link: https://lore.kernel.org/r/20231122221526.2750966-1-rananta@google.com Signed-off-by: Oliver Upton --- .../testing/selftests/kvm/aarch64/vpmu_counter_access.c | 16 ---------------- 1 file changed, 16 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c index f8f0c655c7232..1b51cd11ee93c 100644 --- a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c +++ b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c @@ -93,22 +93,6 @@ static inline void write_sel_evtyper(int sel, unsigned long val) isb(); } -static inline void enable_counter(int idx) -{ - uint64_t v = read_sysreg(pmcntenset_el0); - - write_sysreg(BIT(idx) | v, pmcntenset_el0); - isb(); -} - -static inline void disable_counter(int idx) -{ - uint64_t v = read_sysreg(pmcntenset_el0); - - write_sysreg(BIT(idx) | v, pmcntenclr_el0); - isb(); -} - static void pmu_disable_reset(void) { uint64_t pmcr = read_sysreg(pmcr_el0); -- cgit 1.2.3-korg From e9da6f08edb0bd4c621165496778d77a222e1174 Mon Sep 17 00:00:00 2001 From: Dongli Zhang Date: Mon, 26 Feb 2024 17:57:16 -0800 Subject: KVM: selftests: Explicitly close guest_memfd files in some gmem tests Explicitly close() guest_memfd files in various guest_memfd and private_mem_conversions tests, there's no reason to keep the files open until the test exits. 
Fixes: 8a89efd43423 ("KVM: selftests: Add basic selftest for guest_memfd()") Fixes: 43f623f350ce ("KVM: selftests: Add x86-only selftest for private memory conversions") Signed-off-by: Dongli Zhang Link: https://lore.kernel.org/r/20240227015716.27284-1-dongli.zhang@oracle.com [sean: massage changelog] Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/guest_memfd_test.c | 3 +++ tools/testing/selftests/kvm/x86_64/private_mem_conversions_test.c | 2 ++ 2 files changed, 5 insertions(+) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/guest_memfd_test.c b/tools/testing/selftests/kvm/guest_memfd_test.c index c78a98c1a915c..92eae206baa62 100644 --- a/tools/testing/selftests/kvm/guest_memfd_test.c +++ b/tools/testing/selftests/kvm/guest_memfd_test.c @@ -167,6 +167,9 @@ static void test_create_guest_memfd_multiple(struct kvm_vm *vm) TEST_ASSERT(ret != -1, "memfd fstat should succeed"); TEST_ASSERT(st1.st_size == 4096, "first memfd st_size should still match requested size"); TEST_ASSERT(st1.st_ino != st2.st_ino, "different memfd should have different inode numbers"); + + close(fd2); + close(fd1); } int main(int argc, char *argv[]) diff --git a/tools/testing/selftests/kvm/x86_64/private_mem_conversions_test.c b/tools/testing/selftests/kvm/x86_64/private_mem_conversions_test.c index 65ad38b6be1f1..e0f642d2a3c4b 100644 --- a/tools/testing/selftests/kvm/x86_64/private_mem_conversions_test.c +++ b/tools/testing/selftests/kvm/x86_64/private_mem_conversions_test.c @@ -434,6 +434,8 @@ static void test_mem_conversions(enum vm_mem_backing_src_type src_type, uint32_t r = fallocate(memfd, FALLOC_FL_KEEP_SIZE, 0, memfd_size); TEST_ASSERT(!r, __KVM_SYSCALL_ERROR("fallocate()", r)); + + close(memfd); } static void usage(const char *cmd) -- cgit 1.2.3-korg From 38f680c25ece49c1f8ff55ee78dca0ee4e1793a6 Mon Sep 17 00:00:00 2001 From: Haibo Xu Date: Mon, 22 Jan 2024 17:58:39 +0800 Subject: KVM: riscv: selftests: Add exception handling support Add the infrastructure for guest exception handling in riscv selftests. Custom handlers can be installed via vm_install_exception_handler(vector) or vm_install_interrupt_handler(). The code is inspired by that of x86/arm64.
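A hedged usage sketch of the new hooks (the handler body and the choice of EXC_INST_ILLEGAL are illustrative, not part of this patch):

	static void ill_handler(struct ex_regs *regs)
	{
		/* Skip the faulting 4-byte instruction and resume the guest. */
		regs->epc += 4;
	}

	vm_init_vector_tables(vm);
	vm_install_exception_handler(vm, EXC_INST_ILLEGAL, ill_handler);
	vcpu_init_vector_tables(vcpu);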
Signed-off-by: Haibo Xu Reviewed-by: Andrew Jones Signed-off-by: Anup Patel --- tools/testing/selftests/kvm/Makefile | 1 + .../selftests/kvm/include/riscv/processor.h | 50 ++++++++++ tools/testing/selftests/kvm/lib/riscv/handlers.S | 101 +++++++++++++++++++++ tools/testing/selftests/kvm/lib/riscv/processor.c | 69 ++++++++++++++ 4 files changed, 221 insertions(+) create mode 100644 tools/testing/selftests/kvm/lib/riscv/handlers.S (limited to 'tools') diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 63592045720f4..3e94e915c9f00 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -53,6 +53,7 @@ LIBKVM_s390x += lib/s390x/diag318_test_handler.c LIBKVM_s390x += lib/s390x/processor.c LIBKVM_s390x += lib/s390x/ucall.c +LIBKVM_riscv += lib/riscv/handlers.S LIBKVM_riscv += lib/riscv/processor.c LIBKVM_riscv += lib/riscv/ucall.c diff --git a/tools/testing/selftests/kvm/include/riscv/processor.h b/tools/testing/selftests/kvm/include/riscv/processor.h index b6b95e747688e..fe8eda69ce00c 100644 --- a/tools/testing/selftests/kvm/include/riscv/processor.h +++ b/tools/testing/selftests/kvm/include/riscv/processor.h @@ -48,6 +48,56 @@ static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t subtype, KVM_REG_RISCV_SBI_SINGLE, \ idx, KVM_REG_SIZE_ULONG) +struct ex_regs { + unsigned long ra; + unsigned long sp; + unsigned long gp; + unsigned long tp; + unsigned long t0; + unsigned long t1; + unsigned long t2; + unsigned long s0; + unsigned long s1; + unsigned long a0; + unsigned long a1; + unsigned long a2; + unsigned long a3; + unsigned long a4; + unsigned long a5; + unsigned long a6; + unsigned long a7; + unsigned long s2; + unsigned long s3; + unsigned long s4; + unsigned long s5; + unsigned long s6; + unsigned long s7; + unsigned long s8; + unsigned long s9; + unsigned long s10; + unsigned long s11; + unsigned long t3; + unsigned long t4; + unsigned long t5; + unsigned long t6; + unsigned long epc; + unsigned long status; + unsigned long cause; +}; + +#define NR_VECTORS 2 +#define NR_EXCEPTIONS 32 +#define EC_MASK (NR_EXCEPTIONS - 1) + +typedef void(*exception_handler_fn)(struct ex_regs *); + +void vm_init_vector_tables(struct kvm_vm *vm); +void vcpu_init_vector_tables(struct kvm_vcpu *vcpu); + +void vm_install_exception_handler(struct kvm_vm *vm, int vector, exception_handler_fn handler); + +void vm_install_interrupt_handler(struct kvm_vm *vm, exception_handler_fn handler); + /* L3 index Bit[47:39] */ #define PGTBL_L3_INDEX_MASK 0x0000FF8000000000ULL #define PGTBL_L3_INDEX_SHIFT 39 diff --git a/tools/testing/selftests/kvm/lib/riscv/handlers.S b/tools/testing/selftests/kvm/lib/riscv/handlers.S new file mode 100644 index 0000000000000..aa0abd3f35bb0 --- /dev/null +++ b/tools/testing/selftests/kvm/lib/riscv/handlers.S @@ -0,0 +1,101 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2023 Intel Corporation + */ + +#ifndef __ASSEMBLY__ +#define __ASSEMBLY__ +#endif + +#include + +.macro save_context + addi sp, sp, (-8*34) + sd x1, 0(sp) + sd x2, 8(sp) + sd x3, 16(sp) + sd x4, 24(sp) + sd x5, 32(sp) + sd x6, 40(sp) + sd x7, 48(sp) + sd x8, 56(sp) + sd x9, 64(sp) + sd x10, 72(sp) + sd x11, 80(sp) + sd x12, 88(sp) + sd x13, 96(sp) + sd x14, 104(sp) + sd x15, 112(sp) + sd x16, 120(sp) + sd x17, 128(sp) + sd x18, 136(sp) + sd x19, 144(sp) + sd x20, 152(sp) + sd x21, 160(sp) + sd x22, 168(sp) + sd x23, 176(sp) + sd x24, 184(sp) + sd x25, 192(sp) + sd x26, 200(sp) + sd x27, 208(sp) + sd x28, 216(sp) + sd x29, 
224(sp) + sd x30, 232(sp) + sd x31, 240(sp) + csrr s0, CSR_SEPC + csrr s1, CSR_SSTATUS + csrr s2, CSR_SCAUSE + sd s0, 248(sp) + sd s1, 256(sp) + sd s2, 264(sp) +.endm + +.macro restore_context + ld s2, 264(sp) + ld s1, 256(sp) + ld s0, 248(sp) + csrw CSR_SCAUSE, s2 + csrw CSR_SSTATUS, s1 + csrw CSR_SEPC, s0 + ld x31, 240(sp) + ld x30, 232(sp) + ld x29, 224(sp) + ld x28, 216(sp) + ld x27, 208(sp) + ld x26, 200(sp) + ld x25, 192(sp) + ld x24, 184(sp) + ld x23, 176(sp) + ld x22, 168(sp) + ld x21, 160(sp) + ld x20, 152(sp) + ld x19, 144(sp) + ld x18, 136(sp) + ld x17, 128(sp) + ld x16, 120(sp) + ld x15, 112(sp) + ld x14, 104(sp) + ld x13, 96(sp) + ld x12, 88(sp) + ld x11, 80(sp) + ld x10, 72(sp) + ld x9, 64(sp) + ld x8, 56(sp) + ld x7, 48(sp) + ld x6, 40(sp) + ld x5, 32(sp) + ld x4, 24(sp) + ld x3, 16(sp) + ld x2, 8(sp) + ld x1, 0(sp) + addi sp, sp, (8*34) +.endm + +.balign 4 +.global exception_vectors +exception_vectors: + save_context + move a0, sp + call route_exception + restore_context + sret diff --git a/tools/testing/selftests/kvm/lib/riscv/processor.c b/tools/testing/selftests/kvm/lib/riscv/processor.c index 2bb33a8ac03c2..87ce44992c87c 100644 --- a/tools/testing/selftests/kvm/lib/riscv/processor.c +++ b/tools/testing/selftests/kvm/lib/riscv/processor.c @@ -13,6 +13,8 @@ #define DEFAULT_RISCV_GUEST_STACK_VADDR_MIN 0xac0000 +static vm_vaddr_t exception_handlers; + static uint64_t page_align(struct kvm_vm *vm, uint64_t v) { return (v + vm->page_size) & ~(vm->page_size - 1); @@ -364,8 +366,75 @@ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...) va_end(ap); } +void kvm_exit_unexpected_exception(int vector, int ec) +{ + ucall(UCALL_UNHANDLED, 2, vector, ec); +} + void assert_on_unhandled_exception(struct kvm_vcpu *vcpu) { + struct ucall uc; + + if (get_ucall(vcpu, &uc) == UCALL_UNHANDLED) { + TEST_FAIL("Unexpected exception (vector:0x%lx, ec:0x%lx)", + uc.args[0], uc.args[1]); + } +} + +struct handlers { + exception_handler_fn exception_handlers[NR_VECTORS][NR_EXCEPTIONS]; +}; + +void route_exception(struct ex_regs *regs) +{ + struct handlers *handlers = (struct handlers *)exception_handlers; + int vector = 0, ec; + + ec = regs->cause & ~CAUSE_IRQ_FLAG; + if (ec >= NR_EXCEPTIONS) + goto unexpected_exception; + + /* Use the same handler for all the interrupts */ + if (regs->cause & CAUSE_IRQ_FLAG) { + vector = 1; + ec = 0; + } + + if (handlers && handlers->exception_handlers[vector][ec]) + return handlers->exception_handlers[vector][ec](regs); + +unexpected_exception: + return kvm_exit_unexpected_exception(vector, ec); +} + +void vcpu_init_vector_tables(struct kvm_vcpu *vcpu) +{ + extern char exception_vectors; + + vcpu_set_reg(vcpu, RISCV_GENERAL_CSR_REG(stvec), (unsigned long)&exception_vectors); +} + +void vm_init_vector_tables(struct kvm_vm *vm) +{ + vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers), + vm->page_size, MEM_REGION_DATA); + + *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers; +} + +void vm_install_exception_handler(struct kvm_vm *vm, int vector, exception_handler_fn handler) +{ + struct handlers *handlers = addr_gva2hva(vm, vm->handlers); + + assert(vector < NR_EXCEPTIONS); + handlers->exception_handlers[0][vector] = handler; +} + +void vm_install_interrupt_handler(struct kvm_vm *vm, exception_handler_fn handler) +{ + struct handlers *handlers = addr_gva2hva(vm, vm->handlers); + + handlers->exception_handlers[1][0] = handler; } struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0, -- cgit 1.2.3-korg From 
1e979288c9b50a1eef1c5fa2fa93936012a0ed6f Mon Sep 17 00:00:00 2001 From: Haibo Xu Date: Mon, 22 Jan 2024 17:58:40 +0800 Subject: KVM: riscv: selftests: Add guest helper to get vcpu id Add guest_get_vcpuid() helper to simplify access to per-cpu private data. The sscratch CSR is used to store the vcpu id. Signed-off-by: Haibo Xu Reviewed-by: Andrew Jones Signed-off-by: Anup Patel --- tools/testing/selftests/kvm/include/aarch64/processor.h | 4 ---- tools/testing/selftests/kvm/include/kvm_util_base.h | 2 ++ tools/testing/selftests/kvm/lib/riscv/processor.c | 8 ++++++++ 3 files changed, 10 insertions(+), 4 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/include/aarch64/processor.h b/tools/testing/selftests/kvm/include/aarch64/processor.h index cf20e44e86f2f..9e518b5628273 100644 --- a/tools/testing/selftests/kvm/include/aarch64/processor.h +++ b/tools/testing/selftests/kvm/include/aarch64/processor.h @@ -226,8 +226,4 @@ void smccc_smc(uint32_t function_id, uint64_t arg0, uint64_t arg1, uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5, uint64_t arg6, struct arm_smccc_res *res); - - -uint32_t guest_get_vcpuid(void); - #endif /* SELFTEST_KVM_PROCESSOR_H */ diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h index 9e5afc472c142..39c2499df3410 100644 --- a/tools/testing/selftests/kvm/include/kvm_util_base.h +++ b/tools/testing/selftests/kvm/include/kvm_util_base.h @@ -1081,4 +1081,6 @@ void kvm_selftest_arch_init(void); void kvm_arch_vm_post_create(struct kvm_vm *vm); +uint32_t guest_get_vcpuid(void); + #endif /* SELFTEST_KVM_UTIL_BASE_H */ diff --git a/tools/testing/selftests/kvm/lib/riscv/processor.c b/tools/testing/selftests/kvm/lib/riscv/processor.c index 87ce44992c87c..4be94d0f0b94f 100644 --- a/tools/testing/selftests/kvm/lib/riscv/processor.c +++ b/tools/testing/selftests/kvm/lib/riscv/processor.c @@ -316,6 +316,9 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.sp), stack_vaddr + stack_size); vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.pc), (unsigned long)guest_code); + /* Setup sscratch for guest_get_vcpuid() */ + vcpu_set_reg(vcpu, RISCV_GENERAL_CSR_REG(sscratch), vcpu_id); + /* Setup default exception vector of guest */ vcpu_set_reg(vcpu, RISCV_GENERAL_CSR_REG(stvec), (unsigned long)guest_unexp_trap); @@ -437,6 +440,11 @@ void vm_install_interrupt_handler(struct kvm_vm *vm, exception_handler_fn handle handlers->exception_handlers[1][0] = handler; } +uint32_t guest_get_vcpuid(void) +{ + return csr_read(CSR_SSCRATCH); +} + struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0, unsigned long arg1, unsigned long arg2, unsigned long arg3, unsigned long arg4, -- cgit 1.2.3-korg From 812806bd1e70f79cc69061f9fd9bb1d367990d37 Mon Sep 17 00:00:00 2001 From: Haibo Xu Date: Mon, 22 Jan 2024 17:58:41 +0800 Subject: KVM: riscv: selftests: Change vcpu_has_ext to a common function Move vcpu_has_ext to processor.c and rename it to __vcpu_has_ext so that other test cases can use it for vCPU extension checks.
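A typical use of the now-shared helper, e.g. as the sstc timer test later in this series does (sketch; the SSTC register is just an example argument):

	__TEST_REQUIRE(__vcpu_has_ext(vcpu, RISCV_ISA_EXT_REG(KVM_RISCV_ISA_EXT_SSTC)),
		       "SSTC not available, skipping test");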
Signed-off-by: Haibo Xu Reviewed-by: Andrew Jones Signed-off-by: Anup Patel --- tools/testing/selftests/kvm/include/riscv/processor.h | 2 ++ tools/testing/selftests/kvm/lib/riscv/processor.c | 10 ++++++++++ tools/testing/selftests/kvm/riscv/get-reg-list.c | 11 +---------- 3 files changed, 13 insertions(+), 10 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/include/riscv/processor.h b/tools/testing/selftests/kvm/include/riscv/processor.h index fe8eda69ce00c..1b0a9e9d2d301 100644 --- a/tools/testing/selftests/kvm/include/riscv/processor.h +++ b/tools/testing/selftests/kvm/include/riscv/processor.h @@ -48,6 +48,8 @@ static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t subtype, KVM_REG_RISCV_SBI_SINGLE, \ idx, KVM_REG_SIZE_ULONG) +bool __vcpu_has_ext(struct kvm_vcpu *vcpu, uint64_t ext); + struct ex_regs { unsigned long ra; unsigned long sp; diff --git a/tools/testing/selftests/kvm/lib/riscv/processor.c b/tools/testing/selftests/kvm/lib/riscv/processor.c index 4be94d0f0b94f..ec66d331a1273 100644 --- a/tools/testing/selftests/kvm/lib/riscv/processor.c +++ b/tools/testing/selftests/kvm/lib/riscv/processor.c @@ -15,6 +15,16 @@ static vm_vaddr_t exception_handlers; +bool __vcpu_has_ext(struct kvm_vcpu *vcpu, uint64_t ext) +{ + unsigned long value = 0; + int ret; + + ret = __vcpu_get_reg(vcpu, ext, &value); + + return !ret && !!value; +} + static uint64_t page_align(struct kvm_vm *vm, uint64_t v) { return (v + vm->page_size) & ~(vm->page_size - 1); } diff --git a/tools/testing/selftests/kvm/riscv/get-reg-list.c b/tools/testing/selftests/kvm/riscv/get-reg-list.c index 6435e7a656425..8cece02ca23ad 100644 --- a/tools/testing/selftests/kvm/riscv/get-reg-list.c +++ b/tools/testing/selftests/kvm/riscv/get-reg-list.c @@ -123,15 +123,6 @@ bool check_reject_set(int err) return err == EINVAL; } -static bool vcpu_has_ext(struct kvm_vcpu *vcpu, uint64_t ext_id) -{ - int ret; - unsigned long value; - - ret = __vcpu_get_reg(vcpu, ext_id, &value); - return (ret) ? false : !!value; -} - void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c) { unsigned long isa_ext_state[KVM_RISCV_ISA_EXT_MAX] = { 0 }; @@ -176,7 +167,7 @@ void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c) __vcpu_set_reg(vcpu, feature, 1); /* Double check whether the desired extension was enabled */ - __TEST_REQUIRE(vcpu_has_ext(vcpu, feature), + __TEST_REQUIRE(__vcpu_has_ext(vcpu, feature), "%s not available, skipping tests", s->name); } } -- cgit 1.2.3-korg From d0b94bcbb04262b9ffe6e172223e8cbb663a2c9d Mon Sep 17 00:00:00 2001 From: Haibo Xu Date: Mon, 22 Jan 2024 17:58:42 +0800 Subject: KVM: riscv: selftests: Add sstc timer test Add a KVM selftest to validate the Sstc timer functionality. The test was ported from the arm64 arch timer test.
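The Sstc programming model the test exercises is simple: the S-mode timer interrupt fires once time >= stimecmp, so arming the next tick is two CSR writes. A sketch using the helpers from the arch_timer.h added below (period_ms is an assumed variable):

	/* Arm the next interrupt one period from now, then unmask it. */
	timer_set_cmp(timer_get_cycles() + msec_to_cycles(period_ms));
	csr_set(CSR_SIE, IE_TIE);	/* i.e. timer_irq_enable() */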
Signed-off-by: Haibo Xu Reviewed-by: Andrew Jones Signed-off-by: Anup Patel --- tools/testing/selftests/kvm/Makefile | 1 + tools/testing/selftests/kvm/aarch64/arch_timer.c | 12 ++- tools/testing/selftests/kvm/arch_timer.c | 10 +- .../selftests/kvm/include/riscv/arch_timer.h | 71 +++++++++++++ .../selftests/kvm/include/riscv/processor.h | 10 ++ tools/testing/selftests/kvm/include/timer_test.h | 5 +- tools/testing/selftests/kvm/riscv/arch_timer.c | 111 +++++++++++++++++++++ 7 files changed, 210 insertions(+), 10 deletions(-) create mode 100644 tools/testing/selftests/kvm/include/riscv/arch_timer.h create mode 100644 tools/testing/selftests/kvm/riscv/arch_timer.c (limited to 'tools') diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 3e94e915c9f00..426f85798aead 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -185,6 +185,7 @@ TEST_GEN_PROGS_s390x += rseq_test TEST_GEN_PROGS_s390x += set_memory_region_test TEST_GEN_PROGS_s390x += kvm_binary_stats_test +TEST_GEN_PROGS_riscv += arch_timer TEST_GEN_PROGS_riscv += demand_paging_test TEST_GEN_PROGS_riscv += dirty_log_test TEST_GEN_PROGS_riscv += get-reg-list diff --git a/tools/testing/selftests/kvm/aarch64/arch_timer.c b/tools/testing/selftests/kvm/aarch64/arch_timer.c index b9ce8c6455db6..6dfd7fa1fced1 100644 --- a/tools/testing/selftests/kvm/aarch64/arch_timer.c +++ b/tools/testing/selftests/kvm/aarch64/arch_timer.c @@ -194,10 +194,14 @@ struct kvm_vm *test_vm_create(void) vm_init_descriptor_tables(vm); vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT, guest_irq_handler); - if (!test_args.offset.reserved) { - if (kvm_has_cap(KVM_CAP_COUNTER_OFFSET)) - vm_ioctl(vm, KVM_ARM_SET_COUNTER_OFFSET, &test_args.offset); - else + if (!test_args.reserved) { + if (kvm_has_cap(KVM_CAP_COUNTER_OFFSET)) { + struct kvm_arm_counter_offset offset = { + .counter_offset = test_args.counter_offset, + .reserved = 0, + }; + vm_ioctl(vm, KVM_ARM_SET_COUNTER_OFFSET, &offset); + } else TEST_FAIL("no support for global offset"); } diff --git a/tools/testing/selftests/kvm/arch_timer.c b/tools/testing/selftests/kvm/arch_timer.c index f7e4cee8cb349..ae1f1a6d83123 100644 --- a/tools/testing/selftests/kvm/arch_timer.c +++ b/tools/testing/selftests/kvm/arch_timer.c @@ -36,7 +36,7 @@ struct test_args test_args = { .timer_period_ms = TIMER_TEST_PERIOD_MS_DEF, .migration_freq_ms = TIMER_TEST_MIGRATION_FREQ_MS, .timer_err_margin_us = TIMER_TEST_ERR_MARGIN_US, - .offset = { .reserved = 1 }, + .reserved = 1, }; struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; @@ -75,6 +75,8 @@ static void *test_vcpu_run(void *arg) TEST_FAIL("Unexpected guest exit"); } + pr_info("PASS(vCPU-%d).\n", vcpu_idx); + return NULL; } @@ -190,7 +192,7 @@ static void test_print_help(char *name) TIMER_TEST_PERIOD_MS_DEF); pr_info("\t-m: Frequency (in ms) of vCPUs to migrate to different pCPU. 
0 to turn off (default: %u)\n", TIMER_TEST_MIGRATION_FREQ_MS); - pr_info("\t-o: Counter offset (in counter cycles, default: 0)\n"); + pr_info("\t-o: Counter offset (in counter cycles, default: 0) [aarch64-only]\n"); pr_info("\t-e: Interrupt arrival error margin (in us) of the guest timer (default: %u)\n", TIMER_TEST_ERR_MARGIN_US); pr_info("\t-h: print this help screen\n"); @@ -223,8 +225,8 @@ static bool parse_args(int argc, char *argv[]) test_args.timer_err_margin_us = atoi_non_negative("Error Margin", optarg); break; case 'o': - test_args.offset.counter_offset = strtol(optarg, NULL, 0); - test_args.offset.reserved = 0; + test_args.counter_offset = strtol(optarg, NULL, 0); + test_args.reserved = 0; break; case 'h': default: diff --git a/tools/testing/selftests/kvm/include/riscv/arch_timer.h b/tools/testing/selftests/kvm/include/riscv/arch_timer.h new file mode 100644 index 0000000000000..225d81dad064f --- /dev/null +++ b/tools/testing/selftests/kvm/include/riscv/arch_timer.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * RISC-V Arch Timer(sstc) specific interface + * + * Copyright (c) 2024 Intel Corporation + */ + +#ifndef SELFTEST_KVM_ARCH_TIMER_H +#define SELFTEST_KVM_ARCH_TIMER_H + +#include +#include + +static unsigned long timer_freq; + +#define msec_to_cycles(msec) \ + ((timer_freq) * (uint64_t)(msec) / 1000) + +#define usec_to_cycles(usec) \ + ((timer_freq) * (uint64_t)(usec) / 1000000) + +#define cycles_to_usec(cycles) \ + ((uint64_t)(cycles) * 1000000 / (timer_freq)) + +static inline uint64_t timer_get_cycles(void) +{ + return csr_read(CSR_TIME); +} + +static inline void timer_set_cmp(uint64_t cval) +{ + csr_write(CSR_STIMECMP, cval); +} + +static inline uint64_t timer_get_cmp(void) +{ + return csr_read(CSR_STIMECMP); +} + +static inline void timer_irq_enable(void) +{ + csr_set(CSR_SIE, IE_TIE); +} + +static inline void timer_irq_disable(void) +{ + csr_clear(CSR_SIE, IE_TIE); +} + +static inline void timer_set_next_cmp_ms(uint32_t msec) +{ + uint64_t now_ct = timer_get_cycles(); + uint64_t next_ct = now_ct + msec_to_cycles(msec); + + timer_set_cmp(next_ct); +} + +static inline void __delay(uint64_t cycles) +{ + uint64_t start = timer_get_cycles(); + + while ((timer_get_cycles() - start) < cycles) + cpu_relax(); +} + +static inline void udelay(unsigned long usec) +{ + __delay(usec_to_cycles(usec)); +} + +#endif /* SELFTEST_KVM_ARCH_TIMER_H */ diff --git a/tools/testing/selftests/kvm/include/riscv/processor.h b/tools/testing/selftests/kvm/include/riscv/processor.h index 1b0a9e9d2d301..ce473fe251dde 100644 --- a/tools/testing/selftests/kvm/include/riscv/processor.h +++ b/tools/testing/selftests/kvm/include/riscv/processor.h @@ -193,4 +193,14 @@ struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0, bool guest_sbi_probe_extension(int extid, long *out_val); +static inline void local_irq_enable(void) +{ + csr_set(CSR_SSTATUS, SR_SIE); +} + +static inline void local_irq_disable(void) +{ + csr_clear(CSR_SSTATUS, SR_SIE); +} + #endif /* SELFTEST_KVM_PROCESSOR_H */ diff --git a/tools/testing/selftests/kvm/include/timer_test.h b/tools/testing/selftests/kvm/include/timer_test.h index 256e2d2137cf1..9b6edaafe6d49 100644 --- a/tools/testing/selftests/kvm/include/timer_test.h +++ b/tools/testing/selftests/kvm/include/timer_test.h @@ -23,8 +23,9 @@ struct test_args { uint32_t timer_period_ms; uint32_t migration_freq_ms; uint32_t timer_err_margin_us; - /* TODO: Change arm specific type to a common one */ - struct kvm_arm_counter_offset offset; + /* Members of struct 
kvm_arm_counter_offset */ + uint64_t counter_offset; + uint64_t reserved; }; /* Shared variables between host and guest */ diff --git a/tools/testing/selftests/kvm/riscv/arch_timer.c b/tools/testing/selftests/kvm/riscv/arch_timer.c new file mode 100644 index 0000000000000..e22848f747c01 --- /dev/null +++ b/tools/testing/selftests/kvm/riscv/arch_timer.c @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * arch_timer.c - Tests the riscv64 sstc timer IRQ functionality + * + * The test validates the sstc timer IRQs using vstimecmp registers. + * It's ported from the aarch64 arch_timer test. + * + * Copyright (c) 2024, Intel Corporation. + */ + +#define _GNU_SOURCE + +#include "arch_timer.h" +#include "kvm_util.h" +#include "processor.h" +#include "timer_test.h" + +static int timer_irq = IRQ_S_TIMER; + +static void guest_irq_handler(struct ex_regs *regs) +{ + uint64_t xcnt, xcnt_diff_us, cmp; + unsigned int intid = regs->cause & ~CAUSE_IRQ_FLAG; + uint32_t cpu = guest_get_vcpuid(); + struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu]; + + timer_irq_disable(); + + xcnt = timer_get_cycles(); + cmp = timer_get_cmp(); + xcnt_diff_us = cycles_to_usec(xcnt - shared_data->xcnt); + + /* Make sure we are dealing with the correct timer IRQ */ + GUEST_ASSERT_EQ(intid, timer_irq); + + __GUEST_ASSERT(xcnt >= cmp, + "xcnt = 0x%"PRIx64", cmp = 0x%"PRIx64", xcnt_diff_us = 0x%" PRIx64, + xcnt, cmp, xcnt_diff_us); + + WRITE_ONCE(shared_data->nr_iter, shared_data->nr_iter + 1); +} + +static void guest_run(struct test_vcpu_shared_data *shared_data) +{ + uint32_t irq_iter, config_iter; + + shared_data->nr_iter = 0; + shared_data->guest_stage = 0; + + for (config_iter = 0; config_iter < test_args.nr_iter; config_iter++) { + /* Setup the next interrupt */ + timer_set_next_cmp_ms(test_args.timer_period_ms); + shared_data->xcnt = timer_get_cycles(); + timer_irq_enable(); + + /* Setup a timeout for the interrupt to arrive */ + udelay(msecs_to_usecs(test_args.timer_period_ms) + + test_args.timer_err_margin_us); + + irq_iter = READ_ONCE(shared_data->nr_iter); + __GUEST_ASSERT(config_iter + 1 == irq_iter, + "config_iter + 1 = 0x%x, irq_iter = 0x%x.\n" + " Guest timer interrupt was not triggered within the specified\n" + " interval; try to increase the error margin via the [-e] option.\n", + config_iter + 1, irq_iter); + } +} + +static void guest_code(void) +{ + uint32_t cpu = guest_get_vcpuid(); + struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu]; + + timer_irq_disable(); + local_irq_enable(); + + guest_run(shared_data); + + GUEST_DONE(); +} + +struct kvm_vm *test_vm_create(void) +{ + struct kvm_vm *vm; + int nr_vcpus = test_args.nr_vcpus; + + vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus); + __TEST_REQUIRE(__vcpu_has_ext(vcpus[0], RISCV_ISA_EXT_REG(KVM_RISCV_ISA_EXT_SSTC)), + "SSTC not available, skipping test\n"); + + vm_init_vector_tables(vm); + vm_install_interrupt_handler(vm, guest_irq_handler); + + for (int i = 0; i < nr_vcpus; i++) + vcpu_init_vector_tables(vcpus[i]); + + /* Initialize guest timer frequency. 
*/ + vcpu_get_reg(vcpus[0], RISCV_TIMER_REG(frequency), &timer_freq); + sync_global_to_guest(vm, timer_freq); + pr_debug("timer_freq: %lu\n", timer_freq); + + /* Make all the test's cmdline args visible to the guest */ + sync_global_to_guest(vm, test_args); + + return vm; +} + +void test_vm_cleanup(struct kvm_vm *vm) +{ + kvm_vm_free(vm); +} -- cgit 1.2.3-korg From d9bb4eca32f99c5fcde705d1bb4cb6b445dbd6e8 Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Tue, 13 Feb 2024 10:36:16 +0530 Subject: KVM: riscv: selftests: Add Ztso extension to get-reg-list test The KVM RISC-V allows Ztso extension for Guest/VM so add this extension to get-reg-list test. Signed-off-by: Anup Patel Reviewed-by: Andrew Jones Signed-off-by: Anup Patel --- tools/testing/selftests/kvm/riscv/get-reg-list.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/riscv/get-reg-list.c b/tools/testing/selftests/kvm/riscv/get-reg-list.c index 8cece02ca23ad..a464aa6c0b091 100644 --- a/tools/testing/selftests/kvm/riscv/get-reg-list.c +++ b/tools/testing/selftests/kvm/riscv/get-reg-list.c @@ -73,6 +73,7 @@ bool filter_reg(__u64 reg) case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKSED: case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKSH: case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKT: + case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZTSO: case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVBB: case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVBC: case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVFH: @@ -436,6 +437,7 @@ static const char *isa_ext_single_id_to_str(__u64 reg_off) KVM_ISA_EXT_ARR(ZKSED), KVM_ISA_EXT_ARR(ZKSH), KVM_ISA_EXT_ARR(ZKT), + KVM_ISA_EXT_ARR(ZTSO), KVM_ISA_EXT_ARR(ZVBB), KVM_ISA_EXT_ARR(ZVBC), KVM_ISA_EXT_ARR(ZVFH), @@ -957,6 +959,7 @@ KVM_ISA_EXT_SIMPLE_CONFIG(zkr, ZKR); KVM_ISA_EXT_SIMPLE_CONFIG(zksed, ZKSED); KVM_ISA_EXT_SIMPLE_CONFIG(zksh, ZKSH); KVM_ISA_EXT_SIMPLE_CONFIG(zkt, ZKT); +KVM_ISA_EXT_SIMPLE_CONFIG(ztso, ZTSO); KVM_ISA_EXT_SIMPLE_CONFIG(zvbb, ZVBB); KVM_ISA_EXT_SIMPLE_CONFIG(zvbc, ZVBC); KVM_ISA_EXT_SIMPLE_CONFIG(zvfh, ZVFH); @@ -1010,6 +1013,7 @@ struct vcpu_reg_list *vcpu_configs[] = { &config_zksed, &config_zksh, &config_zkt, + &config_ztso, &config_zvbb, &config_zvbc, &config_zvfh, -- cgit 1.2.3-korg From d8c0831348e78fdaf67aa95070bae2ef8e819b05 Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Tue, 13 Feb 2024 13:39:17 +0530 Subject: KVM: riscv: selftests: Add Zacas extension to get-reg-list test The KVM RISC-V allows Zacas extension for Guest/VM so add this extension to get-reg-list test. 
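Each of these single-extension enablement commits follows the same three-touch pattern in get-reg-list.c; schematically, using ZACAS exactly as the diff below does:

	/* 1. Accept the register in filter_reg(). */
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZACAS:
	/* 2. Map the ID to a printable name in isa_ext_single_id_to_str(). */
	KVM_ISA_EXT_ARR(ZACAS),
	/* 3. Generate a config and register it in vcpu_configs[]. */
	KVM_ISA_EXT_SIMPLE_CONFIG(zacas, ZACAS);
	&config_zacas,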
Signed-off-by: Anup Patel Reviewed-by: Andrew Jones Signed-off-by: Anup Patel --- tools/testing/selftests/kvm/riscv/get-reg-list.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/riscv/get-reg-list.c b/tools/testing/selftests/kvm/riscv/get-reg-list.c index a464aa6c0b091..b882b7b9b7850 100644 --- a/tools/testing/selftests/kvm/riscv/get-reg-list.c +++ b/tools/testing/selftests/kvm/riscv/get-reg-list.c @@ -47,6 +47,7 @@ bool filter_reg(__u64 reg) case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SVINVAL: case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SVNAPOT: case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SVPBMT: + case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZACAS: case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBA: case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBB: case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBC: @@ -411,6 +412,7 @@ static const char *isa_ext_single_id_to_str(__u64 reg_off) KVM_ISA_EXT_ARR(SVINVAL), KVM_ISA_EXT_ARR(SVNAPOT), KVM_ISA_EXT_ARR(SVPBMT), + KVM_ISA_EXT_ARR(ZACAS), KVM_ISA_EXT_ARR(ZBA), KVM_ISA_EXT_ARR(ZBB), KVM_ISA_EXT_ARR(ZBC), @@ -933,6 +935,7 @@ KVM_ISA_EXT_SIMPLE_CONFIG(sstc, SSTC); KVM_ISA_EXT_SIMPLE_CONFIG(svinval, SVINVAL); KVM_ISA_EXT_SIMPLE_CONFIG(svnapot, SVNAPOT); KVM_ISA_EXT_SIMPLE_CONFIG(svpbmt, SVPBMT); +KVM_ISA_EXT_SIMPLE_CONFIG(zacas, ZACAS); KVM_ISA_EXT_SIMPLE_CONFIG(zba, ZBA); KVM_ISA_EXT_SIMPLE_CONFIG(zbb, ZBB); KVM_ISA_EXT_SIMPLE_CONFIG(zbc, ZBC); @@ -987,6 +990,7 @@ struct vcpu_reg_list *vcpu_configs[] = { &config_svinval, &config_svnapot, &config_svpbmt, + &config_zacas, &config_zba, &config_zbb, &config_zbc, -- cgit 1.2.3-korg From 4781179012d9380005649b0fe07f77dcaa2610e3 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 15 Mar 2024 06:52:55 -0400 Subject: selftests: kvm: remove meaningless assignments in Makefiles $(shell ...) expands to the output of the command. It expands to the empty string when the command does not print anything to stdout. Hence, $(shell mkdir ...) is sufficient and does not need any variable assignment in front of it. Commit c2bd08ba20a5 ("treewide: remove meaningless assignments in Makefiles", 2024-02-23) did this to all of tools/ but ignored in-flight changes to tools/testing/selftests/kvm/Makefile, so reapply the change. 
Cc: Masahiro Yamada Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 19f5710bb4568..741c7dc16afc7 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -277,7 +277,7 @@ TEST_DEP_FILES += $(patsubst %.o, %.d, $(LIBKVM_OBJS)) TEST_DEP_FILES += $(patsubst %.o, %.d, $(SPLIT_TEST_GEN_OBJ)) -include $(TEST_DEP_FILES) -x := $(shell mkdir -p $(sort $(OUTPUT)/$(ARCH_DIR) $(dir $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ)))) +$(shell mkdir -p $(sort $(OUTPUT)/$(ARCH_DIR) $(dir $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ)))) $(filter-out $(SPLIT_TEST_GEN_PROGS), $(TEST_GEN_PROGS)) \ $(TEST_GEN_PROGS_EXTENDED): %: %.o @@ -309,7 +309,7 @@ $(LIBKVM_S_OBJ): $(OUTPUT)/%.o: %.S $(GEN_HDRS) $(LIBKVM_STRING_OBJ): $(OUTPUT)/%.o: %.c $(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c -ffreestanding $< -o $@ -x := $(shell mkdir -p $(sort $(dir $(TEST_GEN_PROGS)))) +$(shell mkdir -p $(sort $(dir $(TEST_GEN_PROGS)))) $(SPLIT_TEST_GEN_OBJ): $(GEN_HDRS) $(TEST_GEN_PROGS): $(LIBKVM_OBJS) $(TEST_GEN_PROGS_EXTENDED): $(LIBKVM_OBJS) -- cgit 1.2.3-korg