KVM selftests fixes for 6.2
- Fix an inverted check in the access tracking perf test, and restore
  support for asserting that there aren't too many idle pages when
  running on bare metal.

- Fix an ordering issue in the AMX test introduced by recent conversions
  to use kvm_cpu_has(), and harden the code to guard against similar bugs
  in the future.  Anything that triggers caching of KVM's supported CPUID,
  kvm_cpu_has() in this case, effectively hides opt-in XSAVE features if
  the caching occurs before the test opts in via prctl().

- Fix build errors that occur in certain setups (unsure exactly what is
  unique about the problematic setup) due to glibc overriding
  static_assert() to a variant that requires a custom message.

-----BEGIN PGP SIGNATURE-----

iQJGBAABCgAwFiEEMHr+pfEFOIzK+KY1YJEiAU0MEvkFAmOJPQkSHHNlYW5qY0Bn
b29nbGUuY29tAAoJEGCRIgFNDBL5v2AQALv4BN7H4JZWCo0Sjl8cPtLJgLyxJRzt
gsMnxMkxLO5sf0wGVppJBVcNbrhCRwz/VktaTnwdTqr74B4oGPozRdCzXxHe5OMo
Ngy3bREEfmDD7fPW6Tjp2qjEmFW3sSHBWl7nrtWmV/h8JqZKj/SntxqZCGF8xjtM
C3g51DRfkY6gBvX8vObh5iUVP/yr3fVKlrjgLQHtWirt3JybfgW51ULN1yA2j0QM
2r9lYwPK9N0MBRQOd2r/9LBmYXc7nkHdZJnnCUIPPX+ZUZuGuNJ5OQVNqVjlZtCZ
ZjQBq31ZoUshUFtuA9CopBJ97vUUVw/esKo0eGVX8oUVlyn/iklrr4gArktSwUPX
aFVVFNMsBNcyVaJUHeFafYvpjiPg76Fh+Ts3PX2B/O9HHKuiYLIeD8c8RDmlsviA
JcL3fETIXpUwcqbCyYWdCOO3Mv6tHllbkbYYcS2wbHuvXmSTPqN0ebE+PdtoU00v
kAkbnHBLVSMBH09ohR7wK6K9jl7V1tRb9Wyq0Hl1pI6Of/B2/q7gs3SNh8zQaWQH
2TNbx5grLN8ElpjXnMZILwCy3HjnOEbyFP3sp70sTo2lK6u0eduVAds89LI06Adi
5Zz57oPxrW22VyvI9uItDvfwef57w+E6kkPucwsDFj65sCZ/rcMz2do6HWCtU8sn
NglwBZ6V616T
=Gkt4
-----END PGP SIGNATURE-----

Merge tag 'kvm-selftests-6.2-2' of https://github.com/kvm-x86/linux into HEAD
commit 44bc6115d8
tools/testing/selftests/kvm
tools/testing/selftests/kvm/access_tracking_perf_test.c

@@ -46,6 +46,7 @@
 #include "test_util.h"
 #include "memstress.h"
 #include "guest_modes.h"
+#include "processor.h"

 /* Global variable used to synchronize all of the vCPU threads. */
 static int iteration;
@@ -180,16 +181,21 @@ static void mark_vcpu_memory_idle(struct kvm_vm *vm,
 	 * access tracking but low enough as to not make the test too brittle
 	 * over time and across architectures.
 	 *
-	 * Note that when run in nested virtualization, this check will trigger
-	 * much more frequently because TLB size is unlimited and since no flush
-	 * happens, much more pages are cached there and guest won't see the
-	 * "idle" bit cleared.
+	 * When running the guest as a nested VM, "warn" instead of asserting
+	 * as the TLB size is effectively unlimited and the KVM doesn't
+	 * explicitly flush the TLB when aging SPTEs.  As a result, more pages
+	 * are cached and the guest won't see the "idle" bit cleared.
 	 */
-	if (still_idle < pages / 10)
-		printf("WARNING: vCPU%d: Too many pages still idle (%" PRIu64
-		       "out of %" PRIu64 "), this will affect performance results"
-		       ".\n",
+	if (still_idle >= pages / 10) {
+#ifdef __x86_64__
+		TEST_ASSERT(this_cpu_has(X86_FEATURE_HYPERVISOR),
+			    "vCPU%d: Too many pages still idle (%lu out of %lu)",
+			    vcpu_idx, still_idle, pages);
+#endif
+		printf("WARNING: vCPU%d: Too many pages still idle (%lu out of %lu), "
+		       "this will affect performance results.\n",
 		       vcpu_idx, still_idle, pages);
+	}

 	close(page_idle_fd);
 	close(pagemap_fd);
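
For reference, a minimal standalone sketch (illustrative only, not part of the patch) of what the X86_FEATURE_HYPERVISOR check boils down to: CPUID leaf 0x1, ECX bit 31 is the "hypervisor present" bit, clear on bare metal and set by hypervisors for their guests, which is why the hard assert fires only on bare metal while nested runs get the warning instead.

    #include <stdbool.h>
    #include <stdio.h>
    #include <cpuid.h>

    /* Hypothetical helper: returns true when a hypervisor advertises itself
     * via the "hypervisor present" bit, CPUID.0x1:ECX[31]. */
    static bool running_under_hypervisor(void)
    {
            unsigned int eax, ebx, ecx, edx;

            if (!__get_cpuid(0x1, &eax, &ebx, &ecx, &edx))
                    return false;
            return ecx & (1u << 31);
    }

    int main(void)
    {
            printf("running nested: %s\n", running_under_hypervisor() ? "yes" : "no");
            return 0;
    }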
tools/testing/selftests/kvm/include/kvm_util_base.h

@@ -22,6 +22,18 @@

 #include "sparsebit.h"

+/*
+ * Provide a version of static_assert() that is guaranteed to have an optional
+ * message param.  If _ISOC11_SOURCE is defined, glibc (/usr/include/assert.h)
+ * #undefs and #defines static_assert() as a direct alias to _Static_assert(),
+ * i.e. effectively makes the message mandatory.  Many KVM selftests #define
+ * _GNU_SOURCE for various reasons, and _GNU_SOURCE implies _ISOC11_SOURCE.  As
+ * a result, static_assert() behavior is non-deterministic and may or may not
+ * require a message depending on #include order.
+ */
+#define __kvm_static_assert(expr, msg, ...) _Static_assert(expr, msg)
+#define kvm_static_assert(expr, ...) __kvm_static_assert(expr, ##__VA_ARGS__, #expr)
+
 #define KVM_DEV_PATH		"/dev/kvm"
 #define KVM_MAX_VCPUS		512

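
The macro pair above works because ,##__VA_ARGS__ drops the comma when no message is passed, so the stringified expression slides into the msg slot; an explicit message pushes #expr into the swallowed variadic tail. A self-contained sketch under assumed names (my_static_assert, not part of the selftests) that compiles with GCC/Clang, which support this extension:

    #define __my_static_assert(expr, msg, ...) _Static_assert(expr, msg)
    #define my_static_assert(expr, ...) __my_static_assert(expr, ##__VA_ARGS__, #expr)

    /* No message supplied: the message defaults to the stringified expression. */
    my_static_assert(sizeof(long) >= sizeof(int));

    /* Explicit message still works; the trailing #expr is swallowed by "...". */
    my_static_assert(sizeof(void *) >= 4, "pointers are too small");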
@@ -196,7 +208,7 @@ static inline bool kvm_has_cap(long cap)

 #define kvm_do_ioctl(fd, cmd, arg)						\
 ({										\
-	static_assert(!_IOC_SIZE(cmd) || sizeof(*arg) == _IOC_SIZE(cmd), "");	\
+	kvm_static_assert(!_IOC_SIZE(cmd) || sizeof(*arg) == _IOC_SIZE(cmd));	\
 	ioctl(fd, cmd, arg);							\
 })

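
For illustration only (hypothetical vm_fd and call sites, not from this patch), the compile-time size check rejects an ioctl argument whose type does not match the size encoded in the ioctl number, while commands that encode no size are left alone:

    struct kvm_ioeventfd ioeventfd = {};
    int wrong_type;

    kvm_do_ioctl(vm_fd, KVM_IOEVENTFD, &ioeventfd);  /* OK: sizeof(ioeventfd) == _IOC_SIZE(KVM_IOEVENTFD) */
    kvm_do_ioctl(vm_fd, KVM_IOEVENTFD, &wrong_type); /* build error: sizeof(int) != _IOC_SIZE(KVM_IOEVENTFD) */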
tools/testing/selftests/kvm/include/x86_64/processor.h

@@ -72,11 +72,11 @@ struct kvm_x86_cpu_feature {
 		.bit = __bit,						\
 	};								\
 									\
-	static_assert((fn & 0xc0000000) == 0 ||				\
-		      (fn & 0xc0000000) == 0x40000000 ||		\
-		      (fn & 0xc0000000) == 0x80000000 ||		\
-		      (fn & 0xc0000000) == 0xc0000000);			\
-	static_assert(idx < BIT(sizeof(feature.index) * BITS_PER_BYTE)); \
+	kvm_static_assert((fn & 0xc0000000) == 0 ||			\
+			  (fn & 0xc0000000) == 0x40000000 ||		\
+			  (fn & 0xc0000000) == 0x80000000 ||		\
+			  (fn & 0xc0000000) == 0xc0000000);		\
+	kvm_static_assert(idx < BIT(sizeof(feature.index) * BITS_PER_BYTE)); \
 	feature;							\
 })

@@ -94,6 +94,7 @@ struct kvm_x86_cpu_feature {
 #define	X86_FEATURE_XSAVE		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 26)
 #define	X86_FEATURE_OSXSAVE		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 27)
 #define	X86_FEATURE_RDRAND		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 30)
+#define	X86_FEATURE_HYPERVISOR		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 31)
 #define	X86_FEATURE_PAE			KVM_X86_CPU_FEATURE(0x1, 0, EDX, 6)
 #define	X86_FEATURE_MCE			KVM_X86_CPU_FEATURE(0x1, 0, EDX, 7)
 #define	X86_FEATURE_APIC		KVM_X86_CPU_FEATURE(0x1, 0, EDX, 9)
@@ -190,12 +191,12 @@ struct kvm_x86_cpu_property {
 		.hi_bit = high_bit,					\
 	};								\
 									\
-	static_assert(low_bit < high_bit);				\
-	static_assert((fn & 0xc0000000) == 0 ||				\
-		      (fn & 0xc0000000) == 0x40000000 ||		\
-		      (fn & 0xc0000000) == 0x80000000 ||		\
-		      (fn & 0xc0000000) == 0xc0000000);			\
-	static_assert(idx < BIT(sizeof(property.index) * BITS_PER_BYTE)); \
+	kvm_static_assert(low_bit < high_bit);				\
+	kvm_static_assert((fn & 0xc0000000) == 0 ||			\
+			  (fn & 0xc0000000) == 0x40000000 ||		\
+			  (fn & 0xc0000000) == 0x80000000 ||		\
+			  (fn & 0xc0000000) == 0xc0000000);		\
+	kvm_static_assert(idx < BIT(sizeof(property.index) * BITS_PER_BYTE)); \
 	property;							\
 })

tools/testing/selftests/kvm/lib/x86_64/processor.c

@@ -552,40 +552,6 @@ static void vcpu_setup(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
 	vcpu_sregs_set(vcpu, &sregs);
 }

-void __vm_xsave_require_permission(int bit, const char *name)
-{
-	int kvm_fd;
-	u64 bitmask;
-	long rc;
-	struct kvm_device_attr attr = {
-		.group = 0,
-		.attr = KVM_X86_XCOMP_GUEST_SUPP,
-		.addr = (unsigned long) &bitmask
-	};
-
-	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XFD));
-
-	kvm_fd = open_kvm_dev_path_or_exit();
-	rc = __kvm_ioctl(kvm_fd, KVM_GET_DEVICE_ATTR, &attr);
-	close(kvm_fd);
-
-	if (rc == -1 && (errno == ENXIO || errno == EINVAL))
-		__TEST_REQUIRE(0, "KVM_X86_XCOMP_GUEST_SUPP not supported");
-
-	TEST_ASSERT(rc == 0, "KVM_GET_DEVICE_ATTR(0, KVM_X86_XCOMP_GUEST_SUPP) error: %ld", rc);
-
-	__TEST_REQUIRE(bitmask & (1ULL << bit),
-		       "Required XSAVE feature '%s' not supported", name);
-
-	TEST_REQUIRE(!syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM, bit));
-
-	rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_GUEST_PERM, &bitmask);
-	TEST_ASSERT(rc == 0, "prctl(ARCH_GET_XCOMP_GUEST_PERM) error: %ld", rc);
-	TEST_ASSERT(bitmask & (1ULL << bit),
-		    "prctl(ARCH_REQ_XCOMP_GUEST_PERM) failure bitmask=0x%lx",
-		    bitmask);
-}
-
 void kvm_arch_vm_post_create(struct kvm_vm *vm)
 {
 	vm_create_irqchip(vm);
@@ -635,21 +601,24 @@ void vcpu_arch_free(struct kvm_vcpu *vcpu)
 	free(vcpu->cpuid);
 }

+/* Do not use kvm_supported_cpuid directly except for validity checks. */
+static void *kvm_supported_cpuid;
+
 const struct kvm_cpuid2 *kvm_get_supported_cpuid(void)
 {
-	static struct kvm_cpuid2 *cpuid;
 	int kvm_fd;

-	if (cpuid)
-		return cpuid;
+	if (kvm_supported_cpuid)
+		return kvm_supported_cpuid;

-	cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES);
+	kvm_supported_cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES);
 	kvm_fd = open_kvm_dev_path_or_exit();

-	kvm_ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid);
+	kvm_ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID,
+		  (struct kvm_cpuid2 *)kvm_supported_cpuid);

 	close(kvm_fd);
-	return cpuid;
+	return kvm_supported_cpuid;
 }

 static uint32_t __kvm_cpu_has(const struct kvm_cpuid2 *cpuid,
@@ -707,6 +676,41 @@ uint64_t kvm_get_feature_msr(uint64_t msr_index)
 	return buffer.entry.data;
 }

+void __vm_xsave_require_permission(int bit, const char *name)
+{
+	int kvm_fd;
+	u64 bitmask;
+	long rc;
+	struct kvm_device_attr attr = {
+		.group = 0,
+		.attr = KVM_X86_XCOMP_GUEST_SUPP,
+		.addr = (unsigned long) &bitmask
+	};
+
+	TEST_ASSERT(!kvm_supported_cpuid,
+		    "kvm_get_supported_cpuid() cannot be used before ARCH_REQ_XCOMP_GUEST_PERM");
+
+	kvm_fd = open_kvm_dev_path_or_exit();
+	rc = __kvm_ioctl(kvm_fd, KVM_GET_DEVICE_ATTR, &attr);
+	close(kvm_fd);
+
+	if (rc == -1 && (errno == ENXIO || errno == EINVAL))
+		__TEST_REQUIRE(0, "KVM_X86_XCOMP_GUEST_SUPP not supported");
+
+	TEST_ASSERT(rc == 0, "KVM_GET_DEVICE_ATTR(0, KVM_X86_XCOMP_GUEST_SUPP) error: %ld", rc);
+
+	__TEST_REQUIRE(bitmask & (1ULL << bit),
+		       "Required XSAVE feature '%s' not supported", name);
+
+	TEST_REQUIRE(!syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM, bit));
+
+	rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_GUEST_PERM, &bitmask);
+	TEST_ASSERT(rc == 0, "prctl(ARCH_GET_XCOMP_GUEST_PERM) error: %ld", rc);
+	TEST_ASSERT(bitmask & (1ULL << bit),
+		    "prctl(ARCH_REQ_XCOMP_GUEST_PERM) failure bitmask=0x%lx",
+		    bitmask);
+}
+
 void vcpu_init_cpuid(struct kvm_vcpu *vcpu, const struct kvm_cpuid2 *cpuid)
 {
 	TEST_ASSERT(cpuid != vcpu->cpuid, "@cpuid can't be the vCPU's CPUID");
tools/testing/selftests/kvm/x86_64/amx_test.c

@@ -249,16 +249,21 @@ int main(int argc, char *argv[])
 	u32 amx_offset;
 	int stage, ret;

+	/*
+	 * Note, all off-by-default features must be enabled before anything
+	 * caches KVM_GET_SUPPORTED_CPUID, e.g. before using kvm_cpu_has().
+	 */
 	vm_xsave_require_permission(XSTATE_XTILE_DATA_BIT);

-	/* Create VM */
-	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
-
 	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XFD));
 	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XSAVE));
 	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_AMX_TILE));
 	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XTILECFG));
 	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XTILEDATA));

+	/* Create VM */
+	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+
 	TEST_ASSERT(kvm_cpu_has_p(X86_PROPERTY_XSTATE_MAX_SIZE),
 		    "KVM should enumerate max XSAVE size when XSAVE is supported");
 	xsave_restore_size = kvm_cpu_property(X86_PROPERTY_XSTATE_MAX_SIZE);
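
For contrast, a hypothetical mis-ordered test (a sketch, not part of this patch) shows the bug class the hardened helper now catches: calling kvm_cpu_has() first caches KVM_GET_SUPPORTED_CPUID before the prctl() opt-in, which hides the opt-in XSAVE feature, and the subsequent vm_xsave_require_permission() call trips the new !kvm_supported_cpuid assert instead of the test being silently skipped.

    /* BROKEN ordering (sketch): kvm_cpu_has() caches KVM's supported CPUID
     * before ARCH_REQ_XCOMP_GUEST_PERM, so XTILEDATA appears unsupported... */
    TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XTILEDATA));
    /* ...and this call now fails the "kvm_get_supported_cpuid() cannot be
     * used before ARCH_REQ_XCOMP_GUEST_PERM" assertion. */
    vm_xsave_require_permission(XSTATE_XTILE_DATA_BIT);

    /* CORRECT ordering, as in the hunk above: opt in first, query after. */
    vm_xsave_require_permission(XSTATE_XTILE_DATA_BIT);
    TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XTILEDATA));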