KVM: selftests: Make memslot_perf_test arch independent

memslot_perf_test uses ucalls for synchronization between guest and
host. The ucall API is architecture independent: tests do not need to
know details like what kind of exit a ucall generates on a specific
arch. More specifically, there is no need to check whether an exit is
KVM_EXIT_IO on x86 for the host to know that the exit is ucall related,
as get_ucall() already makes that check.

Change memslot_perf_test so it no longer needs to specify what kind of
exit a ucall generates. Also add a missing ucall_init() call.

Signed-off-by: Ricardo Koller <ricarkol@google.com>
Reviewed-by: Andrew Jones <drjones@redhat.com>
Reviewed-by: Oliver Upton <oupton@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20210907180957.609966-2-ricarkol@google.com
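
For reference, the host side of the pattern this commit switches to looks
roughly like the sketch below. It is a minimal illustration only, reusing the
selftest helpers visible in the diff (vcpu_run(), get_ucall(), vcpu_state(),
TEST_FAIL()); the run_guest_until_done() name and the VCPU_ID value are
invented for the example.

/*
 * Minimal sketch (not taken verbatim from the test) of arch-independent
 * ucall handling on the host side. The helpers come from the selftest
 * framework of this kernel version; the function name and VCPU_ID value
 * are made up for illustration.
 */
#include "test_util.h"
#include "kvm_util.h"

#define VCPU_ID 0

static void run_guest_until_done(struct kvm_vm *vm)
{
	struct ucall uc;

	for (;;) {
		vcpu_run(vm, VCPU_ID);

		/*
		 * No KVM_EXIT_IO (x86) or KVM_EXIT_MMIO (arm64) check is
		 * needed here: get_ucall() decodes the exit for whatever
		 * architecture the test was built for.
		 */
		switch (get_ucall(vm, VCPU_ID, &uc)) {
		case UCALL_SYNC:
			/* Guest reached a GUEST_SYNC() point; keep going. */
			continue;
		case UCALL_DONE:
			/* Guest called GUEST_DONE(); we are finished. */
			return;
		case UCALL_ABORT:
			TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
				  __FILE__, uc.args[1]);
			break;
		case UCALL_NONE:
			/* Not a ucall exit (e.g. an MMIO access). */
			TEST_FAIL("Unexpected exit reason %u",
				  vcpu_state(vm, VCPU_ID)->exit_reason);
			break;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
			break;
		}
	}
}
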
Authored by Ricardo Koller on 2021-09-07 11:09:56 -07:00; committed by Marc Zyngier
parent 9e1ff307c7
commit ffb4ce3c49
1 changed file with 34 additions and 22 deletions

@@ -127,43 +127,54 @@ static bool verbose;
 			pr_info(__VA_ARGS__);	\
 	} while (0)
 
+static void check_mmio_access(struct vm_data *vm, struct kvm_run *run)
+{
+	TEST_ASSERT(vm->mmio_ok, "Unexpected mmio exit");
+	TEST_ASSERT(run->mmio.is_write, "Unexpected mmio read");
+	TEST_ASSERT(run->mmio.len == 8,
+		    "Unexpected exit mmio size = %u", run->mmio.len);
+	TEST_ASSERT(run->mmio.phys_addr >= vm->mmio_gpa_min &&
+		    run->mmio.phys_addr <= vm->mmio_gpa_max,
+		    "Unexpected exit mmio address = 0x%llx",
+		    run->mmio.phys_addr);
+}
+
 static void *vcpu_worker(void *data)
 {
 	struct vm_data *vm = data;
 	struct kvm_run *run;
 	struct ucall uc;
-	uint64_t cmd;
 
 	run = vcpu_state(vm->vm, VCPU_ID);
 	while (1) {
 		vcpu_run(vm->vm, VCPU_ID);
 
-		if (run->exit_reason == KVM_EXIT_IO) {
-			cmd = get_ucall(vm->vm, VCPU_ID, &uc);
-			if (cmd != UCALL_SYNC)
-				break;
-
+		switch (get_ucall(vm->vm, VCPU_ID, &uc)) {
+		case UCALL_SYNC:
+			TEST_ASSERT(uc.args[1] == 0,
+				"Unexpected sync ucall, got %lx",
+				(ulong)uc.args[1]);
 			sem_post(&vcpu_ready);
 			continue;
-		}
-
-		if (run->exit_reason != KVM_EXIT_MMIO)
+		case UCALL_NONE:
+			if (run->exit_reason == KVM_EXIT_MMIO)
+				check_mmio_access(vm, run);
+			else
+				goto done;
 			break;
-
-		TEST_ASSERT(vm->mmio_ok, "Unexpected mmio exit");
-		TEST_ASSERT(run->mmio.is_write, "Unexpected mmio read");
-		TEST_ASSERT(run->mmio.len == 8,
-			    "Unexpected exit mmio size = %u", run->mmio.len);
-		TEST_ASSERT(run->mmio.phys_addr >= vm->mmio_gpa_min &&
-			    run->mmio.phys_addr <= vm->mmio_gpa_max,
-			    "Unexpected exit mmio address = 0x%llx",
-			    run->mmio.phys_addr);
+		case UCALL_ABORT:
+			TEST_FAIL("%s at %s:%ld, val = %lu",
+				  (const char *)uc.args[0],
+				  __FILE__, uc.args[1], uc.args[2]);
+			break;
+		case UCALL_DONE:
+			goto done;
+		default:
+			TEST_FAIL("Unknown ucall %lu", uc.cmd);
+		}
 	}
 
-	if (run->exit_reason == KVM_EXIT_IO && cmd == UCALL_ABORT)
-		TEST_FAIL("%s at %s:%ld, val = %lu", (const char *)uc.args[0],
-			  __FILE__, uc.args[1], uc.args[2]);
-
+done:
 	return NULL;
 }
 
@@ -268,6 +279,7 @@ static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots,
 	TEST_ASSERT(data->hva_slots, "malloc() fail");
 
 	data->vm = vm_create_default(VCPU_ID, mempages, guest_code);
+	ucall_init(data->vm, NULL);
 
 	pr_info_v("Adding slots 1..%i, each slot with %"PRIu64" pages + %"PRIu64" extra pages last\n",
 		  max_mem_slots - 1, data->pages_per_slot, rempages);
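
As a usage note on the ucall_init() hunk above: the call has to land after the
VM is created and before the vCPU worker can issue or decode any ucall. A
rough sketch of that ordering, assuming the test's own definitions
(struct vm_data, VCPU_ID, guest_code, vcpu_worker) and with the thread
handling simplified, could look like this; start_vm_sketch() is invented for
the example and is not the test's actual prepare_vm().

#include <pthread.h>

/*
 * Illustrative only: the real test splits this across prepare_vm() and
 * its callers. The point is the ordering: ucall_init() runs right after
 * vm_create_default() and before vcpu_worker() starts running the guest.
 */
static void start_vm_sketch(struct vm_data *data, uint64_t mempages)
{
	pthread_t vcpu_thread;

	data->vm = vm_create_default(VCPU_ID, mempages, guest_code);
	ucall_init(data->vm, NULL);	/* the previously missing call */

	pthread_create(&vcpu_thread, NULL, vcpu_worker, data);
	pthread_join(vcpu_thread, NULL);
}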