KVM: selftests: Consolidate KVM_{G,S}ET_ONE_REG helpers
Rework vcpu_{g,s}et_reg() to provide the APIs that tests actually want to
use, and drop the three "one-off" implementations that cropped up due to
the poor API.  Ignore the handful of direct KVM_{G,S}ET_ONE_REG calls that
don't fit the APIs for one reason or another.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent 45f568084a
commit bfff0f60db
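For orientation, a minimal before/after sketch of what the consolidation means at a call site; the register and variable names are borrowed from the aarch64 setup code touched below, and the surrounding vm/vcpuid context is assumed to come from the selftest harness:

	/* Before: each caller (or one-off wrapper) built a struct kvm_one_reg by hand. */
	struct kvm_one_reg reg = {
		.id   = KVM_ARM64_SYS_REG(SYS_SCTLR_EL1),
		.addr = (uint64_t)&sctlr_el1,
	};
	vcpu_ioctl(vm, vcpuid, KVM_GET_ONE_REG, &reg);

	/* After: the common helper hides the struct and asserts that the ioctl succeeds. */
	vcpu_get_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), &sctlr_el1);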
@@ -242,7 +242,7 @@ static int debug_version(struct kvm_vcpu *vcpu)
 {
 	uint64_t id_aa64dfr0;
 
-	get_reg(vcpu->vm, vcpu->id, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &id_aa64dfr0);
+	vcpu_get_reg(vcpu->vm, vcpu->id, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &id_aa64dfr0);
 	return id_aa64dfr0 & 0xf;
 }
 
@@ -458,7 +458,7 @@ static void run_test(struct vcpu_config *c)
 	bool reject_reg = false;
 	int ret;
 
-	ret = __vcpu_ioctl(vm, 0, KVM_GET_ONE_REG, &reg);
+	ret = __vcpu_get_reg(vm, 0, reg_list->reg[i], &addr);
 	if (ret) {
 		printf("%s: Failed to get ", config_name(c));
 		print_reg(c, reg.id);
@@ -141,26 +141,6 @@ static void guest_code(void)
 	GUEST_DONE();
 }
 
-static int set_fw_reg(struct kvm_vm *vm, uint64_t id, uint64_t val)
-{
-	struct kvm_one_reg reg = {
-		.id = id,
-		.addr = (uint64_t)&val,
-	};
-
-	return __vcpu_ioctl(vm, 0, KVM_SET_ONE_REG, &reg);
-}
-
-static void get_fw_reg(struct kvm_vm *vm, uint64_t id, uint64_t *addr)
-{
-	struct kvm_one_reg reg = {
-		.id = id,
-		.addr = (uint64_t)addr,
-	};
-
-	vcpu_ioctl(vm, 0, KVM_GET_ONE_REG, &reg);
-}
-
 struct st_time {
 	uint32_t rev;
 	uint32_t attr;
@@ -196,18 +176,18 @@ static void test_fw_regs_before_vm_start(struct kvm_vm *vm)
 		const struct kvm_fw_reg_info *reg_info = &fw_reg_info[i];
 
 		/* First 'read' should be an upper limit of the features supported */
-		get_fw_reg(vm, reg_info->reg, &val);
+		vcpu_get_reg(vm, 0, reg_info->reg, &val);
 		TEST_ASSERT(val == FW_REG_ULIMIT_VAL(reg_info->max_feat_bit),
 			"Expected all the features to be set for reg: 0x%lx; expected: 0x%lx; read: 0x%lx\n",
 			reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit), val);
 
 		/* Test a 'write' by disabling all the features of the register map */
-		ret = set_fw_reg(vm, reg_info->reg, 0);
+		ret = __vcpu_set_reg(vm, 0, reg_info->reg, 0);
 		TEST_ASSERT(ret == 0,
 			"Failed to clear all the features of reg: 0x%lx; ret: %d\n",
 			reg_info->reg, errno);
 
-		get_fw_reg(vm, reg_info->reg, &val);
+		vcpu_get_reg(vm, 0, reg_info->reg, &val);
 		TEST_ASSERT(val == 0,
 			"Expected all the features to be cleared for reg: 0x%lx\n", reg_info->reg);
 
@@ -216,7 +196,7 @@ static void test_fw_regs_before_vm_start(struct kvm_vm *vm)
 		 * Avoid this check if all the bits are occupied.
 		 */
 		if (reg_info->max_feat_bit < 63) {
-			ret = set_fw_reg(vm, reg_info->reg, BIT(reg_info->max_feat_bit + 1));
+			ret = __vcpu_set_reg(vm, 0, reg_info->reg, BIT(reg_info->max_feat_bit + 1));
 			TEST_ASSERT(ret != 0 && errno == EINVAL,
 			"Unexpected behavior or return value (%d) while setting an unsupported feature for reg: 0x%lx\n",
 			errno, reg_info->reg);
@@ -237,7 +217,7 @@ static void test_fw_regs_after_vm_start(struct kvm_vm *vm)
 		 * Before starting the VM, the test clears all the bits.
 		 * Check if that's still the case.
 		 */
-		get_fw_reg(vm, reg_info->reg, &val);
+		vcpu_get_reg(vm, 0, reg_info->reg, &val);
 		TEST_ASSERT(val == 0,
 			"Expected all the features to be cleared for reg: 0x%lx\n",
 			reg_info->reg);
@@ -247,7 +227,7 @@ static void test_fw_regs_after_vm_start(struct kvm_vm *vm)
 		 * the registers and should return EBUSY. Set the registers and check for
 		 * the expected errno.
 		 */
-		ret = set_fw_reg(vm, reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit));
+		ret = __vcpu_set_reg(vm, 0, reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit));
 		TEST_ASSERT(ret != 0 && errno == EBUSY,
 		"Unexpected behavior or return value (%d) while setting a feature while VM is running for reg: 0x%lx\n",
 		errno, reg_info->reg);
@@ -102,8 +102,8 @@ static void assert_vcpu_reset(struct kvm_vcpu *vcpu)
 {
 	uint64_t obs_pc, obs_x0;
 
-	get_reg(vcpu->vm, vcpu->id, ARM64_CORE_REG(regs.pc), &obs_pc);
-	get_reg(vcpu->vm, vcpu->id, ARM64_CORE_REG(regs.regs[0]), &obs_x0);
+	vcpu_get_reg(vcpu->vm, vcpu->id, ARM64_CORE_REG(regs.pc), &obs_pc);
+	vcpu_get_reg(vcpu->vm, vcpu->id, ARM64_CORE_REG(regs.regs[0]), &obs_x0);
 
 	TEST_ASSERT(obs_pc == CPU_ON_ENTRY_ADDR,
 		    "unexpected target cpu pc: %lx (expected: %lx)",
@@ -143,7 +143,7 @@ static void host_test_cpu_on(void)
 	 */
 	vcpu_power_off(target);
 
-	get_reg(vm, target->id, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), &target_mpidr);
+	vcpu_get_reg(vm, target->id, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), &target_mpidr);
 	vcpu_args_set(vm, source->id, 1, target_mpidr & MPIDR_HWID_BITMASK);
 	enter_guest(source);
 
@@ -19,7 +19,7 @@
 /*
  * KVM_ARM64_SYS_REG(sys_reg_id): Helper macro to convert
  * SYS_* register definitions in asm/sysreg.h to use in KVM
- * calls such as get_reg() and set_reg().
+ * calls such as vcpu_get_reg() and vcpu_set_reg().
  */
 #define KVM_ARM64_SYS_REG(sys_reg_id) \
 	ARM64_SYS_REG(sys_reg_Op0(sys_reg_id), \
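Usage note (a sketch assuming the aarch64 selftest context shown elsewhere in this commit): the macro lets a SYS_* encoding be passed straight to the consolidated helpers, e.g.

	uint64_t mpidr;

	/* Read MPIDR_EL1 through the common one-reg helper. */
	vcpu_get_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), &mpidr);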
@@ -47,22 +47,6 @@
 
 #define MPIDR_HWID_BITMASK (0xff00fffffful)
 
-static inline void get_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id, uint64_t *addr)
-{
-	struct kvm_one_reg reg;
-	reg.id = id;
-	reg.addr = (uint64_t)addr;
-	vcpu_ioctl(vm, vcpuid, KVM_GET_ONE_REG, &reg);
-}
-
-static inline void set_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id, uint64_t val)
-{
-	struct kvm_one_reg reg;
-	reg.id = id;
-	reg.addr = (uint64_t)&val;
-	vcpu_ioctl(vm, vcpuid, KVM_SET_ONE_REG, &reg);
-}
-
 void aarch64_vcpu_setup(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init *init);
 struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
 				  struct kvm_vcpu_init *init, void *guest_code);
@@ -374,16 +374,36 @@ static inline void vcpu_fpu_set(struct kvm_vm *vm, uint32_t vcpuid,
 {
 	vcpu_ioctl(vm, vcpuid, KVM_SET_FPU, fpu);
 }
-static inline void vcpu_get_reg(struct kvm_vm *vm, uint32_t vcpuid,
-				struct kvm_one_reg *reg)
+static inline int __vcpu_get_reg(struct kvm_vm *vm, uint32_t vcpuid,
+				 uint64_t reg_id, void *addr)
 {
-	vcpu_ioctl(vm, vcpuid, KVM_GET_ONE_REG, reg);
+	struct kvm_one_reg reg = { .id = reg_id, .addr = (uint64_t)addr };
+
+	return __vcpu_ioctl(vm, vcpuid, KVM_GET_ONE_REG, &reg);
+}
+static inline int __vcpu_set_reg(struct kvm_vm *vm, uint32_t vcpuid,
+				 uint64_t reg_id, uint64_t val)
+{
+	struct kvm_one_reg reg = { .id = reg_id, .addr = (uint64_t)&val };
+
+	return __vcpu_ioctl(vm, vcpuid, KVM_SET_ONE_REG, &reg);
+}
+static inline void vcpu_get_reg(struct kvm_vm *vm, uint32_t vcpuid,
+				uint64_t reg_id, void *addr)
+{
+	struct kvm_one_reg reg = { .id = reg_id, .addr = (uint64_t)addr };
+
+	vcpu_ioctl(vm, vcpuid, KVM_GET_ONE_REG, &reg);
 }
 static inline void vcpu_set_reg(struct kvm_vm *vm, uint32_t vcpuid,
-				struct kvm_one_reg *reg)
+				uint64_t reg_id, uint64_t val)
 {
-	vcpu_ioctl(vm, vcpuid, KVM_SET_ONE_REG, reg);
+	struct kvm_one_reg reg = { .id = reg_id, .addr = (uint64_t)&val };
+
+	vcpu_ioctl(vm, vcpuid, KVM_SET_ONE_REG, &reg);
 }
 
 #ifdef __KVM_HAVE_VCPU_EVENTS
 static inline void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid,
 				   struct kvm_vcpu_events *events)
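A short sketch of the intended split between the two flavors added above, loosely modeled on the fw-reg test conversions later in this commit (reg_id and the vCPU index are illustrative):

	uint64_t val;
	int ret;

	/* Asserting wrapper: any KVM_GET_ONE_REG failure aborts the test. */
	vcpu_get_reg(vm, 0, reg_id, &val);

	/* "__" variant: returns the raw ioctl result so the caller can inspect errno. */
	ret = __vcpu_set_reg(vm, 0, reg_id, 0);
	TEST_ASSERT(!ret, "KVM_SET_ONE_REG failed, errno: %d", errno);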
@@ -38,26 +38,6 @@ static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t idx,
 				 KVM_REG_RISCV_TIMER_REG(name), \
 				 KVM_REG_SIZE_U64)
 
-static inline void get_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id,
-			   unsigned long *addr)
-{
-	struct kvm_one_reg reg;
-
-	reg.id = id;
-	reg.addr = (unsigned long)addr;
-	vcpu_get_reg(vm, vcpuid, &reg);
-}
-
-static inline void set_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id,
-			   unsigned long val)
-{
-	struct kvm_one_reg reg;
-
-	reg.id = id;
-	reg.addr = (unsigned long)&val;
-	vcpu_set_reg(vm, vcpuid, &reg);
-}
-
 /* L3 index Bit[47:39] */
 #define PGTBL_L3_INDEX_MASK 0x0000FF8000000000ULL
 #define PGTBL_L3_INDEX_SHIFT 39
@@ -232,10 +232,10 @@ void aarch64_vcpu_setup(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init
 	 * Enable FP/ASIMD to avoid trapping when accessing Q0-Q15
 	 * registers, which the variable argument list macros do.
 	 */
-	set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_CPACR_EL1), 3 << 20);
+	vcpu_set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_CPACR_EL1), 3 << 20);
 
-	get_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), &sctlr_el1);
-	get_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TCR_EL1), &tcr_el1);
+	vcpu_get_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), &sctlr_el1);
+	vcpu_get_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TCR_EL1), &tcr_el1);
 
 	/* Configure base granule size */
 	switch (vm->mode) {
@@ -296,19 +296,19 @@ void aarch64_vcpu_setup(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init
 	tcr_el1 |= (1 << 8) | (1 << 10) | (3 << 12);
 	tcr_el1 |= (64 - vm->va_bits) /* T0SZ */;
 
-	set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), sctlr_el1);
-	set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TCR_EL1), tcr_el1);
-	set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_MAIR_EL1), DEFAULT_MAIR_EL1);
-	set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TTBR0_EL1), vm->pgd);
-	set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TPIDR_EL1), vcpuid);
+	vcpu_set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), sctlr_el1);
+	vcpu_set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TCR_EL1), tcr_el1);
+	vcpu_set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_MAIR_EL1), DEFAULT_MAIR_EL1);
+	vcpu_set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TTBR0_EL1), vm->pgd);
+	vcpu_set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TPIDR_EL1), vcpuid);
 }
 
 void vcpu_arch_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
 {
 	uint64_t pstate, pc;
 
-	get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pstate), &pstate);
-	get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), &pc);
+	vcpu_get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pstate), &pstate);
+	vcpu_get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), &pc);
 
 	fprintf(stream, "%*spstate: 0x%.16lx pc: 0x%.16lx\n",
 		indent, "", pstate, pc);
@@ -326,8 +326,8 @@ struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
 
 	aarch64_vcpu_setup(vm, vcpu_id, init);
 
-	set_reg(vm, vcpu_id, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size);
-	set_reg(vm, vcpu_id, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
+	vcpu_set_reg(vm, vcpu_id, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size);
+	vcpu_set_reg(vm, vcpu_id, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
 
 	return vcpu;
 }
@@ -349,7 +349,7 @@ void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
 	va_start(ap, num);
 
 	for (i = 0; i < num; i++) {
-		set_reg(vm, vcpuid, ARM64_CORE_REG(regs.regs[i]),
+		vcpu_set_reg(vm, vcpuid, ARM64_CORE_REG(regs.regs[i]),
 			va_arg(ap, uint64_t));
 	}
 
@@ -389,7 +389,7 @@ void vcpu_init_descriptor_tables(struct kvm_vm *vm, uint32_t vcpuid)
 {
 	extern char vectors;
 
-	set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_VBAR_EL1), (uint64_t)&vectors);
+	vcpu_set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_VBAR_EL1), (uint64_t)&vectors);
 }
 
 void route_exception(struct ex_regs *regs, int vector)
@@ -198,46 +198,46 @@ void riscv_vcpu_mmu_setup(struct kvm_vm *vm, int vcpuid)
 	satp = (vm->pgd >> PGTBL_PAGE_SIZE_SHIFT) & SATP_PPN;
 	satp |= SATP_MODE_48;
 
-	set_reg(vm, vcpuid, RISCV_CSR_REG(satp), satp);
+	vcpu_set_reg(vm, vcpuid, RISCV_CSR_REG(satp), satp);
 }
 
 void vcpu_arch_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
 {
 	struct kvm_riscv_core core;
 
-	get_reg(vm, vcpuid, RISCV_CORE_REG(mode), &core.mode);
-	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.pc), &core.regs.pc);
-	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.ra), &core.regs.ra);
-	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.sp), &core.regs.sp);
-	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.gp), &core.regs.gp);
-	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.tp), &core.regs.tp);
-	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t0), &core.regs.t0);
-	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t1), &core.regs.t1);
-	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t2), &core.regs.t2);
-	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s0), &core.regs.s0);
-	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s1), &core.regs.s1);
-	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a0), &core.regs.a0);
-	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a1), &core.regs.a1);
-	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a2), &core.regs.a2);
-	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a3), &core.regs.a3);
-	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a4), &core.regs.a4);
-	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a5), &core.regs.a5);
-	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a6), &core.regs.a6);
-	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a7), &core.regs.a7);
-	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s2), &core.regs.s2);
-	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s3), &core.regs.s3);
-	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s4), &core.regs.s4);
-	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s5), &core.regs.s5);
-	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s6), &core.regs.s6);
-	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s7), &core.regs.s7);
-	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s8), &core.regs.s8);
-	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s9), &core.regs.s9);
-	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s10), &core.regs.s10);
-	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s11), &core.regs.s11);
-	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t3), &core.regs.t3);
-	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t4), &core.regs.t4);
-	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t5), &core.regs.t5);
-	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t6), &core.regs.t6);
+	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(mode), &core.mode);
+	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.pc), &core.regs.pc);
+	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.ra), &core.regs.ra);
+	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.sp), &core.regs.sp);
+	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.gp), &core.regs.gp);
+	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.tp), &core.regs.tp);
+	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t0), &core.regs.t0);
+	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t1), &core.regs.t1);
+	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t2), &core.regs.t2);
+	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s0), &core.regs.s0);
+	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s1), &core.regs.s1);
+	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a0), &core.regs.a0);
+	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a1), &core.regs.a1);
+	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a2), &core.regs.a2);
+	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a3), &core.regs.a3);
+	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a4), &core.regs.a4);
+	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a5), &core.regs.a5);
+	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a6), &core.regs.a6);
+	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a7), &core.regs.a7);
+	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s2), &core.regs.s2);
+	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s3), &core.regs.s3);
+	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s4), &core.regs.s4);
+	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s5), &core.regs.s5);
+	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s6), &core.regs.s6);
+	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s7), &core.regs.s7);
+	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s8), &core.regs.s8);
+	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s9), &core.regs.s9);
+	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s10), &core.regs.s10);
+	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s11), &core.regs.s11);
+	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t3), &core.regs.t3);
+	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t4), &core.regs.t4);
+	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t5), &core.regs.t5);
+	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t6), &core.regs.t6);
 
 	fprintf(stream,
 		" MODE: 0x%lx\n", core.mode);
@@ -302,17 +302,17 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
 	/* Setup global pointer of guest to be same as the host */
 	asm volatile (
 		"add %0, gp, zero" : "=r" (current_gp) : : "memory");
-	set_reg(vm, vcpu_id, RISCV_CORE_REG(regs.gp), current_gp);
+	vcpu_set_reg(vm, vcpu_id, RISCV_CORE_REG(regs.gp), current_gp);
 
 	/* Setup stack pointer and program counter of guest */
-	set_reg(vm, vcpu_id, RISCV_CORE_REG(regs.sp),
+	vcpu_set_reg(vm, vcpu_id, RISCV_CORE_REG(regs.sp),
 		stack_vaddr + stack_size);
-	set_reg(vm, vcpu_id, RISCV_CORE_REG(regs.pc),
+	vcpu_set_reg(vm, vcpu_id, RISCV_CORE_REG(regs.pc),
 		(unsigned long)guest_code);
 
 	/* Setup default exception vector of guest */
-	set_reg(vm, vcpu_id, RISCV_CSR_REG(stvec),
+	vcpu_set_reg(vm, vcpu_id, RISCV_CSR_REG(stvec),
 		(unsigned long)guest_unexp_trap);
 
 	return vcpu;
 }
@@ -355,7 +355,7 @@ void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
 			id = RISCV_CORE_REG(regs.a7);
 			break;
 		}
-		set_reg(vm, vcpuid, id, va_arg(ap, uint64_t));
+		vcpu_set_reg(vm, vcpuid, id, va_arg(ap, uint64_t));
 	}
 
 	va_end(ap);
@@ -61,12 +61,9 @@ static void guest_code_initial(void)
 
 static void test_one_reg(uint64_t id, uint64_t value)
 {
-	struct kvm_one_reg reg;
 	uint64_t eval_reg;
 
-	reg.addr = (uintptr_t)&eval_reg;
-	reg.id = id;
-	vcpu_get_reg(vm, VCPU_ID, &reg);
+	vcpu_get_reg(vm, VCPU_ID, id, &eval_reg);
 	TEST_ASSERT(eval_reg == value, "value == 0x%lx", value);
 }
 