commit 58ddfe6c3a
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Paolo Bonzini:

 - ARM/ARM64 locking fixes

 - x86 fixes: PCID, UMIP, locking

 - improved support for recent Windows versions that have a 2048 Hz
   APIC timer

 - rename the KVM_HINTS_DEDICATED CPUID bit to KVM_HINTS_REALTIME

 - better behaved selftests

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  kvm: rename KVM_HINTS_DEDICATED to KVM_HINTS_REALTIME
  KVM: arm/arm64: VGIC/ITS save/restore: protect kvm_read_guest() calls
  KVM: arm/arm64: VGIC/ITS: protect kvm_read_guest() calls with SRCU lock
  KVM: arm/arm64: VGIC/ITS: Promote irq_lock() in update_affinity
  KVM: arm/arm64: Properly protect VGIC locks from IRQs
  KVM: X86: Lower the default timer frequency limit to 200us
  KVM: vmx: update sec exec controls for UMIP iff emulating UMIP
  kvm: x86: Suppress CR3_PCID_INVD bit only when PCIDs are enabled
  KVM: selftests: exit with 0 status code when tests cannot be run
  KVM: hyperv: idr_find needs RCU protection
  x86: Delay skip of emulated hypercall instruction
  KVM: Extend MAX_IRQ_ROUTES to 4096 for all archs
@@ -72,8 +72,8 @@ KVM_FEATURE_CLOCKSOURCE_STABLE_BIT || 24 || host will warn if no guest-side
 
 flag                               || value || meaning
 ==================================================================================
-KVM_HINTS_DEDICATED                ||     0 || guest checks this feature bit to
-                                   ||       || determine if there is vCPU pinning
-                                   ||       || and there is no vCPU over-commitment,
+KVM_HINTS_REALTIME                 ||     0 || guest checks this feature bit to
+                                   ||       || determine that vCPUs are never
+                                   ||       || preempted for an unlimited time,
                                    ||       || allowing optimizations
 ----------------------------------------------------------------------------------
@@ -309,6 +309,22 @@ static inline unsigned int kvm_get_vmid_bits(void)
 	return 8;
 }
 
+/*
+ * We are not in the kvm->srcu critical section most of the time, so we take
+ * the SRCU read lock here. Since we copy the data from the user page, we
+ * can immediately drop the lock again.
+ */
+static inline int kvm_read_guest_lock(struct kvm *kvm,
+				      gpa_t gpa, void *data, unsigned long len)
+{
+	int srcu_idx = srcu_read_lock(&kvm->srcu);
+	int ret = kvm_read_guest(kvm, gpa, data, len);
+
+	srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+	return ret;
+}
+
 static inline void *kvm_get_hyp_vector(void)
 {
 	return kvm_ksym_ref(__kvm_hyp_vector);
@@ -360,6 +360,22 @@ static inline unsigned int kvm_get_vmid_bits(void)
 	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
 }
 
+/*
+ * We are not in the kvm->srcu critical section most of the time, so we take
+ * the SRCU read lock here. Since we copy the data from the user page, we
+ * can immediately drop the lock again.
+ */
+static inline int kvm_read_guest_lock(struct kvm *kvm,
+				      gpa_t gpa, void *data, unsigned long len)
+{
+	int srcu_idx = srcu_read_lock(&kvm->srcu);
+	int ret = kvm_read_guest(kvm, gpa, data, len);
+
+	srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+	return ret;
+}
+
 #ifdef CONFIG_KVM_INDIRECT_VECTORS
 /*
  * EL2 vectors can be mapped and rerouted in a number of ways,
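Both the arm and arm64 headers gain the same helper, so VGIC/ITS code can read guest memory without the caller entering kvm->srcu itself. A minimal sketch of a caller, where read_one_entry() and its parameters are hypothetical stand-ins rather than kernel code:

    /* Hypothetical caller: read a single 64-bit guest table entry.
     * kvm_read_guest_lock() enters and leaves kvm->srcu internally,
     * so the caller needs no SRCU bookkeeping of its own. */
    static int read_one_entry(struct kvm *kvm, gpa_t gpa, u64 *entry)
    {
            return kvm_read_guest_lock(kvm, gpa, entry, sizeof(*entry));
    }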
@@ -29,7 +29,7 @@
 #define KVM_FEATURE_PV_TLB_FLUSH	9
 #define KVM_FEATURE_ASYNC_PF_VMEXIT	10
 
-#define KVM_HINTS_DEDICATED      0
+#define KVM_HINTS_REALTIME       0
 
 /* The last 8 bits are used to indicate how to interpret the flags field
  * in pvclock structure. If no bits are set, all flags are ignored.
@@ -457,7 +457,7 @@ static void __init sev_map_percpu_data(void)
 static void __init kvm_smp_prepare_cpus(unsigned int max_cpus)
 {
 	native_smp_prepare_cpus(max_cpus);
-	if (kvm_para_has_hint(KVM_HINTS_DEDICATED))
+	if (kvm_para_has_hint(KVM_HINTS_REALTIME))
 		static_branch_disable(&virt_spin_lock_key);
 }

@@ -553,7 +553,7 @@ static void __init kvm_guest_init(void)
 	}
 
 	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
-	    !kvm_para_has_hint(KVM_HINTS_DEDICATED) &&
+	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
 	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME))
 		pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others;

@@ -649,7 +649,7 @@ static __init int kvm_setup_pv_tlb_flush(void)
 	int cpu;
 
 	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
-	    !kvm_para_has_hint(KVM_HINTS_DEDICATED) &&
+	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
 	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
 		for_each_possible_cpu(cpu) {
 			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu),

@@ -745,7 +745,7 @@ void __init kvm_spinlock_init(void)
 	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
 		return;
 
-	if (kvm_para_has_hint(KVM_HINTS_DEDICATED))
+	if (kvm_para_has_hint(KVM_HINTS_REALTIME))
 		return;
 
 	__pv_init_lock_hash();
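All four call sites test the renamed bit through kvm_para_has_hint(). For reference, the helper boils down to a CPUID check against the KVM paravirt hints leaf; this is a simplified sketch of its shape, not a copy of arch/x86/include/asm/kvm_para.h:

    /* Sketch: a hint is one bit in the KVM paravirt hints CPUID leaf. */
    static inline bool kvm_para_has_hint(unsigned int feature)
    {
            return kvm_arch_para_hints() & (1 << feature);
    }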
@@ -1265,7 +1265,7 @@ static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
 	struct kvm_run *run = vcpu->run;
 
 	kvm_hv_hypercall_set_result(vcpu, run->hyperv.u.hcall.result);
-	return 1;
+	return kvm_skip_emulated_instruction(vcpu);
 }
 
 static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param)

@@ -1296,8 +1296,10 @@ static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param)
 	if (param & ~KVM_HYPERV_CONN_ID_MASK)
 		return HV_STATUS_INVALID_HYPERCALL_INPUT;
 
-	/* conn_to_evt is protected by vcpu->kvm->srcu */
+	/* the eventfd is protected by vcpu->kvm->srcu, but conn_to_evt isn't */
+	rcu_read_lock();
 	eventfd = idr_find(&vcpu->kvm->arch.hyperv.conn_to_evt, param);
+	rcu_read_unlock();
 	if (!eventfd)
 		return HV_STATUS_INVALID_PORT_ID;
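The fix narrows the old comment's claim: kvm->srcu keeps the eventfd itself alive, but the conn_to_evt IDR is modified under its own lock and freed via RCU, so the lookup must sit in an RCU read-side section. The general pattern, independent of KVM (some_idr and id are placeholder names):

    /* idr_find() walks an RCU-protected tree, so the lookup itself must
     * be inside rcu_read_lock()/rcu_read_unlock(); the returned object
     * must be kept alive by some other mechanism (here: kvm->srcu). */
    rcu_read_lock();
    obj = idr_find(&some_idr, id);
    rcu_read_unlock();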
@@ -1494,6 +1494,12 @@ static inline bool cpu_has_vmx_vmfunc(void)
 		SECONDARY_EXEC_ENABLE_VMFUNC;
 }
 
+static bool vmx_umip_emulated(void)
+{
+	return vmcs_config.cpu_based_2nd_exec_ctrl &
+		SECONDARY_EXEC_DESC;
+}
+
 static inline bool report_flexpriority(void)
 {
 	return flexpriority_enabled;

@@ -4761,14 +4767,16 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	else
 		hw_cr4 |= KVM_PMODE_VM_CR4_ALWAYS_ON;
 
-	if ((cr4 & X86_CR4_UMIP) && !boot_cpu_has(X86_FEATURE_UMIP)) {
-		vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
-			      SECONDARY_EXEC_DESC);
-		hw_cr4 &= ~X86_CR4_UMIP;
-	} else if (!is_guest_mode(vcpu) ||
-		   !nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC))
-		vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
-				SECONDARY_EXEC_DESC);
+	if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated()) {
+		if (cr4 & X86_CR4_UMIP) {
+			vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
+				      SECONDARY_EXEC_DESC);
+			hw_cr4 &= ~X86_CR4_UMIP;
+		} else if (!is_guest_mode(vcpu) ||
+			   !nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC))
+			vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
+					SECONDARY_EXEC_DESC);
+	}
 
 	if (cr4 & X86_CR4_VMXE) {
 		/*

@@ -9497,12 +9505,6 @@ static bool vmx_xsaves_supported(void)
 		SECONDARY_EXEC_XSAVES;
 }
 
-static bool vmx_umip_emulated(void)
-{
-	return vmcs_config.cpu_based_2nd_exec_ctrl &
-		SECONDARY_EXEC_DESC;
-}
-
 static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
 {
 	u32 exit_intr_info;
@@ -114,7 +114,7 @@ module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);
 static bool __read_mostly report_ignored_msrs = true;
 module_param(report_ignored_msrs, bool, S_IRUGO | S_IWUSR);
 
-unsigned int min_timer_period_us = 500;
+unsigned int min_timer_period_us = 200;
 module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
 
 static bool __read_mostly kvmclock_periodic_sync = true;

@@ -843,7 +843,10 @@ EXPORT_SYMBOL_GPL(kvm_set_cr4);
 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
 #ifdef CONFIG_X86_64
-	cr3 &= ~CR3_PCID_INVD;
+	bool pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);
+
+	if (pcid_enabled)
+		cr3 &= ~CR3_PCID_INVD;
 #endif
 
 	if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {

@@ -6671,12 +6674,13 @@ void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu)
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 {
 	unsigned long nr, a0, a1, a2, a3, ret;
-	int op_64_bit, r;
-
-	r = kvm_skip_emulated_instruction(vcpu);
+	int op_64_bit;
 
-	if (kvm_hv_hypercall_enabled(vcpu->kvm))
-		return kvm_hv_hypercall(vcpu);
+	if (kvm_hv_hypercall_enabled(vcpu->kvm)) {
+		if (!kvm_hv_hypercall(vcpu))
+			return 0;
+		goto out;
+	}
 
 	nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
 	a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);

@@ -6697,7 +6701,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 
 	if (kvm_x86_ops->get_cpl(vcpu) != 0) {
 		ret = -KVM_EPERM;
-		goto out;
+		goto out_error;
 	}
 
 	switch (nr) {

@@ -6717,12 +6721,14 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 		ret = -KVM_ENOSYS;
 		break;
 	}
-out:
+out_error:
 	if (!op_64_bit)
 		ret = (u32)ret;
 	kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
 
+out:
 	++vcpu->stat.hypercalls;
-	return r;
+	return kvm_skip_emulated_instruction(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
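Taken together, the hypercall hunks move the RIP advance from the top of kvm_emulate_hypercall() to its tail, so a hypercall that still needs userspace completion is not skipped prematurely. The resulting flow, reduced to a sketch (handle_hypercall() is a hypothetical stand-in for the dispatch switch, not the kernel source):

    static int emulate_hypercall_sketch(struct kvm_vcpu *vcpu)
    {
            unsigned long ret = handle_hypercall(vcpu);     /* hypothetical dispatch */

            kvm_register_write(vcpu, VCPU_REGS_RAX, ret);   /* report the result */
            ++vcpu->stat.hypercalls;
            return kvm_skip_emulated_instruction(vcpu);     /* advance RIP last */
    }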
@@ -1045,13 +1045,7 @@ static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
 
 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
 
-#ifdef CONFIG_S390
-#define KVM_MAX_IRQ_ROUTES 4096 //FIXME: we can have more than that...
-#elif defined(CONFIG_ARM64)
-#define KVM_MAX_IRQ_ROUTES 4096
-#else
-#define KVM_MAX_IRQ_ROUTES 1024
-#endif
+#define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */
 
 bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
 int kvm_set_irq_routing(struct kvm *kvm,
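With the #ifdef maze gone, every architecture gets the same ceiling. The limit is enforced when userspace installs a routing table; a sketch of the check it feeds, simplified from the KVM_SET_GSI_ROUTING ioctl handler (exact shape assumed):

    /* routing is the struct kvm_irq_routing copied in from userspace. */
    if (routing.nr > KVM_MAX_IRQ_ROUTES)
            return -EINVAL; /* more GSI routes than the table allows */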
@@ -15,7 +15,7 @@ LIBKVM += $(LIBKVM_$(UNAME_M))
 
 INSTALL_HDR_PATH = $(top_srcdir)/usr
 LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/
-CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_HDR_PATH) -Iinclude -I$(<D)
+CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -I..
 
 # After inclusion, $(OUTPUT) is defined and
 # $(TEST_GEN_PROGS) starts with $(OUTPUT)/
@@ -19,6 +19,7 @@
 #include <errno.h>
 #include <unistd.h>
 #include <fcntl.h>
+#include "kselftest.h"
 
 ssize_t test_write(int fd, const void *buf, size_t count);
 ssize_t test_read(int fd, void *buf, size_t count);
@@ -50,8 +50,8 @@ int kvm_check_cap(long cap)
 	int kvm_fd;
 
 	kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
-	TEST_ASSERT(kvm_fd >= 0, "open %s failed, rc: %i errno: %i",
-		KVM_DEV_PATH, kvm_fd, errno);
+	if (kvm_fd < 0)
+		exit(KSFT_SKIP);
 
 	ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap);
 	TEST_ASSERT(ret != -1, "KVM_CHECK_EXTENSION IOCTL failed,\n"

@@ -91,8 +91,8 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
 
 	vm->mode = mode;
 	kvm_fd = open(KVM_DEV_PATH, perm);
-	TEST_ASSERT(kvm_fd >= 0, "open %s failed, rc: %i errno: %i",
-		KVM_DEV_PATH, kvm_fd, errno);
+	if (kvm_fd < 0)
+		exit(KSFT_SKIP);
 
 	/* Create VM. */
 	vm->fd = ioctl(kvm_fd, KVM_CREATE_VM, NULL);

@@ -418,8 +418,8 @@ struct kvm_cpuid2 *kvm_get_supported_cpuid(void)
 
 	cpuid = allocate_kvm_cpuid2();
 	kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
-	TEST_ASSERT(kvm_fd >= 0, "open %s failed, rc: %i errno: %i",
-		KVM_DEV_PATH, kvm_fd, errno);
+	if (kvm_fd < 0)
+		exit(KSFT_SKIP);
 
 	ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid);
 	TEST_ASSERT(ret == 0, "KVM_GET_SUPPORTED_CPUID failed %d %d\n",

@@ -675,8 +675,8 @@ static int vcpu_mmap_sz(void)
 	int dev_fd, ret;
 
 	dev_fd = open(KVM_DEV_PATH, O_RDONLY);
-	TEST_ASSERT(dev_fd >= 0, "%s open %s failed, rc: %i errno: %i",
-		__func__, KVM_DEV_PATH, dev_fd, errno);
+	if (dev_fd < 0)
+		exit(KSFT_SKIP);
 
 	ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
 	TEST_ASSERT(ret >= sizeof(struct kvm_run),
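Each open of /dev/kvm now degrades to a skip instead of an assertion failure when the device is unavailable. KSFT_SKIP comes from kselftest.h and is the harness's "test was not run" exit code; the pattern in isolation:

    int fd = open(KVM_DEV_PATH, O_RDONLY);

    if (fd < 0)
            exit(KSFT_SKIP);        /* no /dev/kvm: skip, don't fail */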
@@ -85,6 +85,9 @@ static void compare_vcpu_events(struct kvm_vcpu_events *left,
 {
 }
 
+#define TEST_SYNC_FIELDS   (KVM_SYNC_X86_REGS|KVM_SYNC_X86_SREGS|KVM_SYNC_X86_EVENTS)
+#define INVALID_SYNC_FIELD 0x80000000
+
 int main(int argc, char *argv[])
 {
 	struct kvm_vm *vm;

@@ -98,9 +101,14 @@ int main(int argc, char *argv[])
 	setbuf(stdout, NULL);
 
 	cap = kvm_check_cap(KVM_CAP_SYNC_REGS);
-	TEST_ASSERT((unsigned long)cap == KVM_SYNC_X86_VALID_FIELDS,
-		    "KVM_CAP_SYNC_REGS (0x%x) != KVM_SYNC_X86_VALID_FIELDS (0x%lx)\n",
-		    cap, KVM_SYNC_X86_VALID_FIELDS);
+	if ((cap & TEST_SYNC_FIELDS) != TEST_SYNC_FIELDS) {
+		fprintf(stderr, "KVM_CAP_SYNC_REGS not supported, skipping test\n");
+		exit(KSFT_SKIP);
+	}
+	if ((cap & INVALID_SYNC_FIELD) != 0) {
+		fprintf(stderr, "The \"invalid\" field is not invalid, skipping test\n");
+		exit(KSFT_SKIP);
+	}
 
 	/* Create VM */
 	vm = vm_create_default(VCPU_ID, guest_code);

@@ -108,7 +116,14 @@ int main(int argc, char *argv[])
 	run = vcpu_state(vm, VCPU_ID);
 
 	/* Request reading invalid register set from VCPU. */
-	run->kvm_valid_regs = KVM_SYNC_X86_VALID_FIELDS << 1;
+	run->kvm_valid_regs = INVALID_SYNC_FIELD;
+	rv = _vcpu_run(vm, VCPU_ID);
+	TEST_ASSERT(rv < 0 && errno == EINVAL,
+		    "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
+		    rv);
+	vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0;
+
+	run->kvm_valid_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
 	rv = _vcpu_run(vm, VCPU_ID);
 	TEST_ASSERT(rv < 0 && errno == EINVAL,
 		    "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",

@@ -116,7 +131,14 @@ int main(int argc, char *argv[])
 	vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0;
 
 	/* Request setting invalid register set into VCPU. */
-	run->kvm_dirty_regs = KVM_SYNC_X86_VALID_FIELDS << 1;
+	run->kvm_dirty_regs = INVALID_SYNC_FIELD;
+	rv = _vcpu_run(vm, VCPU_ID);
+	TEST_ASSERT(rv < 0 && errno == EINVAL,
+		    "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
+		    rv);
+	vcpu_state(vm, VCPU_ID)->kvm_dirty_regs = 0;
+
+	run->kvm_dirty_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
 	rv = _vcpu_run(vm, VCPU_ID);
 	TEST_ASSERT(rv < 0 && errno == EINVAL,
 		    "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",

@@ -125,7 +147,7 @@ int main(int argc, char *argv[])
 
 	/* Request and verify all valid register sets. */
 	/* TODO: BUILD TIME CHECK: TEST_ASSERT(KVM_SYNC_X86_NUM_FIELDS != 3); */
-	run->kvm_valid_regs = KVM_SYNC_X86_VALID_FIELDS;
+	run->kvm_valid_regs = TEST_SYNC_FIELDS;
 	rv = _vcpu_run(vm, VCPU_ID);
 	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
 		    "Unexpected exit reason: %u (%s),\n",

@@ -146,7 +168,7 @@ int main(int argc, char *argv[])
 	run->s.regs.sregs.apic_base = 1 << 11;
 	/* TODO run->s.regs.events.XYZ = ABC; */
 
-	run->kvm_valid_regs = KVM_SYNC_X86_VALID_FIELDS;
+	run->kvm_valid_regs = TEST_SYNC_FIELDS;
 	run->kvm_dirty_regs = KVM_SYNC_X86_REGS | KVM_SYNC_X86_SREGS;
 	rv = _vcpu_run(vm, VCPU_ID);
 	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,

@@ -172,7 +194,7 @@ int main(int argc, char *argv[])
 	/* Clear kvm_dirty_regs bits, verify new s.regs values are
 	 * overwritten with existing guest values.
 	 */
-	run->kvm_valid_regs = KVM_SYNC_X86_VALID_FIELDS;
+	run->kvm_valid_regs = TEST_SYNC_FIELDS;
 	run->kvm_dirty_regs = 0;
 	run->s.regs.regs.r11 = 0xDEADBEEF;
 	rv = _vcpu_run(vm, VCPU_ID);

@@ -211,7 +233,7 @@ int main(int argc, char *argv[])
 	 * with kvm_sync_regs values.
 	 */
 	run->kvm_valid_regs = 0;
-	run->kvm_dirty_regs = KVM_SYNC_X86_VALID_FIELDS;
+	run->kvm_dirty_regs = TEST_SYNC_FIELDS;
 	run->s.regs.regs.r11 = 0xBBBB;
 	rv = _vcpu_run(vm, VCPU_ID);
 	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
@@ -189,8 +189,8 @@ int main(int argc, char *argv[])
 	struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
 
 	if (!(entry->ecx & CPUID_VMX)) {
-		printf("nested VMX not enabled, skipping test");
-		return 0;
+		fprintf(stderr, "nested VMX not enabled, skipping test\n");
+		exit(KSFT_SKIP);
 	}
 
 	vm = vm_create_default_vmx(VCPU_ID, (void *) l1_guest_code);
@@ -211,6 +211,7 @@ static int vgic_debug_show(struct seq_file *s, void *v)
 	struct vgic_state_iter *iter = (struct vgic_state_iter *)v;
 	struct vgic_irq *irq;
 	struct kvm_vcpu *vcpu = NULL;
+	unsigned long flags;
 
 	if (iter->dist_id == 0) {
 		print_dist_state(s, &kvm->arch.vgic);

@@ -227,9 +228,9 @@ static int vgic_debug_show(struct seq_file *s, void *v)
 		irq = &kvm->arch.vgic.spis[iter->intid - VGIC_NR_PRIVATE_IRQS];
 	}
 
-	spin_lock(&irq->irq_lock);
+	spin_lock_irqsave(&irq->irq_lock, flags);
 	print_irq_state(s, irq, vcpu);
-	spin_unlock(&irq->irq_lock);
+	spin_unlock_irqrestore(&irq->irq_lock, flags);
 
 	return 0;
 }
@@ -52,6 +52,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
 	struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;
+	unsigned long flags;
 	int ret;
 
 	/* In this case there is no put, since we keep the reference. */

@@ -71,7 +72,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
 	irq->intid = intid;
 	irq->target_vcpu = vcpu;
 
-	spin_lock(&dist->lpi_list_lock);
+	spin_lock_irqsave(&dist->lpi_list_lock, flags);
 
 	/*
 	 * There could be a race with another vgic_add_lpi(), so we need to

@@ -99,7 +100,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
 	dist->lpi_list_count++;
 
 out_unlock:
-	spin_unlock(&dist->lpi_list_lock);
+	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 
 	/*
 	 * We "cache" the configuration table entries in our struct vgic_irq's.

@@ -280,8 +281,8 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
 	int ret;
 	unsigned long flags;
 
-	ret = kvm_read_guest(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
-			     &prop, 1);
+	ret = kvm_read_guest_lock(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
+				  &prop, 1);
 
 	if (ret)
 		return ret;

@@ -315,6 +316,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
 {
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
 	struct vgic_irq *irq;
+	unsigned long flags;
 	u32 *intids;
 	int irq_count, i = 0;
 

@@ -330,7 +332,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
 	if (!intids)
 		return -ENOMEM;
 
-	spin_lock(&dist->lpi_list_lock);
+	spin_lock_irqsave(&dist->lpi_list_lock, flags);
 	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
 		if (i == irq_count)
 			break;

@@ -339,7 +341,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
 			continue;
 		intids[i++] = irq->intid;
 	}
-	spin_unlock(&dist->lpi_list_lock);
+	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 
 	*intid_ptr = intids;
 	return i;

@@ -348,10 +350,11 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
 static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
 {
 	int ret = 0;
+	unsigned long flags;
 
-	spin_lock(&irq->irq_lock);
+	spin_lock_irqsave(&irq->irq_lock, flags);
 	irq->target_vcpu = vcpu;
-	spin_unlock(&irq->irq_lock);
+	spin_unlock_irqrestore(&irq->irq_lock, flags);
 
 	if (irq->hw) {
 		struct its_vlpi_map map;

@@ -441,8 +444,9 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
 		 * this very same byte in the last iteration. Reuse that.
 		 */
 		if (byte_offset != last_byte_offset) {
-			ret = kvm_read_guest(vcpu->kvm, pendbase + byte_offset,
-					     &pendmask, 1);
+			ret = kvm_read_guest_lock(vcpu->kvm,
+						  pendbase + byte_offset,
+						  &pendmask, 1);
 			if (ret) {
 				kfree(intids);
 				return ret;

@@ -786,7 +790,7 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
 		return false;
 
 	/* Each 1st level entry is represented by a 64-bit value. */
-	if (kvm_read_guest(its->dev->kvm,
+	if (kvm_read_guest_lock(its->dev->kvm,
 			   BASER_ADDRESS(baser) + index * sizeof(indirect_ptr),
 			   &indirect_ptr, sizeof(indirect_ptr)))
 		return false;

@@ -1367,8 +1371,8 @@ static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
 	cbaser = CBASER_ADDRESS(its->cbaser);
 
 	while (its->cwriter != its->creadr) {
-		int ret = kvm_read_guest(kvm, cbaser + its->creadr,
-					 cmd_buf, ITS_CMD_SIZE);
+		int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr,
+					      cmd_buf, ITS_CMD_SIZE);
 		/*
 		 * If kvm_read_guest() fails, this could be due to the guest
 		 * programming a bogus value in CBASER or something else going

@@ -1893,7 +1897,7 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, int esz,
 	int next_offset;
 	size_t byte_offset;
 
-	ret = kvm_read_guest(kvm, gpa, entry, esz);
+	ret = kvm_read_guest_lock(kvm, gpa, entry, esz);
 	if (ret)
 		return ret;
 

@@ -2263,7 +2267,7 @@ static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
 	int ret;
 
 	BUG_ON(esz > sizeof(val));
-	ret = kvm_read_guest(kvm, gpa, &val, esz);
+	ret = kvm_read_guest_lock(kvm, gpa, &val, esz);
 	if (ret)
 		return ret;
 	val = le64_to_cpu(val);
@@ -344,7 +344,7 @@ retry:
 	bit_nr = irq->intid % BITS_PER_BYTE;
 	ptr = pendbase + byte_offset;
 
-	ret = kvm_read_guest(kvm, ptr, &val, 1);
+	ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
 	if (ret)
 		return ret;
 

@@ -397,7 +397,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
 		ptr = pendbase + byte_offset;
 
 		if (byte_offset != last_byte_offset) {
-			ret = kvm_read_guest(kvm, ptr, &val, 1);
+			ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
 			if (ret)
 				return ret;
 			last_byte_offset = byte_offset;
@@ -43,9 +43,13 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
  * kvm->lock (mutex)
  *   its->cmd_lock (mutex)
  *     its->its_lock (mutex)
- *       vgic_cpu->ap_list_lock
- *         kvm->lpi_list_lock
- *           vgic_irq->irq_lock
+ *       vgic_cpu->ap_list_lock		must be taken with IRQs disabled
+ *         kvm->lpi_list_lock		must be taken with IRQs disabled
+ *           vgic_irq->irq_lock		must be taken with IRQs disabled
+ *
+ * As the ap_list_lock might be taken from the timer interrupt handler,
+ * we have to disable IRQs before taking this lock and everything lower
+ * than it.
  *
  * If you need to take multiple locks, always take the upper lock first,
  * then the lower ones, e.g. first take the its_lock, then the irq_lock.

@@ -72,8 +76,9 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
 	struct vgic_irq *irq = NULL;
+	unsigned long flags;
 
-	spin_lock(&dist->lpi_list_lock);
+	spin_lock_irqsave(&dist->lpi_list_lock, flags);
 
 	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
 		if (irq->intid != intid)

@@ -89,7 +94,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
 	irq = NULL;
 
 out_unlock:
-	spin_unlock(&dist->lpi_list_lock);
+	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 
 	return irq;
 }

@@ -134,19 +139,20 @@ static void vgic_irq_release(struct kref *ref)
 void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
+	unsigned long flags;
 
 	if (irq->intid < VGIC_MIN_LPI)
 		return;
 
-	spin_lock(&dist->lpi_list_lock);
+	spin_lock_irqsave(&dist->lpi_list_lock, flags);
 	if (!kref_put(&irq->refcount, vgic_irq_release)) {
-		spin_unlock(&dist->lpi_list_lock);
+		spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 		return;
 	};
 
 	list_del(&irq->lpi_list);
 	dist->lpi_list_count--;
-	spin_unlock(&dist->lpi_list_lock);
+	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 
 	kfree(irq);
 }
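The updated ordering comment is the contract the preceding hunks implement: any spinlock at ap_list_lock level or below is now taken with interrupts disabled, because the timer interrupt handler can take ap_list_lock. Every such acquisition in the diff follows the same irqsave pattern, in isolation:

    unsigned long flags;

    spin_lock_irqsave(&irq->irq_lock, flags);       /* IRQs off while held */
    /* ... touch per-IRQ state ... */
    spin_unlock_irqrestore(&irq->irq_lock, flags);  /* restore IRQ state */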