KVM: x86: Move uret MSR slot management to common x86
Now that SVM and VMX both probe MSRs before "defining" user return slots for them, consolidate the code for probe+define into common x86 and eliminate the odd behavior of having the vendor code define the slot for a given MSR.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210504171734.1434054-14-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit e5fda4bbad
parent 9cc39a5a43
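The heart of the change is the new common helper kvm_add_user_return_msr(), which folds the old probe-then-define sequence into a single call that returns the slot assigned to the MSR, or -1 if the host does not support it. The stand-alone sketch below models that flow outside the kernel; the stubbed probe, the printf driver and the graceful failure when the table is full (the real code BUG()s instead) are illustrative assumptions, not kernel code.

#include <stdint.h>
#include <stdio.h>

#define KVM_MAX_NR_USER_RETURN_MSRS	16
#define MSR_TSC_AUX			0xc0000103

static uint32_t kvm_uret_msrs_list[KVM_MAX_NR_USER_RETURN_MSRS];
static int kvm_nr_uret_msrs;

/* Stand-in for the real RDMSR/WRMSR probe; pretend every MSR exists. */
static int kvm_probe_user_return_msr(uint32_t msr)
{
	(void)msr;
	return 0;
}

/* Probe the MSR and, only if the probe succeeds, claim the next free slot. */
static int kvm_add_user_return_msr(uint32_t msr)
{
	if (kvm_nr_uret_msrs >= KVM_MAX_NR_USER_RETURN_MSRS)
		return -1;	/* table full (the kernel BUG()s here) */
	if (kvm_probe_user_return_msr(msr))
		return -1;	/* MSR not usable on this host */
	kvm_uret_msrs_list[kvm_nr_uret_msrs] = msr;
	return kvm_nr_uret_msrs++;	/* slot index handed back to vendor code */
}

int main(void)
{
	int tsc_aux_uret_slot = kvm_add_user_return_msr(MSR_TSC_AUX);

	printf("MSR_TSC_AUX got slot %d\n", tsc_aux_uret_slot);
	return 0;
}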
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1777,9 +1777,8 @@ int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
 		    unsigned long ipi_bitmap_high, u32 min,
 		    unsigned long icr, int op_64_bit);
 
-void kvm_define_user_return_msr(unsigned index, u32 msr);
+int kvm_add_user_return_msr(u32 msr);
 int kvm_find_user_return_msr(u32 msr);
-int kvm_probe_user_return_msr(u32 msr);
 int kvm_set_user_return_msr(unsigned index, u64 val, u64 mask);
 
 u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -959,10 +959,7 @@ static __init int svm_hardware_setup(void)
 		kvm_tsc_scaling_ratio_frac_bits = 32;
 	}
 
-	if (!kvm_probe_user_return_msr(MSR_TSC_AUX)) {
-		tsc_aux_uret_slot = 0;
-		kvm_define_user_return_msr(tsc_aux_uret_slot, MSR_TSC_AUX);
-	}
+	tsc_aux_uret_slot = kvm_add_user_return_msr(MSR_TSC_AUX);
 
 	/* Check for pause filtering support */
 	if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -454,9 +454,6 @@ static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
 
 static unsigned long host_idt_base;
 
-/* Number of user return MSRs that are actually supported in hardware. */
-static int vmx_nr_uret_msrs;
-
 #if IS_ENABLED(CONFIG_HYPERV)
 static bool __read_mostly enlightened_vmcs = true;
 module_param(enlightened_vmcs, bool, 0444);
@@ -1218,7 +1215,7 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 	 */
 	if (!vmx->guest_uret_msrs_loaded) {
 		vmx->guest_uret_msrs_loaded = true;
-		for (i = 0; i < vmx_nr_uret_msrs; ++i) {
+		for (i = 0; i < kvm_nr_uret_msrs; ++i) {
 			if (!vmx->guest_uret_msrs[i].load_into_hardware)
 				continue;
 
@@ -6921,7 +6918,7 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
 		goto free_vpid;
 	}
 
-	for (i = 0; i < vmx_nr_uret_msrs; ++i) {
+	for (i = 0; i < kvm_nr_uret_msrs; ++i) {
 		vmx->guest_uret_msrs[i].data = 0;
 		vmx->guest_uret_msrs[i].mask = -1ull;
 	}
@@ -7810,20 +7807,12 @@ static __init void vmx_setup_user_return_msrs(void)
 		MSR_EFER, MSR_TSC_AUX, MSR_STAR,
 		MSR_IA32_TSX_CTRL,
 	};
-	u32 msr;
 	int i;
 
 	BUILD_BUG_ON(ARRAY_SIZE(vmx_uret_msrs_list) != MAX_NR_USER_RETURN_MSRS);
 
-	for (i = 0; i < ARRAY_SIZE(vmx_uret_msrs_list); ++i) {
-		msr = vmx_uret_msrs_list[i];
-
-		if (kvm_probe_user_return_msr(msr))
-			continue;
-
-		kvm_define_user_return_msr(vmx_nr_uret_msrs, msr);
-		vmx_nr_uret_msrs++;
-	}
+	for (i = 0; i < ARRAY_SIZE(vmx_uret_msrs_list); ++i)
+		kvm_add_user_return_msr(vmx_uret_msrs_list[i]);
 }
 
 static __init int hardware_setup(void)
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -336,7 +336,7 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
 	}
 }
 
-int kvm_probe_user_return_msr(u32 msr)
+static int kvm_probe_user_return_msr(u32 msr)
 {
 	u64 val;
 	int ret;
@@ -350,16 +350,18 @@ out:
 	preempt_enable();
 	return ret;
 }
-EXPORT_SYMBOL_GPL(kvm_probe_user_return_msr);
 
-void kvm_define_user_return_msr(unsigned slot, u32 msr)
+int kvm_add_user_return_msr(u32 msr)
 {
-	BUG_ON(slot >= KVM_MAX_NR_USER_RETURN_MSRS);
-	kvm_uret_msrs_list[slot] = msr;
-	if (slot >= kvm_nr_uret_msrs)
-		kvm_nr_uret_msrs = slot + 1;
+	BUG_ON(kvm_nr_uret_msrs >= KVM_MAX_NR_USER_RETURN_MSRS);
+
+	if (kvm_probe_user_return_msr(msr))
+		return -1;
+
+	kvm_uret_msrs_list[kvm_nr_uret_msrs] = msr;
+	return kvm_nr_uret_msrs++;
 }
-EXPORT_SYMBOL_GPL(kvm_define_user_return_msr);
+EXPORT_SYMBOL_GPL(kvm_add_user_return_msr);
 
 int kvm_find_user_return_msr(u32 msr)
 {
@@ -8132,6 +8134,7 @@ int kvm_arch_init(void *opaque)
 		printk(KERN_ERR "kvm: failed to allocate percpu kvm_user_return_msrs\n");
 		goto out_free_x86_emulator_cache;
 	}
+	kvm_nr_uret_msrs = 0;
 
 	r = kvm_mmu_module_init();
 	if (r)