KVM: x86: Move CPU compat checks hook to kvm_x86_ops (from kvm_x86_init_ops)

Move the .check_processor_compatibility() callback from kvm_x86_init_ops
to kvm_x86_ops to allow a future patch to do compatibility checks during
CPU hotplug.
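
A rough, hypothetical sketch of why the hook needs to live in kvm_x86_ops for
that to work: a CPU hotplug "online" callback runs long after the __initdata
kvm_x86_init_ops structure has been discarded, so the compat check must be
reachable through the runtime ops.  The callback, cpuhp state and registration
below are illustrative only and are not part of this series:

#include <linux/cpuhotplug.h>

/* Hypothetical: refuse to online a CPU that fails the compat check. */
static int kvm_x86_online_cpu(unsigned int cpu)
{
        /*
         * kvm_x86_check_processor_compatibility() (see the x86.c hunk below)
         * does the common CR4-reserved-bits check and then invokes the vendor
         * hook via static_call(kvm_x86_check_processor_compatibility)().
         */
        return kvm_x86_check_processor_compatibility();
}

static int __init kvm_x86_compat_hotplug_setup(void)
{
        int ret;

        /* Dynamic hotplug state; an error from the callback aborts onlining. */
        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "kvm/x86/compat:online",
                                kvm_x86_online_cpu, NULL);
        return ret < 0 ? ret : 0;
}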

Do kvm_ops_update() before compat checks so that static_call() can be
used during compat checks.
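
For context, a paraphrased sketch of kvm_ops_update() (condensed from x86.c,
not part of this diff): it copies the vendor's runtime_ops into kvm_x86_ops
and retargets the per-op static calls, so the static_call() mentioned above
only resolves to the vendor hook once kvm_ops_update() has run.  The explicit
static_call_update() below stands in for the macro-generated updates in the
real code:

static inline void kvm_ops_update(struct kvm_x86_init_ops *ops)
{
        /* Install the vendor implementation as the runtime ops. */
        memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops));

        /*
         * The real helper emits one static_call_update() per KVM_X86_OP()
         * entry in asm/kvm-x86-ops.h; only the hook moved by this patch is
         * shown here.
         */
        static_call_update(kvm_x86_check_processor_compatibility,
                           kvm_x86_ops.check_processor_compatibility);

        kvm_pmu_ops_update(ops->pmu_ops);
}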

Signed-off-by: Sean Christopherson <seanjc@google.com>
Reviewed-by: Kai Huang <kai.huang@intel.com>
Message-Id: <20221130230934.1014142-40-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Sean Christopherson, 2022-11-30 23:09:23 +00:00 (committed by Paolo Bonzini)
parent 325fc9579c
commit d83420c2d7
7 changed files with 27 additions and 33 deletions


@@ -14,6 +14,7 @@ BUILD_BUG_ON(1)
* to make a definition optional, but in this case the default will
* be __static_call_return0.
*/
+KVM_X86_OP(check_processor_compatibility)
KVM_X86_OP(hardware_enable)
KVM_X86_OP(hardware_disable)
KVM_X86_OP(hardware_unsetup)


@@ -1511,6 +1511,8 @@ static inline u16 kvm_lapic_irq_dest_mode(bool dest_mode_logical)
struct kvm_x86_ops {
const char *name;
+int (*check_processor_compatibility)(void);
int (*hardware_enable)(void);
void (*hardware_disable)(void);
void (*hardware_unsetup)(void);
@@ -1722,7 +1724,6 @@ struct kvm_x86_nested_ops {
};
struct kvm_x86_init_ops {
-int (*check_processor_compatibility)(void);
int (*hardware_setup)(void);
unsigned int (*handle_intel_pt_intr)(void);


@@ -543,7 +543,7 @@ static bool kvm_is_svm_supported(void)
return true;
}
-static int __init svm_check_processor_compat(void)
+static int svm_check_processor_compat(void)
{
if (!kvm_is_svm_supported())
return -EIO;
@@ -4701,6 +4701,8 @@ static int svm_vm_init(struct kvm *kvm)
static struct kvm_x86_ops svm_x86_ops __initdata = {
.name = KBUILD_MODNAME,
+.check_processor_compatibility = svm_check_processor_compat,
.hardware_unsetup = svm_hardware_unsetup,
.hardware_enable = svm_hardware_enable,
.hardware_disable = svm_hardware_disable,
@@ -5085,7 +5087,6 @@ err:
static struct kvm_x86_init_ops svm_init_ops __initdata = {
.hardware_setup = svm_hardware_setup,
-.check_processor_compatibility = svm_check_processor_compat,
.runtime_ops = &svm_x86_ops,
.pmu_ops = &amd_pmu_ops,


@@ -526,7 +526,7 @@ int nested_evmcs_check_controls(struct vmcs12 *vmcs12)
} \
while (0)
-__init void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf)
+void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf)
{
evmcs_check_vmcs_conf(cpu_based_exec_ctrl, EXEC_CTRL);
evmcs_check_vmcs_conf(pin_based_exec_ctrl, PINCTRL);


@@ -271,7 +271,7 @@ static inline void evmcs_load(u64 phys_addr)
vp_ap->enlighten_vmentry = 1;
}
-__init void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf);
+void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf);
#else /* !IS_ENABLED(CONFIG_HYPERV) */
static __always_inline void evmcs_write64(unsigned long field, u64 value) {}
static inline void evmcs_write32(unsigned long field, u32 value) {}


@@ -2551,8 +2551,7 @@ static bool cpu_has_perf_global_ctrl_bug(void)
return false;
}
-static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
-u32 msr, u32 *result)
+static int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt, u32 msr, u32 *result)
{
u32 vmx_msr_low, vmx_msr_high;
u32 ctl = ctl_min | ctl_opt;
@@ -2570,7 +2569,7 @@ static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
return 0;
}
-static __init u64 adjust_vmx_controls64(u64 ctl_opt, u32 msr)
+static u64 adjust_vmx_controls64(u64 ctl_opt, u32 msr)
{
u64 allowed;
@@ -2579,7 +2578,7 @@ static __init u64 adjust_vmx_controls64(u64 ctl_opt, u32 msr)
return ctl_opt & allowed;
}
-static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
+static int setup_vmcs_config(struct vmcs_config *vmcs_conf,
struct vmx_capability *vmx_cap)
{
u32 vmx_msr_low, vmx_msr_high;
@@ -2746,7 +2745,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
return 0;
}
-static bool __init kvm_is_vmx_supported(void)
+static bool kvm_is_vmx_supported(void)
{
if (!cpu_has_vmx()) {
pr_err("CPU doesn't support VMX\n");
@@ -2762,7 +2761,7 @@ static bool __init kvm_is_vmx_supported(void)
return true;
}
-static int __init vmx_check_processor_compat(void)
+static int vmx_check_processor_compat(void)
{
struct vmcs_config vmcs_conf;
struct vmx_capability vmx_cap;
@@ -8147,6 +8146,8 @@ static void vmx_vm_destroy(struct kvm *kvm)
static struct kvm_x86_ops vmx_x86_ops __initdata = {
.name = KBUILD_MODNAME,
+.check_processor_compatibility = vmx_check_processor_compat,
.hardware_unsetup = vmx_hardware_unsetup,
.hardware_enable = vmx_hardware_enable,
@@ -8544,7 +8545,6 @@ static __init int hardware_setup(void)
}
static struct kvm_x86_init_ops vmx_init_ops __initdata = {
-.check_processor_compatibility = vmx_check_processor_compat,
.hardware_setup = hardware_setup,
.handle_intel_pt_intr = NULL,


@@ -9294,12 +9294,7 @@ static inline void kvm_ops_update(struct kvm_x86_init_ops *ops)
kvm_pmu_ops_update(ops->pmu_ops);
}
-struct kvm_cpu_compat_check {
-struct kvm_x86_init_ops *ops;
-int *ret;
-};
-static int kvm_x86_check_processor_compatibility(struct kvm_x86_init_ops *ops)
+static int kvm_x86_check_processor_compatibility(void)
{
struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
@@ -9309,19 +9304,16 @@ static int kvm_x86_check_processor_compatibility(struct kvm_x86_init_ops *ops)
__cr4_reserved_bits(cpu_has, &boot_cpu_data))
return -EIO;
-return ops->check_processor_compatibility();
+return static_call(kvm_x86_check_processor_compatibility)();
}
-static void kvm_x86_check_cpu_compat(void *data)
+static void kvm_x86_check_cpu_compat(void *ret)
{
-struct kvm_cpu_compat_check *c = data;
-*c->ret = kvm_x86_check_processor_compatibility(c->ops);
+*(int *)ret = kvm_x86_check_processor_compatibility();
}
static int __kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
{
-struct kvm_cpu_compat_check c;
u64 host_pat;
int r, cpu;
@@ -9392,12 +9384,12 @@ static int __kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
if (r != 0)
goto out_mmu_exit;
-c.ret = &r;
-c.ops = ops;
+kvm_ops_update(ops);
for_each_online_cpu(cpu) {
-smp_call_function_single(cpu, kvm_x86_check_cpu_compat, &c, 1);
+smp_call_function_single(cpu, kvm_x86_check_cpu_compat, &r, 1);
if (r < 0)
-goto out_hardware_unsetup;
+goto out_unwind_ops;
}
/*
@@ -9405,8 +9397,6 @@ static int __kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
* absolutely necessary, as most operations from this point forward
* require unwinding.
*/
-kvm_ops_update(ops);
kvm_timer_init();
if (pi_inject_timer == -1)
@@ -9442,8 +9432,9 @@ static int __kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
kvm_init_msr_list();
return 0;
-out_hardware_unsetup:
-ops->runtime_ops->hardware_unsetup();
+out_unwind_ops:
+kvm_x86_ops.hardware_enable = NULL;
+static_call(kvm_x86_hardware_unsetup)();
out_mmu_exit:
kvm_mmu_vendor_module_exit();
out_free_percpu: