KVM: x86: Drop "pre_" from enter/leave_smm() helpers

Now that .post_leave_smm() is gone, drop "pre_" from the remaining
helpers.  The helpers aren't invoked purely before SMI/RSM processing,
e.g. both helpers are invoked after state is snapshotted (from regs or
SMRAM), and the RSM helper is invoked after some amount of register state
has been stuffed.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210609185619.992058-10-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
Sean Christopherson 2021-06-09 11:56:19 -07:00 committed by Paolo Bonzini
parent 0128116550
commit ecc513e5bb
7 changed files with 23 additions and 24 deletions

View File

@@ -109,8 +109,8 @@ KVM_X86_OP_NULL(set_hv_timer)
 KVM_X86_OP_NULL(cancel_hv_timer)
 KVM_X86_OP(setup_mce)
 KVM_X86_OP(smi_allowed)
-KVM_X86_OP(pre_enter_smm)
-KVM_X86_OP(pre_leave_smm)
+KVM_X86_OP(enter_smm)
+KVM_X86_OP(leave_smm)
 KVM_X86_OP(enable_smi_window)
 KVM_X86_OP_NULL(mem_enc_op)
 KVM_X86_OP_NULL(mem_enc_reg_region)

View File

@@ -1372,8 +1372,8 @@ struct kvm_x86_ops {
 	void (*setup_mce)(struct kvm_vcpu *vcpu);
 	int (*smi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
-	int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
-	int (*pre_leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
+	int (*enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
+	int (*leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
 	void (*enable_smi_window)(struct kvm_vcpu *vcpu);
 	int (*mem_enc_op)(struct kvm *kvm, void __user *argp);

View File

@@ -2574,11 +2574,11 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
 	}

 	/*
-	 * Give pre_leave_smm() a chance to make ISA-specific changes to the
-	 * vCPU state (e.g. enter guest mode) before loading state from the SMM
-	 * state-save area.
+	 * Give leave_smm() a chance to make ISA-specific changes to the vCPU
+	 * state (e.g. enter guest mode) before loading state from the SMM
+	 * state-save area.
 	 */
-	if (ctxt->ops->pre_leave_smm(ctxt, buf))
+	if (ctxt->ops->leave_smm(ctxt, buf))
 		goto emulate_shutdown;

 #ifdef CONFIG_X86_64

View File

@@ -231,8 +231,7 @@ struct x86_emulate_ops {
 	unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
 	void (*exiting_smm)(struct x86_emulate_ctxt *ctxt);
-	int (*pre_leave_smm)(struct x86_emulate_ctxt *ctxt,
-			     const char *smstate);
+	int (*leave_smm)(struct x86_emulate_ctxt *ctxt, const char *smstate);
 	void (*triple_fault)(struct x86_emulate_ctxt *ctxt);
 	int (*set_xcr)(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr);
 };

View File

@@ -4258,7 +4258,7 @@ static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
 	return !svm_smi_blocked(vcpu);
 }

-static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
+static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	int ret;
@@ -4280,7 +4280,7 @@ static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 	return 0;
 }

-static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
+static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct kvm_host_map map;
@@ -4555,8 +4555,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.setup_mce = svm_setup_mce,

 	.smi_allowed = svm_smi_allowed,
-	.pre_enter_smm = svm_pre_enter_smm,
-	.pre_leave_smm = svm_pre_leave_smm,
+	.enter_smm = svm_enter_smm,
+	.leave_smm = svm_leave_smm,
 	.enable_smi_window = svm_enable_smi_window,

 	.mem_enc_op = svm_mem_enc_op,

View File

@@ -7544,7 +7544,7 @@ static int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
 	return !is_smm(vcpu);
 }

-static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
+static int vmx_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -7558,7 +7558,7 @@ static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 	return 0;
 }

-static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
+static int vmx_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	int ret;
@@ -7736,8 +7736,8 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
 	.setup_mce = vmx_setup_mce,

 	.smi_allowed = vmx_smi_allowed,
-	.pre_enter_smm = vmx_pre_enter_smm,
-	.pre_leave_smm = vmx_pre_leave_smm,
+	.enter_smm = vmx_enter_smm,
+	.leave_smm = vmx_leave_smm,
 	.enable_smi_window = vmx_enable_smi_window,

 	.can_emulate_instruction = vmx_can_emulate_instruction,

View File

@@ -7216,10 +7216,10 @@ static void emulator_exiting_smm(struct x86_emulate_ctxt *ctxt)
 	kvm_smm_changed(vcpu, false);
 }

-static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt,
-				  const char *smstate)
+static int emulator_leave_smm(struct x86_emulate_ctxt *ctxt,
+			      const char *smstate)
 {
-	return static_call(kvm_x86_pre_leave_smm)(emul_to_vcpu(ctxt), smstate);
+	return static_call(kvm_x86_leave_smm)(emul_to_vcpu(ctxt), smstate);
 }

 static void emulator_triple_fault(struct x86_emulate_ctxt *ctxt)
@@ -7274,7 +7274,7 @@ static const struct x86_emulate_ops emulate_ops = {
 	.set_nmi_mask = emulator_set_nmi_mask,
 	.get_hflags = emulator_get_hflags,
 	.exiting_smm = emulator_exiting_smm,
-	.pre_leave_smm = emulator_pre_leave_smm,
+	.leave_smm = emulator_leave_smm,
 	.triple_fault = emulator_triple_fault,
 	.set_xcr = emulator_set_xcr,
 };
@@ -9006,11 +9006,11 @@ static void enter_smm(struct kvm_vcpu *vcpu)
 		enter_smm_save_state_32(vcpu, buf);

 	/*
-	 * Give pre_enter_smm() a chance to make ISA-specific changes to the
-	 * vCPU state (e.g. leave guest mode) after we've saved the state into
-	 * the SMM state-save area.
+	 * Give enter_smm() a chance to make ISA-specific changes to the vCPU
+	 * state (e.g. leave guest mode) after we've saved the state into the
+	 * SMM state-save area.
 	 */
-	static_call(kvm_x86_pre_enter_smm)(vcpu, buf);
+	static_call(kvm_x86_enter_smm)(vcpu, buf);

 	kvm_smm_changed(vcpu, true);
 	kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));