x86/KVM/VMX: Move the l1tf_flush_l1d test to vmx_l1d_flush()
Currently, vmx_vcpu_run() checks if l1tf_flush_l1d is set and invokes
vmx_l1d_flush() if so.

This test is unnecessary for the "always flush L1D" mode.

Move the check to vmx_l1d_flush()'s conditional mode code path.

Notes:
- vmx_l1d_flush() is likely to get inlined anyway and thus, there's no
  extra function call.

- This inverts the (static) branch prediction, but there hadn't been any
  explicit likely()/unlikely() annotations before and so it stays as is.

Signed-off-by: Nicolai Stange <nstange@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
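For readers who want the resulting control flow without parsing the diff, here is a minimal, hedged userspace sketch of what vmx_l1d_flush() looks like after this change. It is not the kernel code: the static key vmx_l1d_flush_cond and the per-vCPU vcpu->arch.l1tf_flush_l1d flag are modelled as plain booleans, the actual L1D flush is replaced by a printf(), and the main() driver is purely illustrative. Only the branch structure mirrors the hunks below.

/*
 * Simplified sketch of the post-patch logic, with hypothetical scaffolding
 * standing in for the kernel's static keys and vcpu state.
 */
#include <stdbool.h>
#include <stdio.h>

static bool vmx_l1d_flush_cond;   /* true: 'cond' mode, false: 'always' mode */
static bool l1tf_flush_l1d;       /* per-vCPU "flush needed" flag            */

static void vmx_l1d_flush(void)
{
	if (vmx_l1d_flush_cond) {
		bool flush_l1d = l1tf_flush_l1d;

		/*
		 * Clear the flag; in the kernel it is set again either from
		 * vcpu_run() or from one of the unsafe VMEXIT handlers.
		 */
		l1tf_flush_l1d = false;
		if (!flush_l1d)
			return;   /* nothing to do in 'cond' mode */
	}

	printf("L1D flushed\n");  /* stands in for the actual cache flush */
}

int main(void)
{
	/* 'always' mode: flush regardless of the per-vCPU flag. */
	vmx_l1d_flush_cond = false;
	vmx_l1d_flush();

	/* 'cond' mode: flush only when the flag was set. */
	vmx_l1d_flush_cond = true;
	l1tf_flush_l1d = true;
	vmx_l1d_flush();          /* flag set -> flushes and clears it */
	vmx_l1d_flush();          /* flag clear -> returns early       */
	return 0;
}

In "always" mode the flag is never consulted at all, which is exactly why the extra test at the call site in vmx_vcpu_run() could be dropped.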
commit 5b6ccc6c3b
parent 427362a142
@@ -9693,12 +9693,16 @@ static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
 	 * 'always'
 	 */
 	if (static_branch_likely(&vmx_l1d_flush_cond)) {
+		bool flush_l1d = vcpu->arch.l1tf_flush_l1d;
+
 		/*
 		 * Clear the flush bit, it gets set again either from
 		 * vcpu_run() or from one of the unsafe VMEXIT
 		 * handlers.
 		 */
 		vcpu->arch.l1tf_flush_l1d = false;
+		if (!flush_l1d)
+			return;
 	}
 
 	vcpu->stat.l1d_flush++;
@@ -10228,10 +10232,8 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	evmcs_rsp = static_branch_unlikely(&enable_evmcs) ?
 		(unsigned long)&current_evmcs->host_rsp : 0;
 
-	if (static_branch_unlikely(&vmx_l1d_should_flush)) {
-		if (vcpu->arch.l1tf_flush_l1d)
-			vmx_l1d_flush(vcpu);
-	}
+	if (static_branch_unlikely(&vmx_l1d_should_flush))
+		vmx_l1d_flush(vcpu);
 
 	asm(
 		/* Store host registers */