KVM: VMX: Ensure vcpu time stamp counter is monotonous
If the time stamp counter goes backwards, a guest delay loop can become infinite. This can happen if a vcpu is migrated to another cpu, where the counter has a lower value than the first cpu.

Since we're doing an IPI to the first cpu anyway, we can use that to pick up the old tsc, and use that to calculate the adjustment we need to make to the tsc offset.

Signed-off-by: Avi Kivity <avi@qumranet.com>
parent 94cea1bb9d
commit 7700270ee3
@@ -160,6 +160,7 @@ static void __vcpu_clear(void *arg)
                 vmcs_clear(vcpu->vmcs);
         if (per_cpu(current_vmcs, cpu) == vcpu->vmcs)
                 per_cpu(current_vmcs, cpu) = NULL;
+        rdtscll(vcpu->host_tsc);
 }
 
 static void vcpu_clear(struct kvm_vcpu *vcpu)
@@ -376,6 +377,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu)
 {
         u64 phys_addr = __pa(vcpu->vmcs);
         int cpu;
+        u64 tsc_this, delta;
 
         cpu = get_cpu();
 
@@ -409,6 +411,13 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu)
 
                 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
                 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
+
+                /*
+                 * Make sure the time stamp counter is monotonous.
+                 */
+                rdtscll(tsc_this);
+                delta = vcpu->host_tsc - tsc_this;
+                vmcs_write64(TSC_OFFSET, vmcs_read64(TSC_OFFSET) + delta);
         }
 }
 
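As a stand-alone illustration of what the new hunk in vmx_vcpu_load() computes, the sketch below models the guest-visible TSC as the host TSC plus an offset, and bumps that offset by host_tsc - tsc_this when the vcpu is loaded on a cpu whose counter is behind the one it left. It is a minimal model under assumed names, not the kernel code: struct vcpu_model, guest_tsc(), vcpu_load_adjust() and the main() scenario are made up for illustration, and the real implementation keeps the offset in the VMCS TSC_OFFSET field accessed through vmcs_read64()/vmcs_write64().

/*
 * Minimal stand-alone model of the TSC offset adjustment added to
 * vmx_vcpu_load().  All names here are hypothetical; only the
 * arithmetic (delta = host_tsc - tsc_this, offset += delta) mirrors
 * the kernel change.
 */
#include <stdint.h>
#include <stdio.h>

struct vcpu_model {
        uint64_t host_tsc;   /* host TSC sampled when the vcpu left its old cpu */
        uint64_t tsc_offset; /* guest TSC = host TSC + tsc_offset */
};

/* Guest-visible TSC, as the hardware would compute it from the offset. */
static uint64_t guest_tsc(const struct vcpu_model *v, uint64_t host_tsc_now)
{
        return host_tsc_now + v->tsc_offset;
}

/*
 * Called when the vcpu is loaded on a (possibly different) cpu.  If the
 * new cpu's TSC is behind the value recorded on the old cpu, the delta
 * is positive and grows the offset; unsigned wrap-around handles the
 * "new cpu is ahead" case the same way the kernel code does.
 */
static void vcpu_load_adjust(struct vcpu_model *v, uint64_t tsc_this)
{
        uint64_t delta = v->host_tsc - tsc_this;

        v->tsc_offset += delta;
}

int main(void)
{
        struct vcpu_model v = { .host_tsc = 0, .tsc_offset = 0 };

        /* vcpu is cleared on cpu 0; the old cpu's TSC is sampled there. */
        v.host_tsc = 1000000;

        /* vcpu is then loaded on cpu 1, whose TSC is behind cpu 0's. */
        uint64_t tsc_on_new_cpu = 400000;
        uint64_t unadjusted = guest_tsc(&v, tsc_on_new_cpu);

        vcpu_load_adjust(&v, tsc_on_new_cpu);
        uint64_t adjusted = guest_tsc(&v, tsc_on_new_cpu);

        /* Without the adjustment the guest would see the counter jump back. */
        printf("guest tsc without adjustment: %llu, with adjustment: %llu\n",
               (unsigned long long)unadjusted, (unsigned long long)adjusted);
        return 0;
}

In this toy run the unadjusted guest TSC (400000) would be lower than the value the guest last observed, while the adjusted one resumes at 1000000, which is the monotonicity property the commit is after.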