KVM: x86: Replace call-back set_tsc_khz() with a common function
Both VMX and SVM propagate virtual_tsc_khz in the same way, so this patch removes the call-back set_tsc_khz() and replaces it with a common function.

Signed-off-by: Haozhong Zhang <haozhong.zhang@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 35181e86df
commit 381d585c80
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -853,7 +853,6 @@ struct kvm_x86_ops {
 
 	bool (*has_wbinvd_exit)(void);
 
-	void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale);
 	u64 (*read_tsc_offset)(struct kvm_vcpu *vcpu);
 	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -957,41 +957,6 @@ static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
 	seg->base = 0;
 }
 
-static void svm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
-{
-	u64 ratio;
-	u64 khz;
-
-	/* Guest TSC same frequency as host TSC? */
-	if (!scale) {
-		vcpu->arch.tsc_scaling_ratio = TSC_RATIO_DEFAULT;
-		return;
-	}
-
-	/* TSC scaling supported? */
-	if (!boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
-		if (user_tsc_khz > tsc_khz) {
-			vcpu->arch.tsc_catchup = 1;
-			vcpu->arch.tsc_always_catchup = 1;
-		} else
-			WARN(1, "user requested TSC rate below hardware speed\n");
-		return;
-	}
-
-	khz = user_tsc_khz;
-
-	/* TSC scaling required - calculate ratio */
-	ratio = khz << 32;
-	do_div(ratio, tsc_khz);
-
-	if (ratio == 0 || ratio & TSC_RATIO_RSVD) {
-		WARN_ONCE(1, "Invalid TSC ratio - virtual-tsc-khz=%u\n",
-				user_tsc_khz);
-		return;
-	}
-	vcpu->arch.tsc_scaling_ratio = ratio;
-}
-
 static u64 svm_read_tsc_offset(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
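(For reference: the removed SVM code computed the ratio in AMD's 8.32 fixed-point format, e.g. virtual_tsc_khz = 1300000 on a 2600000 kHz host yields ratio = (1300000ULL << 32) / 2600000 = 0x80000000, i.e. 0.5; the TSC_RATIO_RSVD check rejects results whose integer part does not fit in 8 bits. The common replacement in x86.c below generalizes the shift via kvm_tsc_scaling_ratio_frac_bits.)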
@@ -4402,7 +4367,6 @@ static struct kvm_x86_ops svm_x86_ops = {
 
 	.has_wbinvd_exit = svm_has_wbinvd_exit,
 
-	.set_tsc_khz = svm_set_tsc_khz,
 	.read_tsc_offset = svm_read_tsc_offset,
 	.write_tsc_offset = svm_write_tsc_offset,
 	.adjust_tsc_offset = svm_adjust_tsc_offset,
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2382,22 +2382,6 @@ static u64 vmx_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
 	return host_tsc + tsc_offset;
 }
 
-/*
- * Engage any workarounds for mis-matched TSC rates. Currently limited to
- * software catchup for faster rates on slower CPUs.
- */
-static void vmx_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
-{
-	if (!scale)
-		return;
-
-	if (user_tsc_khz > tsc_khz) {
-		vcpu->arch.tsc_catchup = 1;
-		vcpu->arch.tsc_always_catchup = 1;
-	} else
-		WARN(1, "user requested TSC rate below hardware speed\n");
-}
-
 static u64 vmx_read_tsc_offset(struct kvm_vcpu *vcpu)
 {
 	return vmcs_read64(TSC_OFFSET);
@@ -10826,7 +10810,6 @@ static struct kvm_x86_ops vmx_x86_ops = {
 
 	.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
 
-	.set_tsc_khz = vmx_set_tsc_khz,
 	.read_tsc_offset = vmx_read_tsc_offset,
 	.write_tsc_offset = vmx_write_tsc_offset,
 	.adjust_tsc_offset = vmx_adjust_tsc_offset,
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1253,7 +1253,43 @@ static u32 adjust_tsc_khz(u32 khz, s32 ppm)
 	return v;
 }
 
-static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
+static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
+{
+	u64 ratio;
+
+	/* Guest TSC same frequency as host TSC? */
+	if (!scale) {
+		vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
+		return 0;
+	}
+
+	/* TSC scaling supported? */
+	if (!kvm_has_tsc_control) {
+		if (user_tsc_khz > tsc_khz) {
+			vcpu->arch.tsc_catchup = 1;
+			vcpu->arch.tsc_always_catchup = 1;
+			return 0;
+		} else {
+			WARN(1, "user requested TSC rate below hardware speed\n");
+			return -1;
+		}
+	}
+
+	/* TSC scaling required - calculate ratio */
+	ratio = mul_u64_u32_div(1ULL << kvm_tsc_scaling_ratio_frac_bits,
+				user_tsc_khz, tsc_khz);
+
+	if (ratio == 0 || ratio >= kvm_max_tsc_scaling_ratio) {
+		WARN_ONCE(1, "Invalid TSC scaling ratio - virtual-tsc-khz=%u\n",
+			  user_tsc_khz);
+		return -1;
+	}
+
+	vcpu->arch.tsc_scaling_ratio = ratio;
+	return 0;
+}
+
+static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
 {
 	u32 thresh_lo, thresh_hi;
 	int use_scaling = 0;
@@ -1262,7 +1298,7 @@ static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
 	if (this_tsc_khz == 0) {
 		/* set tsc_scaling_ratio to a safe value */
 		vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
-		return;
+		return -1;
 	}
 
 	/* Compute a scale to convert nanoseconds in TSC cycles */
@@ -1283,7 +1319,7 @@ static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
 		pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", this_tsc_khz, thresh_lo, thresh_hi);
 		use_scaling = 1;
 	}
-	kvm_x86_ops->set_tsc_khz(vcpu, this_tsc_khz, use_scaling);
+	return set_tsc_khz(vcpu, this_tsc_khz, use_scaling);
 }
 
 static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
@@ -3353,9 +3389,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		if (user_tsc_khz == 0)
 			user_tsc_khz = tsc_khz;
 
-		kvm_set_tsc_khz(vcpu, user_tsc_khz);
+		if (!kvm_set_tsc_khz(vcpu, user_tsc_khz))
+			r = 0;
 
-		r = 0;
 		goto out;
 	}
 	case KVM_GET_TSC_KHZ: {
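The common set_tsc_khz() above expresses the same arithmetic vendor-neutrally: ratio = user_tsc_khz * 2^kvm_tsc_scaling_ratio_frac_bits / tsc_khz, with mul_u64_u32_div() (added below) keeping the intermediate product from overflowing 64 bits. A minimal user-space sketch of that arithmetic follows; the frequencies and the 32 fractional bits are illustrative assumptions, and 128-bit math stands in for the helper:

/* Sketch of set_tsc_khz()'s ratio math; frequencies and frac_bits
 * are assumed values, not taken from real hardware. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int frac_bits = 32;     /* e.g. SVM's 8.32 format */
	uint32_t tsc_khz = 2000000;      /* assumed host TSC: 2.0 GHz */
	uint32_t user_tsc_khz = 3000000; /* assumed guest TSC: 3.0 GHz */

	/* ratio = user_tsc_khz * 2^frac_bits / tsc_khz */
	uint64_t ratio = (uint64_t)(((__uint128_t)1 << frac_bits) *
				    user_tsc_khz / tsc_khz);

	/* 3.0 GHz / 2.0 GHz = 1.5 -> 0x180000000 in 8.32 fixed point */
	printf("ratio = %#llx\n", (unsigned long long)ratio);
	return 0;
}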
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -214,4 +214,33 @@ static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
 
 #endif
 
+#ifndef mul_u64_u32_div
+static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
+{
+	union {
+		u64 ll;
+		struct {
+#ifdef __BIG_ENDIAN
+			u32 high, low;
+#else
+			u32 low, high;
+#endif
+		} l;
+	} u, rl, rh;
+
+	u.ll = a;
+	rl.ll = (u64)u.l.low * mul;
+	rh.ll = (u64)u.l.high * mul + rl.l.high;
+
+	/* Bits 32-63 of the result will be in rh.l.low. */
+	rl.l.high = do_div(rh.ll, divisor);
+
+	/* Bits 0-31 of the result will be in rl.l.low. */
+	do_div(rl.ll, divisor);
+
+	rl.l.high = rh.l.low;
+	return rl.ll;
+}
+#endif /* mul_u64_u32_div */
+
 #endif /* _LINUX_MATH64_H */
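mul_u64_u32_div() computes a * mul / divisor by splitting a into 32-bit halves, so the intermediate product never overflows 64 bits even when a * mul needs up to 96 bits. Below is a quick self-contained check of the same split-word algorithm against native 128-bit math (gcc/clang on a 64-bit host assumed); it is an illustrative harness, not part of the patch:

/* User-space re-expression of the generic mul_u64_u32_div() fallback,
 * with do_div() replaced by plain C division/modulus. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t mul_u64_u32_div(uint64_t a, uint32_t mul, uint32_t divisor)
{
	uint64_t lo = (uint64_t)(uint32_t)a * mul;   /* low half of a times mul */
	uint64_t hi = (a >> 32) * mul + (lo >> 32);  /* high half plus carry */

	uint64_t q_hi = hi / divisor;                /* bits 32..63 of result */
	uint64_t rem  = hi % divisor;
	uint64_t q_lo = ((rem << 32) | (uint32_t)lo) / divisor; /* bits 0..31 */

	return (q_hi << 32) | (uint32_t)q_lo;
}

int main(void)
{
	uint64_t a = 0x0123456789abcdefULL;
	uint32_t mul = 1300000, div = 2600000;

	uint64_t want = (uint64_t)((__uint128_t)a * mul / div);
	uint64_t got  = mul_u64_u32_div(a, mul, div);

	printf("got=%llu want=%llu\n", (unsigned long long)got,
	       (unsigned long long)want);
	assert(got == want);
	return 0;
}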