KVM: Choose better candidate for directed yield

Currently, on large vcpu guests there is a high probability of yielding
to the same vcpu that recently did a pause-loop exit or had cpu relax
intercepted. Such a yield can lead to that vcpu spinning again and hence
degrades performance. The patchset keeps track of pause-loop exits and
cpu relax interceptions and gives a chance to a vcpu which:

 (a) has not done a pause-loop exit or had cpu relax intercepted at all
     (it is probably a preempted lock holder), or

 (b) was skipped in the last iteration because it did a pause-loop exit
     or had cpu relax intercepted, and has probably become eligible now
     (the next eligible lock holder).

Signed-off-by: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Reviewed-by: Marcelo Tosatti <mtosatti@redhat.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Tested-by: Christian Borntraeger <borntraeger@de.ibm.com> # on s390x
Signed-off-by: Avi Kivity <avi@redhat.com>

parent 4c088493c8
commit 06e48c510a
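The toggling in heuristic (b) is the subtle part: a spinning vcpu that is
skipped once flips its own eligibility flag, so it becomes a candidate on the
next pass. Below is a minimal userspace sketch of that idea, not the kernel
code itself; `struct toy_vcpu` and its fields are hypothetical stand-ins for
the per-vcpu `spin_loop.in_spin_loop` / `spin_loop.dy_eligible` state the
patch relies on.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical userspace model of the per-vcpu state the patch uses:
 * in_spin_loop is set while the vcpu is inside the PLE/cpu-relax handler;
 * dy_eligible is the flag that is toggled on every eligibility check. */
struct toy_vcpu {
	bool in_spin_loop;
	bool dy_eligible;
};

/* Mirrors the heuristic: a vcpu that is not spinning is always eligible
 * (case a); a spinning vcpu is eligible only on every other check,
 * because the flag is flipped each time it is examined (case b). */
static bool eligible_for_directed_yield(struct toy_vcpu *vcpu)
{
	bool eligible = !vcpu->in_spin_loop || vcpu->dy_eligible;

	if (vcpu->in_spin_loop)
		vcpu->dy_eligible = !vcpu->dy_eligible;

	return eligible;
}

int main(void)
{
	struct toy_vcpu spinner = { .in_spin_loop = true, .dy_eligible = false };

	/* A spinning vcpu alternates: skipped, eligible, skipped, ... */
	for (int i = 0; i < 4; i++)
		printf("check %d: %s\n", i,
		       eligible_for_directed_yield(&spinner) ? "yield" : "skip");
	return 0;
}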
include/linux/kvm_host.h

@@ -931,6 +931,11 @@ static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
 {
 }
 
+static inline bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
+{
+	return true;
+}
+
 #endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
 #endif
 
virt/kvm/kvm_main.c

@@ -1579,6 +1579,43 @@ bool kvm_vcpu_yield_to(struct kvm_vcpu *target)
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
 
+#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
+/*
+ * Helper that checks whether a VCPU is eligible for directed yield.
+ * Most eligible candidate to yield is decided by following heuristics:
+ *
+ * (a) VCPU which has not done pl-exit or cpu relax intercepted recently
+ * (preempted lock holder), indicated by @in_spin_loop.
+ * Set at the beginning and cleared at the end of interception/PLE handler.
+ *
+ * (b) VCPU which has done pl-exit/cpu relax intercepted but did not get a
+ * chance last time (it has mostly become eligible now, since we have probably
+ * yielded to the lockholder in the last iteration). This is done by toggling
+ * @dy_eligible each time a VCPU is checked for eligibility.
+ *
+ * Yielding to a recently pl-exited/cpu relax intercepted VCPU before yielding
+ * to a preempted lock-holder could result in wrong VCPU selection and CPU
+ * burning. Giving priority to a potential lock-holder increases lock
+ * progress.
+ *
+ * Since the algorithm is based on heuristics, accessing another VCPU's data
+ * without locking does not harm. It may result in trying to yield to the same
+ * VCPU, failing, and continuing with the next VCPU, and so on.
+ */
+bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
+{
+	bool eligible;
+
+	eligible = !vcpu->spin_loop.in_spin_loop ||
+			(vcpu->spin_loop.in_spin_loop &&
+			 vcpu->spin_loop.dy_eligible);
+
+	if (vcpu->spin_loop.in_spin_loop)
+		kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
+
+	return eligible;
+}
+#endif
 void kvm_vcpu_on_spin(struct kvm_vcpu *me)
 {
 	struct kvm *kvm = me->kvm;
@@ -1607,6 +1644,8 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
 				continue;
 			if (waitqueue_active(&vcpu->wq))
 				continue;
+			if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
+				continue;
 			if (kvm_vcpu_yield_to(vcpu)) {
 				kvm->last_boosted_vcpu = i;
 				yielded = 1;
@@ -1615,6 +1654,9 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
 		}
 	}
 	kvm_vcpu_set_in_spin_loop(me, false);
+
+	/* Ensure vcpu is not eligible during next spinloop */
+	kvm_vcpu_set_dy_eligible(me, false);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
 
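To see how the eligibility filter slots into the candidate scan in
kvm_vcpu_on_spin(), here is a self-contained userspace model under the same
caveat as above: every name in it (`pick_yield_target`, the arrays) is
hypothetical, and it only mimics the round-robin scan starting after the last
boosted vcpu, not the real runnability and wait-queue checks.

#include <stdbool.h>
#include <stdio.h>

#define NR_VCPUS 4

/* Hypothetical flags standing in for "vcpu did a PLE/cpu-relax exit". */
static bool in_spin_loop[NR_VCPUS] = { true, false, true, true };
static bool dy_eligible[NR_VCPUS];

static bool eligible(int i)
{
	bool e = !in_spin_loop[i] || dy_eligible[i];

	if (in_spin_loop[i])
		dy_eligible[i] = !dy_eligible[i];
	return e;
}

/* Model of the kvm_vcpu_on_spin() scan: starting after the last boosted
 * vcpu, pick the first candidate that passes the eligibility filter. */
static int pick_yield_target(int me, int last_boosted)
{
	for (int n = 1; n <= NR_VCPUS; n++) {
		int i = (last_boosted + n) % NR_VCPUS;

		if (i == me)
			continue;	/* never yield to ourselves */
		if (!eligible(i))
			continue;	/* skip recent spinners, case (b) */
		return i;
	}
	return -1;			/* no candidate this round */
}

int main(void)
{
	printf("yield to vcpu %d\n", pick_yield_target(0, 0));
	return 0;
}

With the data above this prints "yield to vcpu 1": the vcpu that never spun
wins per heuristic (a), while a spinner skipped in one scan has its flag
toggled and can be picked on a later pass, per heuristic (b).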