locking/qspinlock/x86: Fix performance regression under unaccelerated VMs
Dave ran into horrible performance on a VM without PARAVIRT_SPINLOCKS set and Linus noted that the test-and-set implementation was retarded. One should spin on the variable with a load, not a RMW. While there, remove 'queued' from the name, as the lock isn't queued at all, but a simple test-and-set. Suggested-by: Linus Torvalds <torvalds@linux-foundation.org> Reported-by: Dave Chinner <david@fromorbit.com> Tested-by: Dave Chinner <david@fromorbit.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Waiman Long <Waiman.Long@hp.com> Cc: stable@vger.kernel.org # v4.2+ Link: http://lkml.kernel.org/r/20150904152523.GR18673@twins.programming.kicks-ass.net Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent
edcd591c77
commit
43b3f02899
|
@@ -39,15 +39,23 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#define virt_queued_spin_lock virt_queued_spin_lock
|
#define virt_spin_lock virt_spin_lock
|
||||||
|
|
||||||
static inline bool virt_queued_spin_lock(struct qspinlock *lock)
|
static inline bool virt_spin_lock(struct qspinlock *lock)
|
||||||
{
|
{
|
||||||
if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
|
if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0)
|
/*
|
||||||
cpu_relax();
|
* On hypervisors without PARAVIRT_SPINLOCKS support we fall
|
||||||
|
* back to a Test-and-Set spinlock, because fair locks have
|
||||||
|
* horrible lock 'holder' preemption issues.
|
||||||
|
*/
|
||||||
|
|
||||||
|
do {
|
||||||
|
while (atomic_read(&lock->val) != 0)
|
||||||
|
cpu_relax();
|
||||||
|
} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);
|
||||||
|
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
|
@@ -111,8 +111,8 @@ static inline void queued_spin_unlock_wait(struct qspinlock *lock)
|
||||||
cpu_relax();
|
cpu_relax();
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifndef virt_queued_spin_lock
|
#ifndef virt_spin_lock
|
||||||
static __always_inline bool virt_queued_spin_lock(struct qspinlock *lock)
|
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
|
||||||
{
|
{
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
|
@@ -289,7 +289,7 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
|
||||||
if (pv_enabled())
|
if (pv_enabled())
|
||||||
goto queue;
|
goto queue;
|
||||||
|
|
||||||
if (virt_queued_spin_lock(lock))
|
if (virt_spin_lock(lock))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
Loading…
Reference in New Issue