Merge branch 'locking/urgent' into locking/core, to pick up dependency
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit ae0b5c2f03
include/asm-generic/qspinlock.h
@@ -21,38 +21,34 @@
 
 #include <asm-generic/qspinlock_types.h>
 
+/**
+ * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock
+ * @lock : Pointer to queued spinlock structure
+ *
+ * There is a very slight possibility of live-lock if the lockers keep coming
+ * and the waiter is just unfortunate enough to not see any unlock state.
+ */
+#ifndef queued_spin_unlock_wait
+extern void queued_spin_unlock_wait(struct qspinlock *lock);
+#endif
+
 /**
  * queued_spin_is_locked - is the spinlock locked?
  * @lock: Pointer to queued spinlock structure
  * Return: 1 if it is locked, 0 otherwise
  */
+#ifndef queued_spin_is_locked
 static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
 {
         /*
-         * queued_spin_lock_slowpath() can ACQUIRE the lock before
-         * issuing the unordered store that sets _Q_LOCKED_VAL.
-         *
-         * See both smp_cond_acquire() sites for more detail.
-         *
-         * This however means that in code like:
-         *
-         *   spin_lock(A)               spin_lock(B)
-         *   spin_unlock_wait(B)        spin_is_locked(A)
-         *   do_something()             do_something()
-         *
-         * Both CPUs can end up running do_something() because the store
-         * setting _Q_LOCKED_VAL will pass through the loads in
-         * spin_unlock_wait() and/or spin_is_locked().
-         *
-         * Avoid this by issuing a full memory barrier between the spin_lock()
-         * and the loads in spin_unlock_wait() and spin_is_locked().
-         *
-         * Note that regular mutual exclusion doesn't care about this
-         * delayed store.
+         * See queued_spin_unlock_wait().
+         *
+         * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
+         * isn't immediately observable.
          */
-        smp_mb();
-        return atomic_read(&lock->val) & _Q_LOCKED_MASK;
+        return atomic_read(&lock->val);
 }
+#endif
 
 /**
  * queued_spin_value_unlocked - is the spinlock structure unlocked?
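Editorial note for context: the cross-CPU scenario in the comment removed above is the classic store-buffering pattern, where each CPU stores to its own lock word and then loads the other CPU's lock word, and without full barriers both loads can miss both stores. The sketch below is only an illustration in userspace C11 atomics (cpu0/cpu1, locked_a/locked_b and the fence placement are assumptions of this sketch, not kernel code); with both seq_cst fences in place the outcome saw_a == 0 && saw_b == 0 is forbidden, which is the guarantee the old smp_mb() in queued_spin_is_locked() provided.

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

/*
 * Model: "taking" lock X is a relaxed store to locked_X; the load in
 * spin_unlock_wait()/spin_is_locked() on the other lock is a relaxed
 * load.  The seq_cst fences stand in for smp_mb().
 */
static atomic_int locked_a, locked_b;
static int saw_a, saw_b;

static void *cpu0(void *arg)
{
        (void)arg;
        atomic_store_explicit(&locked_a, 1, memory_order_relaxed);     /* spin_lock(A) */
        atomic_thread_fence(memory_order_seq_cst);                     /* smp_mb() */
        saw_b = atomic_load_explicit(&locked_b, memory_order_relaxed); /* spin_unlock_wait(B)'s load */
        return NULL;
}

static void *cpu1(void *arg)
{
        (void)arg;
        atomic_store_explicit(&locked_b, 1, memory_order_relaxed);     /* spin_lock(B) */
        atomic_thread_fence(memory_order_seq_cst);                     /* smp_mb() */
        saw_a = atomic_load_explicit(&locked_a, memory_order_relaxed); /* spin_is_locked(A) */
        return NULL;
}

int main(void)
{
        pthread_t t0, t1;

        pthread_create(&t0, NULL, cpu0, NULL);
        pthread_create(&t1, NULL, cpu1, NULL);
        pthread_join(t0, NULL);
        pthread_join(t1, NULL);

        /* At least one of the two loads must have observed the other store. */
        printf("saw_a=%d saw_b=%d\n", saw_a, saw_b);
        return 0;
}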
@@ -122,21 +118,6 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock)
 }
 #endif
 
-/**
- * queued_spin_unlock_wait - wait until current lock holder releases the lock
- * @lock : Pointer to queued spinlock structure
- *
- * There is a very slight possibility of live-lock if the lockers keep coming
- * and the waiter is just unfortunate enough to not see any unlock state.
- */
-static inline void queued_spin_unlock_wait(struct qspinlock *lock)
-{
-        /* See queued_spin_is_locked() */
-        smp_mb();
-        while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
-                cpu_relax();
-}
-
 #ifndef virt_spin_lock
 static __always_inline bool virt_spin_lock(struct qspinlock *lock)
 {
kernel/locking/qspinlock.c
@@ -267,6 +267,66 @@ static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock,
 #define queued_spin_lock_slowpath      native_queued_spin_lock_slowpath
 #endif
 
+/*
+ * queued_spin_lock_slowpath() can (load-)ACQUIRE the lock before
+ * issuing an _unordered_ store to set _Q_LOCKED_VAL.
+ *
+ * This means that the store can be delayed, but no later than the
+ * store-release from the unlock. This means that simply observing
+ * _Q_LOCKED_VAL is not sufficient to determine if the lock is acquired.
+ *
+ * There are two paths that can issue the unordered store:
+ *
+ *  (1) clear_pending_set_locked():    *,1,0 -> *,0,1
+ *
+ *  (2) set_locked():                  t,0,0 -> t,0,1 ; t != 0
+ *      atomic_cmpxchg_relaxed():      t,0,0 -> 0,0,1
+ *
+ * However, in both cases we have other !0 state we've set before to queue
+ * ourselves:
+ *
+ * For (1) we have the atomic_cmpxchg_acquire() that set _Q_PENDING_VAL, our
+ * load is constrained by that ACQUIRE to not pass before that, and thus must
+ * observe the store.
+ *
+ * For (2) we have a more interesting scenario. We enqueue ourselves using
+ * xchg_tail(), which ends up being a RELEASE. This in itself is not
+ * sufficient, however that is followed by an smp_cond_acquire() on the same
+ * word, giving a RELEASE->ACQUIRE ordering. This again constrains our load and
+ * guarantees we must observe that store.
+ *
+ * Therefore both cases have other !0 state that is observable before the
+ * unordered locked byte store comes through. This means we can use that to
+ * wait for the lock store, and then wait for an unlock.
+ */
+#ifndef queued_spin_unlock_wait
+void queued_spin_unlock_wait(struct qspinlock *lock)
+{
+        u32 val;
+
+        for (;;) {
+                val = atomic_read(&lock->val);
+
+                if (!val) /* not locked, we're done */
+                        goto done;
+
+                if (val & _Q_LOCKED_MASK) /* locked, go wait for unlock */
+                        break;
+
+                /* not locked, but pending, wait until we observe the lock */
+                cpu_relax();
+        }
+
+        /* any unlock is good */
+        while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
+                cpu_relax();
+
+done:
+        smp_rmb(); /* CTRL + RMB -> ACQUIRE */
+}
+EXPORT_SYMBOL(queued_spin_unlock_wait);
+#endif
+
 #endif /* _GEN_PV_LOCK_SLOWPATH */
 
 /**
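Editorial note on usage: the helper added above is meant for callers that publish some state and then wait for the current critical section, if any, to drain, in the style of the smp_mb() plus raw_spin_unlock_wait(&tsk->pi_lock) sequence that do_exit() used in this era. The sketch below is hypothetical (work_lock, the stop flag and process_one_item() are invented names, not part of this commit) and relies only on the documented guarantee that the waiter returns once the current lock holder, if any, has released the lock.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(work_lock);
static bool stop;

extern void process_one_item(void);    /* invented helper for the sketch */

void worker(void)
{
        spin_lock(&work_lock);
        if (!stop)
                process_one_item();
        spin_unlock(&work_lock);
}

void stopper(void)
{
        stop = true;
        /*
         * Order the store to 'stop' against the lock-word loads done by
         * spin_unlock_wait(); see the ordering comment added above.
         */
        smp_mb();
        spin_unlock_wait(&work_lock);  /* queued_spin_unlock_wait() on qspinlock */
        /* Whoever held work_lock when we got here has released it by now. */
}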