locking/qspinlock: Rework some comments
While working my way through the code again, I felt the comments could use help.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: andrea.parri@amarulasolutions.com
Cc: longman@redhat.com
Link: https://lkml.kernel.org/r/20181003130257.156322446@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent: 53bf57fab7
commit: 756b1df4c2
@ -326,16 +326,23 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
|
||||||
/*
|
/*
|
||||||
* trylock || pending
|
* trylock || pending
|
||||||
*
|
*
|
||||||
* 0,0,0 -> 0,0,1 ; trylock
|
* 0,0,* -> 0,1,* -> 0,0,1 pending, trylock
|
||||||
* 0,0,1 -> 0,1,1 ; pending
|
|
||||||
*/
|
*/
|
||||||
val = atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val);
|
val = atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* If we observe any contention; undo and queue.
|
* If we observe contention, there is a concurrent locker.
|
||||||
|
*
|
||||||
|
* Undo and queue; our setting of PENDING might have made the
|
||||||
|
* n,0,0 -> 0,0,0 transition fail and it will now be waiting
|
||||||
|
* on @next to become !NULL.
|
||||||
*/
|
*/
|
||||||
if (unlikely(val & ~_Q_LOCKED_MASK)) {
|
if (unlikely(val & ~_Q_LOCKED_MASK)) {
|
||||||
|
|
||||||
|
/* Undo PENDING if we set it. */
|
||||||
if (!(val & _Q_PENDING_MASK))
|
if (!(val & _Q_PENDING_MASK))
|
||||||
clear_pending(lock);
|
clear_pending(lock);
|
||||||
|
|
||||||
goto queue;
|
goto queue;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -474,16 +481,25 @@ locked:
|
||||||
*/
|
*/
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* In the PV case we might already have _Q_LOCKED_VAL set.
|
* In the PV case we might already have _Q_LOCKED_VAL set, because
|
||||||
|
* of lock stealing; therefore we must also allow:
|
||||||
*
|
*
|
||||||
* The atomic_cond_read_acquire() call above has provided the
|
* n,0,1 -> 0,0,1
|
||||||
* necessary acquire semantics required for locking.
|
*
|
||||||
|
* Note: at this point: (val & _Q_PENDING_MASK) == 0, because of the
|
||||||
|
* above wait condition, therefore any concurrent setting of
|
||||||
|
* PENDING will make the uncontended transition fail.
|
||||||
*/
|
*/
|
||||||
if (((val & _Q_TAIL_MASK) == tail) &&
|
if ((val & _Q_TAIL_MASK) == tail) {
|
||||||
atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL))
|
if (atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL))
|
||||||
goto release; /* No contention */
|
goto release; /* No contention */
|
||||||
|
}
|
||||||
|
|
||||||
/* Either somebody is queued behind us or _Q_PENDING_VAL is set */
|
/*
|
||||||
|
* Either somebody is queued behind us or _Q_PENDING_VAL got set
|
||||||
|
* which will then detect the remaining tail and queue behind us
|
||||||
|
* ensuring we'll see a @next.
|
||||||
|
*/
|
||||||
set_locked(lock);
|
set_locked(lock);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
Loading…
Reference in New Issue