s390/spinlock: refactor arch_spin_lock_wait[_flags]

Reorder the spinlock wait code to make it more readable.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
commit 470ada6b1a
parent 939c5ae402
arch/s390/lib/spinlock.c

@@ -31,23 +31,31 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 	int count;
 
 	while (1) {
-		owner = lp->lock;
-		if (!owner || smp_vcpu_scheduled(~owner)) {
-			count = spin_retry;
-			do {
-				if (arch_spin_is_locked(lp))
-					continue;
-				if (_raw_compare_and_swap(&lp->lock, 0, cpu))
-					return;
-			} while (count-- > 0);
-			if (MACHINE_IS_LPAR)
-				continue;
-		}
-		owner = lp->lock;
-		if (owner)
-			smp_yield_cpu(~owner);
-		if (_raw_compare_and_swap(&lp->lock, 0, cpu))
-			return;
+		owner = ACCESS_ONCE(lp->lock);
+		/* Try to get the lock if it is free. */
+		if (!owner) {
+			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
+				return;
+			continue;
+		}
+		/* Check if the lock owner is running. */
+		if (!smp_vcpu_scheduled(~owner)) {
+			smp_yield_cpu(~owner);
+			continue;
+		}
+		/* Loop for a while on the lock value. */
+		count = spin_retry;
+		do {
+			owner = ACCESS_ONCE(lp->lock);
+		} while (owner && count-- > 0);
+		if (!owner)
+			continue;
+		/*
+		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
+		 * yield the CPU if the lock is still unavailable.
+		 */
+		if (!MACHINE_IS_LPAR)
+			smp_yield_cpu(~owner);
 	}
 }
 EXPORT_SYMBOL(arch_spin_lock_wait);
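For readers outside the kernel tree, the shape of the refactored wait loop lifts out cleanly: try a compare-and-swap on a free lock, yield directly to a preempted owner, spin on the lock word for a bounded number of reads, and only then give up the CPU. Below is a minimal userspace sketch of that sequence; spin_lock_wait, vcpu_is_running and SPIN_RETRY are hypothetical names, GCC's __atomic builtins and sched_yield() stand in for _raw_compare_and_swap, smp_vcpu_scheduled and smp_yield_cpu, and cpu is assumed to be a nonzero per-CPU token (the kernel stores the bitwise-inverted CPU number, so 0 means free).

/*
 * Userspace sketch of the refactored wait loop. All names here are
 * hypothetical stand-ins for the kernel primitives, not the kernel API.
 */
#include <sched.h>
#include <stdbool.h>

#define SPIN_RETRY 1000

/* Stand-in for smp_vcpu_scheduled(): assume the owner is always running. */
static bool vcpu_is_running(unsigned int owner)
{
	(void)owner;
	return true;
}

static void spin_lock_wait(unsigned int *lock, unsigned int cpu)
{
	unsigned int owner, old;
	int count;

	while (1) {
		owner = __atomic_load_n(lock, __ATOMIC_RELAXED);
		/* Try to get the lock if it is free. */
		if (!owner) {
			old = 0;
			if (__atomic_compare_exchange_n(lock, &old, cpu,
					false, __ATOMIC_ACQUIRE,
					__ATOMIC_RELAXED))
				return;
			continue;
		}
		/* Yield if the owner's virtual CPU is not running. */
		if (!vcpu_is_running(owner)) {
			sched_yield();
			continue;
		}
		/* Loop for a while on the lock value. */
		count = SPIN_RETRY;
		do {
			owner = __atomic_load_n(lock, __ATOMIC_RELAXED);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/* Still held after the retry budget: give up the CPU. */
		sched_yield();
	}
}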
@@ -60,27 +68,32 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 
 	local_irq_restore(flags);
 	while (1) {
-		owner = lp->lock;
-		if (!owner || smp_vcpu_scheduled(~owner)) {
-			count = spin_retry;
-			do {
-				if (arch_spin_is_locked(lp))
-					continue;
-				local_irq_disable();
-				if (_raw_compare_and_swap(&lp->lock, 0, cpu))
-					return;
-				local_irq_restore(flags);
-			} while (count-- > 0);
-			if (MACHINE_IS_LPAR)
-				continue;
-		}
-		owner = lp->lock;
-		if (owner)
-			smp_yield_cpu(~owner);
-		local_irq_disable();
-		if (_raw_compare_and_swap(&lp->lock, 0, cpu))
-			return;
-		local_irq_restore(flags);
+		owner = ACCESS_ONCE(lp->lock);
+		/* Try to get the lock if it is free. */
+		if (!owner) {
+			local_irq_disable();
+			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
+				return;
+			local_irq_restore(flags);
+		}
+		/* Check if the lock owner is running. */
+		if (!smp_vcpu_scheduled(~owner)) {
+			smp_yield_cpu(~owner);
+			continue;
+		}
+		/* Loop for a while on the lock value. */
+		count = spin_retry;
+		do {
+			owner = ACCESS_ONCE(lp->lock);
+		} while (owner && count-- > 0);
+		if (!owner)
+			continue;
+		/*
+		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
+		 * yield the CPU if the lock is still unavailable.
+		 */
+		if (!MACHINE_IS_LPAR)
+			smp_yield_cpu(~owner);
 	}
 }
 EXPORT_SYMBOL(arch_spin_lock_wait_flags);
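The _flags variant differs only in where interrupts are switched: the caller arrives with interrupts disabled and flags holding the previous state, the function immediately re-enables them with local_irq_restore(flags), and every compare-and-swap attempt is bracketed by local_irq_disable() / local_irq_restore(flags), so a successful return always leaves interrupts off while the wait itself keeps them on. A minimal sketch of one such attempt; try_lock_irqsave and the no-op irq helpers are hypothetical stand-ins, not kernel functions:

#include <stdbool.h>

/* Hypothetical no-op stand-ins for local_irq_disable()/local_irq_restore();
 * in the kernel these really mask interrupts on the local CPU. */
static void irq_disable(void) { }
static void irq_restore(unsigned long flags) { (void)flags; }

/* One acquisition attempt: interrupts are off only across the CAS. */
static bool try_lock_irqsave(unsigned int *lock, unsigned int cpu,
			     unsigned long flags)
{
	unsigned int old = 0;

	irq_disable();
	if (__atomic_compare_exchange_n(lock, &old, cpu, false,
					__ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
		return true;	/* lock taken, return with interrupts off */
	irq_restore(flags);	/* failed: re-enable and keep waiting */
	return false;
}

Keeping interrupts enabled while spinning bounds interrupt latency even under heavy lock contention; only the instant of acquisition is masked.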