locking/spinlock/debug: Remove spinlock lockup detection code
The current spinlock lockup detection code can sometimes produce false positives because of the unfairness of the locking algorithm itself. So the lockup detection code is now removed. Instead, we are relying on the NMI watchdog to detect potential lockups. We won't have lockup detection if the watchdog isn't running. The commented-out read-write lock lockup detection code is also removed. Signed-off-by: Waiman Long <longman@redhat.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Sasha Levin <sasha.levin@oracle.com> Cc: Thomas Gleixner <tglx@linutronix.de> Link: http://lkml.kernel.org/r/1486583208-11038-1-git-send-email-longman@redhat.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent
f9af456a61
commit
bc88c10d7e
|
@ -103,38 +103,14 @@ static inline void debug_spin_unlock(raw_spinlock_t *lock)
|
|||
lock->owner_cpu = -1;
|
||||
}
|
||||
|
||||
/*
 * Spin on @lock with arch_spin_trylock()/__delay() for roughly one
 * jiffy's worth of iterations.  If the lock still cannot be taken,
 * report a suspected lockup (and, on SMP, trigger an all-CPU
 * backtrace), then fall back to a plain arch_spin_lock().
 *
 * NOTE(review): this trylock-based detection can produce false
 * positives because the spinning is unfair relative to the real lock
 * algorithm; the NMI watchdog is the more reliable lockup detector.
 */
static void __spin_lock_debug(raw_spinlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * HZ;

	for (i = 0; i < loops; i++) {
		if (arch_spin_trylock(&lock->raw_lock))
			return;
		__delay(1);
	}
	/* lockup suspected: */
	spin_dump(lock, "lockup suspected");
#ifdef CONFIG_SMP
	trigger_all_cpu_backtrace();
#endif

	/*
	 * The trylock above was causing a livelock.  Give the lower level arch
	 * specific lock code a chance to acquire the lock.  We have already
	 * printed a warning/backtrace at this point.  The non-debug arch
	 * specific code might actually succeed in acquiring the lock.  If it is
	 * not successful, the end-result is the same - there is no forward
	 * progress.
	 */
	arch_spin_lock(&lock->raw_lock);
}
|
||||
|
||||
/*
 * Acquire @lock with debug checks before and after the acquisition.
 *
 * Fast path: a single arch_spin_trylock().  On contention, fall into
 * __spin_lock_debug(), which spins (with lockup reporting) until the
 * lock is acquired.
 *
 * Fix: the previous text also called arch_spin_lock() unconditionally
 * AFTER the trylock/__spin_lock_debug() path had already acquired the
 * lock — a second acquisition of a held spinlock, i.e. a self-deadlock.
 * That stray call is removed.
 */
void do_raw_spin_lock(raw_spinlock_t *lock)
{
	debug_spin_lock_before(lock);
	if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
		__spin_lock_debug(lock);
	debug_spin_lock_after(lock);
}
|
||||
|
||||
|
@ -172,32 +148,6 @@ static void rwlock_bug(rwlock_t *lock, const char *msg)
|
|||
|
||||
/* Report a rwlock consistency violation via rwlock_bug() when cond holds. */
#define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)
|
||||
|
||||
/*
 * The "#if 0"-disabled __read_lock_debug() that used to live here has
 * been removed: it was dead, commented-out code implementing the same
 * trylock-spin lockup detection that proved unreliable (false
 * positives) for spinlocks.  Read-lock lockup detection is left to the
 * NMI watchdog.
 */
|
||||
|
||||
void do_raw_read_lock(rwlock_t *lock)
|
||||
{
|
||||
RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
|
||||
|
@ -247,32 +197,6 @@ static inline void debug_write_unlock(rwlock_t *lock)
|
|||
lock->owner_cpu = -1;
|
||||
}
|
||||
|
||||
/*
 * The "#if 0"-disabled __write_lock_debug() that used to live here has
 * been removed: it was dead, commented-out code (the guard itself noted
 * "This can cause lockups") implementing trylock-spin lockup detection.
 * Write-lock lockup detection is left to the NMI watchdog.
 */
|
||||
|
||||
void do_raw_write_lock(rwlock_t *lock)
|
||||
{
|
||||
debug_write_lock_before(lock);
|
||||
|
|
Loading…
Reference in New Issue