/*
 * Debugging code for mutexes
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * lock debugging, locking tree, deadlock detection started by:
 *
 *  Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey
 *  Released under the General Public License (GPL).
 */
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

#include "mutex.h"

/*
 * Must be called with lock->wait_lock held.
 */
void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
{
        memset(waiter, MUTEX_DEBUG_INIT, sizeof(*waiter));
        waiter->magic = waiter;
        INIT_LIST_HEAD(&waiter->list);
        waiter->ww_ctx = MUTEX_POISON_WW_CTX;
}
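
/*
 * Sanity-check a waiter that is about to be woken up: it must still be
 * queued on the lock's wait list and carry its magic value. The caller
 * must hold lock->wait_lock (asserted below).
 */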
void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter)
{
        lockdep_assert_held(&lock->wait_lock);
        DEBUG_LOCKS_WARN_ON(list_empty(&lock->wait_list));
        DEBUG_LOCKS_WARN_ON(waiter->magic != waiter);
        DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
}
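
/*
 * The waiter must already be off the wait list; poison its memory so that
 * any late use of the stale structure trips the debug checks.
 */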
void debug_mutex_free_waiter(struct mutex_waiter *waiter)
{
        DEBUG_LOCKS_WARN_ON(!list_empty(&waiter->list));
        memset(waiter, MUTEX_DEBUG_FREE, sizeof(*waiter));
}
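
/*
 * Called when a task is about to wait on the mutex. The caller must hold
 * lock->wait_lock (asserted below).
 */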
void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
                            struct task_struct *task)
{
        lockdep_assert_held(&lock->wait_lock);

        /* Mark the current thread as blocked on the lock: */
        task->blocked_on = waiter;
}
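
/*
 * Undo debug_mutex_add_waiter(): verify that the waiter/task linkage is
 * still consistent, then clear the task's blocked_on marker and reset the
 * waiter.
 */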
void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
                               struct task_struct *task)
{
        DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
        DEBUG_LOCKS_WARN_ON(waiter->task != task);
        DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
        task->blocked_on = NULL;

        INIT_LIST_HEAD(&waiter->list);
        waiter->task = NULL;
}
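
/*
 * Checked on the unlock path (while lock debugging is still enabled): the
 * mutex must carry its magic value and an initialized wait list.
 */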
void debug_mutex_unlock(struct mutex *lock)
{
        if (likely(debug_locks)) {
                DEBUG_LOCKS_WARN_ON(lock->magic != lock);
                DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
        }
}
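
/*
 * Set up the debug state of a freshly initialized mutex: register it with
 * lockdep (under CONFIG_DEBUG_LOCK_ALLOC) and set the magic value checked
 * by the routines above.
 */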
void debug_mutex_init(struct mutex *lock, const char *name,
                      struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held lock:
         */
        debug_check_no_locks_freed((void *)lock, sizeof(*lock));
        /*
         * Mutexes are sleeping locks: register them with lockdep's
         * wait-type checks as LD_WAIT_SLEEP, so that acquiring a mutex
         * from a non-sleepable context (e.g. while holding a spinlock)
         * is reported as an invalid wait context.
         */
        lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP);
#endif
        lock->magic = lock;
}

/***
 * mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void mutex_destroy(struct mutex *lock)
{
        DEBUG_LOCKS_WARN_ON(mutex_is_locked(lock));
        lock->magic = NULL;
}

EXPORT_SYMBOL_GPL(mutex_destroy);
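
/*
 * Illustrative usage sketch (not part of this file): with
 * CONFIG_DEBUG_MUTEXES=y the checks above catch common misuse, such as
 * destroying a mutex that is still locked. The lock name is hypothetical.
 *
 *	static DEFINE_MUTEX(example_lock);
 *
 *	mutex_lock(&example_lock);
 *	... critical section ...
 *	mutex_unlock(&example_lock);
 *
 *	mutex_destroy(&example_lock);	(must not be called on a locked mutex)
 */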