Lockdep: add lockdep_set_class_and_subclass() and lockdep_set_subclass()
This annotation makes it possible to assign a subclass on lock init. This annotation is meant to reduce the number of _nested() annotations by assigning a default subclass. One could do without this annotation and rely on lockdep_set_class() exclusively, but that would require maintaining a manual stack of struct lock_class_key objects. Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Signed-off-by: Dmitry Torokhov <dtor@mail.ru>
This commit is contained in:
parent
86255d9d0b
commit
4dfbb9d8c6
|
@ -202,7 +202,7 @@ extern int lockdep_internal(void);
|
||||||
*/
|
*/
|
||||||
|
|
||||||
extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
|
extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
|
||||||
struct lock_class_key *key);
|
struct lock_class_key *key, int subclass);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Reinitialize a lock key - for cases where there is special locking or
|
* Reinitialize a lock key - for cases where there is special locking or
|
||||||
|
@ -211,9 +211,14 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
|
||||||
* or they are too narrow (they suffer from a false class-split):
|
* or they are too narrow (they suffer from a false class-split):
|
||||||
*/
|
*/
|
||||||
#define lockdep_set_class(lock, key) \
|
#define lockdep_set_class(lock, key) \
|
||||||
lockdep_init_map(&(lock)->dep_map, #key, key)
|
lockdep_init_map(&(lock)->dep_map, #key, key, 0)
|
||||||
#define lockdep_set_class_and_name(lock, key, name) \
|
#define lockdep_set_class_and_name(lock, key, name) \
|
||||||
lockdep_init_map(&(lock)->dep_map, name, key)
|
lockdep_init_map(&(lock)->dep_map, name, key, 0)
|
||||||
|
#define lockdep_set_class_and_subclass(lock, key, sub) \
|
||||||
|
lockdep_init_map(&(lock)->dep_map, #key, key, sub)
|
||||||
|
#define lockdep_set_subclass(lock, sub) \
|
||||||
|
lockdep_init_map(&(lock)->dep_map, #lock, \
|
||||||
|
(lock)->dep_map.key, sub)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Acquire a lock.
|
* Acquire a lock.
|
||||||
|
@ -257,10 +262,12 @@ static inline int lockdep_internal(void)
|
||||||
# define lock_release(l, n, i) do { } while (0)
|
# define lock_release(l, n, i) do { } while (0)
|
||||||
# define lockdep_init() do { } while (0)
|
# define lockdep_init() do { } while (0)
|
||||||
# define lockdep_info() do { } while (0)
|
# define lockdep_info() do { } while (0)
|
||||||
# define lockdep_init_map(lock, name, key) do { (void)(key); } while (0)
|
# define lockdep_init_map(lock, name, key, sub) do { (void)(key); } while (0)
|
||||||
# define lockdep_set_class(lock, key) do { (void)(key); } while (0)
|
# define lockdep_set_class(lock, key) do { (void)(key); } while (0)
|
||||||
# define lockdep_set_class_and_name(lock, key, name) \
|
# define lockdep_set_class_and_name(lock, key, name) \
|
||||||
do { (void)(key); } while (0)
|
do { (void)(key); } while (0)
|
||||||
|
#define lockdep_set_class_and_subclass(lock, key, sub) \
|
||||||
|
do { (void)(key); } while (0)
|
||||||
# define INIT_LOCKDEP
|
# define INIT_LOCKDEP
|
||||||
# define lockdep_reset() do { debug_locks = 1; } while (0)
|
# define lockdep_reset() do { debug_locks = 1; } while (0)
|
||||||
# define lockdep_free_key_range(start, size) do { } while (0)
|
# define lockdep_free_key_range(start, size) do { } while (0)
|
||||||
|
|
|
@ -1177,7 +1177,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
|
||||||
* itself, so actual lookup of the hash should be once per lock object.
|
* itself, so actual lookup of the hash should be once per lock object.
|
||||||
*/
|
*/
|
||||||
static inline struct lock_class *
|
static inline struct lock_class *
|
||||||
register_lock_class(struct lockdep_map *lock, unsigned int subclass)
|
register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
|
||||||
{
|
{
|
||||||
struct lockdep_subclass_key *key;
|
struct lockdep_subclass_key *key;
|
||||||
struct list_head *hash_head;
|
struct list_head *hash_head;
|
||||||
|
@ -1249,7 +1249,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass)
|
||||||
out_unlock_set:
|
out_unlock_set:
|
||||||
__raw_spin_unlock(&hash_lock);
|
__raw_spin_unlock(&hash_lock);
|
||||||
|
|
||||||
if (!subclass)
|
if (!subclass || force)
|
||||||
lock->class_cache = class;
|
lock->class_cache = class;
|
||||||
|
|
||||||
DEBUG_LOCKS_WARN_ON(class->subclass != subclass);
|
DEBUG_LOCKS_WARN_ON(class->subclass != subclass);
|
||||||
|
@ -1937,7 +1937,7 @@ void trace_softirqs_off(unsigned long ip)
|
||||||
* Initialize a lock instance's lock-class mapping info:
|
* Initialize a lock instance's lock-class mapping info:
|
||||||
*/
|
*/
|
||||||
void lockdep_init_map(struct lockdep_map *lock, const char *name,
|
void lockdep_init_map(struct lockdep_map *lock, const char *name,
|
||||||
struct lock_class_key *key)
|
struct lock_class_key *key, int subclass)
|
||||||
{
|
{
|
||||||
if (unlikely(!debug_locks))
|
if (unlikely(!debug_locks))
|
||||||
return;
|
return;
|
||||||
|
@ -1957,6 +1957,8 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
|
||||||
lock->name = name;
|
lock->name = name;
|
||||||
lock->key = key;
|
lock->key = key;
|
||||||
lock->class_cache = NULL;
|
lock->class_cache = NULL;
|
||||||
|
if (subclass)
|
||||||
|
register_lock_class(lock, subclass, 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
EXPORT_SYMBOL_GPL(lockdep_init_map);
|
EXPORT_SYMBOL_GPL(lockdep_init_map);
|
||||||
|
@ -1995,7 +1997,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
|
||||||
* Not cached yet or subclass?
|
* Not cached yet or subclass?
|
||||||
*/
|
*/
|
||||||
if (unlikely(!class)) {
|
if (unlikely(!class)) {
|
||||||
class = register_lock_class(lock, subclass);
|
class = register_lock_class(lock, subclass, 0);
|
||||||
if (!class)
|
if (!class)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
|
@ -91,7 +91,7 @@ void debug_mutex_init(struct mutex *lock, const char *name,
|
||||||
* Make sure we are not reinitializing a held lock:
|
* Make sure we are not reinitializing a held lock:
|
||||||
*/
|
*/
|
||||||
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
|
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
|
||||||
lockdep_init_map(&lock->dep_map, name, key);
|
lockdep_init_map(&lock->dep_map, name, key, 0);
|
||||||
#endif
|
#endif
|
||||||
lock->owner = NULL;
|
lock->owner = NULL;
|
||||||
lock->magic = lock;
|
lock->magic = lock;
|
||||||
|
|
|
@ -28,7 +28,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
|
||||||
* Make sure we are not reinitializing a held semaphore:
|
* Make sure we are not reinitializing a held semaphore:
|
||||||
*/
|
*/
|
||||||
debug_check_no_locks_freed((void *)sem, sizeof(*sem));
|
debug_check_no_locks_freed((void *)sem, sizeof(*sem));
|
||||||
lockdep_init_map(&sem->dep_map, name, key);
|
lockdep_init_map(&sem->dep_map, name, key, 0);
|
||||||
#endif
|
#endif
|
||||||
sem->activity = 0;
|
sem->activity = 0;
|
||||||
spin_lock_init(&sem->wait_lock);
|
spin_lock_init(&sem->wait_lock);
|
||||||
|
|
|
@ -19,7 +19,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
|
||||||
* Make sure we are not reinitializing a held semaphore:
|
* Make sure we are not reinitializing a held semaphore:
|
||||||
*/
|
*/
|
||||||
debug_check_no_locks_freed((void *)sem, sizeof(*sem));
|
debug_check_no_locks_freed((void *)sem, sizeof(*sem));
|
||||||
lockdep_init_map(&sem->dep_map, name, key);
|
lockdep_init_map(&sem->dep_map, name, key, 0);
|
||||||
#endif
|
#endif
|
||||||
sem->count = RWSEM_UNLOCKED_VALUE;
|
sem->count = RWSEM_UNLOCKED_VALUE;
|
||||||
spin_lock_init(&sem->wait_lock);
|
spin_lock_init(&sem->wait_lock);
|
||||||
|
|
|
@ -20,7 +20,7 @@ void __spin_lock_init(spinlock_t *lock, const char *name,
|
||||||
* Make sure we are not reinitializing a held lock:
|
* Make sure we are not reinitializing a held lock:
|
||||||
*/
|
*/
|
||||||
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
|
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
|
||||||
lockdep_init_map(&lock->dep_map, name, key);
|
lockdep_init_map(&lock->dep_map, name, key, 0);
|
||||||
#endif
|
#endif
|
||||||
lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
|
lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
|
||||||
lock->magic = SPINLOCK_MAGIC;
|
lock->magic = SPINLOCK_MAGIC;
|
||||||
|
@ -38,7 +38,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
|
||||||
* Make sure we are not reinitializing a held lock:
|
* Make sure we are not reinitializing a held lock:
|
||||||
*/
|
*/
|
||||||
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
|
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
|
||||||
lockdep_init_map(&lock->dep_map, name, key);
|
lockdep_init_map(&lock->dep_map, name, key, 0);
|
||||||
#endif
|
#endif
|
||||||
lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED;
|
lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED;
|
||||||
lock->magic = RWLOCK_MAGIC;
|
lock->magic = RWLOCK_MAGIC;
|
||||||
|
|
|
@ -823,7 +823,7 @@ static void inline sock_lock_init(struct sock *sk)
|
||||||
af_family_slock_key_strings[sk->sk_family]);
|
af_family_slock_key_strings[sk->sk_family]);
|
||||||
lockdep_init_map(&sk->sk_lock.dep_map,
|
lockdep_init_map(&sk->sk_lock.dep_map,
|
||||||
af_family_key_strings[sk->sk_family],
|
af_family_key_strings[sk->sk_family],
|
||||||
af_family_keys + sk->sk_family);
|
af_family_keys + sk->sk_family, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
Loading…
Reference in New Issue