lockdep: Make held_lock->check and "int check" argument bool
The "int check" argument of lock_acquire() and held_lock->check are misleading. This is actually a boolean: 2 means "true", everything else is "false". And there is no need to pass 1 or 0 to lock_acquire() depending on CONFIG_PROVE_LOCKING, __lock_acquire() checks prove_locking at the start and clears "check" if !CONFIG_PROVE_LOCKING. Note: probably we can simply kill this member/arg. The only explicit user of check => 0 is rcu_lock_acquire(), perhaps we can change it to use lock_acquire(trylock =>, read => 2). __lockdep_no_validate means check => 0 implicitly, but we can change validate_chain() to check hlock->instance->key instead. Not to mention it would be nice to get rid of lockdep_set_novalidate_class(). Signed-off-by: Oleg Nesterov <oleg@redhat.com> Cc: Dave Jones <davej@redhat.com> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Paul McKenney <paulmck@linux.vnet.ibm.com> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: Alan Stern <stern@rowland.harvard.edu> Cc: Sasha Levin <sasha.levin@oracle.com> Signed-off-by: Peter Zijlstra <peterz@infradead.org> Link: http://lkml.kernel.org/r/20140120182006.GA26495@redhat.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit fb9edbe984
parent ddf1d169c0
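Editor's illustration, not part of the patch ("obj" and its embedded dep_map
are hypothetical): only the meaning of the fifth, "check", argument of
lock_acquire() changes; trylock, read, nest_lock and ip keep their semantics.

	/* before: check is 0 (disabled), 1 (simple checks) or 2 (full validation) */
	lock_acquire(&obj->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_);

	/* after: check is effectively a bool, 0 (simple checks) or 1 (full validation) */
	lock_acquire(&obj->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);

	lock_release(&obj->dep_map, 0, _THIS_IP_);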
@@ -39,17 +39,10 @@
 				lock_acquire(&(l)->dep_map, s, t, r, c, n, i)
 # define __rel(l, n, i) \
 				lock_release(&(l)->dep_map, n, i)
-# ifdef CONFIG_PROVE_LOCKING
-# define lockdep_acquire(l, s, t, i)		__acq(l, s, t, 0, 2, NULL, i)
-# define lockdep_acquire_nest(l, s, t, n, i)	__acq(l, s, t, 0, 2, n, i)
-# define lockdep_acquire_read(l, s, t, i)	__acq(l, s, t, 1, 2, NULL, i)
-# define lockdep_release(l, n, i)		__rel(l, n, i)
-# else
-# define lockdep_acquire(l, s, t, i)		__acq(l, s, t, 0, 1, NULL, i)
-# define lockdep_acquire_nest(l, s, t, n, i)	__acq(l, s, t, 0, 1, n, i)
-# define lockdep_acquire_read(l, s, t, i)	__acq(l, s, t, 1, 1, NULL, i)
-# define lockdep_release(l, n, i)		__rel(l, n, i)
-# endif
+#define lockdep_acquire(l, s, t, i)		__acq(l, s, t, 0, 1, NULL, i)
+#define lockdep_acquire_nest(l, s, t, n, i)	__acq(l, s, t, 0, 1, n, i)
+#define lockdep_acquire_read(l, s, t, i)	__acq(l, s, t, 1, 1, NULL, i)
+#define lockdep_release(l, n, i)		__rel(l, n, i)
 #else
 # define lockdep_acquire(l, s, t, i)		do { } while (0)
 # define lockdep_acquire_nest(l, s, t, n, i)	do { } while (0)
@@ -252,9 +252,9 @@ struct held_lock {
 	unsigned int trylock:1;				/* 16 bits */

 	unsigned int read:2;		/* see lock_acquire() comment */
-	unsigned int check:2;		/* see lock_acquire() comment */
+	unsigned int check:1;		/* see lock_acquire() comment */
 	unsigned int hardirqs_off:1;
-	unsigned int references:11;			/* 32 bits */
+	unsigned int references:12;			/* 32 bits */
 };

 /*
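Editor's note on the hunk above: check loses one bit and references gains it,
so the flag block keeps its size. A compilable sketch of just this part of the
layout (field names taken from the hunk, the struct name is invented purely
for illustration):

	struct held_lock_tail_sketch {
		unsigned int read:2;
		unsigned int check:1;		/* was :2 */
		unsigned int hardirqs_off:1;
		unsigned int references:12;	/* was :11 */
	};
	/* 2 + 1 + 1 + 12 = 16 bits, the same as 2 + 2 + 1 + 11 before,
	 * so the 32-bit packing noted in the struct comments is preserved. */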
@@ -326,9 +326,8 @@ static inline int lockdep_match_key(struct lockdep_map *lock,
  *
  * Values for check:
  *
- *   0: disabled
- *   1: simple checks (freeing, held-at-exit-time, etc.)
- *   2: full validation
+ *   0: simple checks (freeing, held-at-exit-time, etc.)
+ *   1: full validation
  */
 extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 			 int trylock, int read, int check,
@@ -479,15 +478,9 @@ static inline void print_irqtrace_events(struct task_struct *curr)
  * on the per lock-class debug mode:
  */

-#ifdef CONFIG_PROVE_LOCKING
-#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 2, n, i)
-#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 2, n, i)
-#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 2, n, i)
-#else
-#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
-#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
-#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)
-#endif
+#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
+#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
+#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)

 #define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
 #define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
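Editor's note, not part of the patch: with the single set of wrappers above,
an annotation such as spin_acquire() now always expands to a check=1 call,
e.g. for a hypothetical spinlock "lock":

	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_)
		-> lock_acquire_exclusive(&lock->dep_map, 0, 0, NULL, _RET_IP_)
		-> lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, _RET_IP_)

Whether full validation actually runs is decided inside __lock_acquire(),
which clears "check" when CONFIG_PROVE_LOCKING is disabled (see the
__lock_acquire() hunks below).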
@@ -518,13 +511,13 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 # define might_lock(lock) \
 do { \
 	typecheck(struct lockdep_map *, &(lock)->dep_map); \
-	lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_); \
+	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_); \
 	lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
 } while (0)
 # define might_lock_read(lock) \
 do { \
 	typecheck(struct lockdep_map *, &(lock)->dep_map); \
-	lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_); \
+	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_); \
 	lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
 } while (0)
 #else
@@ -314,7 +314,7 @@ static inline bool rcu_lockdep_current_cpu_online(void)

 static inline void rcu_lock_acquire(struct lockdep_map *map)
 {
-	lock_acquire(map, 0, 0, 2, 1, NULL, _THIS_IP_);
+	lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_);
 }

 static inline void rcu_lock_release(struct lockdep_map *map)
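The hunk above makes rcu_lock_acquire() the one explicit check=0 user
mentioned in the changelog. A minimal sketch of what such an annotation looks
like for a hypothetical lockdep_map (names invented; assumes the map was
initialized with lockdep_init_map()):

	static struct lockdep_map my_map;	/* hypothetical map */

	static inline void my_enter(void)
	{
		/* read=2 (recursive read), check=0: the acquisition is still
		 * recorded on the held-lock stack, so the simple checks
		 * (freeing a held lock, locks held at task exit) apply, but
		 * no dependency or irq-state validation is done for it. */
		lock_acquire(&my_map, 0, 0, 2, 0, NULL, _THIS_IP_);
	}

	static inline void my_exit(void)
	{
		lock_release(&my_map, 0, _THIS_IP_);
	}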
@@ -2098,7 +2098,7 @@ static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
 	 * (If lookup_chain_cache() returns with 1 it acquires
 	 * graph_lock for us)
 	 */
-	if (!hlock->trylock && (hlock->check == 2) &&
+	if (!hlock->trylock && hlock->check &&
 	    lookup_chain_cache(curr, hlock, chain_key)) {
 		/*
 		 * Check whether last held lock:
@@ -3055,9 +3055,6 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	int class_idx;
 	u64 chain_key;

-	if (!prove_locking)
-		check = 1;
-
 	if (unlikely(!debug_locks))
 		return 0;

@@ -3069,8 +3066,8 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return 0;

-	if (lock->key == &__lockdep_no_validate__)
-		check = 1;
+	if (!prove_locking || lock->key == &__lockdep_no_validate__)
+		check = 0;

 	if (subclass < NR_LOCKDEP_CACHING_CLASSES)
 		class = lock->class_cache[subclass];
@@ -3138,7 +3135,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	hlock->holdtime_stamp = lockstat_clock();
 #endif

-	if (check == 2 && !mark_irqflags(curr, hlock))
+	if (check && !mark_irqflags(curr, hlock))
 		return 0;

 	/* mark it as used: */