locking/lockdep: Add a function building a chain between two classes
Crossrelease needs to build a chain between two classes regardless of their contexts. However, add_chain_cache() cannot be used for that purpose because it assumes it is called in the acquisition context of the hlock. This patch therefore introduces a new function that builds such a chain directly from two classes.

Signed-off-by: Byungchul Park <byungchul.park@lge.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: akpm@linux-foundation.org
Cc: boqun.feng@gmail.com
Cc: kernel-team@lge.com
Cc: kirill@shutemov.name
Cc: npiggin@gmail.com
Cc: walken@google.com
Cc: willy@infradead.org
Link: http://lkml.kernel.org/r/1502089981-21272-3-git-send-email-byungchul.park@lge.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 49347a986a
parent 545c23f2e9
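To make the intended usage concrete, here is a minimal caller sketch that is not part of this patch. It assumes 1-based class indices of the kind stored in held_lock::class_idx, a chain key folded together with iterate_chain_key() starting from 0, and the lookup_chain_cache() helper from the parent commit; the wrapper name build_cross_chain() is purely illustrative. The caller is expected to run with IRQs disabled (the new function warns otherwise) and with graph_lock held, since its failure paths drop it via debug_locks_off_graph_unlock().

```c
/*
 * Illustrative caller (not part of this patch): record a prev -> next
 * dependency between two lock classes from outside the acquiring
 * task's context. prev_idx/next_idx are 1-based class indices and
 * irq_ctx is the irq_context the chain should be filed under.
 *
 * Assumes graph_lock is held and IRQs are disabled.
 */
static int build_cross_chain(unsigned int prev_idx, unsigned int next_idx,
			     unsigned int irq_ctx)
{
	u64 chain_key;

	/* Fold both class indices into a chain key, as lockdep does. */
	chain_key = iterate_chain_key((u64)0, prev_idx);
	chain_key = iterate_chain_key(chain_key, next_idx);

	/* Nothing to do if this two-class chain is already cached. */
	if (lookup_chain_cache(chain_key))
		return 1;

	return add_chain_cache_classes(prev_idx, next_idx, irq_ctx, chain_key);
}
```

This mirrors the shape of the crossrelease commit path this series builds toward, where the two classes come from a crosslock and a historical lock rather than from curr->held_locks.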
@@ -2150,6 +2150,76 @@ static int check_no_collision(struct task_struct *curr,
 	return 1;
 }
 
+/*
+ * This is for building a chain between just two different classes,
+ * instead of adding a new hlock upon current, which is done by
+ * add_chain_cache().
+ *
+ * This can be called in any context with two classes, while
+ * add_chain_cache() must be done within the lock owner's context
+ * since it uses hlock which might be racy in another context.
+ */
+static inline int add_chain_cache_classes(unsigned int prev,
+					  unsigned int next,
+					  unsigned int irq_context,
+					  u64 chain_key)
+{
+	struct hlist_head *hash_head = chainhashentry(chain_key);
+	struct lock_chain *chain;
+
+	/*
+	 * Allocate a new chain entry from the static array, and add
+	 * it to the hash:
+	 */
+
+	/*
+	 * We might need to take the graph lock, ensure we've got IRQs
+	 * disabled to make this an IRQ-safe lock.. for recursion reasons
+	 * lockdep won't complain about its own locking errors.
+	 */
+	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+		return 0;
+
+	if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
+		if (!debug_locks_off_graph_unlock())
+			return 0;
+
+		print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
+		dump_stack();
+		return 0;
+	}
+
+	chain = lock_chains + nr_lock_chains++;
+	chain->chain_key = chain_key;
+	chain->irq_context = irq_context;
+	chain->depth = 2;
+	if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
+		chain->base = nr_chain_hlocks;
+		nr_chain_hlocks += chain->depth;
+		chain_hlocks[chain->base] = prev - 1;
+		chain_hlocks[chain->base + 1] = next - 1;
+	}
+#ifdef CONFIG_DEBUG_LOCKDEP
+	/*
+	 * Important for check_no_collision().
+	 */
+	else {
+		if (!debug_locks_off_graph_unlock())
+			return 0;
+
+		print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
+		dump_stack();
+		return 0;
+	}
+#endif
+
+	hlist_add_head_rcu(&chain->entry, hash_head);
+	debug_atomic_inc(chain_lookup_misses);
+	inc_chains();
+
+	return 1;
+}
+
 /*
  * Adds a dependency chain into chain hashtable. And must be called with
  * graph_lock held.