locking/lockdep: Initialize the locks_before and locks_after lists earlier

This patch does not change any functionality. A later patch will reuse
lock classes that have been freed. In combination with that patch, this
patch will have the effect of initializing lock class order lists once
instead of every time a lock class structure is reinitialized.
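
As an aside for readers outside the kernel tree: the pattern here is just a
static guard flag plus a one-time loop over the class array. Below is a
minimal, self-contained userspace sketch of that pattern; the struct layout,
the ARRAY_SIZE() macro and the 8192-entry array size are simplified
stand-ins for illustration, not the real lockdep definitions.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's struct list_head and lock_class. */
struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *head)
{
	head->next = head;
	head->prev = head;
}

struct lock_class {
	struct list_head locks_after;
	struct list_head locks_before;
};

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static struct lock_class lock_classes[8192];	/* illustrative size */

/*
 * Initialize the per-class order lists exactly once, no matter how often
 * this is called. Later callers find the lists already set up (and, once
 * classes can be reused, a freed class leaves its lists behind empty).
 */
static void init_data_structures_once(void)
{
	static bool initialization_happened;
	size_t i;

	if (initialization_happened)
		return;

	initialization_happened = true;

	for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
		INIT_LIST_HEAD(&lock_classes[i].locks_after);
		INIT_LIST_HEAD(&lock_classes[i].locks_before);
	}
}

int main(void)
{
	init_data_structures_once();
	init_data_structures_once();	/* second call returns immediately */

	printf("locks_after[0] empty: %d\n",
	       lock_classes[0].locks_after.next == &lock_classes[0].locks_after);
	return 0;
}

Repeated calls after the first are cheap no-ops, which is what makes it
safe to call init_data_structures_once() from every path that may touch
the lists.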

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Johannes Berg <johannes@sipsolutions.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman Long <longman@redhat.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: johannes.berg@intel.com
Cc: tj@kernel.org
Link: https://lkml.kernel.org/r/20190214230058.196511-8-bvanassche@acm.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 1 file changed, 27 insertions(+), 2 deletions(-)

@@ -735,6 +735,25 @@ static bool assign_lock_key(struct lockdep_map *lock)
 	return true;
 }
 
+/*
+ * Initialize the lock_classes[] array elements.
+ */
+static void init_data_structures_once(void)
+{
+	static bool initialization_happened;
+	int i;
+
+	if (likely(initialization_happened))
+		return;
+
+	initialization_happened = true;
+
+	for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
+		INIT_LIST_HEAD(&lock_classes[i].locks_after);
+		INIT_LIST_HEAD(&lock_classes[i].locks_before);
+	}
+}
+
 /*
  * Register a lock's class in the hash-table, if the class is not present
  * yet. Otherwise we look it up. We cache the result in the lock object
@@ -775,6 +794,8 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 		goto out_unlock_set;
 	}
 
+	init_data_structures_once();
+
 	/*
 	 * Allocate a new key from the static array, and add it to
 	 * the hash:
@@ -793,8 +814,8 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	class->key = key;
 	class->name = lock->name;
 	class->subclass = subclass;
-	INIT_LIST_HEAD(&class->locks_before);
-	INIT_LIST_HEAD(&class->locks_after);
+	WARN_ON_ONCE(!list_empty(&class->locks_before));
+	WARN_ON_ONCE(!list_empty(&class->locks_after));
 	class->name_version = count_matching_names(class);
 	/*
 	 * We use RCU's safe list-add method to make
@@ -4155,6 +4176,8 @@ void lockdep_free_key_range(void *start, unsigned long size)
 	int i;
 	int locked;
 
+	init_data_structures_once();
+
 	raw_local_irq_save(flags);
 	locked = graph_lock();
 
@@ -4218,6 +4241,8 @@ void lockdep_reset_lock(struct lockdep_map *lock)
 	unsigned long flags;
 	int j, locked;
 
+	init_data_structures_once();
+
 	raw_local_irq_save(flags);
 	locked = graph_lock();
 