lockdep: Fix -Wunused-parameter for _THIS_IP_

While looking into a bug related to the compiler's handling of addresses
of labels, I noticed some uses of _THIS_IP_ seemed unused in lockdep.
Drive-by cleanup.

-Wunused-parameter:
kernel/locking/lockdep.c:1383:22: warning: unused parameter 'ip'
kernel/locking/lockdep.c:4246:48: warning: unused parameter 'ip'
kernel/locking/lockdep.c:4844:19: warning: unused parameter 'ip'

Signed-off-by: Nick Desaulniers <ndesaulniers@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Waiman Long <longman@redhat.com>
Link: https://lore.kernel.org/r/20220314221909.2027027-1-ndesaulniers@google.com
Commit: 8b023accc8
Parent: ace1a98519
@ -75,7 +75,7 @@ static __always_inline void __exit_to_kernel_mode(struct pt_regs *regs)
|
|||
if (interrupts_enabled(regs)) {
|
||||
if (regs->exit_rcu) {
|
||||
trace_hardirqs_on_prepare();
|
||||
lockdep_hardirqs_on_prepare(CALLER_ADDR0);
|
||||
lockdep_hardirqs_on_prepare();
|
||||
rcu_irq_exit();
|
||||
lockdep_hardirqs_on(CALLER_ADDR0);
|
||||
return;
|
||||
|
@ -121,7 +121,7 @@ static __always_inline void enter_from_user_mode(struct pt_regs *regs)
|
|||
static __always_inline void __exit_to_user_mode(void)
|
||||
{
|
||||
trace_hardirqs_on_prepare();
|
||||
lockdep_hardirqs_on_prepare(CALLER_ADDR0);
|
||||
lockdep_hardirqs_on_prepare();
|
||||
user_enter_irqoff();
|
||||
lockdep_hardirqs_on(CALLER_ADDR0);
|
||||
}
|
||||
|
@ -179,7 +179,7 @@ static void noinstr arm64_exit_nmi(struct pt_regs *regs)
|
|||
ftrace_nmi_exit();
|
||||
if (restore) {
|
||||
trace_hardirqs_on_prepare();
|
||||
lockdep_hardirqs_on_prepare(CALLER_ADDR0);
|
||||
lockdep_hardirqs_on_prepare();
|
||||
}
|
||||
|
||||
rcu_nmi_exit();
|
||||
|
@ -215,7 +215,7 @@ static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
|
|||
|
||||
if (restore) {
|
||||
trace_hardirqs_on_prepare();
|
||||
lockdep_hardirqs_on_prepare(CALLER_ADDR0);
|
||||
lockdep_hardirqs_on_prepare();
|
||||
}
|
||||
|
||||
rcu_nmi_exit();
|
||||
|
|
|
@ -20,13 +20,13 @@
|
|||
#ifdef CONFIG_PROVE_LOCKING
|
||||
extern void lockdep_softirqs_on(unsigned long ip);
|
||||
extern void lockdep_softirqs_off(unsigned long ip);
|
||||
extern void lockdep_hardirqs_on_prepare(unsigned long ip);
|
||||
extern void lockdep_hardirqs_on_prepare(void);
|
||||
extern void lockdep_hardirqs_on(unsigned long ip);
|
||||
extern void lockdep_hardirqs_off(unsigned long ip);
|
||||
#else
|
||||
static inline void lockdep_softirqs_on(unsigned long ip) { }
|
||||
static inline void lockdep_softirqs_off(unsigned long ip) { }
|
||||
static inline void lockdep_hardirqs_on_prepare(unsigned long ip) { }
|
||||
static inline void lockdep_hardirqs_on_prepare(void) { }
|
||||
static inline void lockdep_hardirqs_on(unsigned long ip) { }
|
||||
static inline void lockdep_hardirqs_off(unsigned long ip) { }
|
||||
#endif
|
||||
|
|
|
@ -450,7 +450,7 @@ static __always_inline void guest_state_enter_irqoff(void)
|
|||
{
|
||||
instrumentation_begin();
|
||||
trace_hardirqs_on_prepare();
|
||||
lockdep_hardirqs_on_prepare(CALLER_ADDR0);
|
||||
lockdep_hardirqs_on_prepare();
|
||||
instrumentation_end();
|
||||
|
||||
guest_context_enter_irqoff();
|
||||
|
|
|
@ -126,7 +126,7 @@ static __always_inline void __exit_to_user_mode(void)
|
|||
{
|
||||
instrumentation_begin();
|
||||
trace_hardirqs_on_prepare();
|
||||
lockdep_hardirqs_on_prepare(CALLER_ADDR0);
|
||||
lockdep_hardirqs_on_prepare();
|
||||
instrumentation_end();
|
||||
|
||||
user_enter_irqoff();
|
||||
|
@ -416,7 +416,7 @@ noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state)
|
|||
instrumentation_begin();
|
||||
/* Tell the tracer that IRET will enable interrupts */
|
||||
trace_hardirqs_on_prepare();
|
||||
lockdep_hardirqs_on_prepare(CALLER_ADDR0);
|
||||
lockdep_hardirqs_on_prepare();
|
||||
instrumentation_end();
|
||||
rcu_irq_exit();
|
||||
lockdep_hardirqs_on(CALLER_ADDR0);
|
||||
|
@ -465,7 +465,7 @@ void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state)
|
|||
ftrace_nmi_exit();
|
||||
if (irq_state.lockdep) {
|
||||
trace_hardirqs_on_prepare();
|
||||
lockdep_hardirqs_on_prepare(CALLER_ADDR0);
|
||||
lockdep_hardirqs_on_prepare();
|
||||
}
|
||||
instrumentation_end();
|
||||
|
||||
|
|
|
@ -1380,7 +1380,7 @@ static struct lock_list *alloc_list_entry(void)
|
|||
*/
|
||||
static int add_lock_to_list(struct lock_class *this,
|
||||
struct lock_class *links_to, struct list_head *head,
|
||||
unsigned long ip, u16 distance, u8 dep,
|
||||
u16 distance, u8 dep,
|
||||
const struct lock_trace *trace)
|
||||
{
|
||||
struct lock_list *entry;
|
||||
|
@ -3133,19 +3133,15 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
|
|||
* to the previous lock's dependency list:
|
||||
*/
|
||||
ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
|
||||
&hlock_class(prev)->locks_after,
|
||||
next->acquire_ip, distance,
|
||||
calc_dep(prev, next),
|
||||
*trace);
|
||||
&hlock_class(prev)->locks_after, distance,
|
||||
calc_dep(prev, next), *trace);
|
||||
|
||||
if (!ret)
|
||||
return 0;
|
||||
|
||||
ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
|
||||
&hlock_class(next)->locks_before,
|
||||
next->acquire_ip, distance,
|
||||
calc_depb(prev, next),
|
||||
*trace);
|
||||
&hlock_class(next)->locks_before, distance,
|
||||
calc_depb(prev, next), *trace);
|
||||
if (!ret)
|
||||
return 0;
|
||||
|
||||
|
@ -4236,14 +4232,13 @@ static void __trace_hardirqs_on_caller(void)
|
|||
|
||||
/**
|
||||
* lockdep_hardirqs_on_prepare - Prepare for enabling interrupts
|
||||
* @ip: Caller address
|
||||
*
|
||||
* Invoked before a possible transition to RCU idle from exit to user or
|
||||
* guest mode. This ensures that all RCU operations are done before RCU
|
||||
* stops watching. After the RCU transition lockdep_hardirqs_on() has to be
|
||||
* invoked to set the final state.
|
||||
*/
|
||||
void lockdep_hardirqs_on_prepare(unsigned long ip)
|
||||
void lockdep_hardirqs_on_prepare(void)
|
||||
{
|
||||
if (unlikely(!debug_locks))
|
||||
return;
|
||||
|
@ -4840,8 +4835,7 @@ EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
|
|||
|
||||
static void
|
||||
print_lock_nested_lock_not_held(struct task_struct *curr,
|
||||
struct held_lock *hlock,
|
||||
unsigned long ip)
|
||||
struct held_lock *hlock)
|
||||
{
|
||||
if (!debug_locks_off())
|
||||
return;
|
||||
|
@ -5017,7 +5011,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
|
|||
chain_key = iterate_chain_key(chain_key, hlock_id(hlock));
|
||||
|
||||
if (nest_lock && !__lock_is_held(nest_lock, -1)) {
|
||||
print_lock_nested_lock_not_held(curr, hlock, ip);
|
||||
print_lock_nested_lock_not_held(curr, hlock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -102,7 +102,7 @@ void __cpuidle default_idle_call(void)
|
|||
* last -- this is very similar to the entry code.
|
||||
*/
|
||||
trace_hardirqs_on_prepare();
|
||||
lockdep_hardirqs_on_prepare(_THIS_IP_);
|
||||
lockdep_hardirqs_on_prepare();
|
||||
rcu_idle_enter();
|
||||
lockdep_hardirqs_on(_THIS_IP_);
|
||||
|
||||
|
|
|
@ -46,7 +46,7 @@ void trace_hardirqs_on(void)
|
|||
this_cpu_write(tracing_irq_cpu, 0);
|
||||
}
|
||||
|
||||
lockdep_hardirqs_on_prepare(CALLER_ADDR0);
|
||||
lockdep_hardirqs_on_prepare();
|
||||
lockdep_hardirqs_on(CALLER_ADDR0);
|
||||
}
|
||||
EXPORT_SYMBOL(trace_hardirqs_on);
|
||||
|
@ -94,7 +94,7 @@ __visible void trace_hardirqs_on_caller(unsigned long caller_addr)
|
|||
this_cpu_write(tracing_irq_cpu, 0);
|
||||
}
|
||||
|
||||
lockdep_hardirqs_on_prepare(CALLER_ADDR0);
|
||||
lockdep_hardirqs_on_prepare();
|
||||
lockdep_hardirqs_on(CALLER_ADDR0);
|
||||
}
|
||||
EXPORT_SYMBOL(trace_hardirqs_on_caller);
|
||||
|
|