lockdep: rename map_[acquire|release]() => lock_map_[acquire|release]()
the names were too generic:

    drivers/uio/uio.c:87: error: expected identifier or '(' before 'do'
    drivers/uio/uio.c:87: error: expected identifier or '(' before 'while'
    drivers/uio/uio.c:113: error: 'map_release' undeclared here (not in a function)

Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent
8bfe0298f7
commit
3295f0ef9f
|
@ -291,7 +291,7 @@ handle_t *journal_start(journal_t *journal, int nblocks)
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
map_acquire(&handle->h_lockdep_map);
|
lock_map_acquire(&handle->h_lockdep_map);
|
||||||
|
|
||||||
out:
|
out:
|
||||||
return handle;
|
return handle;
|
||||||
|
@ -1448,7 +1448,7 @@ int journal_stop(handle_t *handle)
|
||||||
spin_unlock(&journal->j_state_lock);
|
spin_unlock(&journal->j_state_lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
map_release(&handle->h_lockdep_map);
|
lock_map_release(&handle->h_lockdep_map);
|
||||||
|
|
||||||
jbd_free_handle(handle);
|
jbd_free_handle(handle);
|
||||||
return err;
|
return err;
|
||||||
|
|
|
@ -301,7 +301,7 @@ handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
map_acquire(&handle->h_lockdep_map);
|
lock_map_acquire(&handle->h_lockdep_map);
|
||||||
out:
|
out:
|
||||||
return handle;
|
return handle;
|
||||||
}
|
}
|
||||||
|
@ -1279,7 +1279,7 @@ int jbd2_journal_stop(handle_t *handle)
|
||||||
spin_unlock(&journal->j_state_lock);
|
spin_unlock(&journal->j_state_lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
map_release(&handle->h_lockdep_map);
|
lock_map_release(&handle->h_lockdep_map);
|
||||||
|
|
||||||
jbd2_free_handle(handle);
|
jbd2_free_handle(handle);
|
||||||
return err;
|
return err;
|
||||||
|
|
|
@ -465,14 +465,14 @@ static inline void print_irqtrace_events(struct task_struct *curr)
|
||||||
|
|
||||||
#ifdef CONFIG_DEBUG_LOCK_ALLOC
|
#ifdef CONFIG_DEBUG_LOCK_ALLOC
|
||||||
# ifdef CONFIG_PROVE_LOCKING
|
# ifdef CONFIG_PROVE_LOCKING
|
||||||
# define map_acquire(l) lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
|
# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
|
||||||
# else
|
# else
|
||||||
# define map_acquire(l) lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
|
# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
|
||||||
# endif
|
# endif
|
||||||
# define map_release(l) lock_release(l, 1, _THIS_IP_)
|
# define lock_map_release(l) lock_release(l, 1, _THIS_IP_)
|
||||||
#else
|
#else
|
||||||
# define map_acquire(l) do { } while (0)
|
# define lock_map_acquire(l) do { } while (0)
|
||||||
# define map_release(l) do { } while (0)
|
# define lock_map_release(l) do { } while (0)
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#endif /* __LINUX_LOCKDEP_H */
|
#endif /* __LINUX_LOCKDEP_H */
|
||||||
|
|
|
@ -290,11 +290,11 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
|
||||||
|
|
||||||
BUG_ON(get_wq_data(work) != cwq);
|
BUG_ON(get_wq_data(work) != cwq);
|
||||||
work_clear_pending(work);
|
work_clear_pending(work);
|
||||||
map_acquire(&cwq->wq->lockdep_map);
|
lock_map_acquire(&cwq->wq->lockdep_map);
|
||||||
map_acquire(&lockdep_map);
|
lock_map_acquire(&lockdep_map);
|
||||||
f(work);
|
f(work);
|
||||||
map_release(&lockdep_map);
|
lock_map_release(&lockdep_map);
|
||||||
map_release(&cwq->wq->lockdep_map);
|
lock_map_release(&cwq->wq->lockdep_map);
|
||||||
|
|
||||||
if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
|
if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
|
||||||
printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
|
printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
|
||||||
|
@ -413,8 +413,8 @@ void flush_workqueue(struct workqueue_struct *wq)
|
||||||
int cpu;
|
int cpu;
|
||||||
|
|
||||||
might_sleep();
|
might_sleep();
|
||||||
map_acquire(&wq->lockdep_map);
|
lock_map_acquire(&wq->lockdep_map);
|
||||||
map_release(&wq->lockdep_map);
|
lock_map_release(&wq->lockdep_map);
|
||||||
for_each_cpu_mask_nr(cpu, *cpu_map)
|
for_each_cpu_mask_nr(cpu, *cpu_map)
|
||||||
flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
|
flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
|
||||||
}
|
}
|
||||||
|
@ -441,8 +441,8 @@ int flush_work(struct work_struct *work)
|
||||||
if (!cwq)
|
if (!cwq)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
map_acquire(&cwq->wq->lockdep_map);
|
lock_map_acquire(&cwq->wq->lockdep_map);
|
||||||
map_release(&cwq->wq->lockdep_map);
|
lock_map_release(&cwq->wq->lockdep_map);
|
||||||
|
|
||||||
prev = NULL;
|
prev = NULL;
|
||||||
spin_lock_irq(&cwq->lock);
|
spin_lock_irq(&cwq->lock);
|
||||||
|
@ -536,8 +536,8 @@ static void wait_on_work(struct work_struct *work)
|
||||||
|
|
||||||
might_sleep();
|
might_sleep();
|
||||||
|
|
||||||
map_acquire(&work->lockdep_map);
|
lock_map_acquire(&work->lockdep_map);
|
||||||
map_release(&work->lockdep_map);
|
lock_map_release(&work->lockdep_map);
|
||||||
|
|
||||||
cwq = get_wq_data(work);
|
cwq = get_wq_data(work);
|
||||||
if (!cwq)
|
if (!cwq)
|
||||||
|
@ -861,8 +861,8 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
|
||||||
if (cwq->thread == NULL)
|
if (cwq->thread == NULL)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
map_acquire(&cwq->wq->lockdep_map);
|
lock_map_acquire(&cwq->wq->lockdep_map);
|
||||||
map_release(&cwq->wq->lockdep_map);
|
lock_map_release(&cwq->wq->lockdep_map);
|
||||||
|
|
||||||
flush_cpu_workqueue(cwq);
|
flush_cpu_workqueue(cwq);
|
||||||
/*
|
/*
|
||||||
|
|
Loading…
Reference in New Issue