Merge branch 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking fixes from Thomas Gleixner:
 "A set of locking fixes:

   - Address the fallout of the rwsem rework: missing ACQUIREs and a
     sanity check to prevent a use-after-free

   - Add missing checks for uninitialized mutexes when mutex debugging
     is enabled

   - Remove the bogus code in the generic SMP variant of
     arch_futex_atomic_op_inuser()

   - Fix up the #ifdeffery in lockdep to prevent compile warnings"

* 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/mutex: Test for initialized mutex
  locking/lockdep: Clean up #ifdef checks
  locking/lockdep: Hide unused 'class' variable
  locking/rwsem: Add ACQUIRE comments
  tty/ldsem, locking/rwsem: Add missing ACQUIRE to read_failed sleep loop
  lcoking/rwsem: Add missing ACQUIRE to read_slowpath sleep loop
  locking/rwsem: Add missing ACQUIRE to read_slowpath exit when queue is empty
  locking/rwsem: Don't call owner_on_cpu() on read-owner
  futex: Cleanup generic SMP variant of arch_futex_atomic_op_inuser()
commit 431f288ed7
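The common thread in the ACQUIRE fixes below is a release/acquire handoff on waiter->task: the waking side publishes NULL with release semantics, and the sleeping side must re-read the field with acquire semantics so that everything the waker did before the handoff is visible to the woken task. As a minimal sketch, here is a user-space C11 analogue of that pairing; struct waiter, grant_lock() and lock_granted() are illustrative names, not kernel API:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct task;                            /* stands in for struct task_struct */

    struct waiter {
            _Atomic(struct task *) task;    /* non-NULL while still waiting */
    };

    /* Waker side: the release store publishes all prior writes before the
     * sleeper can observe task == NULL (kernel: smp_store_release()). */
    static void grant_lock(struct waiter *w)
    {
            atomic_store_explicit(&w->task, NULL, memory_order_release);
            /* ...the kernel code follows this with wake_up_process() */
    }

    /* Sleeper side: the acquire load pairs with the release store above
     * (kernel: smp_load_acquire()); a plain load would let later reads of
     * the protected state be reordered before this check. */
    static bool lock_granted(struct waiter *w)
    {
            return atomic_load_explicit(&w->task, memory_order_acquire) == NULL;
    }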
--- a/drivers/tty/tty_ldsem.c
+++ b/drivers/tty/tty_ldsem.c
@@ -93,8 +93,7 @@ static void __ldsem_wake_readers(struct ld_semaphore *sem)
         list_for_each_entry_safe(waiter, next, &sem->read_wait, list) {
                 tsk = waiter->task;
-                smp_mb();
-                waiter->task = NULL;
+                smp_store_release(&waiter->task, NULL);
                 wake_up_process(tsk);
                 put_task_struct(tsk);
         }
@@ -194,7 +193,7 @@ down_read_failed(struct ld_semaphore *sem, long count, long timeout)
         for (;;) {
                 set_current_state(TASK_UNINTERRUPTIBLE);
 
-                if (!waiter.task)
+                if (!smp_load_acquire(&waiter.task))
                         break;
                 if (!timeout)
                         break;
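In the old tty_ldsem code the waker ordered its prior writes with a bare smp_mb() before the plain store, but the sleeper's plain read of waiter.task had no ordering at all; rewriting the pair as smp_store_release()/smp_load_acquire() supplies the missing ACQUIRE and makes the pairing explicit.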
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -118,26 +118,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 static inline int
 arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
 {
-        int oldval = 0, ret;
-
-        pagefault_disable();
-
-        switch (op) {
-        case FUTEX_OP_SET:
-        case FUTEX_OP_ADD:
-        case FUTEX_OP_OR:
-        case FUTEX_OP_ANDN:
-        case FUTEX_OP_XOR:
-        default:
-                ret = -ENOSYS;
-        }
-
-        pagefault_enable();
-
-        if (!ret)
-                *oval = oldval;
-
-        return ret;
+        return -ENOSYS;
 }
 
 static inline int
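The removed body was dead code: every FUTEX_OP_* case label fell through to default:, so ret was always -ENOSYS, oldval was never written, and the pagefault_disable()/pagefault_enable() bracket protected nothing. Returning -ENOSYS directly is behaviourally identical.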
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -448,7 +448,7 @@ static void print_lockdep_off(const char *bug_msg)
 
 unsigned long nr_stack_trace_entries;
 
-#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
+#ifdef CONFIG_PROVE_LOCKING
 /*
  * Stack-trace: tightly packed array of stack backtrace
  * addresses. Protected by the graph_lock.
@@ -491,7 +491,7 @@ unsigned int max_lockdep_depth;
 DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
 #endif
 
-#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
+#ifdef CONFIG_PROVE_LOCKING
 /*
  * Locking printouts:
  */
@@ -2969,7 +2969,7 @@ static void check_chain_key(struct task_struct *curr)
 #endif
 }
 
-#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
+#ifdef CONFIG_PROVE_LOCKING
 static int mark_lock(struct task_struct *curr, struct held_lock *this,
                      enum lock_usage_bit new_bit);
 
@@ -3608,7 +3608,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
         return ret;
 }
 
-#else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
+#else /* CONFIG_PROVE_LOCKING */
 
 static inline int
 mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
@@ -3627,7 +3627,7 @@ static inline int separate_irq_context(struct task_struct *curr,
         return 0;
 }
 
-#endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
+#endif /* CONFIG_PROVE_LOCKING */
 
 /*
  * Initialize a lock instance's lock-class mapping info:
@@ -4321,8 +4321,7 @@ static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie
  */
 static void check_flags(unsigned long flags)
 {
-#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
-    defined(CONFIG_TRACE_IRQFLAGS)
+#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP)
         if (!debug_locks)
                 return;
 
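The simplified guards rely on CONFIG_PROVE_LOCKING selecting CONFIG_TRACE_IRQFLAGS in Kconfig, so the compound defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) test was redundant and only obscured what the code was actually conditional on.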
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -200,7 +200,6 @@ static void lockdep_stats_debug_show(struct seq_file *m)
 
 static int lockdep_stats_show(struct seq_file *m, void *v)
 {
-        struct lock_class *class;
         unsigned long nr_unused = 0, nr_uncategorized = 0,
                       nr_irq_safe = 0, nr_irq_unsafe = 0,
                       nr_softirq_safe = 0, nr_softirq_unsafe = 0,
@@ -211,6 +210,8 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
                       sum_forward_deps = 0;
 
 #ifdef CONFIG_PROVE_LOCKING
+        struct lock_class *class;
+
         list_for_each_entry(class, &all_lock_classes, lock_entry) {
 
                 if (class->usage_mask == 0)
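Moving the declaration of class under #ifdef CONFIG_PROVE_LOCKING silences the unused-variable warning seen when CONFIG_PROVE_LOCKING is disabled, since its only user, the list_for_each_entry() walk, is compiled out in that configuration.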
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -908,6 +908,10 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 
         might_sleep();
 
+#ifdef CONFIG_DEBUG_MUTEXES
+        DEBUG_LOCKS_WARN_ON(lock->magic != lock);
+#endif
+
         ww = container_of(lock, struct ww_mutex, base);
         if (use_ww_ctx && ww_ctx) {
                 if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
@@ -1379,8 +1383,13 @@ __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
  */
 int __sched mutex_trylock(struct mutex *lock)
 {
-        bool locked = __mutex_trylock(lock);
+        bool locked;
+
+#ifdef CONFIG_DEBUG_MUTEXES
+        DEBUG_LOCKS_WARN_ON(lock->magic != lock);
+#endif
 
+        locked = __mutex_trylock(lock);
         if (locked)
                 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
 
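The lock->magic != lock test works because mutex debugging makes mutex_init() point the magic field back at the mutex itself, so a mutex that was never initialized (or has been overwritten) fails the comparison. A simplified user-space sketch of the same self-pointer idiom, with hypothetical names:

    #include <assert.h>

    /* Hypothetical simplified lock; only the debug plumbing is shown. */
    struct my_mutex {
            struct my_mutex *magic;         /* points to itself once initialized */
            /* ...real lock state would live here... */
    };

    static void my_mutex_init(struct my_mutex *lock)
    {
            lock->magic = lock;             /* mark as initialized */
    }

    static void my_mutex_lock(struct my_mutex *lock)
    {
            /* An uninitialized or trampled lock fails this check; the kernel
             * warns via DEBUG_LOCKS_WARN_ON() rather than asserting. */
            assert(lock->magic == lock);
            /* ...acquire the lock... */
    }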
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -666,7 +666,11 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem,
         preempt_disable();
         rcu_read_lock();
         owner = rwsem_owner_flags(sem, &flags);
-        if ((flags & nonspinnable) || (owner && !owner_on_cpu(owner)))
+        /*
+         * Don't check the read-owner as the entry may be stale.
+         */
+        if ((flags & nonspinnable) ||
+            (owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner)))
                 ret = false;
         rcu_read_unlock();
         preempt_enable();
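For a reader-owned rwsem the owner field is only a heuristic left behind by a reader and is not cleared on read-unlock, so by the time rwsem_can_spin_on_owner() inspects it the recorded task may already have exited; calling owner_on_cpu() on such a stale pointer risks a use-after-free. Skipping the check when RWSEM_READER_OWNED is set is the sanity check referred to in the pull summary.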
@@ -1000,6 +1004,7 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
                 atomic_long_add(-RWSEM_READER_BIAS, &sem->count);
                 adjustment = 0;
                 if (rwsem_optimistic_spin(sem, false)) {
+                        /* rwsem_optimistic_spin() implies ACQUIRE on success */
                         /*
                          * Wake up other readers in the wait list if the front
                          * waiter is a reader.
@@ -1014,6 +1019,7 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
                         }
                         return sem;
                 } else if (rwsem_reader_phase_trylock(sem, waiter.last_rowner)) {
+                        /* rwsem_reader_phase_trylock() implies ACQUIRE on success */
                         return sem;
                 }
 
@@ -1032,6 +1038,8 @@ queue:
          */
         if (adjustment && !(atomic_long_read(&sem->count) &
              (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))) {
+                /* Provide lock ACQUIRE */
+                smp_acquire__after_ctrl_dep();
                 raw_spin_unlock_irq(&sem->wait_lock);
                 rwsem_set_reader_owned(sem);
                 lockevent_inc(rwsem_rlock_fast);
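smp_acquire__after_ctrl_dep() upgrades the control dependency on the preceding atomic_long_read() to ACQUIRE ordering, so accesses to the protected data cannot be hoisted above the "lock is free" check. A rough user-space C11 analogue, with a hypothetical BUSY_MASK standing in for the writer/handoff bits:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define BUSY_MASK 0x1ul                 /* hypothetical writer/handoff bits */

    static _Atomic unsigned long sem_count; /* stands in for sem->count */

    static bool read_fastpath_ok(void)
    {
            /* Relaxed load, like the kernel's atomic_long_read()... */
            if (atomic_load_explicit(&sem_count, memory_order_relaxed) & BUSY_MASK)
                    return false;
            /* ...then an acquire fence on the success path only, mirroring
             * smp_acquire__after_ctrl_dep(). */
            atomic_thread_fence(memory_order_acquire);
            return true;
    }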
@@ -1065,15 +1073,18 @@ queue:
         wake_up_q(&wake_q);
 
         /* wait to be given the lock */
-        while (true) {
+        for (;;) {
                 set_current_state(state);
-                if (!waiter.task)
+                if (!smp_load_acquire(&waiter.task)) {
+                        /* Matches rwsem_mark_wake()'s smp_store_release(). */
                         break;
+                }
                 if (signal_pending_state(state, current)) {
                         raw_spin_lock_irq(&sem->wait_lock);
                         if (waiter.task)
                                 goto out_nolock;
                         raw_spin_unlock_irq(&sem->wait_lock);
+                        /* Ordered by sem->wait_lock against rwsem_mark_wake(). */
                         break;
                 }
                 schedule();
@@ -1083,6 +1094,7 @@ queue:
         __set_current_state(TASK_RUNNING);
         lockevent_inc(rwsem_rlock);
         return sem;
+
 out_nolock:
         list_del(&waiter.list);
         if (list_empty(&sem->wait_list)) {
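The signal exit path needs no load-acquire of waiter.task: rwsem_mark_wake() clears the field while holding sem->wait_lock, so the raw_spin_lock_irq()/raw_spin_unlock_irq() pair taken here already provides the required ordering; that is what the new comment records.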
@@ -1123,8 +1135,10 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
 
         /* do optimistic spinning and steal lock if possible */
         if (rwsem_can_spin_on_owner(sem, RWSEM_WR_NONSPINNABLE) &&
-            rwsem_optimistic_spin(sem, true))
+            rwsem_optimistic_spin(sem, true)) {
+                /* rwsem_optimistic_spin() implies ACQUIRE on success */
                 return sem;
+        }
 
         /*
          * Disable reader optimistic spinning for this rwsem after
@@ -1184,9 +1198,11 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
 wait:
         /* wait until we successfully acquire the lock */
         set_current_state(state);
-        while (true) {
-                if (rwsem_try_write_lock(sem, wstate))
+        for (;;) {
+                if (rwsem_try_write_lock(sem, wstate)) {
+                        /* rwsem_try_write_lock() implies ACQUIRE on success */
                         break;
+                }
 
                 raw_spin_unlock_irq(&sem->wait_lock);
 