binder: use wake_up_pollfree()
wake_up_poll() uses nr_exclusive=1, so it's not guaranteed to wake up
all exclusive waiters. Yet, POLLFREE *must* wake up all waiters. epoll
and aio poll are fortunately not affected by this, but it's very
fragile. Thus, the new function wake_up_pollfree() has been introduced.
Convert binder to use wake_up_pollfree().
Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
Fixes: f5cb779ba1 ("ANDROID: binder: remove waitqueue when thread exits.")
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/r/20211209010455.42744-3-ebiggers@kernel.org
Signed-off-by: Eric Biggers <ebiggers@google.com>
commit a880b28a71
parent 42288cb44c
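For readers unfamiliar with the helper introduced by the parent commit ("wait: add wake_up_pollfree()", 42288cb44c), the sketch below contrasts the two wakeup paths as I recall them from include/linux/wait.h and kernel/sched/wait.c; treat the exact bodies as an approximation rather than the authoritative upstream definitions. The point is the nr_exclusive argument to __wake_up(): wake_up_poll() passes 1 (wake at most one exclusive waiter), while wake_up_pollfree() passes 0 (wake everyone), which is what POLLFREE semantics demand.

/* Approximate definitions, paraphrased from include/linux/wait.h and
 * kernel/sched/wait.c; see the parent commit for the real ones.
 */

/* nr_exclusive=1: at most one exclusive waiter is woken. */
#define wake_up_poll(x, m)						\
	__wake_up(x, TASK_NORMAL, 1, poll_to_key(m))

/* nr_exclusive=0: every waiter is woken with EPOLLHUP | POLLFREE, and
 * each POLLFREE-aware waiter is expected to remove itself, leaving the
 * queue empty.
 */
void __wake_up_pollfree(struct wait_queue_head *wq_head)
{
	__wake_up(wq_head, TASK_NORMAL, 0, poll_to_key(EPOLLHUP | POLLFREE));
	/* POLLFREE handlers must have emptied the queue by now. */
	WARN_ON_ONCE(waitqueue_active(wq_head));
}

static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
{
	/* Lockless emptiness check: this fast path is why callers must
	 * follow up with synchronize_rcu() before freeing the queue (see
	 * the comment added in the diff below).
	 */
	if (waitqueue_active(wq_head))
		__wake_up_pollfree(wq_head);
}

With nr_exclusive=1, a second exclusive waiter could survive the wakeup and later dereference a freed waitqueue; waking all waiters closes that hole.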
@@ -4422,23 +4422,20 @@ static int binder_thread_release(struct binder_proc *proc,
 		__release(&t->lock);
 
 	/*
-	 * If this thread used poll, make sure we remove the waitqueue
-	 * from any epoll data structures holding it with POLLFREE.
-	 * waitqueue_active() is safe to use here because we're holding
-	 * the inner lock.
+	 * If this thread used poll, make sure we remove the waitqueue from any
+	 * poll data structures holding it.
 	 */
-	if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
-	    waitqueue_active(&thread->wait)) {
-		wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
-	}
+	if (thread->looper & BINDER_LOOPER_STATE_POLL)
+		wake_up_pollfree(&thread->wait);
 
 	binder_inner_proc_unlock(thread->proc);
 
 	/*
-	 * This is needed to avoid races between wake_up_poll() above and
-	 * and ep_remove_waitqueue() called for other reasons (eg the epoll file
-	 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
-	 * lock, so we can be sure it's done after calling synchronize_rcu().
+	 * This is needed to avoid races between wake_up_pollfree() above and
+	 * someone else removing the last entry from the queue for other reasons
+	 * (e.g. ep_remove_wait_queue() being called due to an epoll file
+	 * descriptor being closed). Such other users hold an RCU read lock, so
+	 * we can be sure they're done after we call synchronize_rcu().
 	 */
 	if (thread->looper & BINDER_LOOPER_STATE_POLL)
 		synchronize_rcu();
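To make the synchronize_rcu() ordering concrete, here is a simplified, hypothetical outline of the release path. The function name binder_thread_release_sketch() and the direct kfree() are illustrative assumptions; in the real driver the thread is freed later through its temporary reference count rather than inline.

/* Simplified, hypothetical outline of the teardown ordering; the
 * kfree() stands in for the driver's real refcount-driven free and is
 * here only to show why synchronize_rcu() must come first.
 */
static void binder_thread_release_sketch(struct binder_thread *thread)
{
	bool used_poll = thread->looper & BINDER_LOOPER_STATE_POLL;

	binder_inner_proc_lock(thread->proc);
	/* ... unwind pending transactions, mark the thread dead ... */
	if (used_poll)
		wake_up_pollfree(&thread->wait);	/* wake ALL waiters */
	binder_inner_proc_unlock(thread->proc);

	/*
	 * wake_up_pollfree() checked waitqueue_active() without the queue
	 * lock, so it may have run concurrently with someone unlinking
	 * the last waiter (e.g. ep_remove_wait_queue()). Such removers
	 * run under rcu_read_lock(), so once synchronize_rcu() returns
	 * they can no longer be touching thread->wait.
	 */
	if (used_poll)
		synchronize_rcu();

	kfree(thread);	/* safe: nothing can still reference &thread->wait */
}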