epoll: pull all code between fetch_events and send_event into the loop
This is a no-op change that simplifies the follow-up patches.

Link: https://lkml.kernel.org/r/20201106231635.3528496-7-soheil.kdev@gmail.com
Signed-off-by: Soheil Hassas Yeganeh <soheil@google.com>
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Reviewed-by: Khazhismel Kumykov <khazhy@google.com>
Cc: Guantao Liu <guantaol@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:

parent 1493c47fb1
commit e8c85328b1
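For readers who prefer the two control-flow shapes side by side, here is a compilable user-space sketch of what the diff below does. It is not the real kernel code: ep_events_available() and ep_busy_loop() are reduced to stand-in stubs with invented signatures, and the sleep/wakeup handling is elided.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in stubs; the real eventpoll helpers take an eventpoll
 * context and have different signatures. */
static bool ep_events_available(void) { return true; }
static bool ep_busy_loop(bool timed_out) { (void)timed_out; return false; }

/* Shape BEFORE the patch: the availability checks sit between the
 * fetch_events label and the do-block. */
static int poll_before(bool timed_out)
{
        bool eavail;

        /* fetch_events: */
        eavail = ep_events_available();
        if (!eavail)
                eavail = ep_busy_loop(timed_out);

        if (eavail)
                goto send_events;

        do {
                /* ...sleep/wakeup handling elided... */
                eavail = true;
        } while (0);

send_events:
        return eavail ? 1 : 0;
}

/* Shape AFTER the patch: the same checks run as the first step inside
 * the do { } while (0) block, so a follow-up patch can grow the block
 * into a real retry loop without reshuffling the checks. */
static int poll_after(bool timed_out)
{
        bool eavail;

        do {
                eavail = ep_events_available();
                if (!eavail)
                        eavail = ep_busy_loop(timed_out);

                if (eavail)
                        break;  /* stands in for "goto send_events" */

                /* ...sleep/wakeup handling elided... */
                eavail = true;
        } while (0);

        return eavail ? 1 : 0;
}

int main(void)
{
        printf("before: %d, after: %d\n", poll_before(false), poll_after(false));
        return 0;
}

The do { } while (0) wrapper still executes exactly once, which is why the commit is a no-op; the point is only to move the checks inside the block so later patches can change the loop condition in one place.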
@@ -1774,14 +1774,14 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 	}
 
 fetch_events:
-	eavail = ep_events_available(ep);
-	if (!eavail)
-		eavail = ep_busy_loop(ep, timed_out);
+	do {
+		eavail = ep_events_available(ep);
+		if (!eavail)
+			eavail = ep_busy_loop(ep, timed_out);
 
-	if (eavail)
-		goto send_events;
+		if (eavail)
+			goto send_events;
 
-	do {
 		if (signal_pending(current))
 			return -EINTR;
 
@@ -1830,21 +1830,22 @@ fetch_events:
 		 * carefully under lock, below.
 		 */
 		eavail = 1;
-	} while (0);
 
-	if (!list_empty_careful(&wait.entry)) {
-		write_lock_irq(&ep->lock);
-		/*
-		 * If the thread timed out and is not on the wait queue, it
-		 * means that the thread was woken up after its timeout expired
-		 * before it could reacquire the lock. Thus, when wait.entry is
-		 * empty, it needs to harvest events.
-		 */
-		if (timed_out)
-			eavail = list_empty(&wait.entry);
+		if (!list_empty_careful(&wait.entry)) {
+			write_lock_irq(&ep->lock);
+			/*
+			 * If the thread timed out and is not on the wait queue,
+			 * it means that the thread was woken up after its
+			 * timeout expired before it could reacquire the lock.
+			 * Thus, when wait.entry is empty, it needs to harvest
+			 * events.
+			 */
+			if (timed_out)
+				eavail = list_empty(&wait.entry);
 
-		__remove_wait_queue(&ep->wq, &wait);
-		write_unlock_irq(&ep->lock);
-	}
+			__remove_wait_queue(&ep->wq, &wait);
+			write_unlock_irq(&ep->lock);
+		}
+	} while (0);
 
 send_events:
 	/*