io_uring: split iopoll loop

The main loop of io_do_iopoll() iterates and does ->iopoll() until it
meets the first completed request, then it continues from that position
and splices completed requests off the list to pass them through
io_iopoll_complete().

Split the loop in two for clarity: one pass doing the iopolling, and one
reaping completed requests from the list.
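
In sketch form (a simplified, self-contained C rendering rather than the
kernel code: struct request, poll_device() and the two phase helpers are
illustrative stand-ins for the ctx->iopoll_list entries, the driver
->iopoll() callback and the loop bodies), the reworked flow is:

/*
 * Simplified sketch of the two phases; not the kernel code.
 */
struct request {
        struct request *next;
        int completed;
};

static void poll_device(struct request *req)
{
        /* driver polling would detect and mark completions here */
        (void)req;
}

/* phase 1: poll until the first completed request is met */
static struct request *iopoll_phase(struct request *head)
{
        struct request *req;

        for (req = head; req; req = req->next) {
                if (req->completed)
                        break;
                poll_device(req);       /* may complete req or later ones */
                if (req->completed)
                        break;
        }
        return req;     /* first completed request, or NULL */
}

/* phase 2: reap the run of completed requests from that position */
static int reap_phase(struct request *pos)
{
        int nr_events = 0;

        for (; pos && pos->completed; pos = pos->next)
                nr_events++;    /* the kernel splices these onto 'done' */
        return nr_events;
}

The patch below expresses the same structure over ctx->iopoll_list, using
plain list_for_each_entry() for the polling pass and the new
list_for_each_entry_safe_resume() so the reaping pass can start at the
request where the first pass stopped.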

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/a7f6fd27a94845e5dc925a47a4a9765a92e514fb.1632516769.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Pavel Begunkov 2021-09-24 21:59:48 +01:00, committed by Jens Axboe
parent c2b6c6bc4e
commit e3f721e6f6
1 changed file with 19 additions and 13 deletions

@@ -2441,6 +2441,12 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, struct list_head *done)
 	io_req_free_batch_finish(ctx, &rb);
 }
 
+/* same as "continue" but starts from the pos, not next to it */
+#define list_for_each_entry_safe_resume(pos, n, head, member)		\
+	for (n = list_next_entry(pos, member);				\
+	     !list_entry_is_head(pos, head, member);			\
+	     pos = n, n = list_next_entry(n, member))
+
 static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 {
 	struct io_kiocb *req, *tmp;
@@ -2456,7 +2462,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 	if (ctx->poll_multi_queue || force_nonspin)
 		poll_flags |= BLK_POLL_ONESHOT;
 
-	list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
+	list_for_each_entry(req, &ctx->iopoll_list, inflight_entry) {
 		struct kiocb *kiocb = &req->rw.kiocb;
 		int ret;
 
@@ -2465,12 +2471,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 		 * If we find a request that requires polling, break out
 		 * and complete those lists first, if we have entries there.
 		 */
-		if (READ_ONCE(req->iopoll_completed)) {
-			list_move_tail(&req->inflight_entry, &done);
-			nr_events++;
-			continue;
-		}
-		if (!list_empty(&done))
+		if (READ_ONCE(req->iopoll_completed))
 			break;
 
 		ret = kiocb->ki_filp->f_op->iopoll(kiocb, &iob, poll_flags);
@@ -2481,17 +2482,22 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 
 		/* iopoll may have completed current req */
 		if (!rq_list_empty(iob.req_list) ||
-		    READ_ONCE(req->iopoll_completed)) {
-			list_move_tail(&req->inflight_entry, &done);
-			nr_events++;
-		}
+		    READ_ONCE(req->iopoll_completed))
+			break;
 	}
 
 	if (!rq_list_empty(iob.req_list))
 		iob.complete(&iob);
 
-	if (!list_empty(&done))
-		io_iopoll_complete(ctx, &done);
+	list_for_each_entry_safe_resume(req, tmp, &ctx->iopoll_list,
+					inflight_entry) {
+		if (!READ_ONCE(req->iopoll_completed))
+			break;
+		list_move_tail(&req->inflight_entry, &done);
+		nr_events++;
+	}
+	if (nr_events)
+		io_iopoll_complete(ctx, &done);
 	return nr_events;
 }
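
Compared with the stock list_for_each_entry_continue(), which starts at
the entry after pos, the new helper revisits pos itself; that matters
here because after the first loop req may itself be the first completed
request. A minimal userspace demo of the resume semantics (the handful
of list.h primitives the macro relies on are re-implemented locally on
the assumption that the kernel's <linux/list.h> isn't available, and
struct req with its ids is made up for the demo):

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member)	container_of(ptr, type, member)
#define list_next_entry(pos, member) \
        list_entry((pos)->member.next, typeof(*(pos)), member)
#define list_entry_is_head(pos, head, member)	(&(pos)->member == (head))
#define list_for_each_entry(pos, head, member)				\
        for (pos = list_entry((head)->next, typeof(*pos), member);	\
             !list_entry_is_head(pos, head, member);			\
             pos = list_next_entry(pos, member))

/* the macro added by this patch, unchanged */
#define list_for_each_entry_safe_resume(pos, n, head, member)		\
        for (n = list_next_entry(pos, member);				\
             !list_entry_is_head(pos, head, member);			\
             pos = n, n = list_next_entry(n, member))

struct req {
        int id;
        int completed;
        struct list_head link;
};

int main(void)
{
        struct list_head head = { &head, &head };
        struct req reqs[] = {
                { .id = 0, .completed = 0 },
                { .id = 1, .completed = 1 },
                { .id = 2, .completed = 1 },
                { .id = 3, .completed = 0 },
        };
        struct req *pos, *n;
        size_t i;

        for (i = 0; i < sizeof(reqs) / sizeof(reqs[0]); i++) {
                /* open-coded list_add_tail() */
                reqs[i].link.prev = head.prev;
                reqs[i].link.next = &head;
                head.prev->next = &reqs[i].link;
                head.prev = &reqs[i].link;
        }

        /* loop 1: stop at the first completed entry, like the iopoll pass */
        list_for_each_entry(pos, &head, link)
                if (pos->completed)
                        break;

        /* loop 2: resumes at 'pos' itself, not at the entry after it */
        list_for_each_entry_safe_resume(pos, n, &head, link) {
                if (!pos->completed)
                        break;
                printf("reaped req %d\n", pos->id);	/* prints 1, then 2 */
        }
        return 0;
}

Being a "safe" variant, the helper also caches the next entry in n before
each body runs, so the body may unlink pos, as the reaping loop does with
list_move_tail().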