io_uring: merge defer_prep() and prep_async()

Merge the two functions and rename in favour of the second one, as it
conveys the meaning better.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Pavel Begunkov 2021-02-28 22:35:19 +00:00 committed by Jens Axboe
parent 26f0505a9c
commit b7e298d265
1 changed file with 13 additions and 15 deletions

View File

@ -5868,6 +5868,13 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
static int io_req_prep_async(struct io_kiocb *req) static int io_req_prep_async(struct io_kiocb *req)
{ {
if (!io_op_defs[req->opcode].needs_async_setup)
return 0;
if (WARN_ON_ONCE(req->async_data))
return -EFAULT;
if (io_alloc_async_data(req))
return -EAGAIN;
switch (req->opcode) { switch (req->opcode) {
case IORING_OP_READV: case IORING_OP_READV:
return io_rw_prep_async(req, READ); return io_rw_prep_async(req, READ);
@ -5880,18 +5887,9 @@ static int io_req_prep_async(struct io_kiocb *req)
case IORING_OP_CONNECT: case IORING_OP_CONNECT:
return io_connect_prep_async(req); return io_connect_prep_async(req);
} }
return 0; printk_once(KERN_WARNING "io_uring: prep_async() bad opcode %d\n",
} req->opcode);
static int io_req_defer_prep(struct io_kiocb *req)
{
if (!io_op_defs[req->opcode].needs_async_setup)
return 0;
if (WARN_ON_ONCE(req->async_data))
return -EFAULT; return -EFAULT;
if (io_alloc_async_data(req))
return -EAGAIN;
return io_req_prep_async(req);
} }
static u32 io_get_sequence(struct io_kiocb *req) static u32 io_get_sequence(struct io_kiocb *req)
@ -5924,7 +5922,7 @@ static int io_req_defer(struct io_kiocb *req)
if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list))
return 0; return 0;
ret = io_req_defer_prep(req); ret = io_req_prep_async(req);
if (ret) if (ret)
return ret; return ret;
io_prep_async_link(req); io_prep_async_link(req);
@ -6339,7 +6337,7 @@ fail_req:
io_req_complete_failed(req, ret); io_req_complete_failed(req, ret);
} }
} else if (req->flags & REQ_F_FORCE_ASYNC) { } else if (req->flags & REQ_F_FORCE_ASYNC) {
ret = io_req_defer_prep(req); ret = io_req_prep_async(req);
if (unlikely(ret)) if (unlikely(ret))
goto fail_req; goto fail_req;
io_queue_async_work(req); io_queue_async_work(req);
@ -6492,7 +6490,7 @@ fail_req:
head->flags |= REQ_F_IO_DRAIN; head->flags |= REQ_F_IO_DRAIN;
ctx->drain_next = 1; ctx->drain_next = 1;
} }
ret = io_req_defer_prep(req); ret = io_req_prep_async(req);
if (unlikely(ret)) if (unlikely(ret))
goto fail_req; goto fail_req;
trace_io_uring_link(ctx, req, head); trace_io_uring_link(ctx, req, head);