io_uring: alloc req only after getting sqe

As io_get_sqe() was split into a two-stage get/consume pair, get an sqe
before allocating the io_kiocb, so no free_req*() is needed for the
failure case. Also inline __io_req_do_free() back into its only
remaining user, __io_free_req().

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Pavel Begunkov 2020-04-08 08:58:44 +03:00 committed by Jens Axboe
parent 709b302fad
commit b1e50e549b
1 changed file with 9 additions and 15 deletions
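Conceptually, the patched submission loop peeks at the next sqe first and allocates the io_kiocb only once there is work to pick up, so a failed request allocation leaves nothing to undo. Below is a minimal, standalone userspace sketch of that peek/allocate/consume ordering; every name in it is invented for illustration, and only the ordering mirrors the third hunk of this patch.

/*
 * Not kernel code: a minimal ring consumer using the same two-stage
 * get/consume split. The entry is only consumed after every step that
 * can fail has succeeded, so a failed allocation leaves the ring intact
 * and nothing has to be freed or rolled back.
 */
#include <stdio.h>
#include <stdlib.h>

struct entry { int data; };

struct ring {
	struct entry entries[8];
	unsigned int head, tail;	/* head == tail means empty */
};

/* Stage 1: look at the next entry without advancing the head. */
static struct entry *peek_entry(struct ring *r)
{
	return r->head == r->tail ? NULL : &r->entries[r->head & 7];
}

/* Stage 2: commit to the entry by advancing the head. */
static void consume_entry(struct ring *r)
{
	r->head++;
}

struct request { struct entry *e; };

static int submit_all(struct ring *r)
{
	int submitted = 0;

	for (;;) {
		struct entry *e = peek_entry(r);
		struct request *req;

		if (!e)
			break;
		req = malloc(sizeof(*req));
		if (!req)		/* nothing consumed, nothing to undo */
			return submitted ? submitted : -1;
		consume_entry(r);	/* the entry is ours only now */
		req->e = e;
		printf("submitted entry %d\n", req->e->data);
		free(req);		/* stand-in for request completion */
		submitted++;
	}
	return submitted;
}

int main(void)
{
	struct ring r = { .head = 0, .tail = 3 };

	for (unsigned int i = 0; i < r.tail; i++)
		r.entries[i].data = (int)i;
	return submit_all(&r) < 0;
}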

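The second hunk inlines the fallback branch of the free path: io_uring keeps a single pre-allocated request (ctx->fallback_req) that is handed out when kmem_cache allocation fails, and "freeing" it just releases an in-use bit via clear_bit_unlock(). The allocation side is not part of this diff, so the take/release pairing below is an assumption; the sketch uses hypothetical names and C11 atomics in place of the kernel's bit-lock helpers.

/*
 * Not kernel code: one pre-allocated fallback object whose bit 0 acts as
 * an "in use" lock. Allocation grabs the bit when the normal allocator
 * fails; freeing the fallback object releases the bit instead of calling
 * free(). This mirrors why the free path distinguishes a fallback
 * request from a kmem_cache-allocated one.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_ulong flags;		/* bit 0: fallback object is in use */
	int payload;
};

static struct obj fallback;		/* stands in for ctx->fallback_req */

static struct obj *obj_alloc(int simulate_oom)
{
	struct obj *o = simulate_oom ? NULL : malloc(sizeof(*o));

	if (o)
		return o;
	/* Normal allocation failed: try to take the single fallback slot. */
	if (!(atomic_fetch_or_explicit(&fallback.flags, 1, memory_order_acquire) & 1))
		return &fallback;
	return NULL;			/* fallback already in use */
}

static void obj_free(struct obj *o)
{
	if (o != &fallback) {
		free(o);
		return;
	}
	/* Counterpart of clear_bit_unlock(): release the fallback slot. */
	atomic_fetch_and_explicit(&fallback.flags, ~1UL, memory_order_release);
}

int main(void)
{
	struct obj *a = obj_alloc(1);	/* force the fallback path */
	struct obj *b = obj_alloc(1);	/* fallback busy, so this fails */

	printf("a uses %s, b is %s\n",
	       a == &fallback ? "the fallback slot" : "the heap",
	       b ? "allocated" : "NULL");
	obj_free(a);
	return 0;
}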
@@ -1354,14 +1354,6 @@ static inline void io_put_file(struct io_kiocb *req, struct file *file,
 		fput(file);
 }
 
-static void __io_req_do_free(struct io_kiocb *req)
-{
-	if (likely(!io_is_fallback_req(req)))
-		kmem_cache_free(req_cachep, req);
-	else
-		clear_bit_unlock(0, (unsigned long *) req->ctx->fallback_req);
-}
-
 static void __io_req_aux_free(struct io_kiocb *req)
 {
 	if (req->flags & REQ_F_NEED_CLEANUP)
@@ -1392,7 +1384,10 @@ static void __io_free_req(struct io_kiocb *req)
 	}
 
 	percpu_ref_put(&req->ctx->refs);
-	__io_req_do_free(req);
+	if (likely(!io_is_fallback_req(req)))
+		kmem_cache_free(req_cachep, req);
+	else
+		clear_bit_unlock(0, (unsigned long *) req->ctx->fallback_req);
 }
 
 struct req_batch {
@@ -5844,18 +5839,17 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 		struct io_kiocb *req;
 		int err;
 
+		sqe = io_get_sqe(ctx);
+		if (unlikely(!sqe)) {
+			io_consume_sqe(ctx);
+			break;
+		}
 		req = io_get_req(ctx, statep);
 		if (unlikely(!req)) {
 			if (!submitted)
 				submitted = -EAGAIN;
 			break;
 		}
-		sqe = io_get_sqe(ctx);
-		if (!sqe) {
-			__io_req_do_free(req);
-			io_consume_sqe(ctx);
-			break;
-		}
 		/*
 		 * All io need record the previous position, if LINK vs DARIN,