io_uring: simplify overflow handling

Overflowed CQEs don't lock requests anymore, so we don't care so much
about cancelling them; kill cq_overflow_flushed and simplify the
code.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/5799867aeba9e713c32f49aef78e5e1aef9fbc43.1617287883.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Pavel Begunkov 2021-04-01 15:43:58 +01:00 committed by Jens Axboe
parent e07785b002
commit c4ea060e85
1 changed file with 3 additions and 5 deletions
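
For context only (not part of this commit): below is a minimal userspace sketch,
using liburing, that drives the CQ ring into overflow by submitting more NOPs
than the CQ has entries. On kernels that advertise IORING_FEAT_NODROP, the
completions that do not fit are stashed in the kernel-side overflow backlog that
__io_cqring_fill_event() populates in the second hunk below, and are delivered
once the application reaps the ring. The ring sizes and the NOP workload are
illustrative choices, not anything mandated by the patch.

#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_cqe *cqe;
	unsigned submitted = 0, reaped = 0;
	int i, round, ret;

	/* SQ of 4 entries; the CQ defaults to twice that (8 entries) */
	if (io_uring_queue_init(4, &ring, 0) < 0)
		return 1;

	/* 3 rounds x 4 NOPs = 12 completions, overflowing the 8-entry CQ */
	for (round = 0; round < 3; round++) {
		for (i = 0; i < 4; i++) {
			struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);

			if (!sqe)
				break;
			io_uring_prep_nop(sqe);
		}
		ret = io_uring_submit(&ring);
		if (ret > 0)
			submitted += ret;
	}

	/* every completion is eventually delivered, none are dropped */
	while (reaped < submitted) {
		if (io_uring_wait_cqe(&ring, &cqe) < 0)
			break;
		io_uring_cqe_seen(&ring, cqe);
		reaped++;
	}
	printf("submitted %u, reaped %u\n", submitted, reaped);

	io_uring_queue_exit(&ring);
	return reaped == submitted ? 0 : 1;
}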

@@ -335,7 +335,6 @@ struct io_ring_ctx {
 	struct {
 		unsigned int		flags;
 		unsigned int		compat: 1;
-		unsigned int		cq_overflow_flushed: 1;
 		unsigned int		drain_next: 1;
 		unsigned int		eventfd_async: 1;
 		unsigned int		restricted: 1;
@@ -1525,8 +1524,7 @@ static bool __io_cqring_fill_event(struct io_kiocb *req, long res,
 		WRITE_ONCE(cqe->flags, cflags);
 		return true;
 	}
-	if (!ctx->cq_overflow_flushed &&
-	    !atomic_read(&req->task->io_uring->in_idle)) {
+	if (!atomic_read(&req->task->io_uring->in_idle)) {
 		struct io_overflow_cqe *ocqe;

 		ocqe = kmalloc(sizeof(*ocqe), GFP_ATOMIC | __GFP_ACCOUNT);
@@ -8491,6 +8489,8 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
 	mutex_lock(&ctx->uring_lock);
 	io_sqe_files_unregister(ctx);
+	if (ctx->rings)
+		__io_cqring_overflow_flush(ctx, true);
 	mutex_unlock(&ctx->uring_lock);
 	io_eventfd_unregister(ctx);
 	io_destroy_buffers(ctx);
@@ -8692,8 +8692,6 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
 	mutex_lock(&ctx->uring_lock);
 	percpu_ref_kill(&ctx->refs);
-	/* if force is set, the ring is going away. always drop after that */
-	ctx->cq_overflow_flushed = 1;
 	if (ctx->rings)
 		__io_cqring_overflow_flush(ctx, true);
 	xa_for_each(&ctx->personalities, index, creds)