io_uring: reorder cqring_flush and wakeups

Unlike in the past, io_commit_cqring_flush() doesn't do anything that
may need io_cqring_wake() to be issued after it; all requests it
completes will go via task_work. Do io_commit_cqring_flush() after
io_cqring_wake() to clean up __io_cq_unlock_post().

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/ed32dcfeec47e6c97bd6b18c152ddce5b218403f.1692916914.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 54927baf6c
parent 59fbc409e7
Pavel Begunkov authored 2023-08-24 23:53:28 +01:00; committed by Jens Axboe
2 changed files with 4 additions and 12 deletions

io_uring/io_uring.c

@@ -629,19 +629,11 @@ static inline void io_cq_lock(struct io_ring_ctx *ctx)
 static inline void __io_cq_unlock_post(struct io_ring_ctx *ctx)
 {
        io_commit_cqring(ctx);
-       if (ctx->task_complete) {
-               /*
-                * ->task_complete implies that only current might be waiting
-                * for CQEs, and obviously, we currently don't. No one is
-                * waiting, wakeups are futile, skip them.
-                */
-               io_commit_cqring_flush(ctx);
-       } else {
+       if (!ctx->task_complete) {
                spin_unlock(&ctx->completion_lock);
-               io_commit_cqring_flush(ctx);
                io_cqring_wake(ctx);
        }
+       io_commit_cqring_flush(ctx);
 }
 
 static void io_cq_unlock_post(struct io_ring_ctx *ctx)
@@ -649,8 +641,8 @@ static void io_cq_unlock_post(struct io_ring_ctx *ctx)
 {
        io_commit_cqring(ctx);
        spin_unlock(&ctx->completion_lock);
-       io_commit_cqring_flush(ctx);
        io_cqring_wake(ctx);
+       io_commit_cqring_flush(ctx);
 }
 
 /* Returns true if there are no backlogged entries after the flush */
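For reference, here is how the two helpers read once the hunks above are applied (a sketch reconstructed from the diff, not quoted verbatim from the tree):

static inline void __io_cq_unlock_post(struct io_ring_ctx *ctx)
{
        io_commit_cqring(ctx);
        if (!ctx->task_complete) {
                spin_unlock(&ctx->completion_lock);
                io_cqring_wake(ctx);
        }
        /* flushed last: any completions it generates go via task_work,
         * so no wakeup is needed after it */
        io_commit_cqring_flush(ctx);
}

static void io_cq_unlock_post(struct io_ring_ctx *ctx)
{
        io_commit_cqring(ctx);
        spin_unlock(&ctx->completion_lock);
        io_cqring_wake(ctx);
        io_commit_cqring_flush(ctx);
}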

io_uring/rw.c

@@ -985,9 +985,9 @@ copy_iov:
 static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
 {
-       io_commit_cqring_flush(ctx);
        if (ctx->flags & IORING_SETUP_SQPOLL)
                io_cqring_wake(ctx);
+       io_commit_cqring_flush(ctx);
 }
 
 void io_rw_fail(struct io_kiocb *req)
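Likewise, the iopoll-side helper ends up as follows (again a sketch assembled from the hunk above):

static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
{
        if (ctx->flags & IORING_SETUP_SQPOLL)
                io_cqring_wake(ctx);
        /* flush after the wakeup, matching __io_cq_unlock_post() */
        io_commit_cqring_flush(ctx);
}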