io_uring: async task poll trigger cleanup
If the request is still hashed in io_async_task_func(), then it cannot
have been canceled and it's pointless to check. So save that check.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 310672552f
parent f2a8d5c7a2
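The reasoning in the message above, spelled out: while the request is still on the cancellation hash, nothing can have canceled it, so the poll-canceled flag only needs to be read on the path where the request turns out to be already unhashed. Below is a minimal userspace sketch of that control-flow change; struct fake_req and its hashed/canceled fields are made-up stand-ins for the real io_kiocb/async_poll state, a toy model and not kernel code:

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for the bits of io_kiocb/async_poll state the patch cares about. */
struct fake_req {
	bool hashed;	/* still on the cancel hash, i.e. hash_hashed() would be true */
	bool canceled;	/* models apoll->poll.canceled; only meaningful once unhashed */
};

/* Post-patch shape: read ->canceled only when the request was already unhashed. */
static bool finish_poll(struct fake_req *req)
{
	bool canceled = false;

	if (req->hashed)
		req->hashed = false;		/* hash_del(): still hashed => cannot be canceled */
	else
		canceled = req->canceled;	/* the pre-patch code read this unconditionally */

	return canceled;
}

int main(void)
{
	struct fake_req live = { .hashed = true,  .canceled = false };
	struct fake_req gone = { .hashed = false, .canceled = true  };

	printf("still hashed -> canceled=%d\n", finish_poll(&live));	/* prints 0 */
	printf("already gone -> canceled=%d\n", finish_poll(&gone));	/* prints 1 */
	return 0;
}

The same restructuring in the actual patch also moves kfree(apoll) out of the canceled-only branch and funnels both outcomes through a single if/else, which is the one-line shrink visible in the second hunk below.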
@@ -4354,7 +4354,7 @@ static void io_async_task_func(struct callback_head *cb)
 	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
 	struct async_poll *apoll = req->apoll;
 	struct io_ring_ctx *ctx = req->ctx;
-	bool canceled;
+	bool canceled = false;
 
 	trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);
 
@@ -4363,34 +4363,33 @@ static void io_async_task_func(struct callback_head *cb)
 		return;
 	}
 
-	if (hash_hashed(&req->hash_node))
+	/* If req is still hashed, it cannot have been canceled. Don't check. */
+	if (hash_hashed(&req->hash_node)) {
 		hash_del(&req->hash_node);
-
-	canceled = READ_ONCE(apoll->poll.canceled);
-	if (canceled) {
-		io_cqring_fill_event(req, -ECANCELED);
-		io_commit_cqring(ctx);
+	} else {
+		canceled = READ_ONCE(apoll->poll.canceled);
+		if (canceled) {
+			io_cqring_fill_event(req, -ECANCELED);
+			io_commit_cqring(ctx);
+		}
 	}
 
 	spin_unlock_irq(&ctx->completion_lock);
 
 	/* restore ->work in case we need to retry again */
 	memcpy(&req->work, &apoll->work, sizeof(req->work));
+	kfree(apoll);
 
-	if (canceled) {
-		kfree(apoll);
+	if (!canceled) {
+		__set_current_state(TASK_RUNNING);
+		mutex_lock(&ctx->uring_lock);
+		__io_queue_sqe(req, NULL);
+		mutex_unlock(&ctx->uring_lock);
+	} else {
 		io_cqring_ev_posted(ctx);
 		req_set_fail_links(req);
 		io_double_put_req(req);
-		return;
 	}
-
-	__set_current_state(TASK_RUNNING);
-	mutex_lock(&ctx->uring_lock);
-	__io_queue_sqe(req, NULL);
-	mutex_unlock(&ctx->uring_lock);
-
-	kfree(apoll);
 }
 
 static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,