io_uring: deduplicate failing task_work_add
When io_req_task_work_add() fails, the request is cancelled by
enqueueing it via the task_works of io-wq. Extract a helper function
for that fallback.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit eab30c4d20
parent 02b23a9af5
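Before the hunks, a note on what the refactor produces. The helper below is assembled verbatim from the first hunk, together with one representative caller taken from io_req_task_queue(). It is kernel-internal code, not a standalone program; the surrounding io_uring context (struct io_kiocb, io_wq_get_task(), the task_work infrastructure) is assumed rather than shown, and the caller snippet is a fragment (ret is declared in the enclosing function):

	static void io_req_task_work_add_fallback(struct io_kiocb *req,
						  void (*cb)(struct callback_head *))
	{
		/* Fall back to the io-wq manager task, which can take the
		 * task_work when the request's own task cannot. */
		struct task_struct *tsk = io_wq_get_task(req->ctx->io_wq);

		init_task_work(&req->task_work, cb);
		task_work_add(tsk, &req->task_work, TWA_NONE);
		/* TWA_NONE sends no notification, so wake the task explicitly. */
		wake_up_process(tsk);
	}

	/* Callers collapse from an open-coded block to a single call, e.g.: */
	ret = io_req_task_work_add(req);
	if (unlikely(ret))
		io_req_task_work_add_fallback(req, io_req_task_cancel);

Of the four call sites converted below, only __io_async_wake() keeps its if-block, because it must also do WRITE_ONCE(poll->canceled, true) before punting.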
@@ -2171,6 +2171,16 @@ static int io_req_task_work_add(struct io_kiocb *req)
 	return ret;
 }
 
+static void io_req_task_work_add_fallback(struct io_kiocb *req,
+					  void (*cb)(struct callback_head *))
+{
+	struct task_struct *tsk = io_wq_get_task(req->ctx->io_wq);
+
+	init_task_work(&req->task_work, cb);
+	task_work_add(tsk, &req->task_work, TWA_NONE);
+	wake_up_process(tsk);
+}
+
 static void __io_req_task_cancel(struct io_kiocb *req, int error)
 {
 	struct io_ring_ctx *ctx = req->ctx;
@@ -2225,14 +2235,8 @@ static void io_req_task_queue(struct io_kiocb *req)
 	percpu_ref_get(&req->ctx->refs);
 
 	ret = io_req_task_work_add(req);
-	if (unlikely(ret)) {
-		struct task_struct *tsk;
-
-		init_task_work(&req->task_work, io_req_task_cancel);
-		tsk = io_wq_get_task(req->ctx->io_wq);
-		task_work_add(tsk, &req->task_work, TWA_NONE);
-		wake_up_process(tsk);
-	}
+	if (unlikely(ret))
+		io_req_task_work_add_fallback(req, io_req_task_cancel);
 }
 
 static inline void io_queue_next(struct io_kiocb *req)
@@ -2350,13 +2354,8 @@ static void io_free_req_deferred(struct io_kiocb *req)
 
 	init_task_work(&req->task_work, io_put_req_deferred_cb);
 	ret = io_req_task_work_add(req);
-	if (unlikely(ret)) {
-		struct task_struct *tsk;
-
-		tsk = io_wq_get_task(req->ctx->io_wq);
-		task_work_add(tsk, &req->task_work, TWA_NONE);
-		wake_up_process(tsk);
-	}
+	if (unlikely(ret))
+		io_req_task_work_add_fallback(req, io_put_req_deferred_cb);
 }
 
 static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
@@ -3425,15 +3424,8 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
 	/* submit ref gets dropped, acquire a new one */
 	refcount_inc(&req->refs);
 	ret = io_req_task_work_add(req);
-	if (unlikely(ret)) {
-		struct task_struct *tsk;
-
-		/* queue just for cancelation */
-		init_task_work(&req->task_work, io_req_task_cancel);
-		tsk = io_wq_get_task(req->ctx->io_wq);
-		task_work_add(tsk, &req->task_work, TWA_NONE);
-		wake_up_process(tsk);
-	}
+	if (unlikely(ret))
+		io_req_task_work_add_fallback(req, io_req_task_cancel);
 	return 1;
 }
 
@@ -5153,12 +5145,8 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
 	 */
 	ret = io_req_task_work_add(req);
 	if (unlikely(ret)) {
-		struct task_struct *tsk;
-
 		WRITE_ONCE(poll->canceled, true);
-		tsk = io_wq_get_task(req->ctx->io_wq);
-		task_work_add(tsk, &req->task_work, TWA_NONE);
-		wake_up_process(tsk);
+		io_req_task_work_add_fallback(req, func);
 	}
 	return 1;
 }