io_uring: fail io-wq submission from a task_work
In case of failure, io_wq_submit_work() needs to post a CQE and so may have to take uring_lock. The safest way to deal with that is to do it from task_work, where we can take the lock safely. Also, since io_iopoll_check() holds the lock tight and releases it reluctantly, this will play nicer in the future with notifying an iopolling task about new pending failed requests like these.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 792bb6eb86
commit a3df769899
@@ -2338,7 +2338,7 @@ static void io_req_task_cancel(struct callback_head *cb)
 	struct io_ring_ctx *ctx = req->ctx;
 
 	mutex_lock(&ctx->uring_lock);
-	__io_req_task_cancel(req, -ECANCELED);
+	__io_req_task_cancel(req, req->result);
 	mutex_unlock(&ctx->uring_lock);
 	percpu_ref_put(&ctx->refs);
 }
@@ -2371,11 +2371,22 @@ static void io_req_task_queue(struct io_kiocb *req)
 	req->task_work.func = io_req_task_submit;
 	ret = io_req_task_work_add(req);
 	if (unlikely(ret)) {
+		req->result = -ECANCELED;
 		percpu_ref_get(&req->ctx->refs);
 		io_req_task_work_add_fallback(req, io_req_task_cancel);
 	}
 }
 
+static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
+{
+	percpu_ref_get(&req->ctx->refs);
+	req->result = ret;
+	req->task_work.func = io_req_task_cancel;
+
+	if (unlikely(io_req_task_work_add(req)))
+		io_req_task_work_add_fallback(req, io_req_task_cancel);
+}
+
 static inline void io_queue_next(struct io_kiocb *req)
 {
 	struct io_kiocb *nxt = io_req_find_next(req);
@@ -6428,13 +6439,8 @@ static void io_wq_submit_work(struct io_wq_work *work)
 	if (timeout)
 		io_queue_linked_timeout(timeout);
 
-	if (work->flags & IO_WQ_WORK_CANCEL) {
-		/* io-wq is going to take down one */
-		refcount_inc(&req->refs);
-		percpu_ref_get(&req->ctx->refs);
-		io_req_task_work_add_fallback(req, io_req_task_cancel);
-		return;
-	}
+	if (work->flags & IO_WQ_WORK_CANCEL)
+		ret = -ECANCELED;
 
 	if (!ret) {
 		do {
@@ -6450,29 +6456,11 @@ static void io_wq_submit_work(struct io_wq_work *work)
 		} while (1);
 	}
 
+	/* avoid locking problems by failing it from a clean context */
 	if (ret) {
-		struct io_ring_ctx *lock_ctx = NULL;
-
-		if (req->ctx->flags & IORING_SETUP_IOPOLL)
-			lock_ctx = req->ctx;
-
-		/*
-		 * io_iopoll_complete() does not hold completion_lock to
-		 * complete polled io, so here for polled io, we can not call
-		 * io_req_complete() directly, otherwise there maybe concurrent
-		 * access to cqring, defer_list, etc, which is not safe. Given
-		 * that io_iopoll_complete() is always called under uring_lock,
-		 * so here for polled io, we also get uring_lock to complete
-		 * it.
-		 */
-		if (lock_ctx)
-			mutex_lock(&lock_ctx->uring_lock);
-
-		req_set_fail_links(req);
-		io_req_complete(req, ret);
-
-		if (lock_ctx)
-			mutex_unlock(&lock_ctx->uring_lock);
+		/* io-wq is going to take one down */
+		refcount_inc(&req->refs);
+		io_req_task_queue_fail(req, ret);
 	}
 }
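To make the locking argument concrete, here is a minimal userspace model of the pattern this commit adopts. It is not kernel code: struct request, req_task_queue_fail(), wq_submit_work() and the plain linked list are simplifications invented for the sketch. The worker thread only records the error and queues a callback; the completion, which needs uring_lock, happens later, once the submitting task runs its pending task_work.

/* sketch of the deferred-failure pattern; build with: cc -pthread sketch.c */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct request {
	int result;				/* error code recorded by the worker */
	void (*task_work)(struct request *);	/* deferred completion callback */
	struct request *next;
};

/* models ctx->uring_lock: guards posting to the completion queue */
static pthread_mutex_t uring_lock = PTHREAD_MUTEX_INITIALIZER;
/* pending task_work; the kernel uses an atomic list + task notification */
static struct request *task_work_list;

/* runs in task context, where taking uring_lock is safe */
static void req_task_cancel(struct request *req)
{
	pthread_mutex_lock(&uring_lock);
	printf("posting CQE, res=%d\n", req->result);
	pthread_mutex_unlock(&uring_lock);
}

/* models io_req_task_queue_fail(): record the error, defer completion */
static void req_task_queue_fail(struct request *req, int ret)
{
	req->result = ret;
	req->task_work = req_task_cancel;
	req->next = task_work_list;
	task_work_list = req;
}

/* models the io-wq worker: it must not post the CQE itself */
static void *wq_submit_work(void *arg)
{
	struct request *req = arg;
	int ret = -ECANCELED;		/* pretend the submission failed */

	/* avoid locking problems by failing it from a clean context */
	if (ret)
		req_task_queue_fail(req, ret);
	return NULL;
}

int main(void)
{
	struct request req = { 0 };
	pthread_t worker;

	pthread_create(&worker, NULL, wq_submit_work, &req);
	pthread_join(worker, NULL);	/* join orders the list update before the read */

	/* the submitting task eventually runs its pending task_work */
	for (struct request *r = task_work_list; r; r = r->next)
		r->task_work(r);
	return 0;
}

In the real patch the queueing step is io_req_task_work_add(), which notifies the owning task (with io_req_task_work_add_fallback() as the error path); the single list and pthread_join() above only stand in for that machinery.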