io_uring: cancel deferred requests in try_cancel

As io_uring_cancel_files() and others let the SQPOLL task run in between
calls to io_uring_try_cancel_requests(), the SQPOLL task may generate new
deferred requests, so it is safer to try to cancel them there as well.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Pavel Begunkov 2021-03-11 23:29:35 +00:00 committed by Jens Axboe
parent d052d1d685
commit e1915f76a8
1 changed file with 6 additions and 4 deletions

View File

@@ -8577,11 +8577,11 @@ static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
 	return ret;
 }
 
-static void io_cancel_defer_files(struct io_ring_ctx *ctx,
+static bool io_cancel_defer_files(struct io_ring_ctx *ctx,
 				  struct task_struct *task,
 				  struct files_struct *files)
 {
-	struct io_defer_entry *de = NULL;
+	struct io_defer_entry *de;
 	LIST_HEAD(list);
 
 	spin_lock_irq(&ctx->completion_lock);
@@ -8592,6 +8592,8 @@ static void io_cancel_defer_files(struct io_ring_ctx *ctx,
 		}
 	}
 	spin_unlock_irq(&ctx->completion_lock);
+	if (list_empty(&list))
+		return false;
 
 	while (!list_empty(&list)) {
 		de = list_first_entry(&list, struct io_defer_entry, list);
@@ -8601,6 +8603,7 @@ static void io_cancel_defer_files(struct io_ring_ctx *ctx,
 		io_req_complete(de->req, -ECANCELED);
 		kfree(de);
 	}
+	return true;
 }
 
 static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
@@ -8666,6 +8669,7 @@ static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
 		}
 	}
 
+	ret |= io_cancel_defer_files(ctx, task, files);
 	ret |= io_poll_remove_all(ctx, task, files);
 	ret |= io_kill_timeouts(ctx, task, files);
 	ret |= io_run_task_work();
@@ -8734,8 +8738,6 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
 		atomic_inc(&task->io_uring->in_idle);
 	}
 
-	io_cancel_defer_files(ctx, task, files);
-
 	io_uring_cancel_files(ctx, task, files);
 	if (!files)
 		io_uring_try_cancel_requests(ctx, task, NULL);