io_uring: clean up cancelling SQPOLL reqs across exec
For SQPOLL rings, tctx_inflight() always returns zero, so full cancellation might be skipped. This is functionally fine, because we jam all sqpoll submissions in any case and do go through the files cancel path for them, but it is not clean. Do the intended full cancellation by mimicking the waiting in __io_uring_task_cancel() while impersonating the SQPOLL task. Signed-off-by: Pavel Begunkov <asml.silence@gmail.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
257e84a537
commit
0e9ddb39b7
|
@ -9083,29 +9083,39 @@ void __io_uring_files_cancel(struct files_struct *files)
|
||||||
|
|
||||||
static s64 tctx_inflight(struct io_uring_task *tctx)
|
static s64 tctx_inflight(struct io_uring_task *tctx)
|
||||||
{
|
{
|
||||||
unsigned long index;
|
return percpu_counter_sum(&tctx->inflight);
|
||||||
struct file *file;
|
}
|
||||||
|
|
||||||
|
static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx)
|
||||||
|
{
|
||||||
|
struct io_uring_task *tctx;
|
||||||
s64 inflight;
|
s64 inflight;
|
||||||
|
DEFINE_WAIT(wait);
|
||||||
|
|
||||||
inflight = percpu_counter_sum(&tctx->inflight);
|
if (!ctx->sq_data)
|
||||||
if (!tctx->sqpoll)
|
return;
|
||||||
return inflight;
|
tctx = ctx->sq_data->thread->io_uring;
|
||||||
|
io_disable_sqo_submit(ctx);
|
||||||
|
|
||||||
/*
|
atomic_inc(&tctx->in_idle);
|
||||||
* If we have SQPOLL rings, then we need to iterate and find them, and
|
do {
|
||||||
* add the pending count for those.
|
/* read completions before cancelations */
|
||||||
*/
|
inflight = tctx_inflight(tctx);
|
||||||
xa_for_each(&tctx->xa, index, file) {
|
if (!inflight)
|
||||||
struct io_ring_ctx *ctx = file->private_data;
|
break;
|
||||||
|
io_uring_cancel_task_requests(ctx, NULL);
|
||||||
|
|
||||||
if (ctx->flags & IORING_SETUP_SQPOLL) {
|
prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
|
||||||
struct io_uring_task *__tctx = ctx->sqo_task->io_uring;
|
/*
|
||||||
|
* If we've seen completions, retry without waiting. This
|
||||||
inflight += percpu_counter_sum(&__tctx->inflight);
|
* avoids a race where a completion comes in before we did
|
||||||
}
|
* prepare_to_wait().
|
||||||
}
|
*/
|
||||||
|
if (inflight == tctx_inflight(tctx))
|
||||||
return inflight;
|
schedule();
|
||||||
|
finish_wait(&tctx->wait, &wait);
|
||||||
|
} while (1);
|
||||||
|
atomic_dec(&tctx->in_idle);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -9122,8 +9132,13 @@ void __io_uring_task_cancel(void)
|
||||||
atomic_inc(&tctx->in_idle);
|
atomic_inc(&tctx->in_idle);
|
||||||
|
|
||||||
/* trigger io_disable_sqo_submit() */
|
/* trigger io_disable_sqo_submit() */
|
||||||
if (tctx->sqpoll)
|
if (tctx->sqpoll) {
|
||||||
__io_uring_files_cancel(NULL);
|
struct file *file;
|
||||||
|
unsigned long index;
|
||||||
|
|
||||||
|
xa_for_each(&tctx->xa, index, file)
|
||||||
|
io_uring_cancel_sqpoll(file->private_data);
|
||||||
|
}
|
||||||
|
|
||||||
do {
|
do {
|
||||||
/* read completions before cancelations */
|
/* read completions before cancelations */
|
||||||
|
|
Loading…
Reference in New Issue