io_uring: remove ring quiesce for io_uring_register

None of the opcodes in io_uring_register use ring quiesce anymore. Hence
io_register_op_must_quiesce always returns false and io_ctx_quiesce is
never called.

Signed-off-by: Usama Arif <usama.arif@bytedance.com>
Link: https://lore.kernel.org/r/20220204145117.1186568-6-usama.arif@bytedance.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Author:    Usama Arif <usama.arif@bytedance.com>
Date:      2022-02-04 14:51:17 +00:00
Committer: Jens Axboe <axboe@kernel.dk>
Commit:    8bb649ee1d (parent ff16cfcfda)

1 file changed, 0 insertions(+), 83 deletions(-)


@@ -1292,18 +1292,6 @@ static inline unsigned int io_put_kbuf(struct io_kiocb *req)
 	return __io_put_kbuf(req);
 }
 
-static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
-{
-	bool got = percpu_ref_tryget(ref);
-
-	/* already at zero, wait for ->release() */
-	if (!got)
-		wait_for_completion(compl);
-	percpu_ref_resurrect(ref);
-	if (got)
-		percpu_ref_put(ref);
-}
-
 static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
 			  bool cancel_all)
 	__must_hold(&req->ctx->timeout_lock)
@@ -11007,66 +10995,6 @@ err:
 	return ret;
 }
 
-static bool io_register_op_must_quiesce(int op)
-{
-	switch (op) {
-	case IORING_REGISTER_BUFFERS:
-	case IORING_UNREGISTER_BUFFERS:
-	case IORING_REGISTER_FILES:
-	case IORING_UNREGISTER_FILES:
-	case IORING_REGISTER_FILES_UPDATE:
-	case IORING_REGISTER_EVENTFD:
-	case IORING_REGISTER_EVENTFD_ASYNC:
-	case IORING_UNREGISTER_EVENTFD:
-	case IORING_REGISTER_PROBE:
-	case IORING_REGISTER_PERSONALITY:
-	case IORING_UNREGISTER_PERSONALITY:
-	case IORING_REGISTER_ENABLE_RINGS:
-	case IORING_REGISTER_RESTRICTIONS:
-	case IORING_REGISTER_FILES2:
-	case IORING_REGISTER_FILES_UPDATE2:
-	case IORING_REGISTER_BUFFERS2:
-	case IORING_REGISTER_BUFFERS_UPDATE:
-	case IORING_REGISTER_IOWQ_AFF:
-	case IORING_UNREGISTER_IOWQ_AFF:
-	case IORING_REGISTER_IOWQ_MAX_WORKERS:
-		return false;
-	default:
-		return true;
-	}
-}
-
-static __cold int io_ctx_quiesce(struct io_ring_ctx *ctx)
-{
-	long ret;
-
-	percpu_ref_kill(&ctx->refs);
-
-	/*
-	 * Drop uring mutex before waiting for references to exit. If another
-	 * thread is currently inside io_uring_enter() it might need to grab the
-	 * uring_lock to make progress. If we hold it here across the drain
-	 * wait, then we can deadlock. It's safe to drop the mutex here, since
-	 * no new references will come in after we've killed the percpu ref.
-	 */
-	mutex_unlock(&ctx->uring_lock);
-	do {
-		ret = wait_for_completion_interruptible_timeout(&ctx->ref_comp, HZ);
-		if (ret) {
-			ret = min(0L, ret);
-			break;
-		}
-		ret = io_run_task_work_sig();
-		io_req_caches_free(ctx);
-	} while (ret >= 0);
-	mutex_lock(&ctx->uring_lock);
-
-	if (ret)
-		io_refs_resurrect(&ctx->refs, &ctx->ref_comp);
-	return ret;
-}
-
 static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
 			       void __user *arg, unsigned nr_args)
 	__releases(ctx->uring_lock)
@@ -11090,12 +11018,6 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
 			return -EACCES;
 	}
 
-	if (io_register_op_must_quiesce(opcode)) {
-		ret = io_ctx_quiesce(ctx);
-		if (ret)
-			return ret;
-	}
-
 	switch (opcode) {
 	case IORING_REGISTER_BUFFERS:
 		ret = io_sqe_buffers_register(ctx, arg, nr_args, NULL);
@@ -11200,11 +11122,6 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
 		break;
 	}
 
-	if (io_register_op_must_quiesce(opcode)) {
-		/* bring the ctx back to life */
-		percpu_ref_reinit(&ctx->refs);
-		reinit_completion(&ctx->ref_comp);
-	}
 	return ret;
 }
 
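
For illustration only (not part of the patch): a minimal userspace sketch, using liburing, that exercises IORING_REGISTER_BUFFERS and IORING_UNREGISTER_BUFFERS, two of the opcodes listed in the removed io_register_op_must_quiesce(). After this series such registrations complete without quiescing the ring. The queue depth and buffer size below are arbitrary example values; build with -luring.

/* Illustrative sketch only; assumes liburing is installed. */
#include <liburing.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>

int main(void)
{
	struct io_uring ring;
	struct iovec iov;
	int ret;

	/* Arbitrary queue depth for the example. */
	ret = io_uring_queue_init(8, &ring, 0);
	if (ret < 0) {
		fprintf(stderr, "queue_init: %s\n", strerror(-ret));
		return 1;
	}

	/* One 4 KiB buffer to register; the size is an example value. */
	iov.iov_len = 4096;
	iov.iov_base = malloc(iov.iov_len);
	if (!iov.iov_base) {
		io_uring_queue_exit(&ring);
		return 1;
	}

	/* Issues io_uring_register(IORING_REGISTER_BUFFERS) under the hood. */
	ret = io_uring_register_buffers(&ring, &iov, 1);
	if (ret)
		fprintf(stderr, "register_buffers: %s\n", strerror(-ret));

	/* IORING_UNREGISTER_BUFFERS is also on the no-quiesce list. */
	io_uring_unregister_buffers(&ring);

	free(iov.iov_base);
	io_uring_queue_exit(&ring);
	return ret ? 1 : 0;
}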