io_uring: remove inflight batching in free_many()
io_free_req_many() is used only for iopoll requests, i.e. reads/writes.
Hence there is no need to batch inflight unhooking. For safety, the
unhooking is done by io_dismantle_req(), which replaces
__io_req_aux_free() and looks more solid and cleaner.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit e6543a816e
parent 8c9cb6cd9a
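For context, the free path reads roughly as follows after this patch. This is a condensed sketch assembled from the hunks below, not the complete fs/io_uring.c source; elided kernel context is marked with "...", and all identifiers are taken from the diff.

/*
 * Condensed view of the free path after this patch: io_dismantle_req()
 * always unhooks an inflight request itself, so io_free_req_many()
 * no longer batches that step separately.
 */
static void io_dismantle_req(struct io_kiocb *req)
{
	if (req->flags & REQ_F_NEED_CLEANUP)
		io_cleanup_req(req);
	/* ... other per-request resources are dropped here ... */
	io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
	__io_put_req_task(req);
	io_req_work_drop_env(req);

	if (req->flags & REQ_F_INFLIGHT) {
		/*
		 * Unhook req->inflight_entry under ctx->inflight_lock and
		 * wake ctx->inflight_wait waiters (see the hunks below).
		 */
	}
}

static void __io_free_req(struct io_kiocb *req)
{
	io_dismantle_req(req);
	percpu_ref_put(&req->ctx->refs);
	if (likely(!io_is_fallback_req(req)))
		kmem_cache_free(req_cachep, req);
	/* ... */
}

static void io_free_req_many(struct io_ring_ctx *ctx, struct req_batch *rb)
{
	if (!rb->to_free)
		return;
	if (rb->need_iter) {
		int i;

		for (i = 0; i < rb->to_free; i++)
			io_dismantle_req(rb->reqs[i]);
	}
	kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs);
	percpu_ref_put_many(&ctx->refs, rb->to_free);
	rb->to_free = rb->need_iter = 0;
}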
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1504,7 +1504,7 @@ static inline void io_put_file(struct io_kiocb *req, struct file *file,
 		fput(file);
 }
 
-static void __io_req_aux_free(struct io_kiocb *req)
+static void io_dismantle_req(struct io_kiocb *req)
 {
 	if (req->flags & REQ_F_NEED_CLEANUP)
 		io_cleanup_req(req);
@@ -1514,11 +1514,6 @@ static void __io_req_aux_free(struct io_kiocb *req)
 	io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
 	__io_put_req_task(req);
 	io_req_work_drop_env(req);
-}
-
-static void __io_free_req(struct io_kiocb *req)
-{
-	__io_req_aux_free(req);
 
 	if (req->flags & REQ_F_INFLIGHT) {
 		struct io_ring_ctx *ctx = req->ctx;
@@ -1530,7 +1525,11 @@ static void __io_free_req(struct io_kiocb *req)
 			wake_up(&ctx->inflight_wait);
 		spin_unlock_irqrestore(&ctx->inflight_lock, flags);
 	}
+}
 
+static void __io_free_req(struct io_kiocb *req)
+{
+	io_dismantle_req(req);
 	percpu_ref_put(&req->ctx->refs);
 	if (likely(!io_is_fallback_req(req)))
 		kmem_cache_free(req_cachep, req);
@@ -1549,35 +1548,11 @@ static void io_free_req_many(struct io_ring_ctx *ctx, struct req_batch *rb)
 	if (!rb->to_free)
 		return;
 	if (rb->need_iter) {
-		int i, inflight = 0;
-		unsigned long flags;
-
-		for (i = 0; i < rb->to_free; i++) {
-			struct io_kiocb *req = rb->reqs[i];
-
-			if (req->flags & REQ_F_INFLIGHT)
-				inflight++;
-			__io_req_aux_free(req);
-		}
-		if (!inflight)
-			goto do_free;
-
-		spin_lock_irqsave(&ctx->inflight_lock, flags);
-		for (i = 0; i < rb->to_free; i++) {
-			struct io_kiocb *req = rb->reqs[i];
-
-			if (req->flags & REQ_F_INFLIGHT) {
-				list_del(&req->inflight_entry);
-				if (!--inflight)
-					break;
-			}
-		}
-		spin_unlock_irqrestore(&ctx->inflight_lock, flags);
-
-		if (waitqueue_active(&ctx->inflight_wait))
-			wake_up(&ctx->inflight_wait);
-	}
-do_free:
+		int i;
+
+		for (i = 0; i < rb->to_free; i++)
+			io_dismantle_req(rb->reqs[i]);
+	}
 	kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs);
 	percpu_ref_put_many(&ctx->refs, rb->to_free);
 	rb->to_free = rb->need_iter = 0;