io_uring: properly mark async work as bounded vs unbounded
Now that io-wq supports separating the two request lifetime types, mark the following IO as having unbounded runtimes:

- Any read/write to a non-regular file
- Any specific networked IO
- Any poll command

Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
c5def4ab84
commit
5f8fd2d3e0
|
@@ -506,6 +506,20 @@ static inline bool io_prep_async_work(struct io_kiocb *req)
|
|||
case IORING_OP_WRITEV:
|
||||
case IORING_OP_WRITE_FIXED:
|
||||
do_hashed = true;
|
||||
/* fall-through */
|
||||
case IORING_OP_READV:
|
||||
case IORING_OP_READ_FIXED:
|
||||
case IORING_OP_SENDMSG:
|
||||
case IORING_OP_RECVMSG:
|
||||
case IORING_OP_ACCEPT:
|
||||
case IORING_OP_POLL_ADD:
|
||||
/*
|
||||
* We know REQ_F_ISREG is not set on some of these
|
||||
* opcodes, but this enables us to keep the check in
|
||||
* just one place.
|
||||
*/
|
||||
if (!(req->flags & REQ_F_ISREG))
|
||||
req->work.flags |= IO_WQ_WORK_UNBOUND;
|
||||
break;
|
||||
}
|
||||
if (io_sqe_needs_user(req->submit.sqe))
|
||||
|
@@ -3745,7 +3759,7 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
|
|||
|
||||
/* Do QD, or 4 * CPUS, whatever is smallest */
|
||||
concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
|
||||
ctx->io_wq = io_wq_create(concurrency, ctx->sqo_mm, NULL);
|
||||
ctx->io_wq = io_wq_create(concurrency, ctx->sqo_mm, ctx->user);
|
||||
if (IS_ERR(ctx->io_wq)) {
|
||||
ret = PTR_ERR(ctx->io_wq);
|
||||
ctx->io_wq = NULL;
|
||||
|
|
Loading…
Reference in New Issue