io_uring: dont remove file from msg_ring reqs

commit ef0ec1ad03 upstream.

We should not be messing with req->file outside of core paths. Clearing
it makes msg_ring non reentrant, i.e. luckily io_msg_send_fd() fails the
request on failed io_double_lock_ctx() but clearly was originally
intended to do retries instead.

Cc: stable@vger.kernel.org
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/e5ac9edadb574fe33f6d727cb8f14ce68262a684.1670384893.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
Pavel Begunkov 2022-12-07 03:53:26 +00:00 committed by Greg Kroah-Hartman
parent 16225abb3c
commit f6a5cada4f
4 changed files with 10 additions and 5 deletions

View File

@ -1757,7 +1757,7 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
 	return ret;
 	/* If the op doesn't have a file, we're not polling for it */
-	if ((req->ctx->flags & IORING_SETUP_IOPOLL) && req->file)
+	if ((req->ctx->flags & IORING_SETUP_IOPOLL) && def->iopoll_queue)
 		io_iopoll_req_issued(req, issue_flags);
 	return 0;

View File

@ -167,9 +167,5 @@ done:
 	if (ret < 0)
 		req_set_fail(req);
 	io_req_set_res(req, ret, 0);
-	/* put file to avoid an attempt to IOPOLL the req */
-	if (!(req->flags & REQ_F_FIXED_FILE))
-		io_put_file(req->file);
-	req->file = NULL;
 	return IOU_OK;
 }

View File

@ -63,6 +63,7 @@ const struct io_op_def io_op_defs[] = {
 		.audit_skip		= 1,
 		.ioprio			= 1,
 		.iopoll			= 1,
+		.iopoll_queue		= 1,
 		.async_size		= sizeof(struct io_async_rw),
 		.name			= "READV",
 		.prep			= io_prep_rw,
@ -80,6 +81,7 @@ const struct io_op_def io_op_defs[] = {
 		.audit_skip		= 1,
 		.ioprio			= 1,
 		.iopoll			= 1,
+		.iopoll_queue		= 1,
 		.async_size		= sizeof(struct io_async_rw),
 		.name			= "WRITEV",
 		.prep			= io_prep_rw,
@ -103,6 +105,7 @@ const struct io_op_def io_op_defs[] = {
 		.audit_skip		= 1,
 		.ioprio			= 1,
 		.iopoll			= 1,
+		.iopoll_queue		= 1,
 		.async_size		= sizeof(struct io_async_rw),
 		.name			= "READ_FIXED",
 		.prep			= io_prep_rw,
@ -118,6 +121,7 @@ const struct io_op_def io_op_defs[] = {
 		.audit_skip		= 1,
 		.ioprio			= 1,
 		.iopoll			= 1,
+		.iopoll_queue		= 1,
 		.async_size		= sizeof(struct io_async_rw),
 		.name			= "WRITE_FIXED",
 		.prep			= io_prep_rw,
@ -277,6 +281,7 @@ const struct io_op_def io_op_defs[] = {
 		.audit_skip		= 1,
 		.ioprio			= 1,
 		.iopoll			= 1,
+		.iopoll_queue		= 1,
 		.async_size		= sizeof(struct io_async_rw),
 		.name			= "READ",
 		.prep			= io_prep_rw,
@ -292,6 +297,7 @@ const struct io_op_def io_op_defs[] = {
 		.audit_skip		= 1,
 		.ioprio			= 1,
 		.iopoll			= 1,
+		.iopoll_queue		= 1,
 		.async_size		= sizeof(struct io_async_rw),
 		.name			= "WRITE",
 		.prep			= io_prep_rw,
@ -481,6 +487,7 @@ const struct io_op_def io_op_defs[] = {
 		.plug			= 1,
 		.name			= "URING_CMD",
 		.iopoll			= 1,
+		.iopoll_queue		= 1,
 		.async_size		= uring_cmd_pdu_size(1),
 		.prep			= io_uring_cmd_prep,
 		.issue			= io_uring_cmd,

View File

@ -25,6 +25,8 @@ struct io_op_def {
 	unsigned		ioprio : 1;
 	/* supports iopoll */
 	unsigned		iopoll : 1;
+	/* have to be put into the iopoll list */
+	unsigned		iopoll_queue : 1;
 	/* opcode specific path will handle ->async_data allocation if needed */
 	unsigned		manual_alloc : 1;
 	/* size of async data needed, if any */