io_uring: let io_setup_async_rw take care of iovec
Ownership of the iovec is now handed over to io_setup_async_rw(), which either sets up the request's async context correctly or frees the iovec itself on error. This makes life a bit easier at the call sites. Signed-off-by: Pavel Begunkov <asml.silence@gmail.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
1a2cc0ce8d
commit
6bf985dc50
|
@ -2721,11 +2721,7 @@ static bool io_resubmit_prep(struct io_kiocb *req)
|
||||||
ret = io_import_iovec(rw, req, &iovec, &iter, false);
|
ret = io_import_iovec(rw, req, &iovec, &iter, false);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
return false;
|
return false;
|
||||||
ret = io_setup_async_rw(req, iovec, inline_vecs, &iter, false);
|
return !io_setup_async_rw(req, iovec, inline_vecs, &iter, false);
|
||||||
if (!ret)
|
|
||||||
return true;
|
|
||||||
kfree(iovec);
|
|
||||||
return false;
|
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
@ -3366,8 +3362,10 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
|
||||||
if (!force && !io_op_defs[req->opcode].needs_async_data)
|
if (!force && !io_op_defs[req->opcode].needs_async_data)
|
||||||
return 0;
|
return 0;
|
||||||
if (!req->async_data) {
|
if (!req->async_data) {
|
||||||
if (__io_alloc_async_data(req))
|
if (__io_alloc_async_data(req)) {
|
||||||
|
kfree(iovec);
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
}
|
||||||
|
|
||||||
io_req_map_rw(req, iovec, fast_iov, iter);
|
io_req_map_rw(req, iovec, fast_iov, iter);
|
||||||
}
|
}
|
||||||
|
@ -3528,9 +3526,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
|
||||||
/* If the file doesn't support async, just async punt */
|
/* If the file doesn't support async, just async punt */
|
||||||
if (force_nonblock && !io_file_supports_async(req->file, READ)) {
|
if (force_nonblock && !io_file_supports_async(req->file, READ)) {
|
||||||
ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
|
ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
|
||||||
if (!ret)
|
return ret ?: -EAGAIN;
|
||||||
return -EAGAIN;
|
|
||||||
goto out_free;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size);
|
ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size);
|
||||||
|
@ -3565,10 +3561,9 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
|
||||||
}
|
}
|
||||||
|
|
||||||
ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
|
ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
|
||||||
if (ret2) {
|
if (ret2)
|
||||||
ret = ret2;
|
return ret2;
|
||||||
goto out_free;
|
|
||||||
}
|
|
||||||
rw = req->async_data;
|
rw = req->async_data;
|
||||||
/* it's copied and will be cleaned with ->io */
|
/* it's copied and will be cleaned with ->io */
|
||||||
iovec = NULL;
|
iovec = NULL;
|
||||||
|
@ -3703,8 +3698,7 @@ copy_iov:
|
||||||
/* some cases will consume bytes even on error returns */
|
/* some cases will consume bytes even on error returns */
|
||||||
iov_iter_revert(iter, io_size - iov_iter_count(iter));
|
iov_iter_revert(iter, io_size - iov_iter_count(iter));
|
||||||
ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
|
ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
|
||||||
if (!ret)
|
return ret ?: -EAGAIN;
|
||||||
return -EAGAIN;
|
|
||||||
}
|
}
|
||||||
out_free:
|
out_free:
|
||||||
/* it's reportedly faster than delegating the null check to kfree() */
|
/* it's reportedly faster than delegating the null check to kfree() */
|
||||||
|
|
Loading…
Reference in New Issue