io_uring: encapsulate rw state

Add a new struct io_rw_state that stores all the iov-related bits: the fast
iov array, the iterator and the saved iterator state. Not much changes here;
simply convert struct io_async_rw to use it.
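
For reference, the resulting layout looks roughly as follows (condensed from
the first hunk below; field alignment in upstream fs/io_uring.c may differ
slightly):

struct io_rw_state {
	struct iovec			fast_iov[UIO_FASTIOV];
	struct iov_iter			iter;
	struct iov_iter_state		iter_state;
};

struct io_async_rw {
	struct io_rw_state		s;
	const struct iovec		*free_iovec;
	size_t				bytes_done;
	struct wait_page_queue		wpq;
};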

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/e8245ffcb568b228a009ec1eb79c993c813679f1.1634144845.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Authored by Pavel Begunkov on 2021-10-14 16:10:15 +01:00; committed by Jens Axboe
parent 258f3a7f84
commit 538941e268
1 changed file with 23 additions and 19 deletions


@@ -694,11 +694,15 @@ struct io_async_msghdr {
 	struct sockaddr_storage		addr;
 };
 
-struct io_async_rw {
+struct io_rw_state {
 	struct iovec			fast_iov[UIO_FASTIOV];
-	const struct iovec		*free_iovec;
 	struct iov_iter			iter;
 	struct iov_iter_state		iter_state;
+};
+
+struct io_async_rw {
+	struct io_rw_state		s;
+	const struct iovec		*free_iovec;
 	size_t				bytes_done;
 	struct wait_page_queue		wpq;
 };
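
The remaining hunks are mechanical renames: every access to rw->iter,
rw->iter_state and rw->fast_iov becomes rw->s.iter, rw->s.iter_state and
rw->s.fast_iov. For example, from the io_resubmit_prep() hunk directly below:

	/* before */
	iov_iter_restore(&rw->iter, &rw->iter_state);
	/* after */
	iov_iter_restore(&rw->s.iter, &rw->s.iter_state);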
@@ -2596,7 +2600,7 @@ static bool io_resubmit_prep(struct io_kiocb *req)
 
 	if (!req_has_async_data(req))
 		return !io_req_prep_async(req);
-	iov_iter_restore(&rw->iter, &rw->iter_state);
+	iov_iter_restore(&rw->s.iter, &rw->s.iter_state);
 	return true;
 }
 
@@ -3259,7 +3263,7 @@ static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
 {
 	struct io_async_rw *rw = req->async_data;
 
-	memcpy(&rw->iter, iter, sizeof(*iter));
+	memcpy(&rw->s.iter, iter, sizeof(*iter));
 	rw->free_iovec = iovec;
 	rw->bytes_done = 0;
 	/* can only be fixed buffers, no need to do anything */
@@ -3268,13 +3272,13 @@ static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
 	if (!iovec) {
 		unsigned iov_off = 0;
 
-		rw->iter.iov = rw->fast_iov;
+		rw->s.iter.iov = rw->s.fast_iov;
 		if (iter->iov != fast_iov) {
 			iov_off = iter->iov - fast_iov;
-			rw->iter.iov += iov_off;
+			rw->s.iter.iov += iov_off;
 		}
-		if (rw->fast_iov != fast_iov)
-			memcpy(rw->fast_iov + iov_off, fast_iov + iov_off,
+		if (rw->s.fast_iov != fast_iov)
+			memcpy(rw->s.fast_iov + iov_off, fast_iov + iov_off,
 			       sizeof(struct iovec) * iter->nr_segs);
 	} else {
 		req->flags |= REQ_F_NEED_CLEANUP;
@@ -3309,7 +3313,7 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
 		io_req_map_rw(req, iovec, fast_iov, iter);
 		iorw = req->async_data;
 		/* we've copied and mapped the iter, ensure state is saved */
-		iov_iter_save_state(&iorw->iter, &iorw->iter_state);
+		iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state);
 	}
 	return 0;
 }
@@ -3317,10 +3321,10 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
 static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
 {
 	struct io_async_rw *iorw = req->async_data;
-	struct iovec *iov = iorw->fast_iov;
+	struct iovec *iov = iorw->s.fast_iov;
 	int ret;
 
-	ret = io_import_iovec(rw, req, &iov, &iorw->iter, false);
+	ret = io_import_iovec(rw, req, &iov, &iorw->s.iter, false);
 	if (unlikely(ret < 0))
 		return ret;
 
@@ -3328,7 +3332,7 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
 	iorw->free_iovec = iov;
 	if (iov)
 		req->flags |= REQ_F_NEED_CLEANUP;
-	iov_iter_save_state(&iorw->iter, &iorw->iter_state);
+	iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state);
 	return 0;
 }
 
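
The save/restore pairing these call sites rely on, reduced to an illustrative
sketch (not verbatim upstream code; the example_* function names are made up,
but it reuses only identifiers already visible in this diff): the iterator is
imported and snapshotted once at prep time, and rewound from that snapshot
whenever the request is retried.

/* Illustrative sketch, not verbatim upstream code. */
static int example_prep(struct io_kiocb *req, int rw)
{
	struct io_async_rw *iorw = req->async_data;
	struct iovec *iov = iorw->s.fast_iov;
	int ret;

	/* Fill the embedded iterator from the user-supplied iovec... */
	ret = io_import_iovec(rw, req, &iov, &iorw->s.iter, false);
	if (ret < 0)
		return ret;
	/* ...and snapshot it so a later retry can rewind it cheaply. */
	iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state);
	return 0;
}

static void example_retry(struct io_kiocb *req)
{
	struct io_async_rw *rw = req->async_data;

	/* Rewind the embedded iterator to the snapshot taken at prep time. */
	iov_iter_restore(&rw->s.iter, &rw->s.iter_state);
}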
@@ -3438,8 +3442,8 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
 
 	if (req_has_async_data(req)) {
 		rw = req->async_data;
-		iter = &rw->iter;
-		state = &rw->iter_state;
+		iter = &rw->s.iter;
+		state = &rw->s.iter_state;
 		/*
 		 * We come here from an earlier attempt, restore our state to
 		 * match in case it doesn't. It's cheap enough that we don't
@@ -3510,9 +3514,9 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
 	 * Now use our persistent iterator and state, if we aren't already.
 	 * We've restored and mapped the iter to match.
 	 */
-	if (iter != &rw->iter) {
-		iter = &rw->iter;
-		state = &rw->iter_state;
+	if (iter != &rw->s.iter) {
+		iter = &rw->s.iter;
+		state = &rw->s.iter_state;
 	}
 
 	do {
@@ -3574,8 +3578,8 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
 
 	if (req_has_async_data(req)) {
 		rw = req->async_data;
-		iter = &rw->iter;
-		state = &rw->iter_state;
+		iter = &rw->s.iter;
+		state = &rw->s.iter_state;
 		iov_iter_restore(iter, state);
 		iovec = NULL;
 	} else {