block: convert to advancing variants of iov_iter_get_pages{,_alloc}()

... doing revert if we end up not using some pages

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Author: Al Viro <viro@zeniv.linux.org.uk>
Date:   2022-06-09 10:37:57 -04:00
commit  480cb846c2
parent  1ef255e257

2 changed files with 17 additions and 13 deletions
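
The advancing variants change the calling convention: iov_iter_get_pages2() and iov_iter_get_pages_alloc2() move the iterator forward by the number of bytes they return, so the caller no longer advances it over the part it consumed; instead it walks the iterator back over whatever it ends up not using. A rough sketch of that pattern, not taken from the patch (pin_some() and the used_limit cap are invented names for illustration):

#include <linux/uio.h>

/*
 * Sketch only: pin up to @maxpages pages, but pretend the caller can
 * consume at most @used_limit bytes of them, as a bio might when it
 * runs out of vecs or hits an alignment limit.
 */
static ssize_t pin_some(struct iov_iter *iter, struct page **pages,
			unsigned int maxpages, size_t used_limit)
{
	size_t offset;
	ssize_t size;

	/* advances @iter by however many bytes it pinned */
	size = iov_iter_get_pages2(iter, pages, SIZE_MAX, maxpages, &offset);
	if (size <= 0)
		return size ? size : -EFAULT;

	if ((size_t)size > used_limit) {
		/* hand the unused tail back instead of advancing later */
		iov_iter_revert(iter, size - used_limit);
		size = used_limit;
	}
	return size;
}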

block/bio.c

@@ -1200,7 +1200,7 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 	struct page **pages = (struct page **)bv;
 	ssize_t size, left;
 	unsigned len, i = 0;
-	size_t offset;
+	size_t offset, trim;
 	int ret = 0;
 
 	/*
@@ -1218,16 +1218,19 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 	 * result to ensure the bio's total size is correct. The remainder of
 	 * the iov data will be picked up in the next bio iteration.
 	 */
-	size = iov_iter_get_pages(iter, pages, UINT_MAX - bio->bi_iter.bi_size,
+	size = iov_iter_get_pages2(iter, pages, UINT_MAX - bio->bi_iter.bi_size,
 				  nr_pages, &offset);
-	if (size > 0) {
-		nr_pages = DIV_ROUND_UP(offset + size, PAGE_SIZE);
-		size = ALIGN_DOWN(size, bdev_logical_block_size(bio->bi_bdev));
-	} else
-		nr_pages = 0;
+	if (unlikely(size <= 0))
+		return size ? size : -EFAULT;
 
-	if (unlikely(size <= 0)) {
-		ret = size ? size : -EFAULT;
+	nr_pages = DIV_ROUND_UP(offset + size, PAGE_SIZE);
+
+	trim = size & (bdev_logical_block_size(bio->bi_bdev) - 1);
+	iov_iter_revert(iter, trim);
+
+	size -= trim;
+	if (unlikely(!size)) {
+		ret = -EFAULT;
 		goto out;
 	}
 
@@ -1246,7 +1249,7 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 		offset = 0;
 	}
 
-	iov_iter_advance(iter, size - left);
+	iov_iter_revert(iter, left);
 out:
 	while (i < nr_pages)
 		put_page(pages[i++]);
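
For a concrete feel of the trim step above (all numbers invented): with a 512-byte logical block size and 5000 pinned bytes, trim = 5000 & 511 = 392, so the bio keeps 4608 bytes and the iterator is reverted by 392. A standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned long lbs = 512;               /* e.g. bdev_logical_block_size() */
	unsigned long size = 5000;             /* bytes returned by iov_iter_get_pages2() */
	unsigned long trim = size & (lbs - 1); /* remainder modulo a power-of-two block size */

	/* prints: keep 4608, revert 392 */
	printf("keep %lu, revert %lu\n", size - trim, trim);
	return 0;
}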

block/blk-map.c

@@ -254,7 +254,7 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
 		size_t offs, added = 0;
 		int npages;
 
-		bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs);
+		bytes = iov_iter_get_pages_alloc2(iter, &pages, LONG_MAX, &offs);
 		if (unlikely(bytes <= 0)) {
 			ret = bytes ? bytes : -EFAULT;
 			goto out_unmap;
@@ -284,7 +284,6 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
 				bytes -= n;
 				offs = 0;
 			}
-			iov_iter_advance(iter, added);
 		}
 		/*
 		 * release the pages we didn't map into the bio, if any
@@ -293,9 +292,11 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
 			put_page(pages[j++]);
 		kvfree(pages);
 		/* couldn't stuff something into bio? */
-		if (bytes)
+		if (bytes) {
+			iov_iter_revert(iter, bytes);
 			break;
+		}
 	}
 
 	ret = blk_rq_append_bio(rq, bio);
 	if (ret)