mpage: split submit_bio and bio end_io handler for reads and writes

Split the submit_bio() wrapper and the bio end_io handler into separate read
and write variants, similar to what other aops do.

This is a prep patch before we convert end_io handlers to use folios.
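
To illustrate the resulting calling convention (sketch only, not part of
the diff below): callers that previously submitted both directions through
a single helper,

	bio = mpage_bio_submit(bio);	/* end_io derived the direction via bio_op() */

now pick the direction explicitly at submission time,

	bio = mpage_bio_submit_read(bio);	/* completes in mpage_read_end_io() */
	bio = mpage_bio_submit_write(bio);	/* completes in mpage_write_end_io() */

so each end_io handler can pass a constant REQ_OP_READ or REQ_OP_WRITE to
page_endio() instead of re-deriving it from the bio.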

Link: https://lkml.kernel.org/r/20230411122920.30134-3-p.raghav@samsung.com
Signed-off-by: Pankaj Raghav <p.raghav@samsung.com>
Suggested-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Luis Chamberlain <mcgrof@kernel.org>
Cc: Martin Brandenburg <martin@omnibond.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mike Marshall <hubcap@omnibond.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
 fs/mpage.c | 54 ++++++++++++++++++++++++++++++++++++------------------
 1 file changed, 36 insertions(+), 18 deletions(-)

diff --git a/fs/mpage.c b/fs/mpage.c
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -43,23 +43,41 @@
  * status of that page is hard.  See end_buffer_async_read() for the details.
  * There is no point in duplicating all that complexity.
  */
-static void mpage_end_io(struct bio *bio)
+static void mpage_read_end_io(struct bio *bio)
 {
 	struct bio_vec *bv;
 	struct bvec_iter_all iter_all;
 
-	bio_for_each_segment_all(bv, bio, iter_all) {
-		struct page *page = bv->bv_page;
-		page_endio(page, bio_op(bio),
+	bio_for_each_segment_all(bv, bio, iter_all)
+		page_endio(bv->bv_page, REQ_OP_READ,
 			   blk_status_to_errno(bio->bi_status));
-	}
 
 	bio_put(bio);
 }
 
-static struct bio *mpage_bio_submit(struct bio *bio)
+static void mpage_write_end_io(struct bio *bio)
 {
-	bio->bi_end_io = mpage_end_io;
+	struct bio_vec *bv;
+	struct bvec_iter_all iter_all;
+
+	bio_for_each_segment_all(bv, bio, iter_all)
+		page_endio(bv->bv_page, REQ_OP_WRITE,
+			   blk_status_to_errno(bio->bi_status));
+
+	bio_put(bio);
+}
+
+static struct bio *mpage_bio_submit_read(struct bio *bio)
+{
+	bio->bi_end_io = mpage_read_end_io;
+	guard_bio_eod(bio);
+	submit_bio(bio);
+	return NULL;
+}
+
+static struct bio *mpage_bio_submit_write(struct bio *bio)
+{
+	bio->bi_end_io = mpage_write_end_io;
 	guard_bio_eod(bio);
 	submit_bio(bio);
 	return NULL;
@@ -265,7 +283,7 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
 	 * This folio will go to BIO.  Do we need to send this BIO off first?
 	 */
 	if (args->bio && (args->last_block_in_bio != blocks[0] - 1))
-		args->bio = mpage_bio_submit(args->bio);
+		args->bio = mpage_bio_submit_read(args->bio);
 
 alloc_new:
 	if (args->bio == NULL) {
@@ -278,7 +296,7 @@ alloc_new:
 
 	length = first_hole << blkbits;
 	if (!bio_add_folio(args->bio, folio, length, 0)) {
-		args->bio = mpage_bio_submit(args->bio);
+		args->bio = mpage_bio_submit_read(args->bio);
 		goto alloc_new;
 	}
 
@@ -286,7 +304,7 @@ alloc_new:
 	nblocks = map_bh->b_size >> blkbits;
 	if ((buffer_boundary(map_bh) && relative_block == nblocks) ||
 	    (first_hole != blocks_per_page))
-		args->bio = mpage_bio_submit(args->bio);
+		args->bio = mpage_bio_submit_read(args->bio);
 	else
 		args->last_block_in_bio = blocks[blocks_per_page - 1];
 out:
@@ -294,7 +312,7 @@ out:
 
 confused:
 	if (args->bio)
-		args->bio = mpage_bio_submit(args->bio);
+		args->bio = mpage_bio_submit_read(args->bio);
 	if (!folio_test_uptodate(folio))
 		block_read_full_folio(folio, args->get_block);
 	else
@@ -356,7 +374,7 @@ void mpage_readahead(struct readahead_control *rac, get_block_t get_block)
 		args.bio = do_mpage_readpage(&args);
 	}
 	if (args.bio)
-		mpage_bio_submit(args.bio);
+		mpage_bio_submit_read(args.bio);
 }
 EXPORT_SYMBOL(mpage_readahead);
 
@@ -373,7 +391,7 @@ int mpage_read_folio(struct folio *folio, get_block_t get_block)
 
 	args.bio = do_mpage_readpage(&args);
 	if (args.bio)
-		mpage_bio_submit(args.bio);
+		mpage_bio_submit_read(args.bio);
 	return 0;
 }
 EXPORT_SYMBOL(mpage_read_folio);
@@ -577,7 +595,7 @@ page_is_mapped:
 	 * This page will go to BIO.  Do we need to send this BIO off first?
 	 */
 	if (bio && mpd->last_block_in_bio != blocks[0] - 1)
-		bio = mpage_bio_submit(bio);
+		bio = mpage_bio_submit_write(bio);
 
 alloc_new:
 	if (bio == NULL) {
@@ -596,7 +614,7 @@ alloc_new:
 	wbc_account_cgroup_owner(wbc, &folio->page, folio_size(folio));
 	length = first_unmapped << blkbits;
 	if (!bio_add_folio(bio, folio, length, 0)) {
-		bio = mpage_bio_submit(bio);
+		bio = mpage_bio_submit_write(bio);
 		goto alloc_new;
 	}
 
@@ -606,7 +624,7 @@ alloc_new:
 	folio_start_writeback(folio);
 	folio_unlock(folio);
 	if (boundary || (first_unmapped != blocks_per_page)) {
-		bio = mpage_bio_submit(bio);
+		bio = mpage_bio_submit_write(bio);
 		if (boundary_block) {
 			write_boundary_block(boundary_bdev,
 					boundary_block, 1 << blkbits);
@@ -618,7 +636,7 @@ alloc_new:
 
 confused:
 	if (bio)
-		bio = mpage_bio_submit(bio);
+		bio = mpage_bio_submit_write(bio);
 
 	/*
 	 * The caller has a ref on the inode, so *mapping is stable
@@ -652,7 +670,7 @@ mpage_writepages(struct address_space *mapping,
 	blk_start_plug(&plug);
 	ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd);
 	if (mpd.bio)
-		mpage_bio_submit(mpd.bio);
+		mpage_bio_submit_write(mpd.bio);
 	blk_finish_plug(&plug);
 	return ret;
 }