iomap: Convert readahead and readpage to use a folio

Handle folios of arbitrary size instead of working in PAGE_SIZE units.
readahead_folio() decreases the page refcount for you, so this is not
quite a mechanical change.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
This commit is contained in:
Matthew Wilcox (Oracle) 2021-04-28 09:39:51 -04:00
parent 874628a2c5
commit 3aa9c659bf
1 changed file with 26 additions and 27 deletions

View File

@@ -188,8 +188,8 @@ static void iomap_read_end_io(struct bio *bio)
}
struct iomap_readpage_ctx {
struct page *cur_page;
bool cur_page_in_bio;
struct folio *cur_folio;
bool cur_folio_in_bio;
struct bio *bio;
struct readahead_control *rac;
};
@@ -252,8 +252,7 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
const struct iomap *iomap = &iter->iomap;
loff_t pos = iter->pos + offset;
loff_t length = iomap_length(iter) - offset;
struct page *page = ctx->cur_page;
struct folio *folio = page_folio(page);
struct folio *folio = ctx->cur_folio;
struct iomap_page *iop;
loff_t orig_pos = pos;
size_t poff, plen;
@@ -274,7 +273,7 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
goto done;
}
ctx->cur_page_in_bio = true;
ctx->cur_folio_in_bio = true;
if (iop)
atomic_add(plen, &iop->read_bytes_pending);
@@ -282,7 +281,7 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
if (!ctx->bio ||
bio_end_sector(ctx->bio) != sector ||
!bio_add_folio(ctx->bio, folio, plen, poff)) {
gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
gfp_t orig_gfp = gfp;
unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
@@ -321,30 +320,31 @@ done:
int
iomap_readpage(struct page *page, const struct iomap_ops *ops)
{
struct folio *folio = page_folio(page);
struct iomap_iter iter = {
.inode = page->mapping->host,
.pos = page_offset(page),
.len = PAGE_SIZE,
.inode = folio->mapping->host,
.pos = folio_pos(folio),
.len = folio_size(folio),
};
struct iomap_readpage_ctx ctx = {
.cur_page = page,
.cur_folio = folio,
};
int ret;
trace_iomap_readpage(page->mapping->host, 1);
trace_iomap_readpage(iter.inode, 1);
while ((ret = iomap_iter(&iter, ops)) > 0)
iter.processed = iomap_readpage_iter(&iter, &ctx, 0);
if (ret < 0)
SetPageError(page);
folio_set_error(folio);
if (ctx.bio) {
submit_bio(ctx.bio);
WARN_ON_ONCE(!ctx.cur_page_in_bio);
WARN_ON_ONCE(!ctx.cur_folio_in_bio);
} else {
WARN_ON_ONCE(ctx.cur_page_in_bio);
unlock_page(page);
WARN_ON_ONCE(ctx.cur_folio_in_bio);
folio_unlock(folio);
}
/*
@@ -363,15 +363,15 @@ static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
loff_t done, ret;
for (done = 0; done < length; done += ret) {
if (ctx->cur_page && offset_in_page(iter->pos + done) == 0) {
if (!ctx->cur_page_in_bio)
unlock_page(ctx->cur_page);
put_page(ctx->cur_page);
ctx->cur_page = NULL;
if (ctx->cur_folio &&
offset_in_folio(ctx->cur_folio, iter->pos + done) == 0) {
if (!ctx->cur_folio_in_bio)
folio_unlock(ctx->cur_folio);
ctx->cur_folio = NULL;
}
if (!ctx->cur_page) {
ctx->cur_page = readahead_page(ctx->rac);
ctx->cur_page_in_bio = false;
if (!ctx->cur_folio) {
ctx->cur_folio = readahead_folio(ctx->rac);
ctx->cur_folio_in_bio = false;
}
ret = iomap_readpage_iter(iter, ctx, done);
if (ret <= 0)
@@ -414,10 +414,9 @@ void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
if (ctx.bio)
submit_bio(ctx.bio);
if (ctx.cur_page) {
if (!ctx.cur_page_in_bio)
unlock_page(ctx.cur_page);
put_page(ctx.cur_page);
if (ctx.cur_folio) {
if (!ctx.cur_folio_in_bio)
folio_unlock(ctx.cur_folio);
}
}
EXPORT_SYMBOL_GPL(iomap_readahead);