btrfs: Exploit the fact that pages passed to extent_readpages are always contiguous
Currently extent_readpages (called from btrfs_readpages) always calls __extent_readpages, which tries to build a contiguous range of pages and invokes __do_contiguous_readpages once such a range is assembled.

It turns out this is unnecessary, because the generic MM code always calls the filesystem's ->readpages callback (btrfs_readpages in this case) with already contiguous pages. Armed with this knowledge, extent_readpages can be simplified by eliminating the call to __extent_readpages and calling contiguous_readpages directly. The only edge case that needs to be handled is when add_to_page_cache_lru fails. This is easy: all that is needed is to submit however many pages were successfully added to the lru. The failure can happen when a page is already in the range, in which case it does not need to be read again; in case of other errors there is nothing else we can do.

Signed-off-by: Nikolay Borisov <nborisov@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
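To make the control flow the patch arrives at easier to follow, here is a minimal user-space sketch of the batching logic: accumulate pages into a fixed pool, stop the batch on an add failure, and submit whatever was accumulated. PAGE_SIZE, POOL_SIZE, add_to_cache and the page counts below are made-up stand-ins, not kernel APIs; the real code operates on struct page and struct bio in fs/btrfs/extent_io.c.

/*
 * Minimal user-space model of the batching logic in the new
 * extent_readpages(). All names and constants here are illustrative
 * assumptions, not the kernel implementation.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL
#define POOL_SIZE 16

/* Stand-in for add_to_page_cache_lru(): pretend page index 5 is
 * already cached, so adding it "fails" and breaks the batch. */
static bool add_to_cache(uint64_t index)
{
	return index != 5;
}

int main(void)
{
	uint64_t pool[POOL_SIZE];
	uint64_t index = 0;
	const uint64_t nr_total = 20;	/* pages handed in by readahead */

	while (index < nr_total) {
		uint64_t contig_end = 0;
		int nr = 0;

		/* Fill the pool; readahead hands us contiguous pages, so
		 * only an add failure can break the run. */
		while (nr < POOL_SIZE && index < nr_total) {
			uint64_t off = index * PAGE_SIZE;

			index++;	/* page consumed either way */
			if (!add_to_cache(off / PAGE_SIZE))
				break;	/* submit what we have so far */

			pool[nr++] = off;
			contig_end = off + PAGE_SIZE - 1;
		}

		if (nr) {
			uint64_t contig_start = pool[0];

			/* Mirrors the ASSERT in the patch: the batch must
			 * be a single contiguous byte range. */
			assert(contig_start + nr * PAGE_SIZE - 1 == contig_end);
			printf("submit %d pages: [%llu, %llu]\n", nr,
			       (unsigned long long)contig_start,
			       (unsigned long long)contig_end);
		}
	}
	return 0;
}

Note the switch from continue to break in the failure path of the actual patch: skipping a failed page and continuing would leave a hole in the pool and violate the contiguity assertion, so the batch is submitted immediately instead and a new one is started.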
commit e65ef21ed8
parent ed1b4ed79d
@@ -3076,7 +3076,7 @@ out:
 	return ret;
 }
 
-static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
+static inline void contiguous_readpages(struct extent_io_tree *tree,
 					     struct page *pages[], int nr_pages,
 					     u64 start, u64 end,
 					     struct extent_map **em_cached,
@@ -3107,46 +3107,6 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
 	}
 }
 
-static void __extent_readpages(struct extent_io_tree *tree,
-			       struct page *pages[],
-			       int nr_pages,
-			       struct extent_map **em_cached,
-			       struct bio **bio, unsigned long *bio_flags,
-			       u64 *prev_em_start)
-{
-	u64 start = 0;
-	u64 end = 0;
-	u64 page_start;
-	int index;
-	int first_index = 0;
-
-	for (index = 0; index < nr_pages; index++) {
-		page_start = page_offset(pages[index]);
-		if (!end) {
-			start = page_start;
-			end = start + PAGE_SIZE - 1;
-			first_index = index;
-		} else if (end + 1 == page_start) {
-			end += PAGE_SIZE;
-		} else {
-			__do_contiguous_readpages(tree, &pages[first_index],
-						  index - first_index, start,
-						  end, em_cached,
-						  bio, bio_flags,
-						  prev_em_start);
-			start = page_start;
-			end = start + PAGE_SIZE - 1;
-			first_index = index;
-		}
-	}
-
-	if (end)
-		__do_contiguous_readpages(tree, &pages[first_index],
-					  index - first_index, start,
-					  end, em_cached, bio,
-					  bio_flags, prev_em_start);
-}
-
 static int __extent_read_full_page(struct extent_io_tree *tree,
 				   struct page *page,
 				   get_extent_t *get_extent,
@@ -4109,6 +4069,8 @@ int extent_readpages(struct address_space *mapping, struct list_head *pages,
 	u64 prev_em_start = (u64)-1;
 
 	while (!list_empty(pages)) {
+		u64 contig_end = 0;
+
 		for (nr = 0; nr < ARRAY_SIZE(pagepool) && !list_empty(pages);) {
 			struct page *page = lru_to_page(pages);
 
@@ -4117,14 +4079,22 @@ int extent_readpages(struct address_space *mapping, struct list_head *pages,
 			if (add_to_page_cache_lru(page, mapping, page->index,
 						readahead_gfp_mask(mapping))) {
 				put_page(page);
-				continue;
+				break;
 			}
 
 			pagepool[nr++] = page;
+			contig_end = page_offset(page) + PAGE_SIZE - 1;
 		}
 
-		__extent_readpages(tree, pagepool, nr, &em_cached, &bio,
-				   &bio_flags, &prev_em_start);
+		if (nr) {
+			u64 contig_start = page_offset(pagepool[0]);
+
+			ASSERT(contig_start + nr * PAGE_SIZE - 1 == contig_end);
+
+			contiguous_readpages(tree, pagepool, nr, contig_start,
+				     contig_end, &em_cached, &bio, &bio_flags,
+				     &prev_em_start);
+		}
+	}
 
 	if (em_cached)
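As a quick sanity check of the ASSERT above (assuming 4 KiB pages): a full 16-page pool starting at file offset 0 gives contig_start + nr * PAGE_SIZE - 1 = 0 + 16 * 4096 - 1 = 65535, which matches the contig_end recorded for the last page, 15 * 4096 + 4095 = 65535.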
|