f2fs: introduce __get_node_page to reuse common code

There is duplicated code between get_node_page and get_node_page_ra.
Introduce __get_node_page to hold the common parts of the two, and
reimplement get_node_page and get_node_page_ra as thin wrappers around
__get_node_page.

Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
Author: Chao Yu <chao2.yu@samsung.com>, 2016-01-05 16:52:46 +08:00
Committed by: Jaegeuk Kim
parent e84587250a
commit 0e022ea8fc
1 changed file with 35 additions and 53 deletions
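
For context, the refactor above follows a common C pattern: move the shared logic into one internal helper that takes the superset of arguments, and turn the public entry points into thin wrappers. A minimal standalone sketch of that pattern, with illustrative names only (this is not the f2fs code):

#include <stdio.h>

/* Internal helper: 'parent' is optional; NULL means skip the extra step. */
static int __lookup(int id, const int *parent, int start)
{
	int extra = parent ? parent[start] : 0;	/* stands in for the readahead step */

	return id + extra;
}

/* Plain variant: no parent, no extra work. */
static int lookup(int id)
{
	return __lookup(id, NULL, 0);
}

/* Extended variant: derives its id from the parent, then reuses the helper. */
static int lookup_ra(const int *parent, int start)
{
	return __lookup(parent[start], parent, start);
}

int main(void)
{
	const int parent[] = { 10, 20, 30 };

	printf("%d %d\n", lookup(5), lookup_ra(parent, 1));
	return 0;
}

The diff below applies the same shape: __get_node_page() carries the optional parent/start arguments, get_node_page() passes NULL, and get_node_page_ra() fills them in.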

@@ -1060,7 +1060,31 @@ void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
 	f2fs_put_page(apage, err ? 1 : 0);
 }
 
-struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
+/*
+ * readahead MAX_RA_NODE number of node pages.
+ */
+void ra_node_pages(struct page *parent, int start)
+{
+	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
+	struct blk_plug plug;
+	int i, end;
+	nid_t nid;
+
+	blk_start_plug(&plug);
+
+	/* Then, try readahead for siblings of the desired node */
+	end = start + MAX_RA_NODE;
+	end = min(end, NIDS_PER_BLOCK);
+	for (i = start; i < end; i++) {
+		nid = get_nid(parent, i, false);
+		ra_node_page(sbi, nid);
+	}
+
+	blk_finish_plug(&plug);
+}
+
+struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
+					struct page *parent, int start)
 {
 	struct page *page;
 	int err;
@@ -1081,6 +1105,9 @@ repeat:
 		goto page_hit;
 	}
 
+	if (parent)
+		ra_node_pages(parent, start + 1);
+
 	lock_page(page);
 
 	if (unlikely(!PageUptodate(page))) {
@@ -1096,62 +1123,17 @@ page_hit:
 	return page;
 }
 
-/*
- * Return a locked page for the desired node page.
- * And, readahead MAX_RA_NODE number of node pages.
- */
+struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
+{
+	return __get_node_page(sbi, nid, NULL, 0);
+}
+
 struct page *get_node_page_ra(struct page *parent, int start)
 {
 	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
-	struct blk_plug plug;
-	struct page *page;
-	int err, i, end;
-	nid_t nid;
+	nid_t nid = get_nid(parent, start, false);
 
-	/* First, try getting the desired direct node. */
-	nid = get_nid(parent, start, false);
-	if (!nid)
-		return ERR_PTR(-ENOENT);
-	f2fs_bug_on(sbi, check_nid_range(sbi, nid));
-repeat:
-	page = grab_cache_page(NODE_MAPPING(sbi), nid);
-	if (!page)
-		return ERR_PTR(-ENOMEM);
-
-	err = read_node_page(page, READ_SYNC);
-	if (err < 0) {
-		f2fs_put_page(page, 1);
-		return ERR_PTR(err);
-	} else if (err == LOCKED_PAGE) {
-		goto page_hit;
-	}
-
-	blk_start_plug(&plug);
-
-	/* Then, try readahead for siblings of the desired node */
-	end = start + MAX_RA_NODE;
-	end = min(end, NIDS_PER_BLOCK);
-	for (i = start + 1; i < end; i++) {
-		nid_t tnid;
-
-		tnid = get_nid(parent, i, false);
-		ra_node_page(sbi, tnid);
-	}
-
-	blk_finish_plug(&plug);
-
-	lock_page(page);
-	if (unlikely(!PageUptodate(page))) {
-		f2fs_put_page(page, 1);
-		return ERR_PTR(-EIO);
-	}
-	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
-		f2fs_put_page(page, 1);
-		goto repeat;
-	}
-page_hit:
-	f2fs_bug_on(sbi, nid != nid_of_node(page));
-	return page;
+	return __get_node_page(sbi, nid, parent, start);
 }
 
 void sync_inode_page(struct dnode_of_data *dn)
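
One detail worth noting in the ra_node_pages() hunk above: the blk_start_plug()/blk_finish_plug() pair is the block layer's plugging idiom for batching a burst of small I/O submissions from one task so that adjacent requests can be merged before dispatch. A rough kernel-style sketch of the idiom, with an illustrative helper name and callback that are not part of f2fs:

#include <linux/blkdev.h>

/*
 * Illustrative only: issue a run of submissions under one plug. Requests
 * queued between blk_start_plug() and blk_finish_plug() are held in a
 * per-task list and flushed (and possibly merged) when the plug is
 * finished or when the task sleeps.
 */
static void readahead_batch(void (*submit_one)(int idx), int start, int end)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = start; i < end; i++)
		submit_one(i);
	blk_finish_plug(&plug);
}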