f2fs: Detect looped node chain efficiently
find_fsync_dnodes() detects a looped node chain by comparing a loop counter against the number of free blocks, so when the free block count is large it can take tens of seconds before the loop is reported. Use Floyd's cycle detection algorithm instead to detect the loop much more efficiently.

Signed-off-by: Chunhai Guo <guochunhai@vivo.com>
Reviewed-by: Chao Yu <chao@kernel.org>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
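For context, Floyd's cycle detection walks a chain with two pointers: a slow one advancing one node per step and a fast one advancing two. If the chain loops, the fast pointer eventually meets the slow one; if not, the fast pointer reaches the end. Below is a minimal, standalone user-space sketch of the idea on an in-memory linked list; the struct node type and helpers are purely illustrative and are not part of f2fs.

/*
 * Illustrative sketch of Floyd's ("tortoise and hare") cycle detection.
 * Not kernel code; the list type below is hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

struct node {
        int val;
        struct node *next;
};

/* Return true if the list starting at head contains a cycle. */
static bool has_cycle(struct node *head)
{
        struct node *slow = head;       /* advances one step per iteration */
        struct node *fast = head;       /* advances two steps per iteration */

        while (fast && fast->next) {
                slow = slow->next;
                fast = fast->next->next;
                if (slow == fast)       /* pointers meet only inside a cycle */
                        return true;
        }
        return false;                   /* fast hit the end: no cycle */
}

int main(void)
{
        /* Build a small list 0 -> 1 -> 2 -> 3, then loop 3 back to 1. */
        struct node n[4];
        int i;

        for (i = 0; i < 4; i++) {
                n[i].val = i;
                n[i].next = (i < 3) ? &n[i + 1] : NULL;
        }
        printf("no loop:   %s\n", has_cycle(&n[0]) ? "cycle" : "clean");

        n[3].next = &n[1];
        printf("with loop: %s\n", has_cycle(&n[0]) ? "cycle" : "clean");
        return 0;
}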
commit 38a4a330c8
parent 25f9080576
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -360,21 +360,63 @@ static unsigned int adjust_por_ra_blocks(struct f2fs_sb_info *sbi,
 	return ra_blocks;
 }
 
+/* Detect looped node chain with Floyd's cycle detection algorithm. */
+static int sanity_check_node_chain(struct f2fs_sb_info *sbi, block_t blkaddr,
+		block_t *blkaddr_fast, bool *is_detecting)
+{
+	unsigned int ra_blocks = RECOVERY_MAX_RA_BLOCKS;
+	struct page *page = NULL;
+	int i;
+
+	if (!*is_detecting)
+		return 0;
+
+	for (i = 0; i < 2; i++) {
+		if (!f2fs_is_valid_blkaddr(sbi, *blkaddr_fast, META_POR)) {
+			*is_detecting = false;
+			return 0;
+		}
+
+		page = f2fs_get_tmp_page(sbi, *blkaddr_fast);
+		if (IS_ERR(page))
+			return PTR_ERR(page);
+
+		if (!is_recoverable_dnode(page)) {
+			f2fs_put_page(page, 1);
+			*is_detecting = false;
+			return 0;
+		}
+
+		ra_blocks = adjust_por_ra_blocks(sbi, ra_blocks, *blkaddr_fast,
+						next_blkaddr_of_node(page));
+
+		*blkaddr_fast = next_blkaddr_of_node(page);
+		f2fs_put_page(page, 1);
+
+		f2fs_ra_meta_pages_cond(sbi, *blkaddr_fast, ra_blocks);
+	}
+
+	if (*blkaddr_fast == blkaddr) {
+		f2fs_notice(sbi, "%s: Detect looped node chain on blkaddr:%u."
+				" Run fsck to fix it.", __func__, blkaddr);
+		return -EINVAL;
+	}
+	return 0;
+}
+
 static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
 				bool check_only)
 {
 	struct curseg_info *curseg;
 	struct page *page = NULL;
-	block_t blkaddr;
-	unsigned int loop_cnt = 0;
-	unsigned int ra_blocks = RECOVERY_MAX_RA_BLOCKS;
-	unsigned int free_blocks = MAIN_SEGS(sbi) * sbi->blocks_per_seg -
-						valid_user_blocks(sbi);
+	block_t blkaddr, blkaddr_fast;
+	bool is_detecting = true;
 	int err = 0;
 
 	/* get node pages in the current segment */
 	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
 	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
+	blkaddr_fast = blkaddr;
 
 	while (1) {
 		struct fsync_inode_entry *entry;
@@ -431,25 +473,14 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
 		if (IS_INODE(page) && is_dent_dnode(page))
 			entry->last_dentry = blkaddr;
 next:
-		/* sanity check in order to detect looped node chain */
-		if (++loop_cnt >= free_blocks ||
-			blkaddr == next_blkaddr_of_node(page)) {
-			f2fs_notice(sbi, "%s: detect looped node chain, blkaddr:%u, next:%u",
-				    __func__, blkaddr,
-				    next_blkaddr_of_node(page));
-			f2fs_put_page(page, 1);
-			err = -EINVAL;
-			break;
-		}
-
-		ra_blocks = adjust_por_ra_blocks(sbi, ra_blocks, blkaddr,
-					next_blkaddr_of_node(page));
-
 		/* check next segment */
 		blkaddr = next_blkaddr_of_node(page);
 		f2fs_put_page(page, 1);
 
-		f2fs_ra_meta_pages_cond(sbi, blkaddr, ra_blocks);
+		err = sanity_check_node_chain(sbi, blkaddr, &blkaddr_fast,
+				&is_detecting);
+		if (err)
+			break;
 	}
 	return err;
 }
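The patch splits the two pointers across functions: find_fsync_dnodes() advances blkaddr (the slow pointer) by one block per loop iteration, while each call to sanity_check_node_chain() advances blkaddr_fast by two blocks and compares it against the slow pointer. The toy user-space model below mirrors that structure to show how a loop gets caught; the next_blk[] table, NULL_BLK, and the toy_* names are invented for illustration and have nothing to do with the real f2fs on-disk format.

/*
 * Toy model of the split used in the patch: the caller walks the chain
 * one block per iteration (slow pointer), and each call to the check
 * helper advances a second pointer by two blocks (fast pointer).  The
 * pointers can only meet if the chain loops.
 */
#include <stdio.h>

#define NULL_BLK        0u
#define NR_BLKS         16u

typedef unsigned int block_t;

/* next_blk[b] plays the role of next_blkaddr_of_node() in this toy. */
static block_t next_blk[NR_BLKS];

/* Advance the fast pointer two steps; report a loop if it catches up. */
static int toy_sanity_check_node_chain(block_t blkaddr, block_t *blkaddr_fast)
{
        int i;

        for (i = 0; i < 2; i++) {
                if (*blkaddr_fast == NULL_BLK)  /* chain ends: no loop */
                        return 0;
                *blkaddr_fast = next_blk[*blkaddr_fast];
        }
        return (*blkaddr_fast == blkaddr) ? -1 : 0;     /* -1 stands in for -EINVAL */
}

static int toy_find_fsync_dnodes(block_t start)
{
        block_t blkaddr = start, blkaddr_fast = start;
        int steps = 0;

        while (blkaddr != NULL_BLK) {
                blkaddr = next_blk[blkaddr];    /* slow pointer: one step */
                steps++;
                if (toy_sanity_check_node_chain(blkaddr, &blkaddr_fast)) {
                        printf("loop detected after %d steps\n", steps);
                        return -1;
                }
        }
        printf("clean chain, walked %d steps\n", steps);
        return 0;
}

int main(void)
{
        block_t b;

        /* Chain 1 -> 2 -> 3 -> 4 -> 5, with 5 looping back to 2. */
        for (b = 1; b < 6; b++)
                next_blk[b] = b + 1;
        next_blk[5] = 2;
        toy_find_fsync_dnodes(1);

        /* Same chain but properly terminated after block 5. */
        next_blk[5] = NULL_BLK;
        toy_find_fsync_dnodes(1);
        return 0;
}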