f2fs: support in batch fzero in dnode page

This patch speeds up fzero_range by performing space preallocation and
block address removal within one dnode page as a single batch operation.

In virtual machine, with zram driver:

dd if=/dev/zero of=/mnt/f2fs/file bs=1M count=4096
time xfs_io -f /mnt/f2fs/file -c "fzero 0 4096M"

Before:
real	0m3.276s
user	0m0.008s
sys	0m3.260s

After:
real	0m1.568s
user	0m0.000s
sys	0m1.564s

Signed-off-by: Chao Yu <yuchao0@huawei.com>
[Jaegeuk Kim: consider ENOSPC case]
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
This commit is contained in:
Chao Yu 2016-05-09 19:56:31 +08:00 committed by Jaegeuk Kim
parent 46008c6d42
commit 6e9619499f
1 changed file with 56 additions and 16 deletions

View File

@ -997,6 +997,49 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
 	return ret;
 }
/*
 * Zero out the file blocks in [start, end) that are all covered by the
 * single dnode page held in @dn.  All missing (NULL_ADDR) slots are
 * preallocated in one batch via reserve_new_blocks(), then every slot is
 * forced to NEW_ADDR, invalidating any existing on-disk block, so the
 * range reads back as zeroes.
 *
 * Returns 0 on success, an error from reserve_new_blocks(), or -ENOSPC
 * when the batch reservation could not cover every hole in the range.
 */
static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
							pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	pgoff_t index = start;
	/* remember the starting slot so both passes scan from the same place */
	unsigned int ofs_in_node = dn->ofs_in_node;
	blkcnt_t count = 0;
	int ret;

	/* First pass: count the unallocated (NULL_ADDR) slots in the range. */
	for (; index < end; index++, dn->ofs_in_node++) {
		if (datablock_addr(dn->node_page, dn->ofs_in_node) == NULL_ADDR)
			count++;
	}

	/* Rewind and reserve all counted blocks in one batch. */
	dn->ofs_in_node = ofs_in_node;
	ret = reserve_new_blocks(dn, count);
	if (ret)
		return ret;

	/*
	 * Second pass: every slot should now be NEW_ADDR; discard any slot
	 * that still points at an on-disk block so the range reads as zero.
	 */
	dn->ofs_in_node = ofs_in_node;
	for (index = start; index < end; index++, dn->ofs_in_node++) {
		dn->data_blkaddr =
			datablock_addr(dn->node_page, dn->ofs_in_node);
		/*
		 * reserve_new_blocks will not guarantee entire block
		 * allocation.
		 */
		if (dn->data_blkaddr == NULL_ADDR) {
			ret = -ENOSPC;
			break;
		}
		if (dn->data_blkaddr != NEW_ADDR) {
			invalidate_blocks(sbi, dn->data_blkaddr);
			dn->data_blkaddr = NEW_ADDR;
			set_data_blkaddr(dn);
		}
	}

	/*
	 * Refresh the extent cache for the slots actually processed
	 * (index - start of them; on -ENOSPC that is a partial range).
	 * NOTE(review): blkaddr 0 presumably denotes "no single physical
	 * mapping" — confirm against f2fs_update_extent_cache_range().
	 */
	f2fs_update_extent_cache_range(dn, start, 0, index - start);

	return ret;
}
 static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
 							int mode)
 {
@ -1047,35 +1090,32 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
 					(loff_t)pg_start << PAGE_SHIFT);
 		}

-		for (index = pg_start; index < pg_end; index++) {
+		for (index = pg_start; index < pg_end;) {
 			struct dnode_of_data dn;
-			struct page *ipage;
+			unsigned int end_offset;
+			pgoff_t end;

 			f2fs_lock_op(sbi);

-			ipage = get_node_page(sbi, inode->i_ino);
-			if (IS_ERR(ipage)) {
-				ret = PTR_ERR(ipage);
-				f2fs_unlock_op(sbi);
-				goto out;
-			}
-
-			set_new_dnode(&dn, inode, ipage, NULL, 0);
-			ret = f2fs_reserve_block(&dn, index);
+			set_new_dnode(&dn, inode, NULL, NULL, 0);
+			ret = get_dnode_of_data(&dn, index, ALLOC_NODE);
 			if (ret) {
 				f2fs_unlock_op(sbi);
 				goto out;
 			}

-			if (dn.data_blkaddr != NEW_ADDR) {
-				invalidate_blocks(sbi, dn.data_blkaddr);
-				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
-			}
+			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+			end = min(pg_end, end_offset - dn.ofs_in_node + index);
+
+			ret = f2fs_do_zero_range(&dn, index, end);
 			f2fs_put_dnode(&dn);
 			f2fs_unlock_op(sbi);
+			if (ret)
+				goto out;
+
+			index = end;

 			new_size = max_t(loff_t, new_size,
-					(loff_t)(index + 1) << PAGE_SHIFT);
+					(loff_t)index << PAGE_SHIFT);
 		}

 		if (off_end) {