f2fs: compress: don't redirty sparse cluster during {,de}compress

[ Upstream commit f785cec298c95d00058560c0715233294a04b8f3 ]

In f2fs_do_write_data_page, when the data block is NULL_ADDR, writepage
is skipped on the assumption that the page has already been truncated.
For a compressed file that contains NULL_ADDR blocks in
compress_mode=user, this leaves PAGECACHE_TAG_TOWRITE uncleared during
writeback, which results in an infinite loop.
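
For reference, a paraphrased sketch of the skip path in
f2fs_do_write_data_page that the message refers to (not a verbatim
excerpt; the real check lives in fs/f2fs/data.c):

	/*
	 * Paraphrased: a NULL_ADDR block is treated as already truncated,
	 * so the page is dropped without being written back -- and, for a
	 * user-mode-compressed file, without PAGECACHE_TAG_TOWRITE ever
	 * being cleared, which is what makes the writeback loop spin.
	 */
	if (fio->old_blkaddr == NULL_ADDR) {
		ClearPageUptodate(page);
		goto out_writepage;
	}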

This is the reproduction process; an ioctl-level sketch of steps 2 and 4
follows the list:

1. dd if=/dev/zero bs=4096 count=1024 seek=1024 of=testfile
2. f2fs_io compress testfile
3. dd if=/dev/zero bs=4096 count=1 conv=notrunc of=testfile
4. f2fs_io decompress testfile
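
Steps 2 and 4 above use the f2fs_io tool; at the ioctl level they boil
down to something like the following minimal sketch (assuming the uapi
definitions F2FS_IOC_COMPRESS_FILE / F2FS_IOC_DECOMPRESS_FILE from
<linux/f2fs.h>; illustrative only):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/f2fs.h>

	int main(int argc, char **argv)
	{
		int fd;

		if (argc < 2)
			return 1;
		fd = open(argv[1], O_RDWR);
		if (fd < 0)
			return 1;
		/* step 2 of the reproduction; use F2FS_IOC_DECOMPRESS_FILE for step 4 */
		if (ioctl(fd, F2FS_IOC_COMPRESS_FILE) < 0)
			perror("F2FS_IOC_COMPRESS_FILE");
		close(fd);
		return 0;
	}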

To prevent the problem, let's check whether the cluster is fully
allocated before redirtying its pages.
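
A condensed view of the reworked redirty loop that both ioctls switch
to (identifiers taken from the diff below; the compress path skips
sparse clusters, the decompress path skips non-compressed ones):

	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	last_idx >>= fi->i_log_cluster_size;

	for (cluster_idx = 0; cluster_idx < last_idx; cluster_idx++) {
		page_idx = cluster_idx << fi->i_log_cluster_size;

		/*
		 * compress: skip clusters that still contain unallocated
		 * (NULL_ADDR) blocks; decompress checks
		 * !f2fs_is_compressed_cluster() instead.
		 */
		if (f2fs_is_sparse_cluster(inode, page_idx))
			continue;

		ret = redirty_blocks(inode, page_idx, fi->i_cluster_size);
		if (ret < 0)
			break;
	}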

Fixes: 5fdb322ff2 ("f2fs: add F2FS_IOC_DECOMPRESS_FILE and F2FS_IOC_COMPRESS_FILE")
Reviewed-by: Sungjong Seo <sj1557.seo@samsung.com>
Reviewed-by: Sunmin Jeong <s_min.jeong@samsung.com>
Tested-by: Jaewook Kim <jw5454.kim@samsung.com>
Signed-off-by: Yeongjin Gil <youngjin.gil@samsung.com>
Reviewed-by: Chao Yu <chao@kernel.org>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>

fs/f2fs/compress.c

@@ -945,7 +945,7 @@ static int __f2fs_get_cluster_blocks(struct inode *inode,
unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
int count, i;
for (i = 1, count = 1; i < cluster_size; i++) {
for (i = 0, count = 0; i < cluster_size; i++) {
block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
dn->ofs_in_node + i);
@@ -956,8 +956,8 @@ static int __f2fs_get_cluster_blocks(struct inode *inode,
return count;
}
static int __f2fs_cluster_blocks(struct inode *inode,
unsigned int cluster_idx, bool compr_blks)
static int __f2fs_cluster_blocks(struct inode *inode, unsigned int cluster_idx,
enum cluster_check_type type)
{
struct dnode_of_data dn;
unsigned int start_idx = cluster_idx <<
@@ -978,10 +978,12 @@ static int __f2fs_cluster_blocks(struct inode *inode,
}
if (dn.data_blkaddr == COMPRESS_ADDR) {
if (compr_blks)
ret = __f2fs_get_cluster_blocks(inode, &dn);
else
if (type == CLUSTER_COMPR_BLKS)
ret = 1 + __f2fs_get_cluster_blocks(inode, &dn);
else if (type == CLUSTER_IS_COMPR)
ret = 1;
} else if (type == CLUSTER_RAW_BLKS) {
ret = __f2fs_get_cluster_blocks(inode, &dn);
}
fail:
f2fs_put_dnode(&dn);
@@ -991,7 +993,16 @@ fail:
/* return # of compressed blocks in compressed cluster */
static int f2fs_compressed_blocks(struct compress_ctx *cc)
{
return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx, true);
return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx,
CLUSTER_COMPR_BLKS);
}
/* return # of raw blocks in non-compressed cluster */
static int f2fs_decompressed_blocks(struct inode *inode,
unsigned int cluster_idx)
{
return __f2fs_cluster_blocks(inode, cluster_idx,
CLUSTER_RAW_BLKS);
}
/* return whether cluster is compressed one or not */
@@ -999,7 +1010,16 @@ int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
{
return __f2fs_cluster_blocks(inode,
index >> F2FS_I(inode)->i_log_cluster_size,
false);
CLUSTER_IS_COMPR);
}
/* return whether cluster contains non raw blocks or not */
bool f2fs_is_sparse_cluster(struct inode *inode, pgoff_t index)
{
unsigned int cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size;
return f2fs_decompressed_blocks(inode, cluster_idx) !=
F2FS_I(inode)->i_cluster_size;
}
static bool cluster_may_compress(struct compress_ctx *cc)

fs/f2fs/f2fs.h

@@ -4284,6 +4284,11 @@ static inline bool f2fs_meta_inode_gc_required(struct inode *inode)
* compress.c
*/
#ifdef CONFIG_F2FS_FS_COMPRESSION
enum cluster_check_type {
CLUSTER_IS_COMPR, /* check only if compressed cluster */
CLUSTER_COMPR_BLKS, /* return # of compressed blocks in a cluster */
CLUSTER_RAW_BLKS /* return # of raw blocks in a cluster */
};
bool f2fs_is_compressed_page(struct page *page);
struct page *f2fs_compress_control_page(struct page *page);
int f2fs_prepare_compress_overwrite(struct inode *inode,
@@ -4310,6 +4315,7 @@ int f2fs_write_multi_pages(struct compress_ctx *cc,
struct writeback_control *wbc,
enum iostat_type io_type);
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index);
bool f2fs_is_sparse_cluster(struct inode *inode, pgoff_t index);
void f2fs_update_read_extent_tree_range_compressed(struct inode *inode,
pgoff_t fofs, block_t blkaddr,
unsigned int llen, unsigned int c_len);
@@ -4396,6 +4402,12 @@ static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi,
static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi,
nid_t ino) { }
#define inc_compr_inode_stat(inode) do { } while (0)
static inline int f2fs_is_compressed_cluster(
struct inode *inode,
pgoff_t index) { return 0; }
static inline bool f2fs_is_sparse_cluster(
struct inode *inode,
pgoff_t index) { return true; }
static inline void f2fs_update_read_extent_tree_range_compressed(
struct inode *inode,
pgoff_t fofs, block_t blkaddr,

fs/f2fs/file.c

@@ -4182,9 +4182,8 @@ static int f2fs_ioc_decompress_file(struct file *filp)
struct inode *inode = file_inode(filp);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_inode_info *fi = F2FS_I(inode);
pgoff_t page_idx = 0, last_idx;
int cluster_size = fi->i_cluster_size;
int count, ret;
pgoff_t page_idx = 0, last_idx, cluster_idx;
int ret;
if (!f2fs_sb_has_compression(sbi) ||
F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
@@ -4217,10 +4216,15 @@ static int f2fs_ioc_decompress_file(struct file *filp)
goto out;
last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
last_idx >>= fi->i_log_cluster_size;
count = last_idx - page_idx;
while (count && count >= cluster_size) {
ret = redirty_blocks(inode, page_idx, cluster_size);
for (cluster_idx = 0; cluster_idx < last_idx; cluster_idx++) {
page_idx = cluster_idx << fi->i_log_cluster_size;
if (!f2fs_is_compressed_cluster(inode, page_idx))
continue;
ret = redirty_blocks(inode, page_idx, fi->i_cluster_size);
if (ret < 0)
break;
@@ -4230,9 +4234,6 @@ static int f2fs_ioc_decompress_file(struct file *filp)
break;
}
count -= cluster_size;
page_idx += cluster_size;
cond_resched();
if (fatal_signal_pending(current)) {
ret = -EINTR;
@@ -4258,9 +4259,9 @@ static int f2fs_ioc_compress_file(struct file *filp)
{
struct inode *inode = file_inode(filp);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
pgoff_t page_idx = 0, last_idx;
int cluster_size = F2FS_I(inode)->i_cluster_size;
int count, ret;
struct f2fs_inode_info *fi = F2FS_I(inode);
pgoff_t page_idx = 0, last_idx, cluster_idx;
int ret;
if (!f2fs_sb_has_compression(sbi) ||
F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
@@ -4292,10 +4293,15 @@ static int f2fs_ioc_compress_file(struct file *filp)
set_inode_flag(inode, FI_ENABLE_COMPRESS);
last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
last_idx >>= fi->i_log_cluster_size;
count = last_idx - page_idx;
while (count && count >= cluster_size) {
ret = redirty_blocks(inode, page_idx, cluster_size);
for (cluster_idx = 0; cluster_idx < last_idx; cluster_idx++) {
page_idx = cluster_idx << fi->i_log_cluster_size;
if (f2fs_is_sparse_cluster(inode, page_idx))
continue;
ret = redirty_blocks(inode, page_idx, fi->i_cluster_size);
if (ret < 0)
break;
@@ -4305,9 +4311,6 @@ static int f2fs_ioc_compress_file(struct file *filp)
break;
}
count -= cluster_size;
page_idx += cluster_size;
cond_resched();
if (fatal_signal_pending(current)) {
ret = -EINTR;