From 0c638f70d7310f961a3482108c9d7ce15fcba8b3 Mon Sep 17 00:00:00 2001
From: Gao Xiang
Date: Fri, 8 Nov 2019 11:37:33 +0800
Subject: [PATCH] erofs: drop all vle annotations for runtime names

VLE was an old informal name for fixed-sized output compression,
which came from the published ATC'19 paper [1].

Drop those old annotations since erofs can handle all encoded
clusters on a block-aligned basis, which is more general than
fixed-sized output compression once the larger clustersize feature
is fully implemented.

Unaligned encoding won't be considered in EROFS since it's not
friendly to in-place I/O, and probably not to in-place decompression
either.

a) Fixed-sized output compression with 16KB pcluster:
 ___________________________________
|xxxxxxxx|xxxxxxxx|xxxxxxxx|xxxxxxxx|
|___ 0___|___ 1___|___ 2___|___ 3___| physical blocks

b) Block-aligned fixed-sized input compression with 16KB pcluster:
 ___________________________________
|xxxxxxxx|xxxxxxxx|xxxxxxxx|xxx00000|
|___ 0___|___ 1___|___ 2___|___ 3___| physical blocks

c) Block-unaligned fixed-sized input compression with 16KB
   compression unit:
 ____________________________________________
|..xxxxxx|xxxxxxxx|xxxxxxxx|xxxxxxxx|x.......|
|___ 0___|___ 1___|___ 2___|___ 3___|___ 4___| physical blocks

Use better names for those as well.

[1] https://www.usenix.org/conference/atc19/presentation/gao
Link: https://lore.kernel.org/r/20191108033733.63919-1-gaoxiang25@huawei.com
Reviewed-by: Chao Yu
Signed-off-by: Gao Xiang
---
 fs/erofs/internal.h |  4 +--
 fs/erofs/zdata.c    | 62 +++++++++++++++++++++------------------
 fs/erofs/zmap.c     | 28 ++++++++++----------
 3 files changed, 44 insertions(+), 50 deletions(-)

diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index 6864fa4893e9..1ed5beff7d11 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -279,9 +279,7 @@ static inline unsigned int erofs_inode_datalayout(unsigned int value)
 extern const struct super_operations erofs_sops;
 
 extern const struct address_space_operations erofs_raw_access_aops;
-#ifdef CONFIG_EROFS_FS_ZIP
-extern const struct address_space_operations z_erofs_vle_normalaccess_aops;
-#endif
+extern const struct address_space_operations z_erofs_aops;
 
 /*
  * Logical to physical block mapping, used by erofs_map_blocks()
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 35bf6879d3a6..ca99425a4536 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -711,7 +711,7 @@ static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
 		queue_work(z_erofs_workqueue, &io->u.work);
 }
 
-static void z_erofs_vle_read_endio(struct bio *bio)
+static void z_erofs_decompressqueue_endio(struct bio *bio)
 {
 	tagptr1_t t = tagptr_init(tagptr1_t, bio->bi_private);
 	struct z_erofs_decompressqueue *q = tagptr_unfold_ptr(t);
@@ -939,8 +939,8 @@ out:
 	return err;
 }
 
-static void z_erofs_vle_unzip_all(const struct z_erofs_decompressqueue *io,
-				  struct list_head *pagepool)
+static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
+				     struct list_head *pagepool)
 {
 	z_erofs_next_pcluster_t owned = io->head;
 
@@ -960,14 +960,14 @@ static void z_erofs_vle_unzip_all(const struct z_erofs_decompressqueue *io,
 	}
 }
 
-static void z_erofs_vle_unzip_wq(struct work_struct *work)
+static void z_erofs_decompressqueue_work(struct work_struct *work)
 {
 	struct z_erofs_decompressqueue *bgq =
 		container_of(work, struct z_erofs_decompressqueue, u.work);
 	LIST_HEAD(pagepool);
 
 	DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
-	z_erofs_vle_unzip_all(bgq, &pagepool);
+	z_erofs_decompress_queue(bgq, &pagepool);
 
 	put_pages_list(&pagepool);
 	kvfree(bgq);
@@ -1097,7 +1097,7 @@ jobqueue_init(struct super_block *sb,
 			*fg = true;
 			goto fg_out;
 		}
-		INIT_WORK(&q->u.work, z_erofs_vle_unzip_wq);
+		INIT_WORK(&q->u.work, z_erofs_decompressqueue_work);
 	} else {
 fg_out:
 		q = fgq;
@@ -1163,11 +1163,11 @@ static bool postsubmit_is_all_bypassed(struct z_erofs_decompressqueue *q[],
 	return true;
 }
 
-static bool z_erofs_vle_submit_all(struct super_block *sb,
-				   z_erofs_next_pcluster_t owned_head,
-				   struct list_head *pagepool,
-				   struct z_erofs_decompressqueue *fgq,
-				   bool *force_fg)
+static bool z_erofs_submit_queue(struct super_block *sb,
+				 z_erofs_next_pcluster_t owned_head,
+				 struct list_head *pagepool,
+				 struct z_erofs_decompressqueue *fgq,
+				 bool *force_fg)
 {
 	struct erofs_sb_info *const sbi = EROFS_SB(sb);
 	z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
@@ -1234,7 +1234,7 @@ submit_bio_retry:
 
 			if (!bio) {
 				bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
-				bio->bi_end_io = z_erofs_vle_read_endio;
+				bio->bi_end_io = z_erofs_decompressqueue_endio;
 				bio_set_dev(bio, sb->s_bdev);
 				bio->bi_iter.bi_sector = (sector_t)(first_index + i) <<
 					LOG_SECTORS_PER_BLOCK;
@@ -1270,19 +1270,18 @@ skippage:
 	return true;
 }
 
-static void z_erofs_submit_and_unzip(struct super_block *sb,
-				     struct z_erofs_collector *clt,
-				     struct list_head *pagepool,
-				     bool force_fg)
+static void z_erofs_runqueue(struct super_block *sb,
+			     struct z_erofs_collector *clt,
+			     struct list_head *pagepool, bool force_fg)
 {
 	struct z_erofs_decompressqueue io[NR_JOBQUEUES];
 
-	if (!z_erofs_vle_submit_all(sb, clt->owned_head,
-				    pagepool, io, &force_fg))
+	if (!z_erofs_submit_queue(sb, clt->owned_head,
+				  pagepool, io, &force_fg))
 		return;
 
-	/* decompress no I/O pclusters immediately */
-	z_erofs_vle_unzip_all(&io[JQ_BYPASS], pagepool);
+	/* handle bypass queue (no i/o pclusters) immediately */
+	z_erofs_decompress_queue(&io[JQ_BYPASS], pagepool);
 
 	if (!force_fg)
 		return;
@@ -1291,12 +1290,11 @@ static void z_erofs_submit_and_unzip(struct super_block *sb,
 	io_wait_event(io[JQ_SUBMIT].u.wait,
 		      !atomic_read(&io[JQ_SUBMIT].pending_bios));
 
-	/* let's synchronous decompression */
-	z_erofs_vle_unzip_all(&io[JQ_SUBMIT], pagepool);
+	/* handle synchronous decompress queue in the caller context */
+	z_erofs_decompress_queue(&io[JQ_SUBMIT], pagepool);
 }
 
-static int z_erofs_vle_normalaccess_readpage(struct file *file,
-					     struct page *page)
+static int z_erofs_readpage(struct file *file, struct page *page)
 {
 	struct inode *const inode = page->mapping->host;
 	struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
@@ -1311,7 +1309,7 @@ static int z_erofs_vle_normalaccess_readpage(struct file *file,
 	(void)z_erofs_collector_end(&f.clt);
 
 	/* if some compressed cluster ready, need submit them anyway */
-	z_erofs_submit_and_unzip(inode->i_sb, &f.clt, &pagepool, true);
+	z_erofs_runqueue(inode->i_sb, &f.clt, &pagepool, true);
 
 	if (err)
 		erofs_err(inode->i_sb, "failed to read, err [%d]", err);
@@ -1330,10 +1328,8 @@ static bool should_decompress_synchronously(struct erofs_sb_info *sbi,
 	return nr <= sbi->max_sync_decompress_pages;
 }
 
-static int z_erofs_vle_normalaccess_readpages(struct file *filp,
-					      struct address_space *mapping,
-					      struct list_head *pages,
-					      unsigned int nr_pages)
+static int z_erofs_readpages(struct file *filp, struct address_space *mapping,
+			     struct list_head *pages, unsigned int nr_pages)
 {
 	struct inode *const inode = mapping->host;
 	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
@@ -1388,7 +1384,7 @@ static int z_erofs_vle_normalaccess_readpages(struct file *filp,
 
 	(void)z_erofs_collector_end(&f.clt);
 
-	z_erofs_submit_and_unzip(inode->i_sb, &f.clt, &pagepool, sync);
+	z_erofs_runqueue(inode->i_sb, &f.clt, &pagepool, sync);
 
 	if (f.map.mpage)
 		put_page(f.map.mpage);
@@ -1398,8 +1394,8 @@ static int z_erofs_vle_normalaccess_readpages(struct file *filp,
 
 	return 0;
 }
 
-const struct address_space_operations z_erofs_vle_normalaccess_aops = {
-	.readpage = z_erofs_vle_normalaccess_readpage,
-	.readpages = z_erofs_vle_normalaccess_readpages,
+const struct address_space_operations z_erofs_aops = {
+	.readpage = z_erofs_readpage,
+	.readpages = z_erofs_readpages,
 };
diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
index 6a26c293ae2d..736db3a4cdef 100644
--- a/fs/erofs/zmap.c
+++ b/fs/erofs/zmap.c
@@ -22,11 +22,11 @@ int z_erofs_fill_inode(struct inode *inode)
 		set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
 	}
 
-	inode->i_mapping->a_ops = &z_erofs_vle_normalaccess_aops;
+	inode->i_mapping->a_ops = &z_erofs_aops;
 	return 0;
 }
 
-static int fill_inode_lazy(struct inode *inode)
+static int z_erofs_fill_inode_lazy(struct inode *inode)
 {
 	struct erofs_inode *const vi = EROFS_I(inode);
 	struct super_block *const sb = inode->i_sb;
@@ -138,8 +138,8 @@ static int z_erofs_reload_indexes(struct z_erofs_maprecorder *m,
 	return 0;
 }
 
-static int vle_legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
-					     unsigned long lcn)
+static int legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
+					 unsigned long lcn)
 {
 	struct inode *const inode = m->inode;
 	struct erofs_inode *const vi = EROFS_I(inode);
@@ -311,13 +311,13 @@ out:
 	return unpack_compacted_index(m, amortizedshift, erofs_blkoff(pos));
 }
 
-static int vle_load_cluster_from_disk(struct z_erofs_maprecorder *m,
-				      unsigned int lcn)
+static int z_erofs_load_cluster_from_disk(struct z_erofs_maprecorder *m,
+					  unsigned int lcn)
 {
 	const unsigned int datamode = EROFS_I(m->inode)->datalayout;
 
 	if (datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY)
-		return vle_legacy_load_cluster_from_disk(m, lcn);
+		return legacy_load_cluster_from_disk(m, lcn);
 
 	if (datamode == EROFS_INODE_FLAT_COMPRESSION)
 		return compacted_load_cluster_from_disk(m, lcn);
@@ -325,8 +325,8 @@ static int vle_load_cluster_from_disk(struct z_erofs_maprecorder *m,
 	return -EINVAL;
 }
 
-static int vle_extent_lookback(struct z_erofs_maprecorder *m,
-			       unsigned int lookback_distance)
+static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
+				   unsigned int lookback_distance)
 {
 	struct erofs_inode *const vi = EROFS_I(m->inode);
 	struct erofs_map_blocks *const map = m->map;
@@ -343,7 +343,7 @@ static int vle_extent_lookback(struct z_erofs_maprecorder *m,
 
 	/* load extent head logical cluster if needed */
 	lcn -= lookback_distance;
-	err = vle_load_cluster_from_disk(m, lcn);
+	err = z_erofs_load_cluster_from_disk(m, lcn);
 	if (err)
 		return err;
 
@@ -356,7 +356,7 @@ static int vle_extent_lookback(struct z_erofs_maprecorder *m,
 			DBG_BUGON(1);
 			return -EFSCORRUPTED;
 		}
-		return vle_extent_lookback(m, m->delta[0]);
+		return z_erofs_extent_lookback(m, m->delta[0]);
 	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
 		map->m_flags &= ~EROFS_MAP_ZIPPED;
 		/* fallthrough */
@@ -396,7 +396,7 @@ int z_erofs_map_blocks_iter(struct inode *inode,
 		goto out;
 	}
 
-	err = fill_inode_lazy(inode);
+	err = z_erofs_fill_inode_lazy(inode);
 	if (err)
 		goto out;
 
@@ -405,7 +405,7 @@ int z_erofs_map_blocks_iter(struct inode *inode,
 	m.lcn = ofs >> lclusterbits;
 	endoff = ofs & ((1 << lclusterbits) - 1);
 
-	err = vle_load_cluster_from_disk(&m, m.lcn);
+	err = z_erofs_load_cluster_from_disk(&m, m.lcn);
 	if (err)
 		goto unmap_out;
 
@@ -436,7 +436,7 @@ int z_erofs_map_blocks_iter(struct inode *inode,
 		/* fallthrough */
 	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
 		/* get the corresponding first chunk */
-		err = vle_extent_lookback(&m, m.delta[0]);
+		err = z_erofs_extent_lookback(&m, m.delta[0]);
 		if (err)
 			goto unmap_out;
 		break;
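
A note on the three layouts in the commit message: the standalone sketch
below (not erofs code; the 4KiB block size and every helper name are
assumptions made up for this illustration) computes how many physical
blocks one compression unit occupies in cases a), b) and c), showing why
the block-unaligned case can straddle an extra block:

/*
 * Standalone sketch, not erofs code: physical block usage of one
 * compression unit under the three encodings from the commit message.
 * BLKSIZE and all function names here are made up for illustration.
 */
#include <stdio.h>

#define BLKSIZE 4096u	/* assumed physical block size */

/* a) fixed-sized output: compressed data always fills the pcluster */
static unsigned int fixed_output_blocks(unsigned int pclustersize)
{
	return pclustersize / BLKSIZE;
}

/* b) block-aligned fixed-sized input: variable compressed length,
 * starts on a block boundary, tail block zero-padded ("xxx00000") */
static unsigned int aligned_input_blocks(unsigned int compressedlen)
{
	return (compressedlen + BLKSIZE - 1) / BLKSIZE;
}

/* c) block-unaligned fixed-sized input: may start at any byte offset
 * ("..xxxxxx"), so the same data can straddle one extra block */
static unsigned int unaligned_input_blocks(unsigned int startoff,
					   unsigned int compressedlen)
{
	unsigned int end = startoff + compressedlen;

	return end / BLKSIZE - startoff / BLKSIZE + !!(end % BLKSIZE);
}

int main(void)
{
	/* 16KB pcluster as in diagram a): exactly 4 blocks */
	printf("a) %u blocks\n", fixed_output_blocks(16384));
	/* ~15KB of compressed data as in diagram b): 4 blocks, padded tail */
	printf("b) %u blocks\n", aligned_input_blocks(15000));
	/* same data shifted 2KB into a block as in diagram c): 5 blocks */
	printf("c) %u blocks\n", unaligned_input_blocks(2048, 15000));
	return 0;
}

Cases a) and b) keep every unit addressable in whole blocks, which is
what makes in-place I/O (and possibly in-place decompression) workable;
case c) does not, which is why the commit message rules it out.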