erofs: remove dead code since managed cache is now built-in
After commit 4279f3f988 ("staging: erofs: turn cache strategies
into mount options"), cache strategies are selected via mount
options rather than the old build-time configuration. Remove the
dead code left behind for those obsoleted build options.
Link: https://lore.kernel.org/r/20191008125616.183715-2-gaoxiang25@huawei.com
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
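
As background, commit 4279f3f988 moved the choice from Kconfig to a
per-mount runtime value. A minimal sketch of the resulting pattern
(the enum values match fs/erofs of that era; the final check and the
helper are illustrative assumptions, not lines from this patch):

	/* cache behaviour is chosen per mount, e.g.
	 *   mount -t erofs -o cache_strategy=readahead /dev/loop0 /mnt
	 * and recorded in the superblock info instead of being fixed
	 * at build time.
	 */
	enum {
		EROFS_ZIP_CACHE_DISABLED,	/* cache_strategy=disabled */
		EROFS_ZIP_CACHE_READAHEAD,	/* cache_strategy=readahead */
		EROFS_ZIP_CACHE_READAROUND	/* cache_strategy=readaround */
	};

	/* illustrative runtime check replacing the old compile-time
	 * #ifdefs; cache_compressed_page() is a hypothetical helper */
	if (sbi->cache_strategy != EROFS_ZIP_CACHE_DISABLED)
		cache_compressed_page(pcl, page);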
commit bda17a4577
parent 9e579fc123
diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c
@@ -149,8 +149,7 @@ static void erofs_workgroup_unfreeze_final(struct erofs_workgroup *grp)
 }
 
 static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
-					   struct erofs_workgroup *grp,
-					   bool cleanup)
+					   struct erofs_workgroup *grp)
 {
 	/*
 	 * If managed cache is on, refcount of workgroups
@@ -188,8 +187,7 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
 }
 
 static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
-					      unsigned long nr_shrink,
-					      bool cleanup)
+					      unsigned long nr_shrink)
 {
 	pgoff_t first_index = 0;
 	void *batch[PAGEVEC_SIZE];
@@ -208,7 +206,7 @@ repeat:
 		first_index = grp->index + 1;
 
 		/* try to shrink each valid workgroup */
-		if (!erofs_try_to_release_workgroup(sbi, grp, cleanup))
+		if (!erofs_try_to_release_workgroup(sbi, grp))
 			continue;
 
 		++freed;
@@ -245,7 +243,8 @@ void erofs_shrinker_unregister(struct super_block *sb)
 	struct erofs_sb_info *const sbi = EROFS_SB(sb);
 
 	mutex_lock(&sbi->umount_mutex);
-	erofs_shrink_workstation(sbi, ~0UL, true);
+	/* clean up all remaining workgroups in memory */
+	erofs_shrink_workstation(sbi, ~0UL);
 
 	spin_lock(&erofs_sb_list_lock);
 	list_del(&sbi->list);
@@ -294,7 +293,7 @@ static unsigned long erofs_shrink_scan(struct shrinker *shrink,
 		spin_unlock(&erofs_sb_list_lock);
 		sbi->shrinker_run_no = run_no;
 
-		freed += erofs_shrink_workstation(sbi, nr, false);
+		freed += erofs_shrink_workstation(sbi, nr);
 
 		spin_lock(&erofs_sb_list_lock);
 		/* Get the next list element before we move this one */
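Net effect in fs/erofs/utils.c: with managed cache always built in,
both shrink paths behave identically, so the `cleanup' flag is
dropped and the unmount path states its intent with a comment
instead. The two call sites after the patch reduce to:

	/* unmount: drain every workgroup still in memory */
	erofs_shrink_workstation(sbi, ~0UL);

	/* shrinker: reclaim at most nr workgroups under memory pressure */
	freed += erofs_shrink_workstation(sbi, nr);
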
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
@@ -574,7 +574,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
 				struct list_head *pagepool)
 {
 	struct inode *const inode = fe->inode;
-	struct erofs_sb_info *const sbi __maybe_unused = EROFS_I_SB(inode);
+	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
 	struct erofs_map_blocks *const map = &fe->map;
 	struct z_erofs_collector *const clt = &fe->clt;
 	const loff_t offset = page_offset(page);
@@ -997,8 +997,6 @@ static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
 					       struct address_space *mc,
 					       gfp_t gfp)
 {
-	/* determined at compile time to avoid too many #ifdefs */
-	const bool nocache = __builtin_constant_p(mc) ? !mc : false;
 	const pgoff_t index = pcl->obj.index;
 	bool tocache = false;
 
@@ -1019,7 +1017,7 @@ repeat:
 	 * the cached page has not been allocated and
 	 * an placeholder is out there, prepare it now.
 	 */
-	if (!nocache && page == PAGE_UNALLOCATED) {
+	if (page == PAGE_UNALLOCATED) {
 		tocache = true;
 		goto out_allocpage;
 	}
@@ -1031,21 +1029,6 @@ repeat:
 
 	mapping = READ_ONCE(page->mapping);
 
-	/*
-	 * if managed cache is disabled, it's no way to
-	 * get such a cached-like page.
-	 */
-	if (nocache) {
-		/* if managed cache is disabled, it is impossible `justfound' */
-		DBG_BUGON(justfound);
-
-		/* and it should be locked, not uptodate, and not truncated */
-		DBG_BUGON(!PageLocked(page));
-		DBG_BUGON(PageUptodate(page));
-		DBG_BUGON(!mapping);
-		goto out;
-	}
-
 	/*
 	 * unmanaged (file) pages are all locked solidly,
 	 * therefore it is impossible for `mapping' to be NULL.
@@ -1102,7 +1085,7 @@ out_allocpage:
 		cpu_relax();
 		goto repeat;
 	}
-	if (nocache || !tocache)
+	if (!tocache)
 		goto out;
 	if (add_to_page_cache_lru(page, mc, index + nr, gfp)) {
 		page->mapping = Z_EROFS_MAPPING_STAGING;
@@ -1208,7 +1191,7 @@ static bool z_erofs_vle_submit_all(struct super_block *sb,
 				   struct z_erofs_unzip_io *fgq,
 				   bool force_fg)
 {
-	struct erofs_sb_info *const sbi __maybe_unused = EROFS_SB(sb);
+	struct erofs_sb_info *const sbi = EROFS_SB(sb);
 	z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
 	struct z_erofs_unzip_io *q[NR_JOBQUEUES];
 	struct bio *bio;
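
A note on the trick deleted from pickup_page_for_submission():
__builtin_constant_p(expr) is a GCC/Clang builtin that evaluates to
nonzero when expr is a compile-time constant, so `nocache' folded to
true (and its branches became dead code) in builds whose callers
passed a literal NULL `mc'. A standalone sketch with hypothetical
demo names:

	#include <stdbool.h>
	#include <stdio.h>

	static void demo_pickup(void *mc)
	{
		/* folds to constant true only if the compiler can prove
		 * mc is a constant NULL (e.g. after inlining a caller that
		 * passes a literal NULL); otherwise it is plain false */
		const bool nocache = __builtin_constant_p(mc) ? !mc : false;

		if (nocache)
			puts("compile-time: managed cache absent");
		else
			puts("runtime: managed cache path");
	}

	int main(void)
	{
		demo_pickup((void *)&demo_pickup);	/* non-NULL at runtime */
		return 0;
	}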