staging: erofs: introduce erofs_page_is_managed()

1) Clean up the unnecessary
   page->mapping == MNGD_MAPPING(sbi) checks wrapped in #ifdefs;

2) Needed by "staging: erofs: support IO read error injection".

Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Author: Gao Xiang <gaoxiang25@huawei.com>
Date:   2019-03-25 11:40:08 +08:00 (committed by Greg Kroah-Hartman)
commit d61fbb6b16 (parent 1115249602)
2 changed files with 16 additions and 22 deletions
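To make item 1 of the commit message concrete, here is a minimal userspace sketch of the pattern the patch introduces: a helper that is defined in both configurations, so call sites no longer need an #ifdef around the managed-cache mapping comparison. All names below (HAS_MANAGED_CACHE, struct page_stub, page_is_managed, ...) are simplified stand-ins for illustration, not the real EROFS definitions.

/*
 * Illustrative userspace sketch (not kernel code): stand-in types show
 * how an always-defined inline helper lets callers drop the #ifdef
 * around the managed-cache check.
 */
#include <stdbool.h>
#include <stdio.h>

#define HAS_MANAGED_CACHE 1	/* stand-in for EROFS_FS_HAS_MANAGED_CACHE */

struct mapping { int id; };			/* stand-in for struct address_space */
struct page_stub { struct mapping *mapping; };	/* stand-in for struct page */
struct sb_info { struct mapping *managed; };	/* stand-in for struct erofs_sb_info */

#if HAS_MANAGED_CACHE
/* real check: compare the page's mapping with the managed-cache mapping */
static inline bool page_is_managed(const struct sb_info *sbi,
				   struct page_stub *page)
{
	return page->mapping == sbi->managed;
}
#else
/* stub: feature compiled out, so no page is ever "managed" */
static inline bool page_is_managed(const struct sb_info *sbi,
				   struct page_stub *page) { return false; }
#endif

int main(void)
{
	struct mapping mc = { .id = 1 }, other = { .id = 2 };
	struct sb_info sbi = { .managed = &mc };
	struct page_stub cached = { .mapping = &mc }, plain = { .mapping = &other };

	/* call sites need no #ifdef: the stub variant simply returns false */
	printf("cached managed? %d\n", page_is_managed(&sbi, &cached));
	printf("plain managed?  %d\n", page_is_managed(&sbi, &plain));
	return 0;
}

When the feature is compiled out, the stub always returns false, so the compiler can drop the dependent branches while the call sites still type-check in both configurations.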

drivers/staging/erofs/internal.h

@@ -268,8 +268,15 @@ int erofs_try_to_free_cached_page(struct address_space *mapping,
 					  struct page *page);
 
 #define MNGD_MAPPING(sbi)	((sbi)->managed_cache->i_mapping)
+static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
+					 struct page *page)
+{
+	return page->mapping == MNGD_MAPPING(sbi);
+}
 #else
 #define MNGD_MAPPING(sbi)	(NULL)
+static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
+					 struct page *page) { return false; }
 #endif
 
 #define DEFAULT_MAX_SYNC_DECOMPRESS_PAGES	3

drivers/staging/erofs/unzip_vle.c

@@ -845,11 +845,9 @@ static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
 static inline void z_erofs_vle_read_endio(struct bio *bio)
 {
 	const blk_status_t err = bio->bi_status;
+	struct erofs_sb_info *sbi = NULL;
 	unsigned int i;
 	struct bio_vec *bvec;
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
-	struct address_space *mc = NULL;
-#endif
 	struct bvec_iter_all iter_all;
 
 	bio_for_each_segment_all(bvec, bio, i, iter_all) {
@@ -859,20 +857,12 @@ static inline void z_erofs_vle_read_endio(struct bio *bio)
 		DBG_BUGON(PageUptodate(page));
 		DBG_BUGON(!page->mapping);
 
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
-		if (unlikely(!mc && !z_erofs_is_stagingpage(page))) {
-			struct inode *const inode = page->mapping->host;
-			struct super_block *const sb = inode->i_sb;
+		if (unlikely(!sbi && !z_erofs_is_stagingpage(page)))
+			sbi = EROFS_SB(page->mapping->host->i_sb);
 
-			mc = MNGD_MAPPING(EROFS_SB(sb));
-		}
-
-		/*
-		 * If mc has not gotten, it equals NULL,
-		 * however, page->mapping never be NULL if working properly.
-		 */
-		cachemngd = (page->mapping == mc);
-#endif
+		/* sbi should already be gotten if the page is managed */
+		if (sbi)
+			cachemngd = erofs_page_is_managed(sbi, page);
 
 		if (unlikely(err))
 			SetPageError(page);
@@ -984,13 +974,11 @@ repeat:
 		DBG_BUGON(!page->mapping);
 
 		if (!z_erofs_is_stagingpage(page)) {
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
-			if (page->mapping == MNGD_MAPPING(sbi)) {
+			if (erofs_page_is_managed(sbi, page)) {
 				if (unlikely(!PageUptodate(page)))
 					err = -EIO;
 				continue;
 			}
-#endif
 
 			/*
 			 * only if non-head page can be selected
@@ -1055,10 +1043,9 @@ out:
 	for (i = 0; i < clusterpages; ++i) {
 		page = compressed_pages[i];
 
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
-		if (page->mapping == MNGD_MAPPING(sbi))
+		if (erofs_page_is_managed(sbi, page))
 			continue;
-#endif
 
 		/* recycle all individual staging pages */
 		(void)z_erofs_gather_if_stagingpage(page_pool, page);