staging: erofs: refine compressed pages preload flow
Currently, there are two kinds of compressed pages in erofs:
 1) file pages used for in-place decompression and
 2) managed pages used for cached decompression.
Both are stored in grp->compressed_pages[].

Managed pages could already exist or could be preloaded in this
round, covering the following cases in detail:
 1) already valid (loaded in some previous round);
 2) PAGE_UNALLOCATED, to be allocated at the time of submission;
 3) just found in the managed cache, holding an extra page reference.

Currently, cases 1) and 3) can be distinguished by lock_page and
checking PG_private, which is guaranteed by the reclaim path, but
it is better to double-check by using an extra tag.

This patch reworks the preload flow by introducing such a tag as
a tagged pointer; a number of #ifdefs are removed as well.

Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit 92e6efd566
parent 9248fce714
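The tagptr helpers referenced throughout the diff below pack a small tag
into the unused low bit(s) of an aligned pointer, so a single pointer-sized
slot in grp->compressed_pages[] can carry both a page pointer and a flag.
A minimal stand-alone sketch of the 1-bit variant, assuming at least
2-byte pointer alignment (the helper names here are illustrative, not the
real tagptr.h macros):

    #include <assert.h>
    #include <stdint.h>

    typedef struct { uintptr_t v; } tagptr1_t;  /* pointer plus 1-bit tag */

    static tagptr1_t fold1(void *ptr, unsigned int tag)
    {
            /* bit 0 of a sufficiently aligned pointer is free for the tag */
            assert(((uintptr_t)ptr & 1) == 0 && tag <= 1);
            return (tagptr1_t){ .v = (uintptr_t)ptr | tag };
    }

    static void *unfold_ptr1(tagptr1_t t)
    {
            return (void *)(t.v & ~(uintptr_t)1);   /* strip the tag bit */
    }

    static unsigned int unfold_tags1(tagptr1_t t)
    {
            return (unsigned int)(t.v & 1);         /* extract the tag bit */
    }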
@@ -21,6 +21,21 @@
  */
 #define PAGE_UNALLOCATED     ((void *)0x5F0E4B1D)
 
+/* how to allocate cached pages for a workgroup */
+enum z_erofs_cache_alloctype {
+        DONTALLOC,      /* don't allocate any cached pages */
+        DELAYEDALLOC,   /* delayed allocation (at the time of submitting io) */
+};
+
+/*
+ * tagged pointer with 1-bit tag for all compressed pages
+ * tag 0 - the page is just found with an extra page reference
+ */
+typedef tagptr1_t compressed_page_t;
+
+#define tag_compressed_page_justfound(page) \
+        tagptr_fold(compressed_page_t, page, 1)
+
 static struct workqueue_struct *z_erofs_workqueue __read_mostly;
 static struct kmem_cache *z_erofs_workgroup_cachep __read_mostly;
 
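With these definitions in place, the intended encode/decode pattern looks
as follows; this is a usage sketch assembled from the hunks further down,
not a verbatim excerpt:

    compressed_page_t t;

    /* preload side: the page was just grabbed from the managed cache */
    t = tag_compressed_page_justfound(page);

    /* preload side: or mark the slot for allocation at submission time */
    t = tagptr_init(compressed_page_t, PAGE_UNALLOCATED);

    /* submission side: decode a slot back into the page and the flag */
    t = tagptr_init(compressed_page_t, READ_ONCE(grp->compressed_pages[nr]));
    justfound = tagptr_unfold_tags(t);
    page = tagptr_unfold_ptr(t);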
@@ -131,38 +146,58 @@ struct z_erofs_vle_work_builder {
 { .work = NULL, .role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED }
 
 #ifdef EROFS_FS_HAS_MANAGED_CACHE
-static bool grab_managed_cache_pages(struct address_space *mapping,
-                                     erofs_blk_t start,
-                                     struct page **compressed_pages,
-                                     int clusterblks,
-                                     bool reserve_allocation)
+static void preload_compressed_pages(struct z_erofs_vle_work_builder *bl,
+                                     struct address_space *mc,
+                                     pgoff_t index,
+                                     unsigned int clusterpages,
+                                     enum z_erofs_cache_alloctype type,
+                                     struct list_head *pagepool,
+                                     gfp_t gfp)
 {
-        bool noio = true;
-        unsigned int i;
+        struct page **const pages = bl->compressed_pages;
+        const unsigned int remaining = bl->compressed_deficit;
+        bool standalone = true;
+        unsigned int i, j = 0;
 
-        /* TODO: optimize by introducing find_get_pages_range */
-        for (i = 0; i < clusterblks; ++i) {
-                struct page *page, *found;
+        if (bl->role < Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED)
+                return;
 
-                if (READ_ONCE(compressed_pages[i]))
+        gfp = mapping_gfp_constraint(mc, gfp) & ~__GFP_RECLAIM;
+
+        index += clusterpages - remaining;
+
+        for (i = 0; i < remaining; ++i) {
+                struct page *page;
+                compressed_page_t t;
+
+                /* the compressed page was loaded before */
+                if (READ_ONCE(pages[i]))
                         continue;
 
-                page = found = find_get_page(mapping, start + i);
-                if (!found) {
-                        noio = false;
-                        if (!reserve_allocation)
-                                continue;
-                        page = PAGE_UNALLOCATED;
+                page = find_get_page(mc, index + i);
+
+                if (page) {
+                        t = tag_compressed_page_justfound(page);
+                } else if (type == DELAYEDALLOC) {
+                        t = tagptr_init(compressed_page_t, PAGE_UNALLOCATED);
+                } else {        /* DONTALLOC */
+                        if (standalone)
+                                j = i;
+                        standalone = false;
+                        continue;
                 }
 
-                if (!cmpxchg(compressed_pages + i, NULL, page))
+                if (!cmpxchg_relaxed(&pages[i], NULL, tagptr_cast_ptr(t)))
                         continue;
 
-                if (found)
-                        put_page(found);
+                if (page)
+                        put_page(page);
         }
-        return noio;
+        bl->compressed_pages += j;
+        bl->compressed_deficit = remaining - j;
+
+        if (standalone)
+                bl->role = Z_EROFS_VLE_WORK_PRIMARY;
 }
 
 /* called by erofs_shrinker to get rid of all compressed_pages */
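The window trimming at the end of the new preload_compressed_pages() is
easier to see with concrete numbers. A toy model in plain C, using a
hypothetical 4-page cluster where pages 0, 1 and 3 hit the managed cache
under DONTALLOC (values made up for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
            bool cached[4] = { true, true, false, true };
            unsigned int remaining = 4, i, j = 0;
            bool standalone = true;

            for (i = 0; i < remaining; ++i) {
                    if (cached[i])
                            continue;  /* slot filled from the cache */
                    if (standalone)
                            j = i;     /* first slot needing a file page */
                    standalone = false;
            }

            /* the in-place window shrinks to slots [j, remaining) */
            printf("window start %u, deficit %u, standalone %d\n",
                   j, remaining - j, standalone);  /* -> 2, 2, 0 */
            return 0;
    }

Every slot before j was satisfied from the cache (or marked for delayed
allocation), so the in-place window advances by j pages. Only when all
slots are satisfied (standalone stays true) can the work be decompressed
out of order, which is why the role is then downgraded to
Z_EROFS_VLE_WORK_PRIMARY.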
@@ -234,6 +269,17 @@ int erofs_try_to_free_cached_page(struct address_space *mapping,
         }
         return ret;
 }
+#else
+static void preload_compressed_pages(struct z_erofs_vle_work_builder *bl,
+                                     struct address_space *mc,
+                                     pgoff_t index,
+                                     unsigned int clusterpages,
+                                     enum z_erofs_cache_alloctype type,
+                                     struct list_head *pagepool,
+                                     gfp_t gfp)
+{
+        /* nowhere to load compressed pages from */
+}
 #endif
 
 /* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */
@@ -608,6 +654,26 @@ struct z_erofs_vle_frontend {
         .owned_head = Z_EROFS_VLE_WORKGRP_TAIL, \
         .backmost = true, }
 
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+static inline bool
+should_alloc_managed_pages(struct z_erofs_vle_frontend *fe, erofs_off_t la)
+{
+        if (fe->backmost)
+                return true;
+
+        if (EROFS_FS_ZIP_CACHE_LVL >= 2)
+                return la < fe->headoffset;
+
+        return false;
+}
+#else
+static inline bool
+should_alloc_managed_pages(struct z_erofs_vle_frontend *fe, erofs_off_t la)
+{
+        return false;
+}
+#endif
+
 static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
                                 struct page *page,
                                 struct list_head *page_pool)
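At its call site in z_erofs_do_read_page() (the "@@ -667,20 +728,16 @@"
hunk below), the result of should_alloc_managed_pages() simply selects the
cache allocation type, roughly:

    cache_strategy = should_alloc_managed_pages(fe, map->m_la) ?
                     DELAYEDALLOC : DONTALLOC;

so the backmost cluster is always cached, and with
EROFS_FS_ZIP_CACHE_LVL >= 2 everything before fe->headoffset is cached
as well.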
@@ -622,12 +688,7 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
         bool tight = builder_is_followed(builder);
         struct z_erofs_vle_work *work = builder->work;
 
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
-        struct address_space *const mc = MNGD_MAPPING(sbi);
-        struct z_erofs_vle_workgroup *grp;
-        bool noio_outoforder;
-#endif
-
+        enum z_erofs_cache_alloctype cache_strategy;
         enum z_erofs_page_type page_type;
         unsigned int cur, end, spiltted, index;
         int err = 0;
@@ -667,20 +728,16 @@ repeat:
         if (unlikely(err))
                 goto err_out;
 
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
-        grp = fe->builder.grp;
-
-        /* let's do out-of-order decompression for noio */
-        noio_outoforder = grab_managed_cache_pages(mc,
-                erofs_blknr(map->m_pa),
-                grp->compressed_pages, erofs_blknr(map->m_plen),
-                /* compressed page caching selection strategy */
-                fe->backmost | (EROFS_FS_ZIP_CACHE_LVL >= 2 ?
-                        map->m_la < fe->headoffset : 0));
-
-        if (noio_outoforder && builder_is_followed(builder))
-                builder->role = Z_EROFS_VLE_WORK_PRIMARY;
-#endif
+        /* preload all compressed pages (maybe downgrade role if necessary) */
+        if (should_alloc_managed_pages(fe, map->m_la))
+                cache_strategy = DELAYEDALLOC;
+        else
+                cache_strategy = DONTALLOC;
+
+        preload_compressed_pages(builder, MNGD_MAPPING(sbi),
+                                 map->m_pa / PAGE_SIZE,
+                                 map->m_plen / PAGE_SIZE,
+                                 cache_strategy, page_pool, GFP_KERNEL);
 
         tight &= builder_is_followed(builder);
         work = builder->work;
@@ -1062,6 +1119,9 @@ pickup_page_for_submission(struct z_erofs_vle_workgroup *grp,
         struct address_space *mapping;
         struct page *oldpage, *page;
 
+        compressed_page_t t;
+        int justfound;
+
 repeat:
         page = READ_ONCE(grp->compressed_pages[nr]);
         oldpage = page;
@@ -1078,6 +1138,11 @@ repeat:
                 goto out_allocpage;
         }
 
+        /* process the target tagged pointer */
+        t = tagptr_init(compressed_page_t, page);
+        justfound = tagptr_unfold_tags(t);
+        page = tagptr_unfold_ptr(t);
+
         mapping = READ_ONCE(page->mapping);
 
         /*
@@ -1085,7 +1150,10 @@ repeat:
          * get such a cached-like page.
          */
         if (nocache) {
-                /* should be locked, not uptodate, and not truncated */
+                /* if managed cache is disabled, it is impossible `justfound' */
+                DBG_BUGON(justfound);
+
+                /* and it should be locked, not uptodate, and not truncated */
                 DBG_BUGON(!PageLocked(page));
                 DBG_BUGON(PageUptodate(page));
                 DBG_BUGON(!mapping);
@@ -1102,11 +1170,22 @@ repeat:
 
         lock_page(page);
 
+        /* only true if page reclaim goes wrong, should never happen */
+        DBG_BUGON(justfound && PagePrivate(page));
+
         /* the page is still in manage cache */
         if (page->mapping == mc) {
                 WRITE_ONCE(grp->compressed_pages[nr], page);
 
                 if (!PagePrivate(page)) {
+                        /*
+                         * impossible to be !PagePrivate(page) for
+                         * the current restriction as well if
+                         * the page is already in compressed_pages[].
+                         */
+                        DBG_BUGON(!justfound);
+
+                        justfound = 0;
                         set_page_private(page, (unsigned long)grp);
                         SetPagePrivate(page);
                 }
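These two assertions implement the double check mentioned in the commit
message: a page just found in the managed cache this round (justfound set,
extra reference held) cannot already carry PG_private, while a page that
was already sitting in compressed_pages[] from some previous round must
have PG_private set; hence DBG_BUGON(justfound && PagePrivate(page)) right
after lock_page() and DBG_BUGON(!justfound) in the !PagePrivate branch.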
@@ -1124,6 +1203,7 @@ repeat:
          * reuse this one, let's allocate a new cache-managed page.
          */
         DBG_BUGON(page->mapping);
+        DBG_BUGON(!justfound);
 
         tocache = true;
         unlock_page(page);