mm/z3fold.c: introduce helper functions

Patch series "z3fold: support page migration", v2.

This patchset implements page migration support and slightly better buddy
search.  To implement page migration support, z3fold has to move away from
the current scheme of handle encoding, i.e. stop encoding the page address
in handles.  Instead, a small per-page structure is created which will
contain actual addresses for z3fold objects, while pointers to fields of
that structure will be used as handles.

Thus, it will be possible to change the underlying addresses to reflect
page migration.

To support migration itself, 3 callbacks will be implemented:

1: isolation callback: z3fold_page_isolate(): try to isolate the page
   by removing it from all lists.  Pages scheduled for some activity and
   mapped pages will not be isolated.  Return true if isolation was
   successful or false otherwise

2: migration callback: z3fold_page_migrate(): re-check critical
   conditions and migrate page contents to the new page provided by the
   system.  Returns 0 on success or negative error code otherwise

3: putback callback: z3fold_page_putback(): put back the page if
   z3fold_page_migrate() for it failed permanently (i.e. not with
   -EAGAIN code).

To make sure an isolated page doesn't get freed, its kref is incremented
in z3fold_page_isolate() and decremented during post-migration compaction,
if migration was successful, or by z3fold_page_putback() in the other
case.

Since the new handle encoding scheme implies slight memory consumption
increase, better buddy search (which decreases memory consumption) is
included in this patchset.

This patch (of 4):

Introduce a separate helper function for object allocation, as well as 2
smaller helpers to add a buddy to the list and to get a pointer to the
pool from the z3fold header.  No functional changes here.

Link: http://lkml.kernel.org/r/20190417103633.a4bb770b5bf0fb7e43ce1666@gmail.com
Signed-off-by: Vitaly Wool <vitaly.vul@sony.com>
Cc: Dan Streetman <ddstreet@ieee.org>
Cc: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Cc: Krzysztof Kozlowski <k.kozlowski@samsung.com>
Cc: Oleksiy Avramchenko <oleksiy.avramchenko@sonymobile.com>
Cc: Uladzislau Rezki <urezki@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Vitaly Wool 2019-05-13 17:22:43 -07:00 committed by Linus Torvalds
parent 1c52e6d068
commit 9050cce104
1 changed file with 100 additions and 84 deletions

View File

@ -255,10 +255,15 @@ static enum buddy handle_to_buddy(unsigned long handle)
return (handle - zhdr->first_num) & BUDDY_MASK; return (handle - zhdr->first_num) & BUDDY_MASK;
} }
/* Map a z3fold page header to the pool it belongs to. */
static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{
	struct z3fold_pool *pool = zhdr->pool;

	return pool;
}
static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked) static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{ {
struct page *page = virt_to_page(zhdr); struct page *page = virt_to_page(zhdr);
struct z3fold_pool *pool = zhdr->pool; struct z3fold_pool *pool = zhdr_to_pool(zhdr);
WARN_ON(!list_empty(&zhdr->buddy)); WARN_ON(!list_empty(&zhdr->buddy));
set_bit(PAGE_STALE, &page->private); set_bit(PAGE_STALE, &page->private);
@ -295,9 +300,10 @@ static void release_z3fold_page_locked_list(struct kref *ref)
{ {
struct z3fold_header *zhdr = container_of(ref, struct z3fold_header, struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
refcount); refcount);
spin_lock(&zhdr->pool->lock); struct z3fold_pool *pool = zhdr_to_pool(zhdr);
spin_lock(&pool->lock);
list_del_init(&zhdr->buddy); list_del_init(&zhdr->buddy);
spin_unlock(&zhdr->pool->lock); spin_unlock(&pool->lock);
WARN_ON(z3fold_page_trylock(zhdr)); WARN_ON(z3fold_page_trylock(zhdr));
__release_z3fold_page(zhdr, true); __release_z3fold_page(zhdr, true);
@ -349,6 +355,23 @@ static int num_free_chunks(struct z3fold_header *zhdr)
return nfree; return nfree;
} }
/*
 * Add to the appropriate unbuddied list.
 *
 * A page is "unbuddied" when at least one of its three slots (first,
 * middle, last) is empty; a fully occupied page is left off the lists.
 * The page is filed on the current CPU's per-cpu unbuddied array, indexed
 * by the number of free chunks, so later allocations of a matching size
 * can find it quickly.
 *
 * Caller is expected to hold the z3fold page lock; pool->lock protects
 * the list manipulation itself.
 */
static inline void add_to_unbuddied(struct z3fold_pool *pool,
				struct z3fold_header *zhdr)
{
	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
			zhdr->middle_chunks == 0) {
		/*
		 * get_cpu_ptr() disables preemption, so smp_processor_id()
		 * below is stable until put_cpu_ptr().
		 */
		struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);
		int freechunks = num_free_chunks(zhdr);

		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[freechunks]);
		spin_unlock(&pool->lock);
		/* Record which CPU's list holds this page. */
		zhdr->cpu = smp_processor_id();
		put_cpu_ptr(pool->unbuddied);
	}
}
static inline void *mchunk_memmove(struct z3fold_header *zhdr, static inline void *mchunk_memmove(struct z3fold_header *zhdr,
unsigned short dst_chunk) unsigned short dst_chunk)
{ {
@ -406,10 +429,8 @@ static int z3fold_compact_page(struct z3fold_header *zhdr)
static void do_compact_page(struct z3fold_header *zhdr, bool locked) static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{ {
struct z3fold_pool *pool = zhdr->pool; struct z3fold_pool *pool = zhdr_to_pool(zhdr);
struct page *page; struct page *page;
struct list_head *unbuddied;
int fchunks;
page = virt_to_page(zhdr); page = virt_to_page(zhdr);
if (locked) if (locked)
@ -430,18 +451,7 @@ static void do_compact_page(struct z3fold_header *zhdr, bool locked)
} }
z3fold_compact_page(zhdr); z3fold_compact_page(zhdr);
unbuddied = get_cpu_ptr(pool->unbuddied); add_to_unbuddied(pool, zhdr);
fchunks = num_free_chunks(zhdr);
if (fchunks < NCHUNKS &&
(!zhdr->first_chunks || !zhdr->middle_chunks ||
!zhdr->last_chunks)) {
/* the page's not completely free and it's unbuddied */
spin_lock(&pool->lock);
list_add(&zhdr->buddy, &unbuddied[fchunks]);
spin_unlock(&pool->lock);
zhdr->cpu = smp_processor_id();
}
put_cpu_ptr(pool->unbuddied);
z3fold_page_unlock(zhdr); z3fold_page_unlock(zhdr);
} }
@ -453,6 +463,67 @@ static void compact_page_work(struct work_struct *w)
do_compact_page(zhdr, false); do_compact_page(zhdr, false);
} }
/*
 * Try to satisfy an allocation of @size from an existing unbuddied page.
 *
 * Scans the current CPU's per-cpu unbuddied lists, starting at the list
 * matching size_to_chunks(size), for a page with a suitable free slot.
 * On success the page's reference count has been bumped and its header
 * is returned *locked*; the caller is responsible for the eventual
 * z3fold_page_unlock() and kref_put(). Returns NULL if no suitable
 * page was found.
 *
 * @can_sleep allows a cond_resched() between retries of the lockless
 * lookup; pass false from atomic context.
 */
static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
						size_t size, bool can_sleep)
{
	struct z3fold_header *zhdr = NULL;
	struct page *page;
	struct list_head *unbuddied;
	int chunks = size_to_chunks(size), i;

lookup:
	/* First, try to find an unbuddied z3fold page. */
	unbuddied = get_cpu_ptr(pool->unbuddied);
	for_each_unbuddied_list(i, chunks) {
		struct list_head *l = &unbuddied[i];

		/* Optimistic, lockless peek at the list head. */
		zhdr = list_first_entry_or_null(READ_ONCE(l),
				struct z3fold_header, buddy);

		if (!zhdr)
			continue;

		/* Re-check under lock. */
		spin_lock(&pool->lock);
		l = &unbuddied[i];
		/*
		 * If the head changed under us, or someone else holds the
		 * page lock, drop everything and restart the lookup from
		 * scratch (re-enabling preemption first).
		 */
		if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
				struct z3fold_header, buddy)) ||
		    !z3fold_page_trylock(zhdr)) {
			spin_unlock(&pool->lock);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}
		list_del_init(&zhdr->buddy);
		/* Page is off the per-cpu lists now; no owning CPU. */
		zhdr->cpu = -1;
		spin_unlock(&pool->lock);

		page = virt_to_page(zhdr);
		/*
		 * A page queued for compaction is about to be reshuffled;
		 * don't hand it out, retry the lookup instead.
		 */
		if (test_bit(NEEDS_COMPACTING, &page->private)) {
			z3fold_page_unlock(zhdr);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}

		/*
		 * this page could not be removed from its unbuddied
		 * list while pool lock was held, and then we've taken
		 * page lock so kref_put could not be called before
		 * we got here, so it's safe to just call kref_get()
		 */
		kref_get(&zhdr->refcount);
		break;
	}
	put_cpu_ptr(pool->unbuddied);

	return zhdr;
}
/* /*
* API Functions * API Functions
@ -546,7 +617,7 @@ static void z3fold_destroy_pool(struct z3fold_pool *pool)
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp, static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
unsigned long *handle) unsigned long *handle)
{ {
int chunks = 0, i, freechunks; int chunks = size_to_chunks(size);
struct z3fold_header *zhdr = NULL; struct z3fold_header *zhdr = NULL;
struct page *page = NULL; struct page *page = NULL;
enum buddy bud; enum buddy bud;
@ -561,56 +632,8 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE) if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
bud = HEADLESS; bud = HEADLESS;
else { else {
struct list_head *unbuddied; retry:
chunks = size_to_chunks(size); zhdr = __z3fold_alloc(pool, size, can_sleep);
lookup:
/* First, try to find an unbuddied z3fold page. */
unbuddied = get_cpu_ptr(pool->unbuddied);
for_each_unbuddied_list(i, chunks) {
struct list_head *l = &unbuddied[i];
zhdr = list_first_entry_or_null(READ_ONCE(l),
struct z3fold_header, buddy);
if (!zhdr)
continue;
/* Re-check under lock. */
spin_lock(&pool->lock);
l = &unbuddied[i];
if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
struct z3fold_header, buddy)) ||
!z3fold_page_trylock(zhdr)) {
spin_unlock(&pool->lock);
put_cpu_ptr(pool->unbuddied);
goto lookup;
}
list_del_init(&zhdr->buddy);
zhdr->cpu = -1;
spin_unlock(&pool->lock);
page = virt_to_page(zhdr);
if (test_bit(NEEDS_COMPACTING, &page->private)) {
z3fold_page_unlock(zhdr);
zhdr = NULL;
put_cpu_ptr(pool->unbuddied);
if (can_sleep)
cond_resched();
goto lookup;
}
/*
* this page could not be removed from its unbuddied
* list while pool lock was held, and then we've taken
* page lock so kref_put could not be called before
* we got here, so it's safe to just call kref_get()
*/
kref_get(&zhdr->refcount);
break;
}
put_cpu_ptr(pool->unbuddied);
if (zhdr) { if (zhdr) {
if (zhdr->first_chunks == 0) { if (zhdr->first_chunks == 0) {
if (zhdr->middle_chunks != 0 && if (zhdr->middle_chunks != 0 &&
@ -630,8 +653,9 @@ lookup:
z3fold_page_unlock(zhdr); z3fold_page_unlock(zhdr);
pr_err("No free chunks in unbuddied\n"); pr_err("No free chunks in unbuddied\n");
WARN_ON(1); WARN_ON(1);
goto lookup; goto retry;
} }
page = virt_to_page(zhdr);
goto found; goto found;
} }
bud = FIRST; bud = FIRST;
@ -662,8 +686,12 @@ lookup:
if (!page) if (!page)
return -ENOMEM; return -ENOMEM;
atomic64_inc(&pool->pages_nr);
zhdr = init_z3fold_page(page, pool); zhdr = init_z3fold_page(page, pool);
if (!zhdr) {
__free_page(page);
return -ENOMEM;
}
atomic64_inc(&pool->pages_nr);
if (bud == HEADLESS) { if (bud == HEADLESS) {
set_bit(PAGE_HEADLESS, &page->private); set_bit(PAGE_HEADLESS, &page->private);
@ -680,19 +708,7 @@ found:
zhdr->middle_chunks = chunks; zhdr->middle_chunks = chunks;
zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS; zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
} }
add_to_unbuddied(pool, zhdr);
if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
zhdr->middle_chunks == 0) {
struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);
/* Add to unbuddied list */
freechunks = num_free_chunks(zhdr);
spin_lock(&pool->lock);
list_add(&zhdr->buddy, &unbuddied[freechunks]);
spin_unlock(&pool->lock);
zhdr->cpu = smp_processor_id();
put_cpu_ptr(pool->unbuddied);
}
headless: headless:
spin_lock(&pool->lock); spin_lock(&pool->lock);