Merge branch 'akpm' (patches from Andrew Morton)

Merge more patches from Andrew Morton:
 "The rest of MM"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm: remove free_area_cache
  zswap: add documentation
  zswap: add to mm/
  zbud: add to mm/

commit db6e330490
@ -0,0 +1,68 @@
Overview:

Zswap is a lightweight compressed cache for swap pages. It takes pages that are
in the process of being swapped out and attempts to compress them into a
dynamically allocated RAM-based memory pool. zswap basically trades CPU cycles
for potentially reduced swap I/O. This trade-off can also result in a
significant performance improvement if reads from the compressed cache are
faster than reads from a swap device.

NOTE: Zswap is a new feature as of v3.11 and interacts heavily with memory
reclaim. This interaction has not been fully explored on the large set of
potential configurations and workloads that exist. For this reason, zswap
is a work in progress and should be considered experimental.

Some potential benefits:
* Desktop/laptop users with limited RAM capacities can mitigate the
    performance impact of swapping.
* Overcommitted guests that share a common I/O resource can
    dramatically reduce their swap I/O pressure, avoiding heavy handed I/O
    throttling by the hypervisor. This allows more work to get done with less
    impact to the guest workload and guests sharing the I/O subsystem.
* Users with SSDs as swap devices can extend the life of the device by
    drastically reducing life-shortening writes.

Zswap evicts pages from the compressed cache on an LRU basis to the backing swap
device when the compressed pool reaches its size limit. This requirement had
been identified in prior community discussions.

To enable zswap, the "enabled" attribute must be set to 1 at boot time, e.g.

	zswap.enabled=1

Design:

Zswap receives pages for compression through the Frontswap API and is able to
evict pages from its own compressed pool on an LRU basis and write them back to
the backing swap device in the case that the compressed pool is full.

Zswap makes use of zbud for managing the compressed memory pool. Each
allocation in zbud is not directly accessible by address. Rather, a handle is
returned by the allocation routine and that handle must be mapped before being
accessed. The compressed memory pool grows on demand and shrinks as compressed
pages are freed. The pool is not preallocated.

When a swap page is passed from frontswap to zswap, zswap maintains a mapping
of the swap entry, a combination of the swap type and swap offset, to the zbud
handle that references that compressed swap page. This mapping is achieved
with a red-black tree per swap type. The swap offset is the search key for the
tree nodes.

During a page fault on a PTE that is a swap entry, frontswap calls the zswap
load function to decompress the page into the page allocated by the page fault
handler.

Once there are no PTEs referencing a swap page stored in zswap (i.e. the count
in the swap_map goes to 0) the swap code calls the zswap invalidate function,
via frontswap, to free the compressed entry.

Zswap seeks to be simple in its policies. Sysfs attributes allow for one
user-controlled policy:
* max_pool_percent - The maximum percentage of memory that the compressed
    pool can occupy.

Zswap allows the compressor to be selected at kernel boot time by setting the
"compressor" attribute. The default compressor is lzo, e.g.

	zswap.compressor=deflate

A debugfs interface is provided for various statistics about pool size, number
of pages stored, and various counters for the reasons pages are rejected.
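A rough, self-contained illustration of the lookup described above may help; this is
not code from this commit (the real structures are struct zswap_entry and
zswap_rb_search() in mm/zswap.c further down). One tree exists per swap type, keyed
by the swap offset, and the search is an ordinary binary-tree walk:

	/* Illustrative only: simplified stand-in for the per-swap-type tree. */
	#include <stddef.h>

	struct demo_entry {
		unsigned long offset;            /* swap offset, the search key */
		unsigned long handle;            /* zbud handle of the compressed page */
		struct demo_entry *left, *right;
	};

	/* Walk the tree for @offset; NULL means the page is not stored in zswap. */
	static struct demo_entry *demo_search(struct demo_entry *node,
					      unsigned long offset)
	{
		while (node) {
			if (offset < node->offset)
				node = node->left;
			else if (offset > node->offset)
				node = node->right;
			else
				return node;
		}
		return NULL;
	}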
@ -181,11 +181,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		mm->mmap_base = mmap_base(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}
@ -90,11 +90,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE;
		mm->get_unmapped_area = arch_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		mm->mmap_base = mmap_base();
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}
EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);
@ -158,11 +158,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		mm->mmap_base = mmap_base(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}
@ -92,10 +92,8 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE;
		mm->get_unmapped_area = arch_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		mm->mmap_base = mmap_base();
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}
@ -91,11 +91,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE;
		mm->get_unmapped_area = arch_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		mm->mmap_base = mmap_base();
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}
@ -176,11 +174,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE;
		mm->get_unmapped_area = s390_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		mm->mmap_base = mmap_base();
		mm->get_unmapped_area = s390_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}
@ -290,7 +290,6 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
	    sysctl_legacy_va_layout) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		/* We know it's 32-bit */
		unsigned long task_size = STACK_TOP32;
@ -302,7 +301,6 @@ void arch_pick_mmap_layout(struct mm_struct *mm)

		mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}
@ -66,10 +66,8 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
	if (!is_32bit || rlimit(RLIMIT_STACK) == RLIM_INFINITY) {
		mm->mmap_base = TASK_UNMAPPED_BASE;
		mm->get_unmapped_area = arch_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		mm->mmap_base = mmap_base(mm);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}
@ -308,8 +308,6 @@ static int load_aout_binary(struct linux_binprm *bprm)
		(current->mm->start_data = N_DATADDR(ex));
	current->mm->brk = ex.a_bss +
		(current->mm->start_brk = N_BSSADDR(ex));
	current->mm->free_area_cache = TASK_UNMAPPED_BASE;
	current->mm->cached_hole_size = 0;

	retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
	if (retval < 0) {
@ -115,10 +115,8 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
	if (mmap_is_legacy()) {
		mm->mmap_base = mmap_legacy_base();
		mm->get_unmapped_area = arch_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		mm->mmap_base = mmap_base();
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}
@ -255,8 +255,6 @@ static int load_aout_binary(struct linux_binprm * bprm)
		(current->mm->start_data = N_DATADDR(ex));
	current->mm->brk = ex.a_bss +
		(current->mm->start_brk = N_BSSADDR(ex));
	current->mm->free_area_cache = current->mm->mmap_base;
	current->mm->cached_hole_size = 0;

	retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
	if (retval < 0) {
@ -738,8 +738,6 @@ static int load_elf_binary(struct linux_binprm *bprm)

	/* Do this so that we can load the interpreter, if need be. We will
	   change some of these later */
	current->mm->free_area_cache = current->mm->mmap_base;
	current->mm->cached_hole_size = 0;
	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
				 executable_stack);
	if (retval < 0) {
@ -330,12 +330,9 @@ struct mm_struct {
	unsigned long (*get_unmapped_area) (struct file *filp,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags);
	void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
#endif
	unsigned long mmap_base;		/* base of mmap area */
	unsigned long task_size;		/* size of task vm space */
	unsigned long cached_hole_size;		/* if non-zero, the largest hole below free_area_cache */
	unsigned long free_area_cache;		/* first hole of size cached_hole_size or larger */
	unsigned long highest_vm_end;		/* highest vma end address */
	pgd_t * pgd;
	atomic_t mm_users;			/* How many users with user space? */
@ -322,8 +322,6 @@ extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags);
extern void arch_unmap_area(struct mm_struct *, unsigned long);
extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif
@ -0,0 +1,22 @@
#ifndef _ZBUD_H_
#define _ZBUD_H_

#include <linux/types.h>

struct zbud_pool;

struct zbud_ops {
	int (*evict)(struct zbud_pool *pool, unsigned long handle);
};

struct zbud_pool *zbud_create_pool(gfp_t gfp, struct zbud_ops *ops);
void zbud_destroy_pool(struct zbud_pool *pool);
int zbud_alloc(struct zbud_pool *pool, int size, gfp_t gfp,
	unsigned long *handle);
void zbud_free(struct zbud_pool *pool, unsigned long handle);
int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries);
void *zbud_map(struct zbud_pool *pool, unsigned long handle);
void zbud_unmap(struct zbud_pool *pool, unsigned long handle);
u64 zbud_get_pool_size(struct zbud_pool *pool);

#endif /* _ZBUD_H_ */
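The header above is the entire public zbud API. As a hedged sketch of how a client such
as zswap might drive it -- not code from this commit; my_evict, my_pool_create and
my_store are made-up names -- a caller creates a pool with an eviction callback,
allocates by size, and must map the opaque handle before touching the data:

	/* Illustrative only; error handling abbreviated. */
	#include <linux/gfp.h>
	#include <linux/string.h>
	#include <linux/zbud.h>

	/*
	 * Hypothetical eviction callback: called by zbud_reclaim_page() for each
	 * buddy in the page being reclaimed; return 0 after writing the data back
	 * and calling zbud_free() on the handle.
	 */
	static int my_evict(struct zbud_pool *pool, unsigned long handle)
	{
		/* ... write the compressed data back to the swap device ... */
		zbud_free(pool, handle);
		return 0;
	}

	static struct zbud_ops my_ops = { .evict = my_evict };

	/* Create the pool once, e.g. when a swap device is enabled. */
	static struct zbud_pool *my_pool_create(void)
	{
		return zbud_create_pool(GFP_KERNEL, &my_ops);
	}

	/* Store @len bytes of already-compressed data, returning an opaque handle. */
	static int my_store(struct zbud_pool *pool, const void *data, int len,
			    unsigned long *handle)
	{
		void *dst;
		int ret;

		ret = zbud_alloc(pool, len, __GFP_NORETRY | __GFP_NOWARN, handle);
		if (ret)
			return ret;	/* -ENOSPC if too big, -ENOMEM if no page */

		dst = zbud_map(pool, *handle);	/* handle -> usable pointer */
		memcpy(dst, data, len);
		zbud_unmap(pool, *handle);
		return 0;
	}

zbud_reclaim_page(pool, retries) would then be called when the pool needs to shrink,
and zbud_get_pool_size(pool) reports the current footprint in pages.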
@ -365,8 +365,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
	mm->locked_vm = 0;
	mm->mmap = NULL;
	mm->mmap_cache = NULL;
	mm->free_area_cache = oldmm->mmap_base;
	mm->cached_hole_size = ~0UL;
	mm->map_count = 0;
	cpumask_clear(mm_cpumask(mm));
	mm->mm_rb = RB_ROOT;
@ -540,8 +538,6 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
	mm->nr_ptes = 0;
	memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
	spin_lock_init(&mm->page_table_lock);
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;
	mm_init_aio(mm);
	mm_init_owner(mm, p);
mm/Kconfig
@ -478,6 +478,36 @@ config FRONTSWAP

	  If unsure, say Y to enable frontswap.

config ZBUD
	tristate
	default n
	help
	  A special purpose allocator for storing compressed pages.
	  It is designed to store up to two compressed pages per physical
	  page.  While this design limits storage density, it has simple and
	  deterministic reclaim properties that make it preferable to a higher
	  density approach when reclaim will be used.

config ZSWAP
	bool "Compressed cache for swap pages (EXPERIMENTAL)"
	depends on FRONTSWAP && CRYPTO=y
	select CRYPTO_LZO
	select ZBUD
	default n
	help
	  A lightweight compressed cache for swap pages.  It takes
	  pages that are in the process of being swapped out and attempts to
	  compress them into a dynamically allocated RAM-based memory pool.
	  This can result in a significant I/O reduction on the swap device
	  and, in the case where decompressing from RAM is faster than swap
	  device reads, can also improve workload performance.

	  This is marked experimental because it is a new feature (as of
	  v3.11) that interacts heavily with memory reclaim.  While these
	  interactions don't cause any known issues on simple memory setups,
	  they have not been fully explored on the large set of potential
	  configurations and workloads that exist.

config MEM_SOFT_DIRTY
	bool "Track memory changes"
	depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY
@ -32,6 +32,7 @@ obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o
obj-$(CONFIG_BOUNCE)	+= bounce.o
obj-$(CONFIG_SWAP)	+= page_io.o swap_state.o swapfile.o
obj-$(CONFIG_FRONTSWAP)	+= frontswap.o
obj-$(CONFIG_ZSWAP)	+= zswap.o
obj-$(CONFIG_HAS_DMA)	+= dmapool.o
obj-$(CONFIG_HUGETLBFS)	+= hugetlb.o
obj-$(CONFIG_NUMA)	+= mempolicy.o

@ -58,3 +59,4 @@ obj-$(CONFIG_DEBUG_KMEMLEAK) += kmemleak.o
obj-$(CONFIG_DEBUG_KMEMLEAK_TEST) += kmemleak-test.o
obj-$(CONFIG_CLEANCACHE) += cleancache.o
obj-$(CONFIG_MEMORY_ISOLATION) += page_isolation.o
obj-$(CONFIG_ZBUD)	+= zbud.o
mm/mmap.c
@ -1878,15 +1878,6 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
}
#endif

void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
{
	/*
	 * Is this a new hole at the lowest possible address?
	 */
	if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
		mm->free_area_cache = addr;
}

/*
 * This mmap-allocator allocates new areas top-down from below the
 * stack's low limit (the base):
@ -1943,19 +1934,6 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
}
#endif

void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
{
	/*
	 * Is this a new hole at the highest possible address?
	 */
	if (addr > mm->free_area_cache)
		mm->free_area_cache = addr;

	/* dont allow allocations above current base */
	if (mm->free_area_cache > mm->mmap_base)
		mm->free_area_cache = mm->mmap_base;
}

unsigned long
get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
@ -2376,7 +2354,6 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
{
	struct vm_area_struct **insertion_point;
	struct vm_area_struct *tail_vma = NULL;
	unsigned long addr;

	insertion_point = (prev ? &prev->vm_next : &mm->mmap);
	vma->vm_prev = NULL;
@ -2393,11 +2370,6 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
	} else
		mm->highest_vm_end = prev ? prev->vm_end : 0;
	tail_vma->vm_next = NULL;
	if (mm->unmap_area == arch_unmap_area)
		addr = prev ? prev->vm_end : mm->mmap_base;
	else
		addr = vma ? vma->vm_start : mm->mmap_base;
	mm->unmap_area(mm, addr);
	mm->mmap_cache = NULL;		/* Kill the cache. */
}
@ -1871,10 +1871,6 @@ unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
	return -ENOMEM;
}

void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
{
}

void unmap_mapping_range(struct address_space *mapping,
			 loff_t const holebegin, loff_t const holelen,
			 int even_cows)
@ -295,7 +295,6 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
	mm->unmap_area = arch_unmap_area;
}
#endif
@ -0,0 +1,527 @@
|
|||
/*
|
||||
* zbud.c
|
||||
*
|
||||
* Copyright (C) 2013, Seth Jennings, IBM
|
||||
*
|
||||
* Concepts based on zcache internal zbud allocator by Dan Magenheimer.
|
||||
*
|
||||
* zbud is a special purpose allocator for storing compressed pages. Contrary
|
||||
* to what its name may suggest, zbud is not a buddy allocator, but rather an
|
||||
* allocator that "buddies" two compressed pages together in a single memory
|
||||
* page.
|
||||
*
|
||||
* While this design limits storage density, it has simple and deterministic
|
||||
* reclaim properties that make it preferable to a higher density approach when
|
||||
* reclaim will be used.
|
||||
*
|
||||
* zbud works by storing compressed pages, or "zpages", together in pairs in a
|
||||
* single memory page called a "zbud page". The first buddy is "left
|
||||
* justifed" at the beginning of the zbud page, and the last buddy is "right
|
||||
* justified" at the end of the zbud page. The benefit is that if either
|
||||
* buddy is freed, the freed buddy space, coalesced with whatever slack space
|
||||
* that existed between the buddies, results in the largest possible free region
|
||||
* within the zbud page.
|
||||
*
|
||||
* zbud also provides an attractive lower bound on density. The ratio of zpages
|
||||
* to zbud pages can not be less than 1. This ensures that zbud can never "do
|
||||
* harm" by using more pages to store zpages than the uncompressed zpages would
|
||||
* have used on their own.
|
||||
*
|
||||
* zbud pages are divided into "chunks". The size of the chunks is fixed at
|
||||
* compile time and determined by NCHUNKS_ORDER below. Dividing zbud pages
|
||||
* into chunks allows organizing unbuddied zbud pages into a manageable number
|
||||
* of unbuddied lists according to the number of free chunks available in the
|
||||
* zbud page.
|
||||
*
|
||||
* The zbud API differs from that of conventional allocators in that the
|
||||
* allocation function, zbud_alloc(), returns an opaque handle to the user,
|
||||
* not a dereferenceable pointer. The user must map the handle using
|
||||
* zbud_map() in order to get a usable pointer by which to access the
|
||||
* allocation data and unmap the handle with zbud_unmap() when operations
|
||||
* on the allocation data are complete.
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/preempt.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/zbud.h>
|
||||
|
||||
/*****************
|
||||
* Structures
|
||||
*****************/
|
||||
/*
|
||||
* NCHUNKS_ORDER determines the internal allocation granularity, effectively
|
||||
* adjusting internal fragmentation. It also determines the number of
|
||||
* freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
|
||||
* allocation granularity will be in chunks of size PAGE_SIZE/64, and there
|
||||
* will be 64 freelists per pool.
|
||||
*/
|
||||
#define NCHUNKS_ORDER 6
|
||||
|
||||
#define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
|
||||
#define CHUNK_SIZE (1 << CHUNK_SHIFT)
|
||||
#define NCHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
|
||||
#define ZHDR_SIZE_ALIGNED CHUNK_SIZE
|
||||
|
||||
/**
|
||||
* struct zbud_pool - stores metadata for each zbud pool
|
||||
* @lock: protects all pool fields and first|last_chunk fields of any
|
||||
* zbud page in the pool
|
||||
* @unbuddied: array of lists tracking zbud pages that only contain one buddy;
|
||||
* the lists each zbud page is added to depends on the size of
|
||||
* its free region.
|
||||
* @buddied: list tracking the zbud pages that contain two buddies;
|
||||
* these zbud pages are full
|
||||
* @lru: list tracking the zbud pages in LRU order by most recently
|
||||
* added buddy.
|
||||
* @pages_nr: number of zbud pages in the pool.
|
||||
* @ops: pointer to a structure of user defined operations specified at
|
||||
* pool creation time.
|
||||
*
|
||||
* This structure is allocated at pool creation time and maintains metadata
|
||||
* pertaining to a particular zbud pool.
|
||||
*/
|
||||
struct zbud_pool {
|
||||
spinlock_t lock;
|
||||
struct list_head unbuddied[NCHUNKS];
|
||||
struct list_head buddied;
|
||||
struct list_head lru;
|
||||
u64 pages_nr;
|
||||
struct zbud_ops *ops;
|
||||
};
|
||||
|
||||
/*
|
||||
* struct zbud_header - zbud page metadata occupying the first chunk of each
|
||||
* zbud page.
|
||||
* @buddy: links the zbud page into the unbuddied/buddied lists in the pool
|
||||
* @lru: links the zbud page into the lru list in the pool
|
||||
* @first_chunks: the size of the first buddy in chunks, 0 if free
|
||||
* @last_chunks: the size of the last buddy in chunks, 0 if free
|
||||
*/
|
||||
struct zbud_header {
|
||||
struct list_head buddy;
|
||||
struct list_head lru;
|
||||
unsigned int first_chunks;
|
||||
unsigned int last_chunks;
|
||||
bool under_reclaim;
|
||||
};
|
||||
|
||||
/*****************
|
||||
* Helpers
|
||||
*****************/
|
||||
/* Just to make the code easier to read */
|
||||
enum buddy {
|
||||
FIRST,
|
||||
LAST
|
||||
};
|
||||
|
||||
/* Converts an allocation size in bytes to size in zbud chunks */
|
||||
static int size_to_chunks(int size)
|
||||
{
|
||||
return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
|
||||
}
|
||||
|
||||
#define for_each_unbuddied_list(_iter, _begin) \
|
||||
for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
|
||||
|
||||
/* Initializes the zbud header of a newly allocated zbud page */
|
||||
static struct zbud_header *init_zbud_page(struct page *page)
|
||||
{
|
||||
struct zbud_header *zhdr = page_address(page);
|
||||
zhdr->first_chunks = 0;
|
||||
zhdr->last_chunks = 0;
|
||||
INIT_LIST_HEAD(&zhdr->buddy);
|
||||
INIT_LIST_HEAD(&zhdr->lru);
|
||||
zhdr->under_reclaim = 0;
|
||||
return zhdr;
|
||||
}
|
||||
|
||||
/* Resets the struct page fields and frees the page */
|
||||
static void free_zbud_page(struct zbud_header *zhdr)
|
||||
{
|
||||
__free_page(virt_to_page(zhdr));
|
||||
}
|
||||
|
||||
/*
|
||||
* Encodes the handle of a particular buddy within a zbud page
|
||||
* Pool lock should be held as this function accesses first|last_chunks
|
||||
*/
|
||||
static unsigned long encode_handle(struct zbud_header *zhdr, enum buddy bud)
|
||||
{
|
||||
unsigned long handle;
|
||||
|
||||
/*
|
||||
* For now, the encoded handle is actually just the pointer to the data
|
||||
* but this might not always be the case. A little information hiding.
|
||||
* Add CHUNK_SIZE to the handle if it is the first allocation to jump
|
||||
* over the zbud header in the first chunk.
|
||||
*/
|
||||
handle = (unsigned long)zhdr;
|
||||
if (bud == FIRST)
|
||||
/* skip over zbud header */
|
||||
handle += ZHDR_SIZE_ALIGNED;
|
||||
else /* bud == LAST */
|
||||
handle += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
|
||||
return handle;
|
||||
}
|
||||
|
||||
/* Returns the zbud page where a given handle is stored */
|
||||
static struct zbud_header *handle_to_zbud_header(unsigned long handle)
|
||||
{
|
||||
return (struct zbud_header *)(handle & PAGE_MASK);
|
||||
}
|
||||
|
||||
/* Returns the number of free chunks in a zbud page */
|
||||
static int num_free_chunks(struct zbud_header *zhdr)
|
||||
{
|
||||
/*
|
||||
* Rather than branch for different situations, just use the fact that
|
||||
* free buddies have a length of zero to simplify everything. -1 at the
|
||||
* end for the zbud header.
|
||||
*/
|
||||
return NCHUNKS - zhdr->first_chunks - zhdr->last_chunks - 1;
|
||||
}
|
||||
|
||||
/*****************
|
||||
* API Functions
|
||||
*****************/
|
||||
/**
|
||||
* zbud_create_pool() - create a new zbud pool
|
||||
* @gfp: gfp flags when allocating the zbud pool structure
|
||||
* @ops: user-defined operations for the zbud pool
|
||||
*
|
||||
* Return: pointer to the new zbud pool or NULL if the metadata allocation
|
||||
* failed.
|
||||
*/
|
||||
struct zbud_pool *zbud_create_pool(gfp_t gfp, struct zbud_ops *ops)
|
||||
{
|
||||
struct zbud_pool *pool;
|
||||
int i;
|
||||
|
||||
pool = kmalloc(sizeof(struct zbud_pool), gfp);
|
||||
if (!pool)
|
||||
return NULL;
|
||||
spin_lock_init(&pool->lock);
|
||||
for_each_unbuddied_list(i, 0)
|
||||
INIT_LIST_HEAD(&pool->unbuddied[i]);
|
||||
INIT_LIST_HEAD(&pool->buddied);
|
||||
INIT_LIST_HEAD(&pool->lru);
|
||||
pool->pages_nr = 0;
|
||||
pool->ops = ops;
|
||||
return pool;
|
||||
}
|
||||
|
||||
/**
|
||||
* zbud_destroy_pool() - destroys an existing zbud pool
|
||||
* @pool: the zbud pool to be destroyed
|
||||
*
|
||||
* The pool should be emptied before this function is called.
|
||||
*/
|
||||
void zbud_destroy_pool(struct zbud_pool *pool)
|
||||
{
|
||||
kfree(pool);
|
||||
}
|
||||
|
||||
/**
|
||||
* zbud_alloc() - allocates a region of a given size
|
||||
* @pool: zbud pool from which to allocate
|
||||
* @size: size in bytes of the desired allocation
|
||||
* @gfp: gfp flags used if the pool needs to grow
|
||||
* @handle: handle of the new allocation
|
||||
*
|
||||
* This function will attempt to find a free region in the pool large enough to
|
||||
* satisfy the allocation request. A search of the unbuddied lists is
|
||||
* performed first. If no suitable free region is found, then a new page is
|
||||
* allocated and added to the pool to satisfy the request.
|
||||
*
|
||||
* gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
|
||||
* as zbud pool pages.
|
||||
*
|
||||
* Return: 0 if success and handle is set, otherwise -EINVAL if the size or
|
||||
* gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
|
||||
* a new page.
|
||||
*/
|
||||
int zbud_alloc(struct zbud_pool *pool, int size, gfp_t gfp,
|
||||
unsigned long *handle)
|
||||
{
|
||||
int chunks, i, freechunks;
|
||||
struct zbud_header *zhdr = NULL;
|
||||
enum buddy bud;
|
||||
struct page *page;
|
||||
|
||||
if (size <= 0 || gfp & __GFP_HIGHMEM)
|
||||
return -EINVAL;
|
||||
if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED)
|
||||
return -ENOSPC;
|
||||
chunks = size_to_chunks(size);
|
||||
spin_lock(&pool->lock);
|
||||
|
||||
/* First, try to find an unbuddied zbud page. */
|
||||
zhdr = NULL;
|
||||
for_each_unbuddied_list(i, chunks) {
|
||||
if (!list_empty(&pool->unbuddied[i])) {
|
||||
zhdr = list_first_entry(&pool->unbuddied[i],
|
||||
struct zbud_header, buddy);
|
||||
list_del(&zhdr->buddy);
|
||||
if (zhdr->first_chunks == 0)
|
||||
bud = FIRST;
|
||||
else
|
||||
bud = LAST;
|
||||
goto found;
|
||||
}
|
||||
}
|
||||
|
||||
/* Couldn't find unbuddied zbud page, create new one */
|
||||
spin_unlock(&pool->lock);
|
||||
page = alloc_page(gfp);
|
||||
if (!page)
|
||||
return -ENOMEM;
|
||||
spin_lock(&pool->lock);
|
||||
pool->pages_nr++;
|
||||
zhdr = init_zbud_page(page);
|
||||
bud = FIRST;
|
||||
|
||||
found:
|
||||
if (bud == FIRST)
|
||||
zhdr->first_chunks = chunks;
|
||||
else
|
||||
zhdr->last_chunks = chunks;
|
||||
|
||||
if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0) {
|
||||
/* Add to unbuddied list */
|
||||
freechunks = num_free_chunks(zhdr);
|
||||
list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
|
||||
} else {
|
||||
/* Add to buddied list */
|
||||
list_add(&zhdr->buddy, &pool->buddied);
|
||||
}
|
||||
|
||||
/* Add/move zbud page to beginning of LRU */
|
||||
if (!list_empty(&zhdr->lru))
|
||||
list_del(&zhdr->lru);
|
||||
list_add(&zhdr->lru, &pool->lru);
|
||||
|
||||
*handle = encode_handle(zhdr, bud);
|
||||
spin_unlock(&pool->lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* zbud_free() - frees the allocation associated with the given handle
|
||||
* @pool: pool in which the allocation resided
|
||||
* @handle: handle associated with the allocation returned by zbud_alloc()
|
||||
*
|
||||
* In the case that the zbud page in which the allocation resides is under
|
||||
* reclaim, as indicated by the PG_reclaim flag being set, this function
|
||||
* only sets the first|last_chunks to 0. The page is actually freed
|
||||
* once both buddies are evicted (see zbud_reclaim_page() below).
|
||||
*/
|
||||
void zbud_free(struct zbud_pool *pool, unsigned long handle)
|
||||
{
|
||||
struct zbud_header *zhdr;
|
||||
int freechunks;
|
||||
|
||||
spin_lock(&pool->lock);
|
||||
zhdr = handle_to_zbud_header(handle);
|
||||
|
||||
/* If first buddy, handle will be page aligned */
|
||||
if ((handle - ZHDR_SIZE_ALIGNED) & ~PAGE_MASK)
|
||||
zhdr->last_chunks = 0;
|
||||
else
|
||||
zhdr->first_chunks = 0;
|
||||
|
||||
if (zhdr->under_reclaim) {
|
||||
/* zbud page is under reclaim, reclaim will free */
|
||||
spin_unlock(&pool->lock);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Remove from existing buddy list */
|
||||
list_del(&zhdr->buddy);
|
||||
|
||||
if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
|
||||
/* zbud page is empty, free */
|
||||
list_del(&zhdr->lru);
|
||||
free_zbud_page(zhdr);
|
||||
pool->pages_nr--;
|
||||
} else {
|
||||
/* Add to unbuddied list */
|
||||
freechunks = num_free_chunks(zhdr);
|
||||
list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
|
||||
}
|
||||
|
||||
spin_unlock(&pool->lock);
|
||||
}
|
||||
|
||||
#define list_tail_entry(ptr, type, member) \
|
||||
list_entry((ptr)->prev, type, member)
|
||||
|
||||
/**
|
||||
* zbud_reclaim_page() - evicts allocations from a pool page and frees it
|
||||
* @pool: pool from which a page will attempt to be evicted
|
||||
* @retries: number of pages on the LRU list for which eviction will
|
||||
* be attempted before failing
|
||||
*
|
||||
* zbud reclaim is different from normal system reclaim in that the reclaim is
|
||||
* done from the bottom, up. This is because only the bottom layer, zbud, has
|
||||
* information on how the allocations are organized within each zbud page. This
|
||||
* has the potential to create interesting locking situations between zbud and
|
||||
* the user, however.
|
||||
*
|
||||
* To avoid these, this is how zbud_reclaim_page() should be called:
|
||||
|
||||
* The user detects a page should be reclaimed and calls zbud_reclaim_page().
|
||||
* zbud_reclaim_page() will remove a zbud page from the pool LRU list and call
|
||||
* the user-defined eviction handler with the pool and handle as arguments.
|
||||
*
|
||||
* If the handle can not be evicted, the eviction handler should return
|
||||
* non-zero. zbud_reclaim_page() will add the zbud page back to the
|
||||
* appropriate list and try the next zbud page on the LRU up to
|
||||
* a user defined number of retries.
|
||||
*
|
||||
* If the handle is successfully evicted, the eviction handler should
|
||||
* return 0 _and_ should have called zbud_free() on the handle. zbud_free()
|
||||
* contains logic to delay freeing the page if the page is under reclaim,
|
||||
* as indicated by the setting of the PG_reclaim flag on the underlying page.
|
||||
*
|
||||
* If all buddies in the zbud page are successfully evicted, then the
|
||||
* zbud page can be freed.
|
||||
*
|
||||
* Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
|
||||
* no pages to evict or an eviction handler is not registered, -EAGAIN if
|
||||
* the retry limit was hit.
|
||||
*/
|
||||
int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries)
|
||||
{
|
||||
int i, ret, freechunks;
|
||||
struct zbud_header *zhdr;
|
||||
unsigned long first_handle = 0, last_handle = 0;
|
||||
|
||||
spin_lock(&pool->lock);
|
||||
if (!pool->ops || !pool->ops->evict || list_empty(&pool->lru) ||
|
||||
retries == 0) {
|
||||
spin_unlock(&pool->lock);
|
||||
return -EINVAL;
|
||||
}
|
||||
for (i = 0; i < retries; i++) {
|
||||
zhdr = list_tail_entry(&pool->lru, struct zbud_header, lru);
|
||||
list_del(&zhdr->lru);
|
||||
list_del(&zhdr->buddy);
|
||||
/* Protect zbud page against free */
|
||||
zhdr->under_reclaim = true;
|
||||
/*
|
||||
* We need to encode the handles before unlocking, since we can
|
||||
* race with free that will set (first|last)_chunks to 0
|
||||
*/
|
||||
first_handle = 0;
|
||||
last_handle = 0;
|
||||
if (zhdr->first_chunks)
|
||||
first_handle = encode_handle(zhdr, FIRST);
|
||||
if (zhdr->last_chunks)
|
||||
last_handle = encode_handle(zhdr, LAST);
|
||||
spin_unlock(&pool->lock);
|
||||
|
||||
/* Issue the eviction callback(s) */
|
||||
if (first_handle) {
|
||||
ret = pool->ops->evict(pool, first_handle);
|
||||
if (ret)
|
||||
goto next;
|
||||
}
|
||||
if (last_handle) {
|
||||
ret = pool->ops->evict(pool, last_handle);
|
||||
if (ret)
|
||||
goto next;
|
||||
}
|
||||
next:
|
||||
spin_lock(&pool->lock);
|
||||
zhdr->under_reclaim = false;
|
||||
if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
|
||||
/*
|
||||
* Both buddies are now free, free the zbud page and
|
||||
* return success.
|
||||
*/
|
||||
free_zbud_page(zhdr);
|
||||
pool->pages_nr--;
|
||||
spin_unlock(&pool->lock);
|
||||
return 0;
|
||||
} else if (zhdr->first_chunks == 0 ||
|
||||
zhdr->last_chunks == 0) {
|
||||
/* add to unbuddied list */
|
||||
freechunks = num_free_chunks(zhdr);
|
||||
list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
|
||||
} else {
|
||||
/* add to buddied list */
|
||||
list_add(&zhdr->buddy, &pool->buddied);
|
||||
}
|
||||
|
||||
/* add to beginning of LRU */
|
||||
list_add(&zhdr->lru, &pool->lru);
|
||||
}
|
||||
spin_unlock(&pool->lock);
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
/**
|
||||
* zbud_map() - maps the allocation associated with the given handle
|
||||
* @pool: pool in which the allocation resides
|
||||
* @handle: handle associated with the allocation to be mapped
|
||||
*
|
||||
* While trivial for zbud, the mapping functions for other allocators
|
||||
* implementing this allocation API could have more complex information encoded
|
||||
* in the handle and could create temporary mappings to make the data
|
||||
* accessible to the user.
|
||||
*
|
||||
* Returns: a pointer to the mapped allocation
|
||||
*/
|
||||
void *zbud_map(struct zbud_pool *pool, unsigned long handle)
|
||||
{
|
||||
return (void *)(handle);
|
||||
}
|
||||
|
||||
/**
|
||||
* zbud_unmap() - unmaps the allocation associated with the given handle
|
||||
* @pool: pool in which the allocation resides
|
||||
* @handle: handle associated with the allocation to be unmapped
|
||||
*/
|
||||
void zbud_unmap(struct zbud_pool *pool, unsigned long handle)
|
||||
{
|
||||
}
|
||||
|
||||
/**
|
||||
* zbud_get_pool_size() - gets the zbud pool size in pages
|
||||
* @pool: pool whose size is being queried
|
||||
*
|
||||
* Returns: size in pages of the given pool. The pool lock need not be
|
||||
* taken to access pages_nr.
|
||||
*/
|
||||
u64 zbud_get_pool_size(struct zbud_pool *pool)
|
||||
{
|
||||
return pool->pages_nr;
|
||||
}
|
||||
|
||||
static int __init init_zbud(void)
|
||||
{
|
||||
/* Make sure the zbud header will fit in one chunk */
|
||||
BUILD_BUG_ON(sizeof(struct zbud_header) > ZHDR_SIZE_ALIGNED);
|
||||
pr_info("loaded\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void __exit exit_zbud(void)
|
||||
{
|
||||
pr_info("unloaded\n");
|
||||
}
|
||||
|
||||
module_init(init_zbud);
|
||||
module_exit(exit_zbud);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("Seth Jennings <sjenning@linux.vnet.ibm.com>");
|
||||
MODULE_DESCRIPTION("Buddy Allocator for Compressed Pages");
|
|
@ -0,0 +1,943 @@
|
|||
/*
|
||||
* zswap.c - zswap driver file
|
||||
*
|
||||
* zswap is a backend for frontswap that takes pages that are in the process
|
||||
* of being swapped out and attempts to compress and store them in a
|
||||
* RAM-based memory pool. This can result in a significant I/O reduction on
|
||||
* the swap device and, in the case where decompressing from RAM is faster
|
||||
* than reading from the swap device, can also improve workload performance.
|
||||
*
|
||||
* Copyright (C) 2012 Seth Jennings <sjenning@linux.vnet.ibm.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/frontswap.h>
|
||||
#include <linux/rbtree.h>
|
||||
#include <linux/swap.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/mempool.h>
|
||||
#include <linux/zbud.h>
|
||||
|
||||
#include <linux/mm_types.h>
|
||||
#include <linux/page-flags.h>
|
||||
#include <linux/swapops.h>
|
||||
#include <linux/writeback.h>
|
||||
#include <linux/pagemap.h>
|
||||
|
||||
/*********************************
|
||||
* statistics
|
||||
**********************************/
|
||||
/* Number of memory pages used by the compressed pool */
|
||||
static u64 zswap_pool_pages;
|
||||
/* The number of compressed pages currently stored in zswap */
|
||||
static atomic_t zswap_stored_pages = ATOMIC_INIT(0);
|
||||
|
||||
/*
|
||||
* The statistics below are not protected from concurrent access for
|
||||
* performance reasons so they may not be 100% accurate. However,
|
||||
* they do provide useful information on roughly how many times a
|
||||
* certain event is occurring.
|
||||
*/
|
||||
|
||||
/* Pool limit was hit (see zswap_max_pool_percent) */
|
||||
static u64 zswap_pool_limit_hit;
|
||||
/* Pages written back when pool limit was reached */
|
||||
static u64 zswap_written_back_pages;
|
||||
/* Store failed due to a reclaim failure after pool limit was reached */
|
||||
static u64 zswap_reject_reclaim_fail;
|
||||
/* Compressed page was too big for the allocator to (optimally) store */
|
||||
static u64 zswap_reject_compress_poor;
|
||||
/* Store failed because underlying allocator could not get memory */
|
||||
static u64 zswap_reject_alloc_fail;
|
||||
/* Store failed because the entry metadata could not be allocated (rare) */
|
||||
static u64 zswap_reject_kmemcache_fail;
|
||||
/* Duplicate store was encountered (rare) */
|
||||
static u64 zswap_duplicate_entry;
|
||||
|
||||
/*********************************
|
||||
* tunables
|
||||
**********************************/
|
||||
/* Enable/disable zswap (disabled by default, fixed at boot for now) */
|
||||
static bool zswap_enabled __read_mostly;
|
||||
module_param_named(enabled, zswap_enabled, bool, 0);
|
||||
|
||||
/* Compressor to be used by zswap (fixed at boot for now) */
|
||||
#define ZSWAP_COMPRESSOR_DEFAULT "lzo"
|
||||
static char *zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
|
||||
module_param_named(compressor, zswap_compressor, charp, 0);
|
||||
|
||||
/* The maximum percentage of memory that the compressed pool can occupy */
|
||||
static unsigned int zswap_max_pool_percent = 20;
|
||||
module_param_named(max_pool_percent,
|
||||
zswap_max_pool_percent, uint, 0644);
|
||||
|
||||
/*********************************
|
||||
* compression functions
|
||||
**********************************/
|
||||
/* per-cpu compression transforms */
|
||||
static struct crypto_comp * __percpu *zswap_comp_pcpu_tfms;
|
||||
|
||||
enum comp_op {
|
||||
ZSWAP_COMPOP_COMPRESS,
|
||||
ZSWAP_COMPOP_DECOMPRESS
|
||||
};
|
||||
|
||||
static int zswap_comp_op(enum comp_op op, const u8 *src, unsigned int slen,
|
||||
u8 *dst, unsigned int *dlen)
|
||||
{
|
||||
struct crypto_comp *tfm;
|
||||
int ret;
|
||||
|
||||
tfm = *per_cpu_ptr(zswap_comp_pcpu_tfms, get_cpu());
|
||||
switch (op) {
|
||||
case ZSWAP_COMPOP_COMPRESS:
|
||||
ret = crypto_comp_compress(tfm, src, slen, dst, dlen);
|
||||
break;
|
||||
case ZSWAP_COMPOP_DECOMPRESS:
|
||||
ret = crypto_comp_decompress(tfm, src, slen, dst, dlen);
|
||||
break;
|
||||
default:
|
||||
ret = -EINVAL;
|
||||
}
|
||||
|
||||
put_cpu();
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int __init zswap_comp_init(void)
|
||||
{
|
||||
if (!crypto_has_comp(zswap_compressor, 0, 0)) {
|
||||
pr_info("%s compressor not available\n", zswap_compressor);
|
||||
/* fall back to default compressor */
|
||||
zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
|
||||
if (!crypto_has_comp(zswap_compressor, 0, 0))
|
||||
/* can't even load the default compressor */
|
||||
return -ENODEV;
|
||||
}
|
||||
pr_info("using %s compressor\n", zswap_compressor);
|
||||
|
||||
/* alloc percpu transforms */
|
||||
zswap_comp_pcpu_tfms = alloc_percpu(struct crypto_comp *);
|
||||
if (!zswap_comp_pcpu_tfms)
|
||||
return -ENOMEM;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void zswap_comp_exit(void)
|
||||
{
|
||||
/* free percpu transforms */
|
||||
if (zswap_comp_pcpu_tfms)
|
||||
free_percpu(zswap_comp_pcpu_tfms);
|
||||
}
|
||||
|
||||
/*********************************
|
||||
* data structures
|
||||
**********************************/
|
||||
/*
|
||||
* struct zswap_entry
|
||||
*
|
||||
* This structure contains the metadata for tracking a single compressed
|
||||
* page within zswap.
|
||||
*
|
||||
* rbnode - links the entry into red-black tree for the appropriate swap type
|
||||
* refcount - the number of outstanding references to the entry. This is needed
|
||||
* to protect against premature freeing of the entry by concurrent
* calls to load, invalidate, and writeback. The lock
|
||||
* for the zswap_tree structure that contains the entry must
|
||||
* be held while changing the refcount. Since the lock must
|
||||
* be held, there is no reason to also make refcount atomic.
|
||||
* offset - the swap offset for the entry. Index into the red-black tree.
|
||||
* handle - zbud allocation handle that stores the compressed page data
|
||||
* length - the length in bytes of the compressed page data. Needed during
|
||||
* decompression
|
||||
*/
|
||||
struct zswap_entry {
|
||||
struct rb_node rbnode;
|
||||
pgoff_t offset;
|
||||
int refcount;
|
||||
unsigned int length;
|
||||
unsigned long handle;
|
||||
};
|
||||
|
||||
struct zswap_header {
|
||||
swp_entry_t swpentry;
|
||||
};
|
||||
|
||||
/*
|
||||
* The tree lock in the zswap_tree struct protects a few things:
|
||||
* - the rbtree
|
||||
* - the refcount field of each entry in the tree
|
||||
*/
|
||||
struct zswap_tree {
|
||||
struct rb_root rbroot;
|
||||
spinlock_t lock;
|
||||
struct zbud_pool *pool;
|
||||
};
|
||||
|
||||
static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
|
||||
|
||||
/*********************************
|
||||
* zswap entry functions
|
||||
**********************************/
|
||||
static struct kmem_cache *zswap_entry_cache;
|
||||
|
||||
static int zswap_entry_cache_create(void)
|
||||
{
|
||||
zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
|
||||
return (zswap_entry_cache == NULL);
|
||||
}
|
||||
|
||||
static void zswap_entry_cache_destory(void)
|
||||
{
|
||||
kmem_cache_destroy(zswap_entry_cache);
|
||||
}
|
||||
|
||||
static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp)
|
||||
{
|
||||
struct zswap_entry *entry;
|
||||
entry = kmem_cache_alloc(zswap_entry_cache, gfp);
|
||||
if (!entry)
|
||||
return NULL;
|
||||
entry->refcount = 1;
|
||||
return entry;
|
||||
}
|
||||
|
||||
static void zswap_entry_cache_free(struct zswap_entry *entry)
|
||||
{
|
||||
kmem_cache_free(zswap_entry_cache, entry);
|
||||
}
|
||||
|
||||
/* caller must hold the tree lock */
|
||||
static void zswap_entry_get(struct zswap_entry *entry)
|
||||
{
|
||||
entry->refcount++;
|
||||
}
|
||||
|
||||
/* caller must hold the tree lock */
|
||||
static int zswap_entry_put(struct zswap_entry *entry)
|
||||
{
|
||||
entry->refcount--;
|
||||
return entry->refcount;
|
||||
}
|
||||
|
||||
/*********************************
|
||||
* rbtree functions
|
||||
**********************************/
|
||||
static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
|
||||
{
|
||||
struct rb_node *node = root->rb_node;
|
||||
struct zswap_entry *entry;
|
||||
|
||||
while (node) {
|
||||
entry = rb_entry(node, struct zswap_entry, rbnode);
|
||||
if (entry->offset > offset)
|
||||
node = node->rb_left;
|
||||
else if (entry->offset < offset)
|
||||
node = node->rb_right;
|
||||
else
|
||||
return entry;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* In the case that a entry with the same offset is found, a pointer to
|
||||
* the existing entry is stored in dupentry and the function returns -EEXIST
|
||||
*/
|
||||
static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
|
||||
struct zswap_entry **dupentry)
|
||||
{
|
||||
struct rb_node **link = &root->rb_node, *parent = NULL;
|
||||
struct zswap_entry *myentry;
|
||||
|
||||
while (*link) {
|
||||
parent = *link;
|
||||
myentry = rb_entry(parent, struct zswap_entry, rbnode);
|
||||
if (myentry->offset > entry->offset)
|
||||
link = &(*link)->rb_left;
|
||||
else if (myentry->offset < entry->offset)
|
||||
link = &(*link)->rb_right;
|
||||
else {
|
||||
*dupentry = myentry;
|
||||
return -EEXIST;
|
||||
}
|
||||
}
|
||||
rb_link_node(&entry->rbnode, parent, link);
|
||||
rb_insert_color(&entry->rbnode, root);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*********************************
|
||||
* per-cpu code
|
||||
**********************************/
|
||||
static DEFINE_PER_CPU(u8 *, zswap_dstmem);
|
||||
|
||||
static int __zswap_cpu_notifier(unsigned long action, unsigned long cpu)
|
||||
{
|
||||
struct crypto_comp *tfm;
|
||||
u8 *dst;
|
||||
|
||||
switch (action) {
|
||||
case CPU_UP_PREPARE:
|
||||
tfm = crypto_alloc_comp(zswap_compressor, 0, 0);
|
||||
if (IS_ERR(tfm)) {
|
||||
pr_err("can't allocate compressor transform\n");
|
||||
return NOTIFY_BAD;
|
||||
}
|
||||
*per_cpu_ptr(zswap_comp_pcpu_tfms, cpu) = tfm;
|
||||
dst = kmalloc(PAGE_SIZE * 2, GFP_KERNEL);
|
||||
if (!dst) {
|
||||
pr_err("can't allocate compressor buffer\n");
|
||||
crypto_free_comp(tfm);
|
||||
*per_cpu_ptr(zswap_comp_pcpu_tfms, cpu) = NULL;
|
||||
return NOTIFY_BAD;
|
||||
}
|
||||
per_cpu(zswap_dstmem, cpu) = dst;
|
||||
break;
|
||||
case CPU_DEAD:
|
||||
case CPU_UP_CANCELED:
|
||||
tfm = *per_cpu_ptr(zswap_comp_pcpu_tfms, cpu);
|
||||
if (tfm) {
|
||||
crypto_free_comp(tfm);
|
||||
*per_cpu_ptr(zswap_comp_pcpu_tfms, cpu) = NULL;
|
||||
}
|
||||
dst = per_cpu(zswap_dstmem, cpu);
|
||||
kfree(dst);
|
||||
per_cpu(zswap_dstmem, cpu) = NULL;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
return NOTIFY_OK;
|
||||
}
|
||||
|
||||
static int zswap_cpu_notifier(struct notifier_block *nb,
|
||||
unsigned long action, void *pcpu)
|
||||
{
|
||||
unsigned long cpu = (unsigned long)pcpu;
|
||||
return __zswap_cpu_notifier(action, cpu);
|
||||
}
|
||||
|
||||
static struct notifier_block zswap_cpu_notifier_block = {
|
||||
.notifier_call = zswap_cpu_notifier
|
||||
};
|
||||
|
||||
static int zswap_cpu_init(void)
|
||||
{
|
||||
unsigned long cpu;
|
||||
|
||||
get_online_cpus();
|
||||
for_each_online_cpu(cpu)
|
||||
if (__zswap_cpu_notifier(CPU_UP_PREPARE, cpu) != NOTIFY_OK)
|
||||
goto cleanup;
|
||||
register_cpu_notifier(&zswap_cpu_notifier_block);
|
||||
put_online_cpus();
|
||||
return 0;
|
||||
|
||||
cleanup:
|
||||
for_each_online_cpu(cpu)
|
||||
__zswap_cpu_notifier(CPU_UP_CANCELED, cpu);
|
||||
put_online_cpus();
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/*********************************
|
||||
* helpers
|
||||
**********************************/
|
||||
static bool zswap_is_full(void)
|
||||
{
|
||||
return (totalram_pages * zswap_max_pool_percent / 100 <
|
||||
zswap_pool_pages);
|
||||
}
|
||||
|
||||
/*
|
||||
* Carries out the common pattern of freeing an entry's zbud allocation,
|
||||
* freeing the entry itself, and decrementing the number of stored pages.
|
||||
*/
|
||||
static void zswap_free_entry(struct zswap_tree *tree, struct zswap_entry *entry)
|
||||
{
|
||||
zbud_free(tree->pool, entry->handle);
|
||||
zswap_entry_cache_free(entry);
|
||||
atomic_dec(&zswap_stored_pages);
|
||||
zswap_pool_pages = zbud_get_pool_size(tree->pool);
|
||||
}
|
||||
|
||||
/*********************************
|
||||
* writeback code
|
||||
**********************************/
|
||||
/* return enum for zswap_get_swap_cache_page */
|
||||
enum zswap_get_swap_ret {
|
||||
ZSWAP_SWAPCACHE_NEW,
|
||||
ZSWAP_SWAPCACHE_EXIST,
|
||||
ZSWAP_SWAPCACHE_NOMEM
|
||||
};
|
||||
|
||||
/*
|
||||
* zswap_get_swap_cache_page
|
||||
*
|
||||
* This is an adaptation of read_swap_cache_async()
|
||||
*
|
||||
* This function tries to find a page with the given swap entry
|
||||
* in the swapper_space address space (the swap cache). If the page
|
||||
* is found, it is returned in retpage. Otherwise, a page is allocated,
|
||||
* added to the swap cache, and returned in retpage.
|
||||
*
|
||||
* If success, the swap cache page is returned in retpage
|
||||
* Returns 0 if page was already in the swap cache, page is not locked
|
||||
* Returns 1 if the new page needs to be populated, page is locked
|
||||
* Returns <0 on error
|
||||
*/
|
||||
static int zswap_get_swap_cache_page(swp_entry_t entry,
|
||||
struct page **retpage)
|
||||
{
|
||||
struct page *found_page, *new_page = NULL;
|
||||
struct address_space *swapper_space = &swapper_spaces[swp_type(entry)];
|
||||
int err;
|
||||
|
||||
*retpage = NULL;
|
||||
do {
|
||||
/*
|
||||
* First check the swap cache. Since this is normally
|
||||
* called after lookup_swap_cache() failed, re-calling
|
||||
* that would confuse statistics.
|
||||
*/
|
||||
found_page = find_get_page(swapper_space, entry.val);
|
||||
if (found_page)
|
||||
break;
|
||||
|
||||
/*
|
||||
* Get a new page to read into from swap.
|
||||
*/
|
||||
if (!new_page) {
|
||||
new_page = alloc_page(GFP_KERNEL);
|
||||
if (!new_page)
|
||||
break; /* Out of memory */
|
||||
}
|
||||
|
||||
/*
|
||||
* call radix_tree_preload() while we can wait.
|
||||
*/
|
||||
err = radix_tree_preload(GFP_KERNEL);
|
||||
if (err)
|
||||
break;
|
||||
|
||||
/*
|
||||
* Swap entry may have been freed since our caller observed it.
|
||||
*/
|
||||
err = swapcache_prepare(entry);
|
||||
if (err == -EEXIST) { /* seems racy */
|
||||
radix_tree_preload_end();
|
||||
continue;
|
||||
}
|
||||
if (err) { /* swp entry is obsolete ? */
|
||||
radix_tree_preload_end();
|
||||
break;
|
||||
}
|
||||
|
||||
/* May fail (-ENOMEM) if radix-tree node allocation failed. */
|
||||
__set_page_locked(new_page);
|
||||
SetPageSwapBacked(new_page);
|
||||
err = __add_to_swap_cache(new_page, entry);
|
||||
if (likely(!err)) {
|
||||
radix_tree_preload_end();
|
||||
lru_cache_add_anon(new_page);
|
||||
*retpage = new_page;
|
||||
return ZSWAP_SWAPCACHE_NEW;
|
||||
}
|
||||
radix_tree_preload_end();
|
||||
ClearPageSwapBacked(new_page);
|
||||
__clear_page_locked(new_page);
|
||||
/*
|
||||
* add_to_swap_cache() doesn't return -EEXIST, so we can safely
|
||||
* clear SWAP_HAS_CACHE flag.
|
||||
*/
|
||||
swapcache_free(entry, NULL);
|
||||
} while (err != -ENOMEM);
|
||||
|
||||
if (new_page)
|
||||
page_cache_release(new_page);
|
||||
if (!found_page)
|
||||
return ZSWAP_SWAPCACHE_NOMEM;
|
||||
*retpage = found_page;
|
||||
return ZSWAP_SWAPCACHE_EXIST;
|
||||
}
|
||||
|
||||
/*
|
||||
* Attempts to free an entry by adding a page to the swap cache,
|
||||
* decompressing the entry data into the page, and issuing a
|
||||
* bio write to write the page back to the swap device.
|
||||
*
|
||||
* This can be thought of as a "resumed writeback" of the page
|
||||
* to the swap device. We are basically resuming the same swap
|
||||
* writeback path that was intercepted with the frontswap_store()
|
||||
* in the first place. After the page has been decompressed into
|
||||
* the swap cache, the compressed version stored by zswap can be
|
||||
* freed.
|
||||
*/
|
||||
static int zswap_writeback_entry(struct zbud_pool *pool, unsigned long handle)
|
||||
{
|
||||
struct zswap_header *zhdr;
|
||||
swp_entry_t swpentry;
|
||||
struct zswap_tree *tree;
|
||||
pgoff_t offset;
|
||||
struct zswap_entry *entry;
|
||||
struct page *page;
|
||||
u8 *src, *dst;
|
||||
unsigned int dlen;
|
||||
int ret, refcount;
|
||||
struct writeback_control wbc = {
|
||||
.sync_mode = WB_SYNC_NONE,
|
||||
};
|
||||
|
||||
/* extract swpentry from data */
|
||||
zhdr = zbud_map(pool, handle);
|
||||
swpentry = zhdr->swpentry; /* here */
|
||||
zbud_unmap(pool, handle);
|
||||
tree = zswap_trees[swp_type(swpentry)];
|
||||
offset = swp_offset(swpentry);
|
||||
BUG_ON(pool != tree->pool);
|
||||
|
||||
/* find and ref zswap entry */
|
||||
spin_lock(&tree->lock);
|
||||
entry = zswap_rb_search(&tree->rbroot, offset);
|
||||
if (!entry) {
|
||||
/* entry was invalidated */
|
||||
spin_unlock(&tree->lock);
|
||||
return 0;
|
||||
}
|
||||
zswap_entry_get(entry);
|
||||
spin_unlock(&tree->lock);
|
||||
BUG_ON(offset != entry->offset);
|
||||
|
||||
/* try to allocate swap cache page */
|
||||
switch (zswap_get_swap_cache_page(swpentry, &page)) {
|
||||
case ZSWAP_SWAPCACHE_NOMEM: /* no memory */
|
||||
ret = -ENOMEM;
|
||||
goto fail;
|
||||
|
||||
case ZSWAP_SWAPCACHE_EXIST: /* page is unlocked */
|
||||
/* page is already in the swap cache, ignore for now */
|
||||
page_cache_release(page);
|
||||
ret = -EEXIST;
|
||||
goto fail;
|
||||
|
||||
case ZSWAP_SWAPCACHE_NEW: /* page is locked */
|
||||
/* decompress */
|
||||
dlen = PAGE_SIZE;
|
||||
src = (u8 *)zbud_map(tree->pool, entry->handle) +
|
||||
sizeof(struct zswap_header);
|
||||
dst = kmap_atomic(page);
|
||||
ret = zswap_comp_op(ZSWAP_COMPOP_DECOMPRESS, src,
|
||||
entry->length, dst, &dlen);
|
||||
kunmap_atomic(dst);
|
||||
zbud_unmap(tree->pool, entry->handle);
|
||||
BUG_ON(ret);
|
||||
BUG_ON(dlen != PAGE_SIZE);
|
||||
|
||||
/* page is up to date */
|
||||
SetPageUptodate(page);
|
||||
}
|
||||
|
||||
/* start writeback */
|
||||
__swap_writepage(page, &wbc, end_swap_bio_write);
|
||||
page_cache_release(page);
|
||||
zswap_written_back_pages++;
|
||||
|
||||
spin_lock(&tree->lock);
|
||||
|
||||
/* drop local reference */
|
||||
zswap_entry_put(entry);
|
||||
/* drop the initial reference from entry creation */
|
||||
refcount = zswap_entry_put(entry);
|
||||
|
||||
/*
|
||||
* There are three possible values for refcount here:
|
||||
* (1) refcount is 1, load is in progress, unlink from rbtree,
|
||||
* load will free
|
||||
* (2) refcount is 0, (normal case) entry is valid,
|
||||
* remove from rbtree and free entry
|
||||
* (3) refcount is -1, invalidate happened during writeback,
|
||||
* free entry
|
||||
*/
|
||||
if (refcount >= 0) {
|
||||
/* no invalidate yet, remove from rbtree */
|
||||
rb_erase(&entry->rbnode, &tree->rbroot);
|
||||
}
|
||||
spin_unlock(&tree->lock);
|
||||
if (refcount <= 0) {
|
||||
/* free the entry */
|
||||
zswap_free_entry(tree, entry);
|
||||
return 0;
|
||||
}
|
||||
return -EAGAIN;
|
||||
|
||||
fail:
|
||||
spin_lock(&tree->lock);
|
||||
zswap_entry_put(entry);
|
||||
spin_unlock(&tree->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*********************************
* frontswap hooks
**********************************/
/* attempts to compress and store a single page */
static int zswap_frontswap_store(unsigned type, pgoff_t offset,
                                struct page *page)
{
        struct zswap_tree *tree = zswap_trees[type];
        struct zswap_entry *entry, *dupentry;
        int ret;
        unsigned int dlen = PAGE_SIZE, len;
        unsigned long handle;
        char *buf;
        u8 *src, *dst;
        struct zswap_header *zhdr;

        if (!tree) {
                ret = -ENODEV;
                goto reject;
        }

        /* reclaim space if needed */
        if (zswap_is_full()) {
                zswap_pool_limit_hit++;
                if (zbud_reclaim_page(tree->pool, 8)) {
                        zswap_reject_reclaim_fail++;
                        ret = -ENOMEM;
                        goto reject;
                }
        }

        /* allocate entry */
        entry = zswap_entry_cache_alloc(GFP_KERNEL);
        if (!entry) {
                zswap_reject_kmemcache_fail++;
                ret = -ENOMEM;
                goto reject;
        }

        /* compress */
        dst = get_cpu_var(zswap_dstmem);
        src = kmap_atomic(page);
        ret = zswap_comp_op(ZSWAP_COMPOP_COMPRESS, src, PAGE_SIZE, dst, &dlen);
        kunmap_atomic(src);
        if (ret) {
                ret = -EINVAL;
                goto freepage;
        }

        /* store */
        len = dlen + sizeof(struct zswap_header);
        ret = zbud_alloc(tree->pool, len, __GFP_NORETRY | __GFP_NOWARN,
                &handle);
        if (ret == -ENOSPC) {
                zswap_reject_compress_poor++;
                goto freepage;
        }
        if (ret) {
                zswap_reject_alloc_fail++;
                goto freepage;
        }
        zhdr = zbud_map(tree->pool, handle);
        zhdr->swpentry = swp_entry(type, offset);
        buf = (u8 *)(zhdr + 1);
        memcpy(buf, dst, dlen);
        zbud_unmap(tree->pool, handle);
        put_cpu_var(zswap_dstmem);

        /* populate entry */
        entry->offset = offset;
        entry->handle = handle;
        entry->length = dlen;

        /* map */
        spin_lock(&tree->lock);
        do {
                ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry);
                if (ret == -EEXIST) {
                        zswap_duplicate_entry++;
                        /* remove from rbtree */
                        rb_erase(&dupentry->rbnode, &tree->rbroot);
                        if (!zswap_entry_put(dupentry)) {
                                /* free */
                                zswap_free_entry(tree, dupentry);
                        }
                }
        } while (ret == -EEXIST);
        spin_unlock(&tree->lock);

        /* update stats */
        atomic_inc(&zswap_stored_pages);
        zswap_pool_pages = zbud_get_pool_size(tree->pool);

        return 0;

freepage:
        put_cpu_var(zswap_dstmem);
        zswap_entry_cache_free(entry);
reject:
        return ret;
}

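/*
 * Illustrative sketch, not part of this patch: how the store return value is
 * meant to be consumed. A non-zero return tells frontswap that the page was
 * not stored, and the caller is expected to continue down the normal swap
 * writeback path. The function below is hypothetical and only mirrors that
 * contract; the real call site is frontswap_store() via swap_writepage().
 */
#if 0
static void example_store_or_fallback(unsigned type, pgoff_t offset,
                                      struct page *page,
                                      struct writeback_control *wbc)
{
        if (zswap_frontswap_store(type, offset, page))
                /* -ENODEV, -ENOMEM, -EINVAL, ...: write to the swap device */
                __swap_writepage(page, wbc, end_swap_bio_write);
}
#endif
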
/*
 * returns 0 if the page was successfully decompressed
 * returns -1 on entry not found or error
 */
static int zswap_frontswap_load(unsigned type, pgoff_t offset,
                                struct page *page)
{
        struct zswap_tree *tree = zswap_trees[type];
        struct zswap_entry *entry;
        u8 *src, *dst;
        unsigned int dlen;
        int refcount, ret;

        /* find */
        spin_lock(&tree->lock);
        entry = zswap_rb_search(&tree->rbroot, offset);
        if (!entry) {
                /* entry was written back */
                spin_unlock(&tree->lock);
                return -1;
        }
        zswap_entry_get(entry);
        spin_unlock(&tree->lock);

        /* decompress */
        dlen = PAGE_SIZE;
        src = (u8 *)zbud_map(tree->pool, entry->handle) +
                sizeof(struct zswap_header);
        dst = kmap_atomic(page);
        ret = zswap_comp_op(ZSWAP_COMPOP_DECOMPRESS, src, entry->length,
                dst, &dlen);
        kunmap_atomic(dst);
        zbud_unmap(tree->pool, entry->handle);
        BUG_ON(ret);

        spin_lock(&tree->lock);
        refcount = zswap_entry_put(entry);
        if (likely(refcount)) {
                spin_unlock(&tree->lock);
                return 0;
        }
        spin_unlock(&tree->lock);

        /*
         * We don't have to unlink from the rbtree because
         * zswap_writeback_entry() or zswap_frontswap_invalidate_page()
         * has already done this for us if we are the last reference.
         */
        /* free */
        zswap_free_entry(tree, entry);

        return 0;
}

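/*
 * Illustrative sketch, not part of this patch: the reference-counting
 * pattern the load path relies on. The entry is pinned under the tree lock
 * before the lock is dropped for the (potentially slow) decompression, and
 * whoever drops the last reference frees the entry. The helper below is
 * hypothetical and only restates that protocol.
 */
#if 0
static struct zswap_entry *example_find_and_pin(struct zswap_tree *tree,
                                                pgoff_t offset)
{
        struct zswap_entry *entry;

        spin_lock(&tree->lock);
        entry = zswap_rb_search(&tree->rbroot, offset);
        if (entry)
                zswap_entry_get(entry); /* pin before dropping the lock */
        spin_unlock(&tree->lock);

        /* caller must balance with zswap_entry_put() when done */
        return entry;
}
#endif
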
/* frees an entry in zswap */
static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
{
        struct zswap_tree *tree = zswap_trees[type];
        struct zswap_entry *entry;
        int refcount;

        /* find */
        spin_lock(&tree->lock);
        entry = zswap_rb_search(&tree->rbroot, offset);
        if (!entry) {
                /* entry was written back */
                spin_unlock(&tree->lock);
                return;
        }

        /* remove from rbtree */
        rb_erase(&entry->rbnode, &tree->rbroot);

        /* drop the initial reference from entry creation */
        refcount = zswap_entry_put(entry);

        spin_unlock(&tree->lock);

        if (refcount) {
                /* writeback in progress, writeback will free */
                return;
        }

        /* free */
        zswap_free_entry(tree, entry);
}

/* frees all zswap entries for the given swap type */
static void zswap_frontswap_invalidate_area(unsigned type)
{
        struct zswap_tree *tree = zswap_trees[type];
        struct rb_node *node;
        struct zswap_entry *entry;

        if (!tree)
                return;

        /* walk the tree and free everything */
        spin_lock(&tree->lock);
        /*
         * TODO: Even though this code should not be executed because
         * the try_to_unuse() in swapoff should have emptied the tree,
         * it is very wasteful to rebalance the tree after every
         * removal when we are freeing the whole tree.
         *
         * If post-order traversal code is ever added to the rbtree
         * implementation, it should be used here.
         */
        while ((node = rb_first(&tree->rbroot))) {
                entry = rb_entry(node, struct zswap_entry, rbnode);
                rb_erase(&entry->rbnode, &tree->rbroot);
                zbud_free(tree->pool, entry->handle);
                zswap_entry_cache_free(entry);
                atomic_dec(&zswap_stored_pages);
        }
        tree->rbroot = RB_ROOT;
        spin_unlock(&tree->lock);
}

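/*
 * Illustrative sketch, not part of this patch, of the TODO above: if a
 * post-order helper such as rbtree_postorder_for_each_entry_safe() is
 * available in <linux/rbtree.h> (it was not when this code was written),
 * the whole tree can be torn down without rebalancing after every removal.
 */
#if 0
static void example_invalidate_area_postorder(struct zswap_tree *tree)
{
        struct zswap_entry *entry, *n;

        spin_lock(&tree->lock);
        rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode) {
                zbud_free(tree->pool, entry->handle);
                zswap_entry_cache_free(entry);
                atomic_dec(&zswap_stored_pages);
        }
        tree->rbroot = RB_ROOT;
        spin_unlock(&tree->lock);
}
#endif
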
static struct zbud_ops zswap_zbud_ops = {
        .evict = zswap_writeback_entry
};

static void zswap_frontswap_init(unsigned type)
{
        struct zswap_tree *tree;

        tree = kzalloc(sizeof(struct zswap_tree), GFP_KERNEL);
        if (!tree)
                goto err;
        tree->pool = zbud_create_pool(GFP_KERNEL, &zswap_zbud_ops);
        if (!tree->pool)
                goto freetree;
        tree->rbroot = RB_ROOT;
        spin_lock_init(&tree->lock);
        zswap_trees[type] = tree;
        return;

freetree:
        kfree(tree);
err:
        pr_err("alloc failed, zswap disabled for swap type %d\n", type);
}

static struct frontswap_ops zswap_frontswap_ops = {
        .store = zswap_frontswap_store,
        .load = zswap_frontswap_load,
        .invalidate_page = zswap_frontswap_invalidate_page,
        .invalidate_area = zswap_frontswap_invalidate_area,
        .init = zswap_frontswap_init
};

/*********************************
* debugfs functions
**********************************/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *zswap_debugfs_root;

static int __init zswap_debugfs_init(void)
{
        if (!debugfs_initialized())
                return -ENODEV;

        zswap_debugfs_root = debugfs_create_dir("zswap", NULL);
        if (!zswap_debugfs_root)
                return -ENOMEM;

        debugfs_create_u64("pool_limit_hit", S_IRUGO,
                        zswap_debugfs_root, &zswap_pool_limit_hit);
        debugfs_create_u64("reject_reclaim_fail", S_IRUGO,
                        zswap_debugfs_root, &zswap_reject_reclaim_fail);
        debugfs_create_u64("reject_alloc_fail", S_IRUGO,
                        zswap_debugfs_root, &zswap_reject_alloc_fail);
        debugfs_create_u64("reject_kmemcache_fail", S_IRUGO,
                        zswap_debugfs_root, &zswap_reject_kmemcache_fail);
        debugfs_create_u64("reject_compress_poor", S_IRUGO,
                        zswap_debugfs_root, &zswap_reject_compress_poor);
        debugfs_create_u64("written_back_pages", S_IRUGO,
                        zswap_debugfs_root, &zswap_written_back_pages);
        debugfs_create_u64("duplicate_entry", S_IRUGO,
                        zswap_debugfs_root, &zswap_duplicate_entry);
        debugfs_create_u64("pool_pages", S_IRUGO,
                        zswap_debugfs_root, &zswap_pool_pages);
        debugfs_create_atomic_t("stored_pages", S_IRUGO,
                        zswap_debugfs_root, &zswap_stored_pages);

        return 0;
}

static void __exit zswap_debugfs_exit(void)
{
        debugfs_remove_recursive(zswap_debugfs_root);
}
#else
static int __init zswap_debugfs_init(void)
{
        return 0;
}

static void __exit zswap_debugfs_exit(void) { }
#endif

/*********************************
* module init and exit
**********************************/
static int __init init_zswap(void)
{
        if (!zswap_enabled)
                return 0;

        pr_info("loading zswap\n");
        if (zswap_entry_cache_create()) {
                pr_err("entry cache creation failed\n");
                goto error;
        }
        if (zswap_comp_init()) {
                pr_err("compressor initialization failed\n");
                goto compfail;
        }
        if (zswap_cpu_init()) {
                pr_err("per-cpu initialization failed\n");
                goto pcpufail;
        }
        frontswap_register_ops(&zswap_frontswap_ops);
        if (zswap_debugfs_init())
                pr_warn("debugfs initialization failed\n");
        return 0;
pcpufail:
        zswap_comp_exit();
compfail:
        zswap_entry_cache_destory();
error:
        return -ENOMEM;
}
/* must be late so crypto has time to come up */
late_initcall(init_zswap);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Seth Jennings <sjenning@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("Compressed cache for swap pages");