/*
 * Xen implementation for transcendent memory (tmem)
 *
 * Copyright (C) 2009-2011 Oracle Corp. All rights reserved.
 * Author: Dan Magenheimer
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/cleancache.h>
#include <linux/frontswap.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/page.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <xen/tmem.h>

#ifndef CONFIG_XEN_TMEM_MODULE
bool __read_mostly tmem_enabled = false;

static int __init enable_tmem(char *s)
{
	tmem_enabled = true;
	return 1;
}
__setup("tmem", enable_tmem);
#endif

#ifdef CONFIG_CLEANCACHE
static bool cleancache __read_mostly = true;
module_param(cleancache, bool, S_IRUGO);
static bool selfballooning __read_mostly = true;
module_param(selfballooning, bool, S_IRUGO);
#endif /* CONFIG_CLEANCACHE */

#ifdef CONFIG_FRONTSWAP
static bool frontswap __read_mostly = true;
module_param(frontswap, bool, S_IRUGO);
#else /* CONFIG_FRONTSWAP */
#define frontswap (0)
#endif /* CONFIG_FRONTSWAP */

#ifdef CONFIG_XEN_SELFBALLOONING
static bool selfshrinking __read_mostly = true;
module_param(selfshrinking, bool, S_IRUGO);
#endif /* CONFIG_XEN_SELFBALLOONING */
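/*
 * Command numbers placed in tmem_op.cmd for HYPERVISOR_tmem_op(); this
 * driver only issues the pool, put/get page and flush commands below.
 */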
#define TMEM_CONTROL 0
#define TMEM_NEW_POOL 1
#define TMEM_DESTROY_POOL 2
#define TMEM_NEW_PAGE 3
#define TMEM_PUT_PAGE 4
#define TMEM_GET_PAGE 5
#define TMEM_FLUSH_PAGE 6
#define TMEM_FLUSH_OBJECT 7
#define TMEM_READ 8
#define TMEM_WRITE 9
#define TMEM_XCHG 10

/* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */
#define TMEM_POOL_PERSIST 1
#define TMEM_POOL_SHARED 2
#define TMEM_POOL_PAGESIZE_SHIFT 4
#define TMEM_VERSION_SHIFT 24

struct tmem_pool_uuid {
	u64 uuid_lo;
	u64 uuid_hi;
};

struct tmem_oid {
	u64 oid[3];
};

#define TMEM_POOL_PRIVATE_UUID	{ 0, 0 }

/* flags for tmem_ops.new_pool */
#define TMEM_POOL_PERSIST 1
#define TMEM_POOL_SHARED 2

/* xen tmem foundation ops/hypercalls */
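/*
 * Marshal the arguments into a struct tmem_op and issue the tmem
 * hypercall.  The object id is three u64s; pages are passed to the
 * hypervisor by guest frame number (gmfn).
 */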
static inline int xen_tmem_op(u32 tmem_cmd, u32 tmem_pool, struct tmem_oid oid,
	u32 index, unsigned long gmfn, u32 tmem_offset, u32 pfn_offset, u32 len)
{
	struct tmem_op op;
	int rc = 0;

	op.cmd = tmem_cmd;
	op.pool_id = tmem_pool;
	op.u.gen.oid[0] = oid.oid[0];
	op.u.gen.oid[1] = oid.oid[1];
	op.u.gen.oid[2] = oid.oid[2];
	op.u.gen.index = index;
	op.u.gen.tmem_offset = tmem_offset;
	op.u.gen.pfn_offset = pfn_offset;
	op.u.gen.len = len;
	set_xen_guest_handle(op.u.gen.gmfn, (void *)gmfn);
	rc = HYPERVISOR_tmem_op(&op);
	return rc;
}
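/*
 * Create a new tmem pool.  The loop computes log2(pagesize) so the pool
 * page size can be encoded as (pageshift - 12), i.e. relative to 4K,
 * alongside the tmem spec version in the flags word.
 */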
static int xen_tmem_new_pool(struct tmem_pool_uuid uuid,
				u32 flags, unsigned long pagesize)
{
	struct tmem_op op;
	int rc = 0, pageshift;

	for (pageshift = 0; pagesize != 1; pageshift++)
		pagesize >>= 1;
	flags |= (pageshift - 12) << TMEM_POOL_PAGESIZE_SHIFT;
	flags |= TMEM_SPEC_VERSION << TMEM_VERSION_SHIFT;
	op.cmd = TMEM_NEW_POOL;
	op.u.new.uuid[0] = uuid.uuid_lo;
	op.u.new.uuid[1] = uuid.uuid_hi;
	op.u.new.flags = flags;
	rc = HYPERVISOR_tmem_op(&op);
	return rc;
}

/* xen generic tmem ops */
static int xen_tmem_put_page(u32 pool_id, struct tmem_oid oid,
			     u32 index, struct page *page)
{
	return xen_tmem_op(TMEM_PUT_PAGE, pool_id, oid, index,
			   xen_page_to_gfn(page), 0, 0, 0);
}

static int xen_tmem_get_page(u32 pool_id, struct tmem_oid oid,
			     u32 index, struct page *page)
{
	return xen_tmem_op(TMEM_GET_PAGE, pool_id, oid, index,
			   xen_page_to_gfn(page), 0, 0, 0);
}

static int xen_tmem_flush_page(u32 pool_id, struct tmem_oid oid, u32 index)
{
	return xen_tmem_op(TMEM_FLUSH_PAGE, pool_id, oid, index,
			   0, 0, 0, 0);
}

static int xen_tmem_flush_object(u32 pool_id, struct tmem_oid oid)
{
	return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0);
}

#ifdef CONFIG_CLEANCACHE
static int xen_tmem_destroy_pool(u32 pool_id)
{
	struct tmem_oid oid = { { 0 } };

	return xen_tmem_op(TMEM_DESTROY_POOL, pool_id, oid, 0, 0, 0, 0, 0);
}

/* cleancache ops */
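/*
 * The cleancache_filekey is reinterpreted directly as a tmem object id;
 * xen_tmem_init() contains a BUILD_BUG_ON ensuring the two types are the
 * same size.
 */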
static void tmem_cleancache_put_page(int pool, struct cleancache_filekey key,
				     pgoff_t index, struct page *page)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	if (pool < 0)
		return;
	if (ind != index)
		return;
	mb(); /* ensure page is quiescent; tmem may address it with an alias */
	(void)xen_tmem_put_page((u32)pool, oid, ind, page);
}

static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
				    pgoff_t index, struct page *page)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;
	int ret;

	/* translate return values to linux semantics */
	if (pool < 0)
		return -1;
	if (ind != index)
		return -1;
	ret = xen_tmem_get_page((u32)pool, oid, ind, page);
	if (ret == 1)
		return 0;
	else
		return -1;
}

static void tmem_cleancache_flush_page(int pool, struct cleancache_filekey key,
				       pgoff_t index)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	if (pool < 0)
		return;
	if (ind != index)
		return;
	(void)xen_tmem_flush_page((u32)pool, oid, ind);
}

static void tmem_cleancache_flush_inode(int pool, struct cleancache_filekey key)
{
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	if (pool < 0)
		return;
	(void)xen_tmem_flush_object((u32)pool, oid);
}

static void tmem_cleancache_flush_fs(int pool)
{
	if (pool < 0)
		return;
	(void)xen_tmem_destroy_pool((u32)pool);
}

static int tmem_cleancache_init_fs(size_t pagesize)
{
	struct tmem_pool_uuid uuid_private = TMEM_POOL_PRIVATE_UUID;

	return xen_tmem_new_pool(uuid_private, 0, pagesize);
}
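/*
 * The 128-bit filesystem uuid is split into two u64 halves to form the
 * shared pool key.
 */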
static int tmem_cleancache_init_shared_fs(uuid_t *uuid, size_t pagesize)
{
	struct tmem_pool_uuid shared_uuid;

	shared_uuid.uuid_lo = *(u64 *)&uuid->b[0];
	shared_uuid.uuid_hi = *(u64 *)&uuid->b[8];
	return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
}

static const struct cleancache_ops tmem_cleancache_ops = {
	.put_page = tmem_cleancache_put_page,
	.get_page = tmem_cleancache_get_page,
	.invalidate_page = tmem_cleancache_flush_page,
	.invalidate_inode = tmem_cleancache_flush_inode,
	.invalidate_fs = tmem_cleancache_flush_fs,
	.init_shared_fs = tmem_cleancache_init_shared_fs,
	.init_fs = tmem_cleancache_init_fs
};
#endif

#ifdef CONFIG_FRONTSWAP
/* frontswap tmem operations */

/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
static int tmem_frontswap_poolid;

/*
 * Swizzling increases objects per swaptype, increasing tmem concurrency
 * for heavy swaploads.  Later, larger nr_cpus -> larger SWIZ_BITS
 */
#define SWIZ_BITS 4
#define SWIZ_MASK ((1 << SWIZ_BITS) - 1)
#define _oswiz(_type, _ind) ((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
#define iswiz(_ind) (_ind >> SWIZ_BITS)
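/*
 * Example with SWIZ_BITS == 4: swap type 2, page offset 0x1234 yields
 * oid.oid[0] == _oswiz(2, 0x1234) == 0x24 and tmem index
 * iswiz(0x1234) == 0x123.
 */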
static inline struct tmem_oid oswiz(unsigned type, u32 ind)
{
	struct tmem_oid oid = { .oid = { 0 } };
	oid.oid[0] = _oswiz(type, ind);
	return oid;
}

/* returns 0 if the page was successfully put into frontswap, -1 if not */
static int tmem_frontswap_store(unsigned type, pgoff_t offset,
				struct page *page)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	int pool = tmem_frontswap_poolid;
	int ret;
	/* THP isn't supported */
	if (PageTransHuge(page))
		return -1;

	if (pool < 0)
		return -1;
	if (ind64 != ind)
		return -1;
	mb(); /* ensure page is quiescent; tmem may address it with an alias */
	ret = xen_tmem_put_page(pool, oswiz(type, ind), iswiz(ind), page);
	/* translate Xen tmem return values to linux semantics */
	if (ret == 1)
		return 0;
	else
		return -1;
}

/*
 * returns 0 if the page was successfully gotten from frontswap, -1 if it
 * was not present (should never happen!)
 */
static int tmem_frontswap_load(unsigned type, pgoff_t offset,
			       struct page *page)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	int pool = tmem_frontswap_poolid;
	int ret;

	if (pool < 0)
		return -1;
	if (ind64 != ind)
		return -1;
	ret = xen_tmem_get_page(pool, oswiz(type, ind), iswiz(ind), page);
	/* translate Xen tmem return values to linux semantics */
	if (ret == 1)
		return 0;
	else
		return -1;
}

/* flush a single page from frontswap */
static void tmem_frontswap_flush_page(unsigned type, pgoff_t offset)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	int pool = tmem_frontswap_poolid;

	if (pool < 0)
		return;
	if (ind64 != ind)
		return;
	(void) xen_tmem_flush_page(pool, oswiz(type, ind), iswiz(ind));
}

/* flush all pages from the passed swaptype */
static void tmem_frontswap_flush_area(unsigned type)
{
	int pool = tmem_frontswap_poolid;
	int ind;

	if (pool < 0)
		return;
	for (ind = SWIZ_MASK; ind >= 0; ind--)
		(void)xen_tmem_flush_object(pool, oswiz(type, ind));
}

static void tmem_frontswap_init(unsigned ignored)
{
	struct tmem_pool_uuid private = TMEM_POOL_PRIVATE_UUID;

	/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
	if (tmem_frontswap_poolid < 0)
		tmem_frontswap_poolid =
			xen_tmem_new_pool(private, TMEM_POOL_PERSIST, PAGE_SIZE);
}

static struct frontswap_ops tmem_frontswap_ops = {
	.store = tmem_frontswap_store,
	.load = tmem_frontswap_load,
	.invalidate_page = tmem_frontswap_flush_page,
	.invalidate_area = tmem_frontswap_flush_area,
	.init = tmem_frontswap_init
};
#endif
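/*
 * Driver init: do nothing unless running in a Xen domain, then register
 * the frontswap and cleancache ops above if tmem is enabled.
 */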
static int __init xen_tmem_init(void)
{
	if (!xen_domain())
		return 0;
#ifdef CONFIG_FRONTSWAP
	if (tmem_enabled && frontswap) {
		char *s = "";

		tmem_frontswap_poolid = -1;
		frontswap_register_ops(&tmem_frontswap_ops);
		pr_info("frontswap enabled, RAM provided by Xen Transcendent Memory%s\n",
			s);
	}
#endif
#ifdef CONFIG_CLEANCACHE
	BUILD_BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
	if (tmem_enabled && cleancache) {
		int err;

		err = cleancache_register_ops(&tmem_cleancache_ops);
		if (err)
			pr_warn("xen-tmem: failed to enable cleancache: %d\n",
				err);
		else
			pr_info("cleancache enabled, RAM provided by Xen Transcendent Memory\n");
	}
#endif
#ifdef CONFIG_XEN_SELFBALLOONING
	/*
	 * There is no point in driving pages to the swap system if they
	 * aren't going anywhere in the tmem universe.
	 */
	if (!frontswap) {
		selfshrinking = false;
		selfballooning = false;
	}
	xen_selfballoon_init(selfballooning, selfshrinking);
#endif
	return 0;
}

module_init(xen_tmem_init)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dan Magenheimer <dan.magenheimer@oracle.com>");
MODULE_DESCRIPTION("Shim to Xen transcendent memory");