2011-05-27 00:01:36 +08:00
|
|
|
#ifndef _LINUX_CLEANCACHE_H
|
|
|
|
#define _LINUX_CLEANCACHE_H
|
|
|
|
|
|
|
|
#include <linux/fs.h>
|
|
|
|
#include <linux/exportfs.h>
|
|
|
|
#include <linux/mm.h>
|
|
|
|
|
2015-04-15 06:46:48 +08:00
|
|
|
#define CLEANCACHE_NO_POOL -1
|
|
|
|
#define CLEANCACHE_NO_BACKEND -2
|
|
|
|
#define CLEANCACHE_NO_BACKEND_SHARED -3
|
|
|
|
|
2011-05-27 00:01:36 +08:00
|
|
|
#define CLEANCACHE_KEY_MAX 6
|
|
|
|
|
|
|
|
/*
|
|
|
|
* cleancache requires every file with a page in cleancache to have a
|
|
|
|
* unique key unless/until the file is removed/truncated. For some
|
|
|
|
* filesystems, the inode number is unique, but for "modern" filesystems
|
|
|
|
* an exportable filehandle is required (see exportfs.h)
|
|
|
|
*/
|
|
|
|
/*
 * Per-file key identifying a file to a cleancache backend.  The three
 * union members are alternative encodings of the same CLEANCACHE_KEY_MAX-
 * word key space: a plain inode number, an exportable file handle, or a
 * raw array of key words (see the comment above: filesystems without
 * stable inode numbers supply an exportfs file handle instead).
 */
struct cleancache_filekey {
	union {
		ino_t ino;			/* unique inode number, where sufficient */
		__u32 fh[CLEANCACHE_KEY_MAX];	/* exportable filehandle (see exportfs.h) */
		u32 key[CLEANCACHE_KEY_MAX];	/* raw key words, backend-interpreted */
	} u;
};
|
|
|
|
|
|
|
|
/*
 * Operations vector implemented by a cleancache backend and registered
 * via cleancache_register_ops().  The leading int argument of the
 * per-page/per-inode ops is a pool id; init_fs/init_shared_fs return an
 * int, presumably the pool id for the filesystem or one of the negative
 * CLEANCACHE_NO_* codes above -- TODO confirm against the backend docs.
 */
struct cleancache_ops {
	/* called at mount time; size_t is likely PAGE_SIZE -- verify at caller */
	int (*init_fs)(size_t);
	/* as init_fs, but for a filesystem shared between kernels (uuid-keyed) */
	int (*init_shared_fs)(char *uuid, size_t);
	/* fetch a previously put page; int return, 0/-1 semantics backend-defined */
	int (*get_page)(int, struct cleancache_filekey,
			pgoff_t, struct page *);
	/* offer a clean page to the backend (may be dropped at any time) */
	void (*put_page)(int, struct cleancache_filekey,
			 pgoff_t, struct page *);
	/* invalidate a single page of a file */
	void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
	/* invalidate all pages of a file (remove/truncate) */
	void (*invalidate_inode)(int, struct cleancache_filekey);
	/* invalidate the whole pool (umount) */
	void (*invalidate_fs)(int);
};
|
|
|
|
|
cleancache: forbid overriding cleancache_ops
Currently, cleancache_register_ops returns the previous value of
cleancache_ops to allow chaining. However, chaining, as it is
implemented now, is extremely dangerous due to possible pool id
collisions. Suppose, a new cleancache driver is registered after the
previous one assigned an id to a super block. If the new driver assigns
the same id to another super block, which is perfectly possible, we will
have two different filesystems using the same id. No matter if the new
driver implements chaining or not, we are likely to get data corruption
with such a configuration eventually.
This patch therefore disables the ability to override cleancache_ops
altogether as potentially dangerous. If there is already cleancache
driver registered, all further calls to cleancache_register_ops will
return EBUSY. Since no user of cleancache implements chaining, we only
need to make minor changes to the code outside the cleancache core.
Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: Mark Fasheh <mfasheh@suse.com>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Stefan Hengelein <ilendir@googlemail.com>
Cc: Florian Schmaus <fschmaus@gmail.com>
Cc: Andor Daam <andor.daam@googlemail.com>
Cc: Dan Magenheimer <dan.magenheimer@oracle.com>
Cc: Bob Liu <lliubbo@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2015-04-15 06:46:45 +08:00
|
|
|
extern int cleancache_register_ops(struct cleancache_ops *ops);
|
2011-05-27 00:01:36 +08:00
|
|
|
extern void __cleancache_init_fs(struct super_block *);
|
2015-04-15 06:46:42 +08:00
|
|
|
extern void __cleancache_init_shared_fs(struct super_block *);
|
2011-05-27 00:01:36 +08:00
|
|
|
extern int __cleancache_get_page(struct page *);
|
|
|
|
extern void __cleancache_put_page(struct page *);
|
2011-09-21 23:56:28 +08:00
|
|
|
extern void __cleancache_invalidate_page(struct address_space *, struct page *);
|
|
|
|
extern void __cleancache_invalidate_inode(struct address_space *);
|
|
|
|
extern void __cleancache_invalidate_fs(struct super_block *);
|
2011-05-27 00:01:36 +08:00
|
|
|
|
|
|
|
#ifdef CONFIG_CLEANCACHE
|
2013-05-01 06:26:58 +08:00
|
|
|
#define cleancache_enabled (1)
|
2011-05-27 00:01:36 +08:00
|
|
|
static inline bool cleancache_fs_enabled(struct page *page)
|
|
|
|
{
|
|
|
|
return page->mapping->host->i_sb->cleancache_poolid >= 0;
|
|
|
|
}
|
|
|
|
static inline bool cleancache_fs_enabled_mapping(struct address_space *mapping)
|
|
|
|
{
|
|
|
|
return mapping->host->i_sb->cleancache_poolid >= 0;
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
#define cleancache_enabled (0)
|
|
|
|
#define cleancache_fs_enabled(_page) (0)
|
|
|
|
#define cleancache_fs_enabled_mapping(_page) (0)
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The shim layer provided by these inline functions allows the compiler
|
|
|
|
* to reduce all cleancache hooks to nothingness if CONFIG_CLEANCACHE
|
|
|
|
* is disabled, to a single global variable check if CONFIG_CLEANCACHE
|
|
|
|
* is enabled but no cleancache "backend" has dynamically enabled it,
|
|
|
|
* and, for the most frequent cleancache ops, to a single global variable
|
|
|
|
* check plus a superblock element comparison if CONFIG_CLEANCACHE is enabled
|
|
|
|
* and a cleancache backend has dynamically enabled cleancache, but the
|
|
|
|
* filesystem referenced by that cleancache op has not enabled cleancache.
|
|
|
|
* As a result, CONFIG_CLEANCACHE can be enabled by default with essentially
|
|
|
|
* no measurable performance impact.
|
|
|
|
*/
|
|
|
|
|
|
|
|
static inline void cleancache_init_fs(struct super_block *sb)
|
|
|
|
{
|
|
|
|
if (cleancache_enabled)
|
|
|
|
__cleancache_init_fs(sb);
|
|
|
|
}
|
|
|
|
|
2015-04-15 06:46:42 +08:00
|
|
|
static inline void cleancache_init_shared_fs(struct super_block *sb)
|
2011-05-27 00:01:36 +08:00
|
|
|
{
|
|
|
|
if (cleancache_enabled)
|
2015-04-15 06:46:42 +08:00
|
|
|
__cleancache_init_shared_fs(sb);
|
2011-05-27 00:01:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline int cleancache_get_page(struct page *page)
|
|
|
|
{
|
|
|
|
int ret = -1;
|
|
|
|
|
|
|
|
if (cleancache_enabled && cleancache_fs_enabled(page))
|
|
|
|
ret = __cleancache_get_page(page);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void cleancache_put_page(struct page *page)
|
|
|
|
{
|
|
|
|
if (cleancache_enabled && cleancache_fs_enabled(page))
|
|
|
|
__cleancache_put_page(page);
|
|
|
|
}
|
|
|
|
|
2011-09-21 23:56:28 +08:00
|
|
|
static inline void cleancache_invalidate_page(struct address_space *mapping,
|
2011-05-27 00:01:36 +08:00
|
|
|
struct page *page)
|
|
|
|
{
|
|
|
|
/* careful... page->mapping is NULL sometimes when this is called */
|
|
|
|
if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping))
|
2011-09-21 23:56:28 +08:00
|
|
|
__cleancache_invalidate_page(mapping, page);
|
2011-05-27 00:01:36 +08:00
|
|
|
}
|
|
|
|
|
2011-09-21 23:56:28 +08:00
|
|
|
static inline void cleancache_invalidate_inode(struct address_space *mapping)
|
2011-05-27 00:01:36 +08:00
|
|
|
{
|
|
|
|
if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping))
|
2011-09-21 23:56:28 +08:00
|
|
|
__cleancache_invalidate_inode(mapping);
|
2011-05-27 00:01:36 +08:00
|
|
|
}
|
|
|
|
|
2011-09-21 23:56:28 +08:00
|
|
|
static inline void cleancache_invalidate_fs(struct super_block *sb)
|
2011-05-27 00:01:36 +08:00
|
|
|
{
|
|
|
|
if (cleancache_enabled)
|
2011-09-21 23:56:28 +08:00
|
|
|
__cleancache_invalidate_fs(sb);
|
2011-05-27 00:01:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
#endif /* _LINUX_CLEANCACHE_H */
|