staging: ramster: remove old driver to prep for new base
[V2: rebased to apply to 20120905 staging-next, no other changes]

To prep for moving the ramster codebase on top of the new redesigned
zcache2 codebase, we remove ramster (as well as its contained diverged
v1.1 version of zcache) entirely.

Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Dan Magenheimer <dan.magenheimer@oracle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 3ddd31fa34
commit c857ce1659
@@ -122,8 +122,6 @@ source "drivers/staging/android/Kconfig"

source "drivers/staging/telephony/Kconfig"

source "drivers/staging/ramster/Kconfig"

source "drivers/staging/ozwpan/Kconfig"

source "drivers/staging/ccg/Kconfig"
@@ -54,7 +54,6 @@ obj-$(CONFIG_MFD_NVEC) += nvec/
obj-$(CONFIG_DRM_OMAP) += omapdrm/
obj-$(CONFIG_ANDROID) += android/
obj-$(CONFIG_PHONE) += telephony/
obj-$(CONFIG_RAMSTER) += ramster/
obj-$(CONFIG_USB_WPAN_HCD) += ozwpan/
obj-$(CONFIG_USB_G_CCG) += ccg/
obj-$(CONFIG_WIMAX_GDM72XX) += gdm72xx/
@@ -1,13 +0,0 @@
config RAMSTER
        bool "Cross-machine RAM capacity sharing, aka peer-to-peer tmem"
        depends on (CLEANCACHE || FRONTSWAP) && CONFIGFS_FS=y && !ZCACHE && !XVMALLOC && !HIGHMEM && NET
        select LZO_COMPRESS
        select LZO_DECOMPRESS
        default n
        help
          RAMster allows RAM on other machines in a cluster to be utilized
          dynamically and symmetrically instead of swapping to a local swap
          disk, thus improving performance on memory-constrained workloads
          while minimizing total RAM across the cluster. RAMster, like
          zcache, compresses swap pages into local RAM, but then remotifies
          the compressed pages to another node in the RAMster cluster.
@@ -1 +0,0 @@
obj-$(CONFIG_RAMSTER) += zcache-main.o tmem.o r2net.o xvmalloc.o cluster/
@@ -1,13 +0,0 @@
For this staging driver, RAMster duplicates code from drivers/staging/zcache
then incorporates changes to the local copy of the code. For V5, it also
directly incorporates the soon-to-be-removed drivers/staging/zram/xvmalloc.[ch]
as all testing has been done with xvmalloc rather than the new zsmalloc.
Before RAMster can be promoted from staging, the zcache and RAMster drivers
should be either merged or reorganized to separate out common code.

Until V4, RAMster duplicated code from fs/ocfs2/cluster, but this made
RAMster incompatible with ocfs2 running in the same kernel and included
lots of code that could be removed. As of V5, the ocfs2 code has been
mined and made RAMster-specific, made to communicate with a userland
ramster-tools package rather than ocfs2-tools, and can co-exist with ocfs2
both in the same kernel and in userland on the same machine.
@@ -1,3 +0,0 @@
obj-$(CONFIG_RAMSTER) += ramster_nodemanager.o

ramster_nodemanager-objs := heartbeat.o masklog.o nodemanager.o tcp.o
@@ -1,464 +0,0 @@
|
|||
/* -*- mode: c; c-basic-offset: 8; -*-
|
||||
* vim: noexpandtab sw=8 ts=8 sts=0:
|
||||
*
|
||||
* Copyright (C) 2004, 2005, 2012 Oracle. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public
|
||||
* License along with this program; if not, write to the
|
||||
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
|
||||
* Boston, MA 021110-1307, USA.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/configfs.h>
|
||||
|
||||
#include "heartbeat.h"
|
||||
#include "tcp.h"
|
||||
#include "nodemanager.h"
|
||||
|
||||
#include "masklog.h"
|
||||
|
||||
/*
|
||||
* The first heartbeat pass had one global thread that would serialize all hb
|
||||
* callback calls. This global serializing sem should only be removed once
|
||||
* we've made sure that all callees can deal with being called concurrently
|
||||
* from multiple hb region threads.
|
||||
*/
|
||||
static DECLARE_RWSEM(r2hb_callback_sem);
|
||||
|
||||
/*
|
||||
* multiple hb threads are watching multiple regions. A node is live
|
||||
* whenever any of the threads sees activity from the node in its region.
|
||||
*/
|
||||
static DEFINE_SPINLOCK(r2hb_live_lock);
|
||||
static unsigned long r2hb_live_node_bitmap[BITS_TO_LONGS(R2NM_MAX_NODES)];
|
||||
|
||||
static struct r2hb_callback {
|
||||
struct list_head list;
|
||||
} r2hb_callbacks[R2HB_NUM_CB];
|
||||
|
||||
enum r2hb_heartbeat_modes {
|
||||
R2HB_HEARTBEAT_LOCAL = 0,
|
||||
R2HB_HEARTBEAT_GLOBAL,
|
||||
R2HB_HEARTBEAT_NUM_MODES,
|
||||
};
|
||||
|
||||
char *r2hb_heartbeat_mode_desc[R2HB_HEARTBEAT_NUM_MODES] = {
|
||||
"local", /* R2HB_HEARTBEAT_LOCAL */
|
||||
"global", /* R2HB_HEARTBEAT_GLOBAL */
|
||||
};
|
||||
|
||||
unsigned int r2hb_dead_threshold = R2HB_DEFAULT_DEAD_THRESHOLD;
|
||||
unsigned int r2hb_heartbeat_mode = R2HB_HEARTBEAT_LOCAL;
|
||||
|
||||
/* Only sets a new threshold if there are no active regions.
|
||||
*
|
||||
* No locking or otherwise interesting code is required for reading
|
||||
* r2hb_dead_threshold as it can't change once regions are active and
|
||||
* it's not interesting to anyone until then anyway. */
|
||||
static void r2hb_dead_threshold_set(unsigned int threshold)
|
||||
{
|
||||
if (threshold > R2HB_MIN_DEAD_THRESHOLD) {
|
||||
spin_lock(&r2hb_live_lock);
|
||||
r2hb_dead_threshold = threshold;
|
||||
spin_unlock(&r2hb_live_lock);
|
||||
}
|
||||
}
|
||||
|
||||
static int r2hb_global_hearbeat_mode_set(unsigned int hb_mode)
|
||||
{
|
||||
int ret = -1;
|
||||
|
||||
if (hb_mode < R2HB_HEARTBEAT_NUM_MODES) {
|
||||
spin_lock(&r2hb_live_lock);
|
||||
r2hb_heartbeat_mode = hb_mode;
|
||||
ret = 0;
|
||||
spin_unlock(&r2hb_live_lock);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void r2hb_exit(void)
|
||||
{
|
||||
}
|
||||
|
||||
int r2hb_init(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(r2hb_callbacks); i++)
|
||||
INIT_LIST_HEAD(&r2hb_callbacks[i].list);
|
||||
|
||||
memset(r2hb_live_node_bitmap, 0, sizeof(r2hb_live_node_bitmap));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* if we're already in a callback then we're already serialized by the sem */
|
||||
static void r2hb_fill_node_map_from_callback(unsigned long *map,
|
||||
unsigned bytes)
|
||||
{
|
||||
BUG_ON(bytes < (BITS_TO_LONGS(R2NM_MAX_NODES) * sizeof(unsigned long)));
|
||||
|
||||
memcpy(map, &r2hb_live_node_bitmap, bytes);
|
||||
}
|
||||
|
||||
/*
|
||||
* get a map of all nodes that are heartbeating in any regions
|
||||
*/
|
||||
void r2hb_fill_node_map(unsigned long *map, unsigned bytes)
|
||||
{
|
||||
/* callers want to serialize this map and callbacks so that they
|
||||
* can trust that they don't miss nodes coming to the party */
|
||||
down_read(&r2hb_callback_sem);
|
||||
spin_lock(&r2hb_live_lock);
|
||||
r2hb_fill_node_map_from_callback(map, bytes);
|
||||
spin_unlock(&r2hb_live_lock);
|
||||
up_read(&r2hb_callback_sem);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(r2hb_fill_node_map);
|
||||
|
||||
/*
|
||||
* heartbeat configfs bits. The heartbeat set is a default set under
|
||||
* the cluster set in nodemanager.c.
|
||||
*/
|
||||
|
||||
/* heartbeat set */
|
||||
|
||||
struct r2hb_hb_group {
|
||||
struct config_group hs_group;
|
||||
/* some stuff? */
|
||||
};
|
||||
|
||||
static struct r2hb_hb_group *to_r2hb_hb_group(struct config_group *group)
|
||||
{
|
||||
return group ?
|
||||
container_of(group, struct r2hb_hb_group, hs_group)
|
||||
: NULL;
|
||||
}
|
||||
|
||||
static struct config_item r2hb_config_item;
|
||||
|
||||
static struct config_item *r2hb_hb_group_make_item(struct config_group *group,
|
||||
const char *name)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (strlen(name) > R2HB_MAX_REGION_NAME_LEN) {
|
||||
ret = -ENAMETOOLONG;
|
||||
goto free;
|
||||
}
|
||||
|
||||
config_item_put(&r2hb_config_item);
|
||||
|
||||
return &r2hb_config_item;
|
||||
free:
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
static void r2hb_hb_group_drop_item(struct config_group *group,
|
||||
struct config_item *item)
|
||||
{
|
||||
if (r2hb_global_heartbeat_active()) {
|
||||
printk(KERN_NOTICE "ramster: Heartbeat %s "
|
||||
"on region %s (%s)\n",
|
||||
"stopped/aborted", config_item_name(item),
|
||||
"no region");
|
||||
}
|
||||
|
||||
config_item_put(item);
|
||||
}
|
||||
|
||||
struct r2hb_hb_group_attribute {
|
||||
struct configfs_attribute attr;
|
||||
ssize_t (*show)(struct r2hb_hb_group *, char *);
|
||||
ssize_t (*store)(struct r2hb_hb_group *, const char *, size_t);
|
||||
};
|
||||
|
||||
static ssize_t r2hb_hb_group_show(struct config_item *item,
|
||||
struct configfs_attribute *attr,
|
||||
char *page)
|
||||
{
|
||||
struct r2hb_hb_group *reg = to_r2hb_hb_group(to_config_group(item));
|
||||
struct r2hb_hb_group_attribute *r2hb_hb_group_attr =
|
||||
container_of(attr, struct r2hb_hb_group_attribute, attr);
|
||||
ssize_t ret = 0;
|
||||
|
||||
if (r2hb_hb_group_attr->show)
|
||||
ret = r2hb_hb_group_attr->show(reg, page);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t r2hb_hb_group_store(struct config_item *item,
|
||||
struct configfs_attribute *attr,
|
||||
const char *page, size_t count)
|
||||
{
|
||||
struct r2hb_hb_group *reg = to_r2hb_hb_group(to_config_group(item));
|
||||
struct r2hb_hb_group_attribute *r2hb_hb_group_attr =
|
||||
container_of(attr, struct r2hb_hb_group_attribute, attr);
|
||||
ssize_t ret = -EINVAL;
|
||||
|
||||
if (r2hb_hb_group_attr->store)
|
||||
ret = r2hb_hb_group_attr->store(reg, page, count);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t r2hb_hb_group_threshold_show(struct r2hb_hb_group *group,
|
||||
char *page)
|
||||
{
|
||||
return sprintf(page, "%u\n", r2hb_dead_threshold);
|
||||
}
|
||||
|
||||
static ssize_t r2hb_hb_group_threshold_store(struct r2hb_hb_group *group,
|
||||
const char *page,
|
||||
size_t count)
|
||||
{
|
||||
unsigned long tmp;
|
||||
char *p = (char *)page;
|
||||
int err;
|
||||
|
||||
err = kstrtoul(p, 10, &tmp);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* this will validate ranges for us. */
|
||||
r2hb_dead_threshold_set((unsigned int) tmp);
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
static
|
||||
ssize_t r2hb_hb_group_mode_show(struct r2hb_hb_group *group,
|
||||
char *page)
|
||||
{
|
||||
return sprintf(page, "%s\n",
|
||||
r2hb_heartbeat_mode_desc[r2hb_heartbeat_mode]);
|
||||
}
|
||||
|
||||
static
|
||||
ssize_t r2hb_hb_group_mode_store(struct r2hb_hb_group *group,
|
||||
const char *page, size_t count)
|
||||
{
|
||||
unsigned int i;
|
||||
int ret;
|
||||
size_t len;
|
||||
|
||||
len = (page[count - 1] == '\n') ? count - 1 : count;
|
||||
if (!len)
|
||||
return -EINVAL;
|
||||
|
||||
for (i = 0; i < R2HB_HEARTBEAT_NUM_MODES; ++i) {
|
||||
if (strnicmp(page, r2hb_heartbeat_mode_desc[i], len))
|
||||
continue;
|
||||
|
||||
ret = r2hb_global_hearbeat_mode_set(i);
|
||||
if (!ret)
|
||||
printk(KERN_NOTICE "ramster: Heartbeat mode "
|
||||
"set to %s\n",
|
||||
r2hb_heartbeat_mode_desc[i]);
|
||||
return count;
|
||||
}
|
||||
|
||||
return -EINVAL;
|
||||
|
||||
}
|
||||
|
||||
static struct r2hb_hb_group_attribute r2hb_hb_group_attr_threshold = {
|
||||
.attr = { .ca_owner = THIS_MODULE,
|
||||
.ca_name = "dead_threshold",
|
||||
.ca_mode = S_IRUGO | S_IWUSR },
|
||||
.show = r2hb_hb_group_threshold_show,
|
||||
.store = r2hb_hb_group_threshold_store,
|
||||
};
|
||||
|
||||
static struct r2hb_hb_group_attribute r2hb_hb_group_attr_mode = {
|
||||
.attr = { .ca_owner = THIS_MODULE,
|
||||
.ca_name = "mode",
|
||||
.ca_mode = S_IRUGO | S_IWUSR },
|
||||
.show = r2hb_hb_group_mode_show,
|
||||
.store = r2hb_hb_group_mode_store,
|
||||
};
|
||||
|
||||
static struct configfs_attribute *r2hb_hb_group_attrs[] = {
|
||||
&r2hb_hb_group_attr_threshold.attr,
|
||||
&r2hb_hb_group_attr_mode.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct configfs_item_operations r2hb_hearbeat_group_item_ops = {
|
||||
.show_attribute = r2hb_hb_group_show,
|
||||
.store_attribute = r2hb_hb_group_store,
|
||||
};
|
||||
|
||||
static struct configfs_group_operations r2hb_hb_group_group_ops = {
|
||||
.make_item = r2hb_hb_group_make_item,
|
||||
.drop_item = r2hb_hb_group_drop_item,
|
||||
};
|
||||
|
||||
static struct config_item_type r2hb_hb_group_type = {
|
||||
.ct_group_ops = &r2hb_hb_group_group_ops,
|
||||
.ct_item_ops = &r2hb_hearbeat_group_item_ops,
|
||||
.ct_attrs = r2hb_hb_group_attrs,
|
||||
.ct_owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
/* this is just here to avoid touching group in heartbeat.h which the
|
||||
* entire damn world #includes */
|
||||
struct config_group *r2hb_alloc_hb_set(void)
|
||||
{
|
||||
struct r2hb_hb_group *hs = NULL;
|
||||
struct config_group *ret = NULL;
|
||||
|
||||
hs = kzalloc(sizeof(struct r2hb_hb_group), GFP_KERNEL);
|
||||
if (hs == NULL)
|
||||
goto out;
|
||||
|
||||
config_group_init_type_name(&hs->hs_group, "heartbeat",
|
||||
&r2hb_hb_group_type);
|
||||
|
||||
ret = &hs->hs_group;
|
||||
out:
|
||||
if (ret == NULL)
|
||||
kfree(hs);
|
||||
return ret;
|
||||
}
|
||||
|
||||
void r2hb_free_hb_set(struct config_group *group)
|
||||
{
|
||||
struct r2hb_hb_group *hs = to_r2hb_hb_group(group);
|
||||
kfree(hs);
|
||||
}
|
||||
|
||||
/* hb callback registration and issuing */
|
||||
|
||||
static struct r2hb_callback *hbcall_from_type(enum r2hb_callback_type type)
|
||||
{
|
||||
if (type == R2HB_NUM_CB)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
return &r2hb_callbacks[type];
|
||||
}
|
||||
|
||||
void r2hb_setup_callback(struct r2hb_callback_func *hc,
|
||||
enum r2hb_callback_type type,
|
||||
r2hb_cb_func *func,
|
||||
void *data,
|
||||
int priority)
|
||||
{
|
||||
INIT_LIST_HEAD(&hc->hc_item);
|
||||
hc->hc_func = func;
|
||||
hc->hc_data = data;
|
||||
hc->hc_priority = priority;
|
||||
hc->hc_type = type;
|
||||
hc->hc_magic = R2HB_CB_MAGIC;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(r2hb_setup_callback);
|
||||
|
||||
int r2hb_register_callback(const char *region_uuid,
|
||||
struct r2hb_callback_func *hc)
|
||||
{
|
||||
struct r2hb_callback_func *tmp;
|
||||
struct list_head *iter;
|
||||
struct r2hb_callback *hbcall;
|
||||
int ret;
|
||||
|
||||
BUG_ON(hc->hc_magic != R2HB_CB_MAGIC);
|
||||
BUG_ON(!list_empty(&hc->hc_item));
|
||||
|
||||
hbcall = hbcall_from_type(hc->hc_type);
|
||||
if (IS_ERR(hbcall)) {
|
||||
ret = PTR_ERR(hbcall);
|
||||
goto out;
|
||||
}
|
||||
|
||||
down_write(&r2hb_callback_sem);
|
||||
|
||||
list_for_each(iter, &hbcall->list) {
|
||||
tmp = list_entry(iter, struct r2hb_callback_func, hc_item);
|
||||
if (hc->hc_priority < tmp->hc_priority) {
|
||||
list_add_tail(&hc->hc_item, iter);
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (list_empty(&hc->hc_item))
|
||||
list_add_tail(&hc->hc_item, &hbcall->list);
|
||||
|
||||
up_write(&r2hb_callback_sem);
|
||||
ret = 0;
|
||||
out:
|
||||
mlog(ML_CLUSTER, "returning %d on behalf of %p for funcs %p\n",
|
||||
ret, __builtin_return_address(0), hc);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(r2hb_register_callback);
|
||||
|
||||
void r2hb_unregister_callback(const char *region_uuid,
|
||||
struct r2hb_callback_func *hc)
|
||||
{
|
||||
BUG_ON(hc->hc_magic != R2HB_CB_MAGIC);
|
||||
|
||||
mlog(ML_CLUSTER, "on behalf of %p for funcs %p\n",
|
||||
__builtin_return_address(0), hc);
|
||||
|
||||
/* XXX Can this happen _with_ a region reference? */
|
||||
if (list_empty(&hc->hc_item))
|
||||
return;
|
||||
|
||||
down_write(&r2hb_callback_sem);
|
||||
|
||||
list_del_init(&hc->hc_item);
|
||||
|
||||
up_write(&r2hb_callback_sem);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(r2hb_unregister_callback);
|
||||
|
||||
int r2hb_check_node_heartbeating_from_callback(u8 node_num)
|
||||
{
|
||||
unsigned long testing_map[BITS_TO_LONGS(R2NM_MAX_NODES)];
|
||||
|
||||
r2hb_fill_node_map_from_callback(testing_map, sizeof(testing_map));
|
||||
if (!test_bit(node_num, testing_map)) {
|
||||
mlog(ML_HEARTBEAT,
|
||||
"node (%u) does not have heartbeating enabled.\n",
|
||||
node_num);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(r2hb_check_node_heartbeating_from_callback);
|
||||
|
||||
void r2hb_stop_all_regions(void)
|
||||
{
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(r2hb_stop_all_regions);
|
||||
|
||||
/*
|
||||
* this is just a hack until we get the plumbing which flips file systems
|
||||
* read only and drops the hb ref instead of killing the node dead.
|
||||
*/
|
||||
int r2hb_global_heartbeat_active(void)
|
||||
{
|
||||
return (r2hb_heartbeat_mode == R2HB_HEARTBEAT_GLOBAL);
|
||||
}
|
||||
EXPORT_SYMBOL(r2hb_global_heartbeat_active);
|
||||
|
||||
/* added for RAMster */
|
||||
void r2hb_manual_set_node_heartbeating(int node_num)
|
||||
{
|
||||
if (node_num < R2NM_MAX_NODES)
|
||||
set_bit(node_num, r2hb_live_node_bitmap);
|
||||
}
|
||||
EXPORT_SYMBOL(r2hb_manual_set_node_heartbeating);
|
|
@@ -1,87 +0,0 @@
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * heartbeat.h
 *
 * Function prototypes
 *
 * Copyright (C) 2004 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 *
 */

#ifndef R2CLUSTER_HEARTBEAT_H
#define R2CLUSTER_HEARTBEAT_H

#define R2HB_REGION_TIMEOUT_MS 2000

#define R2HB_MAX_REGION_NAME_LEN 32

/* number of changes to be seen as live */
#define R2HB_LIVE_THRESHOLD 2
/* number of equal samples to be seen as dead */
extern unsigned int r2hb_dead_threshold;
#define R2HB_DEFAULT_DEAD_THRESHOLD 31
/* Otherwise MAX_WRITE_TIMEOUT will be zero... */
#define R2HB_MIN_DEAD_THRESHOLD 2
#define R2HB_MAX_WRITE_TIMEOUT_MS \
        (R2HB_REGION_TIMEOUT_MS * (r2hb_dead_threshold - 1))

#define R2HB_CB_MAGIC 0x51d1e4ec

/* callback stuff */
enum r2hb_callback_type {
        R2HB_NODE_DOWN_CB = 0,
        R2HB_NODE_UP_CB,
        R2HB_NUM_CB
};

struct r2nm_node;
typedef void (r2hb_cb_func)(struct r2nm_node *, int, void *);

struct r2hb_callback_func {
        u32 hc_magic;
        struct list_head hc_item;
        r2hb_cb_func *hc_func;
        void *hc_data;
        int hc_priority;
        enum r2hb_callback_type hc_type;
};

struct config_group *r2hb_alloc_hb_set(void);
void r2hb_free_hb_set(struct config_group *group);

void r2hb_setup_callback(struct r2hb_callback_func *hc,
        enum r2hb_callback_type type,
        r2hb_cb_func *func,
        void *data,
        int priority);
int r2hb_register_callback(const char *region_uuid,
        struct r2hb_callback_func *hc);
void r2hb_unregister_callback(const char *region_uuid,
        struct r2hb_callback_func *hc);
void r2hb_fill_node_map(unsigned long *map,
        unsigned bytes);
void r2hb_exit(void);
int r2hb_init(void);
int r2hb_check_node_heartbeating_from_callback(u8 node_num);
void r2hb_stop_all_regions(void);
int r2hb_get_all_regions(char *region_uuids, u8 numregions);
int r2hb_global_heartbeat_active(void);
void r2hb_manual_set_node_heartbeating(int);

#endif /* R2CLUSTER_HEARTBEAT_H */
@@ -1,155 +0,0 @@
|
|||
/* -*- mode: c; c-basic-offset: 8; -*-
|
||||
* vim: noexpandtab sw=8 ts=8 sts=0:
|
||||
*
|
||||
* Copyright (C) 2004, 2005, 2012 Oracle. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public
|
||||
* License along with this program; if not, write to the
|
||||
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
|
||||
* Boston, MA 021110-1307, USA.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/proc_fs.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/uaccess.h>
|
||||
|
||||
#include "masklog.h"
|
||||
|
||||
struct mlog_bits r2_mlog_and_bits = MLOG_BITS_RHS(MLOG_INITIAL_AND_MASK);
|
||||
EXPORT_SYMBOL_GPL(r2_mlog_and_bits);
|
||||
struct mlog_bits r2_mlog_not_bits = MLOG_BITS_RHS(0);
|
||||
EXPORT_SYMBOL_GPL(r2_mlog_not_bits);
|
||||
|
||||
static ssize_t mlog_mask_show(u64 mask, char *buf)
|
||||
{
|
||||
char *state;
|
||||
|
||||
if (__mlog_test_u64(mask, r2_mlog_and_bits))
|
||||
state = "allow";
|
||||
else if (__mlog_test_u64(mask, r2_mlog_not_bits))
|
||||
state = "deny";
|
||||
else
|
||||
state = "off";
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%s\n", state);
|
||||
}
|
||||
|
||||
static ssize_t mlog_mask_store(u64 mask, const char *buf, size_t count)
|
||||
{
|
||||
if (!strnicmp(buf, "allow", 5)) {
|
||||
__mlog_set_u64(mask, r2_mlog_and_bits);
|
||||
__mlog_clear_u64(mask, r2_mlog_not_bits);
|
||||
} else if (!strnicmp(buf, "deny", 4)) {
|
||||
__mlog_set_u64(mask, r2_mlog_not_bits);
|
||||
__mlog_clear_u64(mask, r2_mlog_and_bits);
|
||||
} else if (!strnicmp(buf, "off", 3)) {
|
||||
__mlog_clear_u64(mask, r2_mlog_not_bits);
|
||||
__mlog_clear_u64(mask, r2_mlog_and_bits);
|
||||
} else
|
||||
return -EINVAL;
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
struct mlog_attribute {
|
||||
struct attribute attr;
|
||||
u64 mask;
|
||||
};
|
||||
|
||||
#define to_mlog_attr(_attr) container_of(_attr, struct mlog_attribute, attr)
|
||||
|
||||
#define define_mask(_name) { \
|
||||
.attr = { \
|
||||
.name = #_name, \
|
||||
.mode = S_IRUGO | S_IWUSR, \
|
||||
}, \
|
||||
.mask = ML_##_name, \
|
||||
}
|
||||
|
||||
static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = {
|
||||
define_mask(TCP),
|
||||
define_mask(MSG),
|
||||
define_mask(SOCKET),
|
||||
define_mask(HEARTBEAT),
|
||||
define_mask(HB_BIO),
|
||||
define_mask(DLMFS),
|
||||
define_mask(DLM),
|
||||
define_mask(DLM_DOMAIN),
|
||||
define_mask(DLM_THREAD),
|
||||
define_mask(DLM_MASTER),
|
||||
define_mask(DLM_RECOVERY),
|
||||
define_mask(DLM_GLUE),
|
||||
define_mask(VOTE),
|
||||
define_mask(CONN),
|
||||
define_mask(QUORUM),
|
||||
define_mask(BASTS),
|
||||
define_mask(CLUSTER),
|
||||
define_mask(ERROR),
|
||||
define_mask(NOTICE),
|
||||
define_mask(KTHREAD),
|
||||
};
|
||||
|
||||
static struct attribute *mlog_attr_ptrs[MLOG_MAX_BITS] = {NULL, };
|
||||
|
||||
static ssize_t mlog_show(struct kobject *obj, struct attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct mlog_attribute *mlog_attr = to_mlog_attr(attr);
|
||||
|
||||
return mlog_mask_show(mlog_attr->mask, buf);
|
||||
}
|
||||
|
||||
static ssize_t mlog_store(struct kobject *obj, struct attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct mlog_attribute *mlog_attr = to_mlog_attr(attr);
|
||||
|
||||
return mlog_mask_store(mlog_attr->mask, buf, count);
|
||||
}
|
||||
|
||||
static const struct sysfs_ops mlog_attr_ops = {
|
||||
.show = mlog_show,
|
||||
.store = mlog_store,
|
||||
};
|
||||
|
||||
static struct kobj_type mlog_ktype = {
|
||||
.default_attrs = mlog_attr_ptrs,
|
||||
.sysfs_ops = &mlog_attr_ops,
|
||||
};
|
||||
|
||||
static struct kset mlog_kset = {
|
||||
.kobj = {.ktype = &mlog_ktype},
|
||||
};
|
||||
|
||||
int r2_mlog_sys_init(struct kset *r2cb_kset)
|
||||
{
|
||||
int i = 0;
|
||||
|
||||
while (mlog_attrs[i].attr.mode) {
|
||||
mlog_attr_ptrs[i] = &mlog_attrs[i].attr;
|
||||
i++;
|
||||
}
|
||||
mlog_attr_ptrs[i] = NULL;
|
||||
|
||||
kobject_set_name(&mlog_kset.kobj, "logmask");
|
||||
mlog_kset.kobj.kset = r2cb_kset;
|
||||
return kset_register(&mlog_kset);
|
||||
}
|
||||
|
||||
void r2_mlog_sys_shutdown(void)
|
||||
{
|
||||
kset_unregister(&mlog_kset);
|
||||
}
|
|
@@ -1,220 +0,0 @@
|
|||
/* -*- mode: c; c-basic-offset: 8; -*-
|
||||
* vim: noexpandtab sw=8 ts=8 sts=0:
|
||||
*
|
||||
* Copyright (C) 2005, 2012 Oracle. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public
|
||||
* License along with this program; if not, write to the
|
||||
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
|
||||
* Boston, MA 021110-1307, USA.
|
||||
*/
|
||||
|
||||
#ifndef R2CLUSTER_MASKLOG_H
|
||||
#define R2CLUSTER_MASKLOG_H
|
||||
|
||||
/*
|
||||
* For now this is a trivial wrapper around printk() that gives the critical
|
||||
* ability to enable sets of debugging output at run-time. In the future this
|
||||
* will almost certainly be redirected to relayfs so that it can pay a
|
||||
* substantially lower heisenberg tax.
|
||||
*
|
||||
* Callers associate the message with a bitmask and a global bitmask is
|
||||
* maintained with help from /proc. If any of the bits match the message is
|
||||
* output.
|
||||
*
|
||||
* We must have efficient bit tests on i386 and it seems gcc still emits crazy
|
||||
* code for the 64bit compare. It emits very good code for the dual unsigned
|
||||
* long tests, though, completely avoiding tests that can never pass if the
|
||||
* caller gives a constant bitmask that fills one of the longs with all 0s. So
|
||||
* the desire is to have almost all of the calls decided on by comparing just
|
||||
* one of the longs. This leads to having infrequently given bits that are
|
||||
* frequently matched in the high bits.
|
||||
*
|
||||
* _ERROR and _NOTICE are used for messages that always go to the console and
|
||||
* have appropriate KERN_ prefixes. We wrap these in our function instead of
|
||||
* just calling printk() so that this can eventually make its way through
|
||||
* relayfs along with the debugging messages. Everything else gets KERN_DEBUG.
|
||||
* The inline tests and macro dance give GCC the opportunity to quite cleverly
|
||||
* only emit the appropriage printk() when the caller passes in a constant
|
||||
* mask, as is almost always the case.
|
||||
*
|
||||
* All this bitmask nonsense is managed from the files under
|
||||
* /sys/fs/r2cb/logmask/. Reading the files gives a straightforward
|
||||
* indication of which bits are allowed (allow) or denied (off/deny).
|
||||
* ENTRY deny
|
||||
* EXIT deny
|
||||
* TCP off
|
||||
* MSG off
|
||||
* SOCKET off
|
||||
* ERROR allow
|
||||
* NOTICE allow
|
||||
*
|
||||
* Writing changes the state of a given bit and requires a strictly formatted
|
||||
* single write() call:
|
||||
*
|
||||
* write(fd, "allow", 5);
|
||||
*
|
||||
* Echoing allow/deny/off string into the logmask files can flip the bits
|
||||
* on or off as expected; here is the bash script for example:
|
||||
*
|
||||
* log_mask="/sys/fs/r2cb/log_mask"
|
||||
* for node in ENTRY EXIT TCP MSG SOCKET ERROR NOTICE; do
|
||||
* echo allow >"$log_mask"/"$node"
|
||||
* done
|
||||
*
|
||||
* The debugfs.ramster tool can also flip the bits with the -l option:
|
||||
*
|
||||
* debugfs.ramster -l TCP allow
|
||||
*/
|
||||
|
||||
/* for task_struct */
|
||||
#include <linux/sched.h>
|
||||
|
||||
/* bits that are frequently given and infrequently matched in the low word */
|
||||
/* NOTE: If you add a flag, you need to also update masklog.c! */
|
||||
#define ML_TCP 0x0000000000000001ULL /* net cluster/tcp.c */
|
||||
#define ML_MSG 0x0000000000000002ULL /* net network messages */
|
||||
#define ML_SOCKET 0x0000000000000004ULL /* net socket lifetime */
|
||||
#define ML_HEARTBEAT 0x0000000000000008ULL /* hb all heartbeat tracking */
|
||||
#define ML_HB_BIO 0x0000000000000010ULL /* hb io tracing */
|
||||
#define ML_DLMFS 0x0000000000000020ULL /* dlm user dlmfs */
|
||||
#define ML_DLM 0x0000000000000040ULL /* dlm general debugging */
|
||||
#define ML_DLM_DOMAIN 0x0000000000000080ULL /* dlm domain debugging */
|
||||
#define ML_DLM_THREAD 0x0000000000000100ULL /* dlm domain thread */
|
||||
#define ML_DLM_MASTER 0x0000000000000200ULL /* dlm master functions */
|
||||
#define ML_DLM_RECOVERY 0x0000000000000400ULL /* dlm master functions */
|
||||
#define ML_DLM_GLUE 0x0000000000000800ULL /* ramster dlm glue layer */
|
||||
#define ML_VOTE 0x0000000000001000ULL /* ramster node messaging */
|
||||
#define ML_CONN 0x0000000000002000ULL /* net connection management */
|
||||
#define ML_QUORUM 0x0000000000004000ULL /* net connection quorum */
|
||||
#define ML_BASTS 0x0000000000008000ULL /* dlmglue asts and basts */
|
||||
#define ML_CLUSTER 0x0000000000010000ULL /* cluster stack */
|
||||
|
||||
/* bits that are infrequently given and frequently matched in the high word */
|
||||
#define ML_ERROR 0x1000000000000000ULL /* sent to KERN_ERR */
|
||||
#define ML_NOTICE 0x2000000000000000ULL /* setn to KERN_NOTICE */
|
||||
#define ML_KTHREAD 0x4000000000000000ULL /* kernel thread activity */
|
||||
|
||||
#define MLOG_INITIAL_AND_MASK (ML_ERROR|ML_NOTICE)
|
||||
#ifndef MLOG_MASK_PREFIX
|
||||
#define MLOG_MASK_PREFIX 0
|
||||
#endif
|
||||
|
||||
/*
|
||||
* When logging is disabled, force the bit test to 0 for anything other
|
||||
* than errors and notices, allowing gcc to remove the code completely.
|
||||
* When enabled, allow all masks.
|
||||
*/
|
||||
#if defined(CONFIG_RAMSTER_DEBUG_MASKLOG)
|
||||
#define ML_ALLOWED_BITS (~0)
|
||||
#else
|
||||
#define ML_ALLOWED_BITS (ML_ERROR|ML_NOTICE)
|
||||
#endif
|
||||
|
||||
#define MLOG_MAX_BITS 64
|
||||
|
||||
struct mlog_bits {
|
||||
unsigned long words[MLOG_MAX_BITS / BITS_PER_LONG];
|
||||
};
|
||||
|
||||
extern struct mlog_bits r2_mlog_and_bits, r2_mlog_not_bits;
|
||||
|
||||
#if BITS_PER_LONG == 32
|
||||
|
||||
#define __mlog_test_u64(mask, bits) \
|
||||
((u32)(mask & 0xffffffff) & bits.words[0] || \
|
||||
((u64)(mask) >> 32) & bits.words[1])
|
||||
#define __mlog_set_u64(mask, bits) do { \
|
||||
bits.words[0] |= (u32)(mask & 0xffffffff); \
|
||||
bits.words[1] |= (u64)(mask) >> 32; \
|
||||
} while (0)
|
||||
#define __mlog_clear_u64(mask, bits) do { \
|
||||
bits.words[0] &= ~((u32)(mask & 0xffffffff)); \
|
||||
bits.words[1] &= ~((u64)(mask) >> 32); \
|
||||
} while (0)
|
||||
#define MLOG_BITS_RHS(mask) { \
|
||||
{ \
|
||||
[0] = (u32)(mask & 0xffffffff), \
|
||||
[1] = (u64)(mask) >> 32, \
|
||||
} \
|
||||
}
|
||||
|
||||
#else /* 32bit long above, 64bit long below */
|
||||
|
||||
#define __mlog_test_u64(mask, bits) ((mask) & bits.words[0])
|
||||
#define __mlog_set_u64(mask, bits) do { \
|
||||
bits.words[0] |= (mask); \
|
||||
} while (0)
|
||||
#define __mlog_clear_u64(mask, bits) do { \
|
||||
bits.words[0] &= ~(mask); \
|
||||
} while (0)
|
||||
#define MLOG_BITS_RHS(mask) { { (mask) } }
|
||||
|
||||
#endif
|
||||
|
||||
/*
|
||||
* smp_processor_id() "helpfully" screams when called outside preemptible
|
||||
* regions in current kernels. sles doesn't have the variants that don't
|
||||
* scream. just do this instead of trying to guess which we're building
|
||||
* against.. *sigh*.
|
||||
*/
|
||||
#define __mlog_cpu_guess ({ \
|
||||
unsigned long _cpu = get_cpu(); \
|
||||
put_cpu(); \
|
||||
_cpu; \
|
||||
})
|
||||
|
||||
/* In the following two macros, the whitespace after the ',' just
|
||||
* before ##args is intentional. Otherwise, gcc 2.95 will eat the
|
||||
* previous token if args expands to nothing.
|
||||
*/
|
||||
#define __mlog_printk(level, fmt, args...) \
|
||||
printk(level "(%s,%u,%lu):%s:%d " fmt, current->comm, \
|
||||
task_pid_nr(current), __mlog_cpu_guess, \
|
||||
__PRETTY_FUNCTION__, __LINE__ , ##args)
|
||||
|
||||
#define mlog(mask, fmt, args...) do { \
|
||||
u64 __m = MLOG_MASK_PREFIX | (mask); \
|
||||
if ((__m & ML_ALLOWED_BITS) && \
|
||||
__mlog_test_u64(__m, r2_mlog_and_bits) && \
|
||||
!__mlog_test_u64(__m, r2_mlog_not_bits)) { \
|
||||
if (__m & ML_ERROR) \
|
||||
__mlog_printk(KERN_ERR, "ERROR: "fmt , ##args); \
|
||||
else if (__m & ML_NOTICE) \
|
||||
__mlog_printk(KERN_NOTICE, fmt , ##args); \
|
||||
else \
|
||||
__mlog_printk(KERN_INFO, fmt , ##args); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define mlog_errno(st) do { \
|
||||
int _st = (st); \
|
||||
if (_st != -ERESTARTSYS && _st != -EINTR && \
|
||||
_st != AOP_TRUNCATED_PAGE && _st != -ENOSPC) \
|
||||
mlog(ML_ERROR, "status = %lld\n", (long long)_st); \
|
||||
} while (0)
|
||||
|
||||
#define mlog_bug_on_msg(cond, fmt, args...) do { \
|
||||
if (cond) { \
|
||||
mlog(ML_ERROR, "bug expression: " #cond "\n"); \
|
||||
mlog(ML_ERROR, fmt, ##args); \
|
||||
BUG(); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#include <linux/kobject.h>
|
||||
#include <linux/sysfs.h>
|
||||
int r2_mlog_sys_init(struct kset *r2cb_subsys);
|
||||
void r2_mlog_sys_shutdown(void);
|
||||
|
||||
#endif /* R2CLUSTER_MASKLOG_H */
|
|
@@ -1,992 +0,0 @@
|
|||
/* -*- mode: c; c-basic-offset: 8; -*-
|
||||
* vim: noexpandtab sw=8 ts=8 sts=0:
|
||||
*
|
||||
* Copyright (C) 2004, 2005, 2012 Oracle. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public
|
||||
* License along with this program; if not, write to the
|
||||
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
|
||||
* Boston, MA 021110-1307, USA.
|
||||
*/
|
||||
|
||||
#include <linux/slab.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/configfs.h>
|
||||
|
||||
#include "tcp.h"
|
||||
#include "nodemanager.h"
|
||||
#include "heartbeat.h"
|
||||
#include "masklog.h"
|
||||
|
||||
/* for now we operate under the assertion that there can be only one
|
||||
* cluster active at a time. Changing this will require trickling
|
||||
* cluster references throughout where nodes are looked up */
|
||||
struct r2nm_cluster *r2nm_single_cluster;
|
||||
|
||||
char *r2nm_fence_method_desc[R2NM_FENCE_METHODS] = {
|
||||
"reset", /* R2NM_FENCE_RESET */
|
||||
"panic", /* R2NM_FENCE_PANIC */
|
||||
};
|
||||
|
||||
struct r2nm_node *r2nm_get_node_by_num(u8 node_num)
|
||||
{
|
||||
struct r2nm_node *node = NULL;
|
||||
|
||||
if (node_num >= R2NM_MAX_NODES || r2nm_single_cluster == NULL)
|
||||
goto out;
|
||||
|
||||
read_lock(&r2nm_single_cluster->cl_nodes_lock);
|
||||
node = r2nm_single_cluster->cl_nodes[node_num];
|
||||
if (node)
|
||||
config_item_get(&node->nd_item);
|
||||
read_unlock(&r2nm_single_cluster->cl_nodes_lock);
|
||||
out:
|
||||
return node;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(r2nm_get_node_by_num);
|
||||
|
||||
int r2nm_configured_node_map(unsigned long *map, unsigned bytes)
|
||||
{
|
||||
struct r2nm_cluster *cluster = r2nm_single_cluster;
|
||||
|
||||
BUG_ON(bytes < (sizeof(cluster->cl_nodes_bitmap)));
|
||||
|
||||
if (cluster == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
read_lock(&cluster->cl_nodes_lock);
|
||||
memcpy(map, cluster->cl_nodes_bitmap, sizeof(cluster->cl_nodes_bitmap));
|
||||
read_unlock(&cluster->cl_nodes_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(r2nm_configured_node_map);
|
||||
|
||||
static struct r2nm_node *r2nm_node_ip_tree_lookup(struct r2nm_cluster *cluster,
|
||||
__be32 ip_needle,
|
||||
struct rb_node ***ret_p,
|
||||
struct rb_node **ret_parent)
|
||||
{
|
||||
struct rb_node **p = &cluster->cl_node_ip_tree.rb_node;
|
||||
struct rb_node *parent = NULL;
|
||||
struct r2nm_node *node, *ret = NULL;
|
||||
|
||||
while (*p) {
|
||||
int cmp;
|
||||
|
||||
parent = *p;
|
||||
node = rb_entry(parent, struct r2nm_node, nd_ip_node);
|
||||
|
||||
cmp = memcmp(&ip_needle, &node->nd_ipv4_address,
|
||||
sizeof(ip_needle));
|
||||
if (cmp < 0)
|
||||
p = &(*p)->rb_left;
|
||||
else if (cmp > 0)
|
||||
p = &(*p)->rb_right;
|
||||
else {
|
||||
ret = node;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (ret_p != NULL)
|
||||
*ret_p = p;
|
||||
if (ret_parent != NULL)
|
||||
*ret_parent = parent;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
struct r2nm_node *r2nm_get_node_by_ip(__be32 addr)
|
||||
{
|
||||
struct r2nm_node *node = NULL;
|
||||
struct r2nm_cluster *cluster = r2nm_single_cluster;
|
||||
|
||||
if (cluster == NULL)
|
||||
goto out;
|
||||
|
||||
read_lock(&cluster->cl_nodes_lock);
|
||||
node = r2nm_node_ip_tree_lookup(cluster, addr, NULL, NULL);
|
||||
if (node)
|
||||
config_item_get(&node->nd_item);
|
||||
read_unlock(&cluster->cl_nodes_lock);
|
||||
|
||||
out:
|
||||
return node;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(r2nm_get_node_by_ip);
|
||||
|
||||
void r2nm_node_put(struct r2nm_node *node)
|
||||
{
|
||||
config_item_put(&node->nd_item);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(r2nm_node_put);
|
||||
|
||||
void r2nm_node_get(struct r2nm_node *node)
|
||||
{
|
||||
config_item_get(&node->nd_item);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(r2nm_node_get);
|
||||
|
||||
u8 r2nm_this_node(void)
|
||||
{
|
||||
u8 node_num = R2NM_MAX_NODES;
|
||||
|
||||
if (r2nm_single_cluster && r2nm_single_cluster->cl_has_local)
|
||||
node_num = r2nm_single_cluster->cl_local_node;
|
||||
|
||||
return node_num;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(r2nm_this_node);
|
||||
|
||||
/* node configfs bits */
|
||||
|
||||
static struct r2nm_cluster *to_r2nm_cluster(struct config_item *item)
|
||||
{
|
||||
return item ?
|
||||
container_of(to_config_group(item), struct r2nm_cluster,
|
||||
cl_group)
|
||||
: NULL;
|
||||
}
|
||||
|
||||
static struct r2nm_node *to_r2nm_node(struct config_item *item)
|
||||
{
|
||||
return item ? container_of(item, struct r2nm_node, nd_item) : NULL;
|
||||
}
|
||||
|
||||
static void r2nm_node_release(struct config_item *item)
|
||||
{
|
||||
struct r2nm_node *node = to_r2nm_node(item);
|
||||
kfree(node);
|
||||
}
|
||||
|
||||
static ssize_t r2nm_node_num_read(struct r2nm_node *node, char *page)
|
||||
{
|
||||
return sprintf(page, "%d\n", node->nd_num);
|
||||
}
|
||||
|
||||
static struct r2nm_cluster *to_r2nm_cluster_from_node(struct r2nm_node *node)
|
||||
{
|
||||
/* through the first node_set .parent
|
||||
* mycluster/nodes/mynode == r2nm_cluster->r2nm_node_group->r2nm_node */
|
||||
return to_r2nm_cluster(node->nd_item.ci_parent->ci_parent);
|
||||
}
|
||||
|
||||
enum {
|
||||
R2NM_NODE_ATTR_NUM = 0,
|
||||
R2NM_NODE_ATTR_PORT,
|
||||
R2NM_NODE_ATTR_ADDRESS,
|
||||
R2NM_NODE_ATTR_LOCAL,
|
||||
};
|
||||
|
||||
static ssize_t r2nm_node_num_write(struct r2nm_node *node, const char *page,
|
||||
size_t count)
|
||||
{
|
||||
struct r2nm_cluster *cluster = to_r2nm_cluster_from_node(node);
|
||||
unsigned long tmp;
|
||||
char *p = (char *)page;
|
||||
int err;
|
||||
|
||||
err = kstrtoul(p, 10, &tmp);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (tmp >= R2NM_MAX_NODES)
|
||||
return -ERANGE;
|
||||
|
||||
/* once we're in the cl_nodes tree networking can look us up by
|
||||
* node number and try to use our address and port attributes
|
||||
* to connect to this node.. make sure that they've been set
|
||||
* before writing the node attribute? */
|
||||
if (!test_bit(R2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
|
||||
!test_bit(R2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
|
||||
return -EINVAL; /* XXX */
|
||||
|
||||
write_lock(&cluster->cl_nodes_lock);
|
||||
if (cluster->cl_nodes[tmp])
|
||||
p = NULL;
|
||||
else {
|
||||
cluster->cl_nodes[tmp] = node;
|
||||
node->nd_num = tmp;
|
||||
set_bit(tmp, cluster->cl_nodes_bitmap);
|
||||
}
|
||||
write_unlock(&cluster->cl_nodes_lock);
|
||||
if (p == NULL)
|
||||
return -EEXIST;
|
||||
|
||||
return count;
|
||||
}
|
||||
static ssize_t r2nm_node_ipv4_port_read(struct r2nm_node *node, char *page)
|
||||
{
|
||||
return sprintf(page, "%u\n", ntohs(node->nd_ipv4_port));
|
||||
}
|
||||
|
||||
static ssize_t r2nm_node_ipv4_port_write(struct r2nm_node *node,
|
||||
const char *page, size_t count)
|
||||
{
|
||||
unsigned long tmp;
|
||||
char *p = (char *)page;
|
||||
int err;
|
||||
|
||||
err = kstrtoul(p, 10, &tmp);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (tmp == 0)
|
||||
return -EINVAL;
|
||||
if (tmp >= (u16)-1)
|
||||
return -ERANGE;
|
||||
|
||||
node->nd_ipv4_port = htons(tmp);
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
static ssize_t r2nm_node_ipv4_address_read(struct r2nm_node *node, char *page)
|
||||
{
|
||||
return sprintf(page, "%pI4\n", &node->nd_ipv4_address);
|
||||
}
|
||||
|
||||
static ssize_t r2nm_node_ipv4_address_write(struct r2nm_node *node,
|
||||
const char *page,
|
||||
size_t count)
|
||||
{
|
||||
struct r2nm_cluster *cluster = to_r2nm_cluster_from_node(node);
|
||||
int ret, i;
|
||||
struct rb_node **p, *parent;
|
||||
unsigned int octets[4];
|
||||
__be32 ipv4_addr = 0;
|
||||
|
||||
ret = sscanf(page, "%3u.%3u.%3u.%3u", &octets[3], &octets[2],
|
||||
&octets[1], &octets[0]);
|
||||
if (ret != 4)
|
||||
return -EINVAL;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(octets); i++) {
|
||||
if (octets[i] > 255)
|
||||
return -ERANGE;
|
||||
be32_add_cpu(&ipv4_addr, octets[i] << (i * 8));
|
||||
}
|
||||
|
||||
ret = 0;
|
||||
write_lock(&cluster->cl_nodes_lock);
|
||||
if (r2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent))
|
||||
ret = -EEXIST;
|
||||
else {
|
||||
rb_link_node(&node->nd_ip_node, parent, p);
|
||||
rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree);
|
||||
}
|
||||
write_unlock(&cluster->cl_nodes_lock);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
memcpy(&node->nd_ipv4_address, &ipv4_addr, sizeof(ipv4_addr));
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
static ssize_t r2nm_node_local_read(struct r2nm_node *node, char *page)
|
||||
{
|
||||
return sprintf(page, "%d\n", node->nd_local);
|
||||
}
|
||||
|
||||
static ssize_t r2nm_node_local_write(struct r2nm_node *node, const char *page,
|
||||
size_t count)
|
||||
{
|
||||
struct r2nm_cluster *cluster = to_r2nm_cluster_from_node(node);
|
||||
unsigned long tmp;
|
||||
char *p = (char *)page;
|
||||
ssize_t ret;
|
||||
int err;
|
||||
|
||||
err = kstrtoul(p, 10, &tmp);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
tmp = !!tmp; /* boolean of whether this node wants to be local */
|
||||
|
||||
/* setting local turns on networking rx for now so we require having
|
||||
* set everything else first */
|
||||
if (!test_bit(R2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
|
||||
!test_bit(R2NM_NODE_ATTR_NUM, &node->nd_set_attributes) ||
|
||||
!test_bit(R2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
|
||||
return -EINVAL; /* XXX */
|
||||
|
||||
/* the only failure case is trying to set a new local node
|
||||
* when a different one is already set */
|
||||
if (tmp && tmp == cluster->cl_has_local &&
|
||||
cluster->cl_local_node != node->nd_num)
|
||||
return -EBUSY;
|
||||
|
||||
/* bring up the rx thread if we're setting the new local node. */
|
||||
if (tmp && !cluster->cl_has_local) {
|
||||
ret = r2net_start_listening(node);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (!tmp && cluster->cl_has_local &&
|
||||
cluster->cl_local_node == node->nd_num) {
|
||||
r2net_stop_listening(node);
|
||||
cluster->cl_local_node = R2NM_INVALID_NODE_NUM;
|
||||
}
|
||||
|
||||
node->nd_local = tmp;
|
||||
if (node->nd_local) {
|
||||
cluster->cl_has_local = tmp;
|
||||
cluster->cl_local_node = node->nd_num;
|
||||
}
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
struct r2nm_node_attribute {
|
||||
struct configfs_attribute attr;
|
||||
ssize_t (*show)(struct r2nm_node *, char *);
|
||||
ssize_t (*store)(struct r2nm_node *, const char *, size_t);
|
||||
};
|
||||
|
||||
static struct r2nm_node_attribute r2nm_node_attr_num = {
|
||||
.attr = { .ca_owner = THIS_MODULE,
|
||||
.ca_name = "num",
|
||||
.ca_mode = S_IRUGO | S_IWUSR },
|
||||
.show = r2nm_node_num_read,
|
||||
.store = r2nm_node_num_write,
|
||||
};
|
||||
|
||||
static struct r2nm_node_attribute r2nm_node_attr_ipv4_port = {
|
||||
.attr = { .ca_owner = THIS_MODULE,
|
||||
.ca_name = "ipv4_port",
|
||||
.ca_mode = S_IRUGO | S_IWUSR },
|
||||
.show = r2nm_node_ipv4_port_read,
|
||||
.store = r2nm_node_ipv4_port_write,
|
||||
};
|
||||
|
||||
static struct r2nm_node_attribute r2nm_node_attr_ipv4_address = {
|
||||
.attr = { .ca_owner = THIS_MODULE,
|
||||
.ca_name = "ipv4_address",
|
||||
.ca_mode = S_IRUGO | S_IWUSR },
|
||||
.show = r2nm_node_ipv4_address_read,
|
||||
.store = r2nm_node_ipv4_address_write,
|
||||
};
|
||||
|
||||
static struct r2nm_node_attribute r2nm_node_attr_local = {
|
||||
.attr = { .ca_owner = THIS_MODULE,
|
||||
.ca_name = "local",
|
||||
.ca_mode = S_IRUGO | S_IWUSR },
|
||||
.show = r2nm_node_local_read,
|
||||
.store = r2nm_node_local_write,
|
||||
};
|
||||
|
||||
static struct configfs_attribute *r2nm_node_attrs[] = {
|
||||
[R2NM_NODE_ATTR_NUM] = &r2nm_node_attr_num.attr,
|
||||
[R2NM_NODE_ATTR_PORT] = &r2nm_node_attr_ipv4_port.attr,
|
||||
[R2NM_NODE_ATTR_ADDRESS] = &r2nm_node_attr_ipv4_address.attr,
|
||||
[R2NM_NODE_ATTR_LOCAL] = &r2nm_node_attr_local.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static int r2nm_attr_index(struct configfs_attribute *attr)
|
||||
{
|
||||
int i;
|
||||
for (i = 0; i < ARRAY_SIZE(r2nm_node_attrs); i++) {
|
||||
if (attr == r2nm_node_attrs[i])
|
||||
return i;
|
||||
}
|
||||
BUG();
|
||||
return 0;
|
||||
}
|
||||
|
||||
static ssize_t r2nm_node_show(struct config_item *item,
|
||||
struct configfs_attribute *attr,
|
||||
char *page)
|
||||
{
|
||||
struct r2nm_node *node = to_r2nm_node(item);
|
||||
struct r2nm_node_attribute *r2nm_node_attr =
|
||||
container_of(attr, struct r2nm_node_attribute, attr);
|
||||
ssize_t ret = 0;
|
||||
|
||||
if (r2nm_node_attr->show)
|
||||
ret = r2nm_node_attr->show(node, page);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t r2nm_node_store(struct config_item *item,
|
||||
struct configfs_attribute *attr,
|
||||
const char *page, size_t count)
|
||||
{
|
||||
struct r2nm_node *node = to_r2nm_node(item);
|
||||
struct r2nm_node_attribute *r2nm_node_attr =
|
||||
container_of(attr, struct r2nm_node_attribute, attr);
|
||||
ssize_t ret;
|
||||
int attr_index = r2nm_attr_index(attr);
|
||||
|
||||
if (r2nm_node_attr->store == NULL) {
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (test_bit(attr_index, &node->nd_set_attributes))
|
||||
return -EBUSY;
|
||||
|
||||
ret = r2nm_node_attr->store(node, page, count);
|
||||
if (ret < count)
|
||||
goto out;
|
||||
|
||||
set_bit(attr_index, &node->nd_set_attributes);
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct configfs_item_operations r2nm_node_item_ops = {
|
||||
.release = r2nm_node_release,
|
||||
.show_attribute = r2nm_node_show,
|
||||
.store_attribute = r2nm_node_store,
|
||||
};
|
||||
|
||||
static struct config_item_type r2nm_node_type = {
|
||||
.ct_item_ops = &r2nm_node_item_ops,
|
||||
.ct_attrs = r2nm_node_attrs,
|
||||
.ct_owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
/* node set */
|
||||
|
||||
struct r2nm_node_group {
|
||||
struct config_group ns_group;
|
||||
/* some stuff? */
|
||||
};
|
||||
|
||||
#if 0
|
||||
static struct r2nm_node_group *to_r2nm_node_group(struct config_group *group)
|
||||
{
|
||||
return group ?
|
||||
container_of(group, struct r2nm_node_group, ns_group)
|
||||
: NULL;
|
||||
}
|
||||
#endif
|
||||
|
||||
struct r2nm_cluster_attribute {
|
||||
struct configfs_attribute attr;
|
||||
ssize_t (*show)(struct r2nm_cluster *, char *);
|
||||
ssize_t (*store)(struct r2nm_cluster *, const char *, size_t);
|
||||
};
|
||||
|
||||
static ssize_t r2nm_cluster_attr_write(const char *page, ssize_t count,
|
||||
unsigned int *val)
|
||||
{
|
||||
unsigned long tmp;
|
||||
char *p = (char *)page;
|
||||
int err;
|
||||
|
||||
err = kstrtoul(p, 10, &tmp);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (tmp == 0)
|
||||
return -EINVAL;
|
||||
if (tmp >= (u32)-1)
|
||||
return -ERANGE;
|
||||
|
||||
*val = tmp;
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
static ssize_t r2nm_cluster_attr_idle_timeout_ms_read(
|
||||
struct r2nm_cluster *cluster, char *page)
|
||||
{
|
||||
return sprintf(page, "%u\n", cluster->cl_idle_timeout_ms);
|
||||
}
|
||||
|
||||
static ssize_t r2nm_cluster_attr_idle_timeout_ms_write(
|
||||
struct r2nm_cluster *cluster, const char *page, size_t count)
|
||||
{
|
||||
ssize_t ret;
|
||||
unsigned int val = 0;
|
||||
|
||||
ret = r2nm_cluster_attr_write(page, count, &val);
|
||||
|
||||
if (ret > 0) {
|
||||
if (cluster->cl_idle_timeout_ms != val
|
||||
&& r2net_num_connected_peers()) {
|
||||
mlog(ML_NOTICE,
|
||||
"r2net: cannot change idle timeout after "
|
||||
"the first peer has agreed to it."
|
||||
" %d connected peers\n",
|
||||
r2net_num_connected_peers());
|
||||
ret = -EINVAL;
|
||||
} else if (val <= cluster->cl_keepalive_delay_ms) {
|
||||
mlog(ML_NOTICE, "r2net: idle timeout must be larger "
|
||||
"than keepalive delay\n");
|
||||
ret = -EINVAL;
|
||||
} else {
|
||||
cluster->cl_idle_timeout_ms = val;
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t r2nm_cluster_attr_keepalive_delay_ms_read(
|
||||
struct r2nm_cluster *cluster, char *page)
|
||||
{
|
||||
return sprintf(page, "%u\n", cluster->cl_keepalive_delay_ms);
|
||||
}
|
||||
|
||||
static ssize_t r2nm_cluster_attr_keepalive_delay_ms_write(
|
||||
struct r2nm_cluster *cluster, const char *page, size_t count)
|
||||
{
|
||||
ssize_t ret;
|
||||
unsigned int val = 0;
|
||||
|
||||
ret = r2nm_cluster_attr_write(page, count, &val);
|
||||
|
||||
if (ret > 0) {
|
||||
if (cluster->cl_keepalive_delay_ms != val
|
||||
&& r2net_num_connected_peers()) {
|
||||
mlog(ML_NOTICE,
|
||||
"r2net: cannot change keepalive delay after"
|
||||
" the first peer has agreed to it."
|
||||
" %d connected peers\n",
|
||||
r2net_num_connected_peers());
|
||||
ret = -EINVAL;
|
||||
} else if (val >= cluster->cl_idle_timeout_ms) {
|
||||
mlog(ML_NOTICE, "r2net: keepalive delay must be "
|
||||
"smaller than idle timeout\n");
|
||||
ret = -EINVAL;
|
||||
} else {
|
||||
cluster->cl_keepalive_delay_ms = val;
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t r2nm_cluster_attr_reconnect_delay_ms_read(
|
||||
struct r2nm_cluster *cluster, char *page)
|
||||
{
|
||||
return sprintf(page, "%u\n", cluster->cl_reconnect_delay_ms);
|
||||
}
|
||||
|
||||
static ssize_t r2nm_cluster_attr_reconnect_delay_ms_write(
|
||||
struct r2nm_cluster *cluster, const char *page, size_t count)
|
||||
{
|
||||
return r2nm_cluster_attr_write(page, count,
|
||||
&cluster->cl_reconnect_delay_ms);
|
||||
}
|
||||
|
||||
static ssize_t r2nm_cluster_attr_fence_method_read(
|
||||
struct r2nm_cluster *cluster, char *page)
|
||||
{
|
||||
ssize_t ret = 0;
|
||||
|
||||
if (cluster)
|
||||
ret = sprintf(page, "%s\n",
|
||||
r2nm_fence_method_desc[cluster->cl_fence_method]);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t r2nm_cluster_attr_fence_method_write(
|
||||
struct r2nm_cluster *cluster, const char *page, size_t count)
|
||||
{
|
||||
unsigned int i;
|
||||
|
||||
if (page[count - 1] != '\n')
|
||||
goto bail;
|
||||
|
||||
for (i = 0; i < R2NM_FENCE_METHODS; ++i) {
|
||||
if (count != strlen(r2nm_fence_method_desc[i]) + 1)
|
||||
continue;
|
||||
if (strncasecmp(page, r2nm_fence_method_desc[i], count - 1))
|
||||
continue;
|
||||
if (cluster->cl_fence_method != i) {
|
||||
printk(KERN_INFO "ramster: Changing fence method to %s\n",
|
||||
r2nm_fence_method_desc[i]);
|
||||
cluster->cl_fence_method = i;
|
||||
}
|
||||
return count;
|
||||
}
|
||||
|
||||
bail:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static struct r2nm_cluster_attribute r2nm_cluster_attr_idle_timeout_ms = {
|
||||
.attr = { .ca_owner = THIS_MODULE,
|
||||
.ca_name = "idle_timeout_ms",
|
||||
.ca_mode = S_IRUGO | S_IWUSR },
|
||||
.show = r2nm_cluster_attr_idle_timeout_ms_read,
|
||||
.store = r2nm_cluster_attr_idle_timeout_ms_write,
|
||||
};
|
||||
|
||||
static struct r2nm_cluster_attribute r2nm_cluster_attr_keepalive_delay_ms = {
|
||||
.attr = { .ca_owner = THIS_MODULE,
|
||||
.ca_name = "keepalive_delay_ms",
|
||||
.ca_mode = S_IRUGO | S_IWUSR },
|
||||
.show = r2nm_cluster_attr_keepalive_delay_ms_read,
|
||||
.store = r2nm_cluster_attr_keepalive_delay_ms_write,
|
||||
};
|
||||
|
||||
static struct r2nm_cluster_attribute r2nm_cluster_attr_reconnect_delay_ms = {
|
||||
.attr = { .ca_owner = THIS_MODULE,
|
||||
.ca_name = "reconnect_delay_ms",
|
||||
.ca_mode = S_IRUGO | S_IWUSR },
|
||||
.show = r2nm_cluster_attr_reconnect_delay_ms_read,
|
||||
.store = r2nm_cluster_attr_reconnect_delay_ms_write,
|
||||
};
|
||||
|
||||
static struct r2nm_cluster_attribute r2nm_cluster_attr_fence_method = {
|
||||
.attr = { .ca_owner = THIS_MODULE,
|
||||
.ca_name = "fence_method",
|
||||
.ca_mode = S_IRUGO | S_IWUSR },
|
||||
.show = r2nm_cluster_attr_fence_method_read,
|
||||
.store = r2nm_cluster_attr_fence_method_write,
|
||||
};
|
||||
|
||||
static struct configfs_attribute *r2nm_cluster_attrs[] = {
|
||||
&r2nm_cluster_attr_idle_timeout_ms.attr,
|
||||
&r2nm_cluster_attr_keepalive_delay_ms.attr,
|
||||
&r2nm_cluster_attr_reconnect_delay_ms.attr,
|
||||
&r2nm_cluster_attr_fence_method.attr,
|
||||
NULL,
|
||||
};
|
||||
static ssize_t r2nm_cluster_show(struct config_item *item,
|
||||
struct configfs_attribute *attr,
|
||||
char *page)
|
||||
{
|
||||
struct r2nm_cluster *cluster = to_r2nm_cluster(item);
|
||||
struct r2nm_cluster_attribute *r2nm_cluster_attr =
|
||||
container_of(attr, struct r2nm_cluster_attribute, attr);
|
||||
ssize_t ret = 0;
|
||||
|
||||
if (r2nm_cluster_attr->show)
|
||||
ret = r2nm_cluster_attr->show(cluster, page);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t r2nm_cluster_store(struct config_item *item,
|
||||
struct configfs_attribute *attr,
|
||||
const char *page, size_t count)
|
||||
{
|
||||
struct r2nm_cluster *cluster = to_r2nm_cluster(item);
|
||||
struct r2nm_cluster_attribute *r2nm_cluster_attr =
|
||||
container_of(attr, struct r2nm_cluster_attribute, attr);
|
||||
ssize_t ret;
|
||||
|
||||
if (r2nm_cluster_attr->store == NULL) {
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = r2nm_cluster_attr->store(cluster, page, count);
|
||||
if (ret < count)
|
||||
goto out;
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct config_item *r2nm_node_group_make_item(struct config_group *group,
						     const char *name)
{
	struct r2nm_node *node = NULL;

	if (strlen(name) > R2NM_MAX_NAME_LEN)
		return ERR_PTR(-ENAMETOOLONG);

	node = kzalloc(sizeof(struct r2nm_node), GFP_KERNEL);
	if (node == NULL)
		return ERR_PTR(-ENOMEM);

	strcpy(node->nd_name, name); /* use item.ci_namebuf instead? */
	config_item_init_type_name(&node->nd_item, name, &r2nm_node_type);
	spin_lock_init(&node->nd_lock);

	mlog(ML_CLUSTER, "r2nm: Registering node %s\n", name);

	return &node->nd_item;
}

static void r2nm_node_group_drop_item(struct config_group *group,
				      struct config_item *item)
{
	struct r2nm_node *node = to_r2nm_node(item);
	struct r2nm_cluster *cluster =
		to_r2nm_cluster(group->cg_item.ci_parent);

	r2net_disconnect_node(node);

	if (cluster->cl_has_local &&
	    (cluster->cl_local_node == node->nd_num)) {
		cluster->cl_has_local = 0;
		cluster->cl_local_node = R2NM_INVALID_NODE_NUM;
		r2net_stop_listening(node);
	}

	/* XXX call into net to stop this node from trading messages */

	write_lock(&cluster->cl_nodes_lock);

	/* XXX sloppy */
	if (node->nd_ipv4_address)
		rb_erase(&node->nd_ip_node, &cluster->cl_node_ip_tree);

	/* nd_num might be 0 if the node number hasn't been set.. */
	if (cluster->cl_nodes[node->nd_num] == node) {
		cluster->cl_nodes[node->nd_num] = NULL;
		clear_bit(node->nd_num, cluster->cl_nodes_bitmap);
	}
	write_unlock(&cluster->cl_nodes_lock);

	mlog(ML_CLUSTER, "r2nm: Unregistered node %s\n",
	     config_item_name(&node->nd_item));

	config_item_put(item);
}

static struct configfs_group_operations r2nm_node_group_group_ops = {
	.make_item	= r2nm_node_group_make_item,
	.drop_item	= r2nm_node_group_drop_item,
};

static struct config_item_type r2nm_node_group_type = {
	.ct_group_ops	= &r2nm_node_group_group_ops,
	.ct_owner	= THIS_MODULE,
};

/* cluster */

static void r2nm_cluster_release(struct config_item *item)
{
	struct r2nm_cluster *cluster = to_r2nm_cluster(item);

	kfree(cluster->cl_group.default_groups);
	kfree(cluster);
}

static struct configfs_item_operations r2nm_cluster_item_ops = {
	.release		= r2nm_cluster_release,
	.show_attribute		= r2nm_cluster_show,
	.store_attribute	= r2nm_cluster_store,
};

static struct config_item_type r2nm_cluster_type = {
	.ct_item_ops	= &r2nm_cluster_item_ops,
	.ct_attrs	= r2nm_cluster_attrs,
	.ct_owner	= THIS_MODULE,
};

/* cluster set */

struct r2nm_cluster_group {
	struct configfs_subsystem cs_subsys;
	/* some stuff? */
};

#if 0
static struct r2nm_cluster_group *
to_r2nm_cluster_group(struct config_group *group)
{
	return group ?
		container_of(to_configfs_subsystem(group),
			     struct r2nm_cluster_group, cs_subsys)
		: NULL;
}
#endif

static struct config_group *
r2nm_cluster_group_make_group(struct config_group *group,
			      const char *name)
{
	struct r2nm_cluster *cluster = NULL;
	struct r2nm_node_group *ns = NULL;
	struct config_group *r2hb_group = NULL, *ret = NULL;
	void *defs = NULL;

	/* this runs under the parent dir's i_mutex; there can be only
	 * one caller in here at a time */
	if (r2nm_single_cluster)
		return ERR_PTR(-ENOSPC);

	cluster = kzalloc(sizeof(struct r2nm_cluster), GFP_KERNEL);
	ns = kzalloc(sizeof(struct r2nm_node_group), GFP_KERNEL);
	defs = kcalloc(3, sizeof(struct config_group *), GFP_KERNEL);
	r2hb_group = r2hb_alloc_hb_set();
	if (cluster == NULL || ns == NULL || r2hb_group == NULL || defs == NULL)
		goto out;

	config_group_init_type_name(&cluster->cl_group, name,
				    &r2nm_cluster_type);
	config_group_init_type_name(&ns->ns_group, "node",
				    &r2nm_node_group_type);

	cluster->cl_group.default_groups = defs;
	cluster->cl_group.default_groups[0] = &ns->ns_group;
	cluster->cl_group.default_groups[1] = r2hb_group;
	cluster->cl_group.default_groups[2] = NULL;
	rwlock_init(&cluster->cl_nodes_lock);
	cluster->cl_node_ip_tree = RB_ROOT;
	cluster->cl_reconnect_delay_ms = R2NET_RECONNECT_DELAY_MS_DEFAULT;
	cluster->cl_idle_timeout_ms = R2NET_IDLE_TIMEOUT_MS_DEFAULT;
	cluster->cl_keepalive_delay_ms = R2NET_KEEPALIVE_DELAY_MS_DEFAULT;
	cluster->cl_fence_method = R2NM_FENCE_RESET;

	ret = &cluster->cl_group;
	r2nm_single_cluster = cluster;

out:
	if (ret == NULL) {
		kfree(cluster);
		kfree(ns);
		r2hb_free_hb_set(r2hb_group);
		kfree(defs);
		ret = ERR_PTR(-ENOMEM);
	}

	return ret;
}

static void r2nm_cluster_group_drop_item(struct config_group *group,
					 struct config_item *item)
{
	struct r2nm_cluster *cluster = to_r2nm_cluster(item);
	int i;
	struct config_item *killme;

	BUG_ON(r2nm_single_cluster != cluster);
	r2nm_single_cluster = NULL;

	for (i = 0; cluster->cl_group.default_groups[i]; i++) {
		killme = &cluster->cl_group.default_groups[i]->cg_item;
		cluster->cl_group.default_groups[i] = NULL;
		config_item_put(killme);
	}

	config_item_put(item);
}

static struct configfs_group_operations r2nm_cluster_group_group_ops = {
	.make_group	= r2nm_cluster_group_make_group,
	.drop_item	= r2nm_cluster_group_drop_item,
};

static struct config_item_type r2nm_cluster_group_type = {
	.ct_group_ops	= &r2nm_cluster_group_group_ops,
	.ct_owner	= THIS_MODULE,
};

static struct r2nm_cluster_group r2nm_cluster_group = {
	.cs_subsys = {
		.su_group = {
			.cg_item = {
				.ci_namebuf = "cluster",
				.ci_type = &r2nm_cluster_group_type,
			},
		},
	},
};

int r2nm_depend_item(struct config_item *item)
{
	return configfs_depend_item(&r2nm_cluster_group.cs_subsys, item);
}

void r2nm_undepend_item(struct config_item *item)
{
	configfs_undepend_item(&r2nm_cluster_group.cs_subsys, item);
}

int r2nm_depend_this_node(void)
{
	int ret = 0;
	struct r2nm_node *local_node;

	local_node = r2nm_get_node_by_num(r2nm_this_node());
	if (!local_node) {
		ret = -EINVAL;
		goto out;
	}

	ret = r2nm_depend_item(&local_node->nd_item);
	r2nm_node_put(local_node);

out:
	return ret;
}

void r2nm_undepend_this_node(void)
{
	struct r2nm_node *local_node;

	local_node = r2nm_get_node_by_num(r2nm_this_node());
	BUG_ON(!local_node);

	r2nm_undepend_item(&local_node->nd_item);
	r2nm_node_put(local_node);
}

static void __exit exit_r2nm(void)
{
	/* XXX sync with hb callbacks and shut down hb? */
	r2net_unregister_hb_callbacks();
	configfs_unregister_subsystem(&r2nm_cluster_group.cs_subsys);

	r2net_exit();
	r2hb_exit();
}

static int __init init_r2nm(void)
{
	int ret = -1;

	ret = r2hb_init();
	if (ret)
		goto out;

	ret = r2net_init();
	if (ret)
		goto out_r2hb;

	ret = r2net_register_hb_callbacks();
	if (ret)
		goto out_r2net;

	config_group_init(&r2nm_cluster_group.cs_subsys.su_group);
	mutex_init(&r2nm_cluster_group.cs_subsys.su_mutex);
	ret = configfs_register_subsystem(&r2nm_cluster_group.cs_subsys);
	if (ret) {
		printk(KERN_ERR "nodemanager: Registration returned %d\n", ret);
		goto out_callbacks;
	}

	if (!ret)
		goto out;

	configfs_unregister_subsystem(&r2nm_cluster_group.cs_subsys);
out_callbacks:
	r2net_unregister_hb_callbacks();
out_r2net:
	r2net_exit();
out_r2hb:
	r2hb_exit();
out:
	return ret;
}

MODULE_AUTHOR("Oracle");
MODULE_LICENSE("GPL");

module_init(init_r2nm)
module_exit(exit_r2nm)
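
The cluster attributes above (idle_timeout_ms, keepalive_delay_ms, reconnect_delay_ms, fence_method) are plain configfs files, so a ramster-tools style utility would set them by writing strings into the cluster directory. A minimal userspace sketch, assuming the conventional /sys/kernel/config mount point and a hypothetical cluster name supplied by the caller; note that the store hook above rejects a value without a trailing newline:

#include <stdio.h>

/* Illustrative only; not part of the removed driver. */
static int set_fence_method(const char *cluster_name, const char *method)
{
	char path[256];
	FILE *f;

	/* e.g. /sys/kernel/config/cluster/<name>/fence_method (assumed layout) */
	snprintf(path, sizeof(path),
		 "/sys/kernel/config/cluster/%s/fence_method", cluster_name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	/* the write handler requires the trailing newline */
	fprintf(f, "%s\n", method);
	return fclose(f);
}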
@ -1,88 +0,0 @@
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * nodemanager.h
 *
 * Function prototypes
 *
 * Copyright (C) 2004 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 *
 */

#ifndef R2CLUSTER_NODEMANAGER_H
#define R2CLUSTER_NODEMANAGER_H

#include "ramster_nodemanager.h"

/* This totally doesn't belong here. */
#include <linux/configfs.h>
#include <linux/rbtree.h>

enum r2nm_fence_method {
	R2NM_FENCE_RESET = 0,
	R2NM_FENCE_PANIC,
	R2NM_FENCE_METHODS,	/* Number of fence methods */
};

struct r2nm_node {
	spinlock_t		nd_lock;
	struct config_item	nd_item;
	char			nd_name[R2NM_MAX_NAME_LEN+1]; /* replace? */
	__u8			nd_num;
	/* only one address per node, as attributes, for now. */
	__be32			nd_ipv4_address;
	__be16			nd_ipv4_port;
	struct rb_node		nd_ip_node;
	/* there can be only one local node for now */
	int			nd_local;

	unsigned long		nd_set_attributes;
};

struct r2nm_cluster {
	struct config_group	cl_group;
	unsigned		cl_has_local:1;
	u8			cl_local_node;
	rwlock_t		cl_nodes_lock;
	struct r2nm_node	*cl_nodes[R2NM_MAX_NODES];
	struct rb_root		cl_node_ip_tree;
	unsigned int		cl_idle_timeout_ms;
	unsigned int		cl_keepalive_delay_ms;
	unsigned int		cl_reconnect_delay_ms;
	enum r2nm_fence_method	cl_fence_method;

	/* part of a hack for disk bitmap.. will go eventually. - zab */
	unsigned long	cl_nodes_bitmap[BITS_TO_LONGS(R2NM_MAX_NODES)];
};

extern struct r2nm_cluster *r2nm_single_cluster;

u8 r2nm_this_node(void);

int r2nm_configured_node_map(unsigned long *map, unsigned bytes);
struct r2nm_node *r2nm_get_node_by_num(u8 node_num);
struct r2nm_node *r2nm_get_node_by_ip(__be32 addr);
void r2nm_node_get(struct r2nm_node *node);
void r2nm_node_put(struct r2nm_node *node);

int r2nm_depend_item(struct config_item *item);
void r2nm_undepend_item(struct config_item *item);
int r2nm_depend_this_node(void);
void r2nm_undepend_this_node(void);

#endif /* R2CLUSTER_NODEMANAGER_H */
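
The dependency helpers declared above let a caller pin the local node's configfs item for as long as it is in use. A minimal sketch of that pattern, with do_cluster_work() as a hypothetical placeholder:

/* Illustrative only; do_cluster_work is a hypothetical callback. */
static int run_with_local_node_pinned(void (*do_cluster_work)(void))
{
	int ret;

	ret = r2nm_depend_this_node();	/* fails if no local node is configured */
	if (ret)
		return ret;

	do_cluster_work();		/* local node cannot be removed meanwhile */

	r2nm_undepend_this_node();	/* release the configfs dependency */
	return 0;
}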
@ -1,39 +0,0 @@
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * ramster_nodemanager.h
 *
 * Header describing the interface between userspace and the kernel
 * for the ramster_nodemanager module.
 *
 * Copyright (C) 2002, 2004, 2012 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 *
 */

#ifndef _RAMSTER_NODEMANAGER_H
#define _RAMSTER_NODEMANAGER_H

#define R2NM_API_VERSION	5

#define R2NM_MAX_NODES		255
#define R2NM_INVALID_NODE_NUM	255

/* host name, group name, cluster name all 64 bytes */
#define R2NM_MAX_NAME_LEN	64	/* __NEW_UTS_LEN */

#endif /* _RAMSTER_NODEMANAGER_H */
File diff suppressed because it is too large
@ -1,159 +0,0 @@
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * tcp.h
 *
 * Function prototypes
 *
 * Copyright (C) 2004 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 *
 */

#ifndef R2CLUSTER_TCP_H
#define R2CLUSTER_TCP_H

#include <linux/socket.h>
#ifdef __KERNEL__
#include <net/sock.h>
#include <linux/tcp.h>
#else
#include <sys/socket.h>
#endif
#include <linux/inet.h>
#include <linux/in.h>

struct r2net_msg {
	__be16 magic;
	__be16 data_len;
	__be16 msg_type;
	__be16 pad1;
	__be32 sys_status;
	__be32 status;
	__be32 key;
	__be32 msg_num;
	__u8  buf[0];
};

typedef int (r2net_msg_handler_func)(struct r2net_msg *msg, u32 len, void *data,
				     void **ret_data);
typedef void (r2net_post_msg_handler_func)(int status, void *data,
					   void *ret_data);

#define R2NET_MAX_PAYLOAD_BYTES  (4096 - sizeof(struct r2net_msg))

/* same as hb delay, we're waiting for another node to recognize our hb */
#define R2NET_RECONNECT_DELAY_MS_DEFAULT	2000

#define R2NET_KEEPALIVE_DELAY_MS_DEFAULT	2000
#define R2NET_IDLE_TIMEOUT_MS_DEFAULT		30000


/* TODO: figure this out.... */
static inline int r2net_link_down(int err, struct socket *sock)
{
	if (sock) {
		if (sock->sk->sk_state != TCP_ESTABLISHED &&
		    sock->sk->sk_state != TCP_CLOSE_WAIT)
			return 1;
	}

	if (err >= 0)
		return 0;
	switch (err) {

	/* ????????????????????????? */
	case -ERESTARTSYS:
	case -EBADF:
	/* When the server has died, an ICMP port unreachable
	 * message prompts ECONNREFUSED. */
	case -ECONNREFUSED:
	case -ENOTCONN:
	case -ECONNRESET:
	case -EPIPE:
		return 1;

	}
	return 0;
}

enum {
	R2NET_DRIVER_UNINITED,
	R2NET_DRIVER_READY,
};

int r2net_send_message(u32 msg_type, u32 key, void *data, u32 len,
		       u8 target_node, int *status);
int r2net_send_message_vec(u32 msg_type, u32 key, struct kvec *vec,
			   size_t veclen, u8 target_node, int *status);

int r2net_register_handler(u32 msg_type, u32 key, u32 max_len,
			   r2net_msg_handler_func *func, void *data,
			   r2net_post_msg_handler_func *post_func,
			   struct list_head *unreg_list);
void r2net_unregister_handler_list(struct list_head *list);

void r2net_fill_node_map(unsigned long *map, unsigned bytes);

void r2net_force_data_magic(struct r2net_msg *, u16, u32);
void r2net_hb_node_up_manual(int);
struct r2net_node *r2net_nn_from_num(u8);

struct r2nm_node;
int r2net_register_hb_callbacks(void);
void r2net_unregister_hb_callbacks(void);
int r2net_start_listening(struct r2nm_node *node);
void r2net_stop_listening(struct r2nm_node *node);
void r2net_disconnect_node(struct r2nm_node *node);
int r2net_num_connected_peers(void);

int r2net_init(void);
void r2net_exit(void);

struct r2net_send_tracking;
struct r2net_sock_container;

#if 0
int r2net_debugfs_init(void);
void r2net_debugfs_exit(void);
void r2net_debug_add_nst(struct r2net_send_tracking *nst);
void r2net_debug_del_nst(struct r2net_send_tracking *nst);
void r2net_debug_add_sc(struct r2net_sock_container *sc);
void r2net_debug_del_sc(struct r2net_sock_container *sc);
#else
static inline int r2net_debugfs_init(void)
{
	return 0;
}
static inline void r2net_debugfs_exit(void)
{
}
static inline void r2net_debug_add_nst(struct r2net_send_tracking *nst)
{
}
static inline void r2net_debug_del_nst(struct r2net_send_tracking *nst)
{
}
static inline void r2net_debug_add_sc(struct r2net_sock_container *sc)
{
}
static inline void r2net_debug_del_sc(struct r2net_sock_container *sc)
{
}
#endif	/* CONFIG_DEBUG_FS */

#endif /* R2CLUSTER_TCP_H */
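
The r2net interface above is message based: a caller registers a handler for a (message type, key) pair and sends messages to a numbered node, receiving the remote handler's return value back as a status. A minimal sketch built only from the declarations in this header; the message type, key and handler below are hypothetical:

#include <linux/list.h>
#include "tcp.h"

#define MY_PROTO_KEY	0x12345678	/* hypothetical protocol key */
#define MY_MSG_PING	1		/* hypothetical message type */

static LIST_HEAD(my_unreg_list);

/* runs on the receiving node; the return value travels back as "status" */
static int my_ping_handler(struct r2net_msg *msg, u32 len, void *data,
			   void **ret_data)
{
	return 0;
}

static int my_proto_init(void)
{
	return r2net_register_handler(MY_MSG_PING, MY_PROTO_KEY,
				      R2NET_MAX_PAYLOAD_BYTES,
				      my_ping_handler, NULL, NULL,
				      &my_unreg_list);
}

static int my_proto_ping(u8 target_node)
{
	int status, ret;

	ret = r2net_send_message(MY_MSG_PING, MY_PROTO_KEY, NULL, 0,
				 target_node, &status);
	return ret ? ret : status;
}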
@ -1,248 +0,0 @@
|
|||
/* -*- mode: c; c-basic-offset: 8; -*-
|
||||
* vim: noexpandtab sw=8 ts=8 sts=0:
|
||||
*
|
||||
* Copyright (C) 2005 Oracle. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public
|
||||
* License along with this program; if not, write to the
|
||||
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
|
||||
* Boston, MA 021110-1307, USA.
|
||||
*/
|
||||
|
||||
#ifndef R2CLUSTER_TCP_INTERNAL_H
|
||||
#define R2CLUSTER_TCP_INTERNAL_H
|
||||
|
||||
#define R2NET_MSG_MAGIC ((u16)0xfa55)
|
||||
#define R2NET_MSG_STATUS_MAGIC ((u16)0xfa56)
|
||||
#define R2NET_MSG_KEEP_REQ_MAGIC ((u16)0xfa57)
|
||||
#define R2NET_MSG_KEEP_RESP_MAGIC ((u16)0xfa58)
|
||||
/*
|
||||
* "data magic" is a long version of "status magic" where the message
|
||||
* payload actually contains data to be passed in reply to certain messages
|
||||
*/
|
||||
#define R2NET_MSG_DATA_MAGIC ((u16)0xfa59)
|
||||
|
||||
/* we're delaying our quorum decision so that heartbeat will have timed
|
||||
* out truly dead nodes by the time we come around to making decisions
|
||||
* on their number */
|
||||
#define R2NET_QUORUM_DELAY_MS \
|
||||
((r2hb_dead_threshold + 2) * R2HB_REGION_TIMEOUT_MS)
|
||||
|
||||
/*
|
||||
* This version number represents quite a lot, unfortunately. It not
|
||||
* only represents the raw network message protocol on the wire but also
|
||||
* locking semantics of the file system using the protocol. It should
|
||||
* be somewhere else, I'm sure, but right now it isn't.
|
||||
*
|
||||
* With version 11, we separate out the filesystem locking portion. The
|
||||
* filesystem now has a major.minor version it negotiates. Version 11
|
||||
* introduces this negotiation to the r2dlm protocol, and as such the
|
||||
* version here in tcp_internal.h should not need to be bumped for
|
||||
* filesystem locking changes.
|
||||
*
|
||||
* New in version 11
|
||||
* - Negotiation of filesystem locking in the dlm join.
|
||||
*
|
||||
* New in version 10:
|
||||
* - Meta/data locks combined
|
||||
*
|
||||
* New in version 9:
|
||||
* - All votes removed
|
||||
*
|
||||
* New in version 8:
|
||||
* - Replace delete inode votes with a cluster lock
|
||||
*
|
||||
* New in version 7:
|
||||
* - DLM join domain includes the live nodemap
|
||||
*
|
||||
* New in version 6:
|
||||
* - DLM lockres remote refcount fixes.
|
||||
*
|
||||
* New in version 5:
|
||||
* - Network timeout checking protocol
|
||||
*
|
||||
* New in version 4:
|
||||
* - Remove i_generation from lock names for better stat performance.
|
||||
*
|
||||
* New in version 3:
|
||||
* - Replace dentry votes with a cluster lock
|
||||
*
|
||||
* New in version 2:
|
||||
* - full 64 bit i_size in the metadata lock lvbs
|
||||
* - introduction of "rw" lock and pushing meta/data locking down
|
||||
*/
|
||||
#define R2NET_PROTOCOL_VERSION 11ULL
|
||||
struct r2net_handshake {
|
||||
__be64 protocol_version;
|
||||
__be64 connector_id;
|
||||
__be32 r2hb_heartbeat_timeout_ms;
|
||||
__be32 r2net_idle_timeout_ms;
|
||||
__be32 r2net_keepalive_delay_ms;
|
||||
__be32 r2net_reconnect_delay_ms;
|
||||
};
|
||||
|
||||
struct r2net_node {
|
||||
/* this is never called from int/bh */
|
||||
spinlock_t nn_lock;
|
||||
|
||||
/* set the moment an sc is allocated and a connect is started */
|
||||
struct r2net_sock_container *nn_sc;
|
||||
/* _valid is only set after the handshake passes and tx can happen */
|
||||
unsigned nn_sc_valid:1;
|
||||
/* if this is set tx just returns it */
|
||||
int nn_persistent_error;
|
||||
/* It is only set to 1 after the idle time out. */
|
||||
atomic_t nn_timeout;
|
||||
|
||||
/* threads waiting for an sc to arrive wait on the wq for generation
|
||||
* to increase. it is increased when a connecting socket succeeds
|
||||
* or fails or when an accepted socket is attached. */
|
||||
wait_queue_head_t nn_sc_wq;
|
||||
|
||||
struct idr nn_status_idr;
|
||||
struct list_head nn_status_list;
|
||||
|
||||
/* connects are attempted from when heartbeat comes up until either hb
|
||||
* goes down, the node is unconfigured, no connect attempts succeed
|
||||
* before R2NET_CONN_IDLE_DELAY, or a connect succeeds. connect_work
|
||||
* is queued from set_nn_state both from hb up and from itself if a
|
||||
* connect attempt fails and so can be self-arming. shutdown is
|
||||
* careful to first mark the nn such that no connects will be attempted
|
||||
* before canceling delayed connect work and flushing the queue. */
|
||||
struct delayed_work nn_connect_work;
|
||||
unsigned long nn_last_connect_attempt;
|
||||
|
||||
/* this is queued as nodes come up and is canceled when a connection is
|
||||
* established. this expiring gives up on the node and errors out
|
||||
* transmits */
|
||||
struct delayed_work nn_connect_expired;
|
||||
|
||||
/* after we give up on a socket we wait a while before deciding
|
||||
* that it is still heartbeating and that we should do some
|
||||
* quorum work */
|
||||
struct delayed_work nn_still_up;
|
||||
};
|
||||
|
||||
struct r2net_sock_container {
|
||||
struct kref sc_kref;
|
||||
/* the next two are valid for the life time of the sc */
|
||||
struct socket *sc_sock;
|
||||
struct r2nm_node *sc_node;
|
||||
|
||||
/* all of these sc work structs hold refs on the sc while they are
|
||||
* queued. they should not be able to ref a freed sc. the teardown
|
||||
* race is with r2net_wq destruction in r2net_stop_listening() */
|
||||
|
||||
/* rx and connect work are generated from socket callbacks. sc
|
||||
* shutdown removes the callbacks and then flushes the work queue */
|
||||
struct work_struct sc_rx_work;
|
||||
struct work_struct sc_connect_work;
|
||||
/* shutdown work is triggered in two ways. the simple way is
|
||||
* for a code path calls ensure_shutdown which gets a lock, removes
|
||||
* the sc from the nn, and queues the work. in this case the
|
||||
* work is single-shot. the work is also queued from a sock
|
||||
* callback, though, and in this case the work will find the sc
|
||||
* still on the nn and will call ensure_shutdown itself.. this
|
||||
* ends up triggering the shutdown work again, though nothing
|
||||
* will be done in that second iteration. so work queue teardown
|
||||
* has to be careful to remove the sc from the nn before waiting
|
||||
* on the work queue so that the shutdown work doesn't remove the
|
||||
* sc and rearm itself.
|
||||
*/
|
||||
struct work_struct sc_shutdown_work;
|
||||
|
||||
struct timer_list sc_idle_timeout;
|
||||
struct delayed_work sc_keepalive_work;
|
||||
|
||||
unsigned sc_handshake_ok:1;
|
||||
|
||||
struct page *sc_page;
|
||||
size_t sc_page_off;
|
||||
|
||||
/* original handlers for the sockets */
|
||||
void (*sc_state_change)(struct sock *sk);
|
||||
void (*sc_data_ready)(struct sock *sk, int bytes);
|
||||
|
||||
u32 sc_msg_key;
|
||||
u16 sc_msg_type;
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
struct list_head sc_net_debug_item;
|
||||
ktime_t sc_tv_timer;
|
||||
ktime_t sc_tv_data_ready;
|
||||
ktime_t sc_tv_advance_start;
|
||||
ktime_t sc_tv_advance_stop;
|
||||
ktime_t sc_tv_func_start;
|
||||
ktime_t sc_tv_func_stop;
|
||||
#endif
|
||||
#ifdef CONFIG_RAMSTER_FS_STATS
|
||||
ktime_t sc_tv_acquiry_total;
|
||||
ktime_t sc_tv_send_total;
|
||||
ktime_t sc_tv_status_total;
|
||||
u32 sc_send_count;
|
||||
u32 sc_recv_count;
|
||||
ktime_t sc_tv_process_total;
|
||||
#endif
|
||||
struct mutex sc_send_lock;
|
||||
};
|
||||
|
||||
struct r2net_msg_handler {
|
||||
struct rb_node nh_node;
|
||||
u32 nh_max_len;
|
||||
u32 nh_msg_type;
|
||||
u32 nh_key;
|
||||
r2net_msg_handler_func *nh_func;
|
||||
r2net_msg_handler_func *nh_func_data;
|
||||
r2net_post_msg_handler_func
|
||||
*nh_post_func;
|
||||
struct kref nh_kref;
|
||||
struct list_head nh_unregister_item;
|
||||
};
|
||||
|
||||
enum r2net_system_error {
|
||||
R2NET_ERR_NONE = 0,
|
||||
R2NET_ERR_NO_HNDLR,
|
||||
R2NET_ERR_OVERFLOW,
|
||||
R2NET_ERR_DIED,
|
||||
R2NET_ERR_MAX
|
||||
};
|
||||
|
||||
struct r2net_status_wait {
|
||||
enum r2net_system_error ns_sys_status;
|
||||
s32 ns_status;
|
||||
int ns_id;
|
||||
wait_queue_head_t ns_wq;
|
||||
struct list_head ns_node_item;
|
||||
};
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
/* just for state dumps */
|
||||
struct r2net_send_tracking {
|
||||
struct list_head st_net_debug_item;
|
||||
struct task_struct *st_task;
|
||||
struct r2net_sock_container *st_sc;
|
||||
u32 st_id;
|
||||
u32 st_msg_type;
|
||||
u32 st_msg_key;
|
||||
u8 st_node;
|
||||
ktime_t st_sock_time;
|
||||
ktime_t st_send_time;
|
||||
ktime_t st_status_time;
|
||||
};
|
||||
#else
|
||||
struct r2net_send_tracking {
|
||||
u32 dummy;
|
||||
};
|
||||
#endif /* CONFIG_DEBUG_FS */
|
||||
|
||||
#endif /* R2CLUSTER_TCP_INTERNAL_H */
|
|
@ -1,401 +0,0 @@
|
|||
/*
|
||||
* r2net.c
|
||||
*
|
||||
* Copyright (c) 2011, Dan Magenheimer, Oracle Corp.
|
||||
*
|
||||
* Ramster_r2net provides an interface between zcache and r2net.
|
||||
*
|
||||
* FIXME: support more than two nodes
|
||||
*/
|
||||
|
||||
#include <linux/list.h>
|
||||
#include "cluster/tcp.h"
|
||||
#include "cluster/nodemanager.h"
|
||||
#include "tmem.h"
|
||||
#include "zcache.h"
|
||||
#include "ramster.h"
|
||||
|
||||
#define RAMSTER_TESTING
|
||||
|
||||
#define RMSTR_KEY 0x77347734
|
||||
|
||||
enum {
|
||||
RMSTR_TMEM_PUT_EPH = 100,
|
||||
RMSTR_TMEM_PUT_PERS,
|
||||
RMSTR_TMEM_ASYNC_GET_REQUEST,
|
||||
RMSTR_TMEM_ASYNC_GET_AND_FREE_REQUEST,
|
||||
RMSTR_TMEM_ASYNC_GET_REPLY,
|
||||
RMSTR_TMEM_FLUSH,
|
||||
RMSTR_TMEM_FLOBJ,
|
||||
RMSTR_TMEM_DESTROY_POOL,
|
||||
};
|
||||
|
||||
#define RMSTR_R2NET_MAX_LEN \
|
||||
(R2NET_MAX_PAYLOAD_BYTES - sizeof(struct tmem_xhandle))
|
||||
|
||||
#include "cluster/tcp_internal.h"
|
||||
|
||||
static struct r2nm_node *r2net_target_node;
|
||||
static int r2net_target_nodenum;
|
||||
|
||||
int r2net_remote_target_node_set(int node_num)
|
||||
{
|
||||
int ret = -1;
|
||||
|
||||
r2net_target_node = r2nm_get_node_by_num(node_num);
|
||||
if (r2net_target_node != NULL) {
|
||||
r2net_target_nodenum = node_num;
|
||||
r2nm_node_put(r2net_target_node);
|
||||
ret = 0;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* FIXME following buffer should be per-cpu, protected by preempt_disable */
|
||||
static char ramster_async_get_buf[R2NET_MAX_PAYLOAD_BYTES];
|
||||
|
||||
static int ramster_remote_async_get_request_handler(struct r2net_msg *msg,
|
||||
u32 len, void *data, void **ret_data)
|
||||
{
|
||||
char *pdata;
|
||||
struct tmem_xhandle xh;
|
||||
int found;
|
||||
size_t size = RMSTR_R2NET_MAX_LEN;
|
||||
u16 msgtype = be16_to_cpu(msg->msg_type);
|
||||
bool get_and_free = (msgtype == RMSTR_TMEM_ASYNC_GET_AND_FREE_REQUEST);
|
||||
unsigned long flags;
|
||||
|
||||
xh = *(struct tmem_xhandle *)msg->buf;
|
||||
if (xh.xh_data_size > RMSTR_R2NET_MAX_LEN)
|
||||
BUG();
|
||||
pdata = ramster_async_get_buf;
|
||||
*(struct tmem_xhandle *)pdata = xh;
|
||||
pdata += sizeof(struct tmem_xhandle);
|
||||
local_irq_save(flags);
|
||||
found = zcache_get(xh.client_id, xh.pool_id, &xh.oid, xh.index,
|
||||
pdata, &size, 1, get_and_free ? 1 : -1);
|
||||
local_irq_restore(flags);
|
||||
if (found < 0) {
|
||||
/* a zero size indicates the get failed */
|
||||
size = 0;
|
||||
}
|
||||
if (size > RMSTR_R2NET_MAX_LEN)
|
||||
BUG();
|
||||
*ret_data = pdata - sizeof(struct tmem_xhandle);
|
||||
/* now make caller (r2net_process_message) handle specially */
|
||||
r2net_force_data_magic(msg, RMSTR_TMEM_ASYNC_GET_REPLY, RMSTR_KEY);
|
||||
return size + sizeof(struct tmem_xhandle);
|
||||
}
|
||||
|
||||
static int ramster_remote_async_get_reply_handler(struct r2net_msg *msg,
|
||||
u32 len, void *data, void **ret_data)
|
||||
{
|
||||
char *in = (char *)msg->buf;
|
||||
int datalen = len - sizeof(struct r2net_msg);
|
||||
int ret = -1;
|
||||
struct tmem_xhandle *xh = (struct tmem_xhandle *)in;
|
||||
|
||||
in += sizeof(struct tmem_xhandle);
|
||||
datalen -= sizeof(struct tmem_xhandle);
|
||||
BUG_ON(datalen < 0 || datalen > PAGE_SIZE);
|
||||
ret = zcache_localify(xh->pool_id, &xh->oid, xh->index,
|
||||
in, datalen, xh->extra);
|
||||
#ifdef RAMSTER_TESTING
|
||||
if (ret == -EEXIST)
|
||||
pr_err("TESTING ArrgREP, aborted overwrite on racy put\n");
|
||||
#endif
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ramster_remote_put_handler(struct r2net_msg *msg,
|
||||
u32 len, void *data, void **ret_data)
|
||||
{
|
||||
struct tmem_xhandle *xh;
|
||||
char *p = (char *)msg->buf;
|
||||
int datalen = len - sizeof(struct r2net_msg) -
|
||||
sizeof(struct tmem_xhandle);
|
||||
u16 msgtype = be16_to_cpu(msg->msg_type);
|
||||
bool ephemeral = (msgtype == RMSTR_TMEM_PUT_EPH);
|
||||
unsigned long flags;
|
||||
int ret;
|
||||
|
||||
xh = (struct tmem_xhandle *)p;
|
||||
p += sizeof(struct tmem_xhandle);
|
||||
zcache_autocreate_pool(xh->client_id, xh->pool_id, ephemeral);
|
||||
local_irq_save(flags);
|
||||
ret = zcache_put(xh->client_id, xh->pool_id, &xh->oid, xh->index,
|
||||
p, datalen, 1, ephemeral ? 1 : -1);
|
||||
local_irq_restore(flags);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ramster_remote_flush_handler(struct r2net_msg *msg,
|
||||
u32 len, void *data, void **ret_data)
|
||||
{
|
||||
struct tmem_xhandle *xh;
|
||||
char *p = (char *)msg->buf;
|
||||
|
||||
xh = (struct tmem_xhandle *)p;
|
||||
p += sizeof(struct tmem_xhandle);
|
||||
(void)zcache_flush(xh->client_id, xh->pool_id, &xh->oid, xh->index);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ramster_remote_flobj_handler(struct r2net_msg *msg,
|
||||
u32 len, void *data, void **ret_data)
|
||||
{
|
||||
struct tmem_xhandle *xh;
|
||||
char *p = (char *)msg->buf;
|
||||
|
||||
xh = (struct tmem_xhandle *)p;
|
||||
p += sizeof(struct tmem_xhandle);
|
||||
(void)zcache_flush_object(xh->client_id, xh->pool_id, &xh->oid);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ramster_remote_async_get(struct tmem_xhandle *xh, bool free, int remotenode,
|
||||
size_t expect_size, uint8_t expect_cksum,
|
||||
void *extra)
|
||||
{
|
||||
int ret = -1, status;
|
||||
struct r2nm_node *node = NULL;
|
||||
struct kvec vec[1];
|
||||
size_t veclen = 1;
|
||||
u32 msg_type;
|
||||
|
||||
node = r2nm_get_node_by_num(remotenode);
|
||||
if (node == NULL)
|
||||
goto out;
|
||||
xh->client_id = r2nm_this_node(); /* which node is getting */
|
||||
xh->xh_data_cksum = expect_cksum;
|
||||
xh->xh_data_size = expect_size;
|
||||
xh->extra = extra;
|
||||
vec[0].iov_len = sizeof(*xh);
|
||||
vec[0].iov_base = xh;
|
||||
if (free)
|
||||
msg_type = RMSTR_TMEM_ASYNC_GET_AND_FREE_REQUEST;
|
||||
else
|
||||
msg_type = RMSTR_TMEM_ASYNC_GET_REQUEST;
|
||||
ret = r2net_send_message_vec(msg_type, RMSTR_KEY,
|
||||
vec, veclen, remotenode, &status);
|
||||
r2nm_node_put(node);
|
||||
if (ret < 0) {
|
||||
/* FIXME handle bad message possibilities here? */
|
||||
pr_err("UNTESTED ret<0 in ramster_remote_async_get\n");
|
||||
}
|
||||
ret = status;
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
#ifdef RAMSTER_TESTING
|
||||
/* leave me here to see if it catches a weird crash */
|
||||
static void ramster_check_irq_counts(void)
|
||||
{
|
||||
static int last_hardirq_cnt, last_softirq_cnt, last_preempt_cnt;
|
||||
int cur_hardirq_cnt, cur_softirq_cnt, cur_preempt_cnt;
|
||||
|
||||
cur_hardirq_cnt = hardirq_count() >> HARDIRQ_SHIFT;
|
||||
if (cur_hardirq_cnt > last_hardirq_cnt) {
|
||||
last_hardirq_cnt = cur_hardirq_cnt;
|
||||
if (!(last_hardirq_cnt&(last_hardirq_cnt-1)))
|
||||
pr_err("RAMSTER TESTING RRP hardirq_count=%d\n",
|
||||
last_hardirq_cnt);
|
||||
}
|
||||
cur_softirq_cnt = softirq_count() >> SOFTIRQ_SHIFT;
|
||||
if (cur_softirq_cnt > last_softirq_cnt) {
|
||||
last_softirq_cnt = cur_softirq_cnt;
|
||||
if (!(last_softirq_cnt&(last_softirq_cnt-1)))
|
||||
pr_err("RAMSTER TESTING RRP softirq_count=%d\n",
|
||||
last_softirq_cnt);
|
||||
}
|
||||
cur_preempt_cnt = preempt_count() & PREEMPT_MASK;
|
||||
if (cur_preempt_cnt > last_preempt_cnt) {
|
||||
last_preempt_cnt = cur_preempt_cnt;
|
||||
if (!(last_preempt_cnt&(last_preempt_cnt-1)))
|
||||
pr_err("RAMSTER TESTING RRP preempt_count=%d\n",
|
||||
last_preempt_cnt);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
int ramster_remote_put(struct tmem_xhandle *xh, char *data, size_t size,
|
||||
bool ephemeral, int *remotenode)
|
||||
{
|
||||
int nodenum, ret = -1, status;
|
||||
struct r2nm_node *node = NULL;
|
||||
struct kvec vec[2];
|
||||
size_t veclen = 2;
|
||||
u32 msg_type;
|
||||
#ifdef RAMSTER_TESTING
|
||||
struct r2net_node *nn;
|
||||
#endif
|
||||
|
||||
BUG_ON(size > RMSTR_R2NET_MAX_LEN);
|
||||
xh->client_id = r2nm_this_node(); /* which node is putting */
|
||||
vec[0].iov_len = sizeof(*xh);
|
||||
vec[0].iov_base = xh;
|
||||
vec[1].iov_len = size;
|
||||
vec[1].iov_base = data;
|
||||
node = r2net_target_node;
|
||||
if (!node)
|
||||
goto out;
|
||||
|
||||
nodenum = r2net_target_nodenum;
|
||||
|
||||
r2nm_node_get(node);
|
||||
|
||||
#ifdef RAMSTER_TESTING
|
||||
nn = r2net_nn_from_num(nodenum);
|
||||
WARN_ON_ONCE(nn->nn_persistent_error || !nn->nn_sc_valid);
|
||||
#endif
|
||||
|
||||
if (ephemeral)
|
||||
msg_type = RMSTR_TMEM_PUT_EPH;
|
||||
else
|
||||
msg_type = RMSTR_TMEM_PUT_PERS;
|
||||
#ifdef RAMSTER_TESTING
|
||||
/* leave me here to see if it catches a weird crash */
|
||||
ramster_check_irq_counts();
|
||||
#endif
|
||||
|
||||
ret = r2net_send_message_vec(msg_type, RMSTR_KEY, vec, veclen,
|
||||
nodenum, &status);
|
||||
#ifdef RAMSTER_TESTING
|
||||
if (ret != 0) {
|
||||
static unsigned long cnt;
|
||||
cnt++;
|
||||
if (!(cnt&(cnt-1)))
|
||||
pr_err("ramster_remote_put: message failed, ret=%d, cnt=%lu\n",
|
||||
ret, cnt);
|
||||
ret = -1;
|
||||
}
|
||||
#endif
|
||||
if (ret < 0)
|
||||
ret = -1;
|
||||
else {
|
||||
ret = status;
|
||||
*remotenode = nodenum;
|
||||
}
|
||||
|
||||
r2nm_node_put(node);
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ramster_remote_flush(struct tmem_xhandle *xh, int remotenode)
|
||||
{
|
||||
int ret = -1, status;
|
||||
struct r2nm_node *node = NULL;
|
||||
struct kvec vec[1];
|
||||
size_t veclen = 1;
|
||||
|
||||
node = r2nm_get_node_by_num(remotenode);
|
||||
BUG_ON(node == NULL);
|
||||
xh->client_id = r2nm_this_node(); /* which node is flushing */
|
||||
vec[0].iov_len = sizeof(*xh);
|
||||
vec[0].iov_base = xh;
|
||||
BUG_ON(irqs_disabled());
|
||||
BUG_ON(in_softirq());
|
||||
ret = r2net_send_message_vec(RMSTR_TMEM_FLUSH, RMSTR_KEY,
|
||||
vec, veclen, remotenode, &status);
|
||||
r2nm_node_put(node);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ramster_remote_flush_object(struct tmem_xhandle *xh, int remotenode)
|
||||
{
|
||||
int ret = -1, status;
|
||||
struct r2nm_node *node = NULL;
|
||||
struct kvec vec[1];
|
||||
size_t veclen = 1;
|
||||
|
||||
node = r2nm_get_node_by_num(remotenode);
|
||||
BUG_ON(node == NULL);
|
||||
xh->client_id = r2nm_this_node(); /* which node is flobjing */
|
||||
vec[0].iov_len = sizeof(*xh);
|
||||
vec[0].iov_base = xh;
|
||||
ret = r2net_send_message_vec(RMSTR_TMEM_FLOBJ, RMSTR_KEY,
|
||||
vec, veclen, remotenode, &status);
|
||||
r2nm_node_put(node);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Handler registration
|
||||
*/
|
||||
|
||||
static LIST_HEAD(r2net_unreg_list);
|
||||
|
||||
static void r2net_unregister_handlers(void)
|
||||
{
|
||||
r2net_unregister_handler_list(&r2net_unreg_list);
|
||||
}
|
||||
|
||||
int r2net_register_handlers(void)
|
||||
{
|
||||
int status;
|
||||
|
||||
status = r2net_register_handler(RMSTR_TMEM_PUT_EPH, RMSTR_KEY,
|
||||
RMSTR_R2NET_MAX_LEN,
|
||||
ramster_remote_put_handler,
|
||||
NULL, NULL, &r2net_unreg_list);
|
||||
if (status)
|
||||
goto bail;
|
||||
|
||||
status = r2net_register_handler(RMSTR_TMEM_PUT_PERS, RMSTR_KEY,
|
||||
RMSTR_R2NET_MAX_LEN,
|
||||
ramster_remote_put_handler,
|
||||
NULL, NULL, &r2net_unreg_list);
|
||||
if (status)
|
||||
goto bail;
|
||||
|
||||
status = r2net_register_handler(RMSTR_TMEM_ASYNC_GET_REQUEST, RMSTR_KEY,
|
||||
RMSTR_R2NET_MAX_LEN,
|
||||
ramster_remote_async_get_request_handler,
|
||||
NULL, NULL,
|
||||
&r2net_unreg_list);
|
||||
if (status)
|
||||
goto bail;
|
||||
|
||||
status = r2net_register_handler(RMSTR_TMEM_ASYNC_GET_AND_FREE_REQUEST,
|
||||
RMSTR_KEY, RMSTR_R2NET_MAX_LEN,
|
||||
ramster_remote_async_get_request_handler,
|
||||
NULL, NULL,
|
||||
&r2net_unreg_list);
|
||||
if (status)
|
||||
goto bail;
|
||||
|
||||
status = r2net_register_handler(RMSTR_TMEM_ASYNC_GET_REPLY, RMSTR_KEY,
|
||||
RMSTR_R2NET_MAX_LEN,
|
||||
ramster_remote_async_get_reply_handler,
|
||||
NULL, NULL,
|
||||
&r2net_unreg_list);
|
||||
if (status)
|
||||
goto bail;
|
||||
|
||||
status = r2net_register_handler(RMSTR_TMEM_FLUSH, RMSTR_KEY,
|
||||
RMSTR_R2NET_MAX_LEN,
|
||||
ramster_remote_flush_handler,
|
||||
NULL, NULL,
|
||||
&r2net_unreg_list);
|
||||
if (status)
|
||||
goto bail;
|
||||
|
||||
status = r2net_register_handler(RMSTR_TMEM_FLOBJ, RMSTR_KEY,
|
||||
RMSTR_R2NET_MAX_LEN,
|
||||
ramster_remote_flobj_handler,
|
||||
NULL, NULL,
|
||||
&r2net_unreg_list);
|
||||
if (status)
|
||||
goto bail;
|
||||
|
||||
pr_info("ramster: r2net handlers registered\n");
|
||||
|
||||
bail:
|
||||
if (status) {
|
||||
r2net_unregister_handlers();
|
||||
pr_err("ramster: couldn't register r2net handlers\n");
|
||||
}
|
||||
return status;
|
||||
}
|
|
@ -1,118 +0,0 @@
/*
 * ramster.h
 *
 * Peer-to-peer transcendent memory
 *
 * Copyright (c) 2009-2012, Dan Magenheimer, Oracle Corp.
 */

#ifndef _RAMSTER_H_
#define _RAMSTER_H_

/*
 * format of remote pampd:
 *   bit 0 == intransit
 *   bit 1 == is_remote... if this bit is set, then
 *   bit 2-9 == remotenode
 *   bit 10-22 == size
 *   bit 23-30 == cksum
 */
#define FAKE_PAMPD_INTRANSIT_BITS	1
#define FAKE_PAMPD_ISREMOTE_BITS	1
#define FAKE_PAMPD_REMOTENODE_BITS	8
#define FAKE_PAMPD_REMOTESIZE_BITS	13
#define FAKE_PAMPD_CHECKSUM_BITS	8

#define FAKE_PAMPD_INTRANSIT_SHIFT	0
#define FAKE_PAMPD_ISREMOTE_SHIFT	(FAKE_PAMPD_INTRANSIT_SHIFT + \
					 FAKE_PAMPD_INTRANSIT_BITS)
#define FAKE_PAMPD_REMOTENODE_SHIFT	(FAKE_PAMPD_ISREMOTE_SHIFT + \
					 FAKE_PAMPD_ISREMOTE_BITS)
#define FAKE_PAMPD_REMOTESIZE_SHIFT	(FAKE_PAMPD_REMOTENODE_SHIFT + \
					 FAKE_PAMPD_REMOTENODE_BITS)
#define FAKE_PAMPD_CHECKSUM_SHIFT	(FAKE_PAMPD_REMOTESIZE_SHIFT + \
					 FAKE_PAMPD_REMOTESIZE_BITS)

#define FAKE_PAMPD_MASK(x)		((1UL << (x)) - 1)

static inline void *pampd_make_remote(int remotenode, size_t size,
				      unsigned char cksum)
{
	unsigned long fake_pampd = 0;
	fake_pampd |= 1UL << FAKE_PAMPD_ISREMOTE_SHIFT;
	fake_pampd |= ((unsigned long)remotenode &
			FAKE_PAMPD_MASK(FAKE_PAMPD_REMOTENODE_BITS)) <<
				FAKE_PAMPD_REMOTENODE_SHIFT;
	fake_pampd |= ((unsigned long)size &
			FAKE_PAMPD_MASK(FAKE_PAMPD_REMOTESIZE_BITS)) <<
				FAKE_PAMPD_REMOTESIZE_SHIFT;
	fake_pampd |= ((unsigned long)cksum &
			FAKE_PAMPD_MASK(FAKE_PAMPD_CHECKSUM_BITS)) <<
				FAKE_PAMPD_CHECKSUM_SHIFT;
	return (void *)fake_pampd;
}

static inline unsigned int pampd_remote_node(void *pampd)
{
	unsigned long fake_pampd = (unsigned long)pampd;
	return (fake_pampd >> FAKE_PAMPD_REMOTENODE_SHIFT) &
		FAKE_PAMPD_MASK(FAKE_PAMPD_REMOTENODE_BITS);
}

static inline unsigned int pampd_remote_size(void *pampd)
{
	unsigned long fake_pampd = (unsigned long)pampd;
	return (fake_pampd >> FAKE_PAMPD_REMOTESIZE_SHIFT) &
		FAKE_PAMPD_MASK(FAKE_PAMPD_REMOTESIZE_BITS);
}

static inline unsigned char pampd_remote_cksum(void *pampd)
{
	unsigned long fake_pampd = (unsigned long)pampd;
	return (fake_pampd >> FAKE_PAMPD_CHECKSUM_SHIFT) &
		FAKE_PAMPD_MASK(FAKE_PAMPD_CHECKSUM_BITS);
}

static inline bool pampd_is_remote(void *pampd)
{
	unsigned long fake_pampd = (unsigned long)pampd;
	return (fake_pampd >> FAKE_PAMPD_ISREMOTE_SHIFT) &
		FAKE_PAMPD_MASK(FAKE_PAMPD_ISREMOTE_BITS);
}

static inline bool pampd_is_intransit(void *pampd)
{
	unsigned long fake_pampd = (unsigned long)pampd;
	return (fake_pampd >> FAKE_PAMPD_INTRANSIT_SHIFT) &
		FAKE_PAMPD_MASK(FAKE_PAMPD_INTRANSIT_BITS);
}

/* note that it is a BUG for intransit to be set without isremote also set */
static inline void *pampd_mark_intransit(void *pampd)
{
	unsigned long fake_pampd = (unsigned long)pampd;

	fake_pampd |= 1UL << FAKE_PAMPD_ISREMOTE_SHIFT;
	fake_pampd |= 1UL << FAKE_PAMPD_INTRANSIT_SHIFT;
	return (void *)fake_pampd;
}

static inline void *pampd_mask_intransit_and_remote(void *marked_pampd)
{
	unsigned long pampd = (unsigned long)marked_pampd;

	pampd &= ~(1UL << FAKE_PAMPD_INTRANSIT_SHIFT);
	pampd &= ~(1UL << FAKE_PAMPD_ISREMOTE_SHIFT);
	return (void *)pampd;
}

extern int ramster_remote_async_get(struct tmem_xhandle *,
				    bool, int, size_t, uint8_t, void *extra);
extern int ramster_remote_put(struct tmem_xhandle *, char *, size_t,
			      bool, int *);
extern int ramster_remote_flush(struct tmem_xhandle *, int);
extern int ramster_remote_flush_object(struct tmem_xhandle *, int);
extern int r2net_register_handlers(void);
extern int r2net_remote_target_node_set(int);

#endif /* _TMEM_H */
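
The helpers above pack a remote page descriptor's node number, size and checksum into the bit fields of a fake pampd pointer. A minimal sketch of the round trip with arbitrary example values (the BUG_ON checks are only illustrative):

#include <linux/bug.h>
#include "ramster.h"

/* Illustrative only; values are arbitrary but fit the bit widths above. */
static void pampd_encoding_example(void)
{
	void *pampd = pampd_make_remote(3, 4096, 0xab);

	BUG_ON(!pampd_is_remote(pampd));
	BUG_ON(pampd_remote_node(pampd) != 3);		/* 8-bit node field */
	BUG_ON(pampd_remote_size(pampd) != 4096);	/* 13-bit size field */
	BUG_ON(pampd_remote_cksum(pampd) != 0xab);	/* 8-bit checksum */

	/* marking in-transit sets bit 0 and keeps the is_remote bit set */
	pampd = pampd_mark_intransit(pampd);
	BUG_ON(!pampd_is_intransit(pampd));
}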
@ -1,851 +0,0 @@
|
|||
/*
|
||||
* In-kernel transcendent memory (generic implementation)
|
||||
*
|
||||
* Copyright (c) 2009-2011, Dan Magenheimer, Oracle Corp.
|
||||
*
|
||||
* The primary purpose of Transcedent Memory ("tmem") is to map object-oriented
|
||||
* "handles" (triples containing a pool id, and object id, and an index), to
|
||||
* pages in a page-accessible memory (PAM). Tmem references the PAM pages via
|
||||
* an abstract "pampd" (PAM page-descriptor), which can be operated on by a
|
||||
* set of functions (pamops). Each pampd contains some representation of
|
||||
* PAGE_SIZE bytes worth of data. Tmem must support potentially millions of
|
||||
* pages and must be able to insert, find, and delete these pages at a
|
||||
* potential frequency of thousands per second concurrently across many CPUs,
|
||||
* (and, if used with KVM, across many vcpus across many guests).
|
||||
* Tmem is tracked with a hierarchy of data structures, organized by
|
||||
* the elements in a handle-tuple: pool_id, object_id, and page index.
|
||||
* One or more "clients" (e.g. guests) each provide one or more tmem_pools.
|
||||
* Each pool, contains a hash table of rb_trees of tmem_objs. Each
|
||||
* tmem_obj contains a radix-tree-like tree of pointers, with intermediate
|
||||
* nodes called tmem_objnodes. Each leaf pointer in this tree points to
|
||||
* a pampd, which is accessible only through a small set of callbacks
|
||||
* registered by the PAM implementation (see tmem_register_pamops). Tmem
|
||||
* does all memory allocation via a set of callbacks registered by the tmem
|
||||
* host implementation (e.g. see tmem_register_hostops).
|
||||
*/
|
||||
|
||||
#include <linux/list.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/delay.h>
|
||||
|
||||
#include "tmem.h"
|
||||
|
||||
/* data structure sentinels used for debugging... see tmem.h */
|
||||
#define POOL_SENTINEL 0x87658765
|
||||
#define OBJ_SENTINEL 0x12345678
|
||||
#define OBJNODE_SENTINEL 0xfedcba09
|
||||
|
||||
/*
|
||||
* A tmem host implementation must use this function to register callbacks
|
||||
* for memory allocation.
|
||||
*/
|
||||
static struct tmem_hostops tmem_hostops;
|
||||
|
||||
static void tmem_objnode_tree_init(void);
|
||||
|
||||
void tmem_register_hostops(struct tmem_hostops *m)
|
||||
{
|
||||
tmem_objnode_tree_init();
|
||||
tmem_hostops = *m;
|
||||
}
|
||||
|
||||
/*
|
||||
* A tmem host implementation must use this function to register
|
||||
* callbacks for a page-accessible memory (PAM) implementation
|
||||
*/
|
||||
static struct tmem_pamops tmem_pamops;
|
||||
|
||||
void tmem_register_pamops(struct tmem_pamops *m)
|
||||
{
|
||||
tmem_pamops = *m;
|
||||
}
|
||||
|
||||
/*
|
||||
* Oid's are potentially very sparse and tmem_objs may have an indeterminately
|
||||
* short life, being added and deleted at a relatively high frequency.
|
||||
* So an rb_tree is an ideal data structure to manage tmem_objs. But because
|
||||
* of the potentially huge number of tmem_objs, each pool manages a hashtable
|
||||
* of rb_trees to reduce search, insert, delete, and rebalancing time.
|
||||
* Each hashbucket also has a lock to manage concurrent access.
|
||||
*
|
||||
* The following routines manage tmem_objs. When any tmem_obj is accessed,
|
||||
* the hashbucket lock must be held.
|
||||
*/
|
||||
|
||||
/* searches for object==oid in pool, returns locked object if found */
|
||||
static struct tmem_obj *tmem_obj_find(struct tmem_hashbucket *hb,
|
||||
struct tmem_oid *oidp)
|
||||
{
|
||||
struct rb_node *rbnode;
|
||||
struct tmem_obj *obj;
|
||||
|
||||
rbnode = hb->obj_rb_root.rb_node;
|
||||
while (rbnode) {
|
||||
BUG_ON(RB_EMPTY_NODE(rbnode));
|
||||
obj = rb_entry(rbnode, struct tmem_obj, rb_tree_node);
|
||||
switch (tmem_oid_compare(oidp, &obj->oid)) {
|
||||
case 0: /* equal */
|
||||
goto out;
|
||||
case -1:
|
||||
rbnode = rbnode->rb_left;
|
||||
break;
|
||||
case 1:
|
||||
rbnode = rbnode->rb_right;
|
||||
break;
|
||||
}
|
||||
}
|
||||
obj = NULL;
|
||||
out:
|
||||
return obj;
|
||||
}
|
||||
|
||||
static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *);
|
||||
|
||||
/* free an object that has no more pampds in it */
|
||||
static void tmem_obj_free(struct tmem_obj *obj, struct tmem_hashbucket *hb)
|
||||
{
|
||||
struct tmem_pool *pool;
|
||||
|
||||
BUG_ON(obj == NULL);
|
||||
ASSERT_SENTINEL(obj, OBJ);
|
||||
BUG_ON(obj->pampd_count > 0);
|
||||
pool = obj->pool;
|
||||
BUG_ON(pool == NULL);
|
||||
if (obj->objnode_tree_root != NULL) /* may be "stump" with no leaves */
|
||||
tmem_pampd_destroy_all_in_obj(obj);
|
||||
BUG_ON(obj->objnode_tree_root != NULL);
|
||||
BUG_ON((long)obj->objnode_count != 0);
|
||||
atomic_dec(&pool->obj_count);
|
||||
BUG_ON(atomic_read(&pool->obj_count) < 0);
|
||||
INVERT_SENTINEL(obj, OBJ);
|
||||
obj->pool = NULL;
|
||||
tmem_oid_set_invalid(&obj->oid);
|
||||
rb_erase(&obj->rb_tree_node, &hb->obj_rb_root);
|
||||
}
|
||||
|
||||
/*
|
||||
* initialize, and insert an tmem_object_root (called only if find failed)
|
||||
*/
|
||||
static void tmem_obj_init(struct tmem_obj *obj, struct tmem_hashbucket *hb,
|
||||
struct tmem_pool *pool,
|
||||
struct tmem_oid *oidp)
|
||||
{
|
||||
struct rb_root *root = &hb->obj_rb_root;
|
||||
struct rb_node **new = &(root->rb_node), *parent = NULL;
|
||||
struct tmem_obj *this;
|
||||
|
||||
BUG_ON(pool == NULL);
|
||||
atomic_inc(&pool->obj_count);
|
||||
obj->objnode_tree_height = 0;
|
||||
obj->objnode_tree_root = NULL;
|
||||
obj->pool = pool;
|
||||
obj->oid = *oidp;
|
||||
obj->objnode_count = 0;
|
||||
obj->pampd_count = 0;
|
||||
(*tmem_pamops.new_obj)(obj);
|
||||
SET_SENTINEL(obj, OBJ);
|
||||
while (*new) {
|
||||
BUG_ON(RB_EMPTY_NODE(*new));
|
||||
this = rb_entry(*new, struct tmem_obj, rb_tree_node);
|
||||
parent = *new;
|
||||
switch (tmem_oid_compare(oidp, &this->oid)) {
|
||||
case 0:
|
||||
BUG(); /* already present; should never happen! */
|
||||
break;
|
||||
case -1:
|
||||
new = &(*new)->rb_left;
|
||||
break;
|
||||
case 1:
|
||||
new = &(*new)->rb_right;
|
||||
break;
|
||||
}
|
||||
}
|
||||
rb_link_node(&obj->rb_tree_node, parent, new);
|
||||
rb_insert_color(&obj->rb_tree_node, root);
|
||||
}
|
||||
|
||||
/*
|
||||
* Tmem is managed as a set of tmem_pools with certain attributes, such as
|
||||
* "ephemeral" vs "persistent". These attributes apply to all tmem_objs
|
||||
* and all pampds that belong to a tmem_pool. A tmem_pool is created
|
||||
* or deleted relatively rarely (for example, when a filesystem is
|
||||
* mounted or unmounted.
|
||||
*/
|
||||
|
||||
/* flush all data from a pool and, optionally, free it */
|
||||
static void tmem_pool_flush(struct tmem_pool *pool, bool destroy)
|
||||
{
|
||||
struct rb_node *rbnode;
|
||||
struct tmem_obj *obj;
|
||||
struct tmem_hashbucket *hb = &pool->hashbucket[0];
|
||||
int i;
|
||||
|
||||
BUG_ON(pool == NULL);
|
||||
for (i = 0; i < TMEM_HASH_BUCKETS; i++, hb++) {
|
||||
spin_lock(&hb->lock);
|
||||
rbnode = rb_first(&hb->obj_rb_root);
|
||||
while (rbnode != NULL) {
|
||||
obj = rb_entry(rbnode, struct tmem_obj, rb_tree_node);
|
||||
rbnode = rb_next(rbnode);
|
||||
tmem_pampd_destroy_all_in_obj(obj);
|
||||
tmem_obj_free(obj, hb);
|
||||
(*tmem_hostops.obj_free)(obj, pool);
|
||||
}
|
||||
spin_unlock(&hb->lock);
|
||||
}
|
||||
if (destroy)
|
||||
list_del(&pool->pool_list);
|
||||
}
|
||||
|
||||
/*
|
||||
* A tmem_obj contains a radix-tree-like tree in which the intermediate
|
||||
* nodes are called tmem_objnodes. (The kernel lib/radix-tree.c implementation
|
||||
* is very specialized and tuned for specific uses and is not particularly
|
||||
* suited for use from this code, though some code from the core algorithms has
|
||||
* been reused, thus the copyright notices below). Each tmem_objnode contains
|
||||
* a set of pointers which point to either a set of intermediate tmem_objnodes
|
||||
* or a set of of pampds.
|
||||
*
|
||||
* Portions Copyright (C) 2001 Momchil Velikov
|
||||
* Portions Copyright (C) 2001 Christoph Hellwig
|
||||
* Portions Copyright (C) 2005 SGI, Christoph Lameter <clameter@sgi.com>
|
||||
*/
|
||||
|
||||
struct tmem_objnode_tree_path {
|
||||
struct tmem_objnode *objnode;
|
||||
int offset;
|
||||
};
|
||||
|
||||
/* objnode height_to_maxindex translation */
static unsigned long tmem_objnode_tree_h2max[OBJNODE_TREE_MAX_PATH + 1];

static void tmem_objnode_tree_init(void)
{
        unsigned int ht, tmp;

        for (ht = 0; ht < ARRAY_SIZE(tmem_objnode_tree_h2max); ht++) {
                tmp = ht * OBJNODE_TREE_MAP_SHIFT;
                if (tmp >= OBJNODE_TREE_INDEX_BITS)
                        tmem_objnode_tree_h2max[ht] = ~0UL;
                else
                        tmem_objnode_tree_h2max[ht] =
                            (~0UL >> (OBJNODE_TREE_INDEX_BITS - tmp - 1)) >> 1;
        }
}

static struct tmem_objnode *tmem_objnode_alloc(struct tmem_obj *obj)
{
        struct tmem_objnode *objnode;

        ASSERT_SENTINEL(obj, OBJ);
        BUG_ON(obj->pool == NULL);
        ASSERT_SENTINEL(obj->pool, POOL);
        objnode = (*tmem_hostops.objnode_alloc)(obj->pool);
        if (unlikely(objnode == NULL))
                goto out;
        objnode->obj = obj;
        SET_SENTINEL(objnode, OBJNODE);
        memset(&objnode->slots, 0, sizeof(objnode->slots));
        objnode->slots_in_use = 0;
        obj->objnode_count++;
out:
        return objnode;
}

static void tmem_objnode_free(struct tmem_objnode *objnode)
{
        struct tmem_pool *pool;
        int i;

        BUG_ON(objnode == NULL);
        for (i = 0; i < OBJNODE_TREE_MAP_SIZE; i++)
                BUG_ON(objnode->slots[i] != NULL);
        ASSERT_SENTINEL(objnode, OBJNODE);
        INVERT_SENTINEL(objnode, OBJNODE);
        BUG_ON(objnode->obj == NULL);
        ASSERT_SENTINEL(objnode->obj, OBJ);
        pool = objnode->obj->pool;
        BUG_ON(pool == NULL);
        ASSERT_SENTINEL(pool, POOL);
        objnode->obj->objnode_count--;
        objnode->obj = NULL;
        (*tmem_hostops.objnode_free)(objnode, pool);
}

/*
 * lookup index in object and return associated pampd (or NULL if not found)
 */
static void **__tmem_pampd_lookup_in_obj(struct tmem_obj *obj, uint32_t index)
{
        unsigned int height, shift;
        struct tmem_objnode **slot = NULL;

        BUG_ON(obj == NULL);
        ASSERT_SENTINEL(obj, OBJ);
        BUG_ON(obj->pool == NULL);
        ASSERT_SENTINEL(obj->pool, POOL);

        height = obj->objnode_tree_height;
        if (index > tmem_objnode_tree_h2max[obj->objnode_tree_height])
                goto out;
        if (height == 0 && obj->objnode_tree_root) {
                slot = &obj->objnode_tree_root;
                goto out;
        }
        shift = (height-1) * OBJNODE_TREE_MAP_SHIFT;
        slot = &obj->objnode_tree_root;
        while (height > 0) {
                if (*slot == NULL)
                        goto out;
                slot = (struct tmem_objnode **)
                        ((*slot)->slots +
                         ((index >> shift) & OBJNODE_TREE_MAP_MASK));
                shift -= OBJNODE_TREE_MAP_SHIFT;
                height--;
        }
out:
        return slot != NULL ? (void **)slot : NULL;
}

static void *tmem_pampd_lookup_in_obj(struct tmem_obj *obj, uint32_t index)
{
        struct tmem_objnode **slot;

        slot = (struct tmem_objnode **)__tmem_pampd_lookup_in_obj(obj, index);
        return slot != NULL ? *slot : NULL;
}

static void *tmem_pampd_replace_in_obj(struct tmem_obj *obj, uint32_t index,
                                        void *new_pampd, bool no_free)
{
        struct tmem_objnode **slot;
        void *ret = NULL;

        slot = (struct tmem_objnode **)__tmem_pampd_lookup_in_obj(obj, index);
        if ((slot != NULL) && (*slot != NULL)) {
                void *old_pampd = *(void **)slot;
                *(void **)slot = new_pampd;
                if (!no_free)
                        (*tmem_pamops.free)(old_pampd, obj->pool,
                                                NULL, 0, false);
                ret = new_pampd;
        }
        return ret;
}

static int tmem_pampd_add_to_obj(struct tmem_obj *obj, uint32_t index,
                                        void *pampd)
{
        int ret = 0;
        struct tmem_objnode *objnode = NULL, *newnode, *slot;
        unsigned int height, shift;
        int offset = 0;

        /* if necessary, extend the tree to be higher */
        if (index > tmem_objnode_tree_h2max[obj->objnode_tree_height]) {
                height = obj->objnode_tree_height + 1;
                if (index > tmem_objnode_tree_h2max[height])
                        while (index > tmem_objnode_tree_h2max[height])
                                height++;
                if (obj->objnode_tree_root == NULL) {
                        obj->objnode_tree_height = height;
                        goto insert;
                }
                do {
                        newnode = tmem_objnode_alloc(obj);
                        if (!newnode) {
                                ret = -ENOMEM;
                                goto out;
                        }
                        newnode->slots[0] = obj->objnode_tree_root;
                        newnode->slots_in_use = 1;
                        obj->objnode_tree_root = newnode;
                        obj->objnode_tree_height++;
                } while (height > obj->objnode_tree_height);
        }
insert:
        slot = obj->objnode_tree_root;
        height = obj->objnode_tree_height;
        shift = (height-1) * OBJNODE_TREE_MAP_SHIFT;
        while (height > 0) {
                if (slot == NULL) {
                        /* add a child objnode. */
                        slot = tmem_objnode_alloc(obj);
                        if (!slot) {
                                ret = -ENOMEM;
                                goto out;
                        }
                        if (objnode) {
                                objnode->slots[offset] = slot;
                                objnode->slots_in_use++;
                        } else
                                obj->objnode_tree_root = slot;
                }
                /* go down a level */
                offset = (index >> shift) & OBJNODE_TREE_MAP_MASK;
                objnode = slot;
                slot = objnode->slots[offset];
                shift -= OBJNODE_TREE_MAP_SHIFT;
                height--;
        }
        BUG_ON(slot != NULL);
        if (objnode) {
                objnode->slots_in_use++;
                objnode->slots[offset] = pampd;
        } else
                obj->objnode_tree_root = pampd;
        obj->pampd_count++;
out:
        return ret;
}

static void *tmem_pampd_delete_from_obj(struct tmem_obj *obj, uint32_t index)
{
        struct tmem_objnode_tree_path path[OBJNODE_TREE_MAX_PATH + 1];
        struct tmem_objnode_tree_path *pathp = path;
        struct tmem_objnode *slot = NULL;
        unsigned int height, shift;
        int offset;

        BUG_ON(obj == NULL);
        ASSERT_SENTINEL(obj, OBJ);
        BUG_ON(obj->pool == NULL);
        ASSERT_SENTINEL(obj->pool, POOL);
        height = obj->objnode_tree_height;
        if (index > tmem_objnode_tree_h2max[height])
                goto out;
        slot = obj->objnode_tree_root;
        if (height == 0 && obj->objnode_tree_root) {
                obj->objnode_tree_root = NULL;
                goto out;
        }
        shift = (height - 1) * OBJNODE_TREE_MAP_SHIFT;
        pathp->objnode = NULL;
        do {
                if (slot == NULL)
                        goto out;
                pathp++;
                offset = (index >> shift) & OBJNODE_TREE_MAP_MASK;
                pathp->offset = offset;
                pathp->objnode = slot;
                slot = slot->slots[offset];
                shift -= OBJNODE_TREE_MAP_SHIFT;
                height--;
        } while (height > 0);
        if (slot == NULL)
                goto out;
        while (pathp->objnode) {
                pathp->objnode->slots[pathp->offset] = NULL;
                pathp->objnode->slots_in_use--;
                if (pathp->objnode->slots_in_use) {
                        if (pathp->objnode == obj->objnode_tree_root) {
                                while (obj->objnode_tree_height > 0 &&
                                    obj->objnode_tree_root->slots_in_use == 1 &&
                                    obj->objnode_tree_root->slots[0]) {
                                        struct tmem_objnode *to_free =
                                                obj->objnode_tree_root;

                                        obj->objnode_tree_root =
                                                        to_free->slots[0];
                                        obj->objnode_tree_height--;
                                        to_free->slots[0] = NULL;
                                        to_free->slots_in_use = 0;
                                        tmem_objnode_free(to_free);
                                }
                        }
                        goto out;
                }
                tmem_objnode_free(pathp->objnode); /* 0 slots used, free it */
                pathp--;
        }
        obj->objnode_tree_height = 0;
        obj->objnode_tree_root = NULL;

out:
        if (slot != NULL)
                obj->pampd_count--;
        BUG_ON(obj->pampd_count < 0);
        return slot;
}

/* recursively walk the objnode_tree destroying pampds and objnodes */
static void tmem_objnode_node_destroy(struct tmem_obj *obj,
                                        struct tmem_objnode *objnode,
                                        unsigned int ht)
{
        int i;

        if (ht == 0)
                return;
        for (i = 0; i < OBJNODE_TREE_MAP_SIZE; i++) {
                if (objnode->slots[i]) {
                        if (ht == 1) {
                                obj->pampd_count--;
                                (*tmem_pamops.free)(objnode->slots[i],
                                                obj->pool, NULL, 0, true);
                                objnode->slots[i] = NULL;
                                continue;
                        }
                        tmem_objnode_node_destroy(obj, objnode->slots[i], ht-1);
                        tmem_objnode_free(objnode->slots[i]);
                        objnode->slots[i] = NULL;
                }
        }
}

static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj)
{
        if (obj->objnode_tree_root == NULL)
                return;
        if (obj->objnode_tree_height == 0) {
                obj->pampd_count--;
                (*tmem_pamops.free)(obj->objnode_tree_root,
                                        obj->pool, NULL, 0, true);
        } else {
                tmem_objnode_node_destroy(obj, obj->objnode_tree_root,
                                        obj->objnode_tree_height);
                tmem_objnode_free(obj->objnode_tree_root);
                obj->objnode_tree_height = 0;
        }
        obj->objnode_tree_root = NULL;
        (*tmem_pamops.free_obj)(obj->pool, obj);
}

/*
 * Tmem is operated on by a set of well-defined actions:
 * "put", "get", "flush", "flush_object", "new pool" and "destroy pool".
 * (The tmem ABI allows for subpages and exchanges but these operations
 * are not included in this implementation.)
 *
 * These "tmem core" operations are implemented in the following functions.
 */

/*
 * "Put" a page, e.g. copy a page from the kernel into newly allocated
 * PAM space (if such space is available).  Tmem_put is complicated by
 * a corner case: What if a page with matching handle already exists in
 * tmem?  To guarantee coherency, one of two actions is necessary: Either
 * the data for the page must be overwritten, or the page must be
 * "flushed" so that the data is not accessible to a subsequent "get".
 * Since these "duplicate puts" are relatively rare, this implementation
 * always flushes for simplicity.
 */
int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
                char *data, size_t size, bool raw, int ephemeral)
{
        struct tmem_obj *obj = NULL, *objfound = NULL, *objnew = NULL;
        void *pampd = NULL, *pampd_del = NULL;
        int ret = -ENOMEM;
        struct tmem_hashbucket *hb;

        hb = &pool->hashbucket[tmem_oid_hash(oidp)];
        spin_lock(&hb->lock);
        obj = objfound = tmem_obj_find(hb, oidp);
        if (obj != NULL) {
                pampd = tmem_pampd_lookup_in_obj(objfound, index);
                if (pampd != NULL) {
                        /* if found, is a dup put, flush the old one */
                        pampd_del = tmem_pampd_delete_from_obj(obj, index);
                        BUG_ON(pampd_del != pampd);
                        (*tmem_pamops.free)(pampd, pool, oidp, index, true);
                        if (obj->pampd_count == 0) {
                                objnew = obj;
                                objfound = NULL;
                        }
                        pampd = NULL;
                }
        } else {
                obj = objnew = (*tmem_hostops.obj_alloc)(pool);
                if (unlikely(obj == NULL)) {
                        ret = -ENOMEM;
                        goto out;
                }
                tmem_obj_init(obj, hb, pool, oidp);
        }
        BUG_ON(obj == NULL);
        BUG_ON(((objnew != obj) && (objfound != obj)) || (objnew == objfound));
        pampd = (*tmem_pamops.create)(data, size, raw, ephemeral,
                                        obj->pool, &obj->oid, index);
        if (unlikely(pampd == NULL))
                goto free;
        ret = tmem_pampd_add_to_obj(obj, index, pampd);
        if (unlikely(ret == -ENOMEM))
                /* may have partially built objnode tree ("stump") */
                goto delete_and_free;
        goto out;

delete_and_free:
        (void)tmem_pampd_delete_from_obj(obj, index);
free:
        if (pampd)
                (*tmem_pamops.free)(pampd, pool, NULL, 0, true);
        if (objnew) {
                tmem_obj_free(objnew, hb);
                (*tmem_hostops.obj_free)(objnew, pool);
        }
out:
        spin_unlock(&hb->lock);
        return ret;
}

void *tmem_localify_get_pampd(struct tmem_pool *pool, struct tmem_oid *oidp,
                                uint32_t index, struct tmem_obj **ret_obj,
                                void **saved_hb)
{
        struct tmem_hashbucket *hb;
        struct tmem_obj *obj = NULL;
        void *pampd = NULL;

        hb = &pool->hashbucket[tmem_oid_hash(oidp)];
        spin_lock(&hb->lock);
        obj = tmem_obj_find(hb, oidp);
        if (likely(obj != NULL))
                pampd = tmem_pampd_lookup_in_obj(obj, index);
        *ret_obj = obj;
        *saved_hb = (void *)hb;
        /* note, hashbucket remains locked */
        return pampd;
}

void tmem_localify_finish(struct tmem_obj *obj, uint32_t index,
                          void *pampd, void *saved_hb, bool delete)
{
        struct tmem_hashbucket *hb = (struct tmem_hashbucket *)saved_hb;

        BUG_ON(!spin_is_locked(&hb->lock));
        if (pampd != NULL) {
                BUG_ON(obj == NULL);
                (void)tmem_pampd_replace_in_obj(obj, index, pampd, 1);
        } else if (delete) {
                BUG_ON(obj == NULL);
                (void)tmem_pampd_delete_from_obj(obj, index);
        }
        spin_unlock(&hb->lock);
}

static int tmem_repatriate(void **ppampd, struct tmem_hashbucket *hb,
                                struct tmem_pool *pool, struct tmem_oid *oidp,
                                uint32_t index, bool free, char *data)
{
        void *old_pampd = *ppampd, *new_pampd = NULL;
        bool intransit = false;
        int ret = 0;

        if (!is_ephemeral(pool))
                new_pampd = (*tmem_pamops.repatriate_preload)(
                                old_pampd, pool, oidp, index, &intransit);
        if (intransit)
                ret = -EAGAIN;
        else if (new_pampd != NULL)
                *ppampd = new_pampd;
        /* must release the hb->lock else repatriate can't sleep */
        spin_unlock(&hb->lock);
        if (!intransit)
                ret = (*tmem_pamops.repatriate)(old_pampd, new_pampd, pool,
                                                oidp, index, free, data);
        return ret;
}

/*
 * "Get" a page, e.g. if one can be found, copy the tmem page with the
 * matching handle from PAM space to the kernel.  By tmem definition,
 * when a "get" is successful on an ephemeral page, the page is "flushed",
 * and when a "get" is successful on a persistent page, the page is retained
 * in tmem.  Note that to preserve
 * coherency, "get" can never be skipped if tmem contains the data.
 * That is, if a get is done with a certain handle and fails, any
 * subsequent "get" must also fail (unless of course there is a
 * "put" done with the same handle).
 */
int tmem_get(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
                char *data, size_t *size, bool raw, int get_and_free)
{
        struct tmem_obj *obj;
        void *pampd;
        bool ephemeral = is_ephemeral(pool);
        int ret = -1;
        struct tmem_hashbucket *hb;
        bool free = (get_and_free == 1) || ((get_and_free == 0) && ephemeral);
        bool lock_held = 0;
        void **ppampd;

again:
        hb = &pool->hashbucket[tmem_oid_hash(oidp)];
        spin_lock(&hb->lock);
        lock_held = 1;
        obj = tmem_obj_find(hb, oidp);
        if (obj == NULL)
                goto out;
        ppampd = __tmem_pampd_lookup_in_obj(obj, index);
        if (ppampd == NULL)
                goto out;
        if (tmem_pamops.is_remote(*ppampd)) {
                ret = tmem_repatriate(ppampd, hb, pool, oidp,
                                        index, free, data);
                lock_held = 0; /* note hb->lock has been unlocked */
                if (ret == -EAGAIN) {
                        /* rare I think, but should cond_resched()??? */
                        usleep_range(10, 1000);
                        goto again;
                } else if (ret != 0) {
                        if (ret != -ENOENT)
                                pr_err("UNTESTED case in tmem_get, ret=%d\n",
                                        ret);
                        ret = -1;
                        goto out;
                }
                goto out;
        }
        if (free)
                pampd = tmem_pampd_delete_from_obj(obj, index);
        else
                pampd = tmem_pampd_lookup_in_obj(obj, index);
        if (pampd == NULL)
                goto out;
        if (free) {
                if (obj->pampd_count == 0) {
                        tmem_obj_free(obj, hb);
                        (*tmem_hostops.obj_free)(obj, pool);
                        obj = NULL;
                }
        }
        if (free)
                ret = (*tmem_pamops.get_data_and_free)(
                                data, size, raw, pampd, pool, oidp, index);
        else
                ret = (*tmem_pamops.get_data)(
                                data, size, raw, pampd, pool, oidp, index);
        if (ret < 0)
                goto out;
        ret = 0;
out:
        if (lock_held)
                spin_unlock(&hb->lock);
        return ret;
}

/*
 * If a page in tmem matches the handle, "flush" this page from tmem such
 * that any subsequent "get" does not succeed (unless, of course, there
 * was another "put" with the same handle).
 */
int tmem_flush_page(struct tmem_pool *pool,
                                struct tmem_oid *oidp, uint32_t index)
{
        struct tmem_obj *obj;
        void *pampd;
        int ret = -1;
        struct tmem_hashbucket *hb;

        hb = &pool->hashbucket[tmem_oid_hash(oidp)];
        spin_lock(&hb->lock);
        obj = tmem_obj_find(hb, oidp);
        if (obj == NULL)
                goto out;
        pampd = tmem_pampd_delete_from_obj(obj, index);
        if (pampd == NULL)
                goto out;
        (*tmem_pamops.free)(pampd, pool, oidp, index, true);
        if (obj->pampd_count == 0) {
                tmem_obj_free(obj, hb);
                (*tmem_hostops.obj_free)(obj, pool);
        }
        ret = 0;

out:
        spin_unlock(&hb->lock);
        return ret;
}

/*
 * If a page in tmem matches the handle, replace the page so that any
 * subsequent "get" gets the new page.  Returns the new page if
 * there was a page to replace, else returns NULL.
 */
int tmem_replace(struct tmem_pool *pool, struct tmem_oid *oidp,
                        uint32_t index, void *new_pampd)
{
        struct tmem_obj *obj;
        int ret = -1;
        struct tmem_hashbucket *hb;

        hb = &pool->hashbucket[tmem_oid_hash(oidp)];
        spin_lock(&hb->lock);
        obj = tmem_obj_find(hb, oidp);
        if (obj == NULL)
                goto out;
        new_pampd = tmem_pampd_replace_in_obj(obj, index, new_pampd, 0);
        ret = (*tmem_pamops.replace_in_obj)(new_pampd, obj);
out:
        spin_unlock(&hb->lock);
        return ret;
}

/*
 * "Flush" all pages in tmem matching this oid.
 */
int tmem_flush_object(struct tmem_pool *pool, struct tmem_oid *oidp)
{
        struct tmem_obj *obj;
        struct tmem_hashbucket *hb;
        int ret = -1;

        hb = &pool->hashbucket[tmem_oid_hash(oidp)];
        spin_lock(&hb->lock);
        obj = tmem_obj_find(hb, oidp);
        if (obj == NULL)
                goto out;
        tmem_pampd_destroy_all_in_obj(obj);
        tmem_obj_free(obj, hb);
        (*tmem_hostops.obj_free)(obj, pool);
        ret = 0;

out:
        spin_unlock(&hb->lock);
        return ret;
}

/*
 * "Flush" all pages (and tmem_objs) from this tmem_pool and disable
 * all subsequent access to this tmem_pool.
 */
int tmem_destroy_pool(struct tmem_pool *pool)
{
        int ret = -1;

        if (pool == NULL)
                goto out;
        tmem_pool_flush(pool, 1);
        ret = 0;
out:
        return ret;
}

static LIST_HEAD(tmem_global_pool_list);

/*
 * Create a new tmem_pool with the provided flag and return
 * a pool id provided by the tmem host implementation.
 */
void tmem_new_pool(struct tmem_pool *pool, uint32_t flags)
{
        int persistent = flags & TMEM_POOL_PERSIST;
        int shared = flags & TMEM_POOL_SHARED;
        struct tmem_hashbucket *hb = &pool->hashbucket[0];
        int i;

        for (i = 0; i < TMEM_HASH_BUCKETS; i++, hb++) {
                hb->obj_rb_root = RB_ROOT;
                spin_lock_init(&hb->lock);
        }
        INIT_LIST_HEAD(&pool->pool_list);
        atomic_set(&pool->obj_count, 0);
        SET_SENTINEL(pool, POOL);
        list_add_tail(&pool->pool_list, &tmem_global_pool_list);
        pool->persistent = persistent;
        pool->shared = shared;
}
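The height-to-maxindex table built by tmem_objnode_tree_init() above decides how tall the per-object radix tree must grow before a given page index fits. A small standalone sketch of the same arithmetic, assuming the OBJNODE_TREE_MAP_SHIFT of 6 and 64-bit longs defined in tmem.h further down; the names here are local to the sketch, not part of the driver:

#include <stdio.h>

#define MAP_SHIFT   6                            /* same role as OBJNODE_TREE_MAP_SHIFT */
#define INDEX_BITS  (8 * sizeof(unsigned long))  /* same role as OBJNODE_TREE_INDEX_BITS */
#define MAX_PATH    (INDEX_BITS / MAP_SHIFT + 2)

int main(void)
{
        unsigned long h2max[MAX_PATH + 1];
        unsigned int ht, tmp;

        /* same arithmetic as tmem_objnode_tree_init() */
        for (ht = 0; ht <= MAX_PATH; ht++) {
                tmp = ht * MAP_SHIFT;
                if (tmp >= INDEX_BITS)
                        h2max[ht] = ~0UL;
                else
                        h2max[ht] = (~0UL >> (INDEX_BITS - tmp - 1)) >> 1;
                printf("height %u covers indices up to %lu\n", ht, h2max[ht]);
        }
        /* prints 0, 63, 4095, 262143, ...: each extra level multiplies the range by 64 */
        return 0;
}

Each additional level multiplies the coverable index range by 64, so even a full 32-bit index space needs at most six levels.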
@@ -1,244 +0,0 @@
/*
 * tmem.h
 *
 * Transcendent memory
 *
 * Copyright (c) 2009-2011, Dan Magenheimer, Oracle Corp.
 */

#ifndef _TMEM_H_
#define _TMEM_H_

#include <linux/highmem.h>
#include <linux/hash.h>
#include <linux/atomic.h>

/*
 * These are pre-defined by the Xen<->Linux ABI
 */
#define TMEM_PUT_PAGE                   4
#define TMEM_GET_PAGE                   5
#define TMEM_FLUSH_PAGE                 6
#define TMEM_FLUSH_OBJECT               7
#define TMEM_POOL_PERSIST               1
#define TMEM_POOL_SHARED                2
#define TMEM_POOL_PRECOMPRESSED         4
#define TMEM_POOL_PAGESIZE_SHIFT        4
#define TMEM_POOL_PAGESIZE_MASK         0xf
#define TMEM_POOL_RESERVED_BITS         0x00ffff00

/*
 * sentinels have proven very useful for debugging but can be removed
 * or disabled before final merge.
 */
#define SENTINELS
#ifdef SENTINELS
#define DECL_SENTINEL uint32_t sentinel;
#define SET_SENTINEL(_x, _y) (_x->sentinel = _y##_SENTINEL)
#define INVERT_SENTINEL(_x, _y) (_x->sentinel = ~_y##_SENTINEL)
#define ASSERT_SENTINEL(_x, _y) WARN_ON(_x->sentinel != _y##_SENTINEL)
#define ASSERT_INVERTED_SENTINEL(_x, _y) WARN_ON(_x->sentinel != ~_y##_SENTINEL)
#else
#define DECL_SENTINEL
#define SET_SENTINEL(_x, _y) do { } while (0)
#define INVERT_SENTINEL(_x, _y) do { } while (0)
#define ASSERT_SENTINEL(_x, _y) do { } while (0)
#define ASSERT_INVERTED_SENTINEL(_x, _y) do { } while (0)
#endif

#define ASSERT_SPINLOCK(_l)     WARN_ON(!spin_is_locked(_l))

/*
 * A pool is the highest-level data structure managed by tmem and
 * usually corresponds to a large independent set of pages such as
 * a filesystem.  Each pool has an id, and certain attributes and counters.
 * It also contains a set of hash buckets, each of which contains an rbtree
 * of objects and a lock to manage concurrency within the pool.
 */

#define TMEM_HASH_BUCKET_BITS   8
#define TMEM_HASH_BUCKETS       (1<<TMEM_HASH_BUCKET_BITS)

struct tmem_hashbucket {
        struct rb_root obj_rb_root;
        spinlock_t lock;
};

struct tmem_pool {
        void *client; /* "up" for some clients, avoids table lookup */
        struct list_head pool_list;
        uint32_t pool_id;
        bool persistent;
        bool shared;
        atomic_t obj_count;
        atomic_t refcount;
        struct tmem_hashbucket hashbucket[TMEM_HASH_BUCKETS];
        DECL_SENTINEL
};

#define is_persistent(_p)  (_p->persistent)
#define is_ephemeral(_p)   (!(_p->persistent))

/*
 * An object id ("oid") is large: 192-bits (to ensure, for example, files
 * in a modern filesystem can be uniquely identified).
 */

struct tmem_oid {
        uint64_t oid[3];
};

struct tmem_xhandle {
        uint8_t client_id;
        uint8_t xh_data_cksum;
        uint16_t xh_data_size;
        uint16_t pool_id;
        struct tmem_oid oid;
        uint32_t index;
        void *extra;
};

static inline struct tmem_xhandle tmem_xhandle_fill(uint16_t client_id,
                                        struct tmem_pool *pool,
                                        struct tmem_oid *oidp,
                                        uint32_t index)
{
        struct tmem_xhandle xh;
        xh.client_id = client_id;
        xh.xh_data_cksum = (uint8_t)-1;
        xh.xh_data_size = (uint16_t)-1;
        xh.pool_id = pool->pool_id;
        xh.oid = *oidp;
        xh.index = index;
        return xh;
}

static inline void tmem_oid_set_invalid(struct tmem_oid *oidp)
{
        oidp->oid[0] = oidp->oid[1] = oidp->oid[2] = -1UL;
}

static inline bool tmem_oid_valid(struct tmem_oid *oidp)
{
        return oidp->oid[0] != -1UL || oidp->oid[1] != -1UL ||
                oidp->oid[2] != -1UL;
}

static inline int tmem_oid_compare(struct tmem_oid *left,
                                        struct tmem_oid *right)
{
        int ret;

        if (left->oid[2] == right->oid[2]) {
                if (left->oid[1] == right->oid[1]) {
                        if (left->oid[0] == right->oid[0])
                                ret = 0;
                        else if (left->oid[0] < right->oid[0])
                                ret = -1;
                        else
                                return 1;
                } else if (left->oid[1] < right->oid[1])
                        ret = -1;
                else
                        ret = 1;
        } else if (left->oid[2] < right->oid[2])
                ret = -1;
        else
                ret = 1;
        return ret;
}

static inline unsigned tmem_oid_hash(struct tmem_oid *oidp)
{
        return hash_long(oidp->oid[0] ^ oidp->oid[1] ^ oidp->oid[2],
                                TMEM_HASH_BUCKET_BITS);
}

/*
 * A tmem_obj contains an identifier (oid), pointers to the parent
 * pool and the rb_tree to which it belongs, counters, and an ordered
 * set of pampds, structured in a radix-tree-like tree.  The intermediate
 * nodes of the tree are called tmem_objnodes.
 */

struct tmem_objnode;

struct tmem_obj {
        struct tmem_oid oid;
        struct tmem_pool *pool;
        struct rb_node rb_tree_node;
        struct tmem_objnode *objnode_tree_root;
        unsigned int objnode_tree_height;
        unsigned long objnode_count;
        long pampd_count;
        /* for current design of ramster, all pages belonging to
         * an object reside on the same remotenode and extra is
         * used to record the number of the remotenode so a
         * flush-object operation can specify it */
        void *extra; /* for use by pampd implementation */
        DECL_SENTINEL
};

#define OBJNODE_TREE_MAP_SHIFT 6
#define OBJNODE_TREE_MAP_SIZE (1UL << OBJNODE_TREE_MAP_SHIFT)
#define OBJNODE_TREE_MAP_MASK (OBJNODE_TREE_MAP_SIZE-1)
#define OBJNODE_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long))
#define OBJNODE_TREE_MAX_PATH \
                (OBJNODE_TREE_INDEX_BITS/OBJNODE_TREE_MAP_SHIFT + 2)

struct tmem_objnode {
        struct tmem_obj *obj;
        DECL_SENTINEL
        void *slots[OBJNODE_TREE_MAP_SIZE];
        unsigned int slots_in_use;
};

/* pampd abstract datatype methods provided by the PAM implementation */
struct tmem_pamops {
        void *(*create)(char *, size_t, bool, int,
                        struct tmem_pool *, struct tmem_oid *, uint32_t);
        int (*get_data)(char *, size_t *, bool, void *, struct tmem_pool *,
                                struct tmem_oid *, uint32_t);
        int (*get_data_and_free)(char *, size_t *, bool, void *,
                                struct tmem_pool *, struct tmem_oid *,
                                uint32_t);
        void (*free)(void *, struct tmem_pool *,
                                struct tmem_oid *, uint32_t, bool);
        void (*free_obj)(struct tmem_pool *, struct tmem_obj *);
        bool (*is_remote)(void *);
        void *(*repatriate_preload)(void *, struct tmem_pool *,
                                        struct tmem_oid *, uint32_t, bool *);
        int (*repatriate)(void *, void *, struct tmem_pool *,
                                struct tmem_oid *, uint32_t, bool, void *);
        void (*new_obj)(struct tmem_obj *);
        int (*replace_in_obj)(void *, struct tmem_obj *);
};
extern void tmem_register_pamops(struct tmem_pamops *m);

/* memory allocation methods provided by the host implementation */
struct tmem_hostops {
        struct tmem_obj *(*obj_alloc)(struct tmem_pool *);
        void (*obj_free)(struct tmem_obj *, struct tmem_pool *);
        struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
        void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
};
extern void tmem_register_hostops(struct tmem_hostops *m);

/* core tmem accessor functions */
extern int tmem_put(struct tmem_pool *, struct tmem_oid *, uint32_t index,
                        char *, size_t, bool, int);
extern int tmem_get(struct tmem_pool *, struct tmem_oid *, uint32_t index,
                        char *, size_t *, bool, int);
extern int tmem_replace(struct tmem_pool *, struct tmem_oid *, uint32_t index,
                        void *);
extern void *tmem_localify_get_pampd(struct tmem_pool *, struct tmem_oid *,
                                uint32_t index, struct tmem_obj **,
                                void **);
extern void tmem_localify_finish(struct tmem_obj *, uint32_t index,
                                void *, void *, bool);
extern int tmem_flush_page(struct tmem_pool *, struct tmem_oid *,
                                uint32_t index);
extern int tmem_flush_object(struct tmem_pool *, struct tmem_oid *);
extern int tmem_destroy_pool(struct tmem_pool *);
extern void tmem_new_pool(struct tmem_pool *, uint32_t);
#endif /* _TMEM_H */
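The declarations above are the whole contract between the tmem core and its host: the host supplies object/objnode allocators (tmem_hostops) and a page-addressable-memory backend (tmem_pamops), then drives the put/get/flush entry points. A rough, hypothetical sketch of that wiring; the my_* names are placeholders and real hosts (the zcache/ramster glue removed elsewhere in this patch) fill the ops with their own callbacks:

/*
 * Illustrative only -- not part of this patch.  A host fills these ops
 * with its own allocators and compression backend; my_* are placeholders.
 */
static struct tmem_hostops my_hostops;  /* .obj_alloc, .obj_free, .objnode_alloc, .objnode_free */
static struct tmem_pamops my_pamops;    /* .create, .get_data, .free, .is_remote, ... */
static struct tmem_pool my_pool;
static char my_page[PAGE_SIZE];

static void my_tmem_example(void)
{
        struct tmem_oid oid = { .oid = { 1, 2, 3 } };
        size_t size;

        tmem_register_hostops(&my_hostops);
        tmem_register_pamops(&my_pamops);
        tmem_new_pool(&my_pool, 0);             /* no flags: private, ephemeral pool */

        /* copy a page in under a handle, then copy it back out;
         * per the comments in tmem.c an ephemeral get also flushes it */
        if (tmem_put(&my_pool, &oid, 0, my_page, PAGE_SIZE, false, 1) == 0)
                (void)tmem_get(&my_pool, &oid, 0, my_page, &size, false, 0);
}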
@@ -1,509 +0,0 @@
/*
 * xvmalloc memory allocator
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>

#include "xvmalloc.h"
#include "xvmalloc_int.h"

static void stat_inc(u64 *value)
{
        *value = *value + 1;
}

static void stat_dec(u64 *value)
{
        *value = *value - 1;
}

static int test_flag(struct block_header *block, enum blockflags flag)
{
        return block->prev & BIT(flag);
}

static void set_flag(struct block_header *block, enum blockflags flag)
{
        block->prev |= BIT(flag);
}

static void clear_flag(struct block_header *block, enum blockflags flag)
{
        block->prev &= ~BIT(flag);
}

/*
 * Given <page, offset> pair, provide a dereferencable pointer.
 * This is called from xv_malloc/xv_free path, so it
 * needs to be fast.
 */
static void *get_ptr_atomic(struct page *page, u16 offset)
{
        unsigned char *base;

        base = kmap_atomic(page);
        return base + offset;
}

static void put_ptr_atomic(void *ptr)
{
        kunmap_atomic(ptr);
}

static u32 get_blockprev(struct block_header *block)
{
        return block->prev & PREV_MASK;
}

static void set_blockprev(struct block_header *block, u16 new_offset)
{
        block->prev = new_offset | (block->prev & FLAGS_MASK);
}

static struct block_header *BLOCK_NEXT(struct block_header *block)
{
        return (struct block_header *)
                ((char *)block + block->size + XV_ALIGN);
}

/*
 * Get index of free list containing blocks of maximum size
 * which is less than or equal to given size.
 */
static u32 get_index_for_insert(u32 size)
{
        if (unlikely(size > XV_MAX_ALLOC_SIZE))
                size = XV_MAX_ALLOC_SIZE;
        size &= ~FL_DELTA_MASK;
        return (size - XV_MIN_ALLOC_SIZE) >> FL_DELTA_SHIFT;
}

/*
 * Get index of free list having blocks of size greater than
 * or equal to requested size.
 */
static u32 get_index(u32 size)
{
        if (unlikely(size < XV_MIN_ALLOC_SIZE))
                size = XV_MIN_ALLOC_SIZE;
        size = ALIGN(size, FL_DELTA);
        return (size - XV_MIN_ALLOC_SIZE) >> FL_DELTA_SHIFT;
}

/**
 * find_block - find block of at least given size
 * @pool: memory pool to search from
 * @size: size of block required
 * @page: page containing required block
 * @offset: offset within the page where block is located.
 *
 * Searches two level bitmap to locate block of at least
 * the given size. If such a block is found, it provides
 * <page, offset> to identify this block and returns index
 * in freelist where we found this block.
 * Otherwise, returns 0 and <page, offset> params are not touched.
 */
static u32 find_block(struct xv_pool *pool, u32 size,
                        struct page **page, u32 *offset)
{
        ulong flbitmap, slbitmap;
        u32 flindex, slindex, slbitstart;

        /* There are no free blocks in this pool */
        if (!pool->flbitmap)
                return 0;

        /* Get freelist index corresponding to this size */
        slindex = get_index(size);
        slbitmap = pool->slbitmap[slindex / BITS_PER_LONG];
        slbitstart = slindex % BITS_PER_LONG;

        /*
         * If freelist is not empty at this index, we found the
         * block - head of this list. This is approximate best-fit match.
         */
        if (test_bit(slbitstart, &slbitmap)) {
                *page = pool->freelist[slindex].page;
                *offset = pool->freelist[slindex].offset;
                return slindex;
        }

        /*
         * No best-fit found. Search a bit further in bitmap for a free block.
         * Second level bitmap consists of series of 32-bit chunks. Search
         * further in the chunk where we expected a best-fit, starting from
         * index location found above.
         */
        slbitstart++;
        slbitmap >>= slbitstart;

        /* Skip this search if we were already at end of this bitmap chunk */
        if ((slbitstart != BITS_PER_LONG) && slbitmap) {
                slindex += __ffs(slbitmap) + 1;
                *page = pool->freelist[slindex].page;
                *offset = pool->freelist[slindex].offset;
                return slindex;
        }

        /* Now do a full two-level bitmap search to find next nearest fit */
        flindex = slindex / BITS_PER_LONG;

        flbitmap = (pool->flbitmap) >> (flindex + 1);
        if (!flbitmap)
                return 0;

        flindex += __ffs(flbitmap) + 1;
        slbitmap = pool->slbitmap[flindex];
        slindex = (flindex * BITS_PER_LONG) + __ffs(slbitmap);
        *page = pool->freelist[slindex].page;
        *offset = pool->freelist[slindex].offset;

        return slindex;
}

/*
 * Insert block at <page, offset> in freelist of given pool.
 * freelist used depends on block size.
 */
static void insert_block(struct xv_pool *pool, struct page *page, u32 offset,
                        struct block_header *block)
{
        u32 flindex, slindex;
        struct block_header *nextblock;

        slindex = get_index_for_insert(block->size);
        flindex = slindex / BITS_PER_LONG;

        block->link.prev_page = NULL;
        block->link.prev_offset = 0;
        block->link.next_page = pool->freelist[slindex].page;
        block->link.next_offset = pool->freelist[slindex].offset;
        pool->freelist[slindex].page = page;
        pool->freelist[slindex].offset = offset;

        if (block->link.next_page) {
                nextblock = get_ptr_atomic(block->link.next_page,
                                        block->link.next_offset);
                nextblock->link.prev_page = page;
                nextblock->link.prev_offset = offset;
                put_ptr_atomic(nextblock);
                /* If there was a next page then the free bits are set. */
                return;
        }

        __set_bit(slindex % BITS_PER_LONG, &pool->slbitmap[flindex]);
        __set_bit(flindex, &pool->flbitmap);
}

/*
 * Remove block from freelist. Index 'slindex' identifies the freelist.
 */
static void remove_block(struct xv_pool *pool, struct page *page, u32 offset,
                        struct block_header *block, u32 slindex)
{
        u32 flindex = slindex / BITS_PER_LONG;
        struct block_header *tmpblock;

        if (block->link.prev_page) {
                tmpblock = get_ptr_atomic(block->link.prev_page,
                                block->link.prev_offset);
                tmpblock->link.next_page = block->link.next_page;
                tmpblock->link.next_offset = block->link.next_offset;
                put_ptr_atomic(tmpblock);
        }

        if (block->link.next_page) {
                tmpblock = get_ptr_atomic(block->link.next_page,
                                block->link.next_offset);
                tmpblock->link.prev_page = block->link.prev_page;
                tmpblock->link.prev_offset = block->link.prev_offset;
                put_ptr_atomic(tmpblock);
        }

        /* Is this block is at the head of the freelist? */
        if (pool->freelist[slindex].page == page
           && pool->freelist[slindex].offset == offset) {

                pool->freelist[slindex].page = block->link.next_page;
                pool->freelist[slindex].offset = block->link.next_offset;

                if (pool->freelist[slindex].page) {
                        struct block_header *tmpblock;
                        tmpblock = get_ptr_atomic(pool->freelist[slindex].page,
                                        pool->freelist[slindex].offset);
                        tmpblock->link.prev_page = NULL;
                        tmpblock->link.prev_offset = 0;
                        put_ptr_atomic(tmpblock);
                } else {
                        /* This freelist bucket is empty */
                        __clear_bit(slindex % BITS_PER_LONG,
                                        &pool->slbitmap[flindex]);
                        if (!pool->slbitmap[flindex])
                                __clear_bit(flindex, &pool->flbitmap);
                }
        }

        block->link.prev_page = NULL;
        block->link.prev_offset = 0;
        block->link.next_page = NULL;
        block->link.next_offset = 0;
}

/*
 * Allocate a page and add it to freelist of given pool.
 */
static int grow_pool(struct xv_pool *pool, gfp_t flags)
{
        struct page *page;
        struct block_header *block;

        page = alloc_page(flags);
        if (unlikely(!page))
                return -ENOMEM;

        stat_inc(&pool->total_pages);

        spin_lock(&pool->lock);
        block = get_ptr_atomic(page, 0);

        block->size = PAGE_SIZE - XV_ALIGN;
        set_flag(block, BLOCK_FREE);
        clear_flag(block, PREV_FREE);
        set_blockprev(block, 0);

        insert_block(pool, page, 0, block);

        put_ptr_atomic(block);
        spin_unlock(&pool->lock);

        return 0;
}

/*
 * Create a memory pool. Allocates freelist, bitmaps and other
 * per-pool metadata.
 */
struct xv_pool *xv_create_pool(void)
{
        u32 ovhd_size;
        struct xv_pool *pool;

        ovhd_size = roundup(sizeof(*pool), PAGE_SIZE);
        pool = kzalloc(ovhd_size, GFP_KERNEL);
        if (!pool)
                return NULL;

        spin_lock_init(&pool->lock);

        return pool;
}
EXPORT_SYMBOL_GPL(xv_create_pool);

void xv_destroy_pool(struct xv_pool *pool)
{
        kfree(pool);
}
EXPORT_SYMBOL_GPL(xv_destroy_pool);

/**
 * xv_malloc - Allocate block of given size from pool.
 * @pool: pool to allocate from
 * @size: size of block to allocate
 * @page: page no. that holds the object
 * @offset: location of object within page
 *
 * On success, <page, offset> identifies block allocated
 * and 0 is returned. On failure, <page, offset> is set to
 * 0 and -ENOMEM is returned.
 *
 * Allocation requests with size > XV_MAX_ALLOC_SIZE will fail.
 */
int xv_malloc(struct xv_pool *pool, u32 size, struct page **page,
                u32 *offset, gfp_t flags)
{
        int error;
        u32 index, tmpsize, origsize, tmpoffset;
        struct block_header *block, *tmpblock;

        *page = NULL;
        *offset = 0;
        origsize = size;

        if (unlikely(!size || size > XV_MAX_ALLOC_SIZE))
                return -ENOMEM;

        size = ALIGN(size, XV_ALIGN);

        spin_lock(&pool->lock);

        index = find_block(pool, size, page, offset);

        if (!*page) {
                spin_unlock(&pool->lock);
                if (flags & GFP_NOWAIT)
                        return -ENOMEM;
                error = grow_pool(pool, flags);
                if (unlikely(error))
                        return error;

                spin_lock(&pool->lock);
                index = find_block(pool, size, page, offset);
        }

        if (!*page) {
                spin_unlock(&pool->lock);
                return -ENOMEM;
        }

        block = get_ptr_atomic(*page, *offset);

        remove_block(pool, *page, *offset, block, index);

        /* Split the block if required */
        tmpoffset = *offset + size + XV_ALIGN;
        tmpsize = block->size - size;
        tmpblock = (struct block_header *)((char *)block + size + XV_ALIGN);
        if (tmpsize) {
                tmpblock->size = tmpsize - XV_ALIGN;
                set_flag(tmpblock, BLOCK_FREE);
                clear_flag(tmpblock, PREV_FREE);

                set_blockprev(tmpblock, *offset);
                if (tmpblock->size >= XV_MIN_ALLOC_SIZE)
                        insert_block(pool, *page, tmpoffset, tmpblock);

                if (tmpoffset + XV_ALIGN + tmpblock->size != PAGE_SIZE) {
                        tmpblock = BLOCK_NEXT(tmpblock);
                        set_blockprev(tmpblock, tmpoffset);
                }
        } else {
                /* This block is exact fit */
                if (tmpoffset != PAGE_SIZE)
                        clear_flag(tmpblock, PREV_FREE);
        }

        block->size = origsize;
        clear_flag(block, BLOCK_FREE);

        put_ptr_atomic(block);
        spin_unlock(&pool->lock);

        *offset += XV_ALIGN;

        return 0;
}
EXPORT_SYMBOL_GPL(xv_malloc);

/*
 * Free block identified with <page, offset>
 */
void xv_free(struct xv_pool *pool, struct page *page, u32 offset)
{
        void *page_start;
        struct block_header *block, *tmpblock;

        offset -= XV_ALIGN;

        spin_lock(&pool->lock);

        page_start = get_ptr_atomic(page, 0);
        block = (struct block_header *)((char *)page_start + offset);

        /* Catch double free bugs */
        BUG_ON(test_flag(block, BLOCK_FREE));

        block->size = ALIGN(block->size, XV_ALIGN);

        tmpblock = BLOCK_NEXT(block);
        if (offset + block->size + XV_ALIGN == PAGE_SIZE)
                tmpblock = NULL;

        /* Merge next block if its free */
        if (tmpblock && test_flag(tmpblock, BLOCK_FREE)) {
                /*
                 * Blocks smaller than XV_MIN_ALLOC_SIZE
                 * are not inserted in any free list.
                 */
                if (tmpblock->size >= XV_MIN_ALLOC_SIZE) {
                        remove_block(pool, page,
                                    offset + block->size + XV_ALIGN, tmpblock,
                                    get_index_for_insert(tmpblock->size));
                }
                block->size += tmpblock->size + XV_ALIGN;
        }

        /* Merge previous block if its free */
        if (test_flag(block, PREV_FREE)) {
                tmpblock = (struct block_header *)((char *)(page_start) +
                                                get_blockprev(block));
                offset = offset - tmpblock->size - XV_ALIGN;

                if (tmpblock->size >= XV_MIN_ALLOC_SIZE)
                        remove_block(pool, page, offset, tmpblock,
                                    get_index_for_insert(tmpblock->size));

                tmpblock->size += block->size + XV_ALIGN;
                block = tmpblock;
        }

        /* No used objects in this page. Free it. */
        if (block->size == PAGE_SIZE - XV_ALIGN) {
                put_ptr_atomic(page_start);
                spin_unlock(&pool->lock);

                __free_page(page);
                stat_dec(&pool->total_pages);
                return;
        }

        set_flag(block, BLOCK_FREE);
        if (block->size >= XV_MIN_ALLOC_SIZE)
                insert_block(pool, page, offset, block);

        if (offset + block->size + XV_ALIGN != PAGE_SIZE) {
                tmpblock = BLOCK_NEXT(block);
                set_flag(tmpblock, PREV_FREE);
                set_blockprev(tmpblock, offset);
        }

        put_ptr_atomic(page_start);
        spin_unlock(&pool->lock);
}
EXPORT_SYMBOL_GPL(xv_free);

u32 xv_get_object_size(void *obj)
{
        struct block_header *blk;

        blk = (struct block_header *)((char *)(obj) - XV_ALIGN);
        return blk->size;
}
EXPORT_SYMBOL_GPL(xv_get_object_size);

/*
 * Returns total memory used by allocator (userdata + metadata)
 */
u64 xv_get_total_size_bytes(struct xv_pool *pool)
{
        return pool->total_pages << PAGE_SHIFT;
}
EXPORT_SYMBOL_GPL(xv_get_total_size_bytes);
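The core of the allocator above is find_block(): a first-level bitmap records which group of BITS_PER_LONG freelists has anything free, and a second-level bitmap pinpoints the freelist within the group. A compact userspace sketch of that two-level lookup, with invented names and a fixed four-group table, purely to illustrate the idea (the real code also special-cases the end of a bitmap word and returns 0 rather than -1 on failure):

#include <stdio.h>
#include <strings.h>    /* ffsl() */

#define GROUPS 4
static unsigned long fl;                /* one bit per group of 64 freelists */
static unsigned long sl[GROUPS];        /* one bit per freelist */

static void mark_free(unsigned idx)
{
        sl[idx / 64] |= 1UL << (idx % 64);
        fl |= 1UL << (idx / 64);
}

/* return the first non-empty freelist at or above idx, or -1 if none */
static int lookup(unsigned idx)
{
        unsigned long bits = sl[idx / 64] >> (idx % 64);
        unsigned g;

        if (bits)                               /* hit within the same group */
                return idx + ffsl(bits) - 1;
        bits = fl >> (idx / 64 + 1);            /* otherwise ask the first level */
        if (!bits)
                return -1;
        g = idx / 64 + ffsl(bits);              /* first later group with space */
        return g * 64 + ffsl(sl[g]) - 1;
}

int main(void)
{
        mark_free(200);
        printf("%d %d\n", lookup(10), lookup(201));     /* prints: 200 -1 */
        return 0;
}

mark_free(200) followed by lookup(10) walks the first-level bitmap to group 3 and lands on freelist 200, which is the "next nearest fit" behaviour find_block() implements.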
@@ -1,30 +0,0 @@
/*
 * xvmalloc memory allocator
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */

#ifndef _XV_MALLOC_H_
#define _XV_MALLOC_H_

#include <linux/types.h>

struct xv_pool;

struct xv_pool *xv_create_pool(void);
void xv_destroy_pool(struct xv_pool *pool);

int xv_malloc(struct xv_pool *pool, u32 size, struct page **page,
                        u32 *offset, gfp_t flags);
void xv_free(struct xv_pool *pool, struct page *page, u32 offset);

u32 xv_get_object_size(void *obj);
u64 xv_get_total_size_bytes(struct xv_pool *pool);

#endif
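A caller never sees a pointer from this interface; xv_malloc() hands back a <page, offset> pair and the caller maps the page itself for as long as it needs the object. A rough caller-side sketch of that pattern, in the style of the zcache glue that used this allocator; store_compressed() and its error handling are illustrative, not part of the driver:

/* illustrative caller-side sketch, not part of this patch */
static int store_compressed(struct xv_pool *pool, const void *src, u32 clen)
{
        struct page *page;
        u32 offset;
        char *dst;

        /* clen is assumed to be <= XV_MAX_ALLOC_SIZE */
        if (xv_malloc(pool, clen, &page, &offset, GFP_NOWAIT))
                return -ENOMEM;

        dst = (char *)kmap_atomic(page) + offset;       /* <page, offset> -> pointer */
        memcpy(dst, src, clen);
        kunmap_atomic(dst - offset);

        /* ... later, when the object is no longer needed ... */
        xv_free(pool, page, offset);
        return 0;
}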
@@ -1,95 +0,0 @@
/*
 * xvmalloc memory allocator
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */

#ifndef _XV_MALLOC_INT_H_
#define _XV_MALLOC_INT_H_

#include <linux/kernel.h>
#include <linux/types.h>

/* User configurable params */

/* Must be power of two */
#ifdef CONFIG_64BIT
#define XV_ALIGN_SHIFT 3
#else
#define XV_ALIGN_SHIFT  2
#endif
#define XV_ALIGN        (1 << XV_ALIGN_SHIFT)
#define XV_ALIGN_MASK   (XV_ALIGN - 1)

/* This must be greater than sizeof(link_free) */
#define XV_MIN_ALLOC_SIZE       32
#define XV_MAX_ALLOC_SIZE       (PAGE_SIZE - XV_ALIGN)

/*
 * Free lists are separated by FL_DELTA bytes
 * This value is 3 for 4k pages and 4 for 64k pages, for any
 * other page size, a conservative (PAGE_SHIFT - 9) is used.
 */
#if PAGE_SHIFT == 16
#define FL_DELTA_SHIFT 4
#else
#define FL_DELTA_SHIFT (PAGE_SHIFT - 9)
#endif
#define FL_DELTA        (1 << FL_DELTA_SHIFT)
#define FL_DELTA_MASK   (FL_DELTA - 1)
#define NUM_FREE_LISTS  ((XV_MAX_ALLOC_SIZE - XV_MIN_ALLOC_SIZE) \
                                / FL_DELTA + 1)

#define MAX_FLI         DIV_ROUND_UP(NUM_FREE_LISTS, BITS_PER_LONG)

/* End of user params */

enum blockflags {
        BLOCK_FREE,
        PREV_FREE,
        __NR_BLOCKFLAGS,
};

#define FLAGS_MASK      XV_ALIGN_MASK
#define PREV_MASK       (~FLAGS_MASK)

struct freelist_entry {
        struct page *page;
        u16 offset;
        u16 pad;
};

struct link_free {
        struct page *prev_page;
        struct page *next_page;
        u16 prev_offset;
        u16 next_offset;
};

struct block_header {
        union {
                /* This common header must be XV_ALIGN bytes */
                u8 common[XV_ALIGN];
                struct {
                        u16 size;
                        u16 prev;
                };
        };
        struct link_free link;
};

struct xv_pool {
        ulong flbitmap;
        ulong slbitmap[MAX_FLI];
        u64 total_pages;        /* stats */
        struct freelist_entry freelist[NUM_FREE_LISTS];
        spinlock_t lock;
};

#endif
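For the common 4 KiB page, 64-bit configuration, the parameters above work out as follows (a worked reading of the macros, not additional code in the patch): XV_ALIGN = 1 << 3 = 8, XV_MAX_ALLOC_SIZE = 4096 - 8 = 4088, FL_DELTA = 1 << (12 - 9) = 8, NUM_FREE_LISTS = (4088 - 32) / 8 + 1 = 508, and MAX_FLI = DIV_ROUND_UP(508, 64) = 8. So an xv_pool carries one first-level bitmap word, eight second-level bitmap words, and 508 freelist heads whose block sizes are spaced 8 bytes apart.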
File diff suppressed because it is too large

@@ -1,22 +0,0 @@
/*
 * zcache.h
 *
 * External zcache functions
 *
 * Copyright (c) 2009-2012, Dan Magenheimer, Oracle Corp.
 */

#ifndef _ZCACHE_H_
#define _ZCACHE_H_

extern int zcache_put(int, int, struct tmem_oid *, uint32_t,
                        char *, size_t, bool, int);
extern int zcache_autocreate_pool(int, int, bool);
extern int zcache_get(int, int, struct tmem_oid *, uint32_t,
                        char *, size_t *, bool, int);
extern int zcache_flush(int, int, struct tmem_oid *, uint32_t);
extern int zcache_flush_object(int, int, struct tmem_oid *);
extern int zcache_localify(int, struct tmem_oid *, uint32_t,
                        char *, size_t, void *);

#endif /* _ZCACHE_H */