// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2004, 2005 Oracle.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/file.h>
#include <linux/kthread.h>
#include <linux/configfs.h>
#include <linux/random.h>
#include <linux/crc32.h>
#include <linux/time.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/ktime.h>
#include "heartbeat.h"
#include "tcp.h"
#include "nodemanager.h"
#include "quorum.h"

#include "masklog.h"

/*
 * The first heartbeat pass had one global thread that would serialize all hb
 * callback calls.  This global serializing sem should only be removed once
 * we've made sure that all callees can deal with being called concurrently
 * from multiple hb region threads.
 */
static DECLARE_RWSEM(o2hb_callback_sem);

/*
 * multiple hb threads are watching multiple regions.  A node is live
 * whenever any of the threads sees activity from the node in its region.
 */
static DEFINE_SPINLOCK(o2hb_live_lock);
static struct list_head o2hb_live_slots[O2NM_MAX_NODES];
static unsigned long o2hb_live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
static LIST_HEAD(o2hb_node_events);
static DECLARE_WAIT_QUEUE_HEAD(o2hb_steady_queue);

/*
 * In global heartbeat, we maintain a series of region bitmaps.
 *  - o2hb_region_bitmap allows us to limit the region number to max region.
 *  - o2hb_live_region_bitmap tracks live regions (seen steady iterations).
 *  - o2hb_quorum_region_bitmap tracks live regions that have seen all nodes
 *    heartbeat on it.
 *  - o2hb_failed_region_bitmap tracks the regions that have seen io timeouts.
 */
static unsigned long o2hb_region_bitmap[BITS_TO_LONGS(O2NM_MAX_REGIONS)];
static unsigned long o2hb_live_region_bitmap[BITS_TO_LONGS(O2NM_MAX_REGIONS)];
static unsigned long o2hb_quorum_region_bitmap[BITS_TO_LONGS(O2NM_MAX_REGIONS)];
static unsigned long o2hb_failed_region_bitmap[BITS_TO_LONGS(O2NM_MAX_REGIONS)];

#define O2HB_DB_TYPE_LIVENODES			0
#define O2HB_DB_TYPE_LIVEREGIONS		1
#define O2HB_DB_TYPE_QUORUMREGIONS		2
#define O2HB_DB_TYPE_FAILEDREGIONS		3
#define O2HB_DB_TYPE_REGION_LIVENODES		4
#define O2HB_DB_TYPE_REGION_NUMBER		5
#define O2HB_DB_TYPE_REGION_ELAPSED_TIME	6
#define O2HB_DB_TYPE_REGION_PINNED		7
struct o2hb_debug_buf {
        int db_type;
        int db_size;
        int db_len;
        void *db_data;
};

static struct o2hb_debug_buf *o2hb_db_livenodes;
static struct o2hb_debug_buf *o2hb_db_liveregions;
static struct o2hb_debug_buf *o2hb_db_quorumregions;
static struct o2hb_debug_buf *o2hb_db_failedregions;

#define O2HB_DEBUG_DIR			"o2hb"
#define O2HB_DEBUG_LIVENODES		"livenodes"
#define O2HB_DEBUG_LIVEREGIONS		"live_regions"
#define O2HB_DEBUG_QUORUMREGIONS	"quorum_regions"
#define O2HB_DEBUG_FAILEDREGIONS	"failed_regions"
#define O2HB_DEBUG_REGION_NUMBER	"num"
#define O2HB_DEBUG_REGION_ELAPSED_TIME	"elapsed_time_in_ms"
#define O2HB_DEBUG_REGION_PINNED	"pinned"

static struct dentry *o2hb_debug_dir;

static LIST_HEAD(o2hb_all_regions);

static struct o2hb_callback {
        struct list_head list;
} o2hb_callbacks[O2HB_NUM_CB];

static struct o2hb_callback *hbcall_from_type(enum o2hb_callback_type type);

enum o2hb_heartbeat_modes {
        O2HB_HEARTBEAT_LOCAL = 0,
        O2HB_HEARTBEAT_GLOBAL,
        O2HB_HEARTBEAT_NUM_MODES,
};

static const char *o2hb_heartbeat_mode_desc[O2HB_HEARTBEAT_NUM_MODES] = {
        "local",	/* O2HB_HEARTBEAT_LOCAL */
        "global",	/* O2HB_HEARTBEAT_GLOBAL */
};

unsigned int o2hb_dead_threshold = O2HB_DEFAULT_DEAD_THRESHOLD;
static unsigned int o2hb_heartbeat_mode = O2HB_HEARTBEAT_LOCAL;

/*
 * o2hb_dependent_users tracks the number of registered callbacks that depend
 * on heartbeat. o2net and o2dlm are two entities that register this callback.
 * However only o2dlm depends on the heartbeat. It does not want the heartbeat
 * to stop while a dlm domain is still active.
 */
static unsigned int o2hb_dependent_users;

/*
 * In global heartbeat mode, all regions are pinned if there are one or more
 * dependent users and the quorum region count is <= O2HB_PIN_CUT_OFF. All
 * regions are unpinned if the region count exceeds the cut off or the number
 * of dependent users falls to zero.
 */
#define O2HB_PIN_CUT_OFF	3

/*
 * In local heartbeat mode, we assume the dlm domain name to be the same as
 * region uuid. This is true for domains created for the file system but not
 * necessarily true for userdlm domains. This is a known limitation.
 *
 * In global heartbeat mode, we pin/unpin all o2hb regions. This solution
 * works for both file system and userdlm domains.
 */
static int o2hb_region_pin(const char *region_uuid);
static void o2hb_region_unpin(const char *region_uuid);

/* Only sets a new threshold if there are no active regions.
 *
 * No locking or otherwise interesting code is required for reading
 * o2hb_dead_threshold as it can't change once regions are active and
 * it's not interesting to anyone until then anyway. */
static void o2hb_dead_threshold_set(unsigned int threshold)
{
        if (threshold > O2HB_MIN_DEAD_THRESHOLD) {
                spin_lock(&o2hb_live_lock);
                if (list_empty(&o2hb_all_regions))
                        o2hb_dead_threshold = threshold;
                spin_unlock(&o2hb_live_lock);
        }
}

static int o2hb_global_heartbeat_mode_set(unsigned int hb_mode)
{
        int ret = -1;

        if (hb_mode < O2HB_HEARTBEAT_NUM_MODES) {
                spin_lock(&o2hb_live_lock);
                if (list_empty(&o2hb_all_regions)) {
                        o2hb_heartbeat_mode = hb_mode;
                        ret = 0;
                }
                spin_unlock(&o2hb_live_lock);
        }

        return ret;
}

struct o2hb_node_event {
        struct list_head        hn_item;
        enum o2hb_callback_type hn_event_type;
        struct o2nm_node        *hn_node;
        int                     hn_node_num;
};

struct o2hb_disk_slot {
        struct o2hb_disk_heartbeat_block *ds_raw_block;
        u8                      ds_node_num;
        u64                     ds_last_time;
        u64                     ds_last_generation;
        u16                     ds_equal_samples;
        u16                     ds_changed_samples;
        struct list_head        ds_live_item;
};

/* each thread owns a region.. when we're asked to tear down the region
 * we ask the thread to stop, who cleans up the region */
struct o2hb_region {
        struct config_item      hr_item;

        struct list_head        hr_all_item;
        unsigned                hr_unclean_stop:1,
                                hr_aborted_start:1,
                                hr_item_pinned:1,
                                hr_item_dropped:1,
                                hr_node_deleted:1;

        /* protected by the hr_callback_sem */
        struct task_struct      *hr_task;

        unsigned int            hr_blocks;
        unsigned long long      hr_start_block;

        unsigned int            hr_block_bits;
        unsigned int            hr_block_bytes;

        unsigned int            hr_slots_per_page;
        unsigned int            hr_num_pages;

        struct page             **hr_slot_data;
        struct block_device     *hr_bdev;
        struct o2hb_disk_slot   *hr_slots;

        /* live node map of this region */
        unsigned long           hr_live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
        unsigned int            hr_region_num;

        struct dentry           *hr_debug_dir;
        struct o2hb_debug_buf   *hr_db_livenodes;
        struct o2hb_debug_buf   *hr_db_regnum;
        struct o2hb_debug_buf   *hr_db_elapsed_time;
        struct o2hb_debug_buf   *hr_db_pinned;

        /* let the person setting up hb wait for it to return until it
         * has reached a 'steady' state.  This will be fixed when we have
         * a more complete api that doesn't lead to this sort of fragility. */
        atomic_t                hr_steady_iterations;

        /* terminate o2hb thread if it does not reach steady state
         * (hr_steady_iterations == 0) within hr_unsteady_iterations */
        atomic_t                hr_unsteady_iterations;

        char                    hr_dev_name[BDEVNAME_SIZE];

        unsigned int            hr_timeout_ms;

        /* randomized as the region goes up and down so that a node
         * recognizes a node going up and down in one iteration */
        u64                     hr_generation;

        struct delayed_work     hr_write_timeout_work;
        unsigned long           hr_last_timeout_start;

        /* negotiate timer, used to negotiate extending hb timeout. */
        struct delayed_work     hr_nego_timeout_work;
        unsigned long           hr_nego_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];

        /* Used during o2hb_check_slot to hold a copy of the block
         * being checked because we temporarily have to zero out the
         * crc field. */
        struct o2hb_disk_heartbeat_block *hr_tmp_block;

        /* Message key for negotiate timeout message. */
        unsigned int            hr_key;
        struct list_head        hr_handler_list;

        /* last hb status, 0 for success, other value for error. */
        int                     hr_last_hb_status;
};

struct o2hb_bio_wait_ctxt {
        atomic_t          wc_num_reqs;
        struct completion wc_io_complete;
        int               wc_error;
};

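/*
 * The negotiate timer has to fire before the write timeout does (see the
 * "negotiate timeout must be less than write timeout" note in
 * o2hb_arm_timeout() below), so its period is half of
 * O2HB_MAX_WRITE_TIMEOUT_MS: negotiation gets a chance to extend the
 * timeout before any node fences itself.
 */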
#define O2HB_NEGO_TIMEOUT_MS (O2HB_MAX_WRITE_TIMEOUT_MS/2)

enum {
        O2HB_NEGO_TIMEOUT_MSG = 1,
        O2HB_NEGO_APPROVE_MSG = 2,
};

struct o2hb_nego_msg {
        u8 node_num;
};

static void o2hb_write_timeout(struct work_struct *work)
{
        int failed, quorum;
        struct o2hb_region *reg =
                container_of(work, struct o2hb_region,
                             hr_write_timeout_work.work);

        mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u "
             "milliseconds\n", reg->hr_dev_name,
             jiffies_to_msecs(jiffies - reg->hr_last_timeout_start));

        if (o2hb_global_heartbeat_active()) {
                spin_lock(&o2hb_live_lock);
                if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
                        set_bit(reg->hr_region_num, o2hb_failed_region_bitmap);
                failed = bitmap_weight(o2hb_failed_region_bitmap,
                                       O2NM_MAX_REGIONS);
                quorum = bitmap_weight(o2hb_quorum_region_bitmap,
                                       O2NM_MAX_REGIONS);
                spin_unlock(&o2hb_live_lock);

                mlog(ML_HEARTBEAT, "Number of regions %d, failed regions %d\n",
                     quorum, failed);

                /*
                 * Fence if the number of failed regions >= half the number
                 * of quorum regions
                 */
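                /* failed << 1 is 2 * failed: skip fencing while failures
                 * remain below half of the quorum regions. */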
                if ((failed << 1) < quorum)
                        return;
        }

        o2quo_disk_timeout();
}

static void o2hb_arm_timeout(struct o2hb_region *reg)
{
        /* Arm writeout only after thread reaches steady state */
        if (atomic_read(&reg->hr_steady_iterations) != 0)
                return;

        mlog(ML_HEARTBEAT, "Queue write timeout for %u ms\n",
             O2HB_MAX_WRITE_TIMEOUT_MS);

        if (o2hb_global_heartbeat_active()) {
                spin_lock(&o2hb_live_lock);
                clear_bit(reg->hr_region_num, o2hb_failed_region_bitmap);
                spin_unlock(&o2hb_live_lock);
        }
        cancel_delayed_work(&reg->hr_write_timeout_work);
        schedule_delayed_work(&reg->hr_write_timeout_work,
                              msecs_to_jiffies(O2HB_MAX_WRITE_TIMEOUT_MS));

        cancel_delayed_work(&reg->hr_nego_timeout_work);
        /* negotiate timeout must be less than write timeout. */
        schedule_delayed_work(&reg->hr_nego_timeout_work,
                              msecs_to_jiffies(O2HB_NEGO_TIMEOUT_MS));
        memset(reg->hr_nego_node_bitmap, 0, sizeof(reg->hr_nego_node_bitmap));
}

static void o2hb_disarm_timeout(struct o2hb_region *reg)
{
        cancel_delayed_work_sync(&reg->hr_write_timeout_work);
        cancel_delayed_work_sync(&reg->hr_nego_timeout_work);
}

static int o2hb_send_nego_msg(int key, int type, u8 target)
{
        struct o2hb_nego_msg msg;
        int status, ret;

        msg.node_num = o2nm_this_node();
again:
        ret = o2net_send_message(type, key, &msg, sizeof(msg),
                        target, &status);

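        /* -EAGAIN and -ENOMEM are treated as transient: back off briefly
         * and retry rather than abandon the negotiation message. */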
        if (ret == -EAGAIN || ret == -ENOMEM) {
                msleep(100);
                goto again;
        }

        return ret;
}

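/*
 * Negotiate timer flow, summarized from the patch description for
 * "ocfs2: o2hb: add negotiate timer": when a node's heartbeat write hangs,
 * it sends O2HB_NEGO_TIMEOUT_MSG to the master (the lowest-numbered live
 * node), which records the sender in hr_nego_node_bitmap. Once every live
 * node has reported, the master re-arms its own timers and sends
 * O2HB_NEGO_APPROVE_MSG to each other node so they do the same. A node
 * that never sees the approval keeps its write timeout armed and fences
 * itself as before; if storage recovers at any point, o2hb_thread simply
 * re-queues both timers.
 */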
static void o2hb_nego_timeout(struct work_struct *work)
{
        unsigned long live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
        int master_node, i, ret;
        struct o2hb_region *reg;

        reg = container_of(work, struct o2hb_region, hr_nego_timeout_work.work);
        /* don't negotiate timeout if last hb failed since it is very
         * possible io failed. Should let write timeout fence self.
         */
        if (reg->hr_last_hb_status)
                return;

        o2hb_fill_node_map(live_node_bitmap, sizeof(live_node_bitmap));
        /* lowest node as master node to make negotiate decision. */
        master_node = find_next_bit(live_node_bitmap, O2NM_MAX_NODES, 0);

        if (master_node == o2nm_this_node()) {
                if (!test_bit(master_node, reg->hr_nego_node_bitmap)) {
                        printk(KERN_NOTICE "o2hb: node %d hb write hung for %ds on region %s (%s).\n",
                                o2nm_this_node(), O2HB_NEGO_TIMEOUT_MS/1000,
                                config_item_name(&reg->hr_item), reg->hr_dev_name);
                        set_bit(master_node, reg->hr_nego_node_bitmap);
                }
                if (memcmp(reg->hr_nego_node_bitmap, live_node_bitmap,
                                sizeof(reg->hr_nego_node_bitmap))) {
                        /* check negotiate bitmap every second to do timeout
                         * approve decision.
                         */
                        schedule_delayed_work(&reg->hr_nego_timeout_work,
                                msecs_to_jiffies(1000));

                        return;
                }

                printk(KERN_NOTICE "o2hb: all nodes hb write hung, maybe region %s (%s) is down.\n",
                        config_item_name(&reg->hr_item), reg->hr_dev_name);
                /* approve negotiate timeout request. */
                o2hb_arm_timeout(reg);

                i = -1;
                while ((i = find_next_bit(live_node_bitmap,
                                O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) {
                        if (i == master_node)
                                continue;

                        mlog(ML_HEARTBEAT, "send NEGO_APPROVE msg to node %d\n", i);
                        ret = o2hb_send_nego_msg(reg->hr_key,
                                        O2HB_NEGO_APPROVE_MSG, i);
                        if (ret)
                                mlog(ML_ERROR, "send NEGO_APPROVE msg to node %d fail %d\n",
                                        i, ret);
                }
        } else {
                /* negotiate timeout with master node. */
                printk(KERN_NOTICE "o2hb: node %d hb write hung for %ds on region %s (%s), negotiate timeout with node %d.\n",
                        o2nm_this_node(), O2HB_NEGO_TIMEOUT_MS/1000, config_item_name(&reg->hr_item),
                        reg->hr_dev_name, master_node);
                ret = o2hb_send_nego_msg(reg->hr_key, O2HB_NEGO_TIMEOUT_MSG,
                                master_node);
                if (ret)
                        mlog(ML_ERROR, "send NEGO_TIMEOUT msg to node %d fail %d\n",
                                master_node, ret);
        }
}

static int o2hb_nego_timeout_handler(struct o2net_msg *msg, u32 len, void *data,
                                void **ret_data)
{
        struct o2hb_region *reg = data;
        struct o2hb_nego_msg *nego_msg;

        nego_msg = (struct o2hb_nego_msg *)msg->buf;
        printk(KERN_NOTICE "o2hb: receive negotiate timeout message from node %d on region %s (%s).\n",
                nego_msg->node_num, config_item_name(&reg->hr_item), reg->hr_dev_name);
        if (nego_msg->node_num < O2NM_MAX_NODES)
                set_bit(nego_msg->node_num, reg->hr_nego_node_bitmap);
        else
                mlog(ML_ERROR, "got nego timeout message from bad node.\n");

        return 0;
}

static int o2hb_nego_approve_handler(struct o2net_msg *msg, u32 len, void *data,
                                void **ret_data)
{
        struct o2hb_region *reg = data;

        printk(KERN_NOTICE "o2hb: negotiate timeout approved by master node on region %s (%s).\n",
                config_item_name(&reg->hr_item), reg->hr_dev_name);
        o2hb_arm_timeout(reg);
        return 0;
}

static inline void o2hb_bio_wait_init(struct o2hb_bio_wait_ctxt *wc)
{
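        /* wc_num_reqs starts biased at 1; o2hb_wait_on_io() drops the bias
         * so the completion fires only after every submitted bio ends. */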
        atomic_set(&wc->wc_num_reqs, 1);
        init_completion(&wc->wc_io_complete);
        wc->wc_error = 0;
}

/* Used in error paths too */
static inline void o2hb_bio_wait_dec(struct o2hb_bio_wait_ctxt *wc,
                                     unsigned int num)
{
        /* sadly atomic_sub_and_test() isn't available on all platforms.  The
         * good news is that the fast path only completes one at a time */
        while(num--) {
                if (atomic_dec_and_test(&wc->wc_num_reqs)) {
                        BUG_ON(num > 0);
                        complete(&wc->wc_io_complete);
                }
        }
}

static void o2hb_wait_on_io(struct o2hb_bio_wait_ctxt *wc)
{
        o2hb_bio_wait_dec(wc, 1);
        wait_for_completion(&wc->wc_io_complete);
}

static void o2hb_bio_end_io(struct bio *bio)
{
        struct o2hb_bio_wait_ctxt *wc = bio->bi_private;

        if (bio->bi_status) {
                mlog(ML_ERROR, "IO Error %d\n", bio->bi_status);
                wc->wc_error = blk_status_to_errno(bio->bi_status);
        }

        o2hb_bio_wait_dec(wc, 1);
        bio_put(bio);
}

/* Setup a Bio to cover I/O against num_slots slots starting at
 * start_slot. */
static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
                                      struct o2hb_bio_wait_ctxt *wc,
                                      unsigned int *current_slot,
                                      unsigned int max_slots, int op,
                                      int op_flags)
{
        int len, current_page;
        unsigned int vec_len, vec_start;
        unsigned int bits = reg->hr_block_bits;
        unsigned int spp = reg->hr_slots_per_page;
        unsigned int cs = *current_slot;
        struct bio *bio;
        struct page *page;

        /* Testing has shown this allocation to take long enough under
         * GFP_KERNEL that the local node can get fenced. It would be
         * nicest if we could pre-allocate these bios and avoid this
         * all together. */
        bio = bio_alloc(GFP_ATOMIC, 16);
        if (!bio) {
                mlog(ML_ERROR, "Could not alloc slots BIO!\n");
                bio = ERR_PTR(-ENOMEM);
                goto bail;
        }

        /* Must put everything in 512 byte sectors for the bio... */
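        /* shifting the block number left by (hr_block_bits - 9) converts
         * it from block units into 512-byte sector units. */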
        bio->bi_iter.bi_sector = (reg->hr_start_block + cs) << (bits - 9);
        bio_set_dev(bio, reg->hr_bdev);
        bio->bi_private = wc;
        bio->bi_end_io = o2hb_bio_end_io;
        bio_set_op_attrs(bio, op, op_flags);

        vec_start = (cs << bits) % PAGE_SIZE;
        while(cs < max_slots) {
                current_page = cs / spp;
                page = reg->hr_slot_data[current_page];

                vec_len = min(PAGE_SIZE - vec_start,
                              (max_slots-cs) * (PAGE_SIZE/spp) );

                mlog(ML_HB_BIO, "page %d, vec_len = %u, vec_start = %u\n",
                     current_page, vec_len, vec_start);

len = bio_add_page(bio, page, vec_len, vec_start);
|
ocfs2: revert "ocfs2/o2hb: check len for bio_add_page() to avoid getting incorrect bio"
This reverts commit ba16ddfbeb9d ("ocfs2/o2hb: check len for
bio_add_page() to avoid getting incorrect bio").
In my testing, this patch introduces a problem that mkfs can't have
slots more than 16 with 4k block size.
And the original logic is safe actually with the situation it mentions
so revert this commit.
Attach test log:
(mkfs.ocfs2,27479,2):o2hb_setup_one_bio:463 page 0, vec_len = 4096, vec_start = 0
(mkfs.ocfs2,27479,2):o2hb_setup_one_bio:463 page 1, vec_len = 4096, vec_start = 0
(mkfs.ocfs2,27479,2):o2hb_setup_one_bio:463 page 2, vec_len = 4096, vec_start = 0
(mkfs.ocfs2,27479,2):o2hb_setup_one_bio:463 page 3, vec_len = 4096, vec_start = 0
(mkfs.ocfs2,27479,2):o2hb_setup_one_bio:463 page 4, vec_len = 4096, vec_start = 0
(mkfs.ocfs2,27479,2):o2hb_setup_one_bio:463 page 5, vec_len = 4096, vec_start = 0
(mkfs.ocfs2,27479,2):o2hb_setup_one_bio:463 page 6, vec_len = 4096, vec_start = 0
(mkfs.ocfs2,27479,2):o2hb_setup_one_bio:463 page 7, vec_len = 4096, vec_start = 0
(mkfs.ocfs2,27479,2):o2hb_setup_one_bio:463 page 8, vec_len = 4096, vec_start = 0
(mkfs.ocfs2,27479,2):o2hb_setup_one_bio:463 page 9, vec_len = 4096, vec_start = 0
(mkfs.ocfs2,27479,2):o2hb_setup_one_bio:463 page 10, vec_len = 4096, vec_start = 0
(mkfs.ocfs2,27479,2):o2hb_setup_one_bio:463 page 11, vec_len = 4096, vec_start = 0
(mkfs.ocfs2,27479,2):o2hb_setup_one_bio:463 page 12, vec_len = 4096, vec_start = 0
(mkfs.ocfs2,27479,2):o2hb_setup_one_bio:463 page 13, vec_len = 4096, vec_start = 0
(mkfs.ocfs2,27479,2):o2hb_setup_one_bio:463 page 14, vec_len = 4096, vec_start = 0
(mkfs.ocfs2,27479,2):o2hb_setup_one_bio:463 page 15, vec_len = 4096, vec_start = 0
(mkfs.ocfs2,27479,2):o2hb_setup_one_bio:463 page 16, vec_len = 4096, vec_start = 0
(mkfs.ocfs2,27479,2):o2hb_setup_one_bio:471 ERROR: Adding page[16] to bio failed, page ffffea0002d7ed40, len 0, vec_len 4096, vec_start 0,bi_sector 8192
(mkfs.ocfs2,27479,2):o2hb_read_slots:500 ERROR: status = -5
(mkfs.ocfs2,27479,2):o2hb_populate_slot_data:1911 ERROR: status = -5
(mkfs.ocfs2,27479,2):o2hb_region_dev_write:2012 ERROR: status = -5
Link: http://lkml.kernel.org/r/SIXPR06MB0461721F398A5A92FC68C39ED5920@SIXPR06MB0461.apcprd06.prod.outlook.com
Signed-off-by: Changwei Ge <ge.changwei@h3c.com>
Cc: Jun Piao <piaojun@huawei.com>
Cc: Yiwen Jiang <jiangyiwen@huawei.com>
Cc: Joseph Qi <jiangqi903@gmail.com>
Cc: Mark Fasheh <mark@fasheh.com>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Junxiao Bi <junxiao.bi@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2018-05-26 05:47:20 +08:00
|
|
|
if (len != vec_len) break;
|
2005-12-16 06:31:23 +08:00
|
|
|
|
mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced *long* time
ago with promise that one day it will be possible to implement page
cache with bigger chunks than PAGE_SIZE.
This promise never materialized. And unlikely will.
We have many places where PAGE_CACHE_SIZE assumed to be equal to
PAGE_SIZE. And it's constant source of confusion on whether
PAGE_CACHE_* or PAGE_* constant should be used in a particular case,
especially on the border between fs and mm.
Global switching to PAGE_CACHE_SIZE != PAGE_SIZE would cause to much
breakage to be doable.
Let's stop pretending that pages in page cache are special. They are
not.
The changes are pretty straight-forward:
- <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN};
- page_cache_get() -> get_page();
- page_cache_release() -> put_page();
This patch contains automated changes generated with coccinelle using
script below. For some reason, coccinelle doesn't patch header files.
I've called spatch for them manually.
The only adjustment after coccinelle is revert of changes to
PAGE_CAHCE_ALIGN definition: we are going to drop it later.
There are few places in the code where coccinelle didn't reach. I'll
fix them manually in a separate patch. Comments and documentation also
will be addressed with the separate patch.
virtual patch
@@
expression E;
@@
- E << (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
expression E;
@@
- E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
@@
- PAGE_CACHE_SHIFT
+ PAGE_SHIFT
@@
@@
- PAGE_CACHE_SIZE
+ PAGE_SIZE
@@
@@
- PAGE_CACHE_MASK
+ PAGE_MASK
@@
expression E;
@@
- PAGE_CACHE_ALIGN(E)
+ PAGE_ALIGN(E)
@@
expression E;
@@
- page_cache_get(E)
+ get_page(E)
@@
expression E;
@@
- page_cache_release(E)
+ put_page(E)
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2016-04-01 20:29:47 +08:00
|
|
|
cs += vec_len / (PAGE_SIZE/spp);
|
2005-12-16 06:31:23 +08:00
|
|
|
vec_start = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
bail:
|
2007-01-11 17:58:10 +08:00
|
|
|
*current_slot = cs;
|
2005-12-16 06:31:23 +08:00
|
|
|
return bio;
|
|
|
|
}
|
|
|
|
|
|
|
|
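
/* Read the heartbeat slots in [begin_slot, max_slots) into the region's
 * slot pages, issuing as many bios as it takes to cover the range and
 * waiting for all of them to complete before returning. */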
static int o2hb_read_slots(struct o2hb_region *reg,
			   unsigned int begin_slot,
			   unsigned int max_slots)
{
	unsigned int current_slot = begin_slot;
	int status;
	struct o2hb_bio_wait_ctxt wc;
	struct bio *bio;

	o2hb_bio_wait_init(&wc);

	while(current_slot < max_slots) {
		bio = o2hb_setup_one_bio(reg, &wc, &current_slot, max_slots,
					 REQ_OP_READ, 0);
		if (IS_ERR(bio)) {
			status = PTR_ERR(bio);
			mlog_errno(status);
			goto bail_and_wait;
		}

		atomic_inc(&wc.wc_num_reqs);
		submit_bio(bio);
	}

	status = 0;

bail_and_wait:
	o2hb_wait_on_io(&wc);
	if (wc.wc_error && !status)
		status = wc.wc_error;

	return status;
}
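
/* Write only our own heartbeat slot (slot .. slot + 1) with REQ_SYNC.
 * The caller waits on write_wc when it needs the write to reach disk. */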
static int o2hb_issue_node_write(struct o2hb_region *reg,
				 struct o2hb_bio_wait_ctxt *write_wc)
{
	int status;
	unsigned int slot;
	struct bio *bio;

	o2hb_bio_wait_init(write_wc);

	slot = o2nm_this_node();

	bio = o2hb_setup_one_bio(reg, write_wc, &slot, slot+1, REQ_OP_WRITE,
				 REQ_SYNC);
	if (IS_ERR(bio)) {
		status = PTR_ERR(bio);
		mlog_errno(status);
		goto bail;
	}

	atomic_inc(&write_wc->wc_num_reqs);
	submit_bio(bio);

	status = 0;
bail:
	return status;
}
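
/* crc32_le() over the whole heartbeat block, computed with hb_cksum
 * temporarily zeroed so the stored checksum doesn't feed into itself. */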
static u32 o2hb_compute_block_crc_le(struct o2hb_region *reg,
				     struct o2hb_disk_heartbeat_block *hb_block)
{
	__le32 old_cksum;
	u32 ret;

	/* We want to compute the block crc with a 0 value in the
	 * hb_cksum field. Save it off here and replace after the
	 * crc. */
	old_cksum = hb_block->hb_cksum;
	hb_block->hb_cksum = 0;

	ret = crc32_le(0, (unsigned char *) hb_block, reg->hr_block_bytes);

	hb_block->hb_cksum = old_cksum;

	return ret;
}

static void o2hb_dump_slot(struct o2hb_disk_heartbeat_block *hb_block)
{
	mlog(ML_ERROR, "Dump slot information: seq = 0x%llx, node = %u, "
	     "cksum = 0x%x, generation 0x%llx\n",
	     (long long)le64_to_cpu(hb_block->hb_seq),
	     hb_block->hb_node, le32_to_cpu(hb_block->hb_cksum),
	     (long long)le64_to_cpu(hb_block->hb_generation));
}

static int o2hb_verify_crc(struct o2hb_region *reg,
			   struct o2hb_disk_heartbeat_block *hb_block)
{
	u32 read, computed;

	read = le32_to_cpu(hb_block->hb_cksum);
	computed = o2hb_compute_block_crc_le(reg, hb_block);

	return read == computed;
}

/*
 * Compare the slot data with what we wrote in the last iteration.
 * If the match fails, print an appropriate error message. This is to
 * detect errors like... another node heartbeating on the same slot,
 * a flaky device that is losing writes, etc.
 * Returns 1 if check succeeds, 0 otherwise.
 */
static int o2hb_check_own_slot(struct o2hb_region *reg)
{
	struct o2hb_disk_slot *slot;
	struct o2hb_disk_heartbeat_block *hb_block;
	char *errstr;

	slot = &reg->hr_slots[o2nm_this_node()];
	/* Don't check on our 1st timestamp */
	if (!slot->ds_last_time)
		return 0;

	hb_block = slot->ds_raw_block;
	if (le64_to_cpu(hb_block->hb_seq) == slot->ds_last_time &&
	    le64_to_cpu(hb_block->hb_generation) == slot->ds_last_generation &&
	    hb_block->hb_node == slot->ds_node_num)
		return 1;

#define ERRSTR1		"Another node is heartbeating on device"
#define ERRSTR2		"Heartbeat generation mismatch on device"
#define ERRSTR3		"Heartbeat sequence mismatch on device"

	if (hb_block->hb_node != slot->ds_node_num)
		errstr = ERRSTR1;
	else if (le64_to_cpu(hb_block->hb_generation) !=
		 slot->ds_last_generation)
		errstr = ERRSTR2;
	else
		errstr = ERRSTR3;

	mlog(ML_ERROR, "%s (%s): expected(%u:0x%llx, 0x%llx), "
	     "ondisk(%u:0x%llx, 0x%llx)\n", errstr, reg->hr_dev_name,
	     slot->ds_node_num, (unsigned long long)slot->ds_last_generation,
	     (unsigned long long)slot->ds_last_time, hb_block->hb_node,
	     (unsigned long long)le64_to_cpu(hb_block->hb_generation),
	     (unsigned long long)le64_to_cpu(hb_block->hb_seq));

	return 0;
}
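
/* Fill in our slot's heartbeat block for the next write: a fresh
 * sequence number from the wall clock, our node number and generation,
 * the configured dead threshold in ms, and (last of all) the checksum. */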
static inline void o2hb_prepare_block(struct o2hb_region *reg,
				      u64 generation)
{
	int node_num;
	u64 cputime;
	struct o2hb_disk_slot *slot;
	struct o2hb_disk_heartbeat_block *hb_block;

	node_num = o2nm_this_node();
	slot = &reg->hr_slots[node_num];

	hb_block = (struct o2hb_disk_heartbeat_block *)slot->ds_raw_block;
	memset(hb_block, 0, reg->hr_block_bytes);
	/* TODO: time stuff */
	cputime = ktime_get_real_seconds();
	if (!cputime)
		cputime = 1;

	hb_block->hb_seq = cpu_to_le64(cputime);
	hb_block->hb_node = node_num;
	hb_block->hb_generation = cpu_to_le64(generation);
	hb_block->hb_dead_ms = cpu_to_le32(o2hb_dead_threshold * O2HB_REGION_TIMEOUT_MS);

	/* This step must always happen last! */
	hb_block->hb_cksum = cpu_to_le32(o2hb_compute_block_crc_le(reg,
								   hb_block));

	mlog(ML_HB_BIO, "our node generation = 0x%llx, cksum = 0x%x\n",
	     (long long)generation,
	     le32_to_cpu(hb_block->hb_cksum));
}

static void o2hb_fire_callbacks(struct o2hb_callback *hbcall,
				struct o2nm_node *node,
				int idx)
{
	struct o2hb_callback_func *f;

	list_for_each_entry(f, &hbcall->list, hc_item) {
		mlog(ML_HEARTBEAT, "calling funcs %p\n", f);
		(f->hc_func)(node, idx, f->hc_data);
	}
}

/* Will run the list in order until we process the passed event */
static void o2hb_run_event_list(struct o2hb_node_event *queued_event)
{
	struct o2hb_callback *hbcall;
	struct o2hb_node_event *event;

	/* Holding callback sem assures we don't alter the callback
	 * lists when doing this, and serializes ourselves with other
	 * processes wanting callbacks. */
	down_write(&o2hb_callback_sem);

	spin_lock(&o2hb_live_lock);
	while (!list_empty(&o2hb_node_events)
	       && !list_empty(&queued_event->hn_item)) {
		event = list_entry(o2hb_node_events.next,
				   struct o2hb_node_event,
				   hn_item);
		list_del_init(&event->hn_item);
		spin_unlock(&o2hb_live_lock);

		mlog(ML_HEARTBEAT, "Node %s event for %d\n",
		     event->hn_event_type == O2HB_NODE_UP_CB ? "UP" : "DOWN",
		     event->hn_node_num);

		hbcall = hbcall_from_type(event->hn_event_type);

		/* We should *never* have gotten on to the list with a
		 * bad type... This isn't something that we should try
		 * to recover from. */
		BUG_ON(IS_ERR(hbcall));

		o2hb_fire_callbacks(hbcall, event->hn_node, event->hn_node_num);

		spin_lock(&o2hb_live_lock);
	}
	spin_unlock(&o2hb_live_lock);

	up_write(&o2hb_callback_sem);
}
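
/* Queue an UP/DOWN event for a node. The caller must already hold
 * o2hb_live_lock and later flushes the list with o2hb_run_event_list(). */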
static void o2hb_queue_node_event(struct o2hb_node_event *event,
				  enum o2hb_callback_type type,
				  struct o2nm_node *node,
				  int node_num)
{
	assert_spin_locked(&o2hb_live_lock);

	BUG_ON((!node) && (type != O2HB_NODE_DOWN_CB));

	event->hn_event_type = type;
	event->hn_node = node;
	event->hn_node_num = node_num;

	mlog(ML_HEARTBEAT, "Queue node %s event for node %d\n",
	     type == O2HB_NODE_UP_CB ? "UP" : "DOWN", node_num);

	list_add_tail(&event->hn_item, &o2hb_node_events);
}
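
/* Take a slot off the live list at shutdown and, if it was the last
 * entry for that node, emit a NODE_DOWN event. */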
static void o2hb_shutdown_slot(struct o2hb_disk_slot *slot)
{
	struct o2hb_node_event event =
		{ .hn_item = LIST_HEAD_INIT(event.hn_item), };
	struct o2nm_node *node;
	int queued = 0;

	node = o2nm_get_node_by_num(slot->ds_node_num);
	if (!node)
		return;

	spin_lock(&o2hb_live_lock);
	if (!list_empty(&slot->ds_live_item)) {
		mlog(ML_HEARTBEAT, "Shutdown, node %d leaves region\n",
		     slot->ds_node_num);

		list_del_init(&slot->ds_live_item);

		if (list_empty(&o2hb_live_slots[slot->ds_node_num])) {
			clear_bit(slot->ds_node_num, o2hb_live_node_bitmap);

			o2hb_queue_node_event(&event, O2HB_NODE_DOWN_CB, node,
					      slot->ds_node_num);
			queued = 1;
		}
	}
	spin_unlock(&o2hb_live_lock);

	if (queued)
		o2hb_run_event_list(&event);

	o2nm_node_put(node);
}
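
/* Under global heartbeat, promote this region to a quorum device once
 * the local thread is steady and every live node heartbeats on it. */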
static void o2hb_set_quorum_device(struct o2hb_region *reg)
{
	if (!o2hb_global_heartbeat_active())
		return;

	/* Prevent race with o2hb_heartbeat_group_drop_item() */
	if (kthread_should_stop())
		return;

	/* Tag region as quorum only after thread reaches steady state */
	if (atomic_read(&reg->hr_steady_iterations) != 0)
		return;

	spin_lock(&o2hb_live_lock);

	if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
		goto unlock;

	/*
	 * A region can be added to the quorum only when it sees all
	 * live nodes heartbeat on it. In other words, the region has been
	 * added to all nodes.
	 */
	if (memcmp(reg->hr_live_node_bitmap, o2hb_live_node_bitmap,
		   sizeof(o2hb_live_node_bitmap)))
		goto unlock;

	printk(KERN_NOTICE "o2hb: Region %s (%s) is now a quorum device\n",
	       config_item_name(&reg->hr_item), reg->hr_dev_name);

	set_bit(reg->hr_region_num, o2hb_quorum_region_bitmap);

	/*
	 * If global heartbeat active, unpin all regions if the
	 * region count > CUT_OFF
	 */
	if (bitmap_weight(o2hb_quorum_region_bitmap,
			  O2NM_MAX_REGIONS) > O2HB_PIN_CUT_OFF)
		o2hb_region_unpin(NULL);
unlock:
	spin_unlock(&o2hb_live_lock);
}
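
/* Examine one slot's freshly read block and drive the liveness state
 * machine: count changed vs. equal sequence samples, bring a node up
 * after O2HB_LIVE_THRESHOLD changes, and take it down after
 * o2hb_dead_threshold equal samples or a generation change.
 * Returns nonzero if region membership changed. */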
static int o2hb_check_slot(struct o2hb_region *reg,
			   struct o2hb_disk_slot *slot)
{
	int changed = 0, gen_changed = 0;
	struct o2hb_node_event event =
		{ .hn_item = LIST_HEAD_INIT(event.hn_item), };
	struct o2nm_node *node;
	struct o2hb_disk_heartbeat_block *hb_block = reg->hr_tmp_block;
	u64 cputime;
	unsigned int dead_ms = o2hb_dead_threshold * O2HB_REGION_TIMEOUT_MS;
	unsigned int slot_dead_ms;
	int tmp;
	int queued = 0;

	memcpy(hb_block, slot->ds_raw_block, reg->hr_block_bytes);

	/*
	 * If a node is no longer configured but is still in the livemap, we
	 * may need to clear that bit from the livemap.
	 */
	node = o2nm_get_node_by_num(slot->ds_node_num);
	if (!node) {
		spin_lock(&o2hb_live_lock);
		tmp = test_bit(slot->ds_node_num, o2hb_live_node_bitmap);
		spin_unlock(&o2hb_live_lock);
		if (!tmp)
			return 0;
	}

	if (!o2hb_verify_crc(reg, hb_block)) {
		/* all paths from here will drop o2hb_live_lock for
		 * us. */
		spin_lock(&o2hb_live_lock);

		/* Don't print an error on the console in this case -
		 * a freshly formatted heartbeat area will not have a
		 * crc set on it. */
		if (list_empty(&slot->ds_live_item))
			goto out;

		/* The node is live but pushed out a bad crc. We
		 * consider it a transient miss but don't populate any
		 * other values as they may be junk. */
		mlog(ML_ERROR, "Node %d has written a bad crc to %s\n",
		     slot->ds_node_num, reg->hr_dev_name);
		o2hb_dump_slot(hb_block);

		slot->ds_equal_samples++;
		goto fire_callbacks;
	}

	/* we don't care if these wrap.. the state transitions below
	 * clear at the right places */
	cputime = le64_to_cpu(hb_block->hb_seq);
	if (slot->ds_last_time != cputime)
		slot->ds_changed_samples++;
	else
		slot->ds_equal_samples++;
	slot->ds_last_time = cputime;

	/* The node changed heartbeat generations. We assume this to
	 * mean it dropped off but came back before we timed out. We
	 * want to consider it down for the time being but don't want
	 * to lose any changed_samples state we might build up to
	 * considering it live again. */
	if (slot->ds_last_generation != le64_to_cpu(hb_block->hb_generation)) {
		gen_changed = 1;
		slot->ds_equal_samples = 0;
		mlog(ML_HEARTBEAT, "Node %d changed generation (0x%llx "
		     "to 0x%llx)\n", slot->ds_node_num,
		     (long long)slot->ds_last_generation,
		     (long long)le64_to_cpu(hb_block->hb_generation));
	}

	slot->ds_last_generation = le64_to_cpu(hb_block->hb_generation);

	mlog(ML_HEARTBEAT, "Slot %d gen 0x%llx cksum 0x%x "
	     "seq %llu last %llu changed %u equal %u\n",
	     slot->ds_node_num, (long long)slot->ds_last_generation,
	     le32_to_cpu(hb_block->hb_cksum),
	     (unsigned long long)le64_to_cpu(hb_block->hb_seq),
	     (unsigned long long)slot->ds_last_time, slot->ds_changed_samples,
	     slot->ds_equal_samples);

	spin_lock(&o2hb_live_lock);

fire_callbacks:
	/* dead nodes only come to life after some number of
	 * changes at any time during their dead time */
	if (list_empty(&slot->ds_live_item) &&
	    slot->ds_changed_samples >= O2HB_LIVE_THRESHOLD) {
		mlog(ML_HEARTBEAT, "Node %d (id 0x%llx) joined my region\n",
		     slot->ds_node_num, (long long)slot->ds_last_generation);

		set_bit(slot->ds_node_num, reg->hr_live_node_bitmap);

		/* first on the list generates a callback */
		if (list_empty(&o2hb_live_slots[slot->ds_node_num])) {
			mlog(ML_HEARTBEAT, "o2hb: Add node %d to live nodes "
			     "bitmap\n", slot->ds_node_num);
			set_bit(slot->ds_node_num, o2hb_live_node_bitmap);

			o2hb_queue_node_event(&event, O2HB_NODE_UP_CB, node,
					      slot->ds_node_num);

			changed = 1;
			queued = 1;
		}

		list_add_tail(&slot->ds_live_item,
			      &o2hb_live_slots[slot->ds_node_num]);

		slot->ds_equal_samples = 0;

		/* We want to be sure that all nodes agree on the
		 * number of milliseconds before a node will be
		 * considered dead. The self-fencing timeout is
		 * computed from this value, and a discrepancy might
		 * result in heartbeat calling a node dead when it
		 * hasn't self-fenced yet. */
		slot_dead_ms = le32_to_cpu(hb_block->hb_dead_ms);
		if (slot_dead_ms && slot_dead_ms != dead_ms) {
			/* TODO: Perhaps we can fail the region here. */
			mlog(ML_ERROR, "Node %d on device %s has a dead count "
			     "of %u ms, but our count is %u ms.\n"
			     "Please double check your configuration values "
			     "for 'O2CB_HEARTBEAT_THRESHOLD'\n",
			     slot->ds_node_num, reg->hr_dev_name, slot_dead_ms,
			     dead_ms);
		}
		goto out;
	}

	/* if the list is dead, we're done.. */
	if (list_empty(&slot->ds_live_item))
		goto out;

	/* live nodes only go dead after enough consecutive missed
	 * samples.. reset the missed counter whenever we see
	 * activity */
	if (slot->ds_equal_samples >= o2hb_dead_threshold || gen_changed) {
		mlog(ML_HEARTBEAT, "Node %d left my region\n",
		     slot->ds_node_num);

		clear_bit(slot->ds_node_num, reg->hr_live_node_bitmap);

		/* last off the live_slot generates a callback */
		list_del_init(&slot->ds_live_item);
		if (list_empty(&o2hb_live_slots[slot->ds_node_num])) {
			mlog(ML_HEARTBEAT, "o2hb: Remove node %d from live "
			     "nodes bitmap\n", slot->ds_node_num);
			clear_bit(slot->ds_node_num, o2hb_live_node_bitmap);

			/* node can be null */
			o2hb_queue_node_event(&event, O2HB_NODE_DOWN_CB,
					      node, slot->ds_node_num);

			changed = 1;
			queued = 1;
		}

		/* We don't clear this because the node is still
		 * actually writing new blocks. */
		if (!gen_changed)
			slot->ds_changed_samples = 0;
		goto out;
	}
	if (slot->ds_changed_samples) {
		slot->ds_changed_samples = 0;
		slot->ds_equal_samples = 0;
	}
out:
	spin_unlock(&o2hb_live_lock);

	if (queued)
		o2hb_run_event_list(&event);

	if (node)
		o2nm_node_put(node);
	return changed;
}

static int o2hb_highest_node(unsigned long *nodes, int numbits)
{
	return find_last_bit(nodes, numbits);
}

static int o2hb_lowest_node(unsigned long *nodes, int numbits)
{
	return find_first_bit(nodes, numbits);
}
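
/* One heartbeat iteration: read every configured (or still-live) slot,
 * verify nobody else is writing into ours, write our refreshed block,
 * run the liveness check on each slot, and only count this pass toward
 * steady state once our own write has reached the disk. */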
static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
{
	int i, ret, highest_node, lowest_node;
	int membership_change = 0, own_slot_ok = 0;
	unsigned long configured_nodes[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
	struct o2hb_bio_wait_ctxt write_wc;

	ret = o2nm_configured_node_map(configured_nodes,
				       sizeof(configured_nodes));
	if (ret) {
		mlog_errno(ret);
		goto bail;
	}

	/*
	 * If a node is not configured but is in the livemap, we still need
	 * to read the slot so as to be able to remove it from the livemap.
	 */
	o2hb_fill_node_map(live_node_bitmap, sizeof(live_node_bitmap));
	i = -1;
	while ((i = find_next_bit(live_node_bitmap,
				  O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) {
		set_bit(i, configured_nodes);
	}

	highest_node = o2hb_highest_node(configured_nodes, O2NM_MAX_NODES);
	lowest_node = o2hb_lowest_node(configured_nodes, O2NM_MAX_NODES);
	if (highest_node >= O2NM_MAX_NODES || lowest_node >= O2NM_MAX_NODES) {
		mlog(ML_NOTICE, "o2hb: No configured nodes found!\n");
		ret = -EINVAL;
		goto bail;
	}

	/* No sense in reading the slots of nodes that don't exist
	 * yet. Of course, if the node definitions have holes in them
	 * then we're reading an empty slot anyway... Consider this
	 * best-effort. */
	ret = o2hb_read_slots(reg, lowest_node, highest_node + 1);
	if (ret < 0) {
		mlog_errno(ret);
		goto bail;
	}

	/* With an up to date view of the slots, we can check that no
	 * other node has been improperly configured to heartbeat in
	 * our slot. */
	own_slot_ok = o2hb_check_own_slot(reg);

	/* fill in the proper info for our next heartbeat */
	o2hb_prepare_block(reg, reg->hr_generation);

	ret = o2hb_issue_node_write(reg, &write_wc);
	if (ret < 0) {
		mlog_errno(ret);
		goto bail;
	}

	i = -1;
	while((i = find_next_bit(configured_nodes,
				 O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) {
		membership_change |= o2hb_check_slot(reg, &reg->hr_slots[i]);
	}

	/*
	 * We have to be sure we've advertised ourselves on disk
	 * before we can go to steady state. This ensures that
	 * people we find in our steady state have seen us.
	 */
	o2hb_wait_on_io(&write_wc);
	if (write_wc.wc_error) {
		/* Do not re-arm the write timeout on I/O error - we
		 * can't be sure that the new block ever made it to
		 * disk */
		mlog(ML_ERROR, "Write error %d on device \"%s\"\n",
		     write_wc.wc_error, reg->hr_dev_name);
		ret = write_wc.wc_error;
		goto bail;
	}

	/* Skip disarming the timeout if own slot has stale/bad data */
	if (own_slot_ok) {
		o2hb_set_quorum_device(reg);
		/* Re-arm both the write timeout and the (shorter)
		 * negotiate timeout; if the device stops responding,
		 * the negotiate path lets all nodes agree to keep
		 * waiting instead of self-fencing. */
		o2hb_arm_timeout(reg);
		reg->hr_last_timeout_start = jiffies;
	}

bail:
	/* let the person who launched us know when things are steady */
	if (atomic_read(&reg->hr_steady_iterations) != 0) {
		if (!ret && own_slot_ok && !membership_change) {
			if (atomic_dec_and_test(&reg->hr_steady_iterations))
				wake_up(&o2hb_steady_queue);
		}
	}

	if (atomic_read(&reg->hr_steady_iterations) != 0) {
		if (atomic_dec_and_test(&reg->hr_unsteady_iterations)) {
			printk(KERN_NOTICE "o2hb: Unable to stabilize "
			       "heartbeat on region %s (%s)\n",
			       config_item_name(&reg->hr_item),
			       reg->hr_dev_name);
			atomic_set(&reg->hr_steady_iterations, 0);
			reg->hr_aborted_start = 1;
			wake_up(&o2hb_steady_queue);
			ret = -EIO;
		}
	}

	return ret;
}

/*
 * we ride the region ref that the region dir holds. before the region
 * dir is removed and drops its ref it will wait to tear down this
 * thread.
 */
static int o2hb_thread(void *data)
{
	int i, ret;
	struct o2hb_region *reg = data;
	struct o2hb_bio_wait_ctxt write_wc;
	ktime_t before_hb, after_hb;
	unsigned int elapsed_msec;

	mlog(ML_HEARTBEAT|ML_KTHREAD, "hb thread running\n");

	set_user_nice(current, MIN_NICE);

	/* Pin node */
	ret = o2nm_depend_this_node();
	if (ret) {
		mlog(ML_ERROR, "Node has been deleted, ret = %d\n", ret);
		reg->hr_node_deleted = 1;
		wake_up(&o2hb_steady_queue);
		return 0;
	}

	while (!kthread_should_stop() &&
	       !reg->hr_unclean_stop && !reg->hr_aborted_start) {
		/* We track the time spent inside
		 * o2hb_do_disk_heartbeat so that we avoid more than
		 * hr_timeout_ms between disk writes. On busy systems
		 * this should result in a heartbeat which is less
		 * likely to time itself out. */
		before_hb = ktime_get_real();

		ret = o2hb_do_disk_heartbeat(reg);
		reg->hr_last_hb_status = ret;

		after_hb = ktime_get_real();

		elapsed_msec = (unsigned int)
				ktime_ms_delta(after_hb, before_hb);

		mlog(ML_HEARTBEAT,
		     "start = %lld, end = %lld, msec = %u, ret = %d\n",
		     before_hb, after_hb, elapsed_msec, ret);

		if (!kthread_should_stop() &&
		    elapsed_msec < reg->hr_timeout_ms) {
			/* the kthread api has blocked signals for us so no
			 * need to record the return value. */
			msleep_interruptible(reg->hr_timeout_ms - elapsed_msec);
		}
	}

	/* Stop the write timeout and negotiate timeout machinery now
	 * that the heartbeat loop has exited. */
	o2hb_disarm_timeout(reg);

	/* unclean stop is only used in very bad situation */
	for(i = 0; !reg->hr_unclean_stop && i < reg->hr_blocks; i++)
		o2hb_shutdown_slot(&reg->hr_slots[i]);

	/* Explicit down notification - avoid forcing the other nodes
	 * to timeout on this region when we could just as easily
	 * write a clear generation - thus indicating to them that
	 * this node has left this region.
	 */
	if (!reg->hr_unclean_stop && !reg->hr_aborted_start) {
		o2hb_prepare_block(reg, 0);
		ret = o2hb_issue_node_write(reg, &write_wc);
		if (ret == 0)
			o2hb_wait_on_io(&write_wc);
		else
			mlog_errno(ret);
	}

	/* Unpin node */
	o2nm_undepend_this_node();

	mlog(ML_HEARTBEAT|ML_KTHREAD, "o2hb thread exiting\n");

	return 0;
}

#ifdef CONFIG_DEBUG_FS
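/* Render the requested debug buffer into a page-sized text buffer at
 * open time; reads are then served out of file->private_data. */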
static int o2hb_debug_open(struct inode *inode, struct file *file)
{
	struct o2hb_debug_buf *db = inode->i_private;
	struct o2hb_region *reg;
	unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long lts;
	char *buf = NULL;
	int i = -1;
	int out = 0;

	/* max_nodes should be the largest bitmap we pass here */
	BUG_ON(sizeof(map) < db->db_size);

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		goto bail;

	switch (db->db_type) {
	case O2HB_DB_TYPE_LIVENODES:
	case O2HB_DB_TYPE_LIVEREGIONS:
	case O2HB_DB_TYPE_QUORUMREGIONS:
	case O2HB_DB_TYPE_FAILEDREGIONS:
		spin_lock(&o2hb_live_lock);
		memcpy(map, db->db_data, db->db_size);
		spin_unlock(&o2hb_live_lock);
		break;

	case O2HB_DB_TYPE_REGION_LIVENODES:
		spin_lock(&o2hb_live_lock);
		reg = (struct o2hb_region *)db->db_data;
		memcpy(map, reg->hr_live_node_bitmap, db->db_size);
		spin_unlock(&o2hb_live_lock);
		break;

	case O2HB_DB_TYPE_REGION_NUMBER:
		reg = (struct o2hb_region *)db->db_data;
		out += scnprintf(buf + out, PAGE_SIZE - out, "%d\n",
				reg->hr_region_num);
		goto done;

	case O2HB_DB_TYPE_REGION_ELAPSED_TIME:
		reg = (struct o2hb_region *)db->db_data;
		lts = reg->hr_last_timeout_start;
		/* If 0, it has never been set before */
		if (lts)
			lts = jiffies_to_msecs(jiffies - lts);
		out += scnprintf(buf + out, PAGE_SIZE - out, "%lu\n", lts);
		goto done;

	case O2HB_DB_TYPE_REGION_PINNED:
		reg = (struct o2hb_region *)db->db_data;
		out += scnprintf(buf + out, PAGE_SIZE - out, "%u\n",
				!!reg->hr_item_pinned);
		goto done;

	default:
		goto done;
	}

	while ((i = find_next_bit(map, db->db_len, i + 1)) < db->db_len)
		out += scnprintf(buf + out, PAGE_SIZE - out, "%d ", i);
	out += scnprintf(buf + out, PAGE_SIZE - out, "\n");

done:
	i_size_write(inode, out);

	file->private_data = buf;

	return 0;
bail:
	return -ENOMEM;
}

static int o2hb_debug_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static ssize_t o2hb_debug_read(struct file *file, char __user *buf,
			       size_t nbytes, loff_t *ppos)
{
	return simple_read_from_buffer(buf, nbytes, ppos, file->private_data,
				       i_size_read(file->f_mapping->host));
}
#else
static int o2hb_debug_open(struct inode *inode, struct file *file)
{
	return 0;
}
static int o2hb_debug_release(struct inode *inode, struct file *file)
{
	return 0;
}
static ssize_t o2hb_debug_read(struct file *file, char __user *buf,
			       size_t nbytes, loff_t *ppos)
{
	return 0;
}
#endif  /* CONFIG_DEBUG_FS */

static const struct file_operations o2hb_debug_fops = {
	.open =		o2hb_debug_open,
	.release =	o2hb_debug_release,
	.read =		o2hb_debug_read,
	.llseek =	generic_file_llseek,
};

void o2hb_exit(void)
{
	debugfs_remove_recursive(o2hb_debug_dir);
	kfree(o2hb_db_livenodes);
	kfree(o2hb_db_liveregions);
	kfree(o2hb_db_quorumregions);
	kfree(o2hb_db_failedregions);
}
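
/* Allocate a debug buffer descriptor and create the matching read-only
 * debugfs file; on allocation failure the file is simply not created. */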
static void o2hb_debug_create(const char *name, struct dentry *dir,
			      struct o2hb_debug_buf **db, int db_len, int type,
			      int size, int len, void *data)
{
	*db = kmalloc(db_len, GFP_KERNEL);
	if (!*db)
		return;

	(*db)->db_type = type;
	(*db)->db_size = size;
	(*db)->db_len = len;
	(*db)->db_data = data;

	debugfs_create_file(name, S_IFREG|S_IRUSR, dir, *db, &o2hb_debug_fops);
}

static void o2hb_debug_init(void)
{
	o2hb_debug_dir = debugfs_create_dir(O2HB_DEBUG_DIR, NULL);

	o2hb_debug_create(O2HB_DEBUG_LIVENODES, o2hb_debug_dir,
			  &o2hb_db_livenodes, sizeof(*o2hb_db_livenodes),
			  O2HB_DB_TYPE_LIVENODES, sizeof(o2hb_live_node_bitmap),
			  O2NM_MAX_NODES, o2hb_live_node_bitmap);

	o2hb_debug_create(O2HB_DEBUG_LIVEREGIONS, o2hb_debug_dir,
			  &o2hb_db_liveregions, sizeof(*o2hb_db_liveregions),
			  O2HB_DB_TYPE_LIVEREGIONS,
			  sizeof(o2hb_live_region_bitmap), O2NM_MAX_REGIONS,
			  o2hb_live_region_bitmap);

	o2hb_debug_create(O2HB_DEBUG_QUORUMREGIONS, o2hb_debug_dir,
			  &o2hb_db_quorumregions,
			  sizeof(*o2hb_db_quorumregions),
			  O2HB_DB_TYPE_QUORUMREGIONS,
			  sizeof(o2hb_quorum_region_bitmap), O2NM_MAX_REGIONS,
			  o2hb_quorum_region_bitmap);

	o2hb_debug_create(O2HB_DEBUG_FAILEDREGIONS, o2hb_debug_dir,
			  &o2hb_db_failedregions,
			  sizeof(*o2hb_db_failedregions),
			  O2HB_DB_TYPE_FAILEDREGIONS,
			  sizeof(o2hb_failed_region_bitmap), O2NM_MAX_REGIONS,
			  o2hb_failed_region_bitmap);
}

void o2hb_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(o2hb_callbacks); i++)
		INIT_LIST_HEAD(&o2hb_callbacks[i].list);

	for (i = 0; i < ARRAY_SIZE(o2hb_live_slots); i++)
		INIT_LIST_HEAD(&o2hb_live_slots[i]);

	INIT_LIST_HEAD(&o2hb_node_events);

	memset(o2hb_live_node_bitmap, 0, sizeof(o2hb_live_node_bitmap));
	memset(o2hb_region_bitmap, 0, sizeof(o2hb_region_bitmap));
	memset(o2hb_live_region_bitmap, 0, sizeof(o2hb_live_region_bitmap));
	memset(o2hb_quorum_region_bitmap, 0, sizeof(o2hb_quorum_region_bitmap));
	memset(o2hb_failed_region_bitmap, 0, sizeof(o2hb_failed_region_bitmap));

	o2hb_dependent_users = 0;

	o2hb_debug_init();
}

/* if we're already in a callback then we're already serialized by the sem */
static void o2hb_fill_node_map_from_callback(unsigned long *map,
					     unsigned bytes)
{
	BUG_ON(bytes < (BITS_TO_LONGS(O2NM_MAX_NODES) * sizeof(unsigned long)));

	memcpy(map, &o2hb_live_node_bitmap, bytes);
}

/*
 * get a map of all nodes that are heartbeating in any regions
 */
void o2hb_fill_node_map(unsigned long *map, unsigned bytes)
{
	/* callers want to serialize this map and callbacks so that they
	 * can trust that they don't miss nodes coming to the party */
	down_read(&o2hb_callback_sem);
	spin_lock(&o2hb_live_lock);
	o2hb_fill_node_map_from_callback(map, bytes);
	spin_unlock(&o2hb_live_lock);
	up_read(&o2hb_callback_sem);
}
EXPORT_SYMBOL_GPL(o2hb_fill_node_map);

/*
 * heartbeat configfs bits. The heartbeat set is a default set under
 * the cluster set in nodemanager.c.
 */

static struct o2hb_region *to_o2hb_region(struct config_item *item)
{
	return item ? container_of(item, struct o2hb_region, hr_item) : NULL;
}

/* drop_item only drops its ref after killing the thread, nothing should
 * be using the region anymore. this has to clean up any state that
 * attributes might have built up. */
static void o2hb_region_release(struct config_item *item)
{
	int i;
	struct page *page;
	struct o2hb_region *reg = to_o2hb_region(item);

	mlog(ML_HEARTBEAT, "hb region release (%s)\n", reg->hr_dev_name);

	kfree(reg->hr_tmp_block);

	if (reg->hr_slot_data) {
		for (i = 0; i < reg->hr_num_pages; i++) {
			page = reg->hr_slot_data[i];
			if (page)
				__free_page(page);
		}
		kfree(reg->hr_slot_data);
	}

	if (reg->hr_bdev)
		blkdev_put(reg->hr_bdev, FMODE_READ|FMODE_WRITE);

	kfree(reg->hr_slots);

	debugfs_remove_recursive(reg->hr_debug_dir);
	kfree(reg->hr_db_livenodes);
	kfree(reg->hr_db_regnum);
	kfree(reg->hr_db_elapsed_time);
	kfree(reg->hr_db_pinned);

	spin_lock(&o2hb_live_lock);
	list_del(&reg->hr_all_item);
	spin_unlock(&o2hb_live_lock);

	o2net_unregister_handler_list(&reg->hr_handler_list);
	kfree(reg);
}
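
/* Parse a block-size value written through configfs and validate it:
 * it must be a power of two between 512 and 4096 bytes. */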
static int o2hb_read_block_input(struct o2hb_region *reg,
				 const char *page,
				 unsigned long *ret_bytes,
				 unsigned int *ret_bits)
{
	unsigned long bytes;
	char *p = (char *)page;

	bytes = simple_strtoul(p, &p, 0);
	if (!p || (*p && (*p != '\n')))
		return -EINVAL;

	/* Heartbeat and fs min / max block sizes are the same. */
	if (bytes > 4096 || bytes < 512)
		return -ERANGE;
	if (hweight16(bytes) != 1)
		return -EINVAL;

	if (ret_bytes)
		*ret_bytes = bytes;
	if (ret_bits)
		*ret_bits = ffs(bytes) - 1;

	return 0;
}

static ssize_t o2hb_region_block_bytes_show(struct config_item *item,
					    char *page)
{
	return sprintf(page, "%u\n", to_o2hb_region(item)->hr_block_bytes);
}

static ssize_t o2hb_region_block_bytes_store(struct config_item *item,
					     const char *page,
					     size_t count)
{
	struct o2hb_region *reg = to_o2hb_region(item);
	int status;
	unsigned long block_bytes;
	unsigned int block_bits;

	if (reg->hr_bdev)
		return -EINVAL;

	status = o2hb_read_block_input(reg, page, &block_bytes,
				       &block_bits);
	if (status)
		return status;

	reg->hr_block_bytes = (unsigned int)block_bytes;
	reg->hr_block_bits = block_bits;

	return count;
}

static ssize_t o2hb_region_start_block_show(struct config_item *item,
					    char *page)
{
	return sprintf(page, "%llu\n", to_o2hb_region(item)->hr_start_block);
}

static ssize_t o2hb_region_start_block_store(struct config_item *item,
					     const char *page,
					     size_t count)
{
	struct o2hb_region *reg = to_o2hb_region(item);
	unsigned long long tmp;
	char *p = (char *)page;

	if (reg->hr_bdev)
		return -EINVAL;

	tmp = simple_strtoull(p, &p, 0);
	if (!p || (*p && (*p != '\n')))
		return -EINVAL;

	reg->hr_start_block = tmp;

	return count;
}

static ssize_t o2hb_region_blocks_show(struct config_item *item, char *page)
{
	return sprintf(page, "%d\n", to_o2hb_region(item)->hr_blocks);
}

static ssize_t o2hb_region_blocks_store(struct config_item *item,
					const char *page,
					size_t count)
{
	struct o2hb_region *reg = to_o2hb_region(item);
	unsigned long tmp;
	char *p = (char *)page;

	if (reg->hr_bdev)
		return -EINVAL;

	tmp = simple_strtoul(p, &p, 0);
	if (!p || (*p && (*p != '\n')))
		return -EINVAL;

	if (tmp > O2NM_MAX_NODES || tmp == 0)
		return -ERANGE;

	reg->hr_blocks = (unsigned int)tmp;

	return count;
}

static ssize_t o2hb_region_dev_show(struct config_item *item, char *page)
{
	unsigned int ret = 0;

	if (to_o2hb_region(item)->hr_bdev)
		ret = sprintf(page, "%s\n", to_o2hb_region(item)->hr_dev_name);

	return ret;
}
static void o2hb_init_region_params(struct o2hb_region *reg)
|
|
|
|
{
|
mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced *long* time
ago with promise that one day it will be possible to implement page
cache with bigger chunks than PAGE_SIZE.
This promise never materialized. And unlikely will.
We have many places where PAGE_CACHE_SIZE assumed to be equal to
PAGE_SIZE. And it's constant source of confusion on whether
PAGE_CACHE_* or PAGE_* constant should be used in a particular case,
especially on the border between fs and mm.
Global switching to PAGE_CACHE_SIZE != PAGE_SIZE would cause to much
breakage to be doable.
Let's stop pretending that pages in page cache are special. They are
not.
The changes are pretty straight-forward:
- <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN};
- page_cache_get() -> get_page();
- page_cache_release() -> put_page();
This patch contains automated changes generated with coccinelle using
script below. For some reason, coccinelle doesn't patch header files.
I've called spatch for them manually.
The only adjustment after coccinelle is revert of changes to
PAGE_CAHCE_ALIGN definition: we are going to drop it later.
There are few places in the code where coccinelle didn't reach. I'll
fix them manually in a separate patch. Comments and documentation also
will be addressed with the separate patch.
virtual patch
@@
expression E;
@@
- E << (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
expression E;
@@
- E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
@@
- PAGE_CACHE_SHIFT
+ PAGE_SHIFT
@@
@@
- PAGE_CACHE_SIZE
+ PAGE_SIZE
@@
@@
- PAGE_CACHE_MASK
+ PAGE_MASK
@@
expression E;
@@
- PAGE_CACHE_ALIGN(E)
+ PAGE_ALIGN(E)
@@
expression E;
@@
- page_cache_get(E)
+ get_page(E)
@@
expression E;
@@
- page_cache_release(E)
+ put_page(E)
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2016-04-01 20:29:47 +08:00
|
|
|
reg->hr_slots_per_page = PAGE_SIZE >> reg->hr_block_bits;
|
2005-12-16 06:31:23 +08:00
|
|
|
reg->hr_timeout_ms = O2HB_REGION_TIMEOUT_MS;
|
|
|
|
|
|
|
|
mlog(ML_HEARTBEAT, "hr_start_block = %llu, hr_blocks = %u\n",
|
|
|
|
reg->hr_start_block, reg->hr_blocks);
|
|
|
|
mlog(ML_HEARTBEAT, "hr_block_bytes = %u, hr_block_bits = %u\n",
|
|
|
|
reg->hr_block_bytes, reg->hr_block_bits);
|
|
|
|
mlog(ML_HEARTBEAT, "hr_timeout_ms = %u\n", reg->hr_timeout_ms);
|
|
|
|
mlog(ML_HEARTBEAT, "dead threshold = %u\n", o2hb_dead_threshold);
|
|
|
|
}
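To make the slots-per-page line above concrete, here is a worked sketch assuming a 4 KiB PAGE_SIZE and 512-byte heartbeat blocks (hr_block_bits == 9):

/*
 * hr_slots_per_page = PAGE_SIZE >> hr_block_bits
 *                   = 4096 >> 9
 *                   = 8 slots per page
 *
 * so a 32-block region (one 512-byte block per node) needs 4 pages.
 */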
|
|
|
|
|
|
|
|
static int o2hb_map_slot_data(struct o2hb_region *reg)
|
|
|
|
{
|
|
|
|
int i, j;
|
|
|
|
unsigned int last_slot;
|
|
|
|
unsigned int spp = reg->hr_slots_per_page;
|
|
|
|
struct page *page;
|
|
|
|
char *raw;
|
|
|
|
struct o2hb_disk_slot *slot;
|
|
|
|
|
|
|
|
reg->hr_tmp_block = kmalloc(reg->hr_block_bytes, GFP_KERNEL);
|
2015-09-05 06:43:46 +08:00
|
|
|
if (reg->hr_tmp_block == NULL)
|
2005-12-16 06:31:23 +08:00
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
reg->hr_slots = kcalloc(reg->hr_blocks,
|
|
|
|
sizeof(struct o2hb_disk_slot), GFP_KERNEL);
|
2015-09-05 06:43:46 +08:00
|
|
|
if (reg->hr_slots == NULL)
|
2005-12-16 06:31:23 +08:00
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
for(i = 0; i < reg->hr_blocks; i++) {
|
|
|
|
slot = &reg->hr_slots[i];
|
|
|
|
slot->ds_node_num = i;
|
|
|
|
INIT_LIST_HEAD(&slot->ds_live_item);
|
|
|
|
slot->ds_raw_block = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
reg->hr_num_pages = (reg->hr_blocks + spp - 1) / spp;
|
|
|
|
mlog(ML_HEARTBEAT, "Going to require %u pages to cover %u blocks "
|
|
|
|
"at %u blocks per page\n",
|
|
|
|
reg->hr_num_pages, reg->hr_blocks, spp);
|
|
|
|
|
|
|
|
reg->hr_slot_data = kcalloc(reg->hr_num_pages, sizeof(struct page *),
|
|
|
|
GFP_KERNEL);
|
2015-09-05 06:43:46 +08:00
|
|
|
if (!reg->hr_slot_data)
|
2005-12-16 06:31:23 +08:00
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
for(i = 0; i < reg->hr_num_pages; i++) {
|
|
|
|
page = alloc_page(GFP_KERNEL);
|
2015-09-05 06:43:46 +08:00
|
|
|
if (!page)
|
2005-12-16 06:31:23 +08:00
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
reg->hr_slot_data[i] = page;
|
|
|
|
|
|
|
|
last_slot = i * spp;
|
|
|
|
raw = page_address(page);
|
|
|
|
for (j = 0;
|
|
|
|
(j < spp) && ((j + last_slot) < reg->hr_blocks);
|
|
|
|
j++) {
|
|
|
|
BUG_ON((j + last_slot) >= reg->hr_blocks);
|
|
|
|
|
|
|
|
slot = &reg->hr_slots[j + last_slot];
|
|
|
|
slot->ds_raw_block =
|
|
|
|
(struct o2hb_disk_heartbeat_block *) raw;
|
|
|
|
|
|
|
|
raw += reg->hr_block_bytes;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
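One hedged note on the page-count line above: (reg->hr_blocks + spp - 1) / spp is the standard round-up division, which kernel code commonly writes with the DIV_ROUND_UP() helper. A sketch of the equivalent expression:

/* equivalent to the open-coded round-up above */
reg->hr_num_pages = DIV_ROUND_UP(reg->hr_blocks, spp);

/* e.g. 255 blocks at 8 slots per page -> (255 + 7) / 8 = 32 pages */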
|
|
|
|
|
|
|
|
/* Read in all the slots available and populate the tracking
|
|
|
|
* structures so that we can start with a baseline idea of what's
|
|
|
|
* there. */
|
|
|
|
static int o2hb_populate_slot_data(struct o2hb_region *reg)
|
|
|
|
{
|
|
|
|
int ret, i;
|
|
|
|
struct o2hb_disk_slot *slot;
|
|
|
|
struct o2hb_disk_heartbeat_block *hb_block;
|
|
|
|
|
2018-12-28 16:32:35 +08:00
|
|
|
ret = o2hb_read_slots(reg, 0, reg->hr_blocks);
|
2015-09-05 06:43:46 +08:00
|
|
|
if (ret)
|
2005-12-16 06:31:23 +08:00
|
|
|
goto out;
|
|
|
|
|
|
|
|
/* We only want to get an idea of the values initially in each
|
|
|
|
* slot, so we do no verification - o2hb_check_slot will
|
|
|
|
* actually determine if each configured slot is valid and
|
|
|
|
* whether any values have changed. */
|
|
|
|
for(i = 0; i < reg->hr_blocks; i++) {
|
|
|
|
slot = &reg->hr_slots[i];
|
|
|
|
hb_block = (struct o2hb_disk_heartbeat_block *) slot->ds_raw_block;
|
|
|
|
|
|
|
|
/* Only fill the values that o2hb_check_slot uses to
|
|
|
|
* determine changing slots */
|
|
|
|
slot->ds_last_time = le64_to_cpu(hb_block->hb_seq);
|
|
|
|
slot->ds_last_generation = le64_to_cpu(hb_block->hb_generation);
|
|
|
|
}
|
|
|
|
|
|
|
|
out:
|
|
|
|
return ret;
|
|
|
|
}
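A brief sketch of why le64_to_cpu() shows up above: the on-disk heartbeat block stores its fields little-endian, so every read converts to CPU byte order. The struct below is an illustrative stand-in, not the real o2hb_disk_heartbeat_block layout:

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_hb_block {
	__le64 hb_seq;		/* little-endian on disk */
	__le64 hb_generation;
};

static inline u64 example_seq(const struct example_hb_block *hb)
{
	return le64_to_cpu(hb->hb_seq);	/* CPU-endian for comparisons */
}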
|
|
|
|
|
|
|
|
/* this is acting as commit; we set up all of hr_bdev and hr_task or nothing */
|
2015-10-03 21:32:58 +08:00
|
|
|
static ssize_t o2hb_region_dev_store(struct config_item *item,
|
2005-12-16 06:31:23 +08:00
|
|
|
const char *page,
|
|
|
|
size_t count)
|
|
|
|
{
|
2015-10-03 21:32:58 +08:00
|
|
|
struct o2hb_region *reg = to_o2hb_region(item);
|
2007-02-03 19:04:20 +08:00
|
|
|
struct task_struct *hb_task;
|
2005-12-16 06:31:23 +08:00
|
|
|
long fd;
|
|
|
|
int sectsize;
|
|
|
|
char *p = (char *)page;
|
2012-08-29 00:52:22 +08:00
|
|
|
struct fd f;
|
2005-12-16 06:31:23 +08:00
|
|
|
ssize_t ret = -EINVAL;
|
2011-05-05 01:28:00 +08:00
|
|
|
int live_threshold;
|
2005-12-16 06:31:23 +08:00
|
|
|
|
|
|
|
if (reg->hr_bdev)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
/* We can't heartbeat without having had our node number
|
|
|
|
* configured yet. */
|
|
|
|
if (o2nm_this_node() == O2NM_MAX_NODES)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
fd = simple_strtol(p, &p, 0);
|
|
|
|
if (!p || (*p && (*p != '\n')))
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (fd < 0 || fd >= INT_MAX)
|
|
|
|
goto out;
|
|
|
|
|
2012-08-29 00:52:22 +08:00
|
|
|
f = fdget(fd);
|
|
|
|
if (f.file == NULL)
|
2005-12-16 06:31:23 +08:00
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (reg->hr_blocks == 0 || reg->hr_start_block == 0 ||
|
|
|
|
reg->hr_block_bytes == 0)
|
2012-08-29 00:52:22 +08:00
|
|
|
goto out2;
|
2005-12-16 06:31:23 +08:00
|
|
|
|
2020-09-21 15:19:53 +08:00
|
|
|
if (!S_ISBLK(f.file->f_mapping->host->i_mode))
|
2012-08-29 00:52:22 +08:00
|
|
|
goto out2;
|
2005-12-16 06:31:23 +08:00
|
|
|
|
2020-09-21 15:19:53 +08:00
|
|
|
reg->hr_bdev = blkdev_get_by_dev(f.file->f_mapping->host->i_rdev,
|
|
|
|
FMODE_WRITE | FMODE_READ, NULL);
|
|
|
|
if (IS_ERR(reg->hr_bdev)) {
|
|
|
|
ret = PTR_ERR(reg->hr_bdev);
|
2005-12-16 06:31:23 +08:00
|
|
|
reg->hr_bdev = NULL;
|
2020-09-21 15:19:53 +08:00
|
|
|
goto out2;
|
2005-12-16 06:31:23 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
bdevname(reg->hr_bdev, reg->hr_dev_name);
|
|
|
|
|
2009-05-23 05:17:49 +08:00
|
|
|
sectsize = bdev_logical_block_size(reg->hr_bdev);
|
2005-12-16 06:31:23 +08:00
|
|
|
if (sectsize != reg->hr_block_bytes) {
|
|
|
|
mlog(ML_ERROR,
|
|
|
|
"blocksize %u incorrect for device, expected %d",
|
|
|
|
reg->hr_block_bytes, sectsize);
|
|
|
|
ret = -EINVAL;
|
2012-08-29 00:52:22 +08:00
|
|
|
goto out3;
|
2005-12-16 06:31:23 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
o2hb_init_region_params(reg);
|
|
|
|
|
|
|
|
/* Generation of zero is invalid */
|
|
|
|
do {
|
|
|
|
get_random_bytes(&reg->hr_generation,
|
|
|
|
sizeof(reg->hr_generation));
|
|
|
|
} while (reg->hr_generation == 0);
|
|
|
|
|
|
|
|
ret = o2hb_map_slot_data(reg);
|
|
|
|
if (ret) {
|
|
|
|
mlog_errno(ret);
|
2012-08-29 00:52:22 +08:00
|
|
|
goto out3;
|
2005-12-16 06:31:23 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
ret = o2hb_populate_slot_data(reg);
|
|
|
|
if (ret) {
|
|
|
|
mlog_errno(ret);
|
2012-08-29 00:52:22 +08:00
|
|
|
goto out3;
|
2005-12-16 06:31:23 +08:00
|
|
|
}
|
|
|
|
|
2006-11-22 22:57:56 +08:00
|
|
|
INIT_DELAYED_WORK(&reg->hr_write_timeout_work, o2hb_write_timeout);
|
ocfs2: o2hb: add negotiate timer
This series of patches fixes the issue that, when storage goes down, all
nodes fence themselves due to write timeout.
With this patch set, all nodes will keep going until storage comes back
online, except that if any of the following happens, all nodes will fence
themselves as before:
1. an io error is returned
2. the network between nodes goes down
3. a node panics
This patch (of 6):
When storage goes down, all nodes fence themselves due to write timeout.
The negotiate timer is designed to avoid this; with it, a node will wait
until storage comes back up.
The negotiate timer works in the following way:
1. The timer expires before the write timeout timer; its timeout is half
of the write timeout. It is re-queued along with the write timeout timer.
If it expires, it sends a NEGO_TIMEOUT message to the master node (the
node with the lowest node number). This message does nothing but mark a
bit in a bitmap recording which nodes are negotiating timeout on the
master node.
2. If storage goes down, nodes send this message to the master node; when
the master node finds that its bitmap includes all online nodes, it sends
a NEGO_APPROVE message to all nodes one by one. This message re-queues
the write timeout timer and the negotiate timer. Any node that doesn't
receive this message, or hits an issue while handling it, will be fenced.
If storage comes back up at any time, o2hb_thread will run and re-queue
all the timers, and nothing is affected by these two steps.
Signed-off-by: Junxiao Bi <junxiao.bi@oracle.com>
Reviewed-by: Ryan Ding <ryan.ding@oracle.com>
Reviewed-by: Mark Fasheh <mfasheh@suse.de>
Cc: Gang He <ghe@suse.com>
Cc: rwxybh <rwxybh@126.com>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Joseph Qi <joseph.qi@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2016-05-28 05:26:58 +08:00
|
|
|
INIT_DELAYED_WORK(&reg->hr_nego_timeout_work, o2hb_nego_timeout);
|
2005-12-16 06:31:23 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* A node is considered live after it has beat LIVE_THRESHOLD
|
|
|
|
* times. We're not steady until we've given them a chance
|
|
|
|
* _after_ our first read.
|
2011-05-05 01:28:00 +08:00
|
|
|
* The default threshold is the bare minimum so as to limit the delay
|
|
|
|
* during mounts. For global heartbeat, the threshold is doubled for the
|
|
|
|
* first region.
|
2005-12-16 06:31:23 +08:00
|
|
|
*/
|
2011-05-05 01:28:00 +08:00
|
|
|
live_threshold = O2HB_LIVE_THRESHOLD;
|
|
|
|
if (o2hb_global_heartbeat_active()) {
|
|
|
|
spin_lock(&o2hb_live_lock);
|
2013-11-13 07:06:58 +08:00
|
|
|
if (bitmap_weight(o2hb_region_bitmap, O2NM_MAX_REGIONS) == 1)
|
2011-05-05 01:28:00 +08:00
|
|
|
live_threshold <<= 1;
|
|
|
|
spin_unlock(&o2hb_live_lock);
|
|
|
|
}
|
2011-07-25 01:21:54 +08:00
|
|
|
++live_threshold;
|
|
|
|
atomic_set(&reg->hr_steady_iterations, live_threshold);
|
2016-01-15 07:17:15 +08:00
|
|
|
/* unsteady_iterations is triple the steady_iterations */
|
|
|
|
atomic_set(&reg->hr_unsteady_iterations, (live_threshold * 3));
|
2005-12-16 06:31:23 +08:00
|
|
|
|
2007-02-03 19:04:20 +08:00
|
|
|
hb_task = kthread_run(o2hb_thread, reg, "o2hb-%s",
|
|
|
|
reg->hr_item.ci_name);
|
|
|
|
if (IS_ERR(hb_task)) {
|
|
|
|
ret = PTR_ERR(hb_task);
|
2005-12-16 06:31:23 +08:00
|
|
|
mlog_errno(ret);
|
2012-08-29 00:52:22 +08:00
|
|
|
goto out3;
|
2005-12-16 06:31:23 +08:00
|
|
|
}
|
|
|
|
|
2007-02-03 19:04:20 +08:00
|
|
|
spin_lock(&o2hb_live_lock);
|
|
|
|
reg->hr_task = hb_task;
|
|
|
|
spin_unlock(&o2hb_live_lock);
|
|
|
|
|
2005-12-16 06:31:23 +08:00
|
|
|
ret = wait_event_interruptible(o2hb_steady_queue,
|
2015-11-06 10:44:07 +08:00
|
|
|
atomic_read(&reg->hr_steady_iterations) == 0 ||
|
|
|
|
reg->hr_node_deleted);
|
2005-12-16 06:31:23 +08:00
|
|
|
if (ret) {
|
2011-07-25 01:21:54 +08:00
|
|
|
atomic_set(&reg->hr_steady_iterations, 0);
|
|
|
|
reg->hr_aborted_start = 1;
|
|
|
|
}
|
2007-02-03 19:04:20 +08:00
|
|
|
|
2011-07-25 01:21:54 +08:00
|
|
|
if (reg->hr_aborted_start) {
|
|
|
|
ret = -EIO;
|
2012-08-29 00:52:22 +08:00
|
|
|
goto out3;
|
2005-12-16 06:31:23 +08:00
|
|
|
}
|
|
|
|
|
2015-11-06 10:44:07 +08:00
|
|
|
if (reg->hr_node_deleted) {
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto out3;
|
|
|
|
}
|
|
|
|
|
2007-02-07 07:45:39 +08:00
|
|
|
/* Ok, we were woken. Make sure it wasn't by drop_item() */
|
|
|
|
spin_lock(&o2hb_live_lock);
|
|
|
|
hb_task = reg->hr_task;
|
2010-10-07 08:55:18 +08:00
|
|
|
if (o2hb_global_heartbeat_active())
|
|
|
|
set_bit(reg->hr_region_num, o2hb_live_region_bitmap);
|
2007-02-07 07:45:39 +08:00
|
|
|
spin_unlock(&o2hb_live_lock);
|
|
|
|
|
|
|
|
if (hb_task)
|
|
|
|
ret = count;
|
|
|
|
else
|
|
|
|
ret = -EIO;
|
|
|
|
|
2010-10-07 09:26:59 +08:00
|
|
|
if (hb_task && o2hb_global_heartbeat_active())
|
2011-07-25 01:21:54 +08:00
|
|
|
printk(KERN_NOTICE "o2hb: Heartbeat started on region %s (%s)\n",
|
|
|
|
config_item_name(&reg->hr_item), reg->hr_dev_name);
|
2010-10-07 09:26:59 +08:00
|
|
|
|
2012-08-29 00:52:22 +08:00
|
|
|
out3:
|
2020-09-21 15:19:53 +08:00
|
|
|
if (ret < 0) {
|
|
|
|
blkdev_put(reg->hr_bdev, FMODE_READ | FMODE_WRITE);
|
|
|
|
reg->hr_bdev = NULL;
|
|
|
|
}
|
2012-08-29 00:52:22 +08:00
|
|
|
out2:
|
|
|
|
fdput(f);
|
2005-12-16 06:31:23 +08:00
|
|
|
out:
|
|
|
|
return ret;
|
|
|
|
}
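For orientation, a hedged userspace sketch of how a cluster tool drives this store handler (the configfs region path is illustrative, not mandated by this file): open the block device, then write the decimal file descriptor into the region's dev attribute. The kernel takes its own reference on the bdev, so the caller may close its fd afterwards.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int example_start_region(const char *region_dir, const char *blkdev)
{
	char attr[256], buf[16];
	int devfd, attrfd, ret = -1;

	devfd = open(blkdev, O_RDWR);
	if (devfd < 0)
		return -1;

	snprintf(attr, sizeof(attr), "%s/dev", region_dir);
	attrfd = open(attr, O_WRONLY);
	if (attrfd >= 0) {
		snprintf(buf, sizeof(buf), "%d\n", devfd);
		if (write(attrfd, buf, strlen(buf)) == (ssize_t)strlen(buf))
			ret = 0;	/* heartbeat thread is now running */
		close(attrfd);
	}
	close(devfd);	/* kernel holds its own bdev reference */
	return ret;
}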
|
|
|
|
|
2015-10-03 21:32:58 +08:00
|
|
|
static ssize_t o2hb_region_pid_show(struct config_item *item, char *page)
|
2006-12-08 15:48:17 +08:00
|
|
|
{
|
2015-10-03 21:32:58 +08:00
|
|
|
struct o2hb_region *reg = to_o2hb_region(item);
|
2007-02-03 19:04:20 +08:00
|
|
|
pid_t pid = 0;
|
|
|
|
|
|
|
|
spin_lock(&o2hb_live_lock);
|
|
|
|
if (reg->hr_task)
|
2007-10-19 14:40:40 +08:00
|
|
|
pid = task_pid_nr(reg->hr_task);
|
2007-02-03 19:04:20 +08:00
|
|
|
spin_unlock(&o2hb_live_lock);
|
|
|
|
|
|
|
|
if (!pid)
|
2006-12-08 15:48:17 +08:00
|
|
|
return 0;
|
|
|
|
|
2007-02-03 19:04:20 +08:00
|
|
|
return sprintf(page, "%u\n", pid);
|
2006-12-08 15:48:17 +08:00
|
|
|
}
|
|
|
|
|
2015-10-03 21:32:58 +08:00
|
|
|
CONFIGFS_ATTR(o2hb_region_, block_bytes);
|
|
|
|
CONFIGFS_ATTR(o2hb_region_, start_block);
|
|
|
|
CONFIGFS_ATTR(o2hb_region_, blocks);
|
|
|
|
CONFIGFS_ATTR(o2hb_region_, dev);
|
|
|
|
CONFIGFS_ATTR_RO(o2hb_region_, pid);
|
2006-12-08 15:48:17 +08:00
|
|
|
|
2005-12-16 06:31:23 +08:00
|
|
|
static struct configfs_attribute *o2hb_region_attrs[] = {
|
2015-10-03 21:32:58 +08:00
|
|
|
&o2hb_region_attr_block_bytes,
|
|
|
|
&o2hb_region_attr_start_block,
|
|
|
|
&o2hb_region_attr_blocks,
|
|
|
|
&o2hb_region_attr_dev,
|
|
|
|
&o2hb_region_attr_pid,
|
2005-12-16 06:31:23 +08:00
|
|
|
NULL,
|
|
|
|
};
|
|
|
|
|
|
|
|
static struct configfs_item_operations o2hb_region_item_ops = {
|
|
|
|
.release = o2hb_region_release,
|
|
|
|
};
|
|
|
|
|
2017-10-16 23:18:44 +08:00
|
|
|
static const struct config_item_type o2hb_region_type = {
|
2005-12-16 06:31:23 +08:00
|
|
|
.ct_item_ops = &o2hb_region_item_ops,
|
|
|
|
.ct_attrs = o2hb_region_attrs,
|
|
|
|
.ct_owner = THIS_MODULE,
|
|
|
|
};
|
|
|
|
|
|
|
|
/* heartbeat set */
|
|
|
|
|
|
|
|
struct o2hb_heartbeat_group {
|
|
|
|
struct config_group hs_group;
|
|
|
|
/* some stuff? */
|
|
|
|
};
|
|
|
|
|
|
|
|
static struct o2hb_heartbeat_group *to_o2hb_heartbeat_group(struct config_group *group)
|
|
|
|
{
|
|
|
|
return group ?
|
|
|
|
container_of(group, struct o2hb_heartbeat_group, hs_group)
|
|
|
|
: NULL;
|
|
|
|
}
|
|
|
|
|
2019-09-24 06:33:15 +08:00
|
|
|
static void o2hb_debug_region_init(struct o2hb_region *reg,
|
|
|
|
struct dentry *parent)
|
2010-10-07 08:55:12 +08:00
|
|
|
{
|
2019-09-24 06:33:15 +08:00
|
|
|
struct dentry *dir;
|
2010-10-07 08:55:12 +08:00
|
|
|
|
2019-09-24 06:33:15 +08:00
|
|
|
dir = debugfs_create_dir(config_item_name(&reg->hr_item), parent);
|
|
|
|
reg->hr_debug_dir = dir;
|
2010-10-07 08:55:12 +08:00
|
|
|
|
2019-09-24 06:33:15 +08:00
|
|
|
o2hb_debug_create(O2HB_DEBUG_LIVENODES, dir, &(reg->hr_db_livenodes),
|
|
|
|
sizeof(*(reg->hr_db_livenodes)),
|
|
|
|
O2HB_DB_TYPE_REGION_LIVENODES,
|
|
|
|
sizeof(reg->hr_live_node_bitmap), O2NM_MAX_NODES,
|
|
|
|
reg);
|
2010-10-07 08:55:12 +08:00
|
|
|
|
2019-09-24 06:33:15 +08:00
|
|
|
o2hb_debug_create(O2HB_DEBUG_REGION_NUMBER, dir, &(reg->hr_db_regnum),
|
|
|
|
sizeof(*(reg->hr_db_regnum)),
|
|
|
|
O2HB_DB_TYPE_REGION_NUMBER, 0, O2NM_MAX_NODES, reg);
|
2010-10-07 08:55:12 +08:00
|
|
|
|
2019-09-24 06:33:15 +08:00
|
|
|
o2hb_debug_create(O2HB_DEBUG_REGION_ELAPSED_TIME, dir,
|
|
|
|
&(reg->hr_db_elapsed_time),
|
|
|
|
sizeof(*(reg->hr_db_elapsed_time)),
|
|
|
|
O2HB_DB_TYPE_REGION_ELAPSED_TIME, 0, 0, reg);
|
2010-10-07 08:55:09 +08:00
|
|
|
|
2019-09-24 06:33:15 +08:00
|
|
|
o2hb_debug_create(O2HB_DEBUG_REGION_PINNED, dir, &(reg->hr_db_pinned),
|
|
|
|
sizeof(*(reg->hr_db_pinned)),
|
|
|
|
O2HB_DB_TYPE_REGION_PINNED, 0, 0, reg);
|
2010-12-15 06:14:30 +08:00
|
|
|
|
2010-10-07 08:55:12 +08:00
|
|
|
}
|
|
|
|
|
2008-07-18 05:53:48 +08:00
|
|
|
static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *group,
|
|
|
|
const char *name)
|
2005-12-16 06:31:23 +08:00
|
|
|
{
|
|
|
|
struct o2hb_region *reg = NULL;
|
2010-10-07 08:55:12 +08:00
|
|
|
int ret;
|
2005-12-16 06:31:23 +08:00
|
|
|
|
2006-12-13 16:34:52 +08:00
|
|
|
reg = kzalloc(sizeof(struct o2hb_region), GFP_KERNEL);
|
2008-07-18 05:53:48 +08:00
|
|
|
if (reg == NULL)
|
2008-07-18 06:21:29 +08:00
|
|
|
return ERR_PTR(-ENOMEM);
|
2005-12-16 06:31:23 +08:00
|
|
|
|
2010-11-06 17:06:52 +08:00
|
|
|
if (strlen(name) > O2HB_MAX_REGION_NAME_LEN) {
|
|
|
|
ret = -ENAMETOOLONG;
|
|
|
|
goto free;
|
|
|
|
}
|
2010-10-08 05:31:06 +08:00
|
|
|
|
2005-12-16 06:31:23 +08:00
|
|
|
spin_lock(&o2hb_live_lock);
|
2010-10-08 08:03:07 +08:00
|
|
|
reg->hr_region_num = 0;
|
|
|
|
if (o2hb_global_heartbeat_active()) {
|
|
|
|
reg->hr_region_num = find_first_zero_bit(o2hb_region_bitmap,
|
|
|
|
O2NM_MAX_REGIONS);
|
|
|
|
if (reg->hr_region_num >= O2NM_MAX_REGIONS) {
|
|
|
|
spin_unlock(&o2hb_live_lock);
|
2010-11-06 17:06:52 +08:00
|
|
|
ret = -EFBIG;
|
|
|
|
goto free;
|
2010-10-08 08:03:07 +08:00
|
|
|
}
|
|
|
|
set_bit(reg->hr_region_num, o2hb_region_bitmap);
|
|
|
|
}
|
2005-12-16 06:31:23 +08:00
|
|
|
list_add_tail(&reg->hr_all_item, &o2hb_all_regions);
|
|
|
|
spin_unlock(&o2hb_live_lock);
|
|
|
|
|
2010-10-08 08:03:07 +08:00
|
|
|
config_item_init_type_name(&reg->hr_item, name, &o2hb_region_type);
|
|
|
|
|
2016-05-28 05:27:01 +08:00
|
|
|
/* this generates the msg key the same way as the dlm does; for local
|
|
|
|
* heartbeat, the name is also the same, so make the initial crc value
|
|
|
|
* different to avoid a message key conflict.
|
|
|
|
*/
|
|
|
|
reg->hr_key = crc32_le(reg->hr_region_num + O2NM_MAX_REGIONS,
|
|
|
|
name, strlen(name));
|
|
|
|
INIT_LIST_HEAD(&reg->hr_handler_list);
|
|
|
|
ret = o2net_register_handler(O2HB_NEGO_TIMEOUT_MSG, reg->hr_key,
|
|
|
|
sizeof(struct o2hb_nego_msg),
|
|
|
|
o2hb_nego_timeout_handler,
|
|
|
|
reg, NULL, &reg->hr_handler_list);
|
|
|
|
if (ret)
|
2021-02-25 04:00:41 +08:00
|
|
|
goto remove_item;
|
2016-05-28 05:27:01 +08:00
|
|
|
|
2016-05-28 05:27:04 +08:00
|
|
|
ret = o2net_register_handler(O2HB_NEGO_APPROVE_MSG, reg->hr_key,
|
|
|
|
sizeof(struct o2hb_nego_msg),
|
|
|
|
o2hb_nego_approve_handler,
|
|
|
|
reg, NULL, &reg->hr_handler_list);
|
|
|
|
if (ret)
|
|
|
|
goto unregister_handler;
|
|
|
|
|
2019-09-24 06:33:15 +08:00
|
|
|
o2hb_debug_region_init(reg, o2hb_debug_dir);
|
2010-10-07 08:55:12 +08:00
|
|
|
|
2008-07-18 06:21:29 +08:00
|
|
|
return &reg->hr_item;
|
2016-05-28 05:27:01 +08:00
|
|
|
|
|
|
|
unregister_handler:
|
|
|
|
o2net_unregister_handler_list(&reg->hr_handler_list);
|
2021-02-25 04:00:41 +08:00
|
|
|
remove_item:
|
|
|
|
spin_lock(&o2hb_live_lock);
|
|
|
|
list_del(&reg->hr_all_item);
|
|
|
|
if (o2hb_global_heartbeat_active())
|
|
|
|
clear_bit(reg->hr_region_num, o2hb_region_bitmap);
|
|
|
|
spin_unlock(&o2hb_live_lock);
|
2010-11-06 17:06:52 +08:00
|
|
|
free:
|
|
|
|
kfree(reg);
|
|
|
|
return ERR_PTR(ret);
|
2005-12-16 06:31:23 +08:00
|
|
|
}
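The function above uses the classic goto-unwind error handling: each label undoes only the steps that had already succeeded, in reverse order. A distilled sketch of the shape, with hypothetical step_a()/step_b() helpers:

static int example_setup(void)
{
	int ret;

	ret = step_a();
	if (ret)
		goto out;		/* nothing to undo yet */

	ret = step_b();
	if (ret)
		goto undo_a;		/* undo only what succeeded */

	return 0;

undo_a:
	undo_step_a();
out:
	return ret;
}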
|
|
|
|
|
|
|
|
static void o2hb_heartbeat_group_drop_item(struct config_group *group,
|
|
|
|
struct config_item *item)
|
|
|
|
{
|
2007-02-03 19:04:20 +08:00
|
|
|
struct task_struct *hb_task;
|
2005-12-16 06:31:23 +08:00
|
|
|
struct o2hb_region *reg = to_o2hb_region(item);
|
2010-12-15 06:14:29 +08:00
|
|
|
int quorum_region = 0;
|
2005-12-16 06:31:23 +08:00
|
|
|
|
|
|
|
/* stop the thread when the user removes the region dir */
|
2007-02-03 19:04:20 +08:00
|
|
|
spin_lock(&o2hb_live_lock);
|
|
|
|
hb_task = reg->hr_task;
|
|
|
|
reg->hr_task = NULL;
|
2010-12-15 06:14:29 +08:00
|
|
|
reg->hr_item_dropped = 1;
|
2007-02-03 19:04:20 +08:00
|
|
|
spin_unlock(&o2hb_live_lock);
|
|
|
|
|
|
|
|
if (hb_task)
|
|
|
|
kthread_stop(hb_task);
|
2005-12-16 06:31:23 +08:00
|
|
|
|
2011-07-25 01:21:54 +08:00
|
|
|
if (o2hb_global_heartbeat_active()) {
|
|
|
|
spin_lock(&o2hb_live_lock);
|
|
|
|
clear_bit(reg->hr_region_num, o2hb_region_bitmap);
|
|
|
|
clear_bit(reg->hr_region_num, o2hb_live_region_bitmap);
|
|
|
|
if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
|
|
|
|
quorum_region = 1;
|
|
|
|
clear_bit(reg->hr_region_num, o2hb_quorum_region_bitmap);
|
|
|
|
spin_unlock(&o2hb_live_lock);
|
|
|
|
printk(KERN_NOTICE "o2hb: Heartbeat %s on region %s (%s)\n",
|
|
|
|
((atomic_read(&reg->hr_steady_iterations) == 0) ?
|
|
|
|
"stopped" : "start aborted"), config_item_name(item),
|
|
|
|
reg->hr_dev_name);
|
|
|
|
}
|
|
|
|
|
2007-02-07 07:45:39 +08:00
|
|
|
/*
|
|
|
|
* If we're racing a dev_write(), we need to wake them. They will
|
|
|
|
* check reg->hr_task
|
|
|
|
*/
|
|
|
|
if (atomic_read(&reg->hr_steady_iterations) != 0) {
|
2011-07-25 01:21:54 +08:00
|
|
|
reg->hr_aborted_start = 1;
|
2007-02-07 07:45:39 +08:00
|
|
|
atomic_set(&reg->hr_steady_iterations, 0);
|
|
|
|
wake_up(&o2hb_steady_queue);
|
|
|
|
}
|
|
|
|
|
2005-12-16 06:31:23 +08:00
|
|
|
config_item_put(item);
|
2010-12-15 06:14:29 +08:00
|
|
|
|
|
|
|
if (!o2hb_global_heartbeat_active() || !quorum_region)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If global heartbeat is active and there are dependent users,
|
|
|
|
* pin all regions if quorum region count <= CUT_OFF
|
|
|
|
*/
|
|
|
|
spin_lock(&o2hb_live_lock);
|
|
|
|
|
|
|
|
if (!o2hb_dependent_users)
|
|
|
|
goto unlock;
|
|
|
|
|
2013-11-13 07:06:58 +08:00
|
|
|
if (bitmap_weight(o2hb_quorum_region_bitmap,
|
2010-12-15 06:14:29 +08:00
|
|
|
O2NM_MAX_REGIONS) <= O2HB_PIN_CUT_OFF)
|
|
|
|
o2hb_region_pin(NULL);
|
|
|
|
|
|
|
|
unlock:
|
|
|
|
spin_unlock(&o2hb_live_lock);
|
2005-12-16 06:31:23 +08:00
|
|
|
}
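One pattern worth flagging in the drop handler above: reg->hr_task is snapshotted and cleared under o2hb_live_lock, but kthread_stop() runs only after the lock is dropped, because kthread_stop() sleeps until the thread exits and sleeping under a spinlock is forbidden. A distilled sketch of that idiom:

#include <linux/kthread.h>
#include <linux/spinlock.h>

static void example_stop_thread(struct task_struct **taskp, spinlock_t *lock)
{
	struct task_struct *task;

	spin_lock(lock);	/* snapshot and clear under the lock */
	task = *taskp;
	*taskp = NULL;
	spin_unlock(lock);	/* must not sleep while holding it */

	if (task)
		kthread_stop(task);	/* may sleep */
}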
|
|
|
|
|
2017-05-04 05:51:41 +08:00
|
|
|
static ssize_t o2hb_heartbeat_group_dead_threshold_show(struct config_item *item,
|
2015-10-03 21:32:58 +08:00
|
|
|
char *page)
|
2005-12-16 06:31:23 +08:00
|
|
|
{
|
|
|
|
return sprintf(page, "%u\n", o2hb_dead_threshold);
|
|
|
|
}
|
|
|
|
|
2017-05-04 05:51:41 +08:00
|
|
|
static ssize_t o2hb_heartbeat_group_dead_threshold_store(struct config_item *item,
|
2015-10-03 21:32:58 +08:00
|
|
|
const char *page, size_t count)
|
2005-12-16 06:31:23 +08:00
|
|
|
{
|
|
|
|
unsigned long tmp;
|
|
|
|
char *p = (char *)page;
|
|
|
|
|
|
|
|
tmp = simple_strtoul(p, &p, 10);
|
|
|
|
if (!p || (*p && (*p != '\n')))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
/* this will validate ranges for us. */
|
|
|
|
o2hb_dead_threshold_set((unsigned int) tmp);
|
|
|
|
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
2015-10-03 21:32:58 +08:00
|
|
|
static ssize_t o2hb_heartbeat_group_mode_show(struct config_item *item,
|
|
|
|
char *page)
|
2010-10-08 06:26:08 +08:00
|
|
|
{
|
|
|
|
return sprintf(page, "%s\n",
|
|
|
|
o2hb_heartbeat_mode_desc[o2hb_heartbeat_mode]);
|
|
|
|
}
|
|
|
|
|
2015-10-03 21:32:58 +08:00
|
|
|
static ssize_t o2hb_heartbeat_group_mode_store(struct config_item *item,
|
|
|
|
const char *page, size_t count)
|
2010-10-08 06:26:08 +08:00
|
|
|
{
|
|
|
|
unsigned int i;
|
|
|
|
int ret;
|
|
|
|
size_t len;
|
|
|
|
|
|
|
|
len = (page[count - 1] == '\n') ? count - 1 : count;
|
|
|
|
if (!len)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
for (i = 0; i < O2HB_HEARTBEAT_NUM_MODES; ++i) {
|
2014-10-14 06:54:37 +08:00
|
|
|
if (strncasecmp(page, o2hb_heartbeat_mode_desc[i], len))
|
2010-10-08 06:26:08 +08:00
|
|
|
continue;
|
|
|
|
|
2013-07-04 06:01:06 +08:00
|
|
|
ret = o2hb_global_heartbeat_mode_set(i);
|
2010-10-08 06:26:08 +08:00
|
|
|
if (!ret)
|
2010-10-07 09:26:59 +08:00
|
|
|
printk(KERN_NOTICE "o2hb: Heartbeat mode set to %s\n",
|
2010-10-08 06:26:08 +08:00
|
|
|
o2hb_heartbeat_mode_desc[i]);
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
}
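A hedged usage note for the two attributes defined here (the configfs mount point below is the conventional default, not something this file sets):

/*
 *	# echo 31 > /sys/kernel/config/cluster/<name>/heartbeat/dead_threshold
 *	# echo global > /sys/kernel/config/cluster/<name>/heartbeat/mode
 *
 * Both store handlers tolerate the trailing newline that echo appends.
 */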
|
|
|
|
|
2017-05-04 05:51:41 +08:00
|
|
|
CONFIGFS_ATTR(o2hb_heartbeat_group_, dead_threshold);
|
2015-10-03 21:32:58 +08:00
|
|
|
CONFIGFS_ATTR(o2hb_heartbeat_group_, mode);
|
2010-10-08 06:26:08 +08:00
|
|
|
|
2005-12-16 06:31:23 +08:00
|
|
|
static struct configfs_attribute *o2hb_heartbeat_group_attrs[] = {
|
2017-05-04 05:51:41 +08:00
|
|
|
&o2hb_heartbeat_group_attr_dead_threshold,
|
2015-10-03 21:32:58 +08:00
|
|
|
&o2hb_heartbeat_group_attr_mode,
|
2005-12-16 06:31:23 +08:00
|
|
|
NULL,
|
|
|
|
};
|
|
|
|
|
|
|
|
static struct configfs_group_operations o2hb_heartbeat_group_group_ops = {
|
|
|
|
.make_item = o2hb_heartbeat_group_make_item,
|
|
|
|
.drop_item = o2hb_heartbeat_group_drop_item,
|
|
|
|
};
|
|
|
|
|
2017-10-16 23:18:44 +08:00
|
|
|
static const struct config_item_type o2hb_heartbeat_group_type = {
|
2005-12-16 06:31:23 +08:00
|
|
|
.ct_group_ops = &o2hb_heartbeat_group_group_ops,
|
|
|
|
.ct_attrs = o2hb_heartbeat_group_attrs,
|
|
|
|
.ct_owner = THIS_MODULE,
|
|
|
|
};
|
|
|
|
|
|
|
|
/* this is just here to avoid touching group in heartbeat.h which the
|
|
|
|
* entire damn world #includes */
|
|
|
|
struct config_group *o2hb_alloc_hb_set(void)
|
|
|
|
{
|
|
|
|
struct o2hb_heartbeat_group *hs = NULL;
|
|
|
|
struct config_group *ret = NULL;
|
|
|
|
|
2006-12-13 16:34:52 +08:00
|
|
|
hs = kzalloc(sizeof(struct o2hb_heartbeat_group), GFP_KERNEL);
|
2005-12-16 06:31:23 +08:00
|
|
|
if (hs == NULL)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
config_group_init_type_name(&hs->hs_group, "heartbeat",
|
|
|
|
&o2hb_heartbeat_group_type);
|
|
|
|
|
|
|
|
ret = &hs->hs_group;
|
|
|
|
out:
|
|
|
|
if (ret == NULL)
|
|
|
|
kfree(hs);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
void o2hb_free_hb_set(struct config_group *group)
|
|
|
|
{
|
|
|
|
struct o2hb_heartbeat_group *hs = to_o2hb_heartbeat_group(group);
|
|
|
|
kfree(hs);
|
|
|
|
}
|
|
|
|
|
2011-03-31 09:57:33 +08:00
|
|
|
/* hb callback registration and issuing */
|
2005-12-16 06:31:23 +08:00
|
|
|
|
|
|
|
static struct o2hb_callback *hbcall_from_type(enum o2hb_callback_type type)
|
|
|
|
{
|
|
|
|
if (type == O2HB_NUM_CB)
|
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
|
|
|
|
return &o2hb_callbacks[type];
|
|
|
|
}
|
|
|
|
|
|
|
|
void o2hb_setup_callback(struct o2hb_callback_func *hc,
|
|
|
|
enum o2hb_callback_type type,
|
|
|
|
o2hb_cb_func *func,
|
|
|
|
void *data,
|
|
|
|
int priority)
|
|
|
|
{
|
|
|
|
INIT_LIST_HEAD(&hc->hc_item);
|
|
|
|
hc->hc_func = func;
|
|
|
|
hc->hc_data = data;
|
|
|
|
hc->hc_priority = priority;
|
|
|
|
hc->hc_type = type;
|
|
|
|
hc->hc_magic = O2HB_CB_MAGIC;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(o2hb_setup_callback);
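A hedged caller-side sketch of this setup/register pairing (the callback signature follows the o2hb_cb_func typedef; the names here are hypothetical): initialize the descriptor once, register it against a region uuid with o2hb_register_callback() defined below, and later unregister with the same uuid.

static void example_node_up(struct o2nm_node *node, int idx, void *data)
{
	/* react to a node becoming live */
}

static struct o2hb_callback_func example_cb;

static int example_attach(const char *region_uuid)
{
	o2hb_setup_callback(&example_cb, O2HB_NODE_UP_CB,
			    example_node_up, NULL, 0);
	return o2hb_register_callback(region_uuid, &example_cb);
}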
|
|
|
|
|
2010-12-15 06:14:29 +08:00
|
|
|
/*
|
|
|
|
* In local heartbeat mode, region_uuid passed matches the dlm domain name.
|
|
|
|
* In global heartbeat mode, region_uuid passed is NULL.
|
|
|
|
*
|
|
|
|
* In local, we only pin the matching region. In global we pin all the active
|
|
|
|
* regions.
|
|
|
|
*/
|
|
|
|
static int o2hb_region_pin(const char *region_uuid)
|
2007-06-15 12:40:49 +08:00
|
|
|
{
|
2010-12-15 06:14:29 +08:00
|
|
|
int ret = 0, found = 0;
|
|
|
|
struct o2hb_region *reg;
|
|
|
|
char *uuid;
|
2007-06-15 12:40:49 +08:00
|
|
|
|
|
|
|
assert_spin_locked(&o2hb_live_lock);
|
|
|
|
|
2010-12-15 06:14:29 +08:00
|
|
|
list_for_each_entry(reg, &o2hb_all_regions, hr_all_item) {
|
2013-07-04 06:01:10 +08:00
|
|
|
if (reg->hr_item_dropped)
|
|
|
|
continue;
|
|
|
|
|
2010-12-15 06:14:29 +08:00
|
|
|
uuid = config_item_name(&reg->hr_item);
|
|
|
|
|
|
|
|
/* local heartbeat */
|
|
|
|
if (region_uuid) {
|
|
|
|
if (strcmp(region_uuid, uuid))
|
|
|
|
continue;
|
|
|
|
found = 1;
|
2007-06-15 12:40:49 +08:00
|
|
|
}
|
2010-12-15 06:14:29 +08:00
|
|
|
|
|
|
|
if (reg->hr_item_pinned || reg->hr_item_dropped)
|
|
|
|
goto skip_pin;
|
|
|
|
|
|
|
|
/* Ignore ENOENT only for local hb (userdlm domain) */
|
|
|
|
ret = o2nm_depend_item(&reg->hr_item);
|
|
|
|
if (!ret) {
|
|
|
|
mlog(ML_CLUSTER, "Pin region %s\n", uuid);
|
|
|
|
reg->hr_item_pinned = 1;
|
|
|
|
} else {
|
|
|
|
if (ret == -ENOENT && found)
|
|
|
|
ret = 0;
|
|
|
|
else {
|
|
|
|
mlog(ML_ERROR, "Pin region %s fails with %d\n",
|
|
|
|
uuid, ret);
|
|
|
|
break;
|
|
|
|
}
|
2007-06-15 12:40:49 +08:00
|
|
|
}
|
2010-12-15 06:14:29 +08:00
|
|
|
skip_pin:
|
|
|
|
if (found)
|
|
|
|
break;
|
2007-06-15 12:40:49 +08:00
|
|
|
}
|
|
|
|
|
2010-12-15 06:14:29 +08:00
|
|
|
return ret;
|
2007-06-15 12:40:49 +08:00
|
|
|
}
|
|
|
|
|
2010-12-15 06:14:29 +08:00
|
|
|
/*
|
|
|
|
* In local heartbeat mode, region_uuid passed matches the dlm domain name.
|
|
|
|
* In global heartbeat mode, region_uuid passed is NULL.
|
|
|
|
*
|
|
|
|
* In local, we only unpin the matching region. In global we unpin all the
|
|
|
|
* active regions.
|
|
|
|
*/
|
|
|
|
static void o2hb_region_unpin(const char *region_uuid)
|
2007-06-15 12:40:49 +08:00
|
|
|
{
|
|
|
|
struct o2hb_region *reg;
|
2010-12-15 06:14:29 +08:00
|
|
|
char *uuid;
|
|
|
|
int found = 0;
|
2007-06-15 12:40:49 +08:00
|
|
|
|
2010-12-15 06:14:29 +08:00
|
|
|
assert_spin_locked(&o2hb_live_lock);
|
2007-06-15 12:40:49 +08:00
|
|
|
|
2010-12-15 06:14:29 +08:00
|
|
|
list_for_each_entry(reg, &o2hb_all_regions, hr_all_item) {
|
2013-07-04 06:01:10 +08:00
|
|
|
if (reg->hr_item_dropped)
|
|
|
|
continue;
|
|
|
|
|
2010-12-15 06:14:29 +08:00
|
|
|
uuid = config_item_name(&reg->hr_item);
|
|
|
|
if (region_uuid) {
|
|
|
|
if (strcmp(region_uuid, uuid))
|
|
|
|
continue;
|
|
|
|
found = 1;
|
|
|
|
}
|
2007-06-15 12:40:49 +08:00
|
|
|
|
2010-12-15 06:14:29 +08:00
|
|
|
if (reg->hr_item_pinned) {
|
|
|
|
mlog(ML_CLUSTER, "Unpin region %s\n", uuid);
|
|
|
|
o2nm_undepend_item(&reg->hr_item);
|
|
|
|
reg->hr_item_pinned = 0;
|
|
|
|
}
|
|
|
|
if (found)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2007-06-20 02:34:03 +08:00
|
|
|
|
2010-12-15 06:14:29 +08:00
|
|
|
static int o2hb_region_inc_user(const char *region_uuid)
|
|
|
|
{
|
|
|
|
int ret = 0;
|
2007-06-15 12:40:49 +08:00
|
|
|
|
2010-12-15 06:14:29 +08:00
|
|
|
spin_lock(&o2hb_live_lock);
|
2007-06-20 02:34:03 +08:00
|
|
|
|
2010-12-15 06:14:29 +08:00
|
|
|
/* local heartbeat */
|
|
|
|
if (!o2hb_global_heartbeat_active()) {
|
|
|
|
ret = o2hb_region_pin(region_uuid);
|
|
|
|
goto unlock;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* if global heartbeat is active and this is the first dependent user,
|
|
|
|
* pin all regions if quorum region count <= CUT_OFF
|
|
|
|
*/
|
|
|
|
o2hb_dependent_users++;
|
|
|
|
if (o2hb_dependent_users > 1)
|
|
|
|
goto unlock;
|
|
|
|
|
2013-11-13 07:06:58 +08:00
|
|
|
if (bitmap_weight(o2hb_quorum_region_bitmap,
|
2010-12-15 06:14:29 +08:00
|
|
|
O2NM_MAX_REGIONS) <= O2HB_PIN_CUT_OFF)
|
|
|
|
ret = o2hb_region_pin(NULL);
|
|
|
|
|
|
|
|
unlock:
|
|
|
|
spin_unlock(&o2hb_live_lock);
|
2007-06-15 12:40:49 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2018-08-18 06:44:31 +08:00
|
|
|
static void o2hb_region_dec_user(const char *region_uuid)
|
2007-06-15 12:40:49 +08:00
|
|
|
{
|
|
|
|
spin_lock(&o2hb_live_lock);
|
|
|
|
|
2010-12-15 06:14:29 +08:00
|
|
|
/* local heartbeat */
|
|
|
|
if (!o2hb_global_heartbeat_active()) {
|
|
|
|
o2hb_region_unpin(region_uuid);
|
|
|
|
goto unlock;
|
|
|
|
}
|
2007-06-15 12:40:49 +08:00
|
|
|
|
2010-12-15 06:14:29 +08:00
|
|
|
/*
|
|
|
|
* if global heartbeat is active and there are no dependent users,
|
|
|
|
* unpin all quorum regions
|
|
|
|
*/
|
|
|
|
o2hb_dependent_users--;
|
|
|
|
if (!o2hb_dependent_users)
|
|
|
|
o2hb_region_unpin(NULL);
|
2007-06-15 12:40:49 +08:00
|
|
|
|
2010-12-15 06:14:29 +08:00
|
|
|
unlock:
|
|
|
|
spin_unlock(&o2hb_live_lock);
|
2007-06-15 12:40:49 +08:00
|
|
|
}
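To summarize the pin bookkeeping in the two functions above: o2hb_dependent_users is a plain counter guarded by o2hb_live_lock; the first dependent user may pin all regions (in global mode, when the quorum region count is at or below the cut-off) and the last one unpins them. A pseudocode sketch of the invariant:

/*
 *	inc_user:  if (++o2hb_dependent_users == 1 &&
 *		       quorum_regions <= O2HB_PIN_CUT_OFF)
 *			o2hb_region_pin(NULL);
 *
 *	dec_user:  if (--o2hb_dependent_users == 0)
 *			o2hb_region_unpin(NULL);
 */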
|
|
|
|
|
|
|
|
int o2hb_register_callback(const char *region_uuid,
|
|
|
|
struct o2hb_callback_func *hc)
|
2005-12-16 06:31:23 +08:00
|
|
|
{
|
2013-09-12 05:19:50 +08:00
|
|
|
struct o2hb_callback_func *f;
|
2005-12-16 06:31:23 +08:00
|
|
|
struct o2hb_callback *hbcall;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
BUG_ON(hc->hc_magic != O2HB_CB_MAGIC);
|
|
|
|
BUG_ON(!list_empty(&hc->hc_item));
|
|
|
|
|
|
|
|
hbcall = hbcall_from_type(hc->hc_type);
|
|
|
|
if (IS_ERR(hbcall)) {
|
|
|
|
ret = PTR_ERR(hbcall);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2007-06-15 12:40:49 +08:00
|
|
|
if (region_uuid) {
|
2010-12-15 06:14:29 +08:00
|
|
|
ret = o2hb_region_inc_user(region_uuid);
|
|
|
|
if (ret) {
|
|
|
|
mlog_errno(ret);
|
2007-06-15 12:40:49 +08:00
|
|
|
goto out;
|
2010-12-15 06:14:29 +08:00
|
|
|
}
|
2007-06-15 12:40:49 +08:00
|
|
|
}
|
|
|
|
|
2005-12-16 06:31:23 +08:00
|
|
|
down_write(&o2hb_callback_sem);
|
|
|
|
|
2013-09-12 05:19:50 +08:00
|
|
|
list_for_each_entry(f, &hbcall->list, hc_item) {
|
|
|
|
if (hc->hc_priority < f->hc_priority) {
|
|
|
|
list_add_tail(&hc->hc_item, &f->hc_item);
|
2005-12-16 06:31:23 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (list_empty(&hc->hc_item))
|
|
|
|
list_add_tail(&hc->hc_item, &hbcall->list);
|
|
|
|
|
|
|
|
up_write(&o2hb_callback_sem);
|
|
|
|
ret = 0;
|
|
|
|
out:
|
2010-12-15 06:14:29 +08:00
|
|
|
mlog(ML_CLUSTER, "returning %d on behalf of %p for funcs %p\n",
|
2005-12-16 06:31:23 +08:00
|
|
|
ret, __builtin_return_address(0), hc);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(o2hb_register_callback);
|
|
|
|
|
2007-06-15 12:40:49 +08:00
|
|
|
void o2hb_unregister_callback(const char *region_uuid,
|
|
|
|
struct o2hb_callback_func *hc)
|
2005-12-16 06:31:23 +08:00
|
|
|
{
|
|
|
|
BUG_ON(hc->hc_magic != O2HB_CB_MAGIC);
|
|
|
|
|
2010-12-15 06:14:29 +08:00
|
|
|
mlog(ML_CLUSTER, "on behalf of %p for funcs %p\n",
|
2005-12-16 06:31:23 +08:00
|
|
|
__builtin_return_address(0), hc);
|
|
|
|
|
2007-06-15 12:40:49 +08:00
|
|
|
/* XXX Can this happen _with_ a region reference? */
|
2005-12-16 06:31:23 +08:00
|
|
|
if (list_empty(&hc->hc_item))
|
2007-02-03 19:14:30 +08:00
|
|
|
return;
|
2005-12-16 06:31:23 +08:00
|
|
|
|
2007-06-15 12:40:49 +08:00
|
|
|
if (region_uuid)
|
2010-12-15 06:14:29 +08:00
|
|
|
o2hb_region_dec_user(region_uuid);
|
2007-06-15 12:40:49 +08:00
|
|
|
|
2005-12-16 06:31:23 +08:00
|
|
|
down_write(&o2hb_callback_sem);
|
|
|
|
|
|
|
|
list_del_init(&hc->hc_item);
|
|
|
|
|
|
|
|
up_write(&o2hb_callback_sem);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(o2hb_unregister_callback);
|
|
|
|
|
2014-10-10 06:25:13 +08:00
|
|
|
int o2hb_check_node_heartbeating_no_sem(u8 node_num)
|
|
|
|
{
|
|
|
|
unsigned long testing_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
|
|
|
|
|
2016-03-16 05:52:58 +08:00
|
|
|
spin_lock(&o2hb_live_lock);
|
2014-10-10 06:25:13 +08:00
|
|
|
o2hb_fill_node_map_from_callback(testing_map, sizeof(testing_map));
|
2016-03-16 05:52:58 +08:00
|
|
|
spin_unlock(&o2hb_live_lock);
|
2014-10-10 06:25:13 +08:00
|
|
|
if (!test_bit(node_num, testing_map)) {
|
|
|
|
mlog(ML_HEARTBEAT,
|
|
|
|
"node (%u) does not have heartbeating enabled.\n",
|
|
|
|
node_num);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(o2hb_check_node_heartbeating_no_sem);
|
|
|
|
|
2005-12-16 06:31:23 +08:00
|
|
|
int o2hb_check_node_heartbeating_from_callback(u8 node_num)
|
|
|
|
{
|
|
|
|
unsigned long testing_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
|
|
|
|
|
|
|
|
o2hb_fill_node_map_from_callback(testing_map, sizeof(testing_map));
|
|
|
|
if (!test_bit(node_num, testing_map)) {
|
|
|
|
mlog(ML_HEARTBEAT,
|
|
|
|
"node (%u) does not have heartbeating enabled.\n",
|
|
|
|
node_num);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(o2hb_check_node_heartbeating_from_callback);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* this is just a hack until we get the plumbing which flips file systems
|
|
|
|
* read only and drops the hb ref instead of killing the node dead.
|
|
|
|
*/
|
|
|
|
void o2hb_stop_all_regions(void)
|
|
|
|
{
|
|
|
|
struct o2hb_region *reg;
|
|
|
|
|
|
|
|
mlog(ML_ERROR, "stopping heartbeat on all active regions.\n");
|
|
|
|
|
|
|
|
spin_lock(&o2hb_live_lock);
|
|
|
|
|
|
|
|
list_for_each_entry(reg, &o2hb_all_regions, hr_all_item)
|
|
|
|
reg->hr_unclean_stop = 1;
|
|
|
|
|
|
|
|
spin_unlock(&o2hb_live_lock);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(o2hb_stop_all_regions);
|
2010-10-08 05:31:06 +08:00
|
|
|
|
|
|
|
int o2hb_get_all_regions(char *region_uuids, u8 max_regions)
|
|
|
|
{
|
|
|
|
struct o2hb_region *reg;
|
|
|
|
int numregs = 0;
|
|
|
|
char *p;
|
|
|
|
|
|
|
|
spin_lock(&o2hb_live_lock);
|
|
|
|
|
|
|
|
p = region_uuids;
|
|
|
|
list_for_each_entry(reg, &o2hb_all_regions, hr_all_item) {
|
2013-07-04 06:01:10 +08:00
|
|
|
if (reg->hr_item_dropped)
|
|
|
|
continue;
|
|
|
|
|
2010-10-08 05:31:06 +08:00
|
|
|
mlog(0, "Region: %s\n", config_item_name(®->hr_item));
|
|
|
|
if (numregs < max_regions) {
|
|
|
|
memcpy(p, config_item_name(&reg->hr_item),
|
|
|
|
O2HB_MAX_REGION_NAME_LEN);
|
|
|
|
p += O2HB_MAX_REGION_NAME_LEN;
|
|
|
|
}
|
|
|
|
numregs++;
|
|
|
|
}
|
|
|
|
|
|
|
|
spin_unlock(&o2hb_live_lock);
|
|
|
|
|
|
|
|
return numregs;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(o2hb_get_all_regions);
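A hedged caller sketch for o2hb_get_all_regions(): the buffer must provide max_regions slots of O2HB_MAX_REGION_NAME_LEN bytes each, and a return value larger than max_regions signals that only the first max_regions names were copied:

static void example_dump_regions(void)
{
	char uuids[4 * O2HB_MAX_REGION_NAME_LEN];
	int n = o2hb_get_all_regions(uuids, 4);

	if (n > 4)
		n = 4;	/* list was truncated to the first four names */
	/* uuids now holds n fixed-width region names */
}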
|
|
|
|
|
|
|
|
int o2hb_global_heartbeat_active(void)
|
|
|
|
{
|
2010-10-10 01:27:04 +08:00
|
|
|
return (o2hb_heartbeat_mode == O2HB_HEARTBEAT_GLOBAL);
|
2010-10-08 05:31:06 +08:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(o2hb_global_heartbeat_active);
|