drbd: differentiate between normal and forced detach

Aborting local requests (not waiting for completion from the lower-level
disk) is dangerous: once the master bio has been completed to upper
layers, its data pages may already have been re-used for other things.
If the local IO is still pending and completes later, the DMA may land
in those re-used pages, causing crashes or corrupting unrelated data.

Only abort local IO if explicitly requested. The intended use case is
a lower-level device that has turned into a tarpit, no longer
completing IO requests, not even with error completions.

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
Lars Ellenberg authored on 2012-06-14 14:21:32 +02:00; committed by Philipp Reisner
commit 383606e0de
parent d264580145
7 changed files with 42 additions and 15 deletions
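For orientation before the per-file diffs: the patch replaces the old boolean "forcedetach" argument with a three-valued enum. The type below is taken verbatim from the drbd_int.h hunk; the comments are editorial paraphrase of the semantics the rest of the patch implements, not kernel source:

    enum drbd_force_detach_flags {
            DRBD_IO_ERROR,      /* data IO error: honor the on_io_error policy; may be passed on */
            DRBD_META_IO_ERROR, /* meta-data IO error: detach even under EP_PASS_ON, but let pending local IO finish */
            DRBD_FORCE_DETACH,  /* disk-timeout or admin request: detach AND abort pending local IO */
    };

Call sites that passed true (meta-data IO paths) now pass DRBD_META_IO_ERROR, callers that passed false pass DRBD_IO_ERROR, and only the disk-timeout path in request_timer_fn() escalates to DRBD_FORCE_DETACH.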

drbd_actlog.c

@@ -411,7 +411,7 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
                 + mdev->ldev->md.al_offset + mdev->al_tr_pos;
 
         if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE))
-                drbd_chk_io_error(mdev, 1, true);
+                drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
 
         if (++mdev->al_tr_pos >
             div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT))

drbd_bitmap.c

@@ -1096,7 +1096,7 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w
         if (ctx->error) {
                 dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n");
-                drbd_chk_io_error(mdev, 1, true);
+                drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
                 err = -EIO; /* ctx->error ? */
         }
@@ -1212,7 +1212,7 @@ int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(loc
         wait_until_done_or_disk_failure(mdev, mdev->ldev, &ctx->done);
 
         if (ctx->error)
-                drbd_chk_io_error(mdev, 1, true);
+                drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
                 /* that should force detach, so the in memory bitmap will be
                  * gone in a moment as well. */

drbd_int.h

@@ -832,6 +832,7 @@ enum {
         BITMAP_IO_QUEUED,       /* Started bitmap IO */
         GO_DISKLESS,            /* Disk is being detached, on io-error or admin request. */
         WAS_IO_ERROR,           /* Local disk failed returned IO error */
+        FORCE_DETACH,           /* Force-detach from local disk, aborting any pending local IO */
         RESYNC_AFTER_NEG,       /* Resync after online grow after the attach&negotiate finished. */
         NET_CONGESTED,          /* The data socket is congested */
@@ -1838,12 +1839,20 @@ static inline int drbd_request_state(struct drbd_conf *mdev,
         return _drbd_request_state(mdev, mask, val, CS_VERBOSE + CS_ORDERED);
 }
 
+enum drbd_force_detach_flags {
+        DRBD_IO_ERROR,
+        DRBD_META_IO_ERROR,
+        DRBD_FORCE_DETACH,
+};
+
 #define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
-static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach, const char *where)
+static inline void __drbd_chk_io_error_(struct drbd_conf *mdev,
+        enum drbd_force_detach_flags forcedetach,
+        const char *where)
 {
         switch (mdev->ldev->dc.on_io_error) {
         case EP_PASS_ON:
-                if (!forcedetach) {
+                if (forcedetach == DRBD_IO_ERROR) {
                         if (__ratelimit(&drbd_ratelimit_state))
                                 dev_err(DEV, "Local IO failed in %s.\n", where);
                         if (mdev->state.disk > D_INCONSISTENT)
@@ -1854,6 +1863,8 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach,
         case EP_DETACH:
         case EP_CALL_HELPER:
                 set_bit(WAS_IO_ERROR, &mdev->flags);
+                if (forcedetach == DRBD_FORCE_DETACH)
+                        set_bit(FORCE_DETACH, &mdev->flags);
                 if (mdev->state.disk > D_FAILED) {
                         _drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
                         dev_err(DEV,
@@ -1873,7 +1884,7 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach,
  */
 #define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__)
 static inline void drbd_chk_io_error_(struct drbd_conf *mdev,
-        int error, int forcedetach, const char *where)
+        int error, enum drbd_force_detach_flags forcedetach, const char *where)
 {
         if (error) {
                 unsigned long flags;
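Taken together, the drbd_int.h hunks implement a small decision table: detach unless the policy is EP_PASS_ON and the error is an ordinary data IO error, and abort pending local IO only on force-detach. A minimal userspace model of that logic, for illustration only (the enum names mirror the diff; should_detach() and everything else here is a stub, not DRBD source):

    #include <stdbool.h>

    enum on_io_error_policy { EP_PASS_ON, EP_DETACH, EP_CALL_HELPER };
    enum drbd_force_detach_flags { DRBD_IO_ERROR, DRBD_META_IO_ERROR, DRBD_FORCE_DETACH };

    /* Returns true if the disk should transition to D_FAILED (detach).
     * *abort_pending_io is set when pending local IO must be aborted as well,
     * which the kernel code defers to after_state_ch() via the FORCE_DETACH bit. */
    static bool should_detach(enum on_io_error_policy policy,
                              enum drbd_force_detach_flags how,
                              bool *abort_pending_io)
    {
            *abort_pending_io = (how == DRBD_FORCE_DETACH);
            if (policy == EP_PASS_ON && how == DRBD_IO_ERROR)
                    return false;   /* pass the error to upper layers, stay attached */
            return true;            /* meta-data errors and force-detach always detach */
    }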

drbd_main.c

@@ -1630,8 +1630,20 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
                 eh = mdev->ldev->dc.on_io_error;
                 was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
 
-                /* Immediately allow completion of all application IO, that waits
-                   for completion from the local disk. */
-                tl_abort_disk_io(mdev);
+                /* Immediately allow completion of all application IO,
+                 * that waits for completion from the local disk,
+                 * if this was a force-detach due to disk_timeout
+                 * or administrator request (drbdsetup detach --force).
+                 * Do NOT abort otherwise.
+                 * Aborting local requests may cause serious problems,
+                 * if requests are completed to upper layers already,
+                 * and then later the already submitted local bio completes.
+                 * This can cause DMA into former bio pages that meanwhile
+                 * have been re-used for other things.
+                 * So aborting local requests may cause crashes,
+                 * or even worse, silent data corruption.
+                 */
+                if (test_and_clear_bit(FORCE_DETACH, &mdev->flags))
+                        tl_abort_disk_io(mdev);
 
                 /* current state still has to be D_FAILED,
@@ -3870,7 +3882,7 @@ void drbd_md_sync(struct drbd_conf *mdev)
         if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
                 /* this was a try anyways ... */
                 dev_err(DEV, "meta data update failed!\n");
-                drbd_chk_io_error(mdev, 1, true);
+                drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
         }
 
         /* Update mdev->ldev->md.la_size_sect,

drbd_nl.c

@@ -950,6 +950,9 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
          * to realize a "hot spare" feature (not that I'd recommend that) */
         wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
 
+        /* make sure there is no leftover from previous force-detach attempts */
+        clear_bit(FORCE_DETACH, &mdev->flags);
+
         /* allocation not in the IO path, cqueue thread context */
         nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
         if (!nbc) {
@@ -1345,6 +1348,7 @@ static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
         }
 
         if (dt.detach_force) {
+                set_bit(FORCE_DETACH, &mdev->flags);
                 drbd_force_state(mdev, NS(disk, D_FAILED));
                 reply->ret_code = SS_SUCCESS;
                 goto out;
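These two drbd_nl.c hunks define the flag's lifecycle from the administrator's point of view: a forced detach (dt.detach_force, i.e. "drbdsetup detach --force" as named in the comment above) sets FORCE_DETACH before forcing the disk to D_FAILED, and a subsequent attach clears any stale bit so that a later ordinary detach or IO error cannot accidentally abort pending IO.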

drbd_req.c

@@ -455,7 +455,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                 req->rq_state |= RQ_LOCAL_COMPLETED;
                 req->rq_state &= ~RQ_LOCAL_PENDING;
 
-                __drbd_chk_io_error(mdev, false);
+                __drbd_chk_io_error(mdev, DRBD_IO_ERROR);
                 _req_may_be_done_not_susp(req, m);
                 break;
@ -477,7 +477,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
break; break;
} }
__drbd_chk_io_error(mdev, false); __drbd_chk_io_error(mdev, DRBD_IO_ERROR);
goto_queue_for_net_read: goto_queue_for_net_read:
@@ -1275,7 +1275,7 @@ void request_timer_fn(unsigned long data)
             time_after(now, req->start_time + dt) &&
             !time_in_range(now, mdev->last_reattach_jif, mdev->last_reattach_jif + dt)) {
                 dev_warn(DEV, "Local backing device failed to meet the disk-timeout\n");
-                __drbd_chk_io_error(mdev, 1);
+                __drbd_chk_io_error(mdev, DRBD_FORCE_DETACH);
         }
         nt = (time_after(now, req->start_time + et) ? now : req->start_time) + et;
         spin_unlock_irq(&mdev->req_lock);
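The request_timer_fn() hunk is the tarpit case from the commit message: if a request has outlived the configured disk-timeout, the backing device is presumed to never complete it, so this is the one in-kernel caller that escalates straight to DRBD_FORCE_DETACH, setting the FORCE_DETACH bit and thereby letting after_state_ch() abort the stuck local IO.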

drbd_worker.c

@@ -111,7 +111,7 @@ void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local)
         if (list_empty(&mdev->read_ee))
                 wake_up(&mdev->ee_wait);
         if (test_bit(__EE_WAS_ERROR, &e->flags))
-                __drbd_chk_io_error(mdev, false);
+                __drbd_chk_io_error(mdev, DRBD_IO_ERROR);
         spin_unlock_irqrestore(&mdev->req_lock, flags);
 
         drbd_queue_work(&mdev->data.work, &e->w);
@@ -154,7 +154,7 @@ static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(lo
                 : list_empty(&mdev->active_ee);
 
         if (test_bit(__EE_WAS_ERROR, &e->flags))
-                __drbd_chk_io_error(mdev, false);
+                __drbd_chk_io_error(mdev, DRBD_IO_ERROR);
         spin_unlock_irqrestore(&mdev->req_lock, flags);
 
         if (is_syncer_req)