From 26525618863afcc4aab8b2a83451d37c6f513460 Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Fri, 5 Nov 2010 09:56:33 +0100 Subject: [PATCH 001/122] drbd: only reset online-verify start sector if verify completed After a network hiccup during online-verify, the next verify triggered should by default resume where the previous run left off. After any replication link interruption, there will be a (possibly empty) resync. Do not reset the online-verify start sector if some resync completed; that would defeat the purpose. Only reset the start sector once a verify run is completed. Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_worker.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index e027446590d3..6cfc5417da8e 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -782,6 +782,7 @@ int drbd_resync_finished(struct drbd_conf *mdev) union drbd_state os, ns; struct drbd_work *w; char *khelper_cmd = NULL; + int verify_done = 0; /* Remove all elements from the resync LRU. Since future actions * might set bits in the (main) bitmap, then the entries in the @@ -818,6 +819,8 @@ int drbd_resync_finished(struct drbd_conf *mdev) spin_lock_irq(&mdev->req_lock); os = mdev->state; + verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T); + /* This protects us against multiple calls (that can happen in the presence of application IO), and against connectivity loss just before we arrive here. */ if (os.conn <= C_CONNECTED) @@ -827,8 +830,7 @@ int drbd_resync_finished(struct drbd_conf *mdev) ns.conn = C_CONNECTED; dev_info(DEV, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n", - (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) ? - "Online verify " : "Resync", + verify_done ? "Online verify " : "Resync", dt + mdev->rs_paused, mdev->rs_paused, dbdt); n_oos = drbd_bm_total_weight(mdev); @@ -905,7 +907,8 @@ out: mdev->rs_total = 0; mdev->rs_failed = 0; mdev->rs_paused = 0; - mdev->ov_start_sector = 0; + if (verify_done) + mdev->ov_start_sector = 0; drbd_md_sync(mdev); From 30b743a2d532af39c9ed13c85d5473f6f620f6c8 Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Fri, 5 Nov 2010 09:39:06 +0100 Subject: [PATCH 002/122] drbd: improve online-verify progress tracking For a partial (resumed) online-verify, initialize rs_total not to the total number of bits, but to the number of bits to check in this run, to match the meaning rs_total has for an actual resync. Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_main.c | 23 ++++++++++++----------- drivers/block/drbd/drbd_proc.c | 11 ++++++----- drivers/block/drbd/drbd_receiver.c | 3 ++- 3 files changed, 20 insertions(+), 17 deletions(-) diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 8a43ce0edeed..a1a2cb1eadf1 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -961,6 +961,10 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state /* helper for __drbd_set_state */ static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs) { + if (mdev->agreed_pro_version < 90) + mdev->ov_start_sector = 0; + mdev->rs_total = drbd_bm_bits(mdev); + mdev->ov_position = 0; if (cs == C_VERIFY_T) { /* starting online verify from an arbitrary position * does not fit well into the existing protocol.
@@ -970,11 +974,15 @@ static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs) mdev->ov_start_sector = ~(sector_t)0; } else { unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector); - if (bit >= mdev->rs_total) + if (bit >= mdev->rs_total) { mdev->ov_start_sector = BM_BIT_TO_SECT(mdev->rs_total - 1); + mdev->rs_total = 1; + } else + mdev->rs_total -= bit; mdev->ov_position = mdev->ov_start_sector; } + mdev->ov_left = mdev->rs_total; } static void drbd_resume_al(struct drbd_conf *mdev) @@ -1081,7 +1089,7 @@ int __drbd_set_state(struct drbd_conf *mdev, if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) && ns.conn < C_CONNECTED) { mdev->ov_start_sector = - BM_BIT_TO_SECT(mdev->rs_total - mdev->ov_left); + BM_BIT_TO_SECT(drbd_bm_bits(mdev) - mdev->ov_left); dev_info(DEV, "Online Verify reached sector %llu\n", (unsigned long long)mdev->ov_start_sector); } @@ -1106,14 +1114,7 @@ int __drbd_set_state(struct drbd_conf *mdev, unsigned long now = jiffies; int i; - mdev->ov_position = 0; - mdev->rs_total = drbd_bm_bits(mdev); - if (mdev->agreed_pro_version >= 90) - set_ov_position(mdev, ns.conn); - else - mdev->ov_start_sector = 0; - mdev->ov_left = mdev->rs_total - - BM_SECT_TO_BIT(mdev->ov_position); + set_ov_position(mdev, ns.conn); mdev->rs_start = now; mdev->rs_last_events = 0; mdev->rs_last_sect_ev = 0; @@ -1121,7 +1122,7 @@ int __drbd_set_state(struct drbd_conf *mdev, mdev->ov_last_oos_start = 0; for (i = 0; i < DRBD_SYNC_MARKS; i++) { - mdev->rs_mark_left[i] = mdev->rs_total; + mdev->rs_mark_left[i] = mdev->ov_left; mdev->rs_mark_time[i] = now; } diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c index 7e6ac307e2de..0b20aa837022 100644 --- a/drivers/block/drbd/drbd_proc.c +++ b/drivers/block/drbd/drbd_proc.c @@ -239,12 +239,13 @@ static int drbd_seq_show(struct seq_file *seq, void *v) mdev->state.conn == C_SYNC_TARGET) drbd_syncer_progress(mdev, seq); - if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T) + if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T) { + unsigned long bm_bits = drbd_bm_bits(mdev); seq_printf(seq, "\t%3d%% %lu/%lu\n", - (int)((mdev->rs_total-mdev->ov_left) / - (mdev->rs_total/100+1)), - mdev->rs_total - mdev->ov_left, - mdev->rs_total); + (int)((bm_bits-mdev->ov_left) / + (bm_bits/100+1)), + bm_bits - mdev->ov_left, bm_bits); + } if (proc_details >= 1 && get_ldev_if_state(mdev, D_FAILED)) { lc_seq_printf_stats(seq, mdev->resync); diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 8e68be939deb..88856a7f42a0 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -2005,7 +2005,8 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un mdev->agreed_pro_version >= 90) { mdev->ov_start_sector = sector; mdev->ov_position = sector; - mdev->ov_left = mdev->rs_total - BM_SECT_TO_BIT(sector); + mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector); + mdev->rs_total = mdev->ov_left; dev_info(DEV, "Online Verify start sector: %llu\n", (unsigned long long)sector); } From de228bba676e1f76e5e5f9444e51bb2db003cefb Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Fri, 5 Nov 2010 09:43:15 +0100 Subject: [PATCH 003/122] drbd: initialize online-verify progress tracking on verify target For partial (resumed) online verify, initialize the resync step marks once we know what the online verify start sector is. 
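To see why the marks must start from the amount of work in this run (ov_left) rather than from the full bitmap size, the following stand-alone C sketch models how a ring of (time, work-left) step marks is turned into a rate and ETA. It is a simplified userspace illustration, not DRBD code; SYNC_MARKS and the field names only mirror the DRBD_SYNC_MARKS ring and the rs_mark_left/rs_mark_time fields touched by these patches, and all numbers are made up.

#include <stdio.h>

#define SYNC_MARKS 8	/* models DRBD_SYNC_MARKS (assumed ring size) */

struct progress {
	unsigned long mark_left[SYNC_MARKS];	/* work still left at mark time */
	unsigned long mark_time[SYNC_MARKS];	/* seconds; models jiffies/HZ */
	int last_mark;
};

/* initialize every mark to the work of *this* run, e.g. ov_left for a
 * resumed online verify, so the first rate samples are not skewed */
static void progress_init(struct progress *p, unsigned long todo, unsigned long now)
{
	int i;

	for (i = 0; i < SYNC_MARKS; i++) {
		p->mark_left[i] = todo;
		p->mark_time[i] = now;
	}
	p->last_mark = 0;
}

/* rate over the older marks and a crude ETA, like the /proc output */
static void progress_report(const struct progress *p, unsigned long left, unsigned long now)
{
	int i = (p->last_mark + 2) % SYNC_MARKS;	/* oldest usable mark */
	unsigned long dt = now - p->mark_time[i];
	unsigned long done = p->mark_left[i] - left;

	if (!dt)
		dt = 1;
	printf("rate: %lu units/s, ETA: %lu s\n",
	       done / dt, done ? left * dt / done : 0);
}

int main(void)
{
	struct progress p;

	progress_init(&p, 1000000, 0);	/* resumed run: only 1e6 units left */
	p.last_mark = 1;		/* pretend one mark advanced ... */
	p.mark_left[1] = 970000;	/* ... after 3 s with 30000 units done */
	p.mark_time[1] = 3;
	progress_report(&p, 940000, 6);
	return 0;
}

Had the marks been initialized to the full bitmap size instead of the work left in this run, the early samples would report a wildly inflated rate and a nonsensical ETA.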
Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_receiver.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 88856a7f42a0..3b95eef07546 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -2003,10 +2003,16 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un case P_OV_REQUEST: if (mdev->ov_start_sector == ~(sector_t)0 && mdev->agreed_pro_version >= 90) { + unsigned long now = jiffies; + int i; mdev->ov_start_sector = sector; mdev->ov_position = sector; mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector); mdev->rs_total = mdev->ov_left; + for (i = 0; i < DRBD_SYNC_MARKS; i++) { + mdev->rs_mark_left[i] = mdev->ov_left; + mdev->rs_mark_time[i] = now; + } dev_info(DEV, "Online Verify start sector: %llu\n", (unsigned long long)sector); } From c6ea14dfa391da58e309d0d5041564b3d9dbb82f Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Fri, 5 Nov 2010 09:23:37 +0100 Subject: [PATCH 004/122] drbd: factor out advancement of resync marks for progress reporting This is in preparation to unify progress reporting of online-verify and resync requests. Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_actlog.c | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c index aca302492ff2..b3f18545b469 100644 --- a/drivers/block/drbd/drbd_actlog.c +++ b/drivers/block/drbd/drbd_actlog.c @@ -919,6 +919,22 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector, } } +void drbd_advance_rs_marks(struct drbd_conf *mdev, unsigned long still_to_go) +{ + unsigned long now = jiffies; + unsigned long last = mdev->rs_mark_time[mdev->rs_last_mark]; + int next = (mdev->rs_last_mark + 1) % DRBD_SYNC_MARKS; + if (time_after_eq(now, last + DRBD_SYNC_MARK_STEP)) { + if (mdev->rs_mark_left[mdev->rs_last_mark] != still_to_go && + mdev->state.conn != C_PAUSED_SYNC_T && + mdev->state.conn != C_PAUSED_SYNC_S) { + mdev->rs_mark_time[next] = now; + mdev->rs_mark_left[next] = still_to_go; + mdev->rs_last_mark = next; + } + } +} + /* clear the bit corresponding to the piece of storage in question: * size byte of data starting from sector. Only clear a bits of the affected * one ore more _aligned_ BM_BLOCK_SIZE blocks. 
@@ -969,19 +985,7 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size, */ count = drbd_bm_clear_bits(mdev, sbnr, ebnr); if (count && get_ldev(mdev)) { - unsigned long now = jiffies; - unsigned long last = mdev->rs_mark_time[mdev->rs_last_mark]; - int next = (mdev->rs_last_mark + 1) % DRBD_SYNC_MARKS; - if (time_after_eq(now, last + DRBD_SYNC_MARK_STEP)) { - unsigned long tw = drbd_bm_total_weight(mdev); - if (mdev->rs_mark_left[mdev->rs_last_mark] != tw && - mdev->state.conn != C_PAUSED_SYNC_T && - mdev->state.conn != C_PAUSED_SYNC_S) { - mdev->rs_mark_time[next] = now; - mdev->rs_mark_left[next] = tw; - mdev->rs_last_mark = next; - } - } + drbd_advance_rs_marks(mdev, drbd_bm_total_weight(mdev)); spin_lock_irqsave(&mdev->al_lock, flags); drbd_try_clear_on_disk_bm(mdev, sector, count, TRUE); spin_unlock_irqrestore(&mdev->al_lock, flags); From ea5442aff68c559c951373739201721185191748 Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Fri, 5 Nov 2010 09:48:01 +0100 Subject: [PATCH 005/122] drbd: advance progress step marks for online-verify Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_int.h | 1 + drivers/block/drbd/drbd_receiver.c | 8 +++++++- drivers/block/drbd/drbd_worker.c | 8 +++++++- 3 files changed, 15 insertions(+), 2 deletions(-) diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index b0bd27dfc1e8..6cba131011d5 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -1619,6 +1619,7 @@ extern int drbd_rs_del_all(struct drbd_conf *mdev); extern void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size); extern int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *); +extern void drbd_advance_rs_marks(struct drbd_conf *mdev, unsigned long still_to_go); extern void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size, const char *file, const unsigned int line); #define drbd_set_in_sync(mdev, sector, size) \ diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 3b95eef07546..10db70a23376 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -4340,7 +4340,13 @@ static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h) drbd_rs_complete_io(mdev, sector); dec_rs_pending(mdev); - if (--mdev->ov_left == 0) { + --mdev->ov_left; + + /* let's advance progress step marks only for every other megabyte */ + if ((mdev->ov_left & 0x200) == 0x200) + drbd_advance_rs_marks(mdev, mdev->ov_left); + + if (mdev->ov_left == 0) { w = kmalloc(sizeof(*w), GFP_NOIO); if (w) { w->cb = w_ov_finished; diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 6cfc5417da8e..a050eb0d93ba 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -1185,7 +1185,13 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel) drbd_free_ee(mdev, e); - if (--mdev->ov_left == 0) { + --mdev->ov_left; + + /* let's advance progress step marks only for every other megabyte */ + if ((mdev->ov_left & 0x200) == 0x200) + drbd_advance_rs_marks(mdev, mdev->ov_left); + + if (mdev->ov_left == 0) { ov_oos_print(mdev); drbd_resync_finished(mdev); } From 439d595379f87ec95249da21122eb085866f8ba9 Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Fri, 5 Nov 2010 09:52:46 +0100 Subject: [PATCH 006/122] drbd: show progress bar and ETA for online-verify Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- 
drivers/block/drbd/drbd_int.h | 5 +++- drivers/block/drbd/drbd_proc.c | 52 ++++++++++++++++++++++++++-------- 2 files changed, 44 insertions(+), 13 deletions(-) diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 6cba131011d5..7131bf2af6ae 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -2157,7 +2157,10 @@ static inline void drbd_get_syncer_progress(struct drbd_conf *mdev, * units of BM_BLOCK_SIZE. * for the percentage, we don't care. */ - *bits_left = drbd_bm_total_weight(mdev) - mdev->rs_failed; + if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T) + *bits_left = mdev->ov_left; + else + *bits_left = drbd_bm_total_weight(mdev) - mdev->rs_failed; /* >> 10 to prevent overflow, * +1 to prevent division by zero */ if (*bits_left > mdev->rs_total) { diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c index 0b20aa837022..0ec6f4b6a4a8 100644 --- a/drivers/block/drbd/drbd_proc.c +++ b/drivers/block/drbd/drbd_proc.c @@ -45,6 +45,19 @@ const struct file_operations drbd_proc_fops = { .release = single_release, }; +void seq_printf_with_thousands_grouping(struct seq_file *seq, long v) +{ + /* v is in kB/sec. We don't expect TiByte/sec yet. */ + if (unlikely(v >= 1000000)) { + /* cool: > GiByte/s */ + seq_printf(seq, "%ld,", v / 1000000); + v /= 1000000; + seq_printf(seq, "%03ld,%03ld", v/1000, v % 1000); + } else if (likely(v >= 1000)) + seq_printf(seq, "%ld,%03ld", v/1000, v % 1000); + else + seq_printf(seq, "%ld", v); +} /*lge * progress bars shamelessly adapted from driver/md/md.c @@ -94,6 +107,7 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq) /* Rolling marks. last_mark+1 may just now be modified. last_mark+2 is * at least (DRBD_SYNC_MARKS-2)*DRBD_SYNC_MARK_STEP old, and has at * least DRBD_SYNC_MARK_STEP time before it will be modified. 
*/ + /* ------------------------ ~18s average ------------------------ */ i = (mdev->rs_last_mark + 2) % DRBD_SYNC_MARKS; dt = (jiffies - mdev->rs_mark_time[i]) / HZ; if (dt > (DRBD_SYNC_MARK_STEP * DRBD_SYNC_MARKS)) @@ -107,14 +121,29 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq) seq_printf(seq, "finish: %lu:%02lu:%02lu", rt / 3600, (rt % 3600) / 60, rt % 60); - /* current speed average over (SYNC_MARKS * SYNC_MARK_STEP) jiffies */ dbdt = Bit2KB(db/dt); - if (dbdt > 1000) - seq_printf(seq, " speed: %ld,%03ld", - dbdt/1000, dbdt % 1000); - else - seq_printf(seq, " speed: %ld", dbdt); + seq_printf(seq, " speed: "); + seq_printf_with_thousands_grouping(seq, dbdt); + seq_printf(seq, " ("); + /* ------------------------- ~3s average ------------------------ */ + if (proc_details >= 1) { + /* this is what drbd_rs_should_slow_down() uses */ + i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS; + dt = (jiffies - mdev->rs_mark_time[i]) / HZ; + if (dt > (DRBD_SYNC_MARK_STEP * DRBD_SYNC_MARKS)) + stalled = 1; + if (!dt) + dt++; + db = mdev->rs_mark_left[i] - rs_left; + rt = (dt * (rs_left / (db/100+1)))/100; /* seconds */ + + dbdt = Bit2KB(db/dt); + seq_printf_with_thousands_grouping(seq, dbdt); + seq_printf(seq, " -- "); + } + + /* --------------------- long term average ---------------------- */ /* mean speed since syncer started * we do account for PausedSync periods */ dt = (jiffies - mdev->rs_start - mdev->rs_paused) / HZ; @@ -122,11 +151,8 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq) dt = 1; db = mdev->rs_total - rs_left; dbdt = Bit2KB(db/dt); - if (dbdt > 1000) - seq_printf(seq, " (%ld,%03ld)", - dbdt/1000, dbdt % 1000); - else - seq_printf(seq, " (%ld)", dbdt); + seq_printf_with_thousands_grouping(seq, dbdt); + seq_printf(seq, ")"); if (mdev->state.conn == C_SYNC_TARGET) { if (mdev->c_sync_rate > 1000) @@ -236,7 +262,9 @@ static int drbd_seq_show(struct seq_file *seq, void *v) Bit2KB(drbd_bm_total_weight(mdev))); } if (mdev->state.conn == C_SYNC_SOURCE || - mdev->state.conn == C_SYNC_TARGET) + mdev->state.conn == C_SYNC_TARGET || + mdev->state.conn == C_VERIFY_S || + mdev->state.conn == C_VERIFY_T) drbd_syncer_progress(mdev, seq); if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T) { From 9bd28d3c90c80c7ec46085de281b38f67331da41 Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Fri, 5 Nov 2010 09:55:18 +0100 Subject: [PATCH 007/122] drbd: factor out drbd_rs_controller_reset Preparation patch to be able to use the auto-throttling resync controller for online-verify requests as well. 
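As a rough feel for the pacing involved, the fixed-rate arithmetic of drbd_rs_number_requests() (factored out by the next patch in this series) can be reproduced in plain C. This is only a model of the numbers: the 100 ms timer interval (SLEEP_TIME) and the 4 KiB bitmap block size are assumptions about the surrounding DRBD constants, and the dynamic controller and the throttling check are ignored.

#include <stdio.h>

int main(void)
{
	const int HZ = 250;			/* assumed kernel tick rate */
	const int SLEEP_TIME = HZ / 10;		/* resync timer interval: 100 ms */
	const int BM_BLOCK_SIZE = 4096;		/* one bitmap bit covers 4 KiB (assumed) */
	int c_sync_rate = 10240;		/* configured resync rate in KiB/s */

	/* requests (one bitmap block each) to issue per 100 ms interval */
	int number = SLEEP_TIME * c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ);

	printf("%d KiB/s -> %d requests per interval (%d KiB per interval)\n",
	       c_sync_rate, number, number * BM_BLOCK_SIZE / 1024);
	return 0;
}

With these assumed constants, 10240 KiB/s works out to 256 four-KiB requests per 100 ms interval, i.e. exactly the configured rate.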
Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_int.h | 1 + drivers/block/drbd/drbd_worker.c | 19 ++++++++++++------- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 7131bf2af6ae..85207b275e41 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -1499,6 +1499,7 @@ extern int drbd_resync_finished(struct drbd_conf *mdev); extern int drbd_md_sync_page_io(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, sector_t sector, int rw); extern void drbd_ov_oos_found(struct drbd_conf*, sector_t, int); +extern void drbd_rs_controller_reset(struct drbd_conf *mdev); static inline void ov_oos_print(struct drbd_conf *mdev) { diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index a050eb0d93ba..971e9b3b13ba 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -1439,6 +1439,17 @@ int drbd_alter_sa(struct drbd_conf *mdev, int na) return retcode; } +void drbd_rs_controller_reset(struct drbd_conf *mdev) +{ + atomic_set(&mdev->rs_sect_in, 0); + atomic_set(&mdev->rs_sect_ev, 0); + mdev->rs_in_flight = 0; + mdev->rs_planed = 0; + spin_lock(&mdev->peer_seq_lock); + fifo_set(&mdev->rs_plan_s, 0); + spin_unlock(&mdev->peer_seq_lock); +} + /** * drbd_start_resync() - Start the resync process * @mdev: DRBD device. @@ -1556,13 +1567,7 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side) drbd_resync_finished(mdev); } - atomic_set(&mdev->rs_sect_in, 0); - atomic_set(&mdev->rs_sect_ev, 0); - mdev->rs_in_flight = 0; - mdev->rs_planed = 0; - spin_lock(&mdev->peer_seq_lock); - fifo_set(&mdev->rs_plan_s, 0); - spin_unlock(&mdev->peer_seq_lock); + drbd_rs_controller_reset(mdev); /* ns.conn may already be != mdev->state.conn, * we may have been paused in between, or become paused until * the timer triggers. From e65f440d474d7d6a6fd8a2c844e851d8c96ed9c5 Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Fri, 5 Nov 2010 10:04:07 +0100 Subject: [PATCH 008/122] drbd: factor out drbd_rs_number_requests Preparation patch to be able to use the auto-throttling resync controller for online-verify requests as well. Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_worker.c | 61 +++++++++++++++----------------- 1 file changed, 29 insertions(+), 32 deletions(-) diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 971e9b3b13ba..6d111c8515f7 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -492,6 +492,32 @@ int drbd_rs_controller(struct drbd_conf *mdev) return req_sect; } +int drbd_rs_number_requests(struct drbd_conf *mdev) +{ + int number; + if (mdev->rs_plan_s.size) { /* mdev->sync_conf.c_plan_ahead */ + number = drbd_rs_controller(mdev) >> (BM_BLOCK_SHIFT - 9); + mdev->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME; + } else { + mdev->c_sync_rate = mdev->sync_conf.rate; + number = SLEEP_TIME * mdev->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ); + } + + /* Throttle resync on lower level disk activity, which may also be + * caused by application IO on Primary/SyncTarget. + * Keep this after the call to drbd_rs_controller, as that assumes + * to be called as precisely as possible every SLEEP_TIME, + * and would be confused otherwise. 
*/ + if (number && drbd_rs_should_slow_down(mdev)) { + mdev->c_sync_rate = 1; + number = 0; + } + + /* ignore the amount of pending requests, the resync controller should + * throttle down to incoming reply rate soon enough anyways. */ + return number; +} + int w_make_resync_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel) { @@ -499,7 +525,7 @@ int w_make_resync_request(struct drbd_conf *mdev, sector_t sector; const sector_t capacity = drbd_get_capacity(mdev->this_bdev); int max_segment_size; - int number, rollback_i, size, pe, mx; + int number, rollback_i, size; int align, queued, sndbuf; int i = 0; @@ -537,39 +563,10 @@ int w_make_resync_request(struct drbd_conf *mdev, mdev->agreed_pro_version < 94 ? queue_max_segment_size(mdev->rq_queue) : mdev->agreed_pro_version < 95 ? DRBD_MAX_SIZE_H80_PACKET : DRBD_MAX_SEGMENT_SIZE; - if (mdev->rs_plan_s.size) { /* mdev->sync_conf.c_plan_ahead */ - number = drbd_rs_controller(mdev) >> (BM_BLOCK_SHIFT - 9); - mdev->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME; - } else { - mdev->c_sync_rate = mdev->sync_conf.rate; - number = SLEEP_TIME * mdev->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ); - } - - /* Throttle resync on lower level disk activity, which may also be - * caused by application IO on Primary/SyncTarget. - * Keep this after the call to drbd_rs_controller, as that assumes - * to be called as precisely as possible every SLEEP_TIME, - * and would be confused otherwise. */ - if (drbd_rs_should_slow_down(mdev)) + number = drbd_rs_number_requests(mdev); + if (number == 0) goto requeue; - mutex_lock(&mdev->data.mutex); - if (mdev->data.socket) - mx = mdev->data.socket->sk->sk_rcvbuf / sizeof(struct p_block_req); - else - mx = 1; - mutex_unlock(&mdev->data.mutex); - - /* For resync rates >160MB/sec, allow more pending RS requests */ - if (number > mx) - mx = number; - - /* Limit the number of pending RS requests to no more than the peer's receive buffer */ - pe = atomic_read(&mdev->rs_pending_cnt); - if ((pe + number) > mx) { - number = mx - pe; - } - for (i = 0; i < number; i++) { /* Stop generating RS requests, when half of the send buffer is filled */ mutex_lock(&mdev->data.mutex); From 2649f0809f55e4df98c333a2b85c6fc8fee04804 Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Fri, 5 Nov 2010 10:05:47 +0100 Subject: [PATCH 009/122] drbd: use the resync controller for online-verify requests as well Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_main.c | 2 ++ drivers/block/drbd/drbd_proc.c | 3 ++- drivers/block/drbd/drbd_receiver.c | 10 ++++++++-- drivers/block/drbd/drbd_worker.c | 8 ++------ 4 files changed, 14 insertions(+), 9 deletions(-) diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index a1a2cb1eadf1..6afb81f807bd 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -1126,6 +1126,8 @@ int __drbd_set_state(struct drbd_conf *mdev, mdev->rs_mark_time[i] = now; } + drbd_rs_controller_reset(mdev); + if (ns.conn == C_VERIFY_S) { dev_info(DEV, "Starting Online Verify from sector %llu\n", (unsigned long long)mdev->ov_position); diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c index 0ec6f4b6a4a8..fab3fde3477b 100644 --- a/drivers/block/drbd/drbd_proc.c +++ b/drivers/block/drbd/drbd_proc.c @@ -154,7 +154,8 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq) seq_printf_with_thousands_grouping(seq, dbdt); seq_printf(seq, ")"); - if (mdev->state.conn == 
C_SYNC_TARGET) { + if (mdev->state.conn == C_SYNC_TARGET || + mdev->state.conn == C_VERIFY_S) { if (mdev->c_sync_rate > 1000) seq_printf(seq, " want: %d,%03d", mdev->c_sync_rate / 1000, mdev->c_sync_rate % 1000); diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 10db70a23376..1c56af03c38b 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -1883,8 +1883,12 @@ int drbd_rs_should_slow_down(struct drbd_conf *mdev) /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP, * approx. */ - i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-2) % DRBD_SYNC_MARKS; - rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed; + i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS; + + if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T) + rs_left = mdev->ov_left; + else + rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed; dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ; if (!dt) @@ -1992,6 +1996,8 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un D_ASSERT(mdev->agreed_pro_version >= 89); e->w.cb = w_e_end_csum_rs_req; } else if (cmd == P_OV_REPLY) { + /* track progress, we may need to throttle */ + atomic_add(size >> 9, &mdev->rs_sect_in); e->w.cb = w_e_end_ov_reply; dec_rs_pending(mdev); /* drbd_rs_begin_io done when we sent this request, diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 6d111c8515f7..af805efc94d7 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -708,11 +708,7 @@ static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int ca return 0; } - number = SLEEP_TIME*mdev->sync_conf.rate / ((BM_BLOCK_SIZE/1024)*HZ); - if (atomic_read(&mdev->rs_pending_cnt) > number) - goto requeue; - - number -= atomic_read(&mdev->rs_pending_cnt); + number = drbd_rs_number_requests(mdev); sector = mdev->ov_position; for (i = 0; i < number; i++) { @@ -741,11 +737,11 @@ static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int ca mdev->ov_position = sector; requeue: + mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9)); mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME); return 1; } - int w_ov_finished(struct drbd_conf *mdev, struct drbd_work *w, int cancel) { kfree(w); From 18edc0b9d7dac2f74117a0bdb98f2e705eb74d82 Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Tue, 9 Nov 2010 14:12:10 +0100 Subject: [PATCH 010/122] drbd: fix potential wrap of 32bit oos:%lu display in /proc/drbd When converting bits (4k resolution, still) to kB, we shift left. If it was a large number of bits on a 32bit box (>= 4 TiB storage), we may wrap the 32bit unsigned long base type, resulting in incorrect display. 
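The wrap is easy to reproduce in userspace. The sketch below assumes one bitmap bit covers 4 KiB, so Bit2KB() amounts to a left shift by two; with a 32-bit unsigned long, anything from 4 TiB of out-of-sync data upwards then overflows, while widening to unsigned long long, as the patch does, gives the correct value.

#include <stdio.h>

int main(void)
{
	/* 5 TiB of out-of-sync data at 4 KiB per bitmap bit */
	unsigned int bits = 5U << 28;	/* 5 TiB / 4 KiB = 5 * 2^28 bits */

	/* 32-bit arithmetic, as with a 32-bit unsigned long: wraps */
	unsigned int kb32 = bits << 2;
	/* widened first, as the fix does: correct */
	unsigned long long kb64 = (unsigned long long)bits << 2;

	printf("oos (32-bit math): %u KiB\n", kb32);
	printf("oos (64-bit math): %llu KiB\n", kb64);
	return 0;
}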
Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_proc.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c index fab3fde3477b..07368b75392a 100644 --- a/drivers/block/drbd/drbd_proc.c +++ b/drivers/block/drbd/drbd_proc.c @@ -259,8 +259,9 @@ static int drbd_seq_show(struct seq_file *seq, void *v) mdev->epochs, write_ordering_chars[mdev->write_ordering] ); - seq_printf(seq, " oos:%lu\n", - Bit2KB(drbd_bm_total_weight(mdev))); + seq_printf(seq, " oos:%llu\n", + Bit2KB((unsigned long long) + drbd_bm_total_weight(mdev))); } if (mdev->state.conn == C_SYNC_SOURCE || mdev->state.conn == C_SYNC_TARGET || From 5f9915bbb8e0975ce99f893c29b8e89100b33399 Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Tue, 9 Nov 2010 14:15:24 +0100 Subject: [PATCH 011/122] drbd: further converge progress display of resync and online-verify Show progressbar and ETA always, with proc_details >= 1 also show the current sector position for both resync and online-verify on both nodes. Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_proc.c | 46 ++++++++++++++++++------------ drivers/block/drbd/drbd_receiver.c | 4 +++ 2 files changed, 31 insertions(+), 19 deletions(-) diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c index 07368b75392a..329b66a91e44 100644 --- a/drivers/block/drbd/drbd_proc.c +++ b/drivers/block/drbd/drbd_proc.c @@ -84,7 +84,12 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq) seq_printf(seq, "."); seq_printf(seq, "] "); - seq_printf(seq, "sync'ed:%3u.%u%% ", res / 10, res % 10); + if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T) + seq_printf(seq, "verified:"); + else + seq_printf(seq, "sync'ed:"); + seq_printf(seq, "%3u.%u%% ", res / 10, res % 10); + /* if more than 1 GB display in MB */ if (mdev->rs_total > 0x100000L) seq_printf(seq, "(%lu/%lu)M\n\t", @@ -130,14 +135,9 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq) /* this is what drbd_rs_should_slow_down() uses */ i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS; dt = (jiffies - mdev->rs_mark_time[i]) / HZ; - if (dt > (DRBD_SYNC_MARK_STEP * DRBD_SYNC_MARKS)) - stalled = 1; - if (!dt) dt++; db = mdev->rs_mark_left[i] - rs_left; - rt = (dt * (rs_left / (db/100+1)))/100; /* seconds */ - dbdt = Bit2KB(db/dt); seq_printf_with_thousands_grouping(seq, dbdt); seq_printf(seq, " -- "); @@ -156,13 +156,29 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq) if (mdev->state.conn == C_SYNC_TARGET || mdev->state.conn == C_VERIFY_S) { - if (mdev->c_sync_rate > 1000) - seq_printf(seq, " want: %d,%03d", - mdev->c_sync_rate / 1000, mdev->c_sync_rate % 1000); - else - seq_printf(seq, " want: %d", mdev->c_sync_rate); + seq_printf(seq, " want: "); + seq_printf_with_thousands_grouping(seq, mdev->c_sync_rate); } seq_printf(seq, " K/sec%s\n", stalled ? " (stalled)" : ""); + + if (proc_details >= 1) { + /* 64 bit: + * we convert to sectors in the display below. */ + u64 bm_bits = drbd_bm_bits(mdev); + u64 bit_pos; + if (mdev->state.conn == C_VERIFY_S || + mdev->state.conn == C_VERIFY_T) + bit_pos = bm_bits - mdev->ov_left; + else + bit_pos = mdev->bm_resync_fo; + /* Total sectors may be slightly off for oddly + * sized devices. So what. 
*/ + seq_printf(seq, + "\t%3d%% sector pos: %llu/%llu\n", + (int)(bit_pos / (bm_bits/100+1)), + (unsigned long long) BM_BIT_TO_SECT(bit_pos), + (unsigned long long) BM_BIT_TO_SECT(bm_bits)); + } } static void resync_dump_detail(struct seq_file *seq, struct lc_element *e) @@ -269,14 +285,6 @@ static int drbd_seq_show(struct seq_file *seq, void *v) mdev->state.conn == C_VERIFY_T) drbd_syncer_progress(mdev, seq); - if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T) { - unsigned long bm_bits = drbd_bm_bits(mdev); - seq_printf(seq, "\t%3d%% %lu/%lu\n", - (int)((bm_bits-mdev->ov_left) / - (bm_bits/100+1)), - bm_bits - mdev->ov_left, bm_bits); - } - if (proc_details >= 1 && get_ldev_if_state(mdev, D_FAILED)) { lc_seq_printf_stats(seq, mdev->resync); lc_seq_printf_stats(seq, mdev->act_log); diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 1c56af03c38b..d0e19a242af4 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -1974,6 +1974,8 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un case P_RS_DATA_REQUEST: e->w.cb = w_e_end_rsdata_req; fault_type = DRBD_FAULT_RS_RD; + /* used in the sector offset progress display */ + mdev->bm_resync_fo = BM_SECT_TO_BIT(sector); break; case P_OV_REPLY: @@ -1995,6 +1997,8 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un if (cmd == P_CSUM_RS_REQUEST) { D_ASSERT(mdev->agreed_pro_version >= 89); e->w.cb = w_e_end_csum_rs_req; + /* used in the sector offset progress display */ + mdev->bm_resync_fo = BM_SECT_TO_BIT(sector); } else if (cmd == P_OV_REPLY) { /* track progress, we may need to throttle */ atomic_add(size >> 9, &mdev->rs_sect_in); From 470be44ab1841f3261a4d758450a42e6b79e9551 Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Wed, 10 Nov 2010 10:36:52 +0100 Subject: [PATCH 012/122] drbd: detect modification of in-flight buffers With data-integrity digest enabled, double-check on the sending side for modifications by upper layers of buffers under write back, so we can tell it appart from corruption on the "wire". Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_main.c | 28 +++++++++++++++++++++++++++- drivers/block/drbd/drbd_receiver.c | 3 ++- 2 files changed, 29 insertions(+), 2 deletions(-) diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 6afb81f807bd..451fc36a85cb 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -2537,10 +2537,36 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req) ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, 0); } if (ok) { - if (mdev->net_conf->wire_protocol == DRBD_PROT_A) + /* For protocol A, we have to memcpy the payload into + * socket buffers, as we may complete right away + * as soon as we handed it over to tcp, at which point the data + * pages may become invalid. + * + * For data-integrity enabled, we copy it as well, so we can be + * sure that even if the bio pages may still be modified, it + * won't change the data on the wire, thus if the digest checks + * out ok after sending on this side, but does not fit on the + * receiving side, we sure have detected corruption elsewhere. + */ + if (mdev->net_conf->wire_protocol == DRBD_PROT_A || dgs) ok = _drbd_send_bio(mdev, req->master_bio); else ok = _drbd_send_zc_bio(mdev, req->master_bio); + + /* double check digest, sometimes buffers have been modified in flight. 
*/ + if (dgs > 0 && dgs <= 64) { + /* 64 byte, 512 bit, is the larges digest size + * currently supported in kernel crypto. */ + unsigned char digest[64]; + drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, digest); + if (memcmp(mdev->int_dig_out, digest, dgs)) { + dev_warn(DEV, + "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n", + (unsigned long long)req->sector, req->size); + } + } /* else if (dgs > 64) { + ... Be noisy about digest too large ... + } */ } drbd_put_data_sock(mdev); diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index d0e19a242af4..ca213c6e5f9d 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -1281,7 +1281,8 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __ if (dgs) { drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv); if (memcmp(dig_in, dig_vv, dgs)) { - dev_err(DEV, "Digest integrity check FAILED.\n"); + dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n", + (unsigned long long)sector, data_size); drbd_bcast_ee(mdev, "digest failed", dgs, dig_in, dig_vv, e); drbd_free_ee(mdev, e); From 3129b1b9aed15bbebde1b2a5719434273feb295d Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Thu, 11 Nov 2010 10:47:05 +0100 Subject: [PATCH 013/122] drbd: debug: limit nelink-broadcast of request on digest mismatch to 32k We used to be limited to 32k requests, but have increased that limit to 128k now. This part of the code can only deal with 32k, it would scramble arbitrary pages for larger requests. As it is used for debugging only anyways, it is ok to simply truncate the dumped data here. Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_nl.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 8cbfaa687d72..dad559810ed6 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -2398,10 +2398,11 @@ void drbd_bcast_ee(struct drbd_conf *mdev, tl = tl_add_int(tl, T_ee_sector, &e->sector); tl = tl_add_int(tl, T_ee_block_id, &e->block_id); + /* dump the first 32k */ + len = min_t(unsigned, e->size, 32 << 10); put_unaligned(T_ee_data, tl++); - put_unaligned(e->size, tl++); + put_unaligned(len, tl++); - len = e->size; page = e->pages; page_chain_for_each(page) { void *d = kmap_atomic(page, KM_USER0); @@ -2410,6 +2411,8 @@ void drbd_bcast_ee(struct drbd_conf *mdev, kunmap_atomic(d, KM_USER0); tl = (unsigned short*)((char*)tl + l); len -= l; + if (len == 0) + break; } put_unaligned(TT_END, tl++); /* Close the tag list */ From 1816a2b47afae838e53a177d5d166cc7be97d6b5 Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Thu, 11 Nov 2010 15:19:07 +0100 Subject: [PATCH 014/122] drbd: properly use max_hw_sectors to limit the our bio size To ease tracking of bios in some hash tables, we want it to not cross certain boundaries (128k, used to be 32k). We limit the maximum bio size using queue parameters. Historically some defines and variables we use there have been named max_segment_size, which was misguided. Rename them to max_bio_size, and use [blk_]queue_max_hw_sectors where appropriate. 
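For reference, the size relations behind this rename can be checked with a few lines of C; the defines simply mirror the drbd_int.h values visible in the diff below (HT_SHIFT of 8, the 128 KiB DRBD_MAX_BIO_SIZE derived from it, and the 32 KiB limit of the old h80 packet header), so this is plain arithmetic, not driver code.

#include <stdio.h>

/* mirrors drbd_int.h: one hash slot covers 1 << (9 + HT_SHIFT) bytes */
#define HT_SHIFT 8
#define DRBD_MAX_BIO_SIZE (1U << (9 + HT_SHIFT))

int main(void)
{
	printf("DRBD_MAX_BIO_SIZE        = %u bytes (%u KiB)\n",
	       DRBD_MAX_BIO_SIZE, DRBD_MAX_BIO_SIZE >> 10);
	/* what gets passed to blk_queue_max_hw_sectors(): 512-byte sectors */
	printf("max_hw_sectors           = %u sectors\n", DRBD_MAX_BIO_SIZE >> 9);
	/* the old header only allows packets up to 32 KiB of data */
	printf("DRBD_MAX_SIZE_H80_PACKET = %u bytes\n", 1U << 15);
	return 0;
}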
Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_actlog.c | 8 ++++---- drivers/block/drbd/drbd_int.h | 4 ++-- drivers/block/drbd/drbd_main.c | 6 +++--- drivers/block/drbd/drbd_nl.c | 27 +++++++++++++-------------- drivers/block/drbd/drbd_receiver.c | 18 +++++++++--------- drivers/block/drbd/drbd_req.c | 8 ++++---- drivers/block/drbd/drbd_worker.c | 12 ++++++------ 7 files changed, 41 insertions(+), 42 deletions(-) diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c index b3f18545b469..b4adb58c7472 100644 --- a/drivers/block/drbd/drbd_actlog.c +++ b/drivers/block/drbd/drbd_actlog.c @@ -952,7 +952,7 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size, int wake_up = 0; unsigned long flags; - if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) { + if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) { dev_err(DEV, "drbd_set_in_sync: sector=%llus size=%d nonsense!\n", (unsigned long long)sector, size); return; @@ -1002,7 +1002,7 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size, /* * this is intended to set one request worth of data out of sync. * affects at least 1 bit, - * and at most 1+DRBD_MAX_SEGMENT_SIZE/BM_BLOCK_SIZE bits. + * and at most 1+DRBD_MAX_BIO_SIZE/BM_BLOCK_SIZE bits. * * called by tl_clear and drbd_send_dblock (==drbd_make_request). * so this can be _any_ process. @@ -1015,7 +1015,7 @@ void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size, unsigned int enr, count; struct lc_element *e; - if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) { + if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) { dev_err(DEV, "sector: %llus, size: %d\n", (unsigned long long)sector, size); return; @@ -1387,7 +1387,7 @@ void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size) sector_t esector, nr_sectors; int wake_up = 0; - if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) { + if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) { dev_err(DEV, "drbd_rs_failed_io: sector=%llus size=%d nonsense!\n", (unsigned long long)sector, size); return; diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 85207b275e41..bcba2742cfba 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -512,7 +512,7 @@ struct p_sizes { u64 d_size; /* size of disk */ u64 u_size; /* user requested size */ u64 c_size; /* current exported size */ - u32 max_segment_size; /* Maximal size of a BIO */ + u32 max_bio_size; /* Maximal size of a BIO */ u16 queue_order_type; /* not yet implemented in DRBD*/ u16 dds_flags; /* use enum dds_flags here. */ } __packed; @@ -1398,7 +1398,7 @@ struct bm_extent { * With a value of 8 all IO in one 128K block make it to the same slot of the * hash table. */ #define HT_SHIFT 8 -#define DRBD_MAX_SEGMENT_SIZE (1U<<(9+HT_SHIFT)) +#define DRBD_MAX_BIO_SIZE (1U<<(9+HT_SHIFT)) #define DRBD_MAX_SIZE_H80_PACKET (1 << 15) /* The old header only allows packets up to 32Kib data */ diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 451fc36a85cb..9d9c2ed31e9a 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -1924,7 +1924,7 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl p.d_size = cpu_to_be64(d_size); p.u_size = cpu_to_be64(u_size); p.c_size = cpu_to_be64(trigger_reply ? 
0 : drbd_get_capacity(mdev->this_bdev)); - p.max_segment_size = cpu_to_be32(queue_max_segment_size(mdev->rq_queue)); + p.max_bio_size = cpu_to_be32(queue_max_hw_sectors(mdev->rq_queue) << 9); p.queue_order_type = cpu_to_be16(q_order_type); p.dds_flags = cpu_to_be16(flags); @@ -2952,7 +2952,7 @@ static void drbd_destroy_mempools(void) static int drbd_create_mempools(void) { struct page *page; - const int number = (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE) * minor_count; + const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count; int i; /* prepare our caches and mempools */ @@ -3218,7 +3218,7 @@ struct drbd_conf *drbd_new_device(unsigned int minor) q->backing_dev_info.congested_data = mdev; blk_queue_make_request(q, drbd_make_request_26); - blk_queue_max_segment_size(q, DRBD_MAX_SEGMENT_SIZE); + blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE >> 9); blk_queue_bounce_limit(q, BLK_BOUNCE_ANY); blk_queue_merge_bvec(q, drbd_merge_bvec); q->queue_lock = &mdev->req_lock; diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index dad559810ed6..9e27d82a9a19 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -765,22 +765,21 @@ static int drbd_check_al_size(struct drbd_conf *mdev) return 0; } -void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __must_hold(local) +void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size) __must_hold(local) { struct request_queue * const q = mdev->rq_queue; struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue; int max_segments = mdev->ldev->dc.max_bio_bvecs; + int max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9); - max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s); - - blk_queue_max_hw_sectors(q, max_seg_s >> 9); - blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS); - blk_queue_max_segment_size(q, max_seg_s); blk_queue_logical_block_size(q, 512); - blk_queue_segment_boundary(q, PAGE_SIZE-1); - blk_stack_limits(&q->limits, &b->limits, 0); + blk_queue_max_hw_sectors(q, max_hw_sectors); + /* This is the workaround for "bio would need to, but cannot, be split" */ + blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS); + blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1); + blk_queue_stack_limits(q, b); - dev_info(DEV, "max_segment_size ( = BIO size ) = %u\n", queue_max_segment_size(q)); + dev_info(DEV, "max BIO size = %u\n", queue_max_hw_sectors(q) << 9); if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) { dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n", @@ -858,7 +857,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp struct block_device *bdev; struct lru_cache *resync_lru = NULL; union drbd_state ns, os; - unsigned int max_seg_s; + unsigned int max_bio_size; int rv; int cp_discovered = 0; int logical_block_size; @@ -1109,20 +1108,20 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp mdev->read_cnt = 0; mdev->writ_cnt = 0; - max_seg_s = DRBD_MAX_SEGMENT_SIZE; + max_bio_size = DRBD_MAX_BIO_SIZE; if (mdev->state.conn == C_CONNECTED) { /* We are Primary, Connected, and now attach a new local * backing store. We must not increase the user visible maximum * bio size on this device to something the peer may not be * able to handle. 
*/ if (mdev->agreed_pro_version < 94) - max_seg_s = queue_max_segment_size(mdev->rq_queue); + max_bio_size = queue_max_hw_sectors(mdev->rq_queue) << 9; else if (mdev->agreed_pro_version == 94) - max_seg_s = DRBD_MAX_SIZE_H80_PACKET; + max_bio_size = DRBD_MAX_SIZE_H80_PACKET; /* else: drbd 8.3.9 and later, stay with default */ } - drbd_setup_queue_param(mdev, max_seg_s); + drbd_setup_queue_param(mdev, max_bio_size); /* If I am currently not R_PRIMARY, * but meta data primary indicator is set, diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index ca213c6e5f9d..79e7b57006b1 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -277,7 +277,7 @@ static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net) atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use; int i; - if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count) + if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count) i = page_chain_free(page); else { struct page *tmp; @@ -1240,7 +1240,7 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __ data_size -= dgs; ERR_IF(data_size & 0x1ff) return NULL; - ERR_IF(data_size > DRBD_MAX_SEGMENT_SIZE) return NULL; + ERR_IF(data_size > DRBD_MAX_BIO_SIZE) return NULL; /* even though we trust out peer, * we sometimes have to double check. */ @@ -1917,7 +1917,7 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un sector = be64_to_cpu(p->sector); size = be32_to_cpu(p->blksize); - if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) { + if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) { dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__, (unsigned long long)sector, size); return FALSE; @@ -2897,7 +2897,7 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned { struct p_sizes *p = &mdev->data.rbuf.sizes; enum determine_dev_size dd = unchanged; - unsigned int max_seg_s; + unsigned int max_bio_size; sector_t p_size, p_usize, my_usize; int ldsc = 0; /* local disk size changed */ enum dds_flags ddsf; @@ -2970,14 +2970,14 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned } if (mdev->agreed_pro_version < 94) - max_seg_s = be32_to_cpu(p->max_segment_size); + max_bio_size = be32_to_cpu(p->max_bio_size); else if (mdev->agreed_pro_version == 94) - max_seg_s = DRBD_MAX_SIZE_H80_PACKET; + max_bio_size = DRBD_MAX_SIZE_H80_PACKET; else /* drbd 8.3.8 onwards */ - max_seg_s = DRBD_MAX_SEGMENT_SIZE; + max_bio_size = DRBD_MAX_BIO_SIZE; - if (max_seg_s != queue_max_segment_size(mdev->rq_queue)) - drbd_setup_queue_param(mdev, max_seg_s); + if (max_bio_size != queue_max_hw_sectors(mdev->rq_queue) << 9) + drbd_setup_queue_param(mdev, max_bio_size); drbd_setup_order_type(mdev, be16_to_cpu(p->queue_order_type)); put_ldev(mdev); diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index ad3fc6228f27..08f53ce9b88f 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -1047,7 +1047,7 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio) /* can this bio be split generically? * Maybe add our own split-arbitrary-bios function. 
*/ - if (bio->bi_vcnt != 1 || bio->bi_idx != 0 || bio->bi_size > DRBD_MAX_SEGMENT_SIZE) { + if (bio->bi_vcnt != 1 || bio->bi_idx != 0 || bio->bi_size > DRBD_MAX_BIO_SIZE) { /* rather error out here than BUG in bio_split */ dev_err(DEV, "bio would need to, but cannot, be split: " "(vcnt=%u,idx=%u,size=%u,sector=%llu)\n", @@ -1098,7 +1098,7 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio) } /* This is called by bio_add_page(). With this function we reduce - * the number of BIOs that span over multiple DRBD_MAX_SEGMENT_SIZEs + * the number of BIOs that span over multiple DRBD_MAX_BIO_SIZEs * units (was AL_EXTENTs). * * we do the calculation within the lower 32bit of the byte offsets, @@ -1118,8 +1118,8 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct unsigned int bio_size = bvm->bi_size; int limit, backing_limit; - limit = DRBD_MAX_SEGMENT_SIZE - - ((bio_offset & (DRBD_MAX_SEGMENT_SIZE-1)) + bio_size); + limit = DRBD_MAX_BIO_SIZE + - ((bio_offset & (DRBD_MAX_BIO_SIZE-1)) + bio_size); if (limit < 0) limit = 0; if (bio_size == 0) { diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index af805efc94d7..782d87237cb4 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -524,7 +524,7 @@ int w_make_resync_request(struct drbd_conf *mdev, unsigned long bit; sector_t sector; const sector_t capacity = drbd_get_capacity(mdev->this_bdev); - int max_segment_size; + int max_bio_size; int number, rollback_i, size; int align, queued, sndbuf; int i = 0; @@ -559,9 +559,9 @@ int w_make_resync_request(struct drbd_conf *mdev, /* starting with drbd 8.3.8, we can handle multi-bio EEs, * if it should be necessary */ - max_segment_size = - mdev->agreed_pro_version < 94 ? queue_max_segment_size(mdev->rq_queue) : - mdev->agreed_pro_version < 95 ? DRBD_MAX_SIZE_H80_PACKET : DRBD_MAX_SEGMENT_SIZE; + max_bio_size = + mdev->agreed_pro_version < 94 ? queue_max_hw_sectors(mdev->rq_queue) << 9 : + mdev->agreed_pro_version < 95 ? DRBD_MAX_SIZE_H80_PACKET : DRBD_MAX_BIO_SIZE; number = drbd_rs_number_requests(mdev); if (number == 0) @@ -605,7 +605,7 @@ next_sector: goto next_sector; } -#if DRBD_MAX_SEGMENT_SIZE > BM_BLOCK_SIZE +#if DRBD_MAX_BIO_SIZE > BM_BLOCK_SIZE /* try to find some adjacent bits. * we stop if we have already the maximum req size. * @@ -615,7 +615,7 @@ next_sector: align = 1; rollback_i = i; for (;;) { - if (size + BM_BLOCK_SIZE > max_segment_size) + if (size + BM_BLOCK_SIZE > max_bio_size) break; /* Be always aligned */ From 4896e8c1b8fb7e46a65a6676e271fc047a260a3e Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Thu, 11 Nov 2010 22:41:04 +0100 Subject: [PATCH 015/122] drbd: restore compatibility with 32bit kernels With commit drbd: further converge progress display of resync and online-verify accidentally an u64/u64 div was introduced, causing an unresolvable symbol __udivdi3 to be reference. Actually for that division, 32bit are still suficient for now, so we can revert to unsigned long instead. 
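The fix visible in the diff below keeps the division itself in unsigned long and widens only for the final multiplications, so no 64-by-64 division (and hence no __udivdi3 call) is emitted on 32-bit. Here is a stand-alone sketch of the same computation; the value 8 for BM_SECT_PER_BIT (512-byte sectors per 4 KiB bitmap bit) is an assumption about the surrounding DRBD constants, and the device sizes are made up.

#include <stdio.h>

#define BM_SECT_PER_BIT 8ULL	/* 4 KiB per bit / 512-byte sectors (assumed) */

int main(void)
{
	unsigned long bm_bits = 1UL << 30;	/* 4 TiB device, 4 KiB per bit */
	unsigned long ov_left = 3UL << 28;	/* three quarters still to verify */
	unsigned long bit_pos = bm_bits - ov_left;

	/* percentage: unsigned long / unsigned long, no __udivdi3 on 32 bit */
	int pct = (int)(bit_pos / (bm_bits / 100 + 1));

	/* sector positions: widen only for the multiplication */
	printf("%3d%% sector pos: %llu/%llu\n", pct,
	       (unsigned long long)bit_pos * BM_SECT_PER_BIT,
	       (unsigned long long)bm_bits * BM_SECT_PER_BIT);
	return 0;
}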
Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_proc.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c index 329b66a91e44..49d817cfe865 100644 --- a/drivers/block/drbd/drbd_proc.c +++ b/drivers/block/drbd/drbd_proc.c @@ -164,8 +164,8 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq) if (proc_details >= 1) { /* 64 bit: * we convert to sectors in the display below. */ - u64 bm_bits = drbd_bm_bits(mdev); - u64 bit_pos; + unsigned long bm_bits = drbd_bm_bits(mdev); + unsigned long bit_pos; if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T) bit_pos = bm_bits - mdev->ov_left; @@ -176,8 +176,8 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq) seq_printf(seq, "\t%3d%% sector pos: %llu/%llu\n", (int)(bit_pos / (bm_bits/100+1)), - (unsigned long long) BM_BIT_TO_SECT(bit_pos), - (unsigned long long) BM_BIT_TO_SECT(bm_bits)); + (unsigned long long)bit_pos * BM_SECT_PER_BIT, + (unsigned long long)bm_bits * BM_SECT_PER_BIT); } } From 688593c5a82068aea64df0b836793dfbbaa646d7 Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Wed, 17 Nov 2010 22:25:03 +0100 Subject: [PATCH 016/122] drbd: Renamed write_flags_to_bio() to wire_flags_to_bio() Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_receiver.c | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 79e7b57006b1..f3052d871d31 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -1612,15 +1612,15 @@ static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq) return ret; } -static unsigned long write_flags_to_bio(struct drbd_conf *mdev, u32 dpf) +/* see also bio_flags_to_wire() + * DRBD_REQ_*, because we need to semantically map the flags to data packet + * flags and back. We may replicate to other kernel versions. */ +static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf) { - if (mdev->agreed_pro_version >= 95) - return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) | - (dpf & DP_FUA ? REQ_FUA : 0) | - (dpf & DP_FLUSH ? REQ_FUA : 0) | - (dpf & DP_DISCARD ? REQ_DISCARD : 0); - else - return dpf & DP_RW_SYNC ? REQ_SYNC : 0; + return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) | + (dpf & DP_FUA ? REQ_FUA : 0) | + (dpf & DP_FLUSH ? REQ_FLUSH : 0) | + (dpf & DP_DISCARD ? REQ_DISCARD : 0); } /* mirrored write */ @@ -1660,18 +1660,18 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned e->w.cb = e_end_block; + dp_flags = be32_to_cpu(p->dp_flags); + rw |= wire_flags_to_bio(mdev, dp_flags); + + if (dp_flags & DP_MAY_SET_IN_SYNC) + e->flags |= EE_MAY_SET_IN_SYNC; + spin_lock(&mdev->epoch_lock); e->epoch = mdev->current_epoch; atomic_inc(&e->epoch->epoch_size); atomic_inc(&e->epoch->active); spin_unlock(&mdev->epoch_lock); - dp_flags = be32_to_cpu(p->dp_flags); - rw |= write_flags_to_bio(mdev, dp_flags); - - if (dp_flags & DP_MAY_SET_IN_SYNC) - e->flags |= EE_MAY_SET_IN_SYNC; - /* I'm the receiver, I do hold a net_cnt reference. 
*/ if (!mdev->net_conf->two_primaries) { spin_lock_irq(&mdev->req_lock); From 759fbdfba66e620aceb3e73167e6003d1b8b0b0b Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Tue, 26 Oct 2010 16:02:27 +0200 Subject: [PATCH 017/122] drbd: Track the numbers of sectors in flight Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_int.h | 1 + drivers/block/drbd/drbd_main.c | 1 + drivers/block/drbd/drbd_req.c | 13 ++++++++++++- drivers/block/drbd/drbd_req.h | 12 +++++++----- 4 files changed, 21 insertions(+), 6 deletions(-) diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index bcba2742cfba..c804e44b9455 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -1102,6 +1102,7 @@ struct drbd_conf { struct fifo_buffer rs_plan_s; /* correction values of resync planer */ int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */ int rs_planed; /* resync sectors already planed */ + atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */ }; static inline struct drbd_conf *minor_to_mdev(unsigned int minor) diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 9d9c2ed31e9a..e81d009dd061 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -2799,6 +2799,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev) atomic_set(&mdev->pp_in_use_by_net, 0); atomic_set(&mdev->rs_sect_in, 0); atomic_set(&mdev->rs_sect_ev, 0); + atomic_set(&mdev->ap_in_flight, 0); mutex_init(&mdev->md_io_mutex); mutex_init(&mdev->data.mutex); diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 08f53ce9b88f..5c60d77d447c 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -558,6 +558,9 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, case handed_over_to_network: /* assert something? */ + if (bio_data_dir(req->master_bio) == WRITE) + atomic_add(req->size>>9, &mdev->ap_in_flight); + if (bio_data_dir(req->master_bio) == WRITE && mdev->net_conf->wire_protocol == DRBD_PROT_A) { /* this is what is dangerous about protocol A: @@ -591,6 +594,9 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, dec_ap_pending(mdev); req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING); req->rq_state |= RQ_NET_DONE; + if (req->rq_state & RQ_NET_SENT && req->rq_state & RQ_WRITE) + atomic_sub(req->size>>9, &mdev->ap_in_flight); + /* if it is still queued, we may not complete it here. * it will be canceled soon. */ if (!(req->rq_state & RQ_NET_QUEUED)) @@ -628,14 +634,17 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, req->rq_state |= RQ_NET_OK; D_ASSERT(req->rq_state & RQ_NET_PENDING); dec_ap_pending(mdev); + atomic_sub(req->size>>9, &mdev->ap_in_flight); req->rq_state &= ~RQ_NET_PENDING; _req_may_be_done_not_susp(req, m); break; case neg_acked: /* assert something? 
*/ - if (req->rq_state & RQ_NET_PENDING) + if (req->rq_state & RQ_NET_PENDING) { dec_ap_pending(mdev); + atomic_sub(req->size>>9, &mdev->ap_in_flight); + } req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING); req->rq_state |= RQ_NET_DONE; @@ -692,6 +701,8 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, } D_ASSERT(req->rq_state & RQ_NET_SENT); req->rq_state |= RQ_NET_DONE; + if (mdev->net_conf->wire_protocol == DRBD_PROT_A) + atomic_sub(req->size>>9, &mdev->ap_in_flight); _req_may_be_done(req, m); /* Allowed while state.susp */ break; diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h index ab2bd09d54b4..69d350fe7c1e 100644 --- a/drivers/block/drbd/drbd_req.h +++ b/drivers/block/drbd/drbd_req.h @@ -338,19 +338,21 @@ static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what) return rv; } -/* completion of master bio is outside of spinlock. - * If you need it irqsave, do it your self! - * Which means: don't use from bio endio callback. */ +/* completion of master bio is outside of our spinlock. + * We still may or may not be inside some irqs disabled section + * of the lower level driver completion callback, so we need to + * spin_lock_irqsave here. */ static inline int req_mod(struct drbd_request *req, enum drbd_req_event what) { + unsigned long flags; struct drbd_conf *mdev = req->mdev; struct bio_and_error m; int rv; - spin_lock_irq(&mdev->req_lock); + spin_lock_irqsave(&mdev->req_lock, flags); rv = __req_mod(req, what, &m); - spin_unlock_irq(&mdev->req_lock); + spin_unlock_irqrestore(&mdev->req_lock, flags); if (m.bio) complete_master_bio(mdev, &m); From 422028b1ca4c07995af82a18abced022ff4c296c Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Wed, 27 Oct 2010 11:12:07 +0200 Subject: [PATCH 018/122] drbd: New configuration parameters for dealing with network congestion net { on_congestion {block|pull-ahead|disconnect}; congestion-fill {sectors}; congestion-extents {al-extents}; } Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_nl.c | 7 +++++++ include/linux/drbd.h | 7 +++++++ include/linux/drbd_limits.h | 9 +++++++++ include/linux/drbd_nl.h | 3 +++ 4 files changed, 26 insertions(+) diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 9e27d82a9a19..f969d8717e23 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -1323,6 +1323,8 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, new_conf->wire_protocol = DRBD_PROT_C; new_conf->ping_timeo = DRBD_PING_TIMEO_DEF; new_conf->rr_conflict = DRBD_RR_CONFLICT_DEF; + new_conf->on_congestion = DRBD_ON_CONGESTION_DEF; + new_conf->cong_extents = DRBD_CONG_EXTENTS_DEF; if (!net_conf_from_tags(mdev, nlp->tag_list, new_conf)) { retcode = ERR_MANDATORY_TAG; @@ -1344,6 +1346,11 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, } } + if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A) { + retcode = ERR_CONG_NOT_PROTO_A; + goto fail; + } + if (mdev->state.role == R_PRIMARY && new_conf->want_lose) { retcode = ERR_DISCARD; goto fail; diff --git a/include/linux/drbd.h b/include/linux/drbd.h index ef44c7a0638c..03a08baabf11 100644 --- a/include/linux/drbd.h +++ b/include/linux/drbd.h @@ -96,6 +96,12 @@ enum drbd_on_no_data { OND_SUSPEND_IO }; +enum drbd_on_congestion { + OC_BLOCK, + OC_PULL_AHEAD, + OC_DISCONNECT, +}; + /* KEEP the order, do not delete or insert. Only append. 
*/ enum drbd_ret_codes { ERR_CODE_BASE = 100, @@ -146,6 +152,7 @@ enum drbd_ret_codes { ERR_PERM = 152, ERR_NEED_APV_93 = 153, ERR_STONITH_AND_PROT_A = 154, + ERR_CONG_NOT_PROTO_A = 155, /* insert new ones above this line */ AFTER_LAST_ERR_CODE diff --git a/include/linux/drbd_limits.h b/include/linux/drbd_limits.h index 4ac33f34b77e..abf418724e52 100644 --- a/include/linux/drbd_limits.h +++ b/include/linux/drbd_limits.h @@ -129,6 +129,7 @@ #define DRBD_AFTER_SB_2P_DEF ASB_DISCONNECT #define DRBD_RR_CONFLICT_DEF ASB_DISCONNECT #define DRBD_ON_NO_DATA_DEF OND_IO_ERROR +#define DRBD_ON_CONGESTION_DEF OC_BLOCK #define DRBD_MAX_BIO_BVECS_MIN 0 #define DRBD_MAX_BIO_BVECS_MAX 128 @@ -154,5 +155,13 @@ #define DRBD_C_MIN_RATE_MAX (4 << 20) #define DRBD_C_MIN_RATE_DEF 4096 +#define DRBD_CONG_FILL_MIN 0 +#define DRBD_CONG_FILL_MAX (10<<21) /* 10GByte in sectors */ +#define DRBD_CONG_FILL_DEF 0 + +#define DRBD_CONG_EXTENTS_MIN DRBD_AL_EXTENTS_MIN +#define DRBD_CONG_EXTENTS_MAX DRBD_AL_EXTENTS_MAX +#define DRBD_CONG_EXTENTS_DEF DRBD_AL_EXTENTS_DEF + #undef RANGE #endif diff --git a/include/linux/drbd_nl.h b/include/linux/drbd_nl.h index ade91107c9a5..8cde3945d1f7 100644 --- a/include/linux/drbd_nl.h +++ b/include/linux/drbd_nl.h @@ -56,6 +56,9 @@ NL_PACKET(net_conf, 5, NL_INTEGER( 39, T_MAY_IGNORE, rr_conflict) NL_INTEGER( 40, T_MAY_IGNORE, ping_timeo) NL_INTEGER( 67, T_MAY_IGNORE, rcvbuf_size) + NL_INTEGER( 81, T_MAY_IGNORE, on_congestion) + NL_INTEGER( 82, T_MAY_IGNORE, cong_fill) + NL_INTEGER( 83, T_MAY_IGNORE, cong_extents) /* 59 addr_family was available in GIT, never released */ NL_BIT( 60, T_MANDATORY, mind_af) NL_BIT( 27, T_MAY_IGNORE, want_lose) From 67531718d8f1259f01ab84c2aa25f7b03c7afd46 Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Wed, 27 Oct 2010 12:21:30 +0200 Subject: [PATCH 019/122] drbd: Implemented two new connection states Ahead/Behind In this connection mode, the ahead node no longer replicates application IO. The behind's disk becomes out dated. 
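A brief illustration may help at this point. The following is a stand-alone user-space C sketch, not part of the patch set, modelling the decision these states are built around: the congestion knobs introduced in the previous patch (on_congestion, cong_fill, cong_extents) decide when the sending node stops replicating and goes Ahead. Field names mirror the kernel ones; the threshold values are invented.

        #include <stdbool.h>
        #include <stdio.h>

        struct net_conf {
                unsigned int cong_fill;     /* sectors allowed in flight toward the peer */
                unsigned int cong_extents;  /* activity-log extents allowed to be "hot" */
        };

        /* Once either threshold is exceeded, the sending node would leave normal
         * replication and switch to C_AHEAD (or disconnect, depending on the
         * configured on_congestion policy). */
        static bool congested(const struct net_conf *nc,
                              unsigned int ap_sectors_in_flight,
                              unsigned int al_extents_used)
        {
                if (nc->cong_fill && ap_sectors_in_flight >= nc->cong_fill)
                        return true;
                return al_extents_used >= nc->cong_extents;
        }

        int main(void)
        {
                struct net_conf nc = { .cong_fill = 131072, .cong_extents = 127 };

                printf("congested: %s\n",
                       congested(&nc, 200000, 60) ? "yes, go Ahead" : "no");
                return 0;
        }

While Ahead, application writes complete locally without being mirrored; the following patches add the machinery to record them as out of sync on the peer so that a later resync can catch the Behind node up.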
Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_int.h | 2 ++ drivers/block/drbd/drbd_main.c | 12 ++++++++++-- drivers/block/drbd/drbd_receiver.c | 3 +++ drivers/block/drbd/drbd_req.c | 23 +++++++++++++++++++++++ drivers/block/drbd/drbd_strings.c | 4 +++- include/linux/drbd.h | 4 ++++ 6 files changed, 45 insertions(+), 3 deletions(-) diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index c804e44b9455..21b7439438cd 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -2217,6 +2217,8 @@ static inline int drbd_state_is_stable(union drbd_state s) case C_VERIFY_T: case C_PAUSED_SYNC_S: case C_PAUSED_SYNC_T: + case C_AHEAD: + case C_BEHIND: /* maybe stable, look at the disk state */ break; diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index e81d009dd061..46f27d6c0b21 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -871,16 +871,19 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state if (ns.conn >= C_CONNECTED && ((ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED) || - (ns.disk == D_NEGOTIATING && ns.conn == C_WF_BITMAP_T))) { + (ns.disk == D_NEGOTIATING && ns.conn == C_WF_BITMAP_T) || + ns.conn >= C_AHEAD)) { switch (ns.conn) { case C_WF_BITMAP_T: case C_PAUSED_SYNC_T: + case C_BEHIND: ns.disk = D_OUTDATED; break; case C_CONNECTED: case C_WF_BITMAP_S: case C_SYNC_SOURCE: case C_PAUSED_SYNC_S: + case C_AHEAD: ns.disk = D_UP_TO_DATE; break; case C_SYNC_TARGET: @@ -893,16 +896,18 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state } if (ns.conn >= C_CONNECTED && - (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)) { + (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED || ns.conn >= C_AHEAD)) { switch (ns.conn) { case C_CONNECTED: case C_WF_BITMAP_T: case C_PAUSED_SYNC_T: case C_SYNC_TARGET: + case C_BEHIND: ns.pdsk = D_UP_TO_DATE; break; case C_WF_BITMAP_S: case C_PAUSED_SYNC_S: + case C_AHEAD: /* remap any consistent state to D_OUTDATED, * but disallow "upgrade" of not even consistent states. */ @@ -1374,6 +1379,9 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED) drbd_send_state(mdev); + if (os.conn != C_AHEAD && ns.conn == C_AHEAD) + drbd_send_state(mdev); + /* We are in the progress to start a full sync... */ if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) || (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S)) diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index f3052d871d31..b19e8b2c4ce5 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -3179,6 +3179,9 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned if (ns.conn == C_WF_REPORT_PARAMS) ns.conn = C_CONNECTED; + if (peer_state.conn == C_AHEAD) + ns.conn = C_BEHIND; + if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING && get_ldev_if_state(mdev, D_NEGOTIATING)) { int cr; /* consider resync */ diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 5c60d77d447c..60288fb3c4d7 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -948,6 +948,29 @@ allocate_barrier: ? 
queue_for_net_write : queue_for_net_read); } + + if (remote && mdev->net_conf->on_congestion != OC_BLOCK) { + int congested = 0; + + if (mdev->net_conf->cong_fill && + atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) { + dev_info(DEV, "Congestion-fill threshold reached\n"); + congested = 1; + } + + if (mdev->act_log->used >= mdev->net_conf->cong_extents) { + dev_info(DEV, "Congestion-extents threshold reached\n"); + congested = 1; + } + + if (congested) { + if (mdev->net_conf->on_congestion == OC_PULL_AHEAD) + _drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL); + else /*mdev->net_conf->on_congestion == OC_DISCONNECT */ + _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL); + } + } + spin_unlock_irq(&mdev->req_lock); kfree(b); /* if someone else has beaten us to it... */ diff --git a/drivers/block/drbd/drbd_strings.c b/drivers/block/drbd/drbd_strings.c index 85179e1fb50a..5b970adc3b6f 100644 --- a/drivers/block/drbd/drbd_strings.c +++ b/drivers/block/drbd/drbd_strings.c @@ -48,6 +48,8 @@ static const char *drbd_conn_s_names[] = { [C_PAUSED_SYNC_T] = "PausedSyncT", [C_VERIFY_S] = "VerifyS", [C_VERIFY_T] = "VerifyT", + [C_AHEAD] = "Ahead", + [C_BEHIND] = "Behind", }; static const char *drbd_role_s_names[] = { @@ -92,7 +94,7 @@ static const char *drbd_state_sw_errors[] = { const char *drbd_conn_str(enum drbd_conns s) { /* enums are unsigned... */ - return s > C_PAUSED_SYNC_T ? "TOO_LARGE" : drbd_conn_s_names[s]; + return s > C_BEHIND ? "TOO_LARGE" : drbd_conn_s_names[s]; } const char *drbd_role_str(enum drbd_role s) diff --git a/include/linux/drbd.h b/include/linux/drbd.h index 03a08baabf11..23f31be6f00d 100644 --- a/include/linux/drbd.h +++ b/include/linux/drbd.h @@ -206,6 +206,10 @@ enum drbd_conns { C_VERIFY_T, C_PAUSED_SYNC_S, C_PAUSED_SYNC_T, + + C_AHEAD, + C_BEHIND, + C_MASK = 31 }; From 73a01a18b9c28a0fab1131ece5b0a9bc00a879b8 Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Wed, 27 Oct 2010 14:33:00 +0200 Subject: [PATCH 020/122] drbd: New packet for Ahead/Behind mode: P_OUT_OF_SYNC Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_actlog.c | 10 ++++--- drivers/block/drbd/drbd_int.h | 14 +++++++++- drivers/block/drbd/drbd_main.c | 10 +++++++ drivers/block/drbd/drbd_receiver.c | 10 +++++++ drivers/block/drbd/drbd_req.c | 44 ++++++++++++++++++++++-------- drivers/block/drbd/drbd_req.h | 4 ++- drivers/block/drbd/drbd_worker.c | 16 +++++++++++ include/linux/drbd.h | 2 +- 8 files changed, 91 insertions(+), 19 deletions(-) diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c index b4adb58c7472..33f6cc537d08 100644 --- a/drivers/block/drbd/drbd_actlog.c +++ b/drivers/block/drbd/drbd_actlog.c @@ -1007,22 +1007,22 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size, * called by tl_clear and drbd_send_dblock (==drbd_make_request). * so this can be _any_ process. 
*/ -void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size, +int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size, const char *file, const unsigned int line) { unsigned long sbnr, ebnr, lbnr, flags; sector_t esector, nr_sectors; - unsigned int enr, count; + unsigned int enr, count = 0; struct lc_element *e; if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) { dev_err(DEV, "sector: %llus, size: %d\n", (unsigned long long)sector, size); - return; + return 0; } if (!get_ldev(mdev)) - return; /* no disk, no metadata, no bitmap to set bits in */ + return 0; /* no disk, no metadata, no bitmap to set bits in */ nr_sectors = drbd_get_capacity(mdev->this_bdev); esector = sector + (size >> 9) - 1; @@ -1052,6 +1052,8 @@ void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size, out: put_ldev(mdev); + + return count; } static diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 21b7439438cd..471331236826 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -212,6 +212,7 @@ enum drbd_packets { /* P_CKPT_FENCE_REQ = 0x25, * currently reserved for protocol D */ /* P_CKPT_DISABLE_REQ = 0x26, * currently reserved for protocol D */ P_DELAY_PROBE = 0x27, /* is used on BOTH sockets */ + P_OUT_OF_SYNC = 0x28, /* Mark as out of sync (Outrunning), data socket */ P_MAX_CMD = 0x28, P_MAY_IGNORE = 0x100, /* Flag to test if (cmd > P_MAY_IGNORE) ... */ @@ -269,6 +270,7 @@ static inline const char *cmdname(enum drbd_packets cmd) [P_RS_IS_IN_SYNC] = "CsumRSIsInSync", [P_COMPRESSED_BITMAP] = "CBitmap", [P_DELAY_PROBE] = "DelayProbe", + [P_OUT_OF_SYNC] = "OutOfSync", [P_MAX_CMD] = NULL, }; @@ -550,6 +552,13 @@ struct p_discard { u32 pad; } __packed; +struct p_block_desc { + struct p_header80 head; + u64 sector; + u32 blksize; + u32 pad; /* to multiple of 8 Byte */ +} __packed; + /* Valid values for the encoding field. * Bump proto version when changing this. 
*/ enum drbd_bitmap_code { @@ -647,6 +656,7 @@ union p_polymorph { struct p_block_req block_req; struct p_delay_probe93 delay_probe93; struct p_rs_uuid rs_uuid; + struct p_block_desc block_desc; } __packed; /**********************************************************************/ @@ -1221,6 +1231,7 @@ extern int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd, struct p_data *dp, int data_size); extern int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd, sector_t sector, int blksize, u64 block_id); +extern int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req); extern int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd, struct drbd_epoch_entry *e); extern int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req); @@ -1534,6 +1545,7 @@ extern int w_send_read_req(struct drbd_conf *, struct drbd_work *, int); extern int w_prev_work_done(struct drbd_conf *, struct drbd_work *, int); extern int w_e_reissue(struct drbd_conf *, struct drbd_work *, int); extern int w_restart_disk_io(struct drbd_conf *, struct drbd_work *, int); +extern int w_send_oos(struct drbd_conf *, struct drbd_work *, int); extern void resync_timer_fn(unsigned long data); @@ -1626,7 +1638,7 @@ extern void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size, const char *file, const unsigned int line); #define drbd_set_in_sync(mdev, sector, size) \ __drbd_set_in_sync(mdev, sector, size, __FILE__, __LINE__) -extern void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, +extern int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size, const char *file, const unsigned int line); #define drbd_set_out_of_sync(mdev, sector, size) \ __drbd_set_out_of_sync(mdev, sector, size, __FILE__, __LINE__) diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 46f27d6c0b21..0dc93f43a476 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -2634,6 +2634,16 @@ int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd, return ok; } +int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req) +{ + struct p_block_desc p; + + p.sector = cpu_to_be64(req->sector); + p.blksize = cpu_to_be32(req->size); + + return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OUT_OF_SYNC, &p.head, sizeof(p)); +} + /* drbd_send distinguishes two cases: diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index b19e8b2c4ce5..04a08e7541cc 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -3562,6 +3562,15 @@ static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, u return TRUE; } +static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) +{ + struct p_block_desc *p = &mdev->data.rbuf.block_desc; + + drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize)); + + return TRUE; +} + typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive); struct data_cmd { @@ -3592,6 +3601,7 @@ static struct data_cmd drbd_cmd_handler[] = { [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest }, [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest }, [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip }, + [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync }, /* anything missing from this table is in * the asender_tbl, see get_asender_cmd */ 
[P_MAX_CMD] = { 0, 0, NULL }, diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 60288fb3c4d7..a8d1ff2bda27 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -142,7 +142,7 @@ static void _about_to_complete_local_write(struct drbd_conf *mdev, /* before we can signal completion to the upper layers, * we may need to close the current epoch */ - if (mdev->state.conn >= C_CONNECTED && + if (mdev->state.conn >= C_CONNECTED && mdev->state.conn < C_AHEAD && req->epoch == mdev->newest_tle->br_number) queue_barrier(mdev); @@ -545,6 +545,14 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, break; + case queue_for_send_oos: + req->rq_state |= RQ_NET_QUEUED; + req->w.cb = w_send_oos; + drbd_queue_work(&mdev->data.work, &req->w); + break; + + case oos_handed_to_network: + /* actually the same */ case send_canceled: /* treat it the same */ case send_failed: @@ -756,7 +764,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio) const sector_t sector = bio->bi_sector; struct drbd_tl_epoch *b = NULL; struct drbd_request *req; - int local, remote; + int local, remote, send_oos = 0; int err = -EIO; int ret = 0; @@ -820,8 +828,11 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio) } remote = remote && (mdev->state.pdsk == D_UP_TO_DATE || - (mdev->state.pdsk == D_INCONSISTENT && - mdev->state.conn >= C_CONNECTED)); + (mdev->state.pdsk >= D_INCONSISTENT && + mdev->state.conn >= C_CONNECTED && + mdev->state.conn < C_AHEAD)); + send_oos = (rw == WRITE && mdev->state.conn == C_AHEAD && + mdev->state.pdsk >= D_INCONSISTENT); if (!(local || remote) && !is_susp(mdev->state)) { if (__ratelimit(&drbd_ratelimit_state)) @@ -835,7 +846,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio) * but there is a race between testing the bit and pointer outside the * spinlock, and grabbing the spinlock. * if we lost that race, we retry. */ - if (rw == WRITE && remote && + if (rw == WRITE && (remote || send_oos) && mdev->unused_spare_tle == NULL && test_bit(CREATE_BARRIER, &mdev->flags)) { allocate_barrier: @@ -860,11 +871,15 @@ allocate_barrier: goto fail_free_complete; } - if (remote) { + if (remote || send_oos) { remote = (mdev->state.pdsk == D_UP_TO_DATE || - (mdev->state.pdsk == D_INCONSISTENT && - mdev->state.conn >= C_CONNECTED)); - if (!remote) + (mdev->state.pdsk >= D_INCONSISTENT && + mdev->state.conn >= C_CONNECTED && + mdev->state.conn < C_AHEAD)); + send_oos = (rw == WRITE && mdev->state.conn == C_AHEAD && + mdev->state.pdsk >= D_INCONSISTENT); + + if (!(remote || send_oos)) dev_warn(DEV, "lost connection while grabbing the req_lock!\n"); if (!(local || remote)) { dev_err(DEV, "IO ERROR: neither local nor remote disk\n"); @@ -877,7 +892,7 @@ allocate_barrier: mdev->unused_spare_tle = b; b = NULL; } - if (rw == WRITE && remote && + if (rw == WRITE && (remote || send_oos) && mdev->unused_spare_tle == NULL && test_bit(CREATE_BARRIER, &mdev->flags)) { /* someone closed the current epoch @@ -900,7 +915,7 @@ allocate_barrier: * barrier packet. To get the write ordering right, we only have to * make sure that, if this is a write request and it triggered a * barrier packet, this request is queued within the same spinlock. 
*/ - if (remote && mdev->unused_spare_tle && + if ((remote || send_oos) && mdev->unused_spare_tle && test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) { _tl_add_barrier(mdev, mdev->unused_spare_tle); mdev->unused_spare_tle = NULL; @@ -948,8 +963,11 @@ allocate_barrier: ? queue_for_net_write : queue_for_net_read); } + if (send_oos && drbd_set_out_of_sync(mdev, sector, size)) + _req_mod(req, queue_for_send_oos); - if (remote && mdev->net_conf->on_congestion != OC_BLOCK) { + if (remote && + mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96) { int congested = 0; if (mdev->net_conf->cong_fill && @@ -964,6 +982,8 @@ allocate_barrier: } if (congested) { + queue_barrier(mdev); + if (mdev->net_conf->on_congestion == OC_PULL_AHEAD) _drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL); else /*mdev->net_conf->on_congestion == OC_DISCONNECT */ diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h index 69d350fe7c1e..40d3dcd8fc81 100644 --- a/drivers/block/drbd/drbd_req.h +++ b/drivers/block/drbd/drbd_req.h @@ -82,14 +82,16 @@ enum drbd_req_event { to_be_submitted, /* XXX yes, now I am inconsistent... - * these two are not "events" but "actions" + * these are not "events" but "actions" * oh, well... */ queue_for_net_write, queue_for_net_read, + queue_for_send_oos, send_canceled, send_failed, handed_over_to_network, + oos_handed_to_network, connection_lost_while_pending, read_retry_remote_canceled, recv_acked_by_peer, diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 782d87237cb4..67499077c482 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -1237,6 +1237,22 @@ int w_send_write_hint(struct drbd_conf *mdev, struct drbd_work *w, int cancel) return drbd_send_short_cmd(mdev, P_UNPLUG_REMOTE); } +int w_send_oos(struct drbd_conf *mdev, struct drbd_work *w, int cancel) +{ + struct drbd_request *req = container_of(w, struct drbd_request, w); + int ok; + + if (unlikely(cancel)) { + req_mod(req, send_canceled); + return 1; + } + + ok = drbd_send_oos(mdev, req); + req_mod(req, oos_handed_to_network); + + return ok; +} + /** * w_send_dblock() - Worker callback to send a P_DATA packet in order to mirror a write request * @mdev: DRBD device. 
diff --git a/include/linux/drbd.h b/include/linux/drbd.h index 23f31be6f00d..41da654cc0b1 100644 --- a/include/linux/drbd.h +++ b/include/linux/drbd.h @@ -56,7 +56,7 @@ extern const char *drbd_buildtag(void); #define REL_VERSION "8.3.9" #define API_VERSION 88 #define PRO_VERSION_MIN 86 -#define PRO_VERSION_MAX 95 +#define PRO_VERSION_MAX 96 enum drbd_io_error_p { From c4752ef1284519c3baa1c3b19df34a80b4905245 Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Wed, 27 Oct 2010 17:32:36 +0200 Subject: [PATCH 021/122] drbd: When proxy's buffer drained off go into regular resync mode Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_int.h | 4 +++- drivers/block/drbd/drbd_main.c | 1 + drivers/block/drbd/drbd_receiver.c | 9 +++++++++ drivers/block/drbd/drbd_worker.c | 10 +++++++++- 4 files changed, 22 insertions(+), 2 deletions(-) diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 471331236826..da02cce374c9 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -973,7 +973,8 @@ struct drbd_conf { struct drbd_work resync_work, unplug_work, go_diskless, - md_sync_work; + md_sync_work, + start_resync_work; struct timer_list resync_timer; struct timer_list md_sync_timer; #ifdef DRBD_DEBUG_MD_SYNC @@ -1546,6 +1547,7 @@ extern int w_prev_work_done(struct drbd_conf *, struct drbd_work *, int); extern int w_e_reissue(struct drbd_conf *, struct drbd_work *, int); extern int w_restart_disk_io(struct drbd_conf *, struct drbd_work *, int); extern int w_send_oos(struct drbd_conf *, struct drbd_work *, int); +extern int w_start_resync(struct drbd_conf *, struct drbd_work *, int); extern void resync_timer_fn(unsigned long data); diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 0dc93f43a476..f49505cf8d0f 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -2846,6 +2846,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev) INIT_LIST_HEAD(&mdev->unplug_work.list); INIT_LIST_HEAD(&mdev->go_diskless.list); INIT_LIST_HEAD(&mdev->md_sync_work.list); + INIT_LIST_HEAD(&mdev->start_resync_work.list); INIT_LIST_HEAD(&mdev->bm_io_work.w.list); mdev->resync_work.cb = w_resync_inactive; diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 04a08e7541cc..ee9238e59327 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -3279,6 +3279,7 @@ static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsi wait_event(mdev->misc_wait, mdev->state.conn == C_WF_SYNC_UUID || + mdev->state.conn == C_BEHIND || mdev->state.conn < C_CONNECTED || mdev->state.disk < D_NEGOTIATING); @@ -4338,6 +4339,14 @@ static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h) tl_release(mdev, p->barrier, be32_to_cpu(p->set_size)); + if (mdev->state.conn == C_AHEAD && + atomic_read(&mdev->ap_in_flight) == 0 && + list_empty(&mdev->start_resync_work.list)) { + struct drbd_work *w = &mdev->start_resync_work; + w->cb = w_start_resync; + drbd_queue_work_front(&mdev->data.work, w); + } + return TRUE; } diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 67499077c482..c9e7cb7c788a 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -742,6 +742,14 @@ static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int ca return 1; } + +int w_start_resync(struct drbd_conf *mdev, struct drbd_work *w, int cancel) +{ + 
drbd_start_resync(mdev, C_SYNC_SOURCE); + + return 1; +} + int w_ov_finished(struct drbd_conf *mdev, struct drbd_work *w, int cancel) { kfree(w); @@ -1472,7 +1480,7 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side) union drbd_state ns; int r; - if (mdev->state.conn >= C_SYNC_SOURCE) { + if (mdev->state.conn >= C_SYNC_SOURCE && mdev->state.conn < C_AHEAD) { dev_err(DEV, "Resync already running!\n"); return; } From 59817f4fab6a165ba83ce399464ba38432db8233 Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Fri, 29 Oct 2010 12:44:20 +0200 Subject: [PATCH 022/122] drbd: Do not cleanup resync LRU for the Ahead/Behind SyncSource/SyncTarget transitions This one should be replaced with moving this cleanup to the 'right' position. Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_worker.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index c9e7cb7c788a..982d68432a0f 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -1485,8 +1485,13 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side) return; } - /* In case a previous resync run was aborted by an IO error/detach on the peer. */ - drbd_rs_cancel_all(mdev); + if (mdev->state.conn < C_AHEAD) { + /* In case a previous resync run was aborted by an IO error/detach on the peer. */ + drbd_rs_cancel_all(mdev); + /* This should be done when we abort the resync. We definitely do not + want to have this for connections going back and forth between + Ahead/Behind and SyncSource/SyncTarget */ + } if (side == C_SYNC_TARGET) { /* Since application IO was locked out during C_WF_BITMAP_T and From e3555d8545976703938d1b59e2db509426dbe02c Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Sun, 7 Nov 2010 15:56:29 +0100 Subject: [PATCH 023/122] drbd: Implemented priority inheritance for resync requests We only issue resync requests if there is no significant application IO going on. = Application IO has higher priority than resnyc IO. If application IO can not be started because the resync process locked an resync_lru entry, start the IO operations necessary to release the lock ASAP. 
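To make the mechanism concrete, here is a stand-alone toy model in user-space C. It is not part of the patch; it only mirrors the BME_PRIORITY handshake the diff below implements: the application-IO path flags a resync-locked extent, and the resync throttle then refuses to hold back work on a flagged extent.

        #include <stdbool.h>
        #include <stdio.h>

        #define BME_NO_WRITES 0x1   /* resync holds this extent, app writes must wait */
        #define BME_PRIORITY  0x2   /* app IO is waiting here: finish resync IO ASAP */

        struct bm_extent { unsigned int flags; };

        /* application-IO path: it found the extent locked by the resync process */
        static void app_io_blocked_on(struct bm_extent *e)
        {
                if (e->flags & BME_NO_WRITES)
                        e->flags |= BME_PRIORITY;
        }

        /* resync path: never throttle an extent that application IO waits on */
        static bool resync_may_throttle(const struct bm_extent *e)
        {
                return !(e->flags & BME_PRIORITY);
        }

        int main(void)
        {
                struct bm_extent e = { .flags = BME_NO_WRITES };

                app_io_blocked_on(&e);
                printf("throttle resync on this extent: %s\n",
                       resync_may_throttle(&e) ? "yes" : "no");
                return 0;
        }

In the real code the flag is set in _al_get() under al_lock and evaluated in drbd_rs_should_slow_down(); the toy omits the locking and the lru_cache plumbing.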
Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_actlog.c | 4 ++-- drivers/block/drbd/drbd_int.h | 3 ++- drivers/block/drbd/drbd_receiver.c | 22 ++++++++++++++++++---- drivers/block/drbd/drbd_worker.c | 18 +++++------------- 4 files changed, 27 insertions(+), 20 deletions(-) diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c index 33f6cc537d08..28f85d950781 100644 --- a/drivers/block/drbd/drbd_actlog.c +++ b/drivers/block/drbd/drbd_actlog.c @@ -182,6 +182,7 @@ static struct lc_element *_al_get(struct drbd_conf *mdev, unsigned int enr) if (unlikely(tmp != NULL)) { struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce); if (test_bit(BME_NO_WRITES, &bm_ext->flags)) { + set_bit(BME_PRIORITY, &bm_ext->flags); spin_unlock_irq(&mdev->al_lock); return NULL; } @@ -1297,8 +1298,7 @@ void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector) } if (lc_put(mdev->resync, &bm_ext->lce) == 0) { - clear_bit(BME_LOCKED, &bm_ext->flags); - clear_bit(BME_NO_WRITES, &bm_ext->flags); + bm_ext->flags = 0; /* clear BME_LOCKED, BME_NO_WRITES and BME_PRIORITY */ mdev->resync_locked--; wake_up(&mdev->al_wait); } diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index da02cce374c9..366873d661b8 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -1326,6 +1326,7 @@ struct bm_extent { #define BME_NO_WRITES 0 /* bm_extent.flags: no more requests on this one! */ #define BME_LOCKED 1 /* bm_extent.flags: syncer active on this one. */ +#define BME_PRIORITY 2 /* finish resync IO on this extent ASAP! App IO waiting! */ /* drbd_bitmap.c */ /* @@ -1552,7 +1553,7 @@ extern int w_start_resync(struct drbd_conf *, struct drbd_work *, int); extern void resync_timer_fn(unsigned long data); /* drbd_receiver.c */ -extern int drbd_rs_should_slow_down(struct drbd_conf *mdev); +extern int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector); extern int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, const unsigned rw, const int fault_type); extern int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list); diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index ee9238e59327..0630a2e122d3 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -1862,10 +1862,11 @@ out_interrupted: * The current sync rate used here uses only the most recent two step marks, * to have a short time average so we can react faster. 
*/ -int drbd_rs_should_slow_down(struct drbd_conf *mdev) +int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector) { struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk; unsigned long db, dt, dbdt; + struct lc_element *tmp; int curr_events; int throttle = 0; @@ -1873,9 +1874,22 @@ int drbd_rs_should_slow_down(struct drbd_conf *mdev) if (mdev->sync_conf.c_min_rate == 0) return 0; + spin_lock_irq(&mdev->al_lock); + tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector)); + if (tmp) { + struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce); + if (test_bit(BME_PRIORITY, &bm_ext->flags)) { + spin_unlock_irq(&mdev->al_lock); + return 0; + } + /* Do not slow down if app IO is already waiting for this extent */ + } + spin_unlock_irq(&mdev->al_lock); + curr_events = (int)part_stat_read(&disk->part0, sectors[0]) + (int)part_stat_read(&disk->part0, sectors[1]) - atomic_read(&mdev->rs_sect_ev); + if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) { unsigned long rs_left; int i; @@ -2060,9 +2074,9 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un * we would also throttle its application reads. * In that case, throttling is done on the SyncTarget only. */ - if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev)) - msleep(100); - if (drbd_rs_begin_io(mdev, e->sector)) + if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector)) + schedule_timeout_uninterruptible(HZ/10); + if (drbd_rs_begin_io(mdev, sector)) goto out_free_e; submit_for_resync: diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 982d68432a0f..4008130f2b2c 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -355,7 +355,7 @@ static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size) if (!get_ldev(mdev)) return -EIO; - if (drbd_rs_should_slow_down(mdev)) + if (drbd_rs_should_slow_down(mdev, sector)) goto defer; /* GFP_TRY, because if there is no memory available right now, this may @@ -503,16 +503,6 @@ int drbd_rs_number_requests(struct drbd_conf *mdev) number = SLEEP_TIME * mdev->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ); } - /* Throttle resync on lower level disk activity, which may also be - * caused by application IO on Primary/SyncTarget. - * Keep this after the call to drbd_rs_controller, as that assumes - * to be called as precisely as possible every SLEEP_TIME, - * and would be confused otherwise. */ - if (number && drbd_rs_should_slow_down(mdev)) { - mdev->c_sync_rate = 1; - number = 0; - } - /* ignore the amount of pending requests, the resync controller should * throttle down to incoming reply rate soon enough anyways. 
*/ return number; @@ -594,7 +584,8 @@ next_sector: sector = BM_BIT_TO_SECT(bit); - if (drbd_try_rs_begin_io(mdev, sector)) { + if (drbd_rs_should_slow_down(mdev, sector) || + drbd_try_rs_begin_io(mdev, sector)) { mdev->bm_resync_fo = bit; goto requeue; } @@ -719,7 +710,8 @@ static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int ca size = BM_BLOCK_SIZE; - if (drbd_try_rs_begin_io(mdev, sector)) { + if (drbd_rs_should_slow_down(mdev, sector) || + drbd_try_rs_begin_io(mdev, sector)) { mdev->ov_position = sector; goto requeue; } From 9d77a5fee9d2a1ea4cd9a841d27b107df5913b33 Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Sun, 7 Nov 2010 18:02:56 +0100 Subject: [PATCH 024/122] drbd: Make some functions static Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_int.h | 1 - drivers/block/drbd/drbd_worker.c | 10 ++++++---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 366873d661b8..77ac6765fd57 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -1540,7 +1540,6 @@ extern int w_ov_finished(struct drbd_conf *, struct drbd_work *, int); extern int w_resync_inactive(struct drbd_conf *, struct drbd_work *, int); extern int w_resume_next_sg(struct drbd_conf *, struct drbd_work *, int); extern int w_send_write_hint(struct drbd_conf *, struct drbd_work *, int); -extern int w_make_resync_request(struct drbd_conf *, struct drbd_work *, int); extern int w_send_dblock(struct drbd_conf *, struct drbd_work *, int); extern int w_send_barrier(struct drbd_conf *, struct drbd_work *, int); extern int w_send_read_req(struct drbd_conf *, struct drbd_work *, int); diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 4008130f2b2c..ab5cff502f5b 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -39,6 +39,8 @@ #include "drbd_req.h" static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel); +static int w_make_resync_request(struct drbd_conf *mdev, + struct drbd_work *w, int cancel); @@ -438,7 +440,7 @@ static void fifo_add_val(struct fifo_buffer *fb, int value) fb->values[i] += value; } -int drbd_rs_controller(struct drbd_conf *mdev) +static int drbd_rs_controller(struct drbd_conf *mdev) { unsigned int sect_in; /* Number of sectors that came in since the last turn */ unsigned int want; /* The number of sectors we want in the proxy */ @@ -492,7 +494,7 @@ int drbd_rs_controller(struct drbd_conf *mdev) return req_sect; } -int drbd_rs_number_requests(struct drbd_conf *mdev) +static int drbd_rs_number_requests(struct drbd_conf *mdev) { int number; if (mdev->rs_plan_s.size) { /* mdev->sync_conf.c_plan_ahead */ @@ -508,8 +510,8 @@ int drbd_rs_number_requests(struct drbd_conf *mdev) return number; } -int w_make_resync_request(struct drbd_conf *mdev, - struct drbd_work *w, int cancel) +static int w_make_resync_request(struct drbd_conf *mdev, + struct drbd_work *w, int cancel) { unsigned long bit; sector_t sector; From f91ab6282df251d28aa4ad1bd85194ebe0a0115b Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Tue, 9 Nov 2010 13:59:41 +0100 Subject: [PATCH 025/122] drbd: Implemented side-stepping in drbd_res_begin_io() Before: drbd_rs_begin_io() locked app-IO out of an RS extent, and waited then until all previous app-IO in that area finished. 
(But not only until the disk-IO was finished but until the barrier/epoch ack came in for that == round trip time latency ++) After: As soon as a new app-IO waits wants to start new IO on that RS extent, drbd_rs_begin_io() steps aside (clearing the BME_NO_WRITES flag again). It retries after 100ms. Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_actlog.c | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c index 28f85d950781..5570d9bdc863 100644 --- a/drivers/block/drbd/drbd_actlog.c +++ b/drivers/block/drbd/drbd_actlog.c @@ -176,14 +176,17 @@ static struct lc_element *_al_get(struct drbd_conf *mdev, unsigned int enr) struct lc_element *al_ext; struct lc_element *tmp; unsigned long al_flags = 0; + int wake; spin_lock_irq(&mdev->al_lock); tmp = lc_find(mdev->resync, enr/AL_EXT_PER_BM_SECT); if (unlikely(tmp != NULL)) { struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce); if (test_bit(BME_NO_WRITES, &bm_ext->flags)) { - set_bit(BME_PRIORITY, &bm_ext->flags); + wake = !test_and_set_bit(BME_PRIORITY, &bm_ext->flags); spin_unlock_irq(&mdev->al_lock); + if (wake) + wake_up(&mdev->al_wait); return NULL; } } @@ -1135,7 +1138,10 @@ int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector) unsigned int enr = BM_SECT_TO_EXT(sector); struct bm_extent *bm_ext; int i, sig; + int sa = 200; /* Step aside 200 times, then grab the extent and let app-IO wait. + 200 times -> 20 seconds. */ +retry: sig = wait_event_interruptible(mdev->al_wait, (bm_ext = _bme_get(mdev, enr))); if (sig) @@ -1146,16 +1152,24 @@ int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector) for (i = 0; i < AL_EXT_PER_BM_SECT; i++) { sig = wait_event_interruptible(mdev->al_wait, - !_is_in_al(mdev, enr * AL_EXT_PER_BM_SECT + i)); - if (sig) { + !_is_in_al(mdev, enr * AL_EXT_PER_BM_SECT + i) || + (test_bit(BME_PRIORITY, &bm_ext->flags) && sa)); + + if (sig || (test_bit(BME_PRIORITY, &bm_ext->flags) && sa)) { spin_lock_irq(&mdev->al_lock); if (lc_put(mdev->resync, &bm_ext->lce) == 0) { - clear_bit(BME_NO_WRITES, &bm_ext->flags); + bm_ext->flags = 0; /* clears BME_NO_WRITES and eventually BME_PRIORITY */ mdev->resync_locked--; wake_up(&mdev->al_wait); } spin_unlock_irq(&mdev->al_lock); - return -EINTR; + if (sig) + return -EINTR; + if (schedule_timeout_interruptible(HZ/10)) + return -EINTR; + if (--sa == 0) + dev_warn(DEV,"drbd_rs_begin_io() no longer stepping aside.\n"); + goto retry; } } set_bit(BME_LOCKED, &bm_ext->flags); From aeda1cd6a58009ef21b6d9c09b1a3ded726c2d10 Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Tue, 9 Nov 2010 17:45:06 +0100 Subject: [PATCH 026/122] drbd: Begin to account BIO processing time before inc_ap_bio() Since inc_ap_bio() might sleep already Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_req.c | 12 ++++++++---- drivers/block/drbd/drbd_req.h | 1 - 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index a8d1ff2bda27..eff0fbf69dd4 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -757,7 +757,7 @@ static int drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int s return 0 == drbd_bm_count_bits(mdev, sbnr, ebnr); } -static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio) +static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, unsigned long 
start_time) { const int rw = bio_rw(bio); const int size = bio->bi_size; @@ -778,6 +778,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio) bio_endio(bio, -ENOMEM); return 0; } + req->start_time = start_time; local = get_ldev(mdev); if (!local) { @@ -1076,12 +1077,15 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio) { unsigned int s_enr, e_enr; struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata; + unsigned long start_time; if (drbd_fail_request_early(mdev, bio_data_dir(bio) & WRITE)) { bio_endio(bio, -EPERM); return 0; } + start_time = jiffies; + /* * what we "blindly" assume: */ @@ -1096,7 +1100,7 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio) if (likely(s_enr == e_enr)) { inc_ap_bio(mdev, 1); - return drbd_make_request_common(mdev, bio); + return drbd_make_request_common(mdev, bio, start_time); } /* can this bio be split generically? @@ -1138,10 +1142,10 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio) D_ASSERT(e_enr == s_enr + 1); - while (drbd_make_request_common(mdev, &bp->bio1)) + while (drbd_make_request_common(mdev, &bp->bio1, start_time)) inc_ap_bio(mdev, 1); - while (drbd_make_request_common(mdev, &bp->bio2)) + while (drbd_make_request_common(mdev, &bp->bio2, start_time)) inc_ap_bio(mdev, 1); dec_ap_bio(mdev); diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h index 40d3dcd8fc81..077c47b1e9d7 100644 --- a/drivers/block/drbd/drbd_req.h +++ b/drivers/block/drbd/drbd_req.h @@ -291,7 +291,6 @@ static inline struct drbd_request *drbd_req_new(struct drbd_conf *mdev, req->epoch = 0; req->sector = bio_src->bi_sector; req->size = bio_src->bi_size; - req->start_time = jiffies; INIT_HLIST_NODE(&req->colision); INIT_LIST_HEAD(&req->tl_requests); INIT_LIST_HEAD(&req->w.list); From 127b317844e7cc0458743b604998bece95eab030 Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Tue, 16 Nov 2010 10:07:53 +0100 Subject: [PATCH 027/122] drbd: use test_and_set_bit() to decide if bm_io_work should be queued Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_main.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index f49505cf8d0f..843b90966920 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -3772,6 +3772,7 @@ static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused) drbd_bm_unlock(mdev); clear_bit(BITMAP_IO, &mdev->flags); + smp_mb__after_clear_bit(); wake_up(&mdev->misc_wait); if (work->done) @@ -3850,11 +3851,8 @@ void drbd_queue_bitmap_io(struct drbd_conf *mdev, set_bit(BITMAP_IO, &mdev->flags); if (atomic_read(&mdev->ap_bio_cnt) == 0) { - if (list_empty(&mdev->bm_io_work.w.list)) { - set_bit(BITMAP_IO_QUEUED, &mdev->flags); + if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags)) drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w); - } else - dev_err(DEV, "FIXME avoided double queuing bm_io_work\n"); } } From 8869d683b7491467fd39fcbe79756fce3e6f35e7 Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Wed, 17 Nov 2010 18:24:19 +0100 Subject: [PATCH 028/122] drbd: Fixed inc_ap_bio() The condition must be checked after perpare_to_wait(). The old implementaion could loose wakeup events. Never observed in real life. 
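The race being fixed is the classic lost-wakeup pattern: when the condition is tested before the task registers itself as a waiter, a wakeup arriving in between is missed and the task sleeps although the condition already holds. As a rough user-space analogue (pthreads, not the kernel code), the fixed ordering of "register, re-check under the lock, only then sleep" looks like this; inc_ap_bio matches the kernel function name, allow_io is an invented counterpart for the wake-up side.

        #include <pthread.h>
        #include <stdbool.h>

        static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
        static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
        static bool may_start_io;   /* plays the role of __inc_ap_bio_cond() */
        static int  ap_bio_cnt;     /* plays the role of mdev->ap_bio_cnt */

        void inc_ap_bio(int count)
        {
                pthread_mutex_lock(&lock);
                while (!may_start_io)                    /* re-checked after every wakeup */
                        pthread_cond_wait(&cond, &lock); /* drops the lock while sleeping */
                ap_bio_cnt += count;                     /* counted under the same lock */
                pthread_mutex_unlock(&lock);
        }

        void allow_io(void)
        {
                pthread_mutex_lock(&lock);
                may_start_io = true;
                pthread_cond_broadcast(&cond);
                pthread_mutex_unlock(&lock);
        }

The kernel-side fix below gets the same guarantee from wait_event(): the condition is re-evaluated, and ap_bio_cnt is increased, while holding req_lock.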
Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_int.h | 31 ++++++++++++++----------------- 1 file changed, 14 insertions(+), 17 deletions(-) diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 77ac6765fd57..9a944604939f 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -2309,15 +2309,21 @@ static inline int __inc_ap_bio_cond(struct drbd_conf *mdev) return 1; } -/* I'd like to use wait_event_lock_irq, - * but I'm not sure when it got introduced, - * and not sure when it has 3 or 4 arguments */ +static inline int _inc_ap_bio_cond(struct drbd_conf *mdev, int count) +{ + int rv = 0; + + spin_lock_irq(&mdev->req_lock); + rv = __inc_ap_bio_cond(mdev); + if (rv) + atomic_add(count, &mdev->ap_bio_cnt); + spin_unlock_irq(&mdev->req_lock); + + return rv; +} + static inline void inc_ap_bio(struct drbd_conf *mdev, int count) { - /* compare with after_state_ch, - * os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S */ - DEFINE_WAIT(wait); - /* we wait here * as long as the device is suspended * until the bitmap is no longer on the fly during connection @@ -2326,16 +2332,7 @@ static inline void inc_ap_bio(struct drbd_conf *mdev, int count) * to avoid races with the reconnect code, * we need to atomic_inc within the spinlock. */ - spin_lock_irq(&mdev->req_lock); - while (!__inc_ap_bio_cond(mdev)) { - prepare_to_wait(&mdev->misc_wait, &wait, TASK_UNINTERRUPTIBLE); - spin_unlock_irq(&mdev->req_lock); - schedule(); - finish_wait(&mdev->misc_wait, &wait); - spin_lock_irq(&mdev->req_lock); - } - atomic_add(count, &mdev->ap_bio_cnt); - spin_unlock_irq(&mdev->req_lock); + wait_event(mdev->misc_wait, _inc_ap_bio_cond(mdev, count)); } static inline void dec_ap_bio(struct drbd_conf *mdev) From 22afd7ee94c1c5857323b677267ba8bace09bcef Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Tue, 16 Nov 2010 15:30:44 +0100 Subject: [PATCH 029/122] drbd: Fixed race condition in drbd_queue_bitmap_io May only test for ap_bio_cnt == 0 under req_lock. It can increase only under req_lock. Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_main.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 843b90966920..975dc5a66549 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -3849,11 +3849,13 @@ void drbd_queue_bitmap_io(struct drbd_conf *mdev, mdev->bm_io_work.done = done; mdev->bm_io_work.why = why; + spin_lock_irq(&mdev->req_lock); set_bit(BITMAP_IO, &mdev->flags); if (atomic_read(&mdev->ap_bio_cnt) == 0) { if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags)) drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w); } + spin_unlock_irq(&mdev->req_lock); } /** From ab17b68f4579b460753a416b0afc4446381d876f Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Wed, 17 Nov 2010 16:54:36 +0100 Subject: [PATCH 030/122] drbd: Improvements in sanitize_state() The relevant change is that the state change to C_FW_BITMAP_S should implicitly change pdsk to C_CONSISTENT. (Think of it as C_OUTDATED, only without the guarantee that the peer has the outdated written to its meta data) At that opportunity I restructured the switch statement so that it gets evaluated every time. 
(Has declarative character) Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_main.c | 144 ++++++++++++++++++++------------- 1 file changed, 89 insertions(+), 55 deletions(-) diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 975dc5a66549..74a6d55259af 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -817,6 +817,7 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state union drbd_state ns, const char **warn_sync_abort) { enum drbd_fencing_p fp; + enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max; fp = FP_DONT_CARE; if (get_ldev(mdev)) { @@ -869,61 +870,6 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state ns.conn = C_CONNECTED; } - if (ns.conn >= C_CONNECTED && - ((ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED) || - (ns.disk == D_NEGOTIATING && ns.conn == C_WF_BITMAP_T) || - ns.conn >= C_AHEAD)) { - switch (ns.conn) { - case C_WF_BITMAP_T: - case C_PAUSED_SYNC_T: - case C_BEHIND: - ns.disk = D_OUTDATED; - break; - case C_CONNECTED: - case C_WF_BITMAP_S: - case C_SYNC_SOURCE: - case C_PAUSED_SYNC_S: - case C_AHEAD: - ns.disk = D_UP_TO_DATE; - break; - case C_SYNC_TARGET: - ns.disk = D_INCONSISTENT; - dev_warn(DEV, "Implicitly set disk state Inconsistent!\n"); - break; - } - if (os.disk == D_OUTDATED && ns.disk == D_UP_TO_DATE) - dev_warn(DEV, "Implicitly set disk from Outdated to UpToDate\n"); - } - - if (ns.conn >= C_CONNECTED && - (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED || ns.conn >= C_AHEAD)) { - switch (ns.conn) { - case C_CONNECTED: - case C_WF_BITMAP_T: - case C_PAUSED_SYNC_T: - case C_SYNC_TARGET: - case C_BEHIND: - ns.pdsk = D_UP_TO_DATE; - break; - case C_WF_BITMAP_S: - case C_PAUSED_SYNC_S: - case C_AHEAD: - /* remap any consistent state to D_OUTDATED, - * but disallow "upgrade" of not even consistent states. - */ - ns.pdsk = - (D_DISKLESS < os.pdsk && os.pdsk < D_OUTDATED) - ? 
os.pdsk : D_OUTDATED; - break; - case C_SYNC_SOURCE: - ns.pdsk = D_INCONSISTENT; - dev_warn(DEV, "Implicitly set pdsk Inconsistent!\n"); - break; - } - if (os.pdsk == D_OUTDATED && ns.pdsk == D_UP_TO_DATE) - dev_warn(DEV, "Implicitly set pdsk from Outdated to UpToDate\n"); - } - /* Connection breaks down before we finished "Negotiating" */ if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING && get_ldev_if_state(mdev, D_NEGOTIATING)) { @@ -938,6 +884,94 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state put_ldev(mdev); } + /* D_CONSISTENT and D_OUTDATED vanish when we get connected */ + if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) { + if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED) + ns.disk = D_UP_TO_DATE; + if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED) + ns.pdsk = D_UP_TO_DATE; + } + + /* Implications of the connection stat on the disk states */ + disk_min = D_DISKLESS; + disk_max = D_UP_TO_DATE; + pdsk_min = D_INCONSISTENT; + pdsk_max = D_UNKNOWN; + switch ((enum drbd_conns)ns.conn) { + case C_WF_BITMAP_T: + case C_PAUSED_SYNC_T: + case C_STARTING_SYNC_T: + case C_WF_SYNC_UUID: + case C_BEHIND: + disk_min = D_INCONSISTENT; + disk_max = D_OUTDATED; + pdsk_min = D_UP_TO_DATE; + pdsk_max = D_UP_TO_DATE; + break; + case C_VERIFY_S: + case C_VERIFY_T: + disk_min = D_UP_TO_DATE; + disk_max = D_UP_TO_DATE; + pdsk_min = D_UP_TO_DATE; + pdsk_max = D_UP_TO_DATE; + break; + case C_CONNECTED: + disk_min = D_DISKLESS; + disk_max = D_UP_TO_DATE; + pdsk_min = D_DISKLESS; + pdsk_max = D_UP_TO_DATE; + break; + case C_WF_BITMAP_S: + case C_PAUSED_SYNC_S: + case C_STARTING_SYNC_S: + case C_AHEAD: + disk_min = D_UP_TO_DATE; + disk_max = D_UP_TO_DATE; + pdsk_min = D_INCONSISTENT; + pdsk_max = D_CONSISTENT; /* D_OUTDATED would be nice. But explicit outdate necessary*/ + break; + case C_SYNC_TARGET: + disk_min = D_INCONSISTENT; + disk_max = D_INCONSISTENT; + pdsk_min = D_UP_TO_DATE; + pdsk_max = D_UP_TO_DATE; + break; + case C_SYNC_SOURCE: + disk_min = D_UP_TO_DATE; + disk_max = D_UP_TO_DATE; + pdsk_min = D_INCONSISTENT; + pdsk_max = D_INCONSISTENT; + break; + case C_STANDALONE: + case C_DISCONNECTING: + case C_UNCONNECTED: + case C_TIMEOUT: + case C_BROKEN_PIPE: + case C_NETWORK_FAILURE: + case C_PROTOCOL_ERROR: + case C_TEAR_DOWN: + case C_WF_CONNECTION: + case C_WF_REPORT_PARAMS: + case C_MASK: + break; + } + if (ns.disk > disk_max) + ns.disk = disk_max; + + if (ns.disk < disk_min) { + dev_warn(DEV, "Implicitly set disk from %s to %s\n", + drbd_disk_str(ns.disk), drbd_disk_str(disk_min)); + ns.disk = disk_min; + } + if (ns.pdsk > pdsk_max) + ns.pdsk = pdsk_max; + + if (ns.pdsk < pdsk_min) { + dev_warn(DEV, "Implicitly set pdsk from %s to %s\n", + drbd_disk_str(ns.pdsk), drbd_disk_str(pdsk_min)); + ns.pdsk = pdsk_min; + } + if (fp == FP_STONITH && (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) && !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED)) From 3719094ec2dec411b3151f10048316d787e086f9 Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Wed, 10 Nov 2010 12:08:37 +0100 Subject: [PATCH 031/122] drbd: Starting with protocol 96 we can allow app-IO while receiving the bitmap * C_STARTING_SYNC_S, C_STARTING_SYNC_T In these states the bitmap gets written to disk. Locking out of app-IO is done by using the drbd_queue_bitmap_io() and drbd_bitmap_io() functions these days. It is no longer necessary to lock out app-IO based on the connection state. 
App-IO that may come in after the BITMAP_IO flag got cleared before the state transition to C_SYNC_(SOURCE|TARGET) does not get mirrored, sets a bit in the local bitmap, that is already set, therefore changes nothing. * C_WF_BITMAP_S In this state we send updates (P_OUT_OF_SYNC packets). With that we make sure they have the same number of bits when going into the C_SYNC_(SOURCE|TARGET) connection state. * C_UNCONNECTED: The receiver starts, no need to lock out IO. * C_DISCONNECTING: in drbd_disconnect() we had a wait_event() to wait until ap_bio_cnt reaches 0. Removed that. * C_TIMEOUT, C_BROKEN_PIPE, C_NETWORK_FAILURE C_PROTOCOL_ERROR, C_TEAR_DOWN: Same as C_DISCONNECTING * C_WF_REPORT_PARAMS: IO still possible since that is still like C_WF_CONNECTION. And we do not need to send barriers in C_WF_BITMAP_S connection state. Allow concurrent accesses to the bitmap when receiving the bitmap. Everything gets ORed anyways. A drbd_free_tl_hash() is in after_state_chg_work(). At that point all the work items of the last connections must have been processed. Introduced a call to drbd_free_tl_hash() into drbd_free_mdev() for paranoia reasons. Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_int.h | 19 +++++++++------ drivers/block/drbd/drbd_main.c | 1 + drivers/block/drbd/drbd_receiver.c | 13 ++--------- drivers/block/drbd/drbd_req.c | 37 +++++++++++++++++++----------- 4 files changed, 39 insertions(+), 31 deletions(-) diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 9a944604939f..38bbaba8bd89 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -2213,8 +2213,9 @@ static inline int drbd_get_max_buffers(struct drbd_conf *mdev) return mxb; } -static inline int drbd_state_is_stable(union drbd_state s) +static inline int drbd_state_is_stable(struct drbd_conf *mdev) { + union drbd_state s = mdev->state; /* DO NOT add a default clause, we want the compiler to warn us * for any newly introduced state we may have forgotten to add here */ @@ -2233,11 +2234,7 @@ static inline int drbd_state_is_stable(union drbd_state s) case C_PAUSED_SYNC_T: case C_AHEAD: case C_BEHIND: - /* maybe stable, look at the disk state */ - break; - - /* no new io accepted during tansitional states - * like handshake or teardown */ + /* transitional states, IO allowed */ case C_DISCONNECTING: case C_UNCONNECTED: case C_TIMEOUT: @@ -2248,7 +2245,15 @@ static inline int drbd_state_is_stable(union drbd_state s) case C_WF_REPORT_PARAMS: case C_STARTING_SYNC_S: case C_STARTING_SYNC_T: + break; + + /* Allow IO in BM exchange states with new protocols */ case C_WF_BITMAP_S: + if (mdev->agreed_pro_version < 96) + return 0; + break; + + /* no new io accepted in these states */ case C_WF_BITMAP_T: case C_WF_SYNC_UUID: case C_MASK: @@ -2297,7 +2302,7 @@ static inline int __inc_ap_bio_cond(struct drbd_conf *mdev) * to start during "stable" states. 
*/ /* no new io accepted when attaching or detaching the disk */ - if (!drbd_state_is_stable(mdev->state)) + if (!drbd_state_is_stable(mdev)) return 0; /* since some older kernels don't have atomic_add_unless, diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 74a6d55259af..14afbd4e53a5 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -3334,6 +3334,7 @@ void drbd_free_mdev(struct drbd_conf *mdev) put_disk(mdev->vdisk); blk_cleanup_queue(mdev->rq_queue); free_cpumask_var(mdev->cpu_mask); + drbd_free_tl_hash(mdev); kfree(mdev); } diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 0630a2e122d3..f4aba9f894ba 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -3468,9 +3468,7 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne int ok = FALSE; struct p_header80 *h = &mdev->data.rbuf.header.h80; - wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt)); - - drbd_bm_lock(mdev, "receive bitmap"); + /* drbd_bm_lock(mdev, "receive bitmap"); By intention no bm_lock */ /* maybe we should use some per thread scratch page, * and allocate that during initial device creation? */ @@ -3542,7 +3540,7 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne ok = TRUE; out: - drbd_bm_unlock(mdev); + /* drbd_bm_unlock(mdev); by intention no lock */ if (ok && mdev->state.conn == C_WF_BITMAP_S) drbd_start_resync(mdev, C_SYNC_SOURCE); free_page((unsigned long) buffer); @@ -3804,13 +3802,6 @@ static void drbd_disconnect(struct drbd_conf *mdev) if (os.conn == C_DISCONNECTING) { wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0); - if (!is_susp(mdev->state)) { - /* we must not free the tl_hash - * while application io is still on the fly */ - wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt)); - drbd_free_tl_hash(mdev); - } - crypto_free_hash(mdev->cram_hmac_tfm); mdev->cram_hmac_tfm = NULL; diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index eff0fbf69dd4..4cb8247d83c9 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -142,7 +142,7 @@ static void _about_to_complete_local_write(struct drbd_conf *mdev, /* before we can signal completion to the upper layers, * we may need to close the current epoch */ - if (mdev->state.conn >= C_CONNECTED && mdev->state.conn < C_AHEAD && + if (mdev->state.conn >= C_WF_BITMAP_T && mdev->state.conn < C_AHEAD && req->epoch == mdev->newest_tle->br_number) queue_barrier(mdev); @@ -757,6 +757,23 @@ static int drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int s return 0 == drbd_bm_count_bits(mdev, sbnr, ebnr); } +static int drbd_should_do_remote(struct drbd_conf *mdev) +{ + union drbd_state s = mdev->state; + + return s.pdsk == D_UP_TO_DATE || + (s.pdsk >= D_INCONSISTENT && + s.conn >= C_WF_BITMAP_T && + s.conn < C_AHEAD); +} +static int drbd_should_send_oos(struct drbd_conf *mdev) +{ + union drbd_state s = mdev->state; + + return s.pdsk >= D_INCONSISTENT && + (s.conn == C_AHEAD || s.conn == C_WF_BITMAP_S); +} + static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time) { const int rw = bio_rw(bio); @@ -828,12 +845,9 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns drbd_al_begin_io(mdev, sector); } - remote = remote && (mdev->state.pdsk == D_UP_TO_DATE || - (mdev->state.pdsk >= D_INCONSISTENT && - 
mdev->state.conn >= C_CONNECTED && - mdev->state.conn < C_AHEAD)); - send_oos = (rw == WRITE && mdev->state.conn == C_AHEAD && - mdev->state.pdsk >= D_INCONSISTENT); + remote = remote && drbd_should_do_remote(mdev); + send_oos = rw == WRITE && drbd_should_send_oos(mdev); + D_ASSERT(!(remote && send_oos)); if (!(local || remote) && !is_susp(mdev->state)) { if (__ratelimit(&drbd_ratelimit_state)) @@ -873,12 +887,9 @@ allocate_barrier: } if (remote || send_oos) { - remote = (mdev->state.pdsk == D_UP_TO_DATE || - (mdev->state.pdsk >= D_INCONSISTENT && - mdev->state.conn >= C_CONNECTED && - mdev->state.conn < C_AHEAD)); - send_oos = (rw == WRITE && mdev->state.conn == C_AHEAD && - mdev->state.pdsk >= D_INCONSISTENT); + remote = drbd_should_do_remote(mdev); + send_oos = rw == WRITE && drbd_should_send_oos(mdev); + D_ASSERT(!(remote && send_oos)); if (!(remote || send_oos)) dev_warn(DEV, "lost connection while grabbing the req_lock!\n"); From 1fc80cf37810d6b00cac27a219b8ecab2010adac Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Mon, 22 Nov 2010 14:18:47 +0100 Subject: [PATCH 032/122] drbd: Becoming sync target may not happen out of < C_WF_REPORT_PARAMS This patch is actually a necessary addendum to the patch "fix for spurious full sync (becoming sync target looked like invalidate)" Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_main.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 14afbd4e53a5..8b8a38dc6492 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -800,6 +800,10 @@ static int is_valid_state_transition(struct drbd_conf *mdev, os.conn < C_CONNECTED) rv = SS_NEED_CONNECTION; + if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE) + && os.conn < C_WF_REPORT_PARAMS) + rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */ + return rv; } From c507f46f26661d4d0dc95a1b1271df5855ab4602 Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Mon, 22 Nov 2010 15:49:17 +0100 Subject: [PATCH 033/122] drbd: Removed 20 seconds upper bound for side-stepping Given low-enough network bandwidth combined with an IO pattern that hammers onto a single RS-extent, side-stepping might be necessary for much longer times. Changed the code to print a single informal message after 20 seconds, but it keeps on stepping aside forever. Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_actlog.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c index 5570d9bdc863..4a1b199f4ae7 100644 --- a/drivers/block/drbd/drbd_actlog.c +++ b/drivers/block/drbd/drbd_actlog.c @@ -1153,7 +1153,7 @@ retry: for (i = 0; i < AL_EXT_PER_BM_SECT; i++) { sig = wait_event_interruptible(mdev->al_wait, !_is_in_al(mdev, enr * AL_EXT_PER_BM_SECT + i) || - (test_bit(BME_PRIORITY, &bm_ext->flags) && sa)); + test_bit(BME_PRIORITY, &bm_ext->flags)); if (sig || (test_bit(BME_PRIORITY, &bm_ext->flags) && sa)) { spin_lock_irq(&mdev->al_lock); @@ -1167,8 +1167,9 @@ retry: return -EINTR; if (schedule_timeout_interruptible(HZ/10)) return -EINTR; - if (--sa == 0) - dev_warn(DEV,"drbd_rs_begin_io() no longer stepping aside.\n"); + if (sa && --sa == 0) + dev_warn(DEV,"drbd_rs_begin_io() stepped aside for 20sec."
+ "Resync stalled?\n"); goto retry; } } From 3da127fa887e5187ede702b835770634d705f8b2 Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Wed, 24 Nov 2010 10:33:02 +0100 Subject: [PATCH 034/122] drbd: increase module count on /proc/drbd access If someone holds /proc/drbd open, previously rmmod would "succeed" in starting the unload, but then block on remove_proc_entry, leading to a situation where the lsmod does not show drbd anymore, but /proc/drbd being still there (but no longer accessible). I'd rather have rmmod fail up front in this case. Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_proc.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c index 49d817cfe865..efba62cd2e58 100644 --- a/drivers/block/drbd/drbd_proc.c +++ b/drivers/block/drbd/drbd_proc.c @@ -34,6 +34,7 @@ #include "drbd_int.h" static int drbd_proc_open(struct inode *inode, struct file *file); +static int drbd_proc_release(struct inode *inode, struct file *file); struct proc_dir_entry *drbd_proc; @@ -42,7 +43,7 @@ const struct file_operations drbd_proc_fops = { .open = drbd_proc_open, .read = seq_read, .llseek = seq_lseek, - .release = single_release, + .release = drbd_proc_release, }; void seq_printf_with_thousands_grouping(struct seq_file *seq, long v) @@ -304,7 +305,15 @@ static int drbd_seq_show(struct seq_file *seq, void *v) static int drbd_proc_open(struct inode *inode, struct file *file) { - return single_open(file, drbd_seq_show, PDE(inode)->data); + if (try_module_get(THIS_MODULE)) + return single_open(file, drbd_seq_show, PDE(inode)->data); + return -ENODEV; +} + +static int drbd_proc_release(struct inode *inode, struct file *file) +{ + module_put(THIS_MODULE); + return single_release(inode, file); } /* PROC FS stuff end */ From 17a93f3007c13003c739a19d0814cb5c2d21daba Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Wed, 24 Nov 2010 10:37:35 +0100 Subject: [PATCH 035/122] drbd: remove /proc/drbd before unregistering from netlink There still exists a (theoretical) race on module unload, where /proc/drbd may still exist, but the netlink callback has been unregistered already, allowing drbdsetup to shout without listeners, and get no reply. Reorder remove_proc_entry and unregister of netlink callback. drbdsetup first checks for existence of the proc entry, and if that is missing, won't even try to contact the module. Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_main.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 8b8a38dc6492..150ed16d26ef 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -3174,11 +3174,20 @@ static void drbd_cleanup(void) unregister_reboot_notifier(&drbd_notifier); + /* first remove proc, + * drbdsetup uses it's presence to detect + * whether DRBD is loaded. + * If we would get stuck in proc removal, + * but have netlink already deregistered, + * some drbdsetup commands may wait forever + * for an answer. 
+ */ + if (drbd_proc) + remove_proc_entry("drbd", NULL); + drbd_nl_cleanup(); if (minor_table) { - if (drbd_proc) - remove_proc_entry("drbd", NULL); i = minor_count; while (i--) drbd_delete_device(i); From 3e3a7766c2e6995ac98e7855017abc3544d54e08 Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Wed, 24 Nov 2010 10:41:45 +0100 Subject: [PATCH 036/122] drbd: use kzalloc and memset(,0,) to start with clean buffers in drbd_nl Make sure we start with clean buffers to not accidentally send garbage back to userspace. Note: has not been observed; but just in case. Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_nl.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index f969d8717e23..80a389d24cdd 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -2211,7 +2211,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms reply_size += cm->reply_body_size; /* allocation not in the IO path, cqueue thread context */ - cn_reply = kmalloc(reply_size, GFP_KERNEL); + cn_reply = kzalloc(reply_size, GFP_KERNEL); if (!cn_reply) { retcode = ERR_NOMEM; goto fail; @@ -2382,7 +2382,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev, /* receiver thread context, which is not in the writeout path (of this node), * but may be in the writeout path of the _other_ node. * GFP_NOIO to avoid potential "distributed deadlock". */ - cn_reply = kmalloc( + cn_reply = kzalloc( sizeof(struct cn_msg)+ sizeof(struct drbd_nl_cfg_reply)+ sizeof(struct dump_ee_tag_len_struct)+ @@ -2517,6 +2517,7 @@ void drbd_nl_send_reply(struct cn_msg *req, int ret_code) (struct drbd_nl_cfg_reply *)cn_reply->data; int rr; + memset(buffer, 0, sizeof(buffer)); cn_reply->id = req->id; cn_reply->seq = req->seq; From 42ff269d1022a86be4f526cf674998c47b7ab856 Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Wed, 24 Nov 2010 10:11:14 +0100 Subject: [PATCH 037/122] drbd: add packet_type 27 (return_code_only) to netlink api In case we should ever add another packet type, we must not reuse 27, as that is currently used for "empty" return code only replies. Document it as such. Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_nl.c | 6 ++++-- include/linux/drbd_nl.h | 6 +++++- include/linux/drbd_tag_magic.h | 1 + 3 files changed, 10 insertions(+), 3 deletions(-) diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 80a389d24cdd..6a6dde6c51c6 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -2195,7 +2195,8 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms goto fail; } - if (nlp->packet_type >= P_nl_after_last_packet) { + if (nlp->packet_type >= P_nl_after_last_packet || + nlp->packet_type == P_return_code_only) { retcode = ERR_PACKET_NR; goto fail; } @@ -2219,7 +2220,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms reply = (struct drbd_nl_cfg_reply *) cn_reply->data; reply->packet_type = - cm->reply_body_size ? nlp->packet_type : P_nl_after_last_packet; + cm->reply_body_size ? nlp->packet_type : P_return_code_only; reply->minor = nlp->drbd_minor; reply->ret_code = NO_ERROR; /* Might by modified by cm->function. */ /* reply->tag_list; might be modified by cm->function.
*/ @@ -2525,6 +2526,7 @@ void drbd_nl_send_reply(struct cn_msg *req, int ret_code) cn_reply->len = sizeof(struct drbd_nl_cfg_reply); cn_reply->flags = 0; + reply->packet_type = P_return_code_only; reply->minor = ((struct drbd_nl_cfg_req *)req->data)->drbd_minor; reply->ret_code = ret_code; diff --git a/include/linux/drbd_nl.h b/include/linux/drbd_nl.h index 8cde3945d1f7..6fc614b06c4d 100644 --- a/include/linux/drbd_nl.h +++ b/include/linux/drbd_nl.h @@ -146,9 +146,13 @@ NL_PACKET(new_c_uuid, 26, NL_BIT( 63, T_MANDATORY, clear_bm) ) +#ifdef NL_RESPONSE +NL_RESPONSE(return_code_only, 27) +#endif + #undef NL_PACKET #undef NL_INTEGER #undef NL_INT64 #undef NL_BIT #undef NL_STRING - +#undef NL_RESPONSE diff --git a/include/linux/drbd_tag_magic.h b/include/linux/drbd_tag_magic.h index fcdff8410e99..f14a165e82dc 100644 --- a/include/linux/drbd_tag_magic.h +++ b/include/linux/drbd_tag_magic.h @@ -7,6 +7,7 @@ /* declare packet_type enums */ enum packet_types { #define NL_PACKET(name, number, fields) P_ ## name = number, +#define NL_RESPONSE(name, number) P_ ## name = number, #define NL_INTEGER(pn, pr, member) #define NL_INT64(pn, pr, member) #define NL_BIT(pn, pr, member) From 2561b9c1f1d63077c41903fc6ad58dc9ec47248b Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Fri, 3 Dec 2010 15:22:48 +0100 Subject: [PATCH 038/122] drbd: --force option for disconnect As the network connection can be lost at any time, a --force option for disconnect is just a matter of completeness. Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_nl.c | 15 +++++++++++++++ include/linux/drbd_nl.h | 4 +++- 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 6a6dde6c51c6..cd0459f0403f 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -1531,6 +1531,21 @@ static int drbd_nl_disconnect(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl struct drbd_nl_cfg_reply *reply) { int retcode; + struct disconnect dc; + + memset(&dc, 0, sizeof(struct disconnect)); + if (!disconnect_from_tags(mdev, nlp->tag_list, &dc)) { + retcode = ERR_MANDATORY_TAG; + goto fail; + } + + if (dc.force) { + spin_lock_irq(&mdev->req_lock); + if (mdev->state.conn >= C_WF_CONNECTION) + _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), CS_HARD, NULL); + spin_unlock_irq(&mdev->req_lock); + goto done; + } retcode = _drbd_request_state(mdev, NS(conn, C_DISCONNECTING), CS_ORDERED); diff --git a/include/linux/drbd_nl.h b/include/linux/drbd_nl.h index 6fc614b06c4d..ab6159e4fcf0 100644 --- a/include/linux/drbd_nl.h +++ b/include/linux/drbd_nl.h @@ -69,7 +69,9 @@ NL_PACKET(net_conf, 5, NL_BIT( 70, T_MANDATORY, dry_run) ) -NL_PACKET(disconnect, 6, ) +NL_PACKET(disconnect, 6, + NL_BIT( 84, T_MAY_IGNORE, force) +) NL_PACKET(resize, 7, NL_INT64( 29, T_MAY_IGNORE, resize_size) From 09b9e7979378fe070784de20e50bb1d42aa643ab Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Fri, 3 Dec 2010 16:04:24 +0100 Subject: [PATCH 039/122] drbd: Implemented the before-resync-source handler Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_worker.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index ab5cff502f5b..e201f6f82c0e 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -1499,6 +1499,20 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side) 
drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); return; } + } else /* C_SYNC_SOURCE */ { + r = drbd_khelper(mdev, "before-resync-source"); + r = (r >> 8) & 0xff; + if (r > 0) { + if (r == 3) { + dev_info(DEV, "before-resync-source handler returned %d, " + "ignoring. Old userland tools?", r); + } else { + dev_info(DEV, "before-resync-source handler returned %d, " + "dropping connection.\n", r); + drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); + return; + } + } } drbd_state_lock(mdev); From 8a3c104438be4986a77f332009b695fcac48f620 Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Sun, 5 Dec 2010 14:11:14 +0100 Subject: [PATCH 040/122] drbd: fix regression, we need to close drbd epochs during normal operation commit e2041475e6ddb081734d161f6421977323f5a9b9 drbd: Starting with protocol 96 we can allow app-IO while receiving the bitmap Contained a bad chunk that tried to optimize away drbd barriers during bitmap exchange, but accidentally dropped them for normal mode as well. Impact: depending on activity log size and access pattern, activity log extents may not be recycled in time, causing IO to block indefinitely. Fix: skip drbd barriers only if there is no connection to send them on, or the request being completed has not been on the network at all. Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_req.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 4cb8247d83c9..de5fe70f2b42 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -140,9 +140,14 @@ static void _about_to_complete_local_write(struct drbd_conf *mdev, struct hlist_node *n; struct hlist_head *slot; - /* before we can signal completion to the upper layers, - * we may need to close the current epoch */ - if (mdev->state.conn >= C_WF_BITMAP_T && mdev->state.conn < C_AHEAD && + /* Before we can signal completion to the upper layers, + * we may need to close the current epoch. + * We can skip this, if this request has not even been sent, because we + * did not have a fully established connection yet/anymore, during + * bitmap exchange, or while we are C_AHEAD due to congestion policy.
+ */ + if (mdev->state.conn >= C_CONNECTED && + (s & RQ_NET_SENT) != 0 && req->epoch == mdev->newest_tle->br_number) queue_barrier(mdev); From 7e458c32da946bd4f6aea476b61b79575578f834 Mon Sep 17 00:00:00 2001 From: Andreas Gruenbacher Date: Wed, 8 Dec 2010 19:02:09 +0100 Subject: [PATCH 041/122] drbd: Removed an unnecessary #undef Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_receiver.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index f4aba9f894ba..f8cd3f230d84 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -2963,7 +2963,6 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned } put_ldev(mdev); } -#undef min_not_zero ddsf = be16_to_cpu(p->dds_flags); if (get_ldev(mdev)) { From 220df4d006ed561b8fd4fbd8c01c6c28d6143653 Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Thu, 9 Dec 2010 15:21:02 +0100 Subject: [PATCH 042/122] drbd: fix incomplete error message Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_receiver.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index f8cd3f230d84..55fee1a6c647 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -2499,7 +2499,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol return C_MASK; } if (hg == -1001) { - dev_alert(DEV, "To resolve this both sides have to support at least protocol\n"); + dev_alert(DEV, "To resolve this both sides have to support at least protocol 91\n"); return C_MASK; } From cab2f74b45127a78b9a2980f54ca16cc9f45ddac Mon Sep 17 00:00:00 2001 From: Andreas Gruenbacher Date: Thu, 9 Dec 2010 16:08:46 +0100 Subject: [PATCH 043/122] drbd: Make sure that drbd_send() has sent the right number of bytes Reviewed-by: Lars Ellenberg Signed-off-by: Philipp Reisner --- drivers/block/drbd/drbd_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 150ed16d26ef..50b6841d135b 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -2580,7 +2580,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req) if (ok && dgs) { dgb = mdev->int_dig_out; drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, dgb); - ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, 0); + ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0); } if (ok) { /* For protocol A, we have to memcpy the payload into @@ -2662,7 +2662,7 @@ int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd, if (ok && dgs) { dgb = mdev->int_dig_out; drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb); - ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, 0); + ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0); } if (ok) ok = _drbd_send_zc_ee(mdev, e); From 96756784a6250c7d9878671ef3386b93b0c576d4 Mon Sep 17 00:00:00 2001 From: Andreas Gruenbacher Date: Thu, 9 Dec 2010 16:23:43 +0100 Subject: [PATCH 044/122] drbd: Remove left-over prototype Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_int.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 38bbaba8bd89..3268d201025b 100644 --- a/drivers/block/drbd/drbd_int.h +++ 
b/drivers/block/drbd/drbd_int.h @@ -1236,8 +1236,6 @@ extern int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req); extern int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd, struct drbd_epoch_entry *e); extern int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req); -extern int _drbd_send_barrier(struct drbd_conf *mdev, - struct drbd_tl_epoch *barrier); extern int drbd_send_drequest(struct drbd_conf *mdev, int cmd, sector_t sector, int size, u64 block_id); extern int drbd_send_drequest_csum(struct drbd_conf *mdev, From 2f58dcfc85b6800efd938f755e6c5f9979f4aa5c Mon Sep 17 00:00:00 2001 From: Andreas Gruenbacher Date: Mon, 13 Dec 2010 17:48:19 +0100 Subject: [PATCH 045/122] drbd: Rename drbd_make_request_26 to drbd_make_request Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_int.h | 2 +- drivers/block/drbd/drbd_main.c | 2 +- drivers/block/drbd/drbd_req.c | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 3268d201025b..7514cf7a958c 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -1479,7 +1479,7 @@ extern void drbd_free_mdev(struct drbd_conf *mdev); extern int proc_details; /* drbd_req */ -extern int drbd_make_request_26(struct request_queue *q, struct bio *bio); +extern int drbd_make_request(struct request_queue *q, struct bio *bio); extern int drbd_read_remote(struct drbd_conf *mdev, struct drbd_request *req); extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec); extern int is_valid_ar_handle(struct drbd_request *, sector_t); diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 50b6841d135b..7d60527c3b5a 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -3284,7 +3284,7 @@ struct drbd_conf *drbd_new_device(unsigned int minor) q->backing_dev_info.congested_fn = drbd_congested; q->backing_dev_info.congested_data = mdev; - blk_queue_make_request(q, drbd_make_request_26); + blk_queue_make_request(q, drbd_make_request); blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE >> 9); blk_queue_bounce_limit(q, BLK_BOUNCE_ANY); blk_queue_merge_bvec(q, drbd_merge_bvec); diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index de5fe70f2b42..079213d93490 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -884,7 +884,7 @@ allocate_barrier: if (is_susp(mdev->state)) { /* If we got suspended, use the retry mechanism of generic_make_request() to restart processing of this - bio. In the next call to drbd_make_request_26 + bio. In the next call to drbd_make_request we sleep in inc_ap_bio() */ ret = 1; spin_unlock_irq(&mdev->req_lock); @@ -1089,7 +1089,7 @@ static int drbd_fail_request_early(struct drbd_conf *mdev, int is_write) return 0; } -int drbd_make_request_26(struct request_queue *q, struct bio *bio) +int drbd_make_request(struct request_queue *q, struct bio *bio) { unsigned int s_enr, e_enr; struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata; @@ -1182,7 +1182,7 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio) * As long as the BIO is empty we have to allow at least one bvec, * regardless of size and offset. so the resulting bio may still * cross extent boundaries. those are dealt with (bio_split) in - * drbd_make_request_26. + * drbd_make_request. 
*/ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec) { From 662d91a23a8e8451ca47c08d5cff710fd080fd3a Mon Sep 17 00:00:00 2001 From: Andreas Gruenbacher Date: Tue, 7 Dec 2010 03:01:41 +0100 Subject: [PATCH 046/122] drbd: Get rid of unnecessary macros (1) This macro doesn't save much code, but makes things a lot harder to read. Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_main.c | 71 +++++++++++++++++++--------------- 1 file changed, 40 insertions(+), 31 deletions(-) diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 7d60527c3b5a..f43e2aa354a6 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -674,21 +674,6 @@ void print_st_err(struct drbd_conf *mdev, } -#define drbd_peer_str drbd_role_str -#define drbd_pdsk_str drbd_disk_str - -#define drbd_susp_str(A) ((A) ? "1" : "0") -#define drbd_aftr_isp_str(A) ((A) ? "1" : "0") -#define drbd_peer_isp_str(A) ((A) ? "1" : "0") -#define drbd_user_isp_str(A) ((A) ? "1" : "0") - -#define PSC(A) \ - ({ if (ns.A != os.A) { \ - pbp += sprintf(pbp, #A "( %s -> %s ) ", \ - drbd_##A##_str(os.A), \ - drbd_##A##_str(ns.A)); \ - } }) - /** * is_valid_state() - Returns an SS_ error code if ns is not valid * @mdev: DRBD device. @@ -1084,22 +1069,46 @@ int __drbd_set_state(struct drbd_conf *mdev, dev_warn(DEV, "%s aborted.\n", warn_sync_abort); { - char *pbp, pb[300]; - pbp = pb; - *pbp = 0; - PSC(role); - PSC(peer); - PSC(conn); - PSC(disk); - PSC(pdsk); - if (is_susp(ns) != is_susp(os)) - pbp += sprintf(pbp, "susp( %s -> %s ) ", - drbd_susp_str(is_susp(os)), - drbd_susp_str(is_susp(ns))); - PSC(aftr_isp); - PSC(peer_isp); - PSC(user_isp); - dev_info(DEV, "%s\n", pb); + char *pbp, pb[300]; + pbp = pb; + *pbp = 0; + if (ns.role != os.role) + pbp += sprintf(pbp, "role( %s -> %s ) ", + drbd_role_str(os.role), + drbd_role_str(ns.role)); + if (ns.peer != os.peer) + pbp += sprintf(pbp, "peer( %s -> %s ) ", + drbd_role_str(os.peer), + drbd_role_str(ns.peer)); + if (ns.conn != os.conn) + pbp += sprintf(pbp, "conn( %s -> %s ) ", + drbd_conn_str(os.conn), + drbd_conn_str(ns.conn)); + if (ns.disk != os.disk) + pbp += sprintf(pbp, "disk( %s -> %s ) ", + drbd_disk_str(os.disk), + drbd_disk_str(ns.disk)); + if (ns.pdsk != os.pdsk) + pbp += sprintf(pbp, "pdsk( %s -> %s ) ", + drbd_disk_str(os.pdsk), + drbd_disk_str(ns.pdsk)); + if (is_susp(ns) != is_susp(os)) + pbp += sprintf(pbp, "susp( %d -> %d ) ", + is_susp(os), + is_susp(ns)); + if (ns.aftr_isp != os.aftr_isp) + pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ", + os.aftr_isp, + ns.aftr_isp); + if (ns.peer_isp != os.peer_isp) + pbp += sprintf(pbp, "peer_isp( %d -> %d ) ", + os.peer_isp, + ns.peer_isp); + if (ns.user_isp != os.user_isp) + pbp += sprintf(pbp, "user_isp( %d -> %d ) ", + os.user_isp, + ns.user_isp); + dev_info(DEV, "%s\n", pb); } /* solve the race between becoming unconfigured, From 0cf9d27e38447efe5e5edce155a66a782a5aac4a Mon Sep 17 00:00:00 2001 From: Andreas Gruenbacher Date: Tue, 7 Dec 2010 10:43:29 +0100 Subject: [PATCH 047/122] drbd: Get rid of unnecessary macros (2) The FAULT_ACTIVE macro just wraps the drbd_insert_fault macro for no apparent reason. 
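The idea can be shown with a minimal, self-contained userspace sketch (illustrative only, not DRBD code and not part of this patch; the variable names, the percentage rate and the rand() check stand in for the in-tree fault_rate/enable_faults logic): keep the compile-time switch inside one small static inline helper, so every call site can call it unconditionally instead of going through a wrapper macro.

    /* illustrative sketch only -- not DRBD kernel code */
    #include <stdio.h>
    #include <stdlib.h>

    #define CONFIG_FAULT_INJECTION 1  /* set to 0: the fault path compiles away */

    static unsigned int fault_rate = 10;   /* percent of calls that should fail */
    static unsigned int enable_faults = 1; /* bit mask of enabled fault types */

    static inline int insert_fault(unsigned int type)
    {
    #if CONFIG_FAULT_INJECTION
        /* rate configured, type enabled, and the dice roll says "fail now" */
        return fault_rate &&
               (enable_faults & (1 << type)) &&
               (rand() % 100) < (int)fault_rate;
    #else
        return 0; /* callers need no #ifdef of their own */
    #endif
    }

    int main(void)
    {
        int hits = 0;
        for (int i = 0; i < 1000; i++)
            if (insert_fault(0))
                hits++;
        printf("simulated %d injected faults\n", hits);
        return 0;
    }

With the switch set to 0 the helper is a constant 0 and the compiler can discard the error branches at the call sites, which is the effect the driver gets from drbd_insert_fault() without the extra FAULT_ACTIVE indirection.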
Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_actlog.c | 4 ++-- drivers/block/drbd/drbd_bitmap.c | 4 ++-- drivers/block/drbd/drbd_int.h | 9 ++++----- drivers/block/drbd/drbd_receiver.c | 4 ++-- drivers/block/drbd/drbd_req.c | 6 +++--- drivers/block/drbd/drbd_wrappers.h | 2 +- 6 files changed, 14 insertions(+), 15 deletions(-) diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c index 4a1b199f4ae7..2e8a95ce79b1 100644 --- a/drivers/block/drbd/drbd_actlog.c +++ b/drivers/block/drbd/drbd_actlog.c @@ -92,7 +92,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev, bio->bi_end_io = drbd_md_io_complete; bio->bi_rw = rw; - if (FAULT_ACTIVE(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) + if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) bio_endio(bio, -EIO); else submit_bio(rw, bio); @@ -685,7 +685,7 @@ void drbd_al_to_on_disk_bm(struct drbd_conf *mdev) for (i = 0; i < nr_elements; i++) { if (bios[i] == NULL) break; - if (FAULT_ACTIVE(mdev, DRBD_FAULT_MD_WR)) { + if (drbd_insert_fault(mdev, DRBD_FAULT_MD_WR)) { bios[i]->bi_rw = WRITE; bio_endio(bios[i], -EIO); } else { diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index 0645ca829a94..5dafbabe9617 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c @@ -502,7 +502,7 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits) D_ASSERT(b->bm_pages != NULL); npages = b->bm_pages; } else { - if (FAULT_ACTIVE(mdev, DRBD_FAULT_BM_ALLOC)) + if (drbd_insert_fault(mdev, DRBD_FAULT_BM_ALLOC)) npages = NULL; else npages = bm_realloc_pages(b, want); @@ -768,7 +768,7 @@ static void bm_page_io_async(struct drbd_conf *mdev, struct drbd_bitmap *b, int bio->bi_private = b; bio->bi_end_io = bm_async_io_complete; - if (FAULT_ACTIVE(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) { + if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) { bio->bi_rw |= rw; bio_endio(bio, -EIO); } else { diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 7514cf7a958c..c81710a42111 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -137,20 +137,19 @@ enum { DRBD_FAULT_MAX, }; -#ifdef CONFIG_DRBD_FAULT_INJECTION extern unsigned int _drbd_insert_fault(struct drbd_conf *mdev, unsigned int type); + static inline int drbd_insert_fault(struct drbd_conf *mdev, unsigned int type) { +#ifdef CONFIG_DRBD_FAULT_INJECTION return fault_rate && (enable_faults & (1<> PAGE_SHIFT; - if (FAULT_ACTIVE(mdev, DRBD_FAULT_AL_EE)) + if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE)) return NULL; e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM); @@ -1264,7 +1264,7 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __ unsigned len = min_t(int, ds, PAGE_SIZE); data = kmap(page); rr = drbd_recv(mdev, data, len); - if (FAULT_ACTIVE(mdev, DRBD_FAULT_RECEIVE)) { + if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) { dev_err(DEV, "Fault injection: Corrupting data on receive\n"); data[0] = data[0] ^ (unsigned long)-1; } diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 079213d93490..53e7cc506456 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -1020,9 +1020,9 @@ allocate_barrier: * stable storage, and this is a WRITE, we may not even submit * this bio. */ if (get_ldev(mdev)) { - if (FAULT_ACTIVE(mdev, rw == WRITE ? 
DRBD_FAULT_DT_WR - : rw == READ ? DRBD_FAULT_DT_RD - : DRBD_FAULT_DT_RA)) + if (drbd_insert_fault(mdev, rw == WRITE ? DRBD_FAULT_DT_WR + : rw == READ ? DRBD_FAULT_DT_RD + : DRBD_FAULT_DT_RA)) bio_endio(req->private_bio, -EIO); else generic_make_request(req->private_bio); diff --git a/drivers/block/drbd/drbd_wrappers.h b/drivers/block/drbd/drbd_wrappers.h index 53586fa5ae1b..151f1a37478f 100644 --- a/drivers/block/drbd/drbd_wrappers.h +++ b/drivers/block/drbd/drbd_wrappers.h @@ -39,7 +39,7 @@ static inline void drbd_generic_make_request(struct drbd_conf *mdev, return; } - if (FAULT_ACTIVE(mdev, fault_type)) + if (drbd_insert_fault(mdev, fault_type)) bio_endio(bio, -EIO); else generic_make_request(bio); From 116676ca621a862a8124969772f4dd61c8b40eee Mon Sep 17 00:00:00 2001 From: Andreas Gruenbacher Date: Wed, 8 Dec 2010 13:33:11 +0100 Subject: [PATCH 048/122] drbd: Rename enum drbd_ret_codes to enum drbd_ret_code Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_main.c | 2 +- drivers/block/drbd/drbd_nl.c | 4 ++-- include/linux/drbd.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index f43e2aa354a6..8d69e3a1b3c2 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -3570,7 +3570,7 @@ void drbd_md_sync(struct drbd_conf *mdev) * @mdev: DRBD device. * @bdev: Device from which the meta data should be read in. * - * Return 0 (NO_ERROR) on success, and an enum drbd_ret_codes in case + * Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case * something goes wrong. Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID. */ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev) diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index cd0459f0403f..fe336592e538 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -849,7 +849,7 @@ static void drbd_suspend_al(struct drbd_conf *mdev) static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, struct drbd_nl_cfg_reply *reply) { - enum drbd_ret_codes retcode; + enum drbd_ret_code retcode; enum determine_dev_size dd; sector_t max_possible_sectors; sector_t min_md_device_sectors; @@ -1278,7 +1278,7 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, struct drbd_nl_cfg_reply *reply) { int i, ns; - enum drbd_ret_codes retcode; + enum drbd_ret_code retcode; struct net_conf *new_conf = NULL; struct crypto_hash *tfm = NULL; struct crypto_hash *integrity_w_tfm = NULL; diff --git a/include/linux/drbd.h b/include/linux/drbd.h index 41da654cc0b1..d92f989036c9 100644 --- a/include/linux/drbd.h +++ b/include/linux/drbd.h @@ -103,7 +103,7 @@ enum drbd_on_congestion { }; /* KEEP the order, do not delete or insert. Only append. 
*/ -enum drbd_ret_codes { +enum drbd_ret_code { ERR_CODE_BASE = 100, NO_ERROR = 101, ERR_LOCAL_ADDR = 102, From c8b325632f0e5ffdaeca3d1f3be77c9399316a40 Mon Sep 17 00:00:00 2001 From: Andreas Gruenbacher Date: Wed, 8 Dec 2010 01:06:16 +0100 Subject: [PATCH 049/122] drbd: Rename enum drbd_state_ret_codes to enum drbd_state_rv Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_main.c | 5 +++-- drivers/block/drbd/drbd_strings.c | 2 +- include/linux/drbd.h | 4 ++-- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 8d69e3a1b3c2..cddf311b7429 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -510,8 +510,9 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state int drbd_send_state_req(struct drbd_conf *, union drbd_state, union drbd_state); -static enum drbd_state_ret_codes _req_st_cond(struct drbd_conf *mdev, - union drbd_state mask, union drbd_state val) +static enum drbd_state_rv +_req_st_cond(struct drbd_conf *mdev, union drbd_state mask, + union drbd_state val) { union drbd_state os, ns; unsigned long flags; diff --git a/drivers/block/drbd/drbd_strings.c b/drivers/block/drbd/drbd_strings.c index 5b970adc3b6f..c44a2a602772 100644 --- a/drivers/block/drbd/drbd_strings.c +++ b/drivers/block/drbd/drbd_strings.c @@ -107,7 +107,7 @@ const char *drbd_disk_str(enum drbd_disk_state s) return s > D_UP_TO_DATE ? "TOO_LARGE" : drbd_disk_s_names[s]; } -const char *drbd_set_st_err_str(enum drbd_state_ret_codes err) +const char *drbd_set_st_err_str(enum drbd_state_rv err) { return err <= SS_AFTER_LAST_ERROR ? "TOO_SMALL" : err > SS_TWO_PRIMARIES ? "TOO_LARGE" diff --git a/include/linux/drbd.h b/include/linux/drbd.h index d92f989036c9..d10431fab004 100644 --- a/include/linux/drbd.h +++ b/include/linux/drbd.h @@ -270,7 +270,7 @@ union drbd_state { unsigned int i; }; -enum drbd_state_ret_codes { +enum drbd_state_rv { SS_CW_NO_NEED = 4, SS_CW_SUCCESS = 3, SS_NOTHING_TO_DO = 2, @@ -301,7 +301,7 @@ enum drbd_state_ret_codes { extern const char *drbd_conn_str(enum drbd_conns); extern const char *drbd_role_str(enum drbd_role); extern const char *drbd_disk_str(enum drbd_disk_state); -extern const char *drbd_set_st_err_str(enum drbd_state_ret_codes); +extern const char *drbd_set_st_err_str(enum drbd_state_rv); #define SHARED_SECRET_MAX 64 From bf885f8a6772fb48409dd505a09d974a5e621f22 Mon Sep 17 00:00:00 2001 From: Andreas Gruenbacher Date: Wed, 8 Dec 2010 00:39:32 +0100 Subject: [PATCH 050/122] drbd: Be more explicit about functions that return an enum drbd_state_rv Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_int.h | 32 +++++++++------- drivers/block/drbd/drbd_main.c | 59 ++++++++++++++++-------------- drivers/block/drbd/drbd_nl.c | 29 ++++++++------- drivers/block/drbd/drbd_receiver.c | 2 +- 4 files changed, 67 insertions(+), 55 deletions(-) diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index c81710a42111..749607a494e6 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -1174,14 +1174,19 @@ enum dds_flags { }; extern void drbd_init_set_defaults(struct drbd_conf *mdev); -extern int drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f, - union drbd_state mask, union drbd_state val); +extern enum drbd_state_rv drbd_change_state(struct drbd_conf *mdev, + enum chg_state_flags f, + union drbd_state mask, + union drbd_state val); 
extern void drbd_force_state(struct drbd_conf *, union drbd_state, union drbd_state); -extern int _drbd_request_state(struct drbd_conf *, union drbd_state, - union drbd_state, enum chg_state_flags); -extern int __drbd_set_state(struct drbd_conf *, union drbd_state, - enum chg_state_flags, struct completion *done); +extern enum drbd_state_rv _drbd_request_state(struct drbd_conf *, + union drbd_state, + union drbd_state, + enum chg_state_flags); +extern enum drbd_state_rv __drbd_set_state(struct drbd_conf *, union drbd_state, + enum chg_state_flags, + struct completion *done); extern void print_st_err(struct drbd_conf *, union drbd_state, union drbd_state, int); extern int drbd_thread_start(struct drbd_thread *thi); @@ -1245,7 +1250,7 @@ extern int drbd_send_ov_request(struct drbd_conf *mdev,sector_t sector,int size) extern int drbd_send_bitmap(struct drbd_conf *mdev); extern int _drbd_send_bitmap(struct drbd_conf *mdev); -extern int drbd_send_sr_reply(struct drbd_conf *mdev, int retcode); +extern int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode); extern void drbd_free_bc(struct drbd_backing_dev *ldev); extern void drbd_mdev_cleanup(struct drbd_conf *mdev); @@ -1493,8 +1498,9 @@ enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *, enum dds_flags) __must_hold(local); extern void resync_after_online_grow(struct drbd_conf *); extern void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int) __must_hold(local); -extern int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, - int force); +extern enum drbd_state_rv drbd_set_role(struct drbd_conf *mdev, + enum drbd_role new_role, + int force); extern enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev); extern void drbd_try_outdate_peer_async(struct drbd_conf *mdev); extern int drbd_khelper(struct drbd_conf *mdev, char *cmd); @@ -1761,11 +1767,11 @@ static inline void drbd_state_unlock(struct drbd_conf *mdev) wake_up(&mdev->misc_wait); } -static inline int _drbd_set_state(struct drbd_conf *mdev, - union drbd_state ns, enum chg_state_flags flags, - struct completion *done) +static inline enum drbd_state_rv +_drbd_set_state(struct drbd_conf *mdev, union drbd_state ns, + enum chg_state_flags flags, struct completion *done) { - int rv; + enum drbd_state_rv rv; read_lock(&global_state_lock); rv = __drbd_set_state(mdev, ns, flags, done); diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index cddf311b7429..a101dceb2d49 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -473,12 +473,13 @@ static int cl_wide_st_chg(struct drbd_conf *mdev, (os.conn == C_CONNECTED && ns.conn == C_VERIFY_S); } -int drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f, - union drbd_state mask, union drbd_state val) +enum drbd_state_rv +drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f, + union drbd_state mask, union drbd_state val) { unsigned long flags; union drbd_state os, ns; - int rv; + enum drbd_state_rv rv; spin_lock_irqsave(&mdev->req_lock, flags); os = mdev->state; @@ -502,9 +503,10 @@ void drbd_force_state(struct drbd_conf *mdev, drbd_change_state(mdev, CS_HARD, mask, val); } -static int is_valid_state(struct drbd_conf *mdev, union drbd_state ns); -static int is_valid_state_transition(struct drbd_conf *, - union drbd_state, union drbd_state); +static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state); 
+static enum drbd_state_rv is_valid_state_transition(struct drbd_conf *, + union drbd_state, + union drbd_state); static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os, union drbd_state ns, const char **warn_sync_abort); int drbd_send_state_req(struct drbd_conf *, @@ -516,7 +518,7 @@ _req_st_cond(struct drbd_conf *mdev, union drbd_state mask, { union drbd_state os, ns; unsigned long flags; - int rv; + enum drbd_state_rv rv; if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags)) return SS_CW_SUCCESS; @@ -537,7 +539,7 @@ _req_st_cond(struct drbd_conf *mdev, union drbd_state mask, if (rv == SS_SUCCESS) { rv = is_valid_state_transition(mdev, ns, os); if (rv == SS_SUCCESS) - rv = 0; /* cont waiting, otherwise fail. */ + rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */ } } spin_unlock_irqrestore(&mdev->req_lock, flags); @@ -555,14 +557,14 @@ _req_st_cond(struct drbd_conf *mdev, union drbd_state mask, * Should not be called directly, use drbd_request_state() or * _drbd_request_state(). */ -static int drbd_req_state(struct drbd_conf *mdev, - union drbd_state mask, union drbd_state val, - enum chg_state_flags f) +static enum drbd_state_rv +drbd_req_state(struct drbd_conf *mdev, union drbd_state mask, + union drbd_state val, enum chg_state_flags f) { struct completion done; unsigned long flags; union drbd_state os, ns; - int rv; + enum drbd_state_rv rv; init_completion(&done); @@ -637,10 +639,11 @@ abort: * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE * flag, or when logging of failed state change requests is not desired. */ -int _drbd_request_state(struct drbd_conf *mdev, union drbd_state mask, - union drbd_state val, enum chg_state_flags f) +enum drbd_state_rv +_drbd_request_state(struct drbd_conf *mdev, union drbd_state mask, + union drbd_state val, enum chg_state_flags f) { - int rv; + enum drbd_state_rv rv; wait_event(mdev->state_wait, (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE); @@ -664,8 +667,8 @@ static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns) ); } -void print_st_err(struct drbd_conf *mdev, - union drbd_state os, union drbd_state ns, int err) +void print_st_err(struct drbd_conf *mdev, union drbd_state os, + union drbd_state ns, enum drbd_state_rv err) { if (err == SS_IN_TRANSIENT_STATE) return; @@ -680,12 +683,13 @@ void print_st_err(struct drbd_conf *mdev, * @mdev: DRBD device. * @ns: State to consider. */ -static int is_valid_state(struct drbd_conf *mdev, union drbd_state ns) +static enum drbd_state_rv +is_valid_state(struct drbd_conf *mdev, union drbd_state ns) { /* See drbd_state_sw_errors in drbd_strings.c */ enum drbd_fencing_p fp; - int rv = SS_SUCCESS; + enum drbd_state_rv rv = SS_SUCCESS; fp = FP_DONT_CARE; if (get_ldev(mdev)) { @@ -748,10 +752,11 @@ static int is_valid_state(struct drbd_conf *mdev, union drbd_state ns) * @ns: new state. * @os: old state. */ -static int is_valid_state_transition(struct drbd_conf *mdev, - union drbd_state ns, union drbd_state os) +static enum drbd_state_rv +is_valid_state_transition(struct drbd_conf *mdev, union drbd_state ns, + union drbd_state os) { - int rv = SS_SUCCESS; + enum drbd_state_rv rv = SS_SUCCESS; if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) && os.conn > C_CONNECTED) @@ -1029,12 +1034,12 @@ static void drbd_resume_al(struct drbd_conf *mdev) * * Caller needs to hold req_lock, and global_state_lock. Do not call directly. 
*/ -int __drbd_set_state(struct drbd_conf *mdev, - union drbd_state ns, enum chg_state_flags flags, - struct completion *done) +enum drbd_state_rv +__drbd_set_state(struct drbd_conf *mdev, union drbd_state ns, + enum chg_state_flags flags, struct completion *done) { union drbd_state os; - int rv = SS_SUCCESS; + enum drbd_state_rv rv = SS_SUCCESS; const char *warn_sync_abort = NULL; struct after_state_chg_work *ascw; @@ -2031,7 +2036,7 @@ int drbd_send_state_req(struct drbd_conf *mdev, (struct p_header80 *)&p, sizeof(p)); } -int drbd_send_sr_reply(struct drbd_conf *mdev, int retcode) +int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode) { struct p_req_state_reply p; diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index fe336592e538..7a2faf6d48e2 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -288,10 +288,11 @@ void drbd_try_outdate_peer_async(struct drbd_conf *mdev) dev_err(DEV, "out of mem, failed to invoke fence-peer helper\n"); } -int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) +enum drbd_state_rv +drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) { const int max_tries = 4; - int r = 0; + enum drbd_state_rv rv = SS_UNKNOWN_ERROR; int try = 0; int forced = 0; union drbd_state mask, val; @@ -306,17 +307,17 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) val.i = 0; val.role = new_role; while (try++ < max_tries) { - r = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE); + rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE); /* in case we first succeeded to outdate, * but now suddenly could establish a connection */ - if (r == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) { + if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) { val.pdsk = 0; mask.pdsk = 0; continue; } - if (r == SS_NO_UP_TO_DATE_DISK && force && + if (rv == SS_NO_UP_TO_DATE_DISK && force && (mdev->state.disk < D_UP_TO_DATE && mdev->state.disk >= D_INCONSISTENT)) { mask.disk = D_MASK; @@ -325,7 +326,7 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) continue; } - if (r == SS_NO_UP_TO_DATE_DISK && + if (rv == SS_NO_UP_TO_DATE_DISK && mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) { D_ASSERT(mdev->state.pdsk == D_UNKNOWN); nps = drbd_try_outdate_peer(mdev); @@ -341,9 +342,9 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) continue; } - if (r == SS_NOTHING_TO_DO) + if (rv == SS_NOTHING_TO_DO) goto fail; - if (r == SS_PRIMARY_NOP && mask.pdsk == 0) { + if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) { nps = drbd_try_outdate_peer(mdev); if (force && nps > D_OUTDATED) { @@ -356,7 +357,7 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) continue; } - if (r == SS_TWO_PRIMARIES) { + if (rv == SS_TWO_PRIMARIES) { /* Maybe the peer is detected as dead very soon... retry at most once more in this case. 
*/ __set_current_state(TASK_INTERRUPTIBLE); @@ -365,16 +366,16 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) try = max_tries - 1; continue; } - if (r < SS_SUCCESS) { - r = _drbd_request_state(mdev, mask, val, + if (rv < SS_SUCCESS) { + rv = _drbd_request_state(mdev, mask, val, CS_VERBOSE + CS_WAIT_COMPLETE); - if (r < SS_SUCCESS) + if (rv < SS_SUCCESS) goto fail; } break; } - if (r < SS_SUCCESS) + if (rv < SS_SUCCESS) goto fail; if (forced) @@ -423,7 +424,7 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE); fail: mutex_unlock(&mdev->state_mutex); - return r; + return rv; } static struct drbd_conf *ensure_mdev(int minor, int create) diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index d34d093278c7..c116cbdc9dca 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -3112,7 +3112,7 @@ static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsi { struct p_req_state *p = &mdev->data.rbuf.req_state; union drbd_state mask, val; - int rv; + enum drbd_state_rv rv; mask.i = be32_to_cpu(p->mask); val.i = be32_to_cpu(p->val); From bb4379464efeb4624757e2cc0bf9207a309c1075 Mon Sep 17 00:00:00 2001 From: Andreas Gruenbacher Date: Thu, 9 Dec 2010 14:02:35 +0100 Subject: [PATCH 051/122] drbd: Another small enum drbd_state_rv cleanup Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_receiver.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index c116cbdc9dca..8088744d0ec1 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -2209,12 +2209,14 @@ static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local) case ASB_CALL_HELPER: hg = drbd_asb_recover_0p(mdev); if (hg == -1 && mdev->state.role == R_PRIMARY) { - self = drbd_set_role(mdev, R_SECONDARY, 0); + enum drbd_state_rv rv2; + + drbd_set_role(mdev, R_SECONDARY, 0); /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE, * we might be here in C_WF_REPORT_PARAMS which is transient. * we do not need to wait for the after state change work either. */ - self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY)); - if (self != SS_SUCCESS) { + rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY)); + if (rv2 != SS_SUCCESS) { drbd_khelper(mdev, "pri-lost-after-sb"); } else { dev_warn(DEV, "Successfully gave up primary role.\n"); @@ -2252,11 +2254,13 @@ static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local) case ASB_CALL_HELPER: hg = drbd_asb_recover_0p(mdev); if (hg == -1) { + enum drbd_state_rv rv2; + /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE, * we might be here in C_WF_REPORT_PARAMS which is transient. * we do not need to wait for the after state change work either. 
*/ - self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY)); - if (self != SS_SUCCESS) { + rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY)); + if (rv2 != SS_SUCCESS) { drbd_khelper(mdev, "pri-lost-after-sb"); } else { dev_warn(DEV, "Successfully gave up primary role.\n"); From 6184ea2145609b4ad63b141bf1f8124135ff4949 Mon Sep 17 00:00:00 2001 From: Andreas Gruenbacher Date: Thu, 9 Dec 2010 14:23:27 +0100 Subject: [PATCH 052/122] drbd: This code is dead now Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_receiver.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 8088744d0ec1..00dcb1172ca8 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -2179,10 +2179,7 @@ static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local) static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local) { - int self, peer, hg, rv = -100; - - self = mdev->ldev->md.uuid[UI_BITMAP] & 1; - peer = mdev->p_uuid[UI_BITMAP] & 1; + int hg, rv = -100; switch (mdev->net_conf->after_sb_1p) { case ASB_DISCARD_YOUNGER_PRI: @@ -2231,10 +2228,7 @@ static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local) static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local) { - int self, peer, hg, rv = -100; - - self = mdev->ldev->md.uuid[UI_BITMAP] & 1; - peer = mdev->p_uuid[UI_BITMAP] & 1; + int hg, rv = -100; switch (mdev->net_conf->after_sb_2p) { case ASB_DISCARD_YOUNGER_PRI: From 81e84650c200de0695372461964dd960365696db Mon Sep 17 00:00:00 2001 From: Andreas Gruenbacher Date: Thu, 9 Dec 2010 15:03:57 +0100 Subject: [PATCH 053/122] drbd: Use the standard bool, true, and false keywords Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_actlog.c | 8 +- drivers/block/drbd/drbd_bitmap.c | 4 +- drivers/block/drbd/drbd_int.h | 13 +-- drivers/block/drbd/drbd_main.c | 30 +++---- drivers/block/drbd/drbd_nl.c | 4 +- drivers/block/drbd/drbd_receiver.c | 128 ++++++++++++++--------------- drivers/block/drbd/drbd_req.c | 4 +- drivers/block/drbd/drbd_worker.c | 4 +- 8 files changed, 94 insertions(+), 101 deletions(-) diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c index 2e8a95ce79b1..e3f0f4d31d75 100644 --- a/drivers/block/drbd/drbd_actlog.c +++ b/drivers/block/drbd/drbd_actlog.c @@ -338,7 +338,7 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused) + mdev->ldev->md.al_offset + mdev->al_tr_pos; if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) - drbd_chk_io_error(mdev, 1, TRUE); + drbd_chk_io_error(mdev, 1, true); if (++mdev->al_tr_pos > div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT)) @@ -528,7 +528,7 @@ static void atodb_endio(struct bio *bio, int error) if (!error && !uptodate) error = -EIO; - drbd_chk_io_error(mdev, error, TRUE); + drbd_chk_io_error(mdev, error, true); if (error && wc->error == 0) wc->error = error; @@ -991,7 +991,7 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size, if (count && get_ldev(mdev)) { drbd_advance_rs_marks(mdev, drbd_bm_total_weight(mdev)); spin_lock_irqsave(&mdev->al_lock, flags); - drbd_try_clear_on_disk_bm(mdev, sector, count, TRUE); + drbd_try_clear_on_disk_bm(mdev, sector, count, true); spin_unlock_irqrestore(&mdev->al_lock, flags); /* just wake_up unconditional now, various lc_chaged(), @@ -1441,7 +1441,7 @@ 
void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size) mdev->rs_failed += count; if (get_ldev(mdev)) { - drbd_try_clear_on_disk_bm(mdev, sector, count, FALSE); + drbd_try_clear_on_disk_bm(mdev, sector, count, false); put_ldev(mdev); } diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index 5dafbabe9617..9390e9526786 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c @@ -844,7 +844,7 @@ static int bm_rw(struct drbd_conf *mdev, int rw) __must_hold(local) if (test_bit(BM_MD_IO_ERROR, &b->bm_flags)) { dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n"); - drbd_chk_io_error(mdev, 1, TRUE); + drbd_chk_io_error(mdev, 1, true); err = -EIO; } @@ -916,7 +916,7 @@ int drbd_bm_write_sect(struct drbd_conf *mdev, unsigned long enr) __must_hold(lo dev_err(DEV, "IO ERROR writing bitmap sector %lu " "(meta-disk sector %llus)\n", enr, (unsigned long long)on_disk_sector); - drbd_chk_io_error(mdev, 1, TRUE); + drbd_chk_io_error(mdev, 1, true); for (i = 0; i < AL_EXT_PER_BM_SECT; i++) drbd_bm_ALe_set_all(mdev, enr*AL_EXT_PER_BM_SECT+i); } diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 749607a494e6..0855934e19d0 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -72,13 +72,6 @@ extern int fault_devs; extern char usermode_helper[]; -#ifndef TRUE -#define TRUE 1 -#endif -#ifndef FALSE -#define FALSE 0 -#endif - /* I don't remember why XCPU ... * This is used to wake the asender, * and to interrupt sending the sending task @@ -2002,17 +1995,17 @@ static inline int drbd_send_ping_ack(struct drbd_conf *mdev) static inline void drbd_thread_stop(struct drbd_thread *thi) { - _drbd_thread_stop(thi, FALSE, TRUE); + _drbd_thread_stop(thi, false, true); } static inline void drbd_thread_stop_nowait(struct drbd_thread *thi) { - _drbd_thread_stop(thi, FALSE, FALSE); + _drbd_thread_stop(thi, false, false); } static inline void drbd_thread_restart_nowait(struct drbd_thread *thi) { - _drbd_thread_stop(thi, TRUE, FALSE); + _drbd_thread_stop(thi, true, false); } /* counts how many answer packets packets we expect from our peer, diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index a101dceb2d49..7eb447d20cce 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -456,7 +456,7 @@ void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what) } /** - * cl_wide_st_chg() - TRUE if the state change is a cluster wide one + * cl_wide_st_chg() - true if the state change is a cluster wide one * @mdev: DRBD device. * @os: old (current) state. * @ns: new (wanted) state. 
@@ -1623,7 +1623,7 @@ int drbd_thread_start(struct drbd_thread *thi) if (!try_module_get(THIS_MODULE)) { dev_err(DEV, "Failed to get module reference in drbd_thread_start\n"); spin_unlock_irqrestore(&thi->t_lock, flags); - return FALSE; + return false; } init_completion(&thi->stop); @@ -1640,7 +1640,7 @@ int drbd_thread_start(struct drbd_thread *thi) dev_err(DEV, "Couldn't start thread\n"); module_put(THIS_MODULE); - return FALSE; + return false; } spin_lock_irqsave(&thi->t_lock, flags); thi->task = nt; @@ -1660,7 +1660,7 @@ int drbd_thread_start(struct drbd_thread *thi) break; } - return TRUE; + return true; } @@ -1758,8 +1758,8 @@ int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock, { int sent, ok; - ERR_IF(!h) return FALSE; - ERR_IF(!size) return FALSE; + ERR_IF(!h) return false; + ERR_IF(!size) return false; h->magic = BE_DRBD_MAGIC; h->command = cpu_to_be16(cmd); @@ -2196,14 +2196,14 @@ int _drbd_send_bitmap(struct drbd_conf *mdev) struct p_header80 *p; int ret; - ERR_IF(!mdev->bitmap) return FALSE; + ERR_IF(!mdev->bitmap) return false; /* maybe we should use some per thread scratch page, * and allocate that during initial device creation? */ p = (struct p_header80 *) __get_free_page(GFP_NOIO); if (!p) { dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__); - return FALSE; + return false; } if (get_ldev(mdev)) { @@ -2256,7 +2256,7 @@ int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size) p.set_size = cpu_to_be32(set_size); if (mdev->state.conn < C_CONNECTED) - return FALSE; + return false; ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK, (struct p_header80 *)&p, sizeof(p)); return ok; @@ -2284,7 +2284,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd, p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq)); if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED) - return FALSE; + return false; ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd, (struct p_header80 *)&p, sizeof(p)); return ok; @@ -2390,8 +2390,8 @@ int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size) } /* called on sndtimeo - * returns FALSE if we should retry, - * TRUE if we think connection is dead + * returns false if we should retry, + * true if we think connection is dead */ static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *sock) { @@ -2404,7 +2404,7 @@ static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket * || mdev->state.conn < C_CONNECTED; if (drop_it) - return TRUE; + return true; drop_it = !--mdev->ko_count; if (!drop_it) { @@ -3283,7 +3283,7 @@ struct drbd_conf *drbd_new_device(unsigned int minor) goto out_no_disk; mdev->vdisk = disk; - set_disk_ro(disk, TRUE); + set_disk_ro(disk, true); disk->queue = q; disk->major = DRBD_MAJOR; @@ -3560,7 +3560,7 @@ void drbd_md_sync(struct drbd_conf *mdev) if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) { /* this was a try anyways ... 
*/ dev_err(DEV, "meta data update failed!\n"); - drbd_chk_io_error(mdev, 1, TRUE); + drbd_chk_io_error(mdev, 1, true); } /* Update mdev->ldev->md.la_size_sect, diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 7a2faf6d48e2..9e94c348c42c 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -385,7 +385,7 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0); if (new_role == R_SECONDARY) { - set_disk_ro(mdev->vdisk, TRUE); + set_disk_ro(mdev->vdisk, true); if (get_ldev(mdev)) { mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1; put_ldev(mdev); @@ -395,7 +395,7 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) mdev->net_conf->want_lose = 0; put_net_conf(mdev); } - set_disk_ro(mdev->vdisk, FALSE); + set_disk_ro(mdev->vdisk, false); if (get_ldev(mdev)) { if (((mdev->state.conn < C_CONNECTED || mdev->state.pdsk <= D_FAILED) diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 00dcb1172ca8..732aacb46a32 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -725,16 +725,16 @@ static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock) char tb[4]; if (!*sock) - return FALSE; + return false; rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK); if (rr > 0 || rr == -EAGAIN) { - return TRUE; + return true; } else { sock_release(*sock); *sock = NULL; - return FALSE; + return false; } } @@ -933,7 +933,7 @@ static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsi r = drbd_recv(mdev, h, sizeof(*h)); if (unlikely(r != sizeof(*h))) { dev_err(DEV, "short read expecting header on sock: r=%d\n", r); - return FALSE; + return false; } if (likely(h->h80.magic == BE_DRBD_MAGIC)) { @@ -947,11 +947,11 @@ static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsi be32_to_cpu(h->h80.magic), be16_to_cpu(h->h80.command), be16_to_cpu(h->h80.length)); - return FALSE; + return false; } mdev->last_received = jiffies; - return TRUE; + return true; } static void drbd_flush(struct drbd_conf *mdev) @@ -1160,7 +1160,7 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign switch (mdev->write_ordering) { case WO_none: if (rv == FE_RECYCLED) - return TRUE; + return true; /* receiver context, in the writeout path of the other node. 
* avoid potential distributed deadlock */ @@ -1188,10 +1188,10 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign D_ASSERT(atomic_read(&epoch->active) == 0); D_ASSERT(epoch->flags == 0); - return TRUE; + return true; default: dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering); - return FALSE; + return false; } epoch->flags = 0; @@ -1209,7 +1209,7 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign } spin_unlock(&mdev->epoch_lock); - return TRUE; + return true; } /* used from receive_RSDataReply (recv_resync_read) @@ -1303,7 +1303,7 @@ static int drbd_drain_block(struct drbd_conf *mdev, int data_size) void *data; if (!data_size) - return TRUE; + return true; page = drbd_pp_alloc(mdev, 1, 1); @@ -1426,7 +1426,7 @@ static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_si atomic_add(data_size >> 9, &mdev->rs_sect_ev); if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0) - return TRUE; + return true; /* drbd_submit_ee currently fails for one reason only: * not being able to allocate enough bios. @@ -1438,7 +1438,7 @@ static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_si drbd_free_ee(mdev, e); fail: put_ldev(mdev); - return FALSE; + return false; } static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) @@ -1455,7 +1455,7 @@ static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsi spin_unlock_irq(&mdev->req_lock); if (unlikely(!req)) { dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n"); - return FALSE; + return false; } /* hlist_del(&req->colision) is done in _req_may_be_done, to avoid @@ -1655,7 +1655,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned e = read_in_block(mdev, p->block_id, sector, data_size); if (!e) { put_ldev(mdev); - return FALSE; + return false; } e->w.cb = e_end_block; @@ -1774,7 +1774,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned put_ldev(mdev); wake_asender(mdev); finish_wait(&mdev->misc_wait, &wait); - return TRUE; + return true; } if (signal_pending(current)) { @@ -1830,7 +1830,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned } if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0) - return TRUE; + return true; /* drbd_submit_ee currently fails for one reason only: * not being able to allocate enough bios. @@ -1848,7 +1848,7 @@ out_interrupted: * receive a barrier... 
atomic_inc(&mdev->epoch_size); */ put_ldev(mdev); drbd_free_ee(mdev, e); - return FALSE; + return false; } /* We may throttle resync, if the lower device seems to be busy, @@ -1934,12 +1934,12 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) { dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__, (unsigned long long)sector, size); - return FALSE; + return false; } if (sector + (size>>9) > capacity) { dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__, (unsigned long long)sector, size); - return FALSE; + return false; } if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) { @@ -1976,7 +1976,7 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO); if (!e) { put_ldev(mdev); - return FALSE; + return false; } switch (cmd) { @@ -2089,7 +2089,7 @@ submit: spin_unlock_irq(&mdev->req_lock); if (drbd_submit_ee(mdev, e, READ, fault_type) == 0) - return TRUE; + return true; /* drbd_submit_ee currently fails for one reason only: * not being able to allocate enough bios. @@ -2102,7 +2102,7 @@ submit: out_free_e: put_ldev(mdev); drbd_free_ee(mdev, e); - return FALSE; + return false; } static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local) @@ -2690,7 +2690,7 @@ static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsig unsigned char *my_alg = mdev->net_conf->integrity_alg; if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size) - return FALSE; + return false; p_integrity_alg[SHARED_SECRET_MAX-1] = 0; if (strcmp(p_integrity_alg, my_alg)) { @@ -2701,11 +2701,11 @@ static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsig my_alg[0] ? 
my_alg : (unsigned char *)""); } - return TRUE; + return true; disconnect: drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); - return FALSE; + return false; } /* helper function @@ -2737,7 +2737,7 @@ struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev, static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size) { - int ok = TRUE; + int ok = true; struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95; unsigned int header_size, data_size, exp_max_sz; struct crypto_hash *verify_tfm = NULL; @@ -2755,7 +2755,7 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsi if (packet_size > exp_max_sz) { dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n", packet_size, exp_max_sz); - return FALSE; + return false; } if (apv <= 88) { @@ -2775,7 +2775,7 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsi memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX); if (drbd_recv(mdev, &p->head.payload, header_size) != header_size) - return FALSE; + return false; mdev->sync_conf.rate = be32_to_cpu(p->rate); @@ -2785,11 +2785,11 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsi dev_err(DEV, "verify-alg too long, " "peer wants %u, accepting only %u byte\n", data_size, SHARED_SECRET_MAX); - return FALSE; + return false; } if (drbd_recv(mdev, p->verify_alg, data_size) != data_size) - return FALSE; + return false; /* we expect NUL terminated string */ /* but just in case someone tries to be evil */ @@ -2883,7 +2883,7 @@ disconnect: /* but free the verify_tfm again, if csums_tfm did not work out */ crypto_free_hash(verify_tfm); drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); - return FALSE; + return false; } static void drbd_setup_order_type(struct drbd_conf *mdev, int peer) @@ -2920,7 +2920,7 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned if (p_size == 0 && mdev->state.disk == D_DISKLESS) { dev_err(DEV, "some backing storage is needed\n"); drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); - return FALSE; + return false; } /* just store the peer's disk size for now. @@ -2957,7 +2957,7 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); mdev->ldev->dc.disk_size = my_usize; put_ldev(mdev); - return FALSE; + return false; } put_ldev(mdev); } @@ -2967,7 +2967,7 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned dd = drbd_determin_dev_size(mdev, ddsf); put_ldev(mdev); if (dd == dev_size_error) - return FALSE; + return false; drbd_md_sync(mdev); } else { /* I am diskless, need to accept the peer's size. 
*/ @@ -3014,7 +3014,7 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned } } - return TRUE; + return true; } static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) @@ -3038,7 +3038,7 @@ static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned dev_err(DEV, "Can only connect to data with current UUID=%016llX\n", (unsigned long long)mdev->ed_uuid); drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); - return FALSE; + return false; } if (get_ldev(mdev)) { @@ -3073,7 +3073,7 @@ static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT) drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]); - return TRUE; + return true; } /** @@ -3118,7 +3118,7 @@ static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsi if (test_bit(DISCARD_CONCURRENT, &mdev->flags) && test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) { drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG); - return TRUE; + return true; } mask = convert_state(mask); @@ -3129,7 +3129,7 @@ static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsi drbd_send_sr_reply(mdev, rv); drbd_md_sync(mdev); - return TRUE; + return true; } static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) @@ -3174,7 +3174,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned peer_state.conn == C_CONNECTED) { if (drbd_bm_total_weight(mdev) <= mdev->rs_failed) drbd_resync_finished(mdev); - return TRUE; + return true; } } @@ -3227,10 +3227,10 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned real_peer_disk = D_DISKLESS; } else { if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags)) - return FALSE; + return false; D_ASSERT(os.conn == C_WF_REPORT_PARAMS); drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); - return FALSE; + return false; } } } @@ -3255,7 +3255,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned drbd_uuid_new_current(mdev); clear_bit(NEW_CUR_UUID, &mdev->flags); drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0)); - return FALSE; + return false; } rv = _drbd_set_state(mdev, ns, cs_flags, NULL); ns = mdev->state; @@ -3263,7 +3263,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned if (rv < SS_SUCCESS) { drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); - return FALSE; + return false; } if (os.conn > C_WF_REPORT_PARAMS) { @@ -3281,7 +3281,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned drbd_md_sync(mdev); /* update connected indicator, la_size, ... 
*/ - return TRUE; + return true; } static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) @@ -3308,7 +3308,7 @@ static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsi } else dev_err(DEV, "Ignoring SyncUUID packet!\n"); - return TRUE; + return true; } enum receive_bitmap_ret { OK, DONE, FAILED }; @@ -3462,7 +3462,7 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne struct bm_xfer_ctx c; void *buffer; enum receive_bitmap_ret ret; - int ok = FALSE; + int ok = false; struct p_header80 *h = &mdev->data.rbuf.header.h80; /* drbd_bm_lock(mdev, "receive bitmap"); By intention no bm_lock */ @@ -3535,7 +3535,7 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne drbd_conn_str(mdev->state.conn)); } - ok = TRUE; + ok = true; out: /* drbd_bm_unlock(mdev); by intention no lock */ if (ok && mdev->state.conn == C_WF_BITMAP_S) @@ -3569,7 +3569,7 @@ static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, u * with the data requests being unplugged */ drbd_tcp_quickack(mdev->data.socket); - return TRUE; + return true; } static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) @@ -3578,7 +3578,7 @@ static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packets cmd, un drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize)); - return TRUE; + return true; } typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive); @@ -4147,7 +4147,7 @@ static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h) } wake_up(&mdev->state_wait); - return TRUE; + return true; } static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h) @@ -4163,7 +4163,7 @@ static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h) if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags)) wake_up(&mdev->misc_wait); - return TRUE; + return true; } static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h) @@ -4186,7 +4186,7 @@ static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h) dec_rs_pending(mdev); atomic_add(blksize >> 9, &mdev->rs_sect_in); - return TRUE; + return true; } /* when we receive the ACK for a write request, @@ -4230,14 +4230,14 @@ static int validate_req_change_req_state(struct drbd_conf *mdev, if (unlikely(!req)) { spin_unlock_irq(&mdev->req_lock); dev_err(DEV, "%s: got a corrupt block_id/sector pair\n", func); - return FALSE; + return false; } __req_mod(req, what, &m); spin_unlock_irq(&mdev->req_lock); if (m.bio) complete_master_bio(mdev, &m); - return TRUE; + return true; } static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h) @@ -4252,7 +4252,7 @@ static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h) if (is_syncer_block_id(p->block_id)) { drbd_set_in_sync(mdev, sector, blksize); dec_rs_pending(mdev); - return TRUE; + return true; } switch (be16_to_cpu(h->command)) { case P_RS_WRITE_ACK: @@ -4273,7 +4273,7 @@ static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h) break; default: D_ASSERT(0); - return FALSE; + return false; } return validate_req_change_req_state(mdev, p->block_id, sector, @@ -4294,7 +4294,7 @@ static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h) int size = be32_to_cpu(p->blksize); dec_rs_pending(mdev); drbd_rs_failed_io(mdev, sector, size); - return TRUE; + return true; } return validate_req_change_req_state(mdev, p->block_id, sector, 
_ack_id_to_req, __func__ , neg_acked); @@ -4332,7 +4332,7 @@ static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h) put_ldev(mdev); } - return TRUE; + return true; } static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h) @@ -4349,7 +4349,7 @@ static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h) drbd_queue_work_front(&mdev->data.work, w); } - return TRUE; + return true; } static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h) @@ -4370,7 +4370,7 @@ static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h) ov_oos_print(mdev); if (!get_ldev(mdev)) - return TRUE; + return true; drbd_rs_complete_io(mdev, sector); dec_rs_pending(mdev); @@ -4393,12 +4393,12 @@ static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h) } } put_ldev(mdev); - return TRUE; + return true; } static int got_skip(struct drbd_conf *mdev, struct p_header80 *h) { - return TRUE; + return true; } struct asender_cmd { diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 53e7cc506456..528909090df7 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -445,7 +445,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, req->rq_state |= RQ_LOCAL_COMPLETED; req->rq_state &= ~RQ_LOCAL_PENDING; - __drbd_chk_io_error(mdev, FALSE); + __drbd_chk_io_error(mdev, false); _req_may_be_done_not_susp(req, m); put_ldev(mdev); break; @@ -466,7 +466,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, D_ASSERT(!(req->rq_state & RQ_NET_MASK)); - __drbd_chk_io_error(mdev, FALSE); + __drbd_chk_io_error(mdev, false); put_ldev(mdev); /* no point in retrying if there is no good remote data, diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index e201f6f82c0e..9fe3e890da0f 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -96,7 +96,7 @@ void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local) if (list_empty(&mdev->read_ee)) wake_up(&mdev->ee_wait); if (test_bit(__EE_WAS_ERROR, &e->flags)) - __drbd_chk_io_error(mdev, FALSE); + __drbd_chk_io_error(mdev, false); spin_unlock_irqrestore(&mdev->req_lock, flags); drbd_queue_work(&mdev->data.work, &e->w); @@ -139,7 +139,7 @@ static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(lo : list_empty(&mdev->active_ee); if (test_bit(__EE_WAS_ERROR, &e->flags)) - __drbd_chk_io_error(mdev, FALSE); + __drbd_chk_io_error(mdev, false); spin_unlock_irqrestore(&mdev->req_lock, flags); if (is_syncer_req) From f2024e7ce29f4287395ce879364cd68c7ac226f2 Mon Sep 17 00:00:00 2001 From: Andreas Gruenbacher Date: Fri, 10 Dec 2010 13:44:05 +0100 Subject: [PATCH 054/122] drbd: drbd_nl_disk_conf: Avoid a compiler warning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Warning: comparison between ‘enum drbd_ret_code’ and ‘enum drbd_state_rv’ Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_nl.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 9e94c348c42c..ada5483f7e56 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -859,7 +859,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp struct lru_cache *resync_lru = NULL; union drbd_state ns, os; unsigned int max_bio_size; - int rv; + enum drbd_state_rv rv; int cp_discovered 
= 0; int logical_block_size; @@ -1005,9 +1005,10 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp /* and for any other previously queued work */ drbd_flush_workqueue(mdev); - retcode = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE); + rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE); + retcode = rv; /* FIXME: Type mismatch. */ drbd_resume_io(mdev); - if (retcode < SS_SUCCESS) + if (rv < SS_SUCCESS) goto fail; if (!get_ldev_if_state(mdev, D_ATTACHING)) From 4114be815f9811da42a21dc2f7ff552833660595 Mon Sep 17 00:00:00 2001 From: Andreas Gruenbacher Date: Fri, 10 Dec 2010 17:09:10 +0100 Subject: [PATCH 055/122] drbd: receive_bitmap: Fix the wrong return value Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_receiver.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 732aacb46a32..44b102d41b4a 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -3499,7 +3499,7 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne goto out; if (data_size <= (sizeof(*p) - sizeof(p->head))) { dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size); - return FAILED; + return false; } ret = decode_bitmap_c(mdev, p, &c); } else { From de1f8e4a0af3c2d0dff9f6341fead4b509941282 Mon Sep 17 00:00:00 2001 From: Andreas Gruenbacher Date: Fri, 10 Dec 2010 21:04:00 +0100 Subject: [PATCH 056/122] drbd: receive_bitmap: Avoid casting enum drbd_state_rv to int Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_receiver.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 44b102d41b4a..2a604e7b7a01 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -3522,12 +3522,14 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne INFO_bm_xfer_stats(mdev, "receive", &c); if (mdev->state.conn == C_WF_BITMAP_T) { + enum drbd_state_rv rv; + ok = !drbd_send_bitmap(mdev); if (!ok) goto out; /* Omit CS_ORDERED with this state transition to avoid deadlocks. 
*/ - ok = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE); - D_ASSERT(ok == SS_SUCCESS); + rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE); + D_ASSERT(rv == SS_SUCCESS); } else if (mdev->state.conn != C_WF_BITMAP_S) { /* admin may have requested C_DISCONNECTING, * other threads may have noticed network errors */ From 78fcbdae224469cb48767fa29ca41f1d5890e57c Mon Sep 17 00:00:00 2001 From: Andreas Gruenbacher Date: Fri, 10 Dec 2010 22:18:27 +0100 Subject: [PATCH 057/122] drbd: receive_bitmap: Missing free_page() on error path Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_receiver.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 2a604e7b7a01..f217d351d77c 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -3499,7 +3499,7 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne goto out; if (data_size <= (sizeof(*p) - sizeof(p->head))) { dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size); - return false; + goto out; } ret = decode_bitmap_c(mdev, p, &c); } else { From f70af118e3f3638698ac08959a41b9f9fe7237c8 Mon Sep 17 00:00:00 2001 From: Andreas Gruenbacher Date: Sat, 11 Dec 2010 18:51:50 +0100 Subject: [PATCH 058/122] drbd: send_bitmap_rle_or_plain: Get rid of ugly and useless enum Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_main.c | 33 +++++++++++++++++++++------------ 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 7eb447d20cce..7c72595b7ff0 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -2140,9 +2140,15 @@ int fill_bitmap_rle_bits(struct drbd_conf *mdev, return len; } -enum { OK, FAILED, DONE } +/** + * send_bitmap_rle_or_plain + * + * Return 0 when done, 1 when another iteration is needed, and a negative error + * code upon failure. + */ +static int send_bitmap_rle_or_plain(struct drbd_conf *mdev, - struct p_header80 *h, struct bm_xfer_ctx *c) + struct p_header80 *h, struct bm_xfer_ctx *c) { struct p_compressed_bm *p = (void*)h; unsigned long num_words; @@ -2152,7 +2158,7 @@ send_bitmap_rle_or_plain(struct drbd_conf *mdev, len = fill_bitmap_rle_bits(mdev, p, c); if (len < 0) - return FAILED; + return -EIO; if (len) { DCBP_set_code(p, RLE_VLI_Bits); @@ -2182,11 +2188,14 @@ send_bitmap_rle_or_plain(struct drbd_conf *mdev, if (c->bit_offset > c->bm_bits) c->bit_offset = c->bm_bits; } - ok = ok ? ((len == 0) ? 
DONE : OK) : FAILED; - - if (ok == DONE) - INFO_bm_xfer_stats(mdev, "send", c); - return ok; + if (ok) { + if (len == 0) { + INFO_bm_xfer_stats(mdev, "send", c); + return 0; + } else + return 1; + } + return -EIO; } /* See the comment at receive_bitmap() */ @@ -2194,7 +2203,7 @@ int _drbd_send_bitmap(struct drbd_conf *mdev) { struct bm_xfer_ctx c; struct p_header80 *p; - int ret; + int err; ERR_IF(!mdev->bitmap) return false; @@ -2229,11 +2238,11 @@ int _drbd_send_bitmap(struct drbd_conf *mdev) }; do { - ret = send_bitmap_rle_or_plain(mdev, p, &c); - } while (ret == OK); + err = send_bitmap_rle_or_plain(mdev, p, &c); + } while (err > 0); free_page((unsigned long) p); - return (ret == DONE); + return err == 0; } int drbd_send_bitmap(struct drbd_conf *mdev) From 2c46407d241f4b0e5b87959aae8f50f41fdd2a3a Mon Sep 17 00:00:00 2001 From: Andreas Gruenbacher Date: Sat, 11 Dec 2010 21:53:12 +0100 Subject: [PATCH 059/122] drbd: receive_bitmap_plain: Get rid of ugly and useless enum Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_receiver.c | 73 +++++++++++++++++++----------- 1 file changed, 47 insertions(+), 26 deletions(-) diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index f217d351d77c..be7fc67eeeca 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -3311,23 +3311,32 @@ static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsi return true; } -enum receive_bitmap_ret { OK, DONE, FAILED }; - -static enum receive_bitmap_ret +/** + * receive_bitmap_plain + * + * Return 0 when done, 1 when another iteration is needed, and a negative error + * code upon failure. + */ +static int receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size, unsigned long *buffer, struct bm_xfer_ctx *c) { unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset); unsigned want = num_words * sizeof(long); + int err; if (want != data_size) { dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size); - return FAILED; + return -EIO; } if (want == 0) - return DONE; - if (drbd_recv(mdev, buffer, want) != want) - return FAILED; + return 0; + err = drbd_recv(mdev, buffer, want); + if (err != want) { + if (err >= 0) + err = -EIO; + return err; + } drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer); @@ -3336,10 +3345,16 @@ receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size, if (c->bit_offset > c->bm_bits) c->bit_offset = c->bm_bits; - return OK; + return 1; } -static enum receive_bitmap_ret +/** + * recv_bm_rle_bits + * + * Return 0 when done, 1 when another iteration is needed, and a negative error + * code upon failure. 
+ */ +static int recv_bm_rle_bits(struct drbd_conf *mdev, struct p_compressed_bm *p, struct bm_xfer_ctx *c) @@ -3359,18 +3374,18 @@ recv_bm_rle_bits(struct drbd_conf *mdev, bits = bitstream_get_bits(&bs, &look_ahead, 64); if (bits < 0) - return FAILED; + return -EIO; for (have = bits; have > 0; s += rl, toggle = !toggle) { bits = vli_decode_bits(&rl, look_ahead); if (bits <= 0) - return FAILED; + return -EIO; if (toggle) { e = s + rl -1; if (e >= c->bm_bits) { dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e); - return FAILED; + return -EIO; } _drbd_bm_set_bits(mdev, s, e); } @@ -3380,14 +3395,14 @@ recv_bm_rle_bits(struct drbd_conf *mdev, have, bits, look_ahead, (unsigned int)(bs.cur.b - p->code), (unsigned int)bs.buf_len); - return FAILED; + return -EIO; } look_ahead >>= bits; have -= bits; bits = bitstream_get_bits(&bs, &tmp, 64 - have); if (bits < 0) - return FAILED; + return -EIO; look_ahead |= tmp << have; have += bits; } @@ -3395,10 +3410,16 @@ recv_bm_rle_bits(struct drbd_conf *mdev, c->bit_offset = s; bm_xfer_ctx_bit_to_word_offset(c); - return (s == c->bm_bits) ? DONE : OK; + return (s != c->bm_bits); } -static enum receive_bitmap_ret +/** + * decode_bitmap_c + * + * Return 0 when done, 1 when another iteration is needed, and a negative error + * code upon failure. + */ +static int decode_bitmap_c(struct drbd_conf *mdev, struct p_compressed_bm *p, struct bm_xfer_ctx *c) @@ -3412,7 +3433,7 @@ decode_bitmap_c(struct drbd_conf *mdev, dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding); drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR)); - return FAILED; + return -EIO; } void INFO_bm_xfer_stats(struct drbd_conf *mdev, @@ -3461,7 +3482,7 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne { struct bm_xfer_ctx c; void *buffer; - enum receive_bitmap_ret ret; + int err; int ok = false; struct p_header80 *h = &mdev->data.rbuf.header.h80; @@ -3480,9 +3501,9 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne .bm_words = drbd_bm_words(mdev), }; - do { + for(;;) { if (cmd == P_BITMAP) { - ret = receive_bitmap_plain(mdev, data_size, buffer, &c); + err = receive_bitmap_plain(mdev, data_size, buffer, &c); } else if (cmd == P_COMPRESSED_BITMAP) { /* MAYBE: sanity check that we speak proto >= 90, * and the feature is enabled! */ @@ -3501,7 +3522,7 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size); goto out; } - ret = decode_bitmap_c(mdev, p, &c); + err = decode_bitmap_c(mdev, p, &c); } else { dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd); goto out; @@ -3510,14 +3531,14 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne c.packets[cmd == P_BITMAP]++; c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size; - if (ret != OK) + if (err <= 0) { + if (err < 0) + goto out; break; - + } if (!drbd_recv_header(mdev, &cmd, &data_size)) goto out; - } while (ret == OK); - if (ret == FAILED) - goto out; + } INFO_bm_xfer_stats(mdev, "receive", &c); From 24dccabb390412d04435e11cfb535df51def7b2d Mon Sep 17 00:00:00 2001 From: Andreas Gruenbacher Date: Sun, 12 Dec 2010 17:45:41 +0100 Subject: [PATCH 060/122] drbd: Fix: drbd_bitmap_io does not return an enum determine_dev_size I guess bitmap I/O errors are supposed to cause drbd_determin_dev_size to return dev_size_error. 
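For illustration, a minimal sketch of the error handling this fix introduces (not the literal hunk, just the pattern; all identifiers are taken from the diff below): drbd_bitmap_io() hands back a plain int, so a failure has to be translated explicitly into the enum determine_dev_size value dev_size_error before drbd_determin_dev_size() returns.

	int err;

	/* drbd_bitmap_io() does not return an enum determine_dev_size;
	 * a nonzero return means the bitmap write failed. */
	err = drbd_bitmap_io(mdev, &drbd_bm_write, "size changed");
	if (err) {
		rv = dev_size_error;	/* report the failure in the enum's terms */
		goto out;
	}
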
Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_nl.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index ada5483f7e56..2f0724982143 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -643,11 +643,17 @@ enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, enum dds_ || prev_size != mdev->ldev->md.md_size_sect; if (la_size_changed || md_moved) { + int err; + drbd_al_shrink(mdev); /* All extents inactive. */ dev_info(DEV, "Writing the whole bitmap, %s\n", la_size_changed && md_moved ? "size changed and md moved" : la_size_changed ? "size changed" : "md moved"); - rv = drbd_bitmap_io(mdev, &drbd_bm_write, "size changed"); /* does drbd_resume_io() ! */ + err = drbd_bitmap_io(mdev, &drbd_bm_write, "size changed"); /* does drbd_resume_io() ! */ + if (err) { + rv = dev_size_error; + goto out; + } drbd_md_mark_dirty(mdev); } From 1b881ef77537f1077482f9946a6a99b4e2dd54b2 Mon Sep 17 00:00:00 2001 From: Andreas Gruenbacher Date: Mon, 13 Dec 2010 18:03:38 +0100 Subject: [PATCH 061/122] drbd: Rename __inc_ap_bio_cond to may_inc_ap_bio The old name is confusing: the function does not increment anything. Also rename _inc_ap_bio_cond to inc_ap_bio_cond: there is no need for an underscore. Finally, make it clear that these functions return boolean values. Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_int.h | 22 +++++++++++----------- drivers/block/drbd/drbd_main.c | 2 +- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 0855934e19d0..f6da48bb8c70 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -2284,14 +2284,14 @@ static inline int is_susp(union drbd_state s) return s.susp || s.susp_nod || s.susp_fen; } -static inline int __inc_ap_bio_cond(struct drbd_conf *mdev) +static inline bool may_inc_ap_bio(struct drbd_conf *mdev) { int mxb = drbd_get_max_buffers(mdev); if (is_susp(mdev->state)) - return 0; + return false; if (test_bit(SUSPEND_IO, &mdev->flags)) - return 0; + return false; /* to avoid potential deadlock or bitmap corruption, * in various places, we only allow new application io @@ -2299,23 +2299,23 @@ static inline int __inc_ap_bio_cond(struct drbd_conf *mdev) /* no new io accepted when attaching or detaching the disk */ if (!drbd_state_is_stable(mdev)) - return 0; + return false; /* since some older kernels don't have atomic_add_unless, * and we are within the spinlock anyways, we have this workaround. */ if (atomic_read(&mdev->ap_bio_cnt) > mxb) - return 0; + return false; if (test_bit(BITMAP_IO, &mdev->flags)) - return 0; - return 1; + return false; + return true; } -static inline int _inc_ap_bio_cond(struct drbd_conf *mdev, int count) +static inline bool inc_ap_bio_cond(struct drbd_conf *mdev, int count) { - int rv = 0; + bool rv = false; spin_lock_irq(&mdev->req_lock); - rv = __inc_ap_bio_cond(mdev); + rv = may_inc_ap_bio(mdev); if (rv) atomic_add(count, &mdev->ap_bio_cnt); spin_unlock_irq(&mdev->req_lock); @@ -2333,7 +2333,7 @@ static inline void inc_ap_bio(struct drbd_conf *mdev, int count) * to avoid races with the reconnect code, * we need to atomic_inc within the spinlock. 
*/ - wait_event(mdev->misc_wait, _inc_ap_bio_cond(mdev, count)); + wait_event(mdev->misc_wait, inc_ap_bio_cond(mdev, count)); } static inline void dec_ap_bio(struct drbd_conf *mdev) diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 7c72595b7ff0..67fffad213ec 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -3239,7 +3239,7 @@ static int drbd_congested(void *congested_data, int bdi_bits) char reason = '-'; int r = 0; - if (!__inc_ap_bio_cond(mdev)) { + if (!may_inc_ap_bio(mdev)) { /* DRBD has frozen IO */ r = bdi_bits; reason = 'd'; From 7777a8ba1fc980e5edfe492ebf5a1676497b8db2 Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Wed, 15 Dec 2010 23:21:39 +0100 Subject: [PATCH 062/122] drbd: bitmap: don't count unused bits (fix non-terminating resync) We trusted the on-disk bitmap to have unused bits cleared. In case that is not true for whatever reason, and we take a code path where the unused bits don't get cleared elsewhere (bm_clear_surplus is not called), we may miscount the bits, and get confused during resync, waiting for bits to get cleared that we don't even use: the resync process would not terminate. Fix this by masking out unused bits in __bm_count_bits. Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_bitmap.c | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index 9390e9526786..c5361487cf47 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c @@ -376,9 +376,16 @@ static unsigned long __bm_count_bits(struct drbd_bitmap *b, const int swap_endia unsigned long *p_addr, *bm, offset = 0; unsigned long bits = 0; unsigned long i, do_now; + unsigned long words; - while (offset < b->bm_words) { - i = do_now = min_t(size_t, b->bm_words-offset, LWPP); + /* due to 64bit alignment, the last long on a 32bit arch + * may be not used at all. The last used long will likely + * be only partially used, always. Don't count those bits, + * but mask them out. */ + words = (b->bm_bits + BITS_PER_LONG - 1) >> LN2_BPL; + + while (offset < words) { + i = do_now = min_t(size_t, words-offset, LWPP); p_addr = __bm_map_paddr(b, offset, KM_USER0); bm = p_addr + MLPP(offset); while (i--) { @@ -388,8 +395,20 @@ static unsigned long __bm_count_bits(struct drbd_bitmap *b, const int swap_endia #endif bits += hweight_long(*bm++); } - __bm_unmap(p_addr, KM_USER0); offset += do_now; + if (offset == words) { + /* last word may only be partially used, + * see also bm_clear_surplus. */ + i = (1UL << (b->bm_bits & (BITS_PER_LONG-1))) -1; + if (i) { + bits -= hweight_long(p_addr[do_now-1] & ~i); + p_addr[do_now-1] &= i; + } + /* 32bit arch, may have an unused padding long */ + if (words != b->bm_words) + p_addr[do_now] = 0; + } + __bm_unmap(p_addr, KM_USER0); cond_resched(); } From 95a0f10cddbf93ce89c175ac1c53dad2d20ad309 Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Wed, 15 Dec 2010 08:59:09 +0100 Subject: [PATCH 063/122] drbd: store in-core bitmap little endian, regardless of architecture Our on-disk bitmap is a little endian bitstream. Up to now, we have stored the in-core copy of that in native endian, applying byte order conversion when necessary. Instead, keep the bitmap pages little endian, as they are read from disk, and use the generic_*_le_bit family of functions. 
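As a quick illustration of the new access pattern (a sketch only, not part of the patch): a single bit test now goes through the _le_ bitops on the kmapped page, roughly what drbd_bm_test_bit() does after this change. The wrapper name bm_test_one_bit_le is made up for this example; bm_map_paddr(), bm_unmap(), generic_test_le_bit(), LN2_BPL and BPP_MASK are the existing names used in the hunks below.

	static int bm_test_one_bit_le(struct drbd_bitmap *b, unsigned long bitnr)
	{
		/* the mapped page is little endian in core, exactly as on disk,
		 * so use the generic little-endian bitops instead of test_bit() */
		unsigned long *p_addr = bm_map_paddr(b, bitnr >> LN2_BPL);
		int i = generic_test_le_bit(bitnr & BPP_MASK, p_addr) ? 1 : 0;

		bm_unmap(p_addr);
		return i;
	}
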
Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_bitmap.c | 164 ++++++++++++++++--------------- 1 file changed, 83 insertions(+), 81 deletions(-) diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index c5361487cf47..8d959ed6c2cc 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c @@ -30,6 +30,7 @@ #include #include "drbd_int.h" + /* OPAQUE outside this file! * interface defined in drbd_int.h @@ -154,6 +155,14 @@ void drbd_bm_unlock(struct drbd_conf *mdev) mutex_unlock(&b->bm_change); } +static unsigned int bm_bit_to_page_idx(struct drbd_bitmap *b, u64 bitnr) +{ + /* page_nr = (bitnr/8) >> PAGE_SHIFT; */ + unsigned int page_nr = bitnr >> (PAGE_SHIFT + 3); + BUG_ON(page_nr >= b->bm_number_of_pages); + return page_nr; +} + /* word offset to long pointer */ static unsigned long *__bm_map_paddr(struct drbd_bitmap *b, unsigned long offset, const enum km_type km) { @@ -168,6 +177,17 @@ static unsigned long *__bm_map_paddr(struct drbd_bitmap *b, unsigned long offset return (unsigned long *) kmap_atomic(page, km); } +static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx, const enum km_type km) +{ + struct page *page = b->bm_pages[idx]; + return (unsigned long *) kmap_atomic(page, km); +} + +static unsigned long *bm_map_pidx(struct drbd_bitmap *b, unsigned int idx) +{ + return __bm_map_pidx(b, idx, KM_IRQ1); +} + static unsigned long * bm_map_paddr(struct drbd_bitmap *b, unsigned long offset) { return __bm_map_paddr(b, offset, KM_IRQ1); @@ -329,22 +349,42 @@ void drbd_bm_cleanup(struct drbd_conf *mdev) * this masks out the remaining bits. * Returns the number of bits cleared. */ +#define BITS_PER_PAGE (1UL << (PAGE_SHIFT + 3)) +#define BITS_PER_PAGE_MASK (BITS_PER_PAGE - 1) +#define BITS_PER_LONG_MASK (BITS_PER_LONG - 1) static int bm_clear_surplus(struct drbd_bitmap *b) { - const unsigned long mask = (1UL << (b->bm_bits & (BITS_PER_LONG-1))) - 1; - size_t w = b->bm_bits >> LN2_BPL; - int cleared = 0; + unsigned long mask; unsigned long *p_addr, *bm; + int tmp; + int cleared = 0; - p_addr = bm_map_paddr(b, w); - bm = p_addr + MLPP(w); - if (w < b->bm_words) { + /* number of bits modulo bits per page */ + tmp = (b->bm_bits & BITS_PER_PAGE_MASK); + /* mask the used bits of the word containing the last bit */ + mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1; + /* bitmap is always stored little endian, + * on disk and in core memory alike */ + mask = cpu_to_lel(mask); + + /* because of the "extra long to catch oob access" we allocate in + * drbd_bm_resize, bm_number_of_pages -1 is not necessarily the page + * containing the last _relevant_ bitmap word */ + p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, b->bm_bits - 1)); + bm = p_addr + (tmp/BITS_PER_LONG); + if (mask) { + /* If mask != 0, we are not exactly aligned, so bm now points + * to the long containing the last bit. + * If mask == 0, bm already points to the word immediately + * after the last (long word aligned) bit. 
*/ cleared = hweight_long(*bm & ~mask); *bm &= mask; - w++; bm++; + bm++; } - if (w < b->bm_words) { + if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) { + /* on a 32bit arch, we may need to zero out + * a padding long to align with a 64bit remote */ cleared += hweight_long(*bm); *bm = 0; } @@ -354,24 +394,41 @@ static int bm_clear_surplus(struct drbd_bitmap *b) static void bm_set_surplus(struct drbd_bitmap *b) { - const unsigned long mask = (1UL << (b->bm_bits & (BITS_PER_LONG-1))) - 1; - size_t w = b->bm_bits >> LN2_BPL; + unsigned long mask; unsigned long *p_addr, *bm; + int tmp; - p_addr = bm_map_paddr(b, w); - bm = p_addr + MLPP(w); - if (w < b->bm_words) { + /* number of bits modulo bits per page */ + tmp = (b->bm_bits & BITS_PER_PAGE_MASK); + /* mask the used bits of the word containing the last bit */ + mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1; + /* bitmap is always stored little endian, + * on disk and in core memory alike */ + mask = cpu_to_lel(mask); + + /* because of the "extra long to catch oob access" we allocate in + * drbd_bm_resize, bm_number_of_pages -1 is not necessarily the page + * containing the last _relevant_ bitmap word */ + p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, b->bm_bits - 1)); + bm = p_addr + (tmp/BITS_PER_LONG); + if (mask) { + /* If mask != 0, we are not exactly aligned, so bm now points + * to the long containing the last bit. + * If mask == 0, bm already points to the word immediately + * after the last (long word aligned) bit. */ *bm |= ~mask; - bm++; w++; + bm++; } - if (w < b->bm_words) { - *bm = ~(0UL); + if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) { + /* on a 32bit arch, we may need to zero out + * a padding long to align with a 64bit remote */ + *bm = ~0UL; } bm_unmap(p_addr); } -static unsigned long __bm_count_bits(struct drbd_bitmap *b, const int swap_endian) +static unsigned long bm_count_bits(struct drbd_bitmap *b) { unsigned long *p_addr, *bm, offset = 0; unsigned long bits = 0; @@ -389,10 +446,6 @@ static unsigned long __bm_count_bits(struct drbd_bitmap *b, const int swap_endia p_addr = __bm_map_paddr(b, offset, KM_USER0); bm = p_addr + MLPP(offset); while (i--) { -#ifndef __LITTLE_ENDIAN - if (swap_endian) - *bm = lel_to_cpu(*bm); -#endif bits += hweight_long(*bm++); } offset += do_now; @@ -415,16 +468,6 @@ static unsigned long __bm_count_bits(struct drbd_bitmap *b, const int swap_endia return bits; } -static unsigned long bm_count_bits(struct drbd_bitmap *b) -{ - return __bm_count_bits(b, 0); -} - -static unsigned long bm_count_bits_swap_endian(struct drbd_bitmap *b) -{ - return __bm_count_bits(b, 1); -} - /* offset and len in long words.*/ static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len) { @@ -662,7 +705,7 @@ void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number, offset += do_now; while (do_now--) { bits = hweight_long(*bm); - word = *bm | lel_to_cpu(*buffer++); + word = *bm | *buffer++; *bm++ = word; b->bm_set += hweight_long(word) - bits; } @@ -709,7 +752,7 @@ void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, size_t number, bm = p_addr + MLPP(offset); offset += do_now; while (do_now--) - *buffer++ = cpu_to_lel(*bm++); + *buffer++ = *bm++; bm_unmap(p_addr); } } @@ -795,39 +838,6 @@ static void bm_page_io_async(struct drbd_conf *mdev, struct drbd_bitmap *b, int } } -# if defined(__LITTLE_ENDIAN) - /* nothing to do, on disk == in memory */ -# define bm_cpu_to_lel(x) ((void)0) -# else -static void bm_cpu_to_lel(struct drbd_bitmap *b) -{ - /* need to 
cpu_to_lel all the pages ... - * this may be optimized by using - * cpu_to_lel(-1) == -1 and cpu_to_lel(0) == 0; - * the following is still not optimal, but better than nothing */ - unsigned int i; - unsigned long *p_addr, *bm; - if (b->bm_set == 0) { - /* no page at all; avoid swap if all is 0 */ - i = b->bm_number_of_pages; - } else if (b->bm_set == b->bm_bits) { - /* only the last page */ - i = b->bm_number_of_pages - 1; - } else { - /* all pages */ - i = 0; - } - for (; i < b->bm_number_of_pages; i++) { - p_addr = kmap_atomic(b->bm_pages[i], KM_USER0); - for (bm = p_addr; bm < p_addr + PAGE_SIZE/sizeof(long); bm++) - *bm = cpu_to_lel(*bm); - kunmap_atomic(p_addr, KM_USER0); - } -} -# endif -/* lel_to_cpu == cpu_to_lel */ -# define bm_lel_to_cpu(x) bm_cpu_to_lel(x) - /* * bm_rw: read/write the whole bitmap from/to its on disk location. */ @@ -847,10 +857,6 @@ static int bm_rw(struct drbd_conf *mdev, int rw) __must_hold(local) bm_words = drbd_bm_words(mdev); num_pages = (bm_words*sizeof(long) + PAGE_SIZE-1) >> PAGE_SHIFT; - /* on disk bitmap is little endian */ - if (rw == WRITE) - bm_cpu_to_lel(b); - now = jiffies; atomic_set(&b->bm_async_io, num_pages); __clear_bit(BM_MD_IO_ERROR, &b->bm_flags); @@ -869,13 +875,9 @@ static int bm_rw(struct drbd_conf *mdev, int rw) __must_hold(local) now = jiffies; if (rw == WRITE) { - /* swap back endianness */ - bm_lel_to_cpu(b); - /* flush bitmap to stable storage */ drbd_md_flush(mdev); } else /* rw == READ */ { - /* just read, if necessary adjust endianness */ - b->bm_set = bm_count_bits_swap_endian(b); + b->bm_set = bm_count_bits(b); dev_info(DEV, "recounting of set bits took additional %lu jiffies\n", jiffies - now); } @@ -969,9 +971,9 @@ static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo, p_addr = __bm_map_paddr(b, offset, km); if (find_zero_bit) - i = find_next_zero_bit(p_addr, PAGE_SIZE*8, bm_fo & BPP_MASK); + i = generic_find_next_zero_le_bit(p_addr, PAGE_SIZE*8, bm_fo & BPP_MASK); else - i = find_next_bit(p_addr, PAGE_SIZE*8, bm_fo & BPP_MASK); + i = generic_find_next_le_bit(p_addr, PAGE_SIZE*8, bm_fo & BPP_MASK); __bm_unmap(p_addr, km); if (i < PAGE_SIZE*8) { @@ -1064,9 +1066,9 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s, last_page_nr = page_nr; } if (val) - c += (0 == __test_and_set_bit(bitnr & BPP_MASK, p_addr)); + c += (0 == generic___test_and_set_le_bit(bitnr & BPP_MASK, p_addr)); else - c -= (0 != __test_and_clear_bit(bitnr & BPP_MASK, p_addr)); + c -= (0 != generic___test_and_clear_le_bit(bitnr & BPP_MASK, p_addr)); } if (p_addr) __bm_unmap(p_addr, km); @@ -1211,7 +1213,7 @@ int drbd_bm_test_bit(struct drbd_conf *mdev, const unsigned long bitnr) if (bitnr < b->bm_bits) { unsigned long offset = bitnr>>LN2_BPL; p_addr = bm_map_paddr(b, offset); - i = test_bit(bitnr & BPP_MASK, p_addr) ? 1 : 0; + i = generic_test_le_bit(bitnr & BPP_MASK, p_addr) ? 
1 : 0; bm_unmap(p_addr); } else if (bitnr == b->bm_bits) { i = -1; @@ -1255,7 +1257,7 @@ int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsi ERR_IF (bitnr >= b->bm_bits) { dev_err(DEV, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits); } else { - c += (0 != test_bit(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr)); + c += (0 != generic_test_le_bit(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr)); } } if (p_addr) From 19f843aa08e2d8f87a09b4c2edc43b00638423a8 Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Wed, 15 Dec 2010 08:59:11 +0100 Subject: [PATCH 064/122] drbd: bitmap keep track of changes vs on-disk bitmap When we set or clear bits in a bitmap page, also set a flag in the page->private pointer. This allows us to skip writes of unchanged pages. Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_actlog.c | 132 +++------- drivers/block/drbd/drbd_bitmap.c | 411 +++++++++++++++++++++++-------- drivers/block/drbd/drbd_int.h | 7 +- drivers/block/drbd/drbd_main.c | 27 +- drivers/block/drbd/drbd_nl.c | 11 +- drivers/block/drbd/drbd_worker.c | 7 +- 6 files changed, 374 insertions(+), 221 deletions(-) diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c index e3f0f4d31d75..090fc2ce0df4 100644 --- a/drivers/block/drbd/drbd_actlog.c +++ b/drivers/block/drbd/drbd_actlog.c @@ -262,6 +262,33 @@ void drbd_al_complete_io(struct drbd_conf *mdev, sector_t sector) spin_unlock_irqrestore(&mdev->al_lock, flags); } +#if (PAGE_SHIFT + 3) < (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT) +/* Currently BM_BLOCK_SHIFT, BM_EXT_SHIFT and AL_EXTENT_SHIFT + * are still coupled, or assume too much about their relation. + * Code below will not work if this is violated. + * Will be cleaned up with some followup patch. + */ +# error FIXME +#endif + +static unsigned int al_extent_to_bm_page(unsigned int al_enr) +{ + return al_enr >> + /* bit to page */ + ((PAGE_SHIFT + 3) - + /* al extent number to bit */ + (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT)); +} + +static unsigned int rs_extent_to_bm_page(unsigned int rs_enr) +{ + return rs_enr >> + /* bit to page */ + ((PAGE_SHIFT + 3) - + /* al extent number to bit */ + (BM_EXT_SHIFT - BM_BLOCK_SHIFT)); +} + int w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused) { @@ -289,7 +316,7 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused) * For now, we must not write the transaction, * if we cannot write out the bitmap of the evicted extent. */ if (mdev->state.conn < C_CONNECTED && evicted != LC_FREE) - drbd_bm_write_sect(mdev, evicted/AL_EXT_PER_BM_SECT); + drbd_bm_write_page(mdev, al_extent_to_bm_page(evicted)); /* The bitmap write may have failed, causing a state change. */ if (mdev->state.disk < D_INCONSISTENT) { @@ -635,105 +662,6 @@ out_bio_put: return err; } -/** - * drbd_al_to_on_disk_bm() - * Writes bitmap parts covered by active AL extents - * @mdev: DRBD device. - * - * Called when we detach (unconfigure) local storage, - * or when we go from R_PRIMARY to R_SECONDARY role. - */ -void drbd_al_to_on_disk_bm(struct drbd_conf *mdev) -{ - int i, nr_elements; - unsigned int enr; - struct bio **bios; - struct drbd_atodb_wait wc; - - ERR_IF (!get_ldev_if_state(mdev, D_ATTACHING)) - return; /* sorry, I don't have any act_log etc... 
*/ - - wait_event(mdev->al_wait, lc_try_lock(mdev->act_log)); - - nr_elements = mdev->act_log->nr_elements; - - /* GFP_KERNEL, we are not in anyone's write-out path */ - bios = kzalloc(sizeof(struct bio *) * nr_elements, GFP_KERNEL); - if (!bios) - goto submit_one_by_one; - - atomic_set(&wc.count, 0); - init_completion(&wc.io_done); - wc.mdev = mdev; - wc.error = 0; - - for (i = 0; i < nr_elements; i++) { - enr = lc_element_by_index(mdev->act_log, i)->lc_number; - if (enr == LC_FREE) - continue; - /* next statement also does atomic_inc wc.count and local_cnt */ - if (atodb_prepare_unless_covered(mdev, bios, - enr/AL_EXT_PER_BM_SECT, - &wc)) - goto free_bios_submit_one_by_one; - } - - /* unnecessary optimization? */ - lc_unlock(mdev->act_log); - wake_up(&mdev->al_wait); - - /* all prepared, submit them */ - for (i = 0; i < nr_elements; i++) { - if (bios[i] == NULL) - break; - if (drbd_insert_fault(mdev, DRBD_FAULT_MD_WR)) { - bios[i]->bi_rw = WRITE; - bio_endio(bios[i], -EIO); - } else { - submit_bio(WRITE, bios[i]); - } - } - - /* always (try to) flush bitmap to stable storage */ - drbd_md_flush(mdev); - - /* In case we did not submit a single IO do not wait for - * them to complete. ( Because we would wait forever here. ) - * - * In case we had IOs and they are already complete, there - * is not point in waiting anyways. - * Therefore this if () ... */ - if (atomic_read(&wc.count)) - wait_for_completion(&wc.io_done); - - put_ldev(mdev); - - kfree(bios); - return; - - free_bios_submit_one_by_one: - /* free everything by calling the endio callback directly. */ - for (i = 0; i < nr_elements && bios[i]; i++) - bio_endio(bios[i], 0); - - kfree(bios); - - submit_one_by_one: - dev_warn(DEV, "Using the slow drbd_al_to_on_disk_bm()\n"); - - for (i = 0; i < mdev->act_log->nr_elements; i++) { - enr = lc_element_by_index(mdev->act_log, i)->lc_number; - if (enr == LC_FREE) - continue; - /* Really slow: if we have al-extents 16..19 active, - * sector 4 will be written four times! Synchronous! */ - drbd_bm_write_sect(mdev, enr/AL_EXT_PER_BM_SECT); - } - - lc_unlock(mdev->act_log); - wake_up(&mdev->al_wait); - put_ldev(mdev); -} - /** * drbd_al_apply_to_bm() - Sets the bitmap to diry(1) where covered ba active AL extents * @mdev: DRBD device. @@ -813,7 +741,7 @@ static int w_update_odbm(struct drbd_conf *mdev, struct drbd_work *w, int unused return 1; } - drbd_bm_write_sect(mdev, udw->enr); + drbd_bm_write_page(mdev, rs_extent_to_bm_page(udw->enr)); put_ldev(mdev); kfree(udw); @@ -893,7 +821,6 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector, dev_warn(DEV, "Kicking resync_lru element enr=%u " "out with rs_failed=%d\n", ext->lce.lc_number, ext->rs_failed); - set_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags); } ext->rs_left = rs_left; ext->rs_failed = success ? 
0 : count; @@ -912,7 +839,6 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector, drbd_queue_work_front(&mdev->data.work, &udw->w); } else { dev_warn(DEV, "Could not kmalloc an udw\n"); - set_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags); } } } else { diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index 8d959ed6c2cc..72cd41a96ef9 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c @@ -70,8 +70,7 @@ struct drbd_bitmap { sector_t bm_dev_capacity; struct mutex bm_change; /* serializes resize operations */ - atomic_t bm_async_io; - wait_queue_head_t bm_io_wait; + wait_queue_head_t bm_io_wait; /* used to serialize IO of single pages */ unsigned long bm_flags; @@ -82,7 +81,7 @@ struct drbd_bitmap { /* definition of bits in bm_flags */ #define BM_LOCKED 0 -#define BM_MD_IO_ERROR 1 +// #define BM_MD_IO_ERROR 1 unused now. #define BM_P_VMALLOCED 2 static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s, @@ -155,26 +154,117 @@ void drbd_bm_unlock(struct drbd_conf *mdev) mutex_unlock(&b->bm_change); } -static unsigned int bm_bit_to_page_idx(struct drbd_bitmap *b, u64 bitnr) +/* we store some "meta" info about our pages in page->private */ +/* at a granularity of 4k storage per bitmap bit: + * one peta byte storage: 1<<50 byte, 1<<38 * 4k storage blocks + * 1<<38 bits, + * 1<<23 4k bitmap pages. + * Use 24 bits as page index, covers 2 peta byte storage + * at a granularity of 4k per bit. + * Used to report the failed page idx on io error from the endio handlers. + */ +#define BM_PAGE_IDX_MASK ((1UL<<24)-1) +/* this page is currently read in, or written back */ +#define BM_PAGE_IO_LOCK 31 +/* if there has been an IO error for this page */ +#define BM_PAGE_IO_ERROR 30 +/* this is to be able to intelligently skip disk IO, + * set if bits have been set since last IO. */ +#define BM_PAGE_NEED_WRITEOUT 29 +/* to mark for lazy writeout once syncer cleared all clearable bits, + * we if bits have been cleared since last IO. */ +#define BM_PAGE_LAZY_WRITEOUT 28 + +/* store_page_idx uses non-atomic assingment. It is only used directly after + * allocating the page. All other bm_set_page_* and bm_clear_page_* need to + * use atomic bit manipulation, as set_out_of_sync (and therefore bitmap + * changes) may happen from various contexts, and wait_on_bit/wake_up_bit + * requires it all to be atomic as well. */ +static void bm_store_page_idx(struct page *page, unsigned long idx) { - /* page_nr = (bitnr/8) >> PAGE_SHIFT; */ - unsigned int page_nr = bitnr >> (PAGE_SHIFT + 3); - BUG_ON(page_nr >= b->bm_number_of_pages); - return page_nr; + BUG_ON(0 != (idx & ~BM_PAGE_IDX_MASK)); + page_private(page) |= idx; } -/* word offset to long pointer */ -static unsigned long *__bm_map_paddr(struct drbd_bitmap *b, unsigned long offset, const enum km_type km) +static unsigned long bm_page_to_idx(struct page *page) { - struct page *page; - unsigned long page_nr; + return page_private(page) & BM_PAGE_IDX_MASK; +} +/* As is very unlikely that the same page is under IO from more than one + * context, we can get away with a bit per page and one wait queue per bitmap. 
+ */ +static void bm_page_lock_io(struct drbd_conf *mdev, int page_nr) +{ + struct drbd_bitmap *b = mdev->bitmap; + void *addr = &page_private(b->bm_pages[page_nr]); + wait_event(b->bm_io_wait, !test_and_set_bit(BM_PAGE_IO_LOCK, addr)); +} + +static void bm_page_unlock_io(struct drbd_conf *mdev, int page_nr) +{ + struct drbd_bitmap *b = mdev->bitmap; + void *addr = &page_private(b->bm_pages[page_nr]); + clear_bit(BM_PAGE_IO_LOCK, addr); + smp_mb__after_clear_bit(); + wake_up(&mdev->bitmap->bm_io_wait); +} + +/* set _before_ submit_io, so it may be reset due to being changed + * while this page is in flight... will get submitted later again */ +static void bm_set_page_unchanged(struct page *page) +{ + /* use cmpxchg? */ + clear_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page)); + clear_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page)); +} + +static void bm_set_page_need_writeout(struct page *page) +{ + set_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page)); +} + +static int bm_test_page_unchanged(struct page *page) +{ + volatile const unsigned long *addr = &page_private(page); + return (*addr & ((1UL<> PAGE_SHIFT; */ - page_nr = offset >> (PAGE_SHIFT - LN2_BPL + 3); + unsigned int page_nr = long_nr >> (PAGE_SHIFT - LN2_BPL + 3); BUG_ON(page_nr >= b->bm_number_of_pages); - page = b->bm_pages[page_nr]; + return page_nr; +} - return (unsigned long *) kmap_atomic(page, km); +static unsigned int bm_bit_to_page_idx(struct drbd_bitmap *b, u64 bitnr) +{ + /* page_nr = (bitnr/8) >> PAGE_SHIFT; */ + unsigned int page_nr = bitnr >> (PAGE_SHIFT + 3); + BUG_ON(page_nr >= b->bm_number_of_pages); + return page_nr; } static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx, const enum km_type km) @@ -188,11 +278,6 @@ static unsigned long *bm_map_pidx(struct drbd_bitmap *b, unsigned int idx) return __bm_map_pidx(b, idx, KM_IRQ1); } -static unsigned long * bm_map_paddr(struct drbd_bitmap *b, unsigned long offset) -{ - return __bm_map_paddr(b, offset, KM_IRQ1); -} - static void __bm_unmap(unsigned long *p_addr, const enum km_type km) { kunmap_atomic(p_addr, km); @@ -222,6 +307,7 @@ static void bm_unmap(unsigned long *p_addr) * to be able to report device specific. 
*/ + static void bm_free_pages(struct page **pages, unsigned long number) { unsigned long i; @@ -289,6 +375,9 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want) bm_vk_free(new_pages, vmalloced); return NULL; } + /* we want to know which page it is + * from the endio handlers */ + bm_store_page_idx(page, i); new_pages[i] = page; } } else { @@ -443,7 +532,7 @@ static unsigned long bm_count_bits(struct drbd_bitmap *b) while (offset < words) { i = do_now = min_t(size_t, words-offset, LWPP); - p_addr = __bm_map_paddr(b, offset, KM_USER0); + p_addr = __bm_map_pidx(b, bm_word_to_page_idx(b, offset), KM_USER0); bm = p_addr + MLPP(offset); while (i--) { bits += hweight_long(*bm++); @@ -472,6 +561,7 @@ static unsigned long bm_count_bits(struct drbd_bitmap *b) static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len) { unsigned long *p_addr, *bm; + unsigned int idx; size_t do_now, end; #define BM_SECTORS_PER_BIT (BM_BLOCK_SIZE/512) @@ -485,7 +575,8 @@ static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len) while (offset < end) { do_now = min_t(size_t, ALIGN(offset + 1, LWPP), end) - offset; - p_addr = bm_map_paddr(b, offset); + idx = bm_word_to_page_idx(b, offset); + p_addr = bm_map_pidx(b, idx); bm = p_addr + MLPP(offset); if (bm+do_now > p_addr + LWPP) { printk(KERN_ALERT "drbd: BUG BUG BUG! p_addr:%p bm:%p do_now:%d\n", @@ -494,6 +585,7 @@ static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len) } memset(bm, c, do_now * sizeof(long)); bm_unmap(p_addr); + bm_set_page_need_writeout(b->bm_pages[idx]); offset += do_now; } } @@ -604,7 +696,7 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits) bm_free_pages(opages + want, have - want); } - p_addr = bm_map_paddr(b, words); + p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, words)); bm = p_addr + MLPP(words); *bm = DRBD_MAGIC; bm_unmap(p_addr); @@ -616,7 +708,7 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits) bm_vk_free(opages, opages_vmalloced); if (!growing) b->bm_set = bm_count_bits(b); - dev_info(DEV, "resync bitmap: bits=%lu words=%lu\n", bits, words); + dev_info(DEV, "resync bitmap: bits=%lu words=%lu pages=%lu\n", bits, words, want); out: drbd_bm_unlock(mdev); @@ -686,6 +778,7 @@ void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number, struct drbd_bitmap *b = mdev->bitmap; unsigned long *p_addr, *bm; unsigned long word, bits; + unsigned int idx; size_t end, do_now; end = offset + number; @@ -700,7 +793,8 @@ void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number, spin_lock_irq(&b->bm_lock); while (offset < end) { do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset; - p_addr = bm_map_paddr(b, offset); + idx = bm_word_to_page_idx(b, offset); + p_addr = bm_map_pidx(b, idx); bm = p_addr + MLPP(offset); offset += do_now; while (do_now--) { @@ -710,6 +804,7 @@ void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number, b->bm_set += hweight_long(word) - bits; } bm_unmap(p_addr); + bm_set_page_need_writeout(b->bm_pages[idx]); } /* with 32bit <-> 64bit cross-platform connect * this is only correct for current usage, @@ -748,7 +843,7 @@ void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, size_t number, else { while (offset < end) { do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset; - p_addr = bm_map_paddr(b, offset); + p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, offset)); bm = p_addr + MLPP(offset); offset += 
do_now; while (do_now--) @@ -786,9 +881,22 @@ void drbd_bm_clear_all(struct drbd_conf *mdev) spin_unlock_irq(&b->bm_lock); } +struct bm_aio_ctx { + struct drbd_conf *mdev; + atomic_t in_flight; + wait_queue_head_t io_wait; + unsigned flags; +#define BM_AIO_COPY_PAGES 1 + int error; +}; + +/* bv_page may be a copy, or may be the original */ static void bm_async_io_complete(struct bio *bio, int error) { - struct drbd_bitmap *b = bio->bi_private; + struct bm_aio_ctx *ctx = bio->bi_private; + struct drbd_conf *mdev = ctx->mdev; + struct drbd_bitmap *b = mdev->bitmap; + unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page); int uptodate = bio_flagged(bio, BIO_UPTODATE); @@ -799,35 +907,79 @@ static void bm_async_io_complete(struct bio *bio, int error) if (!error && !uptodate) error = -EIO; + if (!bm_test_page_unchanged(b->bm_pages[idx])) + dev_info(DEV, "bitmap page idx %u changed during IO!\n", idx); + if (error) { - /* doh. what now? - * for now, set all bits, and flag MD_IO_ERROR */ - __set_bit(BM_MD_IO_ERROR, &b->bm_flags); + /* ctx error will hold the completed-last non-zero error code, + * in case error codes differ. */ + ctx->error = error; + bm_set_page_io_err(b->bm_pages[idx]); + /* Not identical to on disk version of it. + * Is BM_PAGE_IO_ERROR enough? */ + if (__ratelimit(&drbd_ratelimit_state)) + dev_err(DEV, "IO ERROR %d on bitmap page idx %u\n", + error, idx); + } else { + bm_clear_page_io_err(b->bm_pages[idx]); + dynamic_dev_dbg(DEV, "bitmap page idx %u completed\n", idx); } - if (atomic_dec_and_test(&b->bm_async_io)) - wake_up(&b->bm_io_wait); + + bm_page_unlock_io(mdev, idx); + + /* FIXME give back to page pool */ + if (ctx->flags & BM_AIO_COPY_PAGES) + put_page(bio->bi_io_vec[0].bv_page); bio_put(bio); + + if (atomic_dec_and_test(&ctx->in_flight)) + wake_up(&ctx->io_wait); } -static void bm_page_io_async(struct drbd_conf *mdev, struct drbd_bitmap *b, int page_nr, int rw) __must_hold(local) +static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must_hold(local) { /* we are process context. 
we always get a bio */ struct bio *bio = bio_alloc(GFP_KERNEL, 1); + struct drbd_conf *mdev = ctx->mdev; + struct drbd_bitmap *b = mdev->bitmap; + struct page *page; unsigned int len; + sector_t on_disk_sector = mdev->ldev->md.md_offset + mdev->ldev->md.bm_offset; on_disk_sector += ((sector_t)page_nr) << (PAGE_SHIFT-9); /* this might happen with very small - * flexible external meta data device */ + * flexible external meta data device, + * or with PAGE_SIZE > 4k */ len = min_t(unsigned int, PAGE_SIZE, (drbd_md_last_sector(mdev->ldev) - on_disk_sector + 1)<<9); + /* serialize IO on this page */ + bm_page_lock_io(mdev, page_nr); + /* before memcpy and submit, + * so it can be redirtied any time */ + bm_set_page_unchanged(b->bm_pages[page_nr]); + + if (ctx->flags & BM_AIO_COPY_PAGES) { + /* FIXME alloc_page is good enough for now, but actually needs + * to use pre-allocated page pool */ + void *src, *dest; + page = alloc_page(__GFP_HIGHMEM|__GFP_WAIT); + dest = kmap_atomic(page, KM_USER0); + src = kmap_atomic(b->bm_pages[page_nr], KM_USER1); + memcpy(dest, src, PAGE_SIZE); + kunmap_atomic(src, KM_USER1); + kunmap_atomic(dest, KM_USER0); + bm_store_page_idx(page, page_nr); + } else + page = b->bm_pages[page_nr]; + bio->bi_bdev = mdev->ldev->md_bdev; bio->bi_sector = on_disk_sector; - bio_add_page(bio, b->bm_pages[page_nr], len, 0); - bio->bi_private = b; + bio_add_page(bio, page, len, 0); + bio->bi_private = ctx; bio->bi_end_io = bm_async_io_complete; if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) { @@ -841,36 +993,72 @@ static void bm_page_io_async(struct drbd_conf *mdev, struct drbd_bitmap *b, int /* * bm_rw: read/write the whole bitmap from/to its on disk location. */ -static int bm_rw(struct drbd_conf *mdev, int rw) __must_hold(local) +static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_idx) __must_hold(local) { + struct bm_aio_ctx ctx = + { .flags = lazy_writeout_upper_idx ? BM_AIO_COPY_PAGES : 0 }; struct drbd_bitmap *b = mdev->bitmap; - /* sector_t sector; */ - int bm_words, num_pages, i; + int last_page, i, count = 0; unsigned long now; char ppb[10]; int err = 0; - WARN_ON(!bm_is_locked(b)); + /* + * We are protected against bitmap disappearing/resizing by holding an + * ldev reference (caller must have called get_ldev()). + * For read/write, we are protected against changes to the bitmap by + * the bitmap lock (see drbd_bitmap_io). + * For lazy writeout, we don't care for ongoing changes to the bitmap, + * as we submit copies of pages anyways. + */ + if (!ctx.flags) + WARN_ON(!bm_is_locked(b)); - /* no spinlock here, the drbd_bm_lock should be enough! */ - - bm_words = drbd_bm_words(mdev); - num_pages = (bm_words*sizeof(long) + PAGE_SIZE-1) >> PAGE_SHIFT; + /* because of the "extra long to catch oob access" we allocate in + * drbd_bm_resize, bm_number_of_pages -1 is not necessarily the page + * containing the last _relevant_ bitmap word */ + last_page = bm_word_to_page_idx(b, b->bm_words - 1); now = jiffies; - atomic_set(&b->bm_async_io, num_pages); - __clear_bit(BM_MD_IO_ERROR, &b->bm_flags); + ctx.mdev = mdev; + atomic_set(&ctx.in_flight, 1); /* one extra ref */ + init_waitqueue_head(&ctx.io_wait); + ctx.error = 0; /* let the layers below us try to merge these bios... 
*/ - for (i = 0; i < num_pages; i++) - bm_page_io_async(mdev, b, i, rw); + for (i = 0; i <= last_page; i++) { + /* ignore completely unchanged pages */ + if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx) + break; + if (rw & WRITE) { + if (bm_test_page_unchanged(b->bm_pages[i])) { + dynamic_dev_dbg(DEV, "skipped bm write for idx %u\n", i); + continue; + } + /* during lazy writeout, + * ignore those pages not marked for lazy writeout. */ + if (lazy_writeout_upper_idx && + !bm_test_page_lazy_writeout(b->bm_pages[i])) { + dynamic_dev_dbg(DEV, "skipped bm lazy write for idx %u\n", i); + continue; + } + } + atomic_inc(&ctx.in_flight); + bm_page_io_async(&ctx, i, rw); + ++count; + cond_resched(); + } - wait_event(b->bm_io_wait, atomic_read(&b->bm_async_io) == 0); + atomic_dec(&ctx.in_flight); /* drop the extra ref */ + wait_event(ctx.io_wait, atomic_read(&ctx.in_flight) == 0); + dev_info(DEV, "bitmap %s of %u pages took %lu jiffies\n", + rw == WRITE ? "WRITE" : "READ", + count, jiffies - now); - if (test_bit(BM_MD_IO_ERROR, &b->bm_flags)) { + if (ctx.error) { dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n"); drbd_chk_io_error(mdev, 1, true); - err = -EIO; + err = -EIO; /* ctx.error ? */ } now = jiffies; @@ -895,55 +1083,63 @@ static int bm_rw(struct drbd_conf *mdev, int rw) __must_hold(local) */ int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local) { - return bm_rw(mdev, READ); + return bm_rw(mdev, READ, 0); } /** * drbd_bm_write() - Write the whole bitmap to its on disk location. * @mdev: DRBD device. + * + * Will only write pages that have changed since last IO. */ int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local) { - return bm_rw(mdev, WRITE); + return bm_rw(mdev, WRITE, 0); } /** - * drbd_bm_write_sect: Writes a 512 (MD_SECTOR_SIZE) byte piece of the bitmap + * drbd_bm_lazy_write_out() - Write bitmap pages 0 to @upper_idx-1, if they have changed. * @mdev: DRBD device. - * @enr: Extent number in the resync lru (happens to be sector offset) - * - * The BM_EXT_SIZE is on purpose exactly the amount of the bitmap covered - * by a single sector write. Therefore enr == sector offset from the - * start of the bitmap. + * @upper_idx: 0: write all changed pages; +ve: page index to stop scanning for changed pages */ -int drbd_bm_write_sect(struct drbd_conf *mdev, unsigned long enr) __must_hold(local) +int drbd_bm_write_lazy(struct drbd_conf *mdev, unsigned upper_idx) __must_hold(local) { - sector_t on_disk_sector = enr + mdev->ldev->md.md_offset - + mdev->ldev->md.bm_offset; - int bm_words, num_words, offset; - int err = 0; + return bm_rw(mdev, WRITE, upper_idx); +} - mutex_lock(&mdev->md_io_mutex); - bm_words = drbd_bm_words(mdev); - offset = S2W(enr); /* word offset into bitmap */ - num_words = min(S2W(1), bm_words - offset); - if (num_words < S2W(1)) - memset(page_address(mdev->md_io_page), 0, MD_SECTOR_SIZE); - drbd_bm_get_lel(mdev, offset, num_words, - page_address(mdev->md_io_page)); - if (!drbd_md_sync_page_io(mdev, mdev->ldev, on_disk_sector, WRITE)) { - int i; - err = -EIO; - dev_err(DEV, "IO ERROR writing bitmap sector %lu " - "(meta-disk sector %llus)\n", - enr, (unsigned long long)on_disk_sector); - drbd_chk_io_error(mdev, 1, true); - for (i = 0; i < AL_EXT_PER_BM_SECT; i++) - drbd_bm_ALe_set_all(mdev, enr*AL_EXT_PER_BM_SECT+i); + +/** + * drbd_bm_write_page: Writes a PAGE_SIZE aligned piece of bitmap + * @mdev: DRBD device. 
+ * @idx: bitmap page index + * + * We don't want to special case on logical_block_size of the underlaying + * device, so we submit PAGE_SIZE aligned pieces containing the requested enr. + * Note that on "most" systems, PAGE_SIZE is 4k. + */ +int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local) +{ + struct bm_aio_ctx ctx = { .flags = BM_AIO_COPY_PAGES, }; + + if (bm_test_page_unchanged(mdev->bitmap->bm_pages[idx])) { + dev_info(DEV, "skipped bm page write for idx %u\n", idx); + return 0; } + + ctx.mdev = mdev; + atomic_set(&ctx.in_flight, 1); + init_waitqueue_head(&ctx.io_wait); + + bm_page_io_async(&ctx, idx, WRITE_SYNC); + wait_event(ctx.io_wait, atomic_read(&ctx.in_flight) == 0); + + if (ctx.error) + drbd_chk_io_error(mdev, 1, true); + /* that should force detach, so the in memory bitmap will be + * gone in a moment as well. */ + mdev->bm_writ_cnt++; - mutex_unlock(&mdev->md_io_mutex); - return err; + return ctx.error; } /* NOTE @@ -965,10 +1161,9 @@ static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo, dev_err(DEV, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits); } else { while (bm_fo < b->bm_bits) { - unsigned long offset; - bit_offset = bm_fo & ~BPP_MASK; /* bit offset of the page */ - offset = bit_offset >> LN2_BPL; /* word offset of the page */ - p_addr = __bm_map_paddr(b, offset, km); + /* bit offset of the first bit in the page */ + bit_offset = bm_fo & ~BPP_MASK; + p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo), km); if (find_zero_bit) i = generic_find_next_zero_le_bit(p_addr, PAGE_SIZE*8, bm_fo & BPP_MASK); @@ -1048,8 +1243,9 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s, struct drbd_bitmap *b = mdev->bitmap; unsigned long *p_addr = NULL; unsigned long bitnr; - unsigned long last_page_nr = -1UL; + unsigned int last_page_nr = -1U; int c = 0; + int changed_total = 0; if (e >= b->bm_bits) { dev_err(DEV, "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n", @@ -1057,12 +1253,17 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s, e = b->bm_bits ? b->bm_bits -1 : 0; } for (bitnr = s; bitnr <= e; bitnr++) { - unsigned long offset = bitnr>>LN2_BPL; - unsigned long page_nr = offset >> (PAGE_SHIFT - LN2_BPL + 3); + unsigned int page_nr = bm_bit_to_page_idx(b, bitnr); if (page_nr != last_page_nr) { if (p_addr) __bm_unmap(p_addr, km); - p_addr = __bm_map_paddr(b, offset, km); + if (c < 0) + bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]); + else if (c > 0) + bm_set_page_need_writeout(b->bm_pages[last_page_nr]); + changed_total += c; + c = 0; + p_addr = __bm_map_pidx(b, page_nr, km); last_page_nr = page_nr; } if (val) @@ -1072,8 +1273,13 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s, } if (p_addr) __bm_unmap(p_addr, km); - b->bm_set += c; - return c; + if (c < 0) + bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]); + else if (c > 0) + bm_set_page_need_writeout(b->bm_pages[last_page_nr]); + changed_total += c; + b->bm_set += changed_total; + return changed_total; } /* returns number of bits actually changed. @@ -1211,8 +1417,7 @@ int drbd_bm_test_bit(struct drbd_conf *mdev, const unsigned long bitnr) if (bm_is_locked(b)) bm_print_lock_info(mdev); if (bitnr < b->bm_bits) { - unsigned long offset = bitnr>>LN2_BPL; - p_addr = bm_map_paddr(b, offset); + p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, bitnr)); i = generic_test_le_bit(bitnr & BPP_MASK, p_addr) ? 
1 : 0; bm_unmap(p_addr); } else if (bitnr == b->bm_bits) { @@ -1231,10 +1436,10 @@ int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsi { unsigned long flags; struct drbd_bitmap *b = mdev->bitmap; - unsigned long *p_addr = NULL, page_nr = -1; + unsigned long *p_addr = NULL; unsigned long bitnr; + unsigned int page_nr = -1U; int c = 0; - size_t w; /* If this is called without a bitmap, that is a bug. But just to be * robust in case we screwed up elsewhere, in that case pretend there @@ -1247,12 +1452,12 @@ int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsi if (bm_is_locked(b)) bm_print_lock_info(mdev); for (bitnr = s; bitnr <= e; bitnr++) { - w = bitnr >> LN2_BPL; - if (page_nr != w >> (PAGE_SHIFT - LN2_BPL + 3)) { - page_nr = w >> (PAGE_SHIFT - LN2_BPL + 3); + unsigned int idx = bm_bit_to_page_idx(b, bitnr); + if (page_nr != idx) { + page_nr = idx; if (p_addr) bm_unmap(p_addr); - p_addr = bm_map_paddr(b, w); + p_addr = bm_map_pidx(b, idx); } ERR_IF (bitnr >= b->bm_bits) { dev_err(DEV, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits); @@ -1300,7 +1505,7 @@ int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr) count = 0; if (s < b->bm_words) { int n = e-s; - p_addr = bm_map_paddr(b, s); + p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s)); bm = p_addr + MLPP(s); while (n--) count += hweight_long(*bm++); @@ -1335,7 +1540,7 @@ unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, unsigned long al_enr) count = 0; if (s < b->bm_words) { i = do_now = e-s; - p_addr = bm_map_paddr(b, s); + p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s)); bm = p_addr + MLPP(s); while (i--) { count += hweight_long(*bm); diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index f6da48bb8c70..74cc50a21822 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -833,7 +833,7 @@ enum { CRASHED_PRIMARY, /* This node was a crashed primary. * Gets cleared when the state.conn * goes into C_CONNECTED state. 
*/ - WRITE_BM_AFTER_RESYNC, /* A kmalloc() during resync failed */ + NO_BARRIER_SUPP, /* underlying block device doesn't implement barriers */ CONSIDER_RESYNC, MD_NO_FUA, /* Users wants us to not use FUA/FLUSH on meta data dev */ @@ -1428,7 +1428,7 @@ extern void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e); extern int drbd_bm_test_bit(struct drbd_conf *mdev, unsigned long bitnr); extern int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr); -extern int drbd_bm_write_sect(struct drbd_conf *mdev, unsigned long enr) __must_hold(local); +extern int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local); extern int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local); extern int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local); extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, @@ -1446,7 +1446,7 @@ extern int drbd_bm_rs_done(struct drbd_conf *mdev); /* for receive_bitmap */ extern void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number, unsigned long *buffer); -/* for _drbd_send_bitmap and drbd_bm_write_sect */ +/* for _drbd_send_bitmap */ extern void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, size_t number, unsigned long *buffer); @@ -1641,7 +1641,6 @@ extern int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, #define drbd_set_out_of_sync(mdev, sector, size) \ __drbd_set_out_of_sync(mdev, sector, size, __FILE__, __LINE__) extern void drbd_al_apply_to_bm(struct drbd_conf *mdev); -extern void drbd_al_to_on_disk_bm(struct drbd_conf *mdev); extern void drbd_al_shrink(struct drbd_conf *mdev); diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 67fffad213ec..57ed7181742d 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -1289,6 +1289,26 @@ static void abw_start_sync(struct drbd_conf *mdev, int rv) } } +int drbd_bitmap_io_from_worker(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why) +{ + int rv; + + D_ASSERT(current == mdev->worker.task); + + /* open coded non-blocking drbd_suspend_io(mdev); */ + set_bit(SUSPEND_IO, &mdev->flags); + if (!is_susp(mdev->state)) + D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0); + + drbd_bm_lock(mdev, why); + rv = io_fn(mdev); + drbd_bm_unlock(mdev); + + drbd_resume_io(mdev); + + return rv; +} + /** * after_state_ch() - Perform after state change actions that may sleep * @mdev: DRBD device. 
@@ -1404,7 +1424,12 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, /* D_DISKLESS Peer becomes secondary */ if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY) - drbd_al_to_on_disk_bm(mdev); + drbd_bitmap_io_from_worker(mdev, &drbd_bm_write, "demote diskless peer"); + put_ldev(mdev); + } + + if (os.role == R_PRIMARY && ns.role == R_SECONDARY && get_ldev(mdev)) { + drbd_bitmap_io_from_worker(mdev, &drbd_bm_write, "demote"); put_ldev(mdev); } diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 2f0724982143..77dc022eaf6b 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -407,10 +407,8 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) } } - if ((new_role == R_SECONDARY) && get_ldev(mdev)) { - drbd_al_to_on_disk_bm(mdev); - put_ldev(mdev); - } + /* writeout of activity log covered areas of the bitmap + * to stable storage done in after state change already */ if (mdev->state.conn >= C_WF_REPORT_PARAMS) { /* if this was forced, we should consider sync */ @@ -1174,7 +1172,10 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp if (cp_discovered) { drbd_al_apply_to_bm(mdev); - drbd_al_to_on_disk_bm(mdev); + if (drbd_bitmap_io(mdev, &drbd_bm_write, "crashed primary apply AL")) { + retcode = ERR_IO_MD_DISK; + goto force_diskless_dec; + } } if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev)) diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 9fe3e890da0f..d17f2ed777ce 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -907,10 +907,8 @@ out: drbd_md_sync(mdev); - if (test_and_clear_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags)) { - dev_info(DEV, "Writing the whole bitmap\n"); - drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished"); - } + dev_info(DEV, "Writing changed bitmap pages\n"); + drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished"); if (khelper_cmd) drbd_khelper(mdev, khelper_cmd); @@ -1127,7 +1125,6 @@ void drbd_ov_oos_found(struct drbd_conf *mdev, sector_t sector, int size) mdev->ov_last_oos_size = size>>9; } drbd_set_out_of_sync(mdev, sector, size); - set_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags); } int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel) From 4b0715f09655e76ca24c35a9e25e7c464c2f7346 Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Tue, 14 Dec 2010 15:13:04 +0100 Subject: [PATCH 065/122] drbd: allow petabyte storage on 64bit arch Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_bitmap.c | 166 +++++++++++++++++++------------ drivers/block/drbd/drbd_int.h | 41 +++++--- drivers/block/drbd/drbd_nl.c | 8 +- drivers/block/drbd/drbd_proc.c | 6 +- drivers/block/drbd/drbd_worker.c | 2 +- 5 files changed, 140 insertions(+), 83 deletions(-) diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index 72cd41a96ef9..0e31e573af72 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c @@ -37,10 +37,46 @@ * convention: * function name drbd_bm_... => used elsewhere, "public". * function name bm_... => internal to implementation, "private". + */ - * Note that since find_first_bit returns int, at the current granularity of - * the bitmap (4KB per byte), this implementation "only" supports up to - * 1<<(32+12) == 16 TB... 
+ +/* + * LIMITATIONS: + * We want to support >= peta byte of backend storage, while for now still using + * a granularity of one bit per 4KiB of storage. + * 1 << 50 bytes backend storage (1 PiB) + * 1 << (50 - 12) bits needed + * 38 --> we need u64 to index and count bits + * 1 << (38 - 3) bitmap bytes needed + * 35 --> we still need u64 to index and count bytes + * (that's 32 GiB of bitmap for 1 PiB storage) + * 1 << (35 - 2) 32bit longs needed + * 33 --> we'd even need u64 to index and count 32bit long words. + * 1 << (35 - 3) 64bit longs needed + * 32 --> we could get away with a 32bit unsigned int to index and count + * 64bit long words, but I rather stay with unsigned long for now. + * We probably should neither count nor point to bytes or long words + * directly, but either by bitnumber, or by page index and offset. + * 1 << (35 - 12) + * 22 --> we need that much 4KiB pages of bitmap. + * 1 << (22 + 3) --> on a 64bit arch, + * we need 32 MiB to store the array of page pointers. + * + * Because I'm lazy, and because the resulting patch was too large, too ugly + * and still incomplete, on 32bit we still "only" support 16 TiB (minus some), + * (1 << 32) bits * 4k storage. + * + + * bitmap storage and IO: + * Bitmap is stored little endian on disk, and is kept little endian in + * core memory. Currently we still hold the full bitmap in core as long + * as we are "attached" to a local disk, which at 32 GiB for 1PiB storage + * seems excessive. + * + * We plan to reduce the amount of in-core bitmap pages by pageing them in + * and out against their on-disk location as necessary, but need to make + * sure we don't cause too much meta data IO, and must not deadlock in + * tight memory situations. This needs some more work. */ /* @@ -56,13 +92,9 @@ struct drbd_bitmap { struct page **bm_pages; spinlock_t bm_lock; - /* WARNING unsigned long bm_*: - * 32bit number of bit offset is just enough for 512 MB bitmap. - * it will blow up if we make the bitmap bigger... - * not that it makes much sense to have a bitmap that large, - * rather change the granularity to 16k or 64k or something. - * (that implies other problems, however...) - */ + + /* see LIMITATIONS: above */ + unsigned long bm_set; /* nr of set bits; THINK maybe atomic_t? */ unsigned long bm_bits; size_t bm_words; @@ -517,43 +549,39 @@ static void bm_set_surplus(struct drbd_bitmap *b) bm_unmap(p_addr); } +/* you better not modify the bitmap while this is running, + * or its results will be stale */ static unsigned long bm_count_bits(struct drbd_bitmap *b) { - unsigned long *p_addr, *bm, offset = 0; + unsigned long *p_addr; unsigned long bits = 0; - unsigned long i, do_now; - unsigned long words; + unsigned long mask = (1UL << (b->bm_bits & BITS_PER_LONG_MASK)) -1; + int idx, last_page, i, last_word; - /* due to 64bit alignment, the last long on a 32bit arch - * may be not used at all. The last used long will likely - * be only partially used, always. Don't count those bits, - * but mask them out. 
*/ - words = (b->bm_bits + BITS_PER_LONG - 1) >> LN2_BPL; + /* because of the "extra long to catch oob access" we allocate in + * drbd_bm_resize, bm_number_of_pages -1 is not necessarily the page + * containing the last _relevant_ bitmap word */ + last_page = bm_bit_to_page_idx(b, b->bm_bits-1); - while (offset < words) { - i = do_now = min_t(size_t, words-offset, LWPP); - p_addr = __bm_map_pidx(b, bm_word_to_page_idx(b, offset), KM_USER0); - bm = p_addr + MLPP(offset); - while (i--) { - bits += hweight_long(*bm++); - } - offset += do_now; - if (offset == words) { - /* last word may only be partially used, - * see also bm_clear_surplus. */ - i = (1UL << (b->bm_bits & (BITS_PER_LONG-1))) -1; - if (i) { - bits -= hweight_long(p_addr[do_now-1] & ~i); - p_addr[do_now-1] &= i; - } - /* 32bit arch, may have an unused padding long */ - if (words != b->bm_words) - p_addr[do_now] = 0; - } + /* all but last page */ + for (idx = 0; idx < last_page; idx++) { + p_addr = __bm_map_pidx(b, idx, KM_USER0); + for (i = 0; i < LWPP; i++) + bits += hweight_long(p_addr[i]); __bm_unmap(p_addr, KM_USER0); cond_resched(); } - + /* last (or only) page */ + last_word = ((b->bm_bits - 1) & BITS_PER_PAGE_MASK) >> LN2_BPL; + p_addr = __bm_map_pidx(b, idx, KM_USER0); + for (i = 0; i < last_word; i++) + bits += hweight_long(p_addr[i]); + p_addr[last_word] &= cpu_to_lel(mask); + bits += hweight_long(p_addr[last_word]); + /* 32bit arch, may have an unused padding long */ + if (BITS_PER_LONG == 32 && (last_word & 1) == 0) + p_addr[last_word+1] = 0; + __bm_unmap(p_addr, KM_USER0); return bits; } @@ -564,8 +592,6 @@ static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len) unsigned int idx; size_t do_now, end; -#define BM_SECTORS_PER_BIT (BM_BLOCK_SIZE/512) - end = offset + len; if (end > b->bm_words) { @@ -645,8 +671,14 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits) words = ALIGN(bits, 64) >> LN2_BPL; if (get_ldev(mdev)) { - D_ASSERT((u64)bits <= (((u64)mdev->ldev->md.md_size_sect-MD_BM_OFFSET) << 12)); + u64 bits_on_disk = ((u64)mdev->ldev->md.md_size_sect-MD_BM_OFFSET) << 12; put_ldev(mdev); + if (bits > bits_on_disk) { + dev_info(DEV, "bits = %lu\n", bits); + dev_info(DEV, "bits_on_disk = %llu\n", bits_on_disk); + err = -ENOSPC; + goto out; + } } /* one extra long to catch off by one errors */ @@ -1113,9 +1145,12 @@ int drbd_bm_write_lazy(struct drbd_conf *mdev, unsigned upper_idx) __must_hold(l * @mdev: DRBD device. * @idx: bitmap page index * - * We don't want to special case on logical_block_size of the underlaying - * device, so we submit PAGE_SIZE aligned pieces containing the requested enr. + * We don't want to special case on logical_block_size of the backend device, + * so we submit PAGE_SIZE aligned pieces. * Note that on "most" systems, PAGE_SIZE is 4k. + * + * In case this becomes an issue on systems with larger PAGE_SIZE, + * we may want to change this again to write 4k aligned 4k pieces. */ int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local) { @@ -1144,52 +1179,57 @@ int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(loc /* NOTE * find_first_bit returns int, we return unsigned long. - * should not make much difference anyways, but ... + * For this to work on 32bit arch with bitnumbers > (1<<32), + * we'd need to return u64, and get a whole lot of other places + * fixed where we still use unsigned long. * * this returns a bit number, NOT a sector! 
*/ -#define BPP_MASK ((1UL << (PAGE_SHIFT+3)) - 1) static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo, const int find_zero_bit, const enum km_type km) { struct drbd_bitmap *b = mdev->bitmap; - unsigned long i = -1UL; unsigned long *p_addr; - unsigned long bit_offset; /* bit offset of the mapped page. */ + unsigned long bit_offset; + unsigned i; + if (bm_fo > b->bm_bits) { dev_err(DEV, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits); + bm_fo = DRBD_END_OF_BITMAP; } else { while (bm_fo < b->bm_bits) { /* bit offset of the first bit in the page */ - bit_offset = bm_fo & ~BPP_MASK; + bit_offset = bm_fo & ~BITS_PER_PAGE_MASK; p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo), km); if (find_zero_bit) - i = generic_find_next_zero_le_bit(p_addr, PAGE_SIZE*8, bm_fo & BPP_MASK); + i = generic_find_next_zero_le_bit(p_addr, + PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK); else - i = generic_find_next_le_bit(p_addr, PAGE_SIZE*8, bm_fo & BPP_MASK); + i = generic_find_next_le_bit(p_addr, + PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK); __bm_unmap(p_addr, km); if (i < PAGE_SIZE*8) { - i = bit_offset + i; - if (i >= b->bm_bits) + bm_fo = bit_offset + i; + if (bm_fo >= b->bm_bits) break; goto found; } bm_fo = bit_offset + PAGE_SIZE*8; } - i = -1UL; + bm_fo = DRBD_END_OF_BITMAP; } found: - return i; + return bm_fo; } static unsigned long bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo, const int find_zero_bit) { struct drbd_bitmap *b = mdev->bitmap; - unsigned long i = -1UL; + unsigned long i = DRBD_END_OF_BITMAP; ERR_IF(!b) return i; ERR_IF(!b->bm_pages) return i; @@ -1267,9 +1307,9 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s, last_page_nr = page_nr; } if (val) - c += (0 == generic___test_and_set_le_bit(bitnr & BPP_MASK, p_addr)); + c += (0 == generic___test_and_set_le_bit(bitnr & BITS_PER_PAGE_MASK, p_addr)); else - c -= (0 != generic___test_and_clear_le_bit(bitnr & BPP_MASK, p_addr)); + c -= (0 != generic___test_and_clear_le_bit(bitnr & BITS_PER_PAGE_MASK, p_addr)); } if (p_addr) __bm_unmap(p_addr, km); @@ -1418,7 +1458,7 @@ int drbd_bm_test_bit(struct drbd_conf *mdev, const unsigned long bitnr) bm_print_lock_info(mdev); if (bitnr < b->bm_bits) { p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, bitnr)); - i = generic_test_le_bit(bitnr & BPP_MASK, p_addr) ? 1 : 0; + i = generic_test_le_bit(bitnr & BITS_PER_PAGE_MASK, p_addr) ? 1 : 0; bm_unmap(p_addr); } else if (bitnr == b->bm_bits) { i = -1; @@ -1517,13 +1557,15 @@ int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr) return count; } -/* set all bits covered by the AL-extent al_enr */ +/* Set all bits covered by the AL-extent al_enr. + * Returns number of bits changed. 
*/ unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, unsigned long al_enr) { struct drbd_bitmap *b = mdev->bitmap; unsigned long *p_addr, *bm; unsigned long weight; - int count, s, e, i, do_now; + unsigned long s, e; + int count, i, do_now; ERR_IF(!b) return 0; ERR_IF(!b->bm_pages) return 0; @@ -1552,7 +1594,7 @@ unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, unsigned long al_enr) if (e == b->bm_words) b->bm_set -= bm_clear_surplus(b); } else { - dev_err(DEV, "start offset (%d) too large in drbd_bm_ALe_set_all\n", s); + dev_err(DEV, "start offset (%lu) too large in drbd_bm_ALe_set_all\n", s); } weight = b->bm_set - weight; spin_unlock_irq(&b->bm_lock); diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 74cc50a21822..5a2d0ec72b34 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -1003,9 +1003,9 @@ struct drbd_conf { struct hlist_head *tl_hash; unsigned int tl_hash_s; - /* blocks to sync in this run [unit BM_BLOCK_SIZE] */ + /* blocks to resync in this run [unit BM_BLOCK_SIZE] */ unsigned long rs_total; - /* number of sync IOs that failed in this run */ + /* number of resync blocks that failed in this run */ unsigned long rs_failed; /* Syncer's start time [unit jiffies] */ unsigned long rs_start; @@ -1399,7 +1399,9 @@ struct bm_extent { * you should use 64bit OS for that much storage, anyways. */ #define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0xffff7fff) #else -#define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0x1LU << 32) +/* we allow up to 1 PiB now on 64bit architecture with "flexible" meta data */ +#define DRBD_MAX_SECTORS_FLEX (1UL << 51) +/* corresponds to (1UL << 38) bits right now. */ #endif #endif @@ -1419,11 +1421,15 @@ extern int drbd_bm_resize(struct drbd_conf *mdev, sector_t sectors, int set_new extern void drbd_bm_cleanup(struct drbd_conf *mdev); extern void drbd_bm_set_all(struct drbd_conf *mdev); extern void drbd_bm_clear_all(struct drbd_conf *mdev); +/* set/clear/test only a few bits at a time */ extern int drbd_bm_set_bits( struct drbd_conf *mdev, unsigned long s, unsigned long e); extern int drbd_bm_clear_bits( struct drbd_conf *mdev, unsigned long s, unsigned long e); -/* bm_set_bits variant for use while holding drbd_bm_lock */ +extern int drbd_bm_count_bits( + struct drbd_conf *mdev, const unsigned long s, const unsigned long e); +/* bm_set_bits variant for use while holding drbd_bm_lock, + * may process the whole bitmap in one go */ extern void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e); extern int drbd_bm_test_bit(struct drbd_conf *mdev, unsigned long bitnr); @@ -1436,6 +1442,8 @@ extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, extern size_t drbd_bm_words(struct drbd_conf *mdev); extern unsigned long drbd_bm_bits(struct drbd_conf *mdev); extern sector_t drbd_bm_capacity(struct drbd_conf *mdev); + +#define DRBD_END_OF_BITMAP (~(unsigned long)0) extern unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo); /* bm_find_next variants for use while you hold drbd_bm_lock() */ extern unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo); @@ -1452,8 +1460,6 @@ extern void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, extern void drbd_bm_lock(struct drbd_conf *mdev, char *why); extern void drbd_bm_unlock(struct drbd_conf *mdev); - -extern int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e); /* drbd_main.c */ extern struct kmem_cache 
*drbd_request_cache; @@ -2158,10 +2164,8 @@ extern int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins) static inline void drbd_get_syncer_progress(struct drbd_conf *mdev, unsigned long *bits_left, unsigned int *per_mil_done) { - /* - * this is to break it at compile time when we change that - * (we may feel 4TB maximum storage per drbd is not enough) - */ + /* this is to break it at compile time when we change that, in case we + * want to support more than (1<<32) bits on a 32bit arch. */ typecheck(unsigned long, mdev->rs_total); /* note: both rs_total and rs_left are in bits, i.e. in @@ -2186,10 +2190,19 @@ static inline void drbd_get_syncer_progress(struct drbd_conf *mdev, *bits_left, mdev->rs_total, mdev->rs_failed); *per_mil_done = 0; } else { - /* make sure the calculation happens in long context */ - unsigned long tmp = 1000UL - - (*bits_left >> 10)*1000UL - / ((mdev->rs_total >> 10) + 1UL); + /* Make sure the division happens in long context. + * We allow up to one petabyte storage right now, + * at a granularity of 4k per bit that is 2**38 bits. + * After shift right and multiplication by 1000, + * this should still fit easily into a 32bit long, + * so we don't need a 64bit division on 32bit arch. + * Note: currently we don't support such large bitmaps on 32bit + * arch anyways, but no harm done to be prepared for it here. + */ + unsigned int shift = mdev->rs_total >= (1ULL << 32) ? 16 : 10; + unsigned long left = *bits_left >> shift; + unsigned long total = 1UL + (mdev->rs_total >> shift); + unsigned long tmp = 1000UL - left * 1000UL/total; *per_mil_done = tmp; } } diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 77dc022eaf6b..a46bc0287e21 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -527,17 +527,19 @@ static void drbd_md_set_sector_offsets(struct drbd_conf *mdev, } } +/* input size is expected to be in KB */ char *ppsize(char *buf, unsigned long long size) { - /* Needs 9 bytes at max. 
*/ + /* Needs 9 bytes at max including trailing NUL: + * -1ULL ==> "16384 EB" */ static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' }; int base = 0; - while (size >= 10000) { + while (size >= 10000 && base < sizeof(units)-1) { /* shift + round */ size = (size >> 10) + !!(size & (1<<9)); base++; } - sprintf(buf, "%lu %cB", (long)size, units[base]); + sprintf(buf, "%u %cB", (unsigned)size, units[base]); return buf; } diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c index efba62cd2e58..2959cdfb77f5 100644 --- a/drivers/block/drbd/drbd_proc.c +++ b/drivers/block/drbd/drbd_proc.c @@ -91,9 +91,9 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq) seq_printf(seq, "sync'ed:"); seq_printf(seq, "%3u.%u%% ", res / 10, res % 10); - /* if more than 1 GB display in MB */ - if (mdev->rs_total > 0x100000L) - seq_printf(seq, "(%lu/%lu)M\n\t", + /* if more than a few GB, display in MB */ + if (mdev->rs_total > (4UL << (30 - BM_BLOCK_SHIFT))) + seq_printf(seq, "(%lu/%lu)M", (unsigned long) Bit2KB(rs_left >> 10), (unsigned long) Bit2KB(mdev->rs_total >> 10)); else diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index d17f2ed777ce..be46084c254e 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -577,7 +577,7 @@ next_sector: size = BM_BLOCK_SIZE; bit = drbd_bm_find_next(mdev, mdev->bm_resync_fo); - if (bit == -1UL) { + if (bit == DRBD_END_OF_BITMAP) { mdev->bm_resync_fo = drbd_bm_bits(mdev); mdev->resync_work.cb = w_resync_inactive; put_ldev(mdev); From 6850c4421481139dc2cf982358e79c833a50d73c Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Thu, 16 Dec 2010 00:32:38 +0100 Subject: [PATCH 066/122] drbd: get rid of unused debug code Long time ago, we had paranoia code in the bitmap that allocated one extra word, assigned a magic value, and checked on every occasion that the magic value was still unchanged. That debug code is unused, the extra long word complicates code a bit. Get rid of it. 
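For readers who have not seen this kind of guard before, here is a minimal user-space sketch of the general "canary word" pattern being retired (illustration only, not the DRBD code; the helper names are invented, while in the hunks below the extra word was being set to DRBD_MAGIC):

#include <assert.h>
#include <stdlib.h>

#define CANARY 0x83740267UL	/* arbitrary magic value for this sketch */

/* allocate nwords usable words plus one hidden guard word at the end */
static unsigned long *alloc_with_canary(size_t nwords)
{
	unsigned long *p = malloc((nwords + 1) * sizeof(*p));
	if (p)
		p[nwords] = CANARY;
	return p;
}

/* any out-of-bounds write past the real end trips this check */
static void check_canary(const unsigned long *p, size_t nwords)
{
	assert(p[nwords] == CANARY);
}

int main(void)
{
	unsigned long *words = alloc_with_canary(16);

	if (!words)
		return 1;
	words[15] = 42;			/* in-bounds write is fine */
	check_canary(words, 16);
	free(words);
	return 0;
}

The cost the patch objects to is that the extra "+ 1" word leaks into the page-count and last-page calculations elsewhere, which is exactly what the hunks below simplify.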
Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_bitmap.c | 39 ++++++++------------------------ 1 file changed, 9 insertions(+), 30 deletions(-) diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index 0e31e573af72..09d208b858e7 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c @@ -488,10 +488,7 @@ static int bm_clear_surplus(struct drbd_bitmap *b) * on disk and in core memory alike */ mask = cpu_to_lel(mask); - /* because of the "extra long to catch oob access" we allocate in - * drbd_bm_resize, bm_number_of_pages -1 is not necessarily the page - * containing the last _relevant_ bitmap word */ - p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, b->bm_bits - 1)); + p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1); bm = p_addr + (tmp/BITS_PER_LONG); if (mask) { /* If mask != 0, we are not exactly aligned, so bm now points @@ -527,10 +524,7 @@ static void bm_set_surplus(struct drbd_bitmap *b) * on disk and in core memory alike */ mask = cpu_to_lel(mask); - /* because of the "extra long to catch oob access" we allocate in - * drbd_bm_resize, bm_number_of_pages -1 is not necessarily the page - * containing the last _relevant_ bitmap word */ - p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, b->bm_bits - 1)); + p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1); bm = p_addr + (tmp/BITS_PER_LONG); if (mask) { /* If mask != 0, we are not exactly aligned, so bm now points @@ -556,15 +550,10 @@ static unsigned long bm_count_bits(struct drbd_bitmap *b) unsigned long *p_addr; unsigned long bits = 0; unsigned long mask = (1UL << (b->bm_bits & BITS_PER_LONG_MASK)) -1; - int idx, last_page, i, last_word; - - /* because of the "extra long to catch oob access" we allocate in - * drbd_bm_resize, bm_number_of_pages -1 is not necessarily the page - * containing the last _relevant_ bitmap word */ - last_page = bm_bit_to_page_idx(b, b->bm_bits-1); + int idx, i, last_word; /* all but last page */ - for (idx = 0; idx < last_page; idx++) { + for (idx = 0; idx < b->bm_number_of_pages - 1; idx++) { p_addr = __bm_map_pidx(b, idx, KM_USER0); for (i = 0; i < LWPP; i++) bits += hweight_long(p_addr[i]); @@ -627,7 +616,7 @@ static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len) int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits) { struct drbd_bitmap *b = mdev->bitmap; - unsigned long bits, words, owords, obits, *p_addr, *bm; + unsigned long bits, words, owords, obits; unsigned long want, have, onpages; /* number of pages */ struct page **npages, **opages = NULL; int err = 0, growing; @@ -681,8 +670,7 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits) } } - /* one extra long to catch off by one errors */ - want = ALIGN((words+1)*sizeof(long), PAGE_SIZE) >> PAGE_SHIFT; + want = ALIGN(words*sizeof(long), PAGE_SIZE) >> PAGE_SHIFT; have = b->bm_number_of_pages; if (want == have) { D_ASSERT(b->bm_pages != NULL); @@ -728,11 +716,6 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits) bm_free_pages(opages + want, have - want); } - p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, words)); - bm = p_addr + MLPP(words); - *bm = DRBD_MAGIC; - bm_unmap(p_addr); - (void)bm_clear_surplus(b); spin_unlock_irq(&b->bm_lock); @@ -845,7 +828,6 @@ void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number, */ if (end == b->bm_words) b->bm_set -= bm_clear_surplus(b); - spin_unlock_irq(&b->bm_lock); } @@ -1030,7 +1012,7 
@@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_id struct bm_aio_ctx ctx = { .flags = lazy_writeout_upper_idx ? BM_AIO_COPY_PAGES : 0 }; struct drbd_bitmap *b = mdev->bitmap; - int last_page, i, count = 0; + int num_pages, i, count = 0; unsigned long now; char ppb[10]; int err = 0; @@ -1046,10 +1028,7 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_id if (!ctx.flags) WARN_ON(!bm_is_locked(b)); - /* because of the "extra long to catch oob access" we allocate in - * drbd_bm_resize, bm_number_of_pages -1 is not necessarily the page - * containing the last _relevant_ bitmap word */ - last_page = bm_word_to_page_idx(b, b->bm_words - 1); + num_pages = b->bm_number_of_pages; now = jiffies; ctx.mdev = mdev; @@ -1058,7 +1037,7 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_id ctx.error = 0; /* let the layers below us try to merge these bios... */ - for (i = 0; i <= last_page; i++) { + for (i = 0; i < num_pages; i++) { /* ignore completely unchanged pages */ if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx) break; From 84e7c0f7d123d64d97e1f789ad2f23a72fe8981f Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Thu, 16 Dec 2010 00:37:57 +0100 Subject: [PATCH 067/122] drbd: Removed a reference to debug macros removed long time ago Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_bitmap.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index 09d208b858e7..423dede45844 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c @@ -596,9 +596,8 @@ static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len) if (bm+do_now > p_addr + LWPP) { printk(KERN_ALERT "drbd: BUG BUG BUG! p_addr:%p bm:%p do_now:%d\n", p_addr, bm, (int)do_now); - break; /* breaks to after catch_oob_access_end() only! */ - } - memset(bm, c, do_now * sizeof(long)); + } else + memset(bm, c, do_now * sizeof(long)); bm_unmap(p_addr); bm_set_page_need_writeout(b->bm_pages[idx]); offset += do_now; From 02851e9f00d78dbc8ded0aacbf9bf3b631d627b3 Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Thu, 16 Dec 2010 14:47:39 +0100 Subject: [PATCH 068/122] drbd: move bitmap write from resync_finished to after_state_change We must not call it directly from resync_finished, as we may be in either receiver or worker context there. 
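The fix below instead queues the writeout via drbd_queue_bitmap_io() from after_state_ch(), so it always runs in the worker. A rough sketch of that "queue now, execute later in one known context" idea (illustration only; struct work_item and both function names are invented here, and all locking is omitted for brevity):

#include <stddef.h>

struct work_item {
	struct work_item *next;
	int (*fn)(void *ctx);	/* e.g. the deferred bitmap writeout */
	void *ctx;
};

struct work_queue {
	struct work_item *head, *tail;
};

/* may be called from any context; it only links the item into the list */
static void queue_work_item(struct work_queue *q, struct work_item *w)
{
	w->next = NULL;
	if (q->tail)
		q->tail->next = w;
	else
		q->head = w;
	q->tail = w;
}

/* runs later, always in the dedicated worker thread, where sleeping and
 * taking the locks the deferred function needs is known to be safe */
static void run_work_items(struct work_queue *q)
{
	struct work_item *w;

	while ((w = q->head) != NULL) {
		q->head = w->next;
		if (!q->head)
			q->tail = NULL;
		w->fn(w->ctx);
	}
}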
Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_main.c | 14 ++++++++++---- drivers/block/drbd/drbd_worker.c | 3 --- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 57ed7181742d..c6c7e3e6dc23 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -1548,6 +1548,9 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED) drbd_send_state(mdev); + if (os.conn > C_CONNECTED && ns.conn == C_CONNECTED) + drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished"); + /* free tl_hash if we Got thawed and are C_STANDALONE */ if (ns.conn == C_STANDALONE && !is_susp(ns) && mdev->tl_hash) drbd_free_tl_hash(mdev); @@ -3860,13 +3863,16 @@ int drbd_bmio_clear_n_write(struct drbd_conf *mdev) static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused) { struct bm_io_work *work = container_of(w, struct bm_io_work, w); - int rv; + int rv = -EIO; D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0); - drbd_bm_lock(mdev, work->why); - rv = work->io_fn(mdev); - drbd_bm_unlock(mdev); + if (get_ldev(mdev)) { + drbd_bm_lock(mdev, work->why); + rv = work->io_fn(mdev); + drbd_bm_unlock(mdev); + put_ldev(mdev); + } clear_bit(BITMAP_IO, &mdev->flags); smp_mb__after_clear_bit(); diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index be46084c254e..2374454cdf17 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -907,9 +907,6 @@ out: drbd_md_sync(mdev); - dev_info(DEV, "Writing changed bitmap pages\n"); - drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished"); - if (khelper_cmd) drbd_khelper(mdev, khelper_cmd); From 2265b473aecc1a6fe1f84a0ee272ba39806c2a8a Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Thu, 16 Dec 2010 15:41:26 +0100 Subject: [PATCH 069/122] drbd: fix potential dereference of NULL pointer If drbd used to have crypto digest algorithms configured, then is being unconfigured (but not unloaded), it frees the algorithms, but does not reset the config. If it then is reconfigured to use the very same algorithm, it "forgot" to re-allocate the algorithms, thinking that the config has not changed in that aspect. It will then Oops on the first attempt to actually use those algorithms. Fix this by resetting the config to defaults after cleanup. 
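The bug class is easy to reproduce outside the kernel. A minimal, self-contained sketch of it (hypothetical names throughout, nothing here is the DRBD API; malloc merely stands in for the crypto allocation):

#include <stdlib.h>
#include <string.h>

struct cfg { char algo[32]; };			/* stands in for the configured digest name */
struct dev { struct cfg conf; void *tfm; };	/* tfm stands in for the crypto handle */

/* only (re)allocates when the requested algorithm differs from the stored one */
static void reconfigure(struct dev *d, const char *algo)
{
	if (strcmp(d->conf.algo, algo) != 0) {
		free(d->tfm);
		d->tfm = malloc(64);		/* placeholder for the real allocation */
		strncpy(d->conf.algo, algo, sizeof(d->conf.algo) - 1);
	}
}

static void unconfigure(struct dev *d)
{
	free(d->tfm);
	d->tfm = NULL;
	/* the fix illustrated: also reset the stored config; without this line a
	 * later reconfigure() with the same name sees "unchanged", skips the
	 * allocation, and leaves tfm NULL for the next user to dereference */
	memset(&d->conf, 0, sizeof(d->conf));
}

int main(void)
{
	struct dev d = { { "" }, NULL };

	reconfigure(&d, "sha1");	/* allocates */
	unconfigure(&d);		/* frees, and resets the config */
	reconfigure(&d, "sha1");	/* allocates again instead of keeping a stale NULL */
	free(d.tfm);
	return 0;
}

In the hunk below, the role of the memset is played by calling drbd_set_defaults(mdev) at the end of drbd_mdev_cleanup().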
Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_main.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index c6c7e3e6dc23..4da6f11cc82e 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -3020,6 +3020,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev) D_ASSERT(list_empty(&mdev->resync_work.list)); D_ASSERT(list_empty(&mdev->unplug_work.list)); D_ASSERT(list_empty(&mdev->go_diskless.list)); + + drbd_set_defaults(mdev); } From c88d65e2231dbae4b7cd0ad7b2a919857a1be171 Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Mon, 20 Dec 2010 15:29:28 +0100 Subject: [PATCH 070/122] drbd: Documenting drbd_should_do_remote() and drbd_should_send_oos() Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_req.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 528909090df7..889175110c91 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -762,7 +762,7 @@ static int drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int s return 0 == drbd_bm_count_bits(mdev, sbnr, ebnr); } -static int drbd_should_do_remote(struct drbd_conf *mdev) +static bool drbd_should_do_remote(struct drbd_conf *mdev) { union drbd_state s = mdev->state; @@ -770,13 +770,17 @@ static int drbd_should_do_remote(struct drbd_conf *mdev) (s.pdsk >= D_INCONSISTENT && s.conn >= C_WF_BITMAP_T && s.conn < C_AHEAD); + /* Before proto 96 that was >= CONNECTED instead of >= C_WF_BITMAP_T. + That is equivalent since before 96 IO was frozen in the C_WF_BITMAP* + states. */ } -static int drbd_should_send_oos(struct drbd_conf *mdev) +static bool drbd_should_send_oos(struct drbd_conf *mdev) { union drbd_state s = mdev->state; - return s.pdsk >= D_INCONSISTENT && - (s.conn == C_AHEAD || s.conn == C_WF_BITMAP_S); + return s.conn == C_AHEAD || s.conn == C_WF_BITMAP_S; + /* pdsk = D_INCONSISTENT as a consequence. Protocol 96 check not necessary + since we enter state C_AHEAD only if proto >= 96 */ } static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time) From f735e3635430c6d1c319664d82b34376e3f9aa17 Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Fri, 17 Dec 2010 21:06:18 +0100 Subject: [PATCH 071/122] drbd: add debugging assert to make sure the protocol is clean We expect to only receive the recently introduced "set out of sync" packets in specific states. If we receive them in different states, that may confuse the resync process to the point where it won't terminate, or think it made negative progress. 
Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_receiver.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index be7fc67eeeca..f0a0f66fbe68 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -3599,6 +3599,16 @@ static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packets cmd, un { struct p_block_desc *p = &mdev->data.rbuf.block_desc; + switch (mdev->state.conn) { + case C_WF_SYNC_UUID: + case C_WF_BITMAP_T: + case C_BEHIND: + break; + default: + dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n", + drbd_conn_str(mdev->state.conn)); + } + drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize)); return true; From 5a22db8968a69bec835d1ed9a96ab3381719e0c0 Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Fri, 17 Dec 2010 21:14:23 +0100 Subject: [PATCH 072/122] drbd: serialize sending of resync uuid with pending w_send_oos To improve the latency of IO requests during bitmap exchange, we recently allowed writes while waiting for the bitmap, sending "set out-of-sync" information packets for any newly dirtied bits. We have to make sure that the new resync-uuid does not overtake these "set oos" packets. Once the resync-uuid is received, the sync target starts the resync process, and expects the bitmap to only be cleared, not re-set. If we use this protocol extension, we queue the generation and sending of the resync-uuid on the worker, which naturally serializes with all previously queued packets. Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_int.h | 2 +- drivers/block/drbd/drbd_main.c | 22 +++++++++++++++++++--- drivers/block/drbd/drbd_worker.c | 27 ++++++++++++++------------- 3 files changed, 34 insertions(+), 17 deletions(-) diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 5a2d0ec72b34..ec06e744be42 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -1204,7 +1204,7 @@ extern int drbd_send(struct drbd_conf *mdev, struct socket *sock, extern int drbd_send_protocol(struct drbd_conf *mdev); extern int drbd_send_uuids(struct drbd_conf *mdev); extern int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev); -extern int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val); +extern int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev); extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags); extern int _drbd_send_state(struct drbd_conf *mdev); extern int drbd_send_state(struct drbd_conf *mdev); diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 4da6f11cc82e..2190064d59bd 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -1387,6 +1387,17 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, spin_unlock_irq(&mdev->req_lock); } + /* Became sync source. With protocol >= 96, we still need to send out + * the sync uuid now. Need to do that before any drbd_send_state, or + * the other side may go "paused sync" before receiving the sync uuids, + * which is unexpected. 
*/ + if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) && + (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) && + mdev->agreed_pro_version >= 96 && get_ldev(mdev)) { + drbd_gen_and_send_sync_uuid(mdev); + put_ldev(mdev); + } + /* Do not change the order of the if above and the two below... */ if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) { /* attach on the peer */ drbd_send_uuids(mdev); @@ -1980,12 +1991,17 @@ int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev) return _drbd_send_uuids(mdev, 8); } - -int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val) +int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev) { struct p_rs_uuid p; + u64 uuid; - p.uuid = cpu_to_be64(val); + D_ASSERT(mdev->state.disk == D_UP_TO_DATE); + + get_random_bytes(&uuid, sizeof(u64)); + drbd_uuid_set(mdev, UI_BITMAP, uuid); + drbd_md_sync(mdev); + p.uuid = cpu_to_be64(uuid); return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID, (struct p_header80 *)&p, sizeof(p)); diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 2374454cdf17..3df37e65c118 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -1516,18 +1516,6 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side) return; } - if (side == C_SYNC_TARGET) { - mdev->bm_resync_fo = 0; - } else /* side == C_SYNC_SOURCE */ { - u64 uuid; - - get_random_bytes(&uuid, sizeof(u64)); - drbd_uuid_set(mdev, UI_BITMAP, uuid); - drbd_send_sync_uuid(mdev, uuid); - - D_ASSERT(mdev->state.disk == D_UP_TO_DATE); - } - write_lock_irq(&global_state_lock); ns = mdev->state; @@ -1565,7 +1553,19 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side) _drbd_pause_after(mdev); } write_unlock_irq(&global_state_lock); - put_ldev(mdev); + + if (side == C_SYNC_TARGET) + mdev->bm_resync_fo = 0; + + /* Since protocol 96, we must serialize drbd_gen_and_send_sync_uuid + * with w_send_oos, or the sync target will get confused as to + * how much bits to resync. We cannot do that always, because for an + * empty resync and protocol < 95, we need to do it here, as we call + * drbd_resync_finished from here in that case. + * We drbd_gen_and_send_sync_uuid here for protocol < 96, + * and from after_state_ch otherwise. */ + if (side == C_SYNC_SOURCE && mdev->agreed_pro_version < 96) + drbd_gen_and_send_sync_uuid(mdev); if (r == SS_SUCCESS) { dev_info(DEV, "Began resync as %s (will sync %lu KB [%lu bits set]).\n", @@ -1601,6 +1601,7 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side) drbd_md_sync(mdev); } + put_ldev(mdev); drbd_state_unlock(mdev); } From 7648cdfe52daf0ca4fa9489879dea9e089b0dfe1 Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Fri, 17 Dec 2010 23:58:41 +0100 Subject: [PATCH 073/122] drbd: be less noisy with some log messages We expect changes to a bitmap page in drbd_bm_write_page, that's why we submit a copy page. If a page changes during global writeout, that would be unexpected, and reason to warn, though. Also, often page writeout can be skipped (on activity log transactions during normal operation, for example), no need to log that everytime. 
Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_bitmap.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index 423dede45844..314a3632303b 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c @@ -920,8 +920,9 @@ static void bm_async_io_complete(struct bio *bio, int error) if (!error && !uptodate) error = -EIO; - if (!bm_test_page_unchanged(b->bm_pages[idx])) - dev_info(DEV, "bitmap page idx %u changed during IO!\n", idx); + if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 && + !bm_test_page_unchanged(b->bm_pages[idx])) + dev_warn(DEV, "bitmap page idx %u changed during IO!\n", idx); if (error) { /* ctx error will hold the completed-last non-zero error code, @@ -1135,7 +1136,7 @@ int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(loc struct bm_aio_ctx ctx = { .flags = BM_AIO_COPY_PAGES, }; if (bm_test_page_unchanged(mdev->bitmap->bm_pages[idx])) { - dev_info(DEV, "skipped bm page write for idx %u\n", idx); + dynamic_dev_dbg(DEV, "skipped bm page write for idx %u\n", idx); return 0; } From 418e0a927d520f9c8e875ea75abee35d93a0f1b3 Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Sat, 18 Dec 2010 13:36:54 +0100 Subject: [PATCH 074/122] drbd: spelling fix in log message Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 2190064d59bd..f529c25e1ad5 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -1351,7 +1351,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, if (ns.conn == C_CONNECTED) what = resend, nsm.susp_nod = 0; else /* ns.conn > C_CONNECTED */ - dev_err(DEV, "Unexpected Resynd going on!\n"); + dev_err(DEV, "Unexpected Resync going on!\n"); } if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING) From 06d33e968d2c58143a7aaafa8963cf6a58099467 Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Sat, 18 Dec 2010 17:00:59 +0100 Subject: [PATCH 075/122] drbd: improve on bitmap write out timing Even though we now track the need for bitmap writeout per bitmap page, there is no need to trigger the writeout while a resync is going on. Once the resync is finished (or aborted), we trigger bitmap writeout anyways. Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_main.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index f529c25e1ad5..755297a15633 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -1439,7 +1439,11 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, put_ldev(mdev); } - if (os.role == R_PRIMARY && ns.role == R_SECONDARY && get_ldev(mdev)) { + /* Write out all changed bits on demote. 
+ * Though, no need to da that just yet + * if there is a resync going on still */ + if (os.role == R_PRIMARY && ns.role == R_SECONDARY && + mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) { drbd_bitmap_io_from_worker(mdev, &drbd_bm_write, "demote"); put_ldev(mdev); } @@ -1559,7 +1563,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED) drbd_send_state(mdev); - if (os.conn > C_CONNECTED && ns.conn == C_CONNECTED) + if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished"); /* free tl_hash if we Got thawed and are C_STANDALONE */ From 725a97e43ee945cc813fffd9e628e50d703b973b Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Sun, 19 Dec 2010 11:29:55 +0100 Subject: [PATCH 076/122] drbd: fix potential access of on-stack wait_queue_head_t after return I run into something declaring itself as "spinlock deadlock", BUG: spinlock lockup on CPU#1, kjournald/27816, ffff88000ad6bca0 Pid: 27816, comm: kjournald Tainted: G W 2.6.34.6 #2 Call Trace: [] do_raw_spin_lock+0x11e/0x14d [] _raw_spin_lock_irqsave+0x6a/0x81 [] ? __wake_up+0x22/0x50 [] __wake_up+0x22/0x50 [] bm_async_io_complete+0x258/0x299 [drbd] but the call traces do not fit at all, all other cpus are cpu_idle. I think it may be this race: drbd_bm_write_page wait_queue_head_t io_wait; atomic_t in_flight; bm_async_io submit_bio bm_async_io_complete if (atomic_dec_and_test(in_flight)) wait_event(io_wait, atomic_read(in_flight) == 0) return wake_up(io_wait) The wake_up now accesses the wait_queue_head_t spinlock, which is no longer valid, since the stack frame of drbd_bm_write_page has been clobbered now. Fix this by using struct completion, which does both the condition test as well as the wake_up inside its spinlock, so this race cannot happen. Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_bitmap.c | 38 ++++++++++++++++++-------------- 1 file changed, 22 insertions(+), 16 deletions(-) diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index 314a3632303b..25428bc28476 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c @@ -897,7 +897,7 @@ void drbd_bm_clear_all(struct drbd_conf *mdev) struct bm_aio_ctx { struct drbd_conf *mdev; atomic_t in_flight; - wait_queue_head_t io_wait; + struct completion done; unsigned flags; #define BM_AIO_COPY_PAGES 1 int error; @@ -948,7 +948,7 @@ static void bm_async_io_complete(struct bio *bio, int error) bio_put(bio); if (atomic_dec_and_test(&ctx->in_flight)) - wake_up(&ctx->io_wait); + complete(&ctx->done); } static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must_hold(local) @@ -1009,8 +1009,12 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must */ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_idx) __must_hold(local) { - struct bm_aio_ctx ctx = - { .flags = lazy_writeout_upper_idx ? BM_AIO_COPY_PAGES : 0 }; + struct bm_aio_ctx ctx = { + .mdev = mdev, + .in_flight = ATOMIC_INIT(1), + .done = COMPLETION_INITIALIZER_ONSTACK(ctx.done), + .flags = lazy_writeout_upper_idx ? 
BM_AIO_COPY_PAGES : 0, + }; struct drbd_bitmap *b = mdev->bitmap; int num_pages, i, count = 0; unsigned long now; @@ -1031,10 +1035,6 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_id num_pages = b->bm_number_of_pages; now = jiffies; - ctx.mdev = mdev; - atomic_set(&ctx.in_flight, 1); /* one extra ref */ - init_waitqueue_head(&ctx.io_wait); - ctx.error = 0; /* let the layers below us try to merge these bios... */ for (i = 0; i < num_pages; i++) { @@ -1060,8 +1060,13 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_id cond_resched(); } - atomic_dec(&ctx.in_flight); /* drop the extra ref */ - wait_event(ctx.io_wait, atomic_read(&ctx.in_flight) == 0); + /* + * We initialize ctx.in_flight to one to make sure bm_async_io_complete + * will not complete() early, and decrement / test it here. If there + * are still some bios in flight, we need to wait for them here. + */ + if (!atomic_dec_and_test(&ctx.in_flight)) + wait_for_completion(&ctx.done); dev_info(DEV, "bitmap %s of %u pages took %lu jiffies\n", rw == WRITE ? "WRITE" : "READ", count, jiffies - now); @@ -1133,19 +1138,20 @@ int drbd_bm_write_lazy(struct drbd_conf *mdev, unsigned upper_idx) __must_hold(l */ int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local) { - struct bm_aio_ctx ctx = { .flags = BM_AIO_COPY_PAGES, }; + struct bm_aio_ctx ctx = { + .mdev = mdev, + .in_flight = ATOMIC_INIT(1), + .done = COMPLETION_INITIALIZER_ONSTACK(ctx.done), + .flags = BM_AIO_COPY_PAGES, + }; if (bm_test_page_unchanged(mdev->bitmap->bm_pages[idx])) { dynamic_dev_dbg(DEV, "skipped bm page write for idx %u\n", idx); return 0; } - ctx.mdev = mdev; - atomic_set(&ctx.in_flight, 1); - init_waitqueue_head(&ctx.io_wait); - bm_page_io_async(&ctx, idx, WRITE_SYNC); - wait_event(ctx.io_wait, atomic_read(&ctx.in_flight) == 0); + wait_for_completion(&ctx.done); if (ctx.error) drbd_chk_io_error(mdev, 1, true); From 3f98688afc2ce0138fc88e272bdd128e1e0b0976 Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Mon, 20 Dec 2010 14:48:20 +0100 Subject: [PATCH 077/122] drbd: There might be a resync after unfreezing IO due to no disk [Bugz 332] When on-no-data-accessible is set to suspend-io, also consider that a Primary, SyncTarget node losses its connection. 
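The reworked susp_nod handling can be modelled in isolation as follows; this is a simplified sketch, with booleans standing in for the old/new connection and disk states, and resume_after_susp_nod is a hypothetical name. The point of the change is that the action is chosen first and the suspend flag is only cleared when there really is something to resume, so a re-established connection now counts even when a resync follows right away.

#include <stdio.h>
#include <stdbool.h>

enum what { NOTHING, RESEND, RESTART_FROZEN_DISK_IO };

/* Pick the action first; clear the susp_nod suspend flag only if there
 * really is something to resume. */
static enum what resume_after_susp_nod(bool conn_reestablished, bool disk_attached,
                                       bool *clear_susp_nod)
{
        enum what what = NOTHING;

        if (conn_reestablished)
                what = RESEND;
        if (disk_attached)
                what = RESTART_FROZEN_DISK_IO;

        *clear_susp_nod = (what != NOTHING);
        return what;
}

int main(void)
{
        bool clear;
        enum what w = resume_after_susp_nod(true, false, &clear);

        printf("action=%d, clear susp_nod=%d\n", (int)w, (int)clear);
        return 0;
}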
Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_main.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 755297a15633..9bd53cf2cdb2 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -1347,16 +1347,14 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, nsm.i = -1; if (ns.susp_nod) { - if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) { - if (ns.conn == C_CONNECTED) - what = resend, nsm.susp_nod = 0; - else /* ns.conn > C_CONNECTED */ - dev_err(DEV, "Unexpected Resync going on!\n"); - } + if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) + what = resend; if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING) - what = restart_frozen_disk_io, nsm.susp_nod = 0; + what = restart_frozen_disk_io; + if (what != nothing) + nsm.susp_nod = 0; } if (ns.susp_fen) { From 071942727824bab03b1a3f6b6eeb5b269697b333 Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Mon, 20 Dec 2010 15:38:07 +0100 Subject: [PATCH 078/122] drbd: ratelimit io error messages Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_worker.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 3df37e65c118..7bfeb79e7105 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -165,14 +165,15 @@ void drbd_endio_sec(struct bio *bio, int error) int uptodate = bio_flagged(bio, BIO_UPTODATE); int is_write = bio_data_dir(bio) == WRITE; - if (error) + if (error && __ratelimit(&drbd_ratelimit_state)) dev_warn(DEV, "%s: error=%d s=%llus\n", is_write ? "write" : "read", error, (unsigned long long)e->sector); if (!error && !uptodate) { - dev_warn(DEV, "%s: setting error to -EIO s=%llus\n", - is_write ? "write" : "read", - (unsigned long long)e->sector); + if (__ratelimit(&drbd_ratelimit_state)) + dev_warn(DEV, "%s: setting error to -EIO s=%llus\n", + is_write ? "write" : "read", + (unsigned long long)e->sector); /* strange behavior of some lower level drivers... * fail the request by clearing the uptodate flag, * but do not return any error?! */ From 617049aa7d753e8c821ac77126ab90e9f1b66d6d Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Wed, 22 Dec 2010 12:48:31 +0100 Subject: [PATCH 079/122] drbd: Fixed an issue with AHEAD -> SYNC_SOURCE transitions Create a new barrier when leaving the AHEAD mode. Otherwise we trigger the assertion in req_mod(, barrier_acked) D_ASSERT(req->rq_state & RQ_NET_SENT); The new barrier is created by recycling the newest existing one. Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_main.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 9bd53cf2cdb2..90050ab7adf3 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -335,6 +335,24 @@ bail: drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR)); } + +/* In C_AHEAD mode only out_of_sync packets are sent for requests. Detach + * those requests from the newsest barrier when changing to an other cstate. + * + * That headless list vanishes when the last request finished its write or + * send out_of_sync packet. 
*/ +static void tl_forget(struct drbd_conf *mdev) +{ + struct drbd_tl_epoch *b; + + if (test_bit(CREATE_BARRIER, &mdev->flags)) + return; + + b = mdev->newest_tle; + list_del(&b->requests); + _tl_add_barrier(mdev, b); +} + /** * _tl_restart() - Walks the transfer log, and applies an action to all requests * @mdev: DRBD device. @@ -1242,6 +1260,9 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns, if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) drbd_resume_al(mdev); + if (os.conn == C_AHEAD && ns.conn != C_AHEAD) + tl_forget(mdev); + ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC); if (ascw) { ascw->os = os; From d612d309e4c8401ad94c531678b59c4a8b7c41ce Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Mon, 27 Dec 2010 10:53:28 +0100 Subject: [PATCH 080/122] drbd: No longer answer P_RS_DATA_REQUEST packets when in C_AHEAD mode When the sync source node replies to a P_RS_DATA_REQUEST packet when it is already in ahead mode. I.e. those two packets crossed each other on the wire, that may lead to diverging bitmaps. This never happens in a well-tuned-system. In a well-tuned- system the resync controller has reduced the resync speed to zero long before we got into ahead-mode. But we have to be prepared for the not-well-tuned-system of course as well. Because -> diverging bitmaps = non terminating resync. Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_int.h | 3 ++- drivers/block/drbd/drbd_receiver.c | 12 +++++++++++- drivers/block/drbd/drbd_worker.c | 4 +++- 3 files changed, 16 insertions(+), 3 deletions(-) diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index ec06e744be42..a529285b0cd3 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -205,8 +205,9 @@ enum drbd_packets { /* P_CKPT_DISABLE_REQ = 0x26, * currently reserved for protocol D */ P_DELAY_PROBE = 0x27, /* is used on BOTH sockets */ P_OUT_OF_SYNC = 0x28, /* Mark as out of sync (Outrunning), data socket */ + P_RS_CANCEL = 0x29, /* meta: Used to cancel RS_DATA_REQUEST packet by SyncSource */ - P_MAX_CMD = 0x28, + P_MAX_CMD = 0x2A, P_MAY_IGNORE = 0x100, /* Flag to test if (cmd > P_MAY_IGNORE) ... 
*/ P_MAX_OPT_CMD = 0x101, diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index f0a0f66fbe68..bf865bd83414 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -4361,7 +4361,16 @@ static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h) if (get_ldev_if_state(mdev, D_FAILED)) { drbd_rs_complete_io(mdev, sector); - drbd_rs_failed_io(mdev, sector, size); + switch (be16_to_cpu(h->command)) { + case P_NEG_RS_DREPLY: + drbd_rs_failed_io(mdev, sector, size); + case P_RS_CANCEL: + break; + default: + D_ASSERT(0); + put_ldev(mdev); + return false; + } put_ldev(mdev); } @@ -4459,6 +4468,7 @@ static struct asender_cmd *get_asender_cmd(int cmd) [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply }, [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync }, [P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip }, + [P_RS_CANCEL] = { sizeof(struct p_block_ack), got_NegRSDReply}, [P_MAX_CMD] = { 0, NULL }, }; if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL) diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 7bfeb79e7105..1d7510ebaa43 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -988,7 +988,9 @@ int w_e_end_rsdata_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel) put_ldev(mdev); } - if (likely((e->flags & EE_WAS_ERROR) == 0)) { + if (mdev->state.conn == C_AHEAD) { + ok = drbd_send_ack(mdev, P_RS_CANCEL, e); + } else if (likely((e->flags & EE_WAS_ERROR) == 0)) { if (likely(mdev->state.pdsk >= D_INCONSISTENT)) { inc_rs_pending(mdev); ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e); From da0a78161d2b2da4819a1f05a38bb1dcbe02d951 Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Thu, 23 Dec 2010 14:24:33 +0100 Subject: [PATCH 081/122] drbd: Be more careful with SyncSource -> Ahead transitions We may not get from SyncSource to Ahead if we have sent some P_RS_DATA_REPLY packets to the peer and are waiting for P_WRITE_ACK. Again, this is not relevant for proper tuned systems, but makes sure that the not-tuned system does not get diverging bitmaps. Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_receiver.c | 3 ++- drivers/block/drbd/drbd_req.c | 8 +++++++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index bf865bd83414..fd0957f9c230 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -4385,10 +4385,11 @@ static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h) if (mdev->state.conn == C_AHEAD && atomic_read(&mdev->ap_in_flight) == 0 && + atomic_read(&mdev->rs_pending_cnt) == 0 && list_empty(&mdev->start_resync_work.list)) { struct drbd_work *w = &mdev->start_resync_work; w->cb = w_start_resync; - drbd_queue_work_front(&mdev->data.work, w); + drbd_queue_work(&mdev->data.work, w); } return true; diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 889175110c91..a3f6b04ebaba 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -1002,7 +1002,13 @@ allocate_barrier: congested = 1; } - if (congested) { + if (congested && atomic_read(&mdev->rs_pending_cnt) == 0) { + /* rs_pending_cnt must be zero, otherwise the two peers + might get different bitmaps. With sane configurations + the resync stalls long before we might want to go into + AHEAD mode. 
+ We could force the resync into PAUSE mode here if + rs_pending_cnt is > 0 ... */ queue_barrier(mdev); if (mdev->net_conf->on_congestion == OC_PULL_AHEAD) From 794abb753e29e85949b3719dbc2ab6a98711a47e Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Mon, 27 Dec 2010 11:51:23 +0100 Subject: [PATCH 082/122] drbd: Cleaned up the resync timer logic Besides removed a few lines of code, this moves the inspection of the state from before the queuing process to after the queuing. I.e. more closely to the actual invocation of the work. Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_int.h | 2 +- drivers/block/drbd/drbd_main.c | 2 +- drivers/block/drbd/drbd_worker.c | 56 +++++++++----------------------- 3 files changed, 17 insertions(+), 43 deletions(-) diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index a529285b0cd3..dc11b7070ba9 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -1540,7 +1540,7 @@ extern int w_e_end_csum_rs_req(struct drbd_conf *, struct drbd_work *, int); extern int w_e_end_ov_reply(struct drbd_conf *, struct drbd_work *, int); extern int w_e_end_ov_req(struct drbd_conf *, struct drbd_work *, int); extern int w_ov_finished(struct drbd_conf *, struct drbd_work *, int); -extern int w_resync_inactive(struct drbd_conf *, struct drbd_work *, int); +extern int w_resync_timer(struct drbd_conf *, struct drbd_work *, int); extern int w_resume_next_sg(struct drbd_conf *, struct drbd_work *, int); extern int w_send_write_hint(struct drbd_conf *, struct drbd_work *, int); extern int w_send_dblock(struct drbd_conf *, struct drbd_work *, int); diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 90050ab7adf3..4074d6699307 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -2978,7 +2978,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev) INIT_LIST_HEAD(&mdev->start_resync_work.list); INIT_LIST_HEAD(&mdev->bm_io_work.w.list); - mdev->resync_work.cb = w_resync_inactive; + mdev->resync_work.cb = w_resync_timer; mdev->unplug_work.cb = w_send_write_hint; mdev->go_diskless.cb = w_go_diskless; mdev->md_sync_work.cb = w_md_sync; diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 1d7510ebaa43..147f76b26810 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -253,13 +253,6 @@ int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel) return w_send_read_req(mdev, w, 0); } -int w_resync_inactive(struct drbd_conf *mdev, struct drbd_work *w, int cancel) -{ - ERR_IF(cancel) return 1; - dev_err(DEV, "resync inactive, but callback triggered??\n"); - return 1; /* Simply ignore this! 
*/ -} - void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm, struct drbd_epoch_entry *e, void *digest) { struct hash_desc desc; @@ -389,26 +382,25 @@ defer: return -EAGAIN; } +int w_resync_timer(struct drbd_conf *mdev, struct drbd_work *w, int cancel) +{ + switch (mdev->state.conn) { + case C_VERIFY_S: + w_make_ov_request(mdev, w, cancel); + break; + case C_SYNC_TARGET: + w_make_resync_request(mdev, w, cancel); + break; + } + + return 1; +} + void resync_timer_fn(unsigned long data) { struct drbd_conf *mdev = (struct drbd_conf *) data; - int queue; - queue = 1; - switch (mdev->state.conn) { - case C_VERIFY_S: - mdev->resync_work.cb = w_make_ov_request; - break; - case C_SYNC_TARGET: - mdev->resync_work.cb = w_make_resync_request; - break; - default: - queue = 0; - mdev->resync_work.cb = w_resync_inactive; - } - - /* harmless race: list_empty outside data.work.q_lock */ - if (list_empty(&mdev->resync_work.list) && queue) + if (list_empty(&mdev->resync_work.list)) drbd_queue_work(&mdev->data.work, &mdev->resync_work); } @@ -525,15 +517,6 @@ static int w_make_resync_request(struct drbd_conf *mdev, if (unlikely(cancel)) return 1; - if (unlikely(mdev->state.conn < C_CONNECTED)) { - dev_err(DEV, "Confused in w_make_resync_request()! cstate < Connected"); - return 0; - } - - if (mdev->state.conn != C_SYNC_TARGET) - dev_err(DEV, "%s in w_make_resync_request\n", - drbd_conn_str(mdev->state.conn)); - if (mdev->rs_total == 0) { /* empty resync? */ drbd_resync_finished(mdev); @@ -546,7 +529,6 @@ static int w_make_resync_request(struct drbd_conf *mdev, to continue resync with a broken disk makes no sense at all */ dev_err(DEV, "Disk broke down during resync!\n"); - mdev->resync_work.cb = w_resync_inactive; return 1; } @@ -580,7 +562,6 @@ next_sector: if (bit == DRBD_END_OF_BITMAP) { mdev->bm_resync_fo = drbd_bm_bits(mdev); - mdev->resync_work.cb = w_resync_inactive; put_ldev(mdev); return 1; } @@ -676,7 +657,6 @@ next_sector: * resync data block, and the last bit is cleared. * until then resync "work" is "inactive" ... */ - mdev->resync_work.cb = w_resync_inactive; put_ldev(mdev); return 1; } @@ -697,17 +677,11 @@ static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int ca if (unlikely(cancel)) return 1; - if (unlikely(mdev->state.conn < C_CONNECTED)) { - dev_err(DEV, "Confused in w_make_ov_request()! cstate < Connected"); - return 0; - } - number = drbd_rs_number_requests(mdev); sector = mdev->ov_position; for (i = 0; i < number; i++) { if (sector >= capacity) { - mdev->resync_work.cb = w_resync_inactive; return 1; } From 110a204a354a5a69f99ed0bc8e6d779e6a94d410 Mon Sep 17 00:00:00 2001 From: Andreas Gruenbacher Date: Mon, 3 Jan 2011 15:47:08 +0100 Subject: [PATCH 083/122] drbd: Remove useless / wrong comments Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_int.h | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index dc11b7070ba9..84b4575fdf43 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -680,13 +680,6 @@ static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi) return thi->t_state; } - -/* - * Having this as the first member of a struct provides sort of "inheritance". - * "derived" structs can be "drbd_queue_work()"ed. - * The callback should know and cast back to the descendant struct. - * drbd_request and drbd_epoch_entry are descendants of drbd_work. 
- */ struct drbd_work; typedef int (*drbd_work_cb)(struct drbd_conf *, struct drbd_work *, int cancel); struct drbd_work { @@ -715,9 +708,6 @@ struct drbd_request { * starting a new epoch... */ - /* up to here, the struct layout is identical to drbd_epoch_entry; - * we might be able to use that to our advantage... */ - struct list_head tl_requests; /* ring list in the transfer log */ struct bio *master_bio; /* master bio pointer */ unsigned long rq_state; /* see comments above _req_mod() */ From 2b8a90b55533c66258a1ff0fb27b8cffa95665c4 Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Mon, 10 Jan 2011 11:15:17 +0100 Subject: [PATCH 084/122] drbd: Corrected off-by-one error in DRBD_MINOR_COUNT_MAX Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_main.c | 7 ++++--- include/linux/drbd_limits.h | 3 ++- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 4074d6699307..da98bff7c333 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -85,7 +85,8 @@ MODULE_AUTHOR("Philipp Reisner , " MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION); MODULE_VERSION(REL_VERSION); MODULE_LICENSE("GPL"); -MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices (1-255)"); +MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices (" + __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")"); MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR); #include @@ -115,7 +116,7 @@ module_param(fault_devs, int, 0644); #endif /* module parameter, defined */ -unsigned int minor_count = 32; +unsigned int minor_count = DRBD_MINOR_COUNT_DEF; int disable_sendpage; int allow_oos; unsigned int cn_idx = CN_IDX_DRBD; @@ -3456,7 +3457,7 @@ int __init drbd_init(void) return -EINVAL; } - if (1 > minor_count || minor_count > 255) { + if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) { printk(KERN_ERR "drbd: invalid minor_count (%d)\n", minor_count); #ifdef MODULE diff --git a/include/linux/drbd_limits.h b/include/linux/drbd_limits.h index abf418724e52..bb264a5732de 100644 --- a/include/linux/drbd_limits.h +++ b/include/linux/drbd_limits.h @@ -16,7 +16,8 @@ #define DEBUG_RANGE_CHECK 0 #define DRBD_MINOR_COUNT_MIN 1 -#define DRBD_MINOR_COUNT_MAX 255 +#define DRBD_MINOR_COUNT_MAX 256 +#define DRBD_MINOR_COUNT_DEF 32 #define DRBD_DIALOG_REFRESH_MIN 0 #define DRBD_DIALOG_REFRESH_MAX 600 From 4a23f2649698272abcd9e0c9a992d65739f32792 Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Tue, 11 Jan 2011 17:42:17 +0100 Subject: [PATCH 085/122] drbd: Do not full sync if a P_SYNC_UUID packet gets lost See also commit from 2009-08-15 "drbd_uuid_compare(): Do not full sync in case a P_SYNC_UUID packet gets lost." We saw cases where the History UUIDs where not as expected. So the detection of the special case did not trigger. With the sync UUID no longer being a random number, but deducible from the previous bitmap UUID, the detection of this special case becomes more reliable. The SyncUUID now is the previous bitmap UUID + 0x1000000000000. Rule 5a: Cs = H1p & H1p + Offset = Bp Connection was lost before SyncUUID Packet came through. Corrent (peer) UUIDs: Bp = H1p H1p = H2p H2p = 0 Become Sync target. Rule 7a: Cp = H1s & H1s + Offset = Bs Connection was lost before SyncUUID Packet came through. Correct (own) UUIDs: Bs = H1s H1s = H2s H2s = 0 Become Sync source. 
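The offset relation can be checked in isolation. The following stand-alone program mirrors the arithmetic, using the UUID_NEW_BM_OFFSET value introduced by this patch; lost_sync_uuid_packet is a made-up helper, the example UUID is arbitrary, and the low "flags" bit that the driver masks out is ignored here. If a node's stale UUID plus the fixed offset equals the corresponding bitmap UUID, the P_SYNC_UUID packet of the last resync start was lost and the rules above apply.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define UUID_NEW_BM_OFFSET ((uint64_t)0x0001000000000000ULL)

/* The sync UUID is now derived from the bitmap UUID instead of being
 * random, so a lost P_SYNC_UUID packet is recognizable by arithmetic. */
static int lost_sync_uuid_packet(uint64_t stale_uuid, uint64_t bitmap_uuid)
{
        return stale_uuid + UUID_NEW_BM_OFFSET == bitmap_uuid;
}

int main(void)
{
        uint64_t old_bitmap_uuid = 0x45F9A3C20B7D11E0ULL;           /* example value */
        uint64_t sync_uuid = old_bitmap_uuid + UUID_NEW_BM_OFFSET;  /* new bitmap UUID */

        printf("previous bitmap UUID: %016" PRIX64 "\n", old_bitmap_uuid);
        printf("derived sync UUID:    %016" PRIX64 "\n", sync_uuid);
        printf("lost P_SYNC_UUID detected: %d\n",
               lost_sync_uuid_packet(old_bitmap_uuid, sync_uuid));
        return 0;
}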
Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_int.h | 1 + drivers/block/drbd/drbd_main.c | 2 +- drivers/block/drbd/drbd_receiver.c | 35 ++++++++++++++++++------------ 3 files changed, 23 insertions(+), 15 deletions(-) diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 84b4575fdf43..3c8eecd9666d 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -97,6 +97,7 @@ extern char usermode_helper[]; #define ID_SYNCER (-1ULL) #define ID_VACANT 0 #define is_syncer_block_id(id) ((id) == ID_SYNCER) +#define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL) struct drbd_conf; diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index da98bff7c333..b3b6d3190f65 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -2022,7 +2022,7 @@ int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev) D_ASSERT(mdev->state.disk == D_UP_TO_DATE); - get_random_bytes(&uuid, sizeof(u64)); + uuid = mdev->ldev->md.uuid[UI_BITMAP] + UUID_NEW_BM_OFFSET; drbd_uuid_set(mdev, UI_BITMAP, uuid); drbd_md_sync(mdev); p.uuid = cpu_to_be64(uuid); diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index fd0957f9c230..7991183749e3 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -2293,6 +2293,8 @@ static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid, -2 C_SYNC_TARGET set BitMap -100 after split brain, disconnect -1000 unrelated data +-1091 requires proto 91 +-1096 requires proto 96 */ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local) { @@ -2322,7 +2324,7 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) { if (mdev->agreed_pro_version < 91) - return -1001; + return -1091; if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) && (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) { @@ -2343,7 +2345,7 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) { if (mdev->agreed_pro_version < 91) - return -1001; + return -1091; if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) && (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) { @@ -2388,17 +2390,22 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l *rule_nr = 51; peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1); if (self == peer) { - self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1); - peer = mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1); - if (self == peer) { + if (mdev->agreed_pro_version < 96 ? + (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == + (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) : + peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) { /* The last P_SYNC_UUID did not get though. Undo the last start of resync as sync source modifications of the peer's UUIDs. 
*/ if (mdev->agreed_pro_version < 91) - return -1001; + return -1091; mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START]; mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1]; + + dev_info(DEV, "Did not got last syncUUID packet, corrected:\n"); + drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]); + return -1; } } @@ -2420,20 +2427,20 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l *rule_nr = 71; self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1); if (self == peer) { - self = mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1); - peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1); - if (self == peer) { + if (mdev->agreed_pro_version < 96 ? + (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == + (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) : + self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) { /* The last P_SYNC_UUID did not get though. Undo the last start of resync as sync source modifications of our UUIDs. */ if (mdev->agreed_pro_version < 91) - return -1001; + return -1091; _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]); _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]); - dev_info(DEV, "Undid last start of resync:\n"); - + dev_info(DEV, "Last syncUUID did not get through, corrected:\n"); drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0); @@ -2496,8 +2503,8 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol dev_alert(DEV, "Unrelated data, aborting!\n"); return C_MASK; } - if (hg == -1001) { - dev_alert(DEV, "To resolve this both sides have to support at least protocol 91\n"); + if (hg < -1000) { + dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000); return C_MASK; } From 71c78cfba232de8f61a4b1bbb6e876424d133407 Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Fri, 14 Jan 2011 19:20:34 +0100 Subject: [PATCH 086/122] drbd: Nothing should stop SyncSource -> Ahead transitions Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_req.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index a3f6b04ebaba..6f9d1bfcca58 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -1002,7 +1002,7 @@ allocate_barrier: congested = 1; } - if (congested && atomic_read(&mdev->rs_pending_cnt) == 0) { + if (congested) { /* rs_pending_cnt must be zero, otherwise the two peers might get different bitmaps. With sane configurations the resync stalls long before we might want to go into From 370a43e7982dd497822097e0ae6022947ac2e7d4 Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Fri, 14 Jan 2011 16:03:11 +0100 Subject: [PATCH 087/122] drbd: Work on the Ahead -> SyncSource transition The test if rs_pending_cnt == 0 was too weak. Using Test for unacked_cnt == 0 instead. Moved that into the worker. Since unacked_cnt gets already increased when an P_RS_DATA_REQ comes in. Also using a timer to make Ahead -> SyncSource -> Ahead cycles slower... 
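The deferred transition boils down to the following stand-alone sketch, with plain integers standing in for the atomic unacked_cnt/rs_pending_cnt counters, a loop standing in for re-arming start_resync_timer, and made-up function names. As long as replies are outstanding, the worker backs off, so the Ahead -> SyncSource switch only happens once the wire is quiet.

#include <stdio.h>

static int unacked_cnt = 2;     /* stand-in for the device's unacked_cnt */
static int rs_pending_cnt = 0;  /* stand-in for the device's rs_pending_cnt */

static void start_resync_as_sync_source(void)
{
        printf("Ahead -> SyncSource: starting resync now\n");
}

/* While replies are still outstanding, back off and retry a little later
 * (the driver re-arms start_resync_timer for HZ/10 and returns; this
 * sketch simply loops and lets the counters drain). */
static void w_start_resync_sketch(void)
{
        while (unacked_cnt || rs_pending_cnt) {
                printf("w_start_resync later... (unacked=%d, rs_pending=%d)\n",
                       unacked_cnt, rs_pending_cnt);
                unacked_cnt--;
        }
        start_resync_as_sync_source();
}

int main(void)
{
        w_start_resync_sketch();
        return 0;
}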
Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_int.h | 3 +++ drivers/block/drbd/drbd_main.c | 4 ++++ drivers/block/drbd/drbd_receiver.c | 8 +++----- drivers/block/drbd/drbd_worker.c | 16 +++++++++++++++- 4 files changed, 25 insertions(+), 6 deletions(-) diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 3c8eecd9666d..cfe7fff459e3 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -850,6 +850,7 @@ enum { GOT_PING_ACK, /* set when we receive a ping_ack packet, misc wait gets woken */ NEW_CUR_UUID, /* Create new current UUID when thawing IO */ AL_SUSPENDED, /* Activity logging is currently suspended. */ + AHEAD_TO_SYNC_SOURCE, /* Ahead -> SyncSource queued */ }; struct drbd_bitmap; /* opaque for drbd_conf */ @@ -961,6 +962,7 @@ struct drbd_conf { start_resync_work; struct timer_list resync_timer; struct timer_list md_sync_timer; + struct timer_list start_resync_timer; #ifdef DRBD_DEBUG_MD_SYNC struct { unsigned int line; @@ -1544,6 +1546,7 @@ extern int w_send_oos(struct drbd_conf *, struct drbd_work *, int); extern int w_start_resync(struct drbd_conf *, struct drbd_work *, int); extern void resync_timer_fn(unsigned long data); +extern void start_resync_timer_fn(unsigned long data); /* drbd_receiver.c */ extern int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector); diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index b3b6d3190f65..8ec7c65988be 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -2984,12 +2984,16 @@ void drbd_init_set_defaults(struct drbd_conf *mdev) mdev->go_diskless.cb = w_go_diskless; mdev->md_sync_work.cb = w_md_sync; mdev->bm_io_work.w.cb = w_bitmap_io; + mdev->start_resync_work.cb = w_start_resync; init_timer(&mdev->resync_timer); init_timer(&mdev->md_sync_timer); + init_timer(&mdev->start_resync_timer); mdev->resync_timer.function = resync_timer_fn; mdev->resync_timer.data = (unsigned long) mdev; mdev->md_sync_timer.function = md_sync_timer_fn; mdev->md_sync_timer.data = (unsigned long) mdev; + mdev->start_resync_timer.function = start_resync_timer_fn; + mdev->start_resync_timer.data = (unsigned long) mdev; init_waitqueue_head(&mdev->misc_wait); init_waitqueue_head(&mdev->state_wait); diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 7991183749e3..18cb8b609fe4 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -4392,11 +4392,9 @@ static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h) if (mdev->state.conn == C_AHEAD && atomic_read(&mdev->ap_in_flight) == 0 && - atomic_read(&mdev->rs_pending_cnt) == 0 && - list_empty(&mdev->start_resync_work.list)) { - struct drbd_work *w = &mdev->start_resync_work; - w->cb = w_start_resync; - drbd_queue_work(&mdev->data.work, w); + !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags)) { + mdev->start_resync_timer.expires = jiffies + HZ; + add_timer(&mdev->start_resync_timer); } return true; diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 147f76b26810..3a95b701b5d0 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -712,10 +712,24 @@ static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int ca } +void start_resync_timer_fn(unsigned long data) +{ + struct drbd_conf *mdev = (struct drbd_conf *) data; + + drbd_queue_work(&mdev->data.work, 
&mdev->start_resync_work); +} + int w_start_resync(struct drbd_conf *mdev, struct drbd_work *w, int cancel) { - drbd_start_resync(mdev, C_SYNC_SOURCE); + if (atomic_read(&mdev->unacked_cnt) || atomic_read(&mdev->rs_pending_cnt)) { + dev_warn(DEV, "w_start_resync later...\n"); + mdev->start_resync_timer.expires = jiffies + HZ/10; + add_timer(&mdev->start_resync_timer); + return 1; + } + drbd_start_resync(mdev, C_SYNC_SOURCE); + clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags); return 1; } From 148efa165e9464927887b03c83a52c33b80b4431 Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Sat, 15 Jan 2011 00:21:15 +0100 Subject: [PATCH 088/122] drbd: Do not drop net config if sending in drbd_send_protocol() fails Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_main.c | 2 +- drivers/block/drbd/drbd_receiver.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 8ec7c65988be..1061b9fff2b0 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -1967,7 +1967,7 @@ int drbd_send_protocol(struct drbd_conf *mdev) else { dev_err(DEV, "--dry-run is not supported by peer"); kfree(p); - return 0; + return -1; } } p->conn_flags = cpu_to_be32(cf); diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 18cb8b609fe4..fb8e86153fd0 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -906,7 +906,7 @@ retry: put_ldev(mdev); } - if (!drbd_send_protocol(mdev)) + if (drbd_send_protocol(mdev) == -1) return -1; drbd_send_sync_param(mdev, &mdev->sync_conf); drbd_send_sizes(mdev, 0, 0); From 94f2b05f03fbc605f83ae501682c85ff4535bb6d Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Mon, 17 Jan 2011 15:14:26 +0100 Subject: [PATCH 089/122] drbd: Killed an assert that is no longer valid The point is that drbd_disconnect() can be called with a cstate of WFConnection. That happens if the user issues "drbdsetup disconnect" while the drbd_connect() function executes. Then drbdd_init() will call drbdd(), which in turn will return without receiving any packets. Then drbdd_init() will end up calling drbd_disconnect() with a cstate of WFConnection. Bottom line: This assertion is wrong as it is, and we do not see value in fixing it. => Removing it. Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_receiver.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index fb8e86153fd0..2207d2886f84 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -3763,9 +3763,6 @@ static void drbd_disconnect(struct drbd_conf *mdev) if (mdev->state.conn == C_STANDALONE) return; - if (mdev->state.conn >= C_WF_CONNECTION) - dev_err(DEV, "ASSERT FAILED cstate = %s, expected < WFConnection\n", - drbd_conn_str(mdev->state.conn)); /* asender does not clean up anything. it must not interfere, either */ drbd_thread_stop(&mdev->asender); From 2deb8336d04106f215c21ad1b029e78d12033d02 Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Mon, 17 Jan 2011 18:39:18 +0100 Subject: [PATCH 090/122] drbd: Fixed P_NEG_ACK processing for protocol A and B Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs. The master bio might already be completed, therefore the request is no longer in the collision hash. 
=> Do not try to validate block_id as request In Protocol B we might already have got a P_RECV_ACK but then get a P_NEG_ACK after wards. Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_receiver.c | 45 ++++++++++++++++++++++-------- 1 file changed, 33 insertions(+), 12 deletions(-) diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 2207d2886f84..a7f5b6d134e3 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -1633,9 +1633,6 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned u32 dp_flags; if (!get_ldev(mdev)) { - if (__ratelimit(&drbd_ratelimit_state)) - dev_err(DEV, "Can not write mirrored data block " - "to local disk.\n"); spin_lock(&mdev->peer_seq_lock); if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num)) mdev->peer_seq++; @@ -4247,8 +4244,6 @@ static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev, return req; } } - dev_err(DEV, "_ack_id_to_req: failed to find req %p, sector %llus in list\n", - (void *)(unsigned long)id, (unsigned long long)sector); return NULL; } @@ -4266,7 +4261,9 @@ static int validate_req_change_req_state(struct drbd_conf *mdev, req = validator(mdev, id, sector); if (unlikely(!req)) { spin_unlock_irq(&mdev->req_lock); - dev_err(DEV, "%s: got a corrupt block_id/sector pair\n", func); + + dev_err(DEV, "%s: failed to find req %p, sector %llus\n", func, + (void *)(unsigned long)id, (unsigned long long)sector); return false; } __req_mod(req, what, &m); @@ -4321,20 +4318,44 @@ static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h) { struct p_block_ack *p = (struct p_block_ack *)h; sector_t sector = be64_to_cpu(p->sector); - - if (__ratelimit(&drbd_ratelimit_state)) - dev_warn(DEV, "Got NegAck packet. Peer is in troubles?\n"); + int size = be32_to_cpu(p->blksize); + struct drbd_request *req; + struct bio_and_error m; update_peer_seq(mdev, be32_to_cpu(p->seq_num)); if (is_syncer_block_id(p->block_id)) { - int size = be32_to_cpu(p->blksize); dec_rs_pending(mdev); drbd_rs_failed_io(mdev, sector, size); return true; } - return validate_req_change_req_state(mdev, p->block_id, sector, - _ack_id_to_req, __func__ , neg_acked); + + spin_lock_irq(&mdev->req_lock); + req = _ack_id_to_req(mdev, p->block_id, sector); + if (!req) { + spin_unlock_irq(&mdev->req_lock); + if (mdev->net_conf->wire_protocol == DRBD_PROT_A || + mdev->net_conf->wire_protocol == DRBD_PROT_B) { + /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs. + The master bio might already be completed, therefore the + request is no longer in the collision hash. + => Do not try to validate block_id as request. */ + /* In Protocol B we might already have got a P_RECV_ACK + but then get a P_NEG_ACK after wards. */ + drbd_set_out_of_sync(mdev, sector, size); + return true; + } else { + dev_err(DEV, "%s: failed to find req %p, sector %llus\n", __func__, + (void *)(unsigned long)p->block_id, (unsigned long long)sector); + return false; + } + } + __req_mod(req, neg_acked, &m); + spin_unlock_irq(&mdev->req_lock); + + if (m.bio) + complete_master_bio(mdev, &m); + return true; } static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h) From 6a35c45f890dc18c5527ac501b308058118f20e7 Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Mon, 17 Jan 2011 20:27:30 +0100 Subject: [PATCH 091/122] drbd: Ensure that an epoch contains only requests of one kind The assert in drbd_req.c:755 forces us to have only requests of one kind in an epoch. 
The two kinds we distinguish here are: local-only or mirrored. Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_main.c | 7 ++++++- drivers/block/drbd/drbd_req.c | 29 ++++------------------------- drivers/block/drbd/drbd_req.h | 18 ++++++++++++++++++ 3 files changed, 28 insertions(+), 26 deletions(-) diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 1061b9fff2b0..34ee8e44a751 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -1261,9 +1261,14 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns, if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) drbd_resume_al(mdev); - if (os.conn == C_AHEAD && ns.conn != C_AHEAD) + /* Start a new epoch in case we start to mirror write requests */ + if (!drbd_should_do_remote(os) && drbd_should_do_remote(ns)) tl_forget(mdev); + /* Do not add local-only requests to an epoch with mirrored requests */ + if (drbd_should_do_remote(os) && !drbd_should_do_remote(ns)) + set_bit(CREATE_BARRIER, &mdev->flags); + ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC); if (ascw) { ascw->os = os; diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 6f9d1bfcca58..336937a14d3f 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -762,27 +762,6 @@ static int drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int s return 0 == drbd_bm_count_bits(mdev, sbnr, ebnr); } -static bool drbd_should_do_remote(struct drbd_conf *mdev) -{ - union drbd_state s = mdev->state; - - return s.pdsk == D_UP_TO_DATE || - (s.pdsk >= D_INCONSISTENT && - s.conn >= C_WF_BITMAP_T && - s.conn < C_AHEAD); - /* Before proto 96 that was >= CONNECTED instead of >= C_WF_BITMAP_T. - That is equivalent since before 96 IO was frozen in the C_WF_BITMAP* - states. */ -} -static bool drbd_should_send_oos(struct drbd_conf *mdev) -{ - union drbd_state s = mdev->state; - - return s.conn == C_AHEAD || s.conn == C_WF_BITMAP_S; - /* pdsk = D_INCONSISTENT as a consequence. 
Protocol 96 check not necessary - since we enter state C_AHEAD only if proto >= 96 */ -} - static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time) { const int rw = bio_rw(bio); @@ -854,8 +833,8 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns drbd_al_begin_io(mdev, sector); } - remote = remote && drbd_should_do_remote(mdev); - send_oos = rw == WRITE && drbd_should_send_oos(mdev); + remote = remote && drbd_should_do_remote(mdev->state); + send_oos = rw == WRITE && drbd_should_send_oos(mdev->state); D_ASSERT(!(remote && send_oos)); if (!(local || remote) && !is_susp(mdev->state)) { @@ -896,8 +875,8 @@ allocate_barrier: } if (remote || send_oos) { - remote = drbd_should_do_remote(mdev); - send_oos = rw == WRITE && drbd_should_send_oos(mdev); + remote = drbd_should_do_remote(mdev->state); + send_oos = rw == WRITE && drbd_should_send_oos(mdev->state); D_ASSERT(!(remote && send_oos)); if (!(remote || send_oos)) diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h index 077c47b1e9d7..32c1f2a31266 100644 --- a/drivers/block/drbd/drbd_req.h +++ b/drivers/block/drbd/drbd_req.h @@ -360,4 +360,22 @@ static inline int req_mod(struct drbd_request *req, return rv; } + +static inline bool drbd_should_do_remote(union drbd_state s) +{ + return s.pdsk == D_UP_TO_DATE || + (s.pdsk >= D_INCONSISTENT && + s.conn >= C_WF_BITMAP_T && + s.conn < C_AHEAD); + /* Before proto 96 that was >= CONNECTED instead of >= C_WF_BITMAP_T. + That is equivalent since before 96 IO was frozen in the C_WF_BITMAP* + states. */ +} +static inline bool drbd_should_send_oos(union drbd_state s) +{ + return s.conn == C_AHEAD || s.conn == C_WF_BITMAP_S; + /* pdsk = D_INCONSISTENT as a consequence. Protocol 96 check not necessary + since we enter state C_AHEAD only if proto >= 96 */ +} + #endif From 20ee639024e3d33111df0e343050b218c656bf16 Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Tue, 18 Jan 2011 15:28:59 +0100 Subject: [PATCH 092/122] drbd: cleaned up __set_current_state() followed by schedule_timeout() calls Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_nl.c | 3 +-- drivers/block/drbd/drbd_receiver.c | 9 +++------ drivers/block/drbd/drbd_worker.c | 3 +-- 3 files changed, 5 insertions(+), 10 deletions(-) diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index a46bc0287e21..42e16e4edfa3 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -360,8 +360,7 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) if (rv == SS_TWO_PRIMARIES) { /* Maybe the peer is detected as dead very soon... retry at most once more in this case. 
*/ - __set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout((mdev->net_conf->ping_timeo+1)*HZ/10); + schedule_timeout_interruptible((mdev->net_conf->ping_timeo+1)*HZ/10); if (try < max_tries) try = max_tries - 1; continue; diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index a7f5b6d134e3..3ccc6c33a330 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -768,8 +768,7 @@ static int drbd_connect(struct drbd_conf *mdev) if (s || ++try >= 3) break; /* give the other side time to call bind() & listen() */ - __set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(HZ / 10); + schedule_timeout_interruptible(HZ / 10); } if (s) { @@ -788,8 +787,7 @@ static int drbd_connect(struct drbd_conf *mdev) } if (sock && msock) { - __set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(HZ / 10); + schedule_timeout_interruptible(HZ / 10); ok = drbd_socket_okay(mdev, &sock); ok = drbd_socket_okay(mdev, &msock) && ok; if (ok) @@ -4142,8 +4140,7 @@ int drbdd_init(struct drbd_thread *thi) h = drbd_connect(mdev); if (h == 0) { drbd_disconnect(mdev); - __set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(HZ); + schedule_timeout_interruptible(HZ); } if (h == -1) { dev_warn(DEV, "Discarding network configuration.\n"); diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 3a95b701b5d0..7b3d4dc4af84 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -777,8 +777,7 @@ int drbd_resync_finished(struct drbd_conf *mdev) * queue (or even the read operations for those packets * is not finished by now). Retry in 100ms. */ - __set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(HZ / 10); + schedule_timeout_interruptible(HZ / 10); w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC); if (w) { w->cb = w_resync_finished; From 6c922ed543bee1bc6685ade07be59f3fa49a7288 Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Wed, 12 Jan 2011 11:51:13 +0100 Subject: [PATCH 093/122] drbd: only generate and send a new sync uuid after a successful state change Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_worker.c | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 7b3d4dc4af84..ec42e04bb517 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -1544,24 +1544,23 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side) } write_unlock_irq(&global_state_lock); - if (side == C_SYNC_TARGET) - mdev->bm_resync_fo = 0; - - /* Since protocol 96, we must serialize drbd_gen_and_send_sync_uuid - * with w_send_oos, or the sync target will get confused as to - * how much bits to resync. We cannot do that always, because for an - * empty resync and protocol < 95, we need to do it here, as we call - * drbd_resync_finished from here in that case. - * We drbd_gen_and_send_sync_uuid here for protocol < 96, - * and from after_state_ch otherwise. 
*/ - if (side == C_SYNC_SOURCE && mdev->agreed_pro_version < 96) - drbd_gen_and_send_sync_uuid(mdev); - if (r == SS_SUCCESS) { dev_info(DEV, "Began resync as %s (will sync %lu KB [%lu bits set]).\n", drbd_conn_str(ns.conn), (unsigned long) mdev->rs_total << (BM_BLOCK_SHIFT-10), (unsigned long) mdev->rs_total); + if (side == C_SYNC_TARGET) + mdev->bm_resync_fo = 0; + + /* Since protocol 96, we must serialize drbd_gen_and_send_sync_uuid + * with w_send_oos, or the sync target will get confused as to + * how much bits to resync. We cannot do that always, because for an + * empty resync and protocol < 95, we need to do it here, as we call + * drbd_resync_finished from here in that case. + * We drbd_gen_and_send_sync_uuid here for protocol < 96, + * and from after_state_ch otherwise. */ + if (side == C_SYNC_SOURCE && mdev->agreed_pro_version < 96) + drbd_gen_and_send_sync_uuid(mdev); if (mdev->agreed_pro_version < 95 && mdev->rs_total == 0) { /* This still has a race (about when exactly the peers From 194bfb32dba8345a7e0f83e9b1ee965e14d4b679 Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Tue, 18 Jan 2011 10:38:01 +0100 Subject: [PATCH 094/122] drbd: serialize admin requests for new resync with pending bitmap io Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_nl.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 42e16e4edfa3..434b621f76a9 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -1873,6 +1873,10 @@ static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl { int retcode; + /* If there is still bitmap IO pending, probably because of a previous + * resync just being finished, wait for it before requesting a new resync. */ + wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags)); + retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED); if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION) @@ -1908,6 +1912,10 @@ static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re { int retcode; + /* If there is still bitmap IO pending, probably because of a previous + * resync just being finished, wait for it before requesting a new resync. */ + wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags)); + retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED); if (retcode < SS_SUCCESS) { @@ -1916,7 +1924,6 @@ static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re into a full resync. */ retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT)); if (retcode >= SS_SUCCESS) { - /* open coded drbd_bitmap_io() */ if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al, "set_n_write from invalidate_peer")) retcode = ERR_IO_MD_DISK; From 54b956abef2c1ab339fd01792e69e4a921a5e487 Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Thu, 20 Jan 2011 10:47:53 +0100 Subject: [PATCH 095/122] drbd: don't pointlessly queue bitmap send, if we lost connection This is a minor optimization and cleanup, and also considerably reduces some harmless (but noisy) race with the connection cleanup code. 
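The extra check can be illustrated with a reduced, stand-alone model; the connection-state enum is trimmed down and maybe_queue_send_bitmap is a hypothetical helper. Because the after-state-change work runs asynchronously, the current state is consulted again, in addition to the queued transition (os -> ns), before the bitmap send is queued.

#include <stdio.h>

enum conn { CONN_STANDALONE, CONN_OTHER, CONN_WF_BITMAP_S };

/* The after-state-change work runs some time after the transition was
 * queued, so check not only the transition (os -> ns) but also the state
 * the device is in right now. */
static void maybe_queue_send_bitmap(enum conn os, enum conn ns, enum conn now)
{
        if (os != CONN_WF_BITMAP_S && ns == CONN_WF_BITMAP_S &&
            now == CONN_WF_BITMAP_S)
                printf("queue send_bitmap (WFBitMapS)\n");
        else
                printf("skip send_bitmap: connection went away meanwhile\n");
}

int main(void)
{
        maybe_queue_send_bitmap(CONN_OTHER, CONN_WF_BITMAP_S, CONN_WF_BITMAP_S);
        maybe_queue_send_bitmap(CONN_OTHER, CONN_WF_BITMAP_S, CONN_STANDALONE);
        return 0;
}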
Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_main.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 34ee8e44a751..1caced08a73e 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -1428,8 +1428,13 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, drbd_send_uuids(mdev); drbd_send_state(mdev); } - if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S) - drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL, "send_bitmap (WFBitMapS)"); + /* No point in queuing send_bitmap if we don't have a connection + * anymore, so check also the _current_ state, not only the new state + * at the time this work was queued. */ + if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S && + mdev->state.conn == C_WF_BITMAP_S) + drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL, + "send_bitmap (WFBitMapS)"); /* Lost contact to peer's copy of the data */ if ((os.pdsk >= D_INCONSISTENT && From 79a30d2d71f7be862de93228fe9b919ef664af52 Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Thu, 20 Jan 2011 10:32:05 +0100 Subject: [PATCH 096/122] drbd: queue bitmap writeout more intelligently The "lazy writeout" of cleared bitmap pages happens during resync, and should happen again once the resync finishes cleanly, or is aborted. If resync finished cleanly, or was aborted because of peer disk failure, we trigger the writeout from worker context in the after state change work. If resync was aborted because of connection failure, we should not immediately trigger bitmap writeout, but rather postpone the writeout to after the connection cleanup happened. We now do it in the receiver context from drbd_disconnect(). If resync was aborted because of local disk failure, well, there is nothing to write to anymore. Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_main.c | 12 +++++++++++- drivers/block/drbd/drbd_receiver.c | 1 + 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 1caced08a73e..e0be4077d564 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -1593,8 +1593,18 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED) drbd_send_state(mdev); - if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) + /* This triggers bitmap writeout of potentially still unwritten pages + * if the resync finished cleanly, or aborted because of peer disk + * failure. Resync aborted because of connection failure does bitmap + * writeout from drbd_disconnect. + * For resync aborted because of local disk failure, we cannot do + * any bitmap writeout anymore. 
+ */ + if (os.conn > C_CONNECTED && ns.conn == C_CONNECTED && + mdev->state.conn == C_CONNECTED && get_ldev(mdev)) { drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished"); + put_ldev(mdev); + } /* free tl_hash if we Got thawed and are C_STANDALONE */ if (ns.conn == C_STANDALONE && !is_susp(ns) && mdev->tl_hash) diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 3ccc6c33a330..432fe8f6b5d2 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -3811,6 +3811,7 @@ static void drbd_disconnect(struct drbd_conf *mdev) fp = FP_DONT_CARE; if (get_ldev(mdev)) { + drbd_bitmap_io(mdev, &drbd_bm_write, "write from disconnect"); fp = mdev->ldev->dc.fencing; put_ldev(mdev); } From cd88d030d41a9b0100fd5fee872024e6ebc8b276 Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Thu, 20 Jan 2011 11:46:41 +0100 Subject: [PATCH 097/122] drbd: Provide hints with the error message when clearing the sync pause flag When the user clears the sync-pause flag, and sync stays in pause state, give hints to the user, why it still is in pause state. Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_nl.c | 12 ++++++++++-- include/linux/drbd.h | 2 ++ 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 434b621f76a9..ffe3a97fef9b 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -1952,9 +1952,17 @@ static int drbd_nl_resume_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n struct drbd_nl_cfg_reply *reply) { int retcode = NO_ERROR; + union drbd_state s; - if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) - retcode = ERR_PAUSE_IS_CLEAR; + if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) { + s = mdev->state; + if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) { + retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP : + s.peer_isp ? 
ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR; + } else { + retcode = ERR_PAUSE_IS_CLEAR; + } + } reply->ret_code = retcode; return 0; diff --git a/include/linux/drbd.h b/include/linux/drbd.h index d10431fab004..ba5c785d3f7d 100644 --- a/include/linux/drbd.h +++ b/include/linux/drbd.h @@ -153,6 +153,8 @@ enum drbd_ret_code { ERR_NEED_APV_93 = 153, ERR_STONITH_AND_PROT_A = 154, ERR_CONG_NOT_PROTO_A = 155, + ERR_PIC_AFTER_DEP = 156, + ERR_PIC_PEER_DEP = 157, /* insert new ones above this line */ AFTER_LAST_ERR_CODE From d07c9c10e5620c632aae9cac2b609033398f6139 Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Thu, 20 Jan 2011 16:49:33 +0100 Subject: [PATCH 098/122] drbd: We can not process BIOs with a size of 0 Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_receiver.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 432fe8f6b5d2..e9354931eace 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -1237,6 +1237,7 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __ data_size -= dgs; + ERR_IF(data_size == 0) return NULL; ERR_IF(data_size & 0x1ff) return NULL; ERR_IF(data_size > DRBD_MAX_BIO_SIZE) return NULL; From 62b0da3a244ac33d25a77861ef1cc0080103f2ff Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Thu, 20 Jan 2011 13:25:21 +0100 Subject: [PATCH 099/122] drbd: log UUIDs whenever they change All decisions about sync, sync direction, and wether or not to allow a connect or attach are based on our set of UUIDs to tag a data generation. Log changes to the UUIDs whenever they occur, logging "new current UUID P:Q:R:S" is more useful than "Creating new current UUID". Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_int.h | 6 ++- drivers/block/drbd/drbd_main.c | 71 ++++++++++++++---------------- drivers/block/drbd/drbd_nl.c | 1 + drivers/block/drbd/drbd_receiver.c | 11 +++-- drivers/block/drbd/drbd_worker.c | 20 +++++---- 5 files changed, 58 insertions(+), 51 deletions(-) diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index cfe7fff459e3..0a9059eb94db 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -1240,11 +1240,11 @@ extern int _drbd_send_bitmap(struct drbd_conf *mdev); extern int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode); extern void drbd_free_bc(struct drbd_backing_dev *ldev); extern void drbd_mdev_cleanup(struct drbd_conf *mdev); +void drbd_print_uuids(struct drbd_conf *mdev, const char *text); /* drbd_meta-data.c (still in drbd_main.c) */ extern void drbd_md_sync(struct drbd_conf *mdev); extern int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev); -/* maybe define them below as inline? 
*/ extern void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local); extern void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local); extern void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local); @@ -2360,9 +2360,11 @@ static inline void dec_ap_bio(struct drbd_conf *mdev) } } -static inline void drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val) +static inline int drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val) { + int changed = mdev->ed_uuid != val; mdev->ed_uuid = val; + return changed; } static inline int seq_cmp(u32 a, u32 b) diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index e0be4077d564..b68332a0e73e 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -1159,6 +1159,10 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns, atomic_inc(&mdev->local_cnt); mdev->state = ns; + + if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING) + drbd_print_uuids(mdev, "attached to UUIDs"); + wake_up(&mdev->misc_wait); wake_up(&mdev->state_wait); @@ -2035,6 +2039,24 @@ int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev) return _drbd_send_uuids(mdev, 8); } +void drbd_print_uuids(struct drbd_conf *mdev, const char *text) +{ + if (get_ldev_if_state(mdev, D_NEGOTIATING)) { + u64 *uuid = mdev->ldev->md.uuid; + dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX\n", + text, + (unsigned long long)uuid[UI_CURRENT], + (unsigned long long)uuid[UI_BITMAP], + (unsigned long long)uuid[UI_HISTORY_START], + (unsigned long long)uuid[UI_HISTORY_END]); + put_ldev(mdev); + } else { + dev_info(DEV, "%s effective data uuid: %016llX\n", + text, + (unsigned long long)mdev->ed_uuid); + } +} + int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev) { struct p_rs_uuid p; @@ -2044,6 +2066,7 @@ int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev) uuid = mdev->ldev->md.uuid[UI_BITMAP] + UUID_NEW_BM_OFFSET; drbd_uuid_set(mdev, UI_BITMAP, uuid); + drbd_print_uuids(mdev, "updated sync UUID"); drbd_md_sync(mdev); p.uuid = cpu_to_be64(uuid); @@ -3749,28 +3772,6 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev) return rv; } -static void debug_drbd_uuid(struct drbd_conf *mdev, enum drbd_uuid_index index) -{ - static char *uuid_str[UI_EXTENDED_SIZE] = { - [UI_CURRENT] = "CURRENT", - [UI_BITMAP] = "BITMAP", - [UI_HISTORY_START] = "HISTORY_START", - [UI_HISTORY_END] = "HISTORY_END", - [UI_SIZE] = "SIZE", - [UI_FLAGS] = "FLAGS", - }; - - if (index >= UI_EXTENDED_SIZE) { - dev_warn(DEV, " uuid_index >= EXTENDED_SIZE\n"); - return; - } - - dynamic_dev_dbg(DEV, " uuid[%s] now %016llX\n", - uuid_str[index], - (unsigned long long)mdev->ldev->md.uuid[index]); -} - - /** * drbd_md_mark_dirty() - Mark meta data super block as dirty * @mdev: DRBD device. 
@@ -3800,10 +3801,8 @@ static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local) { int i; - for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++) { + for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++) mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i]; - debug_drbd_uuid(mdev, i+1); - } } void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local) @@ -3818,7 +3817,6 @@ void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local) } mdev->ldev->md.uuid[idx] = val; - debug_drbd_uuid(mdev, idx); drbd_md_mark_dirty(mdev); } @@ -3828,7 +3826,6 @@ void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local) if (mdev->ldev->md.uuid[idx]) { drbd_uuid_move_history(mdev); mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx]; - debug_drbd_uuid(mdev, UI_HISTORY_START); } _drbd_uuid_set(mdev, idx, val); } @@ -3843,14 +3840,16 @@ void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local) void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local) { u64 val; + unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP]; + + if (bm_uuid) + dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid); - dev_info(DEV, "Creating new current UUID\n"); - D_ASSERT(mdev->ldev->md.uuid[UI_BITMAP] == 0); mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT]; - debug_drbd_uuid(mdev, UI_BITMAP); get_random_bytes(&val, sizeof(u64)); _drbd_uuid_set(mdev, UI_CURRENT, val); + drbd_print_uuids(mdev, "new current UUID"); /* get it to stable storage _now_ */ drbd_md_sync(mdev); } @@ -3864,16 +3863,12 @@ void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local) drbd_uuid_move_history(mdev); mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP]; mdev->ldev->md.uuid[UI_BITMAP] = 0; - debug_drbd_uuid(mdev, UI_HISTORY_START); - debug_drbd_uuid(mdev, UI_BITMAP); } else { - if (mdev->ldev->md.uuid[UI_BITMAP]) - dev_warn(DEV, "bm UUID already set"); + unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP]; + if (bm_uuid) + dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid); - mdev->ldev->md.uuid[UI_BITMAP] = val; - mdev->ldev->md.uuid[UI_BITMAP] &= ~((u64)1); - - debug_drbd_uuid(mdev, UI_BITMAP); + mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1); } drbd_md_mark_dirty(mdev); } diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index ffe3a97fef9b..ce6f2fe80852 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -2151,6 +2151,7 @@ static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl if (skip_initial_sync) { drbd_send_uuids_skip_initial_sync(mdev); _drbd_uuid_set(mdev, UI_BITMAP, 0); + drbd_print_uuids(mdev, "cleared bitmap UUID"); spin_lock_irq(&mdev->req_lock); _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE), CS_VERBOSE, NULL); diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index e9354931eace..e5686a81f42c 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -3024,7 +3024,7 @@ static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned { struct p_uuids *p = &mdev->data.rbuf.uuids; u64 *p_uuid; - int i; + int i, updated_uuids = 0; p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO); @@ -3059,13 +3059,14 @@ static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE), CS_VERBOSE, 
NULL); drbd_md_sync(mdev); + updated_uuids = 1; } put_ldev(mdev); } else if (mdev->state.disk < D_INCONSISTENT && mdev->state.role == R_PRIMARY) { /* I am a diskless primary, the peer just created a new current UUID for me. */ - drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]); + updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]); } /* Before we test for the disk state, we should wait until an eventually @@ -3074,7 +3075,10 @@ static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned new disk state... */ wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags)); if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT) - drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]); + updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]); + + if (updated_uuids) + drbd_print_uuids(mdev, "receiver updated UUIDs to"); return true; } @@ -3305,6 +3309,7 @@ static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsi _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid)); _drbd_uuid_set(mdev, UI_BITMAP, 0UL); + drbd_print_uuids(mdev, "updated sync uuid"); drbd_start_resync(mdev, C_SYNC_TARGET); put_ldev(mdev); diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index ec42e04bb517..ff0eb308ee4a 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -871,14 +871,18 @@ int drbd_resync_finished(struct drbd_conf *mdev) } } - drbd_uuid_set_bm(mdev, 0UL); - - if (mdev->p_uuid) { - /* Now the two UUID sets are equal, update what we - * know of the peer. */ - int i; - for (i = UI_CURRENT ; i <= UI_HISTORY_END ; i++) - mdev->p_uuid[i] = mdev->ldev->md.uuid[i]; + if (!(os.conn == C_VERIFY_S || os.conn == C_VERIFY_T)) { + /* for verify runs, we don't update uuids here, + * so there would be nothing to report. */ + drbd_uuid_set_bm(mdev, 0UL); + drbd_print_uuids(mdev, "updated UUIDs"); + if (mdev->p_uuid) { + /* Now the two UUID sets are equal, update what we + * know of the peer. */ + int i; + for (i = UI_CURRENT ; i <= UI_HISTORY_END ; i++) + mdev->p_uuid[i] = mdev->ldev->md.uuid[i]; + } } } From 20ceb2b22edaf51e59e76087efdc71a16a2858de Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Fri, 21 Jan 2011 10:56:44 +0100 Subject: [PATCH 100/122] drbd: describe bitmap locking for bulk operation in finer detail Now that we do no longer in-place endian-swap the bitmap, we allow selected bitmap operations (testing bits, sometimes even settting bits) during some bulk operations. This caused us to hit a lot of FIXME asserts similar to FIXME asender in drbd_bm_count_bits, bitmap locked for 'write from resync_finished' by worker Which now is nonsense: looking at the bitmap is perfectly legal as long as it is not being resized. This cosmetic patch defines some flags to describe expectations in finer detail, so the asserts in e.g. bm_change_bits_to() can be skipped if appropriate. 
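In short, as a usage sketch (all names and call sites are taken from the hunks below, abbreviated here for illustration):

	/* bitmap resize must not race with anything:
	 * forbid setting, clearing and testing of bits */
	drbd_bm_lock(mdev, "resize", BM_LOCKED_MASK);

	/* receiving the peer's bitmap: setting additional bits is fine
	 * (they are reported as out-of-sync), clearing is not expected */
	drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);

	/* inside e.g. bm_change_bits_to(): only complain if this particular
	 * kind of modification is currently forbidden */
	if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags)
		bm_print_lock_info(mdev);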
Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_bitmap.c | 48 +++++++++++-------------- drivers/block/drbd/drbd_int.h | 36 ++++++++++++++++--- drivers/block/drbd/drbd_main.c | 58 +++++++++++++++++++----------- drivers/block/drbd/drbd_nl.c | 19 ++++++---- drivers/block/drbd/drbd_receiver.c | 17 ++++++--- 5 files changed, 115 insertions(+), 63 deletions(-) diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index 25428bc28476..b62dd5f26c5d 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c @@ -104,26 +104,16 @@ struct drbd_bitmap { wait_queue_head_t bm_io_wait; /* used to serialize IO of single pages */ - unsigned long bm_flags; + enum bm_flag bm_flags; /* debugging aid, in case we are still racy somewhere */ char *bm_why; struct task_struct *bm_task; }; -/* definition of bits in bm_flags */ -#define BM_LOCKED 0 -// #define BM_MD_IO_ERROR 1 unused now. -#define BM_P_VMALLOCED 2 - static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s, unsigned long e, int val, const enum km_type km); -static int bm_is_locked(struct drbd_bitmap *b) -{ - return test_bit(BM_LOCKED, &b->bm_flags); -} - #define bm_print_lock_info(m) __bm_print_lock_info(m, __func__) static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func) { @@ -140,7 +130,7 @@ static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func) b->bm_task == mdev->worker.task ? "worker" : "?"); } -void drbd_bm_lock(struct drbd_conf *mdev, char *why) +void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags) { struct drbd_bitmap *b = mdev->bitmap; int trylock_failed; @@ -163,8 +153,9 @@ void drbd_bm_lock(struct drbd_conf *mdev, char *why) b->bm_task == mdev->worker.task ? 
"worker" : "?"); mutex_lock(&b->bm_change); } - if (__test_and_set_bit(BM_LOCKED, &b->bm_flags)) + if (BM_LOCKED_MASK & b->bm_flags) dev_err(DEV, "FIXME bitmap already locked in bm_lock\n"); + b->bm_flags |= flags & BM_LOCKED_MASK; b->bm_why = why; b->bm_task = current; @@ -178,9 +169,10 @@ void drbd_bm_unlock(struct drbd_conf *mdev) return; } - if (!__test_and_clear_bit(BM_LOCKED, &mdev->bitmap->bm_flags)) + if (!(BM_LOCKED_MASK & mdev->bitmap->bm_flags)) dev_err(DEV, "FIXME bitmap not locked in bm_unlock\n"); + b->bm_flags &= ~BM_LOCKED_MASK; b->bm_why = NULL; b->bm_task = NULL; mutex_unlock(&b->bm_change); @@ -421,9 +413,9 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want) } if (vmalloced) - set_bit(BM_P_VMALLOCED, &b->bm_flags); + b->bm_flags |= BM_P_VMALLOCED; else - clear_bit(BM_P_VMALLOCED, &b->bm_flags); + b->bm_flags &= ~BM_P_VMALLOCED; return new_pages; } @@ -460,7 +452,7 @@ void drbd_bm_cleanup(struct drbd_conf *mdev) { ERR_IF (!mdev->bitmap) return; bm_free_pages(mdev->bitmap->bm_pages, mdev->bitmap->bm_number_of_pages); - bm_vk_free(mdev->bitmap->bm_pages, test_bit(BM_P_VMALLOCED, &mdev->bitmap->bm_flags)); + bm_vk_free(mdev->bitmap->bm_pages, (BM_P_VMALLOCED & mdev->bitmap->bm_flags)); kfree(mdev->bitmap); mdev->bitmap = NULL; } @@ -623,7 +615,7 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits) ERR_IF(!b) return -ENOMEM; - drbd_bm_lock(mdev, "resize"); + drbd_bm_lock(mdev, "resize", BM_LOCKED_MASK); dev_info(DEV, "drbd_bm_resize called with capacity == %llu\n", (unsigned long long)capacity); @@ -631,7 +623,7 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits) if (capacity == b->bm_dev_capacity) goto out; - opages_vmalloced = test_bit(BM_P_VMALLOCED, &b->bm_flags); + opages_vmalloced = (BM_P_VMALLOCED & b->bm_flags); if (capacity == 0) { spin_lock_irq(&b->bm_lock); @@ -1030,7 +1022,7 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_id * as we submit copies of pages anyways. */ if (!ctx.flags) - WARN_ON(!bm_is_locked(b)); + WARN_ON(!(BM_LOCKED_MASK & b->bm_flags)); num_pages = b->bm_number_of_pages; @@ -1220,7 +1212,7 @@ static unsigned long bm_find_next(struct drbd_conf *mdev, ERR_IF(!b->bm_pages) return i; spin_lock_irq(&b->bm_lock); - if (bm_is_locked(b)) + if (BM_DONT_TEST & b->bm_flags) bm_print_lock_info(mdev); i = __bm_find_next(mdev, bm_fo, find_zero_bit, KM_IRQ1); @@ -1246,13 +1238,13 @@ unsigned long drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo * you must take drbd_bm_lock() first */ unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo) { - /* WARN_ON(!bm_is_locked(mdev)); */ + /* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */ return __bm_find_next(mdev, bm_fo, 0, KM_USER1); } unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo) { - /* WARN_ON(!bm_is_locked(mdev)); */ + /* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */ return __bm_find_next(mdev, bm_fo, 1, KM_USER1); } @@ -1322,7 +1314,7 @@ static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s, ERR_IF(!b->bm_pages) return 0; spin_lock_irqsave(&b->bm_lock, flags); - if (bm_is_locked(b)) + if ((val ? 
BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags) bm_print_lock_info(mdev); c = __bm_change_bits_to(mdev, s, e, val, KM_IRQ1); @@ -1439,7 +1431,7 @@ int drbd_bm_test_bit(struct drbd_conf *mdev, const unsigned long bitnr) ERR_IF(!b->bm_pages) return 0; spin_lock_irqsave(&b->bm_lock, flags); - if (bm_is_locked(b)) + if (BM_DONT_TEST & b->bm_flags) bm_print_lock_info(mdev); if (bitnr < b->bm_bits) { p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, bitnr)); @@ -1474,7 +1466,7 @@ int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsi ERR_IF(!b->bm_pages) return 1; spin_lock_irqsave(&b->bm_lock, flags); - if (bm_is_locked(b)) + if (BM_DONT_TEST & b->bm_flags) bm_print_lock_info(mdev); for (bitnr = s; bitnr <= e; bitnr++) { unsigned int idx = bm_bit_to_page_idx(b, bitnr); @@ -1522,7 +1514,7 @@ int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr) ERR_IF(!b->bm_pages) return 0; spin_lock_irqsave(&b->bm_lock, flags); - if (bm_is_locked(b)) + if (BM_DONT_TEST & b->bm_flags) bm_print_lock_info(mdev); s = S2W(enr); @@ -1555,7 +1547,7 @@ unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, unsigned long al_enr) ERR_IF(!b->bm_pages) return 0; spin_lock_irq(&b->bm_lock); - if (bm_is_locked(b)) + if (BM_DONT_SET & b->bm_flags) bm_print_lock_info(mdev); weight = b->bm_set; diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 0a9059eb94db..267d9897ca8c 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -855,6 +855,32 @@ enum { struct drbd_bitmap; /* opaque for drbd_conf */ +/* definition of bits in bm_flags to be used in drbd_bm_lock + * and drbd_bitmap_io and friends. */ +enum bm_flag { + /* do we need to kfree, or vfree bm_pages? */ + BM_P_VMALLOCED = 0x10000, /* internal use only, will be masked out */ + + /* currently locked for bulk operation */ + BM_LOCKED_MASK = 0x7, + + /* in detail, that is: */ + BM_DONT_CLEAR = 0x1, + BM_DONT_SET = 0x2, + BM_DONT_TEST = 0x4, + + /* (test bit, count bit) allowed (common case) */ + BM_LOCKED_TEST_ALLOWED = 0x3, + + /* testing bits, as well as setting new bits allowed, but clearing bits + * would be unexpected. Used during bitmap receive. Setting new bits + * requires sending of "out-of-sync" information, though. 
*/ + BM_LOCKED_SET_ALLOWED = 0x1, + + /* clear is not expected while bitmap is locked for bulk operation */ +}; + + /* TODO sort members for performance * MAYBE group them further */ @@ -920,6 +946,7 @@ struct drbd_md_io { struct bm_io_work { struct drbd_work w; char *why; + enum bm_flag flags; int (*io_fn)(struct drbd_conf *mdev); void (*done)(struct drbd_conf *mdev, int rv); }; @@ -1242,7 +1269,6 @@ extern void drbd_free_bc(struct drbd_backing_dev *ldev); extern void drbd_mdev_cleanup(struct drbd_conf *mdev); void drbd_print_uuids(struct drbd_conf *mdev, const char *text); -/* drbd_meta-data.c (still in drbd_main.c) */ extern void drbd_md_sync(struct drbd_conf *mdev); extern int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev); extern void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local); @@ -1263,10 +1289,12 @@ extern void drbd_md_mark_dirty_(struct drbd_conf *mdev, extern void drbd_queue_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), void (*done)(struct drbd_conf *, int), - char *why); + char *why, enum bm_flag flags); +extern int drbd_bitmap_io(struct drbd_conf *mdev, + int (*io_fn)(struct drbd_conf *), + char *why, enum bm_flag flags); extern int drbd_bmio_set_n_write(struct drbd_conf *mdev); extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev); -extern int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why); extern void drbd_go_diskless(struct drbd_conf *mdev); extern void drbd_ldev_destroy(struct drbd_conf *mdev); @@ -1452,7 +1480,7 @@ extern void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, extern void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, size_t number, unsigned long *buffer); -extern void drbd_bm_lock(struct drbd_conf *mdev, char *why); +extern void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags); extern void drbd_bm_unlock(struct drbd_conf *mdev); /* drbd_main.c */ diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index b68332a0e73e..a9e9b496e73b 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -1320,7 +1320,9 @@ static void abw_start_sync(struct drbd_conf *mdev, int rv) } } -int drbd_bitmap_io_from_worker(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why) +int drbd_bitmap_io_from_worker(struct drbd_conf *mdev, + int (*io_fn)(struct drbd_conf *), + char *why, enum bm_flag flags) { int rv; @@ -1328,10 +1330,8 @@ int drbd_bitmap_io_from_worker(struct drbd_conf *mdev, int (*io_fn)(struct drbd_ /* open coded non-blocking drbd_suspend_io(mdev); */ set_bit(SUSPEND_IO, &mdev->flags); - if (!is_susp(mdev->state)) - D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0); - drbd_bm_lock(mdev, why); + drbd_bm_lock(mdev, why, flags); rv = io_fn(mdev); drbd_bm_unlock(mdev); @@ -1438,7 +1438,8 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S && mdev->state.conn == C_WF_BITMAP_S) drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL, - "send_bitmap (WFBitMapS)"); + "send_bitmap (WFBitMapS)", + BM_LOCKED_TEST_ALLOWED); /* Lost contact to peer's copy of the data */ if ((os.pdsk >= D_INCONSISTENT && @@ -1469,7 +1470,11 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, /* D_DISKLESS Peer becomes secondary */ if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY) - drbd_bitmap_io_from_worker(mdev, &drbd_bm_write, "demote diskless peer"); + /* We may still be Primary ourselves. 
+ * No harm done if the bitmap still changes, + * redirtied pages will follow later. */ + drbd_bitmap_io_from_worker(mdev, &drbd_bm_write, + "demote diskless peer", BM_LOCKED_SET_ALLOWED); put_ldev(mdev); } @@ -1478,7 +1483,10 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, * if there is a resync going on still */ if (os.role == R_PRIMARY && ns.role == R_SECONDARY && mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) { - drbd_bitmap_io_from_worker(mdev, &drbd_bm_write, "demote"); + /* No changes to the bitmap expected this time, so assert that, + * even though no harm was done if it did change. */ + drbd_bitmap_io_from_worker(mdev, &drbd_bm_write, + "demote", BM_LOCKED_TEST_ALLOWED); put_ldev(mdev); } @@ -1512,12 +1520,17 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, /* We are in the progress to start a full sync... */ if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) || (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S)) - drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, &abw_start_sync, "set_n_write from StartingSync"); + /* no other bitmap changes expected during this phase */ + drbd_queue_bitmap_io(mdev, + &drbd_bmio_set_n_write, &abw_start_sync, + "set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED); /* We are invalidating our self... */ if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED && os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT) - drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate"); + /* other bitmap operation expected during this phase */ + drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, + "set_n_write from invalidate", BM_LOCKED_MASK); /* first half of local IO error, failure to attach, * or administrative detach */ @@ -1599,14 +1612,14 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, /* This triggers bitmap writeout of potentially still unwritten pages * if the resync finished cleanly, or aborted because of peer disk - * failure. Resync aborted because of connection failure does bitmap - * writeout from drbd_disconnect. + * failure, or because of connection loss. * For resync aborted because of local disk failure, we cannot do * any bitmap writeout anymore. + * No harm done if some bits change during this phase. 
*/ - if (os.conn > C_CONNECTED && ns.conn == C_CONNECTED && - mdev->state.conn == C_CONNECTED && get_ldev(mdev)) { - drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished"); + if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(mdev)) { + drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, + "write from resync_finished", BM_LOCKED_SET_ALLOWED); put_ldev(mdev); } @@ -3929,7 +3942,7 @@ static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused) D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0); if (get_ldev(mdev)) { - drbd_bm_lock(mdev, work->why); + drbd_bm_lock(mdev, work->why, work->flags); rv = work->io_fn(mdev); drbd_bm_unlock(mdev); put_ldev(mdev); @@ -3944,6 +3957,7 @@ static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused) clear_bit(BITMAP_IO_QUEUED, &mdev->flags); work->why = NULL; + work->flags = 0; return 1; } @@ -3998,7 +4012,7 @@ void drbd_go_diskless(struct drbd_conf *mdev) void drbd_queue_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), void (*done)(struct drbd_conf *, int), - char *why) + char *why, enum bm_flag flags) { D_ASSERT(current == mdev->worker.task); @@ -4012,6 +4026,7 @@ void drbd_queue_bitmap_io(struct drbd_conf *mdev, mdev->bm_io_work.io_fn = io_fn; mdev->bm_io_work.done = done; mdev->bm_io_work.why = why; + mdev->bm_io_work.flags = flags; spin_lock_irq(&mdev->req_lock); set_bit(BITMAP_IO, &mdev->flags); @@ -4031,19 +4046,22 @@ void drbd_queue_bitmap_io(struct drbd_conf *mdev, * freezes application IO while that the actual IO operations runs. This * functions MAY NOT be called from worker context. */ -int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why) +int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), + char *why, enum bm_flag flags) { int rv; D_ASSERT(current != mdev->worker.task); - drbd_suspend_io(mdev); + if ((flags & BM_LOCKED_SET_ALLOWED) == 0) + drbd_suspend_io(mdev); - drbd_bm_lock(mdev, why); + drbd_bm_lock(mdev, why, flags); rv = io_fn(mdev); drbd_bm_unlock(mdev); - drbd_resume_io(mdev); + if ((flags & BM_LOCKED_SET_ALLOWED) == 0) + drbd_resume_io(mdev); return rv; } diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index ce6f2fe80852..bc0bcb964603 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -648,7 +648,9 @@ enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, enum dds_ dev_info(DEV, "Writing the whole bitmap, %s\n", la_size_changed && md_moved ? "size changed and md moved" : la_size_changed ? "size changed" : "md moved"); - err = drbd_bitmap_io(mdev, &drbd_bm_write, "size changed"); /* does drbd_resume_io() ! 
*/ + /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */ + err = drbd_bitmap_io(mdev, &drbd_bm_write, + "size changed", BM_LOCKED_MASK); if (err) { rv = dev_size_error; goto out; @@ -1160,12 +1162,14 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) { dev_info(DEV, "Assuming that all blocks are out of sync " "(aka FullSync)\n"); - if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from attaching")) { + if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, + "set_n_write from attaching", BM_LOCKED_MASK)) { retcode = ERR_IO_MD_DISK; goto force_diskless_dec; } } else { - if (drbd_bitmap_io(mdev, &drbd_bm_read, "read from attaching") < 0) { + if (drbd_bitmap_io(mdev, &drbd_bm_read, + "read from attaching", BM_LOCKED_MASK) < 0) { retcode = ERR_IO_MD_DISK; goto force_diskless_dec; } @@ -1173,7 +1177,8 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp if (cp_discovered) { drbd_al_apply_to_bm(mdev); - if (drbd_bitmap_io(mdev, &drbd_bm_write, "crashed primary apply AL")) { + if (drbd_bitmap_io(mdev, &drbd_bm_write, + "crashed primary apply AL", BM_LOCKED_MASK)) { retcode = ERR_IO_MD_DISK; goto force_diskless_dec; } @@ -1925,7 +1930,8 @@ static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT)); if (retcode >= SS_SUCCESS) { if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al, - "set_n_write from invalidate_peer")) + "set_n_write from invalidate_peer", + BM_LOCKED_SET_ALLOWED)) retcode = ERR_IO_MD_DISK; } } else @@ -2143,7 +2149,8 @@ static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */ if (args.clear_bm) { - err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, "clear_n_write from new_c_uuid"); + err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, + "clear_n_write from new_c_uuid", BM_LOCKED_MASK); if (err) { dev_err(DEV, "Writing bitmap failed with %d\n",err); retcode = ERR_IO_MD_DISK; diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index e5686a81f42c..e13134f83fae 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -2599,7 +2599,8 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol if (abs(hg) >= 2) { dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n"); - if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake")) + if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake", + BM_LOCKED_SET_ALLOWED)) return C_MASK; } @@ -3053,7 +3054,8 @@ static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned if (skip_initial_sync) { dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n"); drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, - "clear_n_write from receive_uuids"); + "clear_n_write from receive_uuids", + BM_LOCKED_TEST_ALLOWED); _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]); _drbd_uuid_set(mdev, UI_BITMAP, 0); _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE), @@ -3494,7 +3496,9 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne int ok = false; struct p_header80 *h = &mdev->data.rbuf.header.h80; - /* drbd_bm_lock(mdev, "receive bitmap"); By intention no bm_lock */ + drbd_bm_lock(mdev, "receive 
bitmap", BM_LOCKED_SET_ALLOWED); + /* you are supposed to send additional out-of-sync information + * if you actually set bits during this phase */ /* maybe we should use some per thread scratch page, * and allocate that during initial device creation? */ @@ -3568,7 +3572,7 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne ok = true; out: - /* drbd_bm_unlock(mdev); by intention no lock */ + drbd_bm_unlock(mdev); if (ok && mdev->state.conn == C_WF_BITMAP_S) drbd_start_resync(mdev, C_SYNC_SOURCE); free_page((unsigned long) buffer); @@ -3817,7 +3821,6 @@ static void drbd_disconnect(struct drbd_conf *mdev) fp = FP_DONT_CARE; if (get_ldev(mdev)) { - drbd_bitmap_io(mdev, &drbd_bm_write, "write from disconnect"); fp = mdev->ldev->dc.fencing; put_ldev(mdev); } @@ -3846,6 +3849,10 @@ static void drbd_disconnect(struct drbd_conf *mdev) drbd_request_state(mdev, NS(conn, C_STANDALONE)); } + /* serialize with bitmap writeout triggered by the state change, + * if any. */ + wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags)); + /* tcp_close and release of sendpage pages can be deferred. I don't * want to use SO_LINGER, because apparently it can be deferred for * more than 20 seconds (longest time I checked). From 0ddc5549f88dfc4a4c919693e9a86095e89e080b Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Fri, 21 Jan 2011 12:35:15 +0100 Subject: [PATCH 101/122] drbd: silence some noisy log messages during disconnect If we fail to send the information that we lost our disk, we have no connection, and no disk: no access to data anymore. That is either expected (deconfiguration), or there will be so much noise in the logs that "Sending state failed" is not useful at all. Drop it. If the reason for a shorter than expected receive was a signal, which we sent because we already decided to disconnect, these additional log messages are confusing and useless. This patch follows this pattern: - dev_warn(DEV, "short read expecting header on sock: r=%d\n", r); + if (!signal_pending(current)) + dev_warn(DEV, "short read expecting header on sock: r=%d\n", r); Also make them all dev_warn for consistency. Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_main.c | 6 ++-- drivers/block/drbd/drbd_receiver.c | 45 +++++++++++++++++++----------- 2 files changed, 31 insertions(+), 20 deletions(-) diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index a9e9b496e73b..c3760f33d52c 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -1585,8 +1585,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, if (drbd_send_state(mdev)) dev_warn(DEV, "Notified peer that I'm now diskless.\n"); - else - dev_err(DEV, "Sending state for being diskless failed\n"); /* corresponding get_ldev in __drbd_set_state * this may finaly trigger drbd_ldev_destroy. 
*/ put_ldev(mdev); @@ -1868,8 +1866,8 @@ int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock, sent = drbd_send(mdev, sock, h, size, msg_flags); ok = (sent == size); - if (!ok) - dev_err(DEV, "short sent %s size=%d sent=%d\n", + if (!ok && !signal_pending(current)) + dev_warn(DEV, "short sent %s size=%d sent=%d\n", cmdname(cmd), (int)size, sent); return ok; } diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index e13134f83fae..a56b107e01eb 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -930,7 +930,8 @@ static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsi r = drbd_recv(mdev, h, sizeof(*h)); if (unlikely(r != sizeof(*h))) { - dev_err(DEV, "short read expecting header on sock: r=%d\n", r); + if (!signal_pending(current)) + dev_warn(DEV, "short read expecting header on sock: r=%d\n", r); return false; } @@ -1229,8 +1230,10 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __ if (dgs) { rr = drbd_recv(mdev, dig_in, dgs); if (rr != dgs) { - dev_warn(DEV, "short read receiving data digest: read %d expected %d\n", - rr, dgs); + if (!signal_pending(current)) + dev_warn(DEV, + "short read receiving data digest: read %d expected %d\n", + rr, dgs); return NULL; } } @@ -1270,8 +1273,9 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __ kunmap(page); if (rr != len) { drbd_free_ee(mdev, e); - dev_warn(DEV, "short read receiving data: read %d expected %d\n", - rr, len); + if (!signal_pending(current)) + dev_warn(DEV, "short read receiving data: read %d expected %d\n", + rr, len); return NULL; } ds -= rr; @@ -1311,8 +1315,10 @@ static int drbd_drain_block(struct drbd_conf *mdev, int data_size) rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE)); if (rr != min_t(int, data_size, PAGE_SIZE)) { rv = 0; - dev_warn(DEV, "short read receiving data: read %d expected %d\n", - rr, min_t(int, data_size, PAGE_SIZE)); + if (!signal_pending(current)) + dev_warn(DEV, + "short read receiving data: read %d expected %d\n", + rr, min_t(int, data_size, PAGE_SIZE)); break; } data_size -= rr; @@ -1337,8 +1343,10 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req, if (dgs) { rr = drbd_recv(mdev, dig_in, dgs); if (rr != dgs) { - dev_warn(DEV, "short read receiving data reply digest: read %d expected %d\n", - rr, dgs); + if (!signal_pending(current)) + dev_warn(DEV, + "short read receiving data reply digest: read %d expected %d\n", + rr, dgs); return 0; } } @@ -1359,9 +1367,10 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req, expect); kunmap(bvec->bv_page); if (rr != expect) { - dev_warn(DEV, "short read receiving data reply: " - "read %d expected %d\n", - rr, expect); + if (!signal_pending(current)) + dev_warn(DEV, "short read receiving data reply: " + "read %d expected %d\n", + rr, expect); return 0; } data_size -= rr; @@ -3696,7 +3705,8 @@ static void drbdd(struct drbd_conf *mdev) if (shs) { rv = drbd_recv(mdev, &header->h80.payload, shs); if (unlikely(rv != shs)) { - dev_err(DEV, "short read while reading sub header: rv=%d\n", rv); + if (!signal_pending(current)) + dev_warn(DEV, "short read while reading sub header: rv=%d\n", rv); goto err_out; } } @@ -3953,7 +3963,8 @@ static int drbd_do_handshake(struct drbd_conf *mdev) rv = drbd_recv(mdev, &p->head.payload, expect); if (rv != expect) { - dev_err(DEV, "short read receiving handshake packet: l=%u\n", rv); + if 
(!signal_pending(current)) + dev_warn(DEV, "short read receiving handshake packet: l=%u\n", rv); return 0; } @@ -4055,7 +4066,8 @@ static int drbd_do_auth(struct drbd_conf *mdev) rv = drbd_recv(mdev, peers_ch, length); if (rv != length) { - dev_err(DEV, "short read AuthChallenge: l=%u\n", rv); + if (!signal_pending(current)) + dev_warn(DEV, "short read AuthChallenge: l=%u\n", rv); rv = 0; goto fail; } @@ -4102,7 +4114,8 @@ static int drbd_do_auth(struct drbd_conf *mdev) rv = drbd_recv(mdev, response , resp_size); if (rv != resp_size) { - dev_err(DEV, "short read receiving AuthResponse: l=%u\n", rv); + if (!signal_pending(current)) + dev_warn(DEV, "short read receiving AuthResponse: l=%u\n", rv); rv = 0; goto fail; } From e636db5b956950b8b9bfbeb766a637f84bae1e3b Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Fri, 21 Jan 2011 17:10:37 +0100 Subject: [PATCH 102/122] drbd: fix potential imbalance of ap_in_flight When we receive a barrier ack, we walk the ring list of drbd requests in the transfer log of the respective epoch, do some housekeeping, and free those objects. We tried to keep epochs of mirrored and unmirrored drbd requests separate, and assert that no local-only requests are present in a barrier_acked epoch. It turns out that this has quite a number of corner cases and would add bloated code without functional benefit. We now revert the (insufficient) commits drbd: Fixed an issue with AHEAD -> SYNC_SOURCE transitions drbd: Ensure that an epoch contains only requests of one kind and instead fix the processing of barrier acks to cope with a mix of local-only and mirrored requests. Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_main.c | 25 ------------------------- drivers/block/drbd/drbd_req.c | 9 +++++---- 2 files changed, 5 insertions(+), 29 deletions(-) diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index c3760f33d52c..9043772de400 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -337,23 +337,6 @@ bail: } -/* In C_AHEAD mode only out_of_sync packets are sent for requests. Detach - * those requests from the newsest barrier when changing to an other cstate. - * - * That headless list vanishes when the last request finished its write or - * send out_of_sync packet. */ -static void tl_forget(struct drbd_conf *mdev) -{ - struct drbd_tl_epoch *b; - - if (test_bit(CREATE_BARRIER, &mdev->flags)) - return; - - b = mdev->newest_tle; - list_del(&b->requests); - _tl_add_barrier(mdev, b); -} - /** * _tl_restart() - Walks the transfer log, and applies an action to all requests * @mdev: DRBD device. 
@@ -1265,14 +1248,6 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns, if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) drbd_resume_al(mdev); - /* Start a new epoch in case we start to mirror write requests */ - if (!drbd_should_do_remote(os) && drbd_should_do_remote(ns)) - tl_forget(mdev); - - /* Do not add local-only requests to an epoch with mirrored requests */ - if (drbd_should_do_remote(os) && !drbd_should_do_remote(ns)) - set_bit(CREATE_BARRIER, &mdev->flags); - ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC); if (ascw) { ascw->os = os; diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 336937a14d3f..c28be4e5e57c 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -712,10 +712,11 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, dev_err(DEV, "FIXME (barrier_acked but pending)\n"); list_move(&req->tl_requests, &mdev->out_of_sequence_requests); } - D_ASSERT(req->rq_state & RQ_NET_SENT); - req->rq_state |= RQ_NET_DONE; - if (mdev->net_conf->wire_protocol == DRBD_PROT_A) - atomic_sub(req->size>>9, &mdev->ap_in_flight); + if ((req->rq_state & RQ_NET_MASK) != 0) { + req->rq_state |= RQ_NET_DONE; + if (mdev->net_conf->wire_protocol == DRBD_PROT_A) + atomic_sub(req->size>>9, &mdev->ap_in_flight); + } _req_may_be_done(req, m); /* Allowed while state.susp */ break; From 873b0d5f98ab70e4df7a62b2ef0305373f88f330 Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Fri, 21 Jan 2011 22:53:48 +0100 Subject: [PATCH 103/122] drbd: serialize admin requests for new verify run with pending bitmap io This is an addendum to drbd: serialize admin requests for new resync with pending bitmap io It avoids a race that could trigger "FIXME" assert log messages. Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_nl.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index bc0bcb964603..87aaa7fc4d27 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -2106,6 +2106,11 @@ static int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, reply->ret_code = ERR_MANDATORY_TAG; return 0; } + + /* If there is still bitmap IO pending, e.g. previous resync or verify + * just being finished, wait for it before requesting a new resync. */ + wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags)); + /* w_make_ov_request expects position to be aligned */ mdev->ov_start_sector = args.start_sector & ~BM_SECT_PER_BIT; reply->ret_code = drbd_request_state(mdev,NS(conn,C_VERIFY_S)); From 039312b6481e2928f3be19fee94c83327d93e4c7 Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Fri, 21 Jan 2011 14:13:22 +0100 Subject: [PATCH 104/122] drbd: Removed left over, now wrong comments Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_req.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index c28be4e5e57c..94fd5a2be559 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -983,13 +983,7 @@ allocate_barrier: } if (congested) { - /* rs_pending_cnt must be zero, otherwise the two peers - might get different bitmaps. With sane configurations - the resync stalls long before we might want to go into - AHEAD mode. - We could force the resync into PAUSE mode here if - rs_pending_cnt is > 0 ... 
*/ - queue_barrier(mdev); + queue_barrier(mdev); /* last barrier, after mirrored writes */ if (mdev->net_conf->on_congestion == OC_PULL_AHEAD) _drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL); From 10f6d9926cd17afff9dc03c967706419798b4929 Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Mon, 24 Jan 2011 14:47:09 +0100 Subject: [PATCH 105/122] drbd: don't BUG_ON, if bio_add_page of a single page to an empty bio fails Just deal with it more gracefully, if we fail to add even a single page to an empty bio. We used to BUG_ON() there, but it has been observed in some Xen deployment, so we need to handle that case more robustly now. Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_receiver.c | 45 ++++++++++++++++++++---------- drivers/block/drbd/drbd_worker.c | 7 +++-- 2 files changed, 34 insertions(+), 18 deletions(-) diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index a56b107e01eb..9e9fc3413137 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -1073,6 +1073,16 @@ void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) * @mdev: DRBD device. * @e: epoch entry * @rw: flag field, see bio->bi_rw + * + * May spread the pages to multiple bios, + * depending on bio_add_page restrictions. + * + * Returns 0 if all bios have been submitted, + * -ENOMEM if we could not allocate enough bios, + * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a + * single page to an empty bio (which should never happen and likely indicates + * that the lower level IO stack is in some way broken). This has been observed + * on certain Xen deployments. */ /* TODO allocate from our own bio_set. */ int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, @@ -1085,6 +1095,7 @@ int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, unsigned ds = e->size; unsigned n_bios = 0; unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT; + int err = -ENOMEM; /* In most cases, we will only need one bio. But in case the lower * level restrictions happen to be different at this offset on this @@ -1110,8 +1121,17 @@ next_bio: page_chain_for_each(page) { unsigned len = min_t(unsigned, ds, PAGE_SIZE); if (!bio_add_page(bio, page, len, 0)) { - /* a single page must always be possible! */ - BUG_ON(bio->bi_vcnt == 0); + /* A single page must always be possible! + * But in case it fails anyways, + * we deal with it, and complain (below). */ + if (bio->bi_vcnt == 0) { + dev_err(DEV, + "bio_add_page failed for len=%u, " + "bi_vcnt=0 (bi_sector=%llu)\n", + len, (unsigned long long)bio->bi_sector); + err = -ENOSPC; + goto fail; + } goto next_bio; } ds -= len; @@ -1137,7 +1157,7 @@ fail: bios = bios->bi_next; bio_put(bio); } - return -ENOMEM; + return err; } static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) @@ -1436,9 +1456,8 @@ static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_si if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0) return true; - /* drbd_submit_ee currently fails for one reason only: - * not being able to allocate enough bios. - * Is dropping the connection going to help? 
*/ + /* don't care for the reason here */ + dev_err(DEV, "submit failed, triggering re-connect\n"); spin_lock_irq(&mdev->req_lock); list_del(&e->w.list); spin_unlock_irq(&mdev->req_lock); @@ -1837,9 +1856,8 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0) return true; - /* drbd_submit_ee currently fails for one reason only: - * not being able to allocate enough bios. - * Is dropping the connection going to help? */ + /* don't care for the reason here */ + dev_err(DEV, "submit failed, triggering re-connect\n"); spin_lock_irq(&mdev->req_lock); list_del(&e->w.list); hlist_del_init(&e->colision); @@ -1848,9 +1866,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned drbd_al_complete_io(mdev, e->sector); out_interrupted: - /* yes, the epoch_size now is imbalanced. - * but we drop the connection anyways, so we don't have a chance to - * receive a barrier... atomic_inc(&mdev->epoch_size); */ + drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + EV_CLEANUP); put_ldev(mdev); drbd_free_ee(mdev, e); return false; @@ -2096,9 +2112,8 @@ submit: if (drbd_submit_ee(mdev, e, READ, fault_type) == 0) return true; - /* drbd_submit_ee currently fails for one reason only: - * not being able to allocate enough bios. - * Is dropping the connection going to help? */ + /* don't care for the reason here */ + dev_err(DEV, "submit failed, triggering re-connect\n"); spin_lock_irq(&mdev->req_lock); list_del(&e->w.list); spin_unlock_irq(&mdev->req_lock); diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index ff0eb308ee4a..cfd324b9f95b 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -369,9 +369,10 @@ static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size) if (drbd_submit_ee(mdev, e, READ, DRBD_FAULT_RS_RD) == 0) return 0; - /* drbd_submit_ee currently fails for one reason only: - * not being able to allocate enough bios. - * Is dropping the connection going to help? */ + /* If it failed because of ENOMEM, retry should help. If it failed + * because bio_add_page failed (probably broken lower level driver), + * retry may or may not help. + * If it does not, you may need to force disconnect. 
*/ spin_lock_irq(&mdev->req_lock); list_del(&e->w.list); spin_unlock_irq(&mdev->req_lock); From e99dc367b3aafb1ce2d5d92e94834d07b299e1d7 Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Mon, 24 Jan 2011 14:58:39 +0100 Subject: [PATCH 106/122] drbd: kill dead code This code became obsolete and unused last December with drbd: bitmap keep track of changes vs on-disk bitmap Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_actlog.c | 93 -------------------------------- 1 file changed, 93 deletions(-) diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c index 090fc2ce0df4..a6050791401b 100644 --- a/drivers/block/drbd/drbd_actlog.c +++ b/drivers/block/drbd/drbd_actlog.c @@ -569,99 +569,6 @@ static void atodb_endio(struct bio *bio, int error) put_ldev(mdev); } -/* sector to word */ -#define S2W(s) ((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL)) - -/* activity log to on disk bitmap -- prepare bio unless that sector - * is already covered by previously prepared bios */ -static int atodb_prepare_unless_covered(struct drbd_conf *mdev, - struct bio **bios, - unsigned int enr, - struct drbd_atodb_wait *wc) __must_hold(local) -{ - struct bio *bio; - struct page *page; - sector_t on_disk_sector; - unsigned int page_offset = PAGE_SIZE; - int offset; - int i = 0; - int err = -ENOMEM; - - /* We always write aligned, full 4k blocks, - * so we can ignore the logical_block_size (for now) */ - enr &= ~7U; - on_disk_sector = enr + mdev->ldev->md.md_offset - + mdev->ldev->md.bm_offset; - - D_ASSERT(!(on_disk_sector & 7U)); - - /* Check if that enr is already covered by an already created bio. - * Caution, bios[] is not NULL terminated, - * but only initialized to all NULL. - * For completely scattered activity log, - * the last invocation iterates over all bios, - * and finds the last NULL entry. - */ - while ((bio = bios[i])) { - if (bio->bi_sector == on_disk_sector) - return 0; - i++; - } - /* bios[i] == NULL, the next not yet used slot */ - - /* GFP_KERNEL, we are not in the write-out path */ - bio = bio_alloc(GFP_KERNEL, 1); - if (bio == NULL) - return -ENOMEM; - - if (i > 0) { - const struct bio_vec *prev_bv = bios[i-1]->bi_io_vec; - page_offset = prev_bv->bv_offset + prev_bv->bv_len; - page = prev_bv->bv_page; - } - if (page_offset == PAGE_SIZE) { - page = alloc_page(__GFP_HIGHMEM); - if (page == NULL) - goto out_bio_put; - page_offset = 0; - } else { - get_page(page); - } - - offset = S2W(enr); - drbd_bm_get_lel(mdev, offset, - min_t(size_t, S2W(8), drbd_bm_words(mdev) - offset), - kmap(page) + page_offset); - kunmap(page); - - bio->bi_private = wc; - bio->bi_end_io = atodb_endio; - bio->bi_bdev = mdev->ldev->md_bdev; - bio->bi_sector = on_disk_sector; - - if (bio_add_page(bio, page, 4096, page_offset) != 4096) - goto out_put_page; - - atomic_inc(&wc->count); - /* we already know that we may do this... - * get_ldev_if_state(mdev,D_ATTACHING); - * just get the extra reference, so that the local_cnt reflects - * the number of pending IO requests DRBD at its backing device. - */ - atomic_inc(&mdev->local_cnt); - - bios[i] = bio; - - return 0; - -out_put_page: - err = -EINVAL; - put_page(page); -out_bio_put: - bio_put(bio); - return err; -} - /** * drbd_al_apply_to_bm() - Sets the bitmap to diry(1) where covered ba active AL extents * @mdev: DRBD device. 
From fdda6544ad4d3284246e717b7108f7f497b45295 Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Mon, 24 Jan 2011 15:11:01 +0100 Subject: [PATCH 107/122] drbd: improve log message if received sector offset exceeds local capacity Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_receiver.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 9e9fc3413137..3d62ac7cdc4a 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -1267,7 +1267,8 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __ /* even though we trust out peer, * we sometimes have to double check. */ if (sector + (data_size>>9) > capacity) { - dev_err(DEV, "capacity: %llus < sector: %llus + size: %u\n", + dev_err(DEV, "request from peer beyond end of local disk: " + "capacity: %llus < sector: %llus + size: %u\n", (unsigned long long)capacity, (unsigned long long)sector, data_size); return NULL; From c5a91619793d444e5103ec5841045bf878718398 Mon Sep 17 00:00:00 2001 From: Andreas Gruenbacher Date: Tue, 25 Jan 2011 17:33:38 +0100 Subject: [PATCH 108/122] drbd: Remove unused function atodb_endio() Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_actlog.c | 27 --------------------------- drivers/block/drbd/drbd_worker.c | 15 ++++++--------- include/linux/drbd.h | 2 +- 3 files changed, 7 insertions(+), 37 deletions(-) diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c index a6050791401b..2a1642bc451d 100644 --- a/drivers/block/drbd/drbd_actlog.c +++ b/drivers/block/drbd/drbd_actlog.c @@ -542,33 +542,6 @@ cancel: return 1; } -static void atodb_endio(struct bio *bio, int error) -{ - struct drbd_atodb_wait *wc = bio->bi_private; - struct drbd_conf *mdev = wc->mdev; - struct page *page; - int uptodate = bio_flagged(bio, BIO_UPTODATE); - - /* strange behavior of some lower level drivers... - * fail the request by clearing the uptodate flag, - * but do not return any error?! */ - if (!error && !uptodate) - error = -EIO; - - drbd_chk_io_error(mdev, error, true); - if (error && wc->error == 0) - wc->error = error; - - if (atomic_dec_and_test(&wc->count)) - complete(&wc->io_done); - - page = bio->bi_io_vec[0].bv_page; - put_page(page); - bio_put(bio); - mdev->bm_writ_cnt++; - put_ldev(mdev); -} - /** * drbd_al_apply_to_bm() - Sets the bitmap to diry(1) where covered ba active AL extents * @mdev: DRBD device. diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index cfd324b9f95b..3d70d8d015d9 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -44,15 +44,12 @@ static int w_make_resync_request(struct drbd_conf *mdev, -/* defined here: - drbd_md_io_complete - drbd_endio_sec - drbd_endio_pri - - * more endio handlers: - atodb_endio in drbd_actlog.c - drbd_bm_async_io_complete in drbd_bitmap.c - +/* endio handlers: + * drbd_md_io_complete (defined here) + * drbd_endio_pri (defined here) + * drbd_endio_sec (defined here) + * bm_async_io_complete (defined in drbd_bitmap.c) + * * For all these callbacks, note the following: * The callbacks will be called in irq context by the IDE drivers, * and in Softirqs/Tasklets/BH context by the SCSI drivers. 
diff --git a/include/linux/drbd.h b/include/linux/drbd.h index ba5c785d3f7d..d18d673ebc78 100644 --- a/include/linux/drbd.h +++ b/include/linux/drbd.h @@ -53,7 +53,7 @@ extern const char *drbd_buildtag(void); -#define REL_VERSION "8.3.9" +#define REL_VERSION "8.3.10" #define API_VERSION 88 #define PRO_VERSION_MIN 86 #define PRO_VERSION_MAX 96 From 7fde2be93080c028c20078a2d6abec8a95891192 Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Tue, 1 Mar 2011 11:08:28 +0100 Subject: [PATCH 109/122] drbd: Implemented real timeout checking for request processing time Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_int.h | 1 + drivers/block/drbd/drbd_main.c | 3 +++ drivers/block/drbd/drbd_receiver.c | 3 +++ drivers/block/drbd/drbd_req.c | 39 ++++++++++++++++++++++++++++++ drivers/block/drbd/drbd_req.h | 1 + 5 files changed, 47 insertions(+) diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 267d9897ca8c..81030d8d654b 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -990,6 +990,7 @@ struct drbd_conf { struct timer_list resync_timer; struct timer_list md_sync_timer; struct timer_list start_resync_timer; + struct timer_list request_timer; #ifdef DRBD_DEBUG_MD_SYNC struct { unsigned int line; diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 9043772de400..dfc85f32d317 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -3017,12 +3017,15 @@ void drbd_init_set_defaults(struct drbd_conf *mdev) init_timer(&mdev->resync_timer); init_timer(&mdev->md_sync_timer); init_timer(&mdev->start_resync_timer); + init_timer(&mdev->request_timer); mdev->resync_timer.function = resync_timer_fn; mdev->resync_timer.data = (unsigned long) mdev; mdev->md_sync_timer.function = md_sync_timer_fn; mdev->md_sync_timer.data = (unsigned long) mdev; mdev->start_resync_timer.function = start_resync_timer_fn; mdev->start_resync_timer.data = (unsigned long) mdev; + mdev->request_timer.function = request_timer_fn; + mdev->request_timer.data = (unsigned long) mdev; init_waitqueue_head(&mdev->misc_wait); init_waitqueue_head(&mdev->state_wait); diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 3d62ac7cdc4a..fe1564c7d8b6 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -912,6 +912,7 @@ retry: drbd_send_state(mdev); clear_bit(USE_DEGR_WFC_T, &mdev->flags); clear_bit(RESIZE_PENDING, &mdev->flags); + mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. 
*/ return 1; @@ -3822,6 +3823,8 @@ static void drbd_disconnect(struct drbd_conf *mdev) atomic_set(&mdev->rs_pending_cnt, 0); wake_up(&mdev->misc_wait); + del_timer(&mdev->request_timer); + /* make sure syncer is stopped and w_resume_next_sg queued */ del_timer_sync(&mdev->resync_timer); resync_timer_fn((unsigned long)mdev); diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 94fd5a2be559..c2cc28a55907 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -1194,3 +1194,42 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct } return limit; } + +void request_timer_fn(unsigned long data) +{ + struct drbd_conf *mdev = (struct drbd_conf *) data; + struct drbd_request *req; /* oldest request */ + struct list_head *le; + unsigned long et = 0; /* effective timeout = ko_count * timeout */ + + if (get_net_conf(mdev)) { + et = mdev->net_conf->timeout*HZ/10 * mdev->net_conf->ko_count; + put_net_conf(mdev); + } + if (!et || mdev->state.conn < C_WF_REPORT_PARAMS) + return; /* Recurring timer stopped */ + + spin_lock_irq(&mdev->req_lock); + le = &mdev->oldest_tle->requests; + if (list_empty(le)) { + spin_unlock_irq(&mdev->req_lock); + mod_timer(&mdev->request_timer, jiffies + et); + return; + } + + le = le->prev; + req = list_entry(le, struct drbd_request, tl_requests); + if (time_is_before_eq_jiffies(req->start_time + et)) { + if (req->rq_state & RQ_NET_PENDING) { + dev_warn(DEV, "Remote failed to finish a request within ko-count * timeout\n"); + _drbd_set_state(_NS(mdev, conn, C_TIMEOUT), CS_VERBOSE, NULL); + } else { + dev_warn(DEV, "Local backing block device frozen?\n"); + mod_timer(&mdev->request_timer, jiffies + et); + } + } else { + mod_timer(&mdev->request_timer, req->start_time + et); + } + + spin_unlock_irq(&mdev->req_lock); +} diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h index 32c1f2a31266..32e2c3e6a813 100644 --- a/drivers/block/drbd/drbd_req.h +++ b/drivers/block/drbd/drbd_req.h @@ -322,6 +322,7 @@ extern int __req_mod(struct drbd_request *req, enum drbd_req_event what, struct bio_and_error *m); extern void complete_master_bio(struct drbd_conf *mdev, struct bio_and_error *m); +extern void request_timer_fn(unsigned long data); /* use this if you don't want to deal with calling complete_master_bio() * outside the spinlock, e.g. when walking some list on cleanup. */ From 8f21420ebd5ca5a751e2f606b49b0acd2a2af314 Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Tue, 1 Mar 2011 15:52:35 +0100 Subject: [PATCH 110/122] drbd: Fixed handling of read errors on a 'VerifyT' node Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_worker.c | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 3d70d8d015d9..7db29080f363 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -1081,25 +1081,27 @@ int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel) if (unlikely(cancel)) goto out; - if (unlikely((e->flags & EE_WAS_ERROR) != 0)) - goto out; - digest_size = crypto_hash_digestsize(mdev->verify_tfm); - /* FIXME if this allocation fails, online verify will not terminate! 
*/ digest = kmalloc(digest_size, GFP_NOIO); - if (digest) { - drbd_csum_ee(mdev, mdev->verify_tfm, e, digest); - inc_rs_pending(mdev); - ok = drbd_send_drequest_csum(mdev, e->sector, e->size, - digest, digest_size, P_OV_REPLY); - if (!ok) - dec_rs_pending(mdev); - kfree(digest); + if (!digest) { + ok = 0; /* terminate the connection in case the allocation failed */ + goto out; } + if (likely(!(e->flags & EE_WAS_ERROR))) + drbd_csum_ee(mdev, mdev->verify_tfm, e, digest); + else + memset(digest, 0, digest_size); + + inc_rs_pending(mdev); + ok = drbd_send_drequest_csum(mdev, e->sector, e->size, + digest, digest_size, P_OV_REPLY); + if (!ok) + dec_rs_pending(mdev); + kfree(digest); + out: drbd_free_ee(mdev, e); - dec_unacked(mdev); return ok; From 7961243b7bdd62d72b47eb2c0bee776c51a8a8e2 Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Wed, 2 Mar 2011 23:14:44 +0100 Subject: [PATCH 111/122] drbd: Fixed handling of read errors on a 'VerifyS' node Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_worker.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 7db29080f363..f7e6c92f8d03 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -1151,10 +1151,6 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel) eq = !memcmp(digest, di->digest, digest_size); kfree(digest); } - } else { - ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e); - if (__ratelimit(&drbd_ratelimit_state)) - dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n"); } dec_unacked(mdev); From 03567812d81dd87a810b2bd1e804f4001de03da5 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Thu, 13 Jan 2011 10:43:40 +0100 Subject: [PATCH 112/122] drbd: drop code present under #ifdef which is relevant to 2.6.28 and below Signed-off-by: Or Gerlitz Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_req.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index c2cc28a55907..5c0c8be1bb0a 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -1127,11 +1127,7 @@ int drbd_make_request(struct request_queue *q, struct bio *bio) const int sps = 1 << HT_SHIFT; /* sectors per slot */ const int mask = sps - 1; const sector_t first_sectors = sps - (sect & mask); - bp = bio_split(bio, -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) - bio_split_pool, -#endif - first_sectors); + bp = bio_split(bio, first_sectors); /* we need to get a "reference count" (ap_bio_cnt) * to avoid races with the disconnect/reconnect/suspend code. From 957c2ec558caff09a3bdf333871fc617830f063d Mon Sep 17 00:00:00 2001 From: "Stephen M. Cameron" Date: Fri, 11 Mar 2011 20:06:09 +0100 Subject: [PATCH 113/122] cciss: export resettable host attribute This attribute, requested by Redhat, allows kexec-tools to know whether the controller can honor the reset_devices kernel parameter and actually reset the controller. For kdump to work properly it is necessary that the reset_devices parameter be honored. This attribute enables kexec-tools to warn the user if they attempt to designate a non-resettable controller as the dump device. Signed-off-by: Stephen M. 
Cameron Signed-off-by: Jens Axboe --- .../ABI/testing/sysfs-bus-pci-devices-cciss | 12 ++++++ drivers/block/cciss.c | 39 +++++++++++++++++++ 2 files changed, 51 insertions(+) diff --git a/Documentation/ABI/testing/sysfs-bus-pci-devices-cciss b/Documentation/ABI/testing/sysfs-bus-pci-devices-cciss index 4f29e5f1ebfa..f5bb0a3bb8c0 100644 --- a/Documentation/ABI/testing/sysfs-bus-pci-devices-cciss +++ b/Documentation/ABI/testing/sysfs-bus-pci-devices-cciss @@ -59,3 +59,15 @@ Kernel Version: 2.6.31 Contact: iss_storagedev@hp.com Description: Displays the usage count (number of opens) of logical drive Y of controller X. + +Where: /sys/bus/pci/devices//ccissX/resettable +Date: February 2011 +Kernel Version: 2.6.38 +Contact: iss_storagedev@hp.com +Description: Value of 1 indicates the controller can honor the reset_devices + kernel parameter. Value of 0 indicates reset_devices cannot be + honored. This is to allow, for example, kexec tools to be able + to warn the user if they designate an unresettable device as + a dump device, as kdump requires resetting the device in order + to work reliably. + diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 35658f445fca..eeed7aeb0b83 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -556,6 +556,44 @@ static void __devinit cciss_procinit(ctlr_info_t *h) #define to_hba(n) container_of(n, struct ctlr_info, dev) #define to_drv(n) container_of(n, drive_info_struct, dev) +/* List of controllers which cannot be reset on kexec with reset_devices */ +static u32 unresettable_controller[] = { + 0x324a103C, /* Smart Array P712m */ + 0x324b103C, /* SmartArray P711m */ + 0x3223103C, /* Smart Array P800 */ + 0x3234103C, /* Smart Array P400 */ + 0x3235103C, /* Smart Array P400i */ + 0x3211103C, /* Smart Array E200i */ + 0x3212103C, /* Smart Array E200 */ + 0x3213103C, /* Smart Array E200i */ + 0x3214103C, /* Smart Array E200i */ + 0x3215103C, /* Smart Array E200i */ + 0x3237103C, /* Smart Array E500 */ + 0x323D103C, /* Smart Array P700m */ + 0x409C0E11, /* Smart Array 6400 */ + 0x409D0E11, /* Smart Array 6400 EM */ +}; + +static int ctlr_is_resettable(struct ctlr_info *h) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++) + if (unresettable_controller[i] == h->board_id) + return 0; + return 1; +} + +static ssize_t host_show_resettable(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ctlr_info *h = to_hba(dev); + + return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h)); +} +static DEVICE_ATTR(resettable, S_IRUGO, host_show_resettable, NULL); + static ssize_t host_store_rescan(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -741,6 +779,7 @@ static DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL); static struct attribute *cciss_host_attrs[] = { &dev_attr_rescan.attr, + &dev_attr_resettable.attr, NULL }; From 978eb516a4e1a1b47163518d6f5d5e81ab27a583 Mon Sep 17 00:00:00 2001 From: "Stephen M. Cameron" Date: Fri, 11 Mar 2011 20:07:38 +0100 Subject: [PATCH 114/122] cciss: Add missing allocation in scsi_cmd_stack_setup and corresponding deallocation This bit got lost somewhere along the way. Without this, panic. Signed-off-by: Stephen M. 
Cameron Cc: stable@kernel.org Signed-off-by: Jens Axboe --- drivers/block/cciss_scsi.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c index 727d0225b7d0..3bfbde8b4013 100644 --- a/drivers/block/cciss_scsi.c +++ b/drivers/block/cciss_scsi.c @@ -226,6 +226,13 @@ scsi_cmd_stack_setup(ctlr_info_t *h, struct cciss_scsi_adapter_data_t *sa) return -ENOMEM; } + stk->elem = kmalloc(sizeof(stk->elem[0]) * stk->nelems, GFP_KERNEL); + if (!stk->elem) { + pci_free_consistent(h->pdev, size, stk->pool, + stk->cmd_pool_handle); + return -1; + } + for (i=0; i<stk->nelems; i++) { stk->elem[i] = &stk->pool[i]; stk->elem[i]->busaddr = (__u32) (stk->cmd_pool_handle + @@ -255,6 +262,8 @@ scsi_cmd_stack_free(ctlr_info_t *h) pci_free_consistent(h->pdev, size, stk->pool, stk->cmd_pool_handle); stk->pool = NULL; cciss_free_sg_chain_blocks(sa->cmd_sg_list, CMD_STACK_SIZE); + kfree(stk->elem); + stk->elem = NULL; } #if 0 From 4a765046553a88e4ec80ad84d2131b9e69ab4ab0 Mon Sep 17 00:00:00 2001 From: "Stephen M. Cameron" Date: Sat, 12 Mar 2011 10:02:11 +0100 Subject: [PATCH 115/122] cciss: hoist tag masking out of loop In process_nonindexed_cmd, hoist figuring of masked tag out of loop since it is the same throughout. Signed-off-by: Stephen M. Cameron Signed-off-by: Jens Axboe --- drivers/block/cciss.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index eeed7aeb0b83..0a296c1eff4f 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -3433,14 +3433,12 @@ static inline u32 process_indexed_cmd(ctlr_info_t *h, u32 raw_tag) /* process completion of a non-indexed command */ static inline u32 process_nonindexed_cmd(ctlr_info_t *h, u32 raw_tag) { - u32 tag; CommandList_struct *c = NULL; __u32 busaddr_masked, tag_masked; - tag = cciss_tag_discard_error_bits(raw_tag); + tag_masked = cciss_tag_discard_error_bits(raw_tag); list_for_each_entry(c, &h->cmpQ, list) { busaddr_masked = cciss_tag_discard_error_bits(c->busaddr); - tag_masked = cciss_tag_discard_error_bits(tag); if (busaddr_masked == tag_masked) { finish_cmd(h, c, raw_tag); return next_command(h); From 0498cc2a9e81de97674adde8ced8a1462a397013 Mon Sep 17 00:00:00 2001 From: "Stephen M. Cameron" Date: Sat, 12 Mar 2011 10:02:16 +0100 Subject: [PATCH 116/122] cciss: Inform controller we are using 32-bit tags. Controller will DMA only 32-bits of the tag per command on completion if it knows we are only using 32-bit tags. Signed-off-by: Stephen M.
Cameron Signed-off-by: Jens Axboe --- drivers/block/cciss.c | 25 +++++++++++++++---------- drivers/block/cciss_cmd.h | 1 + 2 files changed, 16 insertions(+), 10 deletions(-) diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 0a296c1eff4f..27b04a32b0c5 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -231,7 +231,7 @@ static const struct block_device_operations cciss_fops = { */ static void set_performant_mode(ctlr_info_t *h, CommandList_struct *c) { - if (likely(h->transMethod == CFGTBL_Trans_Performant)) + if (likely(h->transMethod & CFGTBL_Trans_Performant)) c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); } @@ -3175,10 +3175,13 @@ static inline u32 cciss_tag_to_index(u32 tag) return tag >> DIRECT_LOOKUP_SHIFT; } -static inline u32 cciss_tag_discard_error_bits(u32 tag) +static inline u32 cciss_tag_discard_error_bits(ctlr_info_t *h, u32 tag) { -#define CCISS_ERROR_BITS 0x03 - return tag & ~CCISS_ERROR_BITS; +#define CCISS_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1) +#define CCISS_SIMPLE_ERROR_BITS 0x03 + if (likely(h->transMethod & CFGTBL_Trans_Performant)) + return tag & ~CCISS_PERF_ERROR_BITS; + return tag & ~CCISS_SIMPLE_ERROR_BITS; } static inline void cciss_mark_tag_indexed(u32 *tag) @@ -3398,7 +3401,7 @@ static inline u32 next_command(ctlr_info_t *h) { u32 a; - if (unlikely(h->transMethod != CFGTBL_Trans_Performant)) + if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) return h->access.command_completed(h); if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) { @@ -3436,9 +3439,9 @@ static inline u32 process_nonindexed_cmd(ctlr_info_t *h, u32 raw_tag) CommandList_struct *c = NULL; __u32 busaddr_masked, tag_masked; - tag_masked = cciss_tag_discard_error_bits(raw_tag); + tag_masked = cciss_tag_discard_error_bits(h, raw_tag); list_for_each_entry(c, &h->cmpQ, list) { - busaddr_masked = cciss_tag_discard_error_bits(c->busaddr); + busaddr_masked = cciss_tag_discard_error_bits(h, c->busaddr); if (busaddr_masked == tag_masked) { finish_cmd(h, c, raw_tag); return next_command(h); @@ -3790,7 +3793,8 @@ static void __devinit cciss_wait_for_mode_change_ack(ctlr_info_t *h) } } -static __devinit void cciss_enter_performant_mode(ctlr_info_t *h) +static __devinit void cciss_enter_performant_mode(ctlr_info_t *h, + u32 use_short_tags) { /* This is a bit complicated. 
There are 8 registers on * the controller which we write to to tell it 8 different @@ -3845,7 +3849,7 @@ static __devinit void cciss_enter_performant_mode(ctlr_info_t *h) writel(0, &h->transtable->RepQCtrAddrHigh32); writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32); writel(0, &h->transtable->RepQAddr0High32); - writel(CFGTBL_Trans_Performant, + writel(CFGTBL_Trans_Performant | use_short_tags, &(h->cfgtable->HostWrite.TransportRequest)); writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); @@ -3892,7 +3896,8 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h) if ((h->reply_pool == NULL) || (h->blockFetchTable == NULL)) goto clean_up; - cciss_enter_performant_mode(h); + cciss_enter_performant_mode(h, + trans_support & CFGTBL_Trans_use_short_tags); /* Change the access methods to the performant access methods */ h->access = SA5_performant_access; diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h index 35463d2f0ee7..cd441bef031f 100644 --- a/drivers/block/cciss_cmd.h +++ b/drivers/block/cciss_cmd.h @@ -56,6 +56,7 @@ #define CFGTBL_Trans_Simple 0x00000002l #define CFGTBL_Trans_Performant 0x00000004l +#define CFGTBL_Trans_use_short_tags 0x20000000l #define CFGTBL_BusType_Ultra2 0x00000001l #define CFGTBL_BusType_Ultra3 0x00000002l From 16011131ced8bdb317e1bf03324ca78a27fa0a1c Mon Sep 17 00:00:00 2001 From: "Stephen M. Cameron" Date: Sat, 12 Mar 2011 10:02:21 +0100 Subject: [PATCH 117/122] cciss: Mask off error bits of c->busaddr in cmd_special_free when calling pci_free_consistent Signed-off-by: Stephen M. Cameron Signed-off-by: Jens Axboe --- drivers/block/cciss.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 27b04a32b0c5..f21116cb8b82 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -193,7 +193,7 @@ static int __devinit cciss_find_cfg_addrs(struct pci_dev *pdev, u64 *cfg_offset); static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev, unsigned long *memory_bar); - +static inline u32 cciss_tag_discard_error_bits(ctlr_info_t *h, u32 tag); /* performant mode helper functions */ static void calc_bucket_map(int *bucket, int num_buckets, int nsgs, @@ -1012,8 +1012,8 @@ static void cmd_special_free(ctlr_info_t *h, CommandList_struct *c) temp64.val32.upper = c->ErrDesc.Addr.upper; pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct), c->err_info, (dma_addr_t) temp64.val); - pci_free_consistent(h->pdev, sizeof(CommandList_struct), - c, (dma_addr_t) c->busaddr); + pci_free_consistent(h->pdev, sizeof(CommandList_struct), c, + (dma_addr_t) cciss_tag_discard_error_bits(h, (u32) c->busaddr)); } static inline ctlr_info_t *get_host(struct gendisk *disk) From fcab1c112ade881d884cd7b8161f7543194d12e1 Mon Sep 17 00:00:00 2001 From: "Stephen M. Cameron" Date: Sat, 12 Mar 2011 10:02:24 +0100 Subject: [PATCH 118/122] cciss: remove unnecessary casts Signed-off-by: Stephen M. 
Cameron Signed-off-by: Jens Axboe --- drivers/block/cciss.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index f21116cb8b82..25267e92ce31 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -1529,8 +1529,7 @@ static int cciss_bigpassthru(ctlr_info_t *h, void __user *argp) return -EINVAL; if (!capable(CAP_SYS_RAWIO)) return -EPERM; - ioc = (BIG_IOCTL_Command_struct *) - kmalloc(sizeof(*ioc), GFP_KERNEL); + ioc = kmalloc(sizeof(*ioc), GFP_KERNEL); if (!ioc) { status = -ENOMEM; goto cleanup1; From 6d9a4f9e21486fa83526a9a9fdf88b9b2cdfd299 Mon Sep 17 00:00:00 2001 From: "Stephen M. Cameron" Date: Sat, 12 Mar 2011 10:02:30 +0100 Subject: [PATCH 119/122] cciss: fix missed command status value CMD_UNABORTABLE and fix a nearby typo, "do" that should have been "due" Signed-off-by: Stephen M. Cameron Signed-off-by: Jens Axboe --- drivers/block/cciss.c | 11 +++++++++++ drivers/block/cciss_scsi.c | 13 +++++++++++-- 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 25267e92ce31..9bf13988f1a2 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -2691,6 +2691,10 @@ static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c) c->Request.CDB[0]); return_status = IO_NEEDS_RETRY; break; + case CMD_UNABORTABLE: + dev_warn(&h->pdev->dev, "cmd unabortable\n"); + return_status = IO_ERROR; + break; default: dev_warn(&h->pdev->dev, "cmd 0x%02x returned " "unknown status %x\n", c->Request.CDB[0], @@ -3141,6 +3145,13 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd, (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? DID_PASSTHROUGH : DID_ERROR); break; + case CMD_UNABORTABLE: + dev_warn(&h->pdev->dev, "cmd %p unabortable\n", cmd); + rq->errors = make_status_bytes(SAM_STAT_GOOD, + cmd->err_info->CommandStatus, DRIVER_OK, + cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC ? 
+ DID_PASSTHROUGH : DID_ERROR); + break; default: dev_warn(&h->pdev->dev, "cmd %p returned " "unknown status %x\n", cmd, diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c index 3bfbde8b4013..8fa2a2e90b9f 100644 --- a/drivers/block/cciss_scsi.c +++ b/drivers/block/cciss_scsi.c @@ -833,13 +833,18 @@ static void complete_scsi_command(CommandList_struct *c, int timeout, break; case CMD_UNSOLICITED_ABORT: cmd->result = DID_ABORT << 16; - dev_warn(&h->pdev->dev, "%p aborted do to an " + dev_warn(&h->pdev->dev, "%p aborted due to an " "unsolicited abort\n", c); break; case CMD_TIMEOUT: cmd->result = DID_TIME_OUT << 16; dev_warn(&h->pdev->dev, "%p timedout\n", c); break; + case CMD_UNABORTABLE: + cmd->result = DID_ERROR << 16; + dev_warn(&h->pdev->dev, "c %p command " + "unabortable\n", c); + break; default: cmd->result = DID_ERROR << 16; dev_warn(&h->pdev->dev, @@ -1016,11 +1021,15 @@ cciss_scsi_interpret_error(ctlr_info_t *h, CommandList_struct *c) break; case CMD_UNSOLICITED_ABORT: dev_warn(&h->pdev->dev, - "%p aborted do to an unsolicited abort\n", c); + "%p aborted due to an unsolicited abort\n", c); break; case CMD_TIMEOUT: dev_warn(&h->pdev->dev, "%p timedout\n", c); break; + case CMD_UNABORTABLE: + dev_warn(&h->pdev->dev, + "%p unabortable\n", c); + break; default: dev_warn(&h->pdev->dev, "%p returned unknown status %x\n", From b66538014f7eae121afa99ca18488fd67536a1bf Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Sat, 12 Mar 2011 13:47:51 +0100 Subject: [PATCH 120/122] Revert "cciss: Add missing allocation in scsi_cmd_stack_setup and corresponding deallocation" This reverts commit 978eb516a4e1a1b47163518d6f5d5e81ab27a583. The commit was broken, relying on other changes that have not been committed yet. Signed-off-by: Jens Axboe --- drivers/block/cciss_scsi.c | 9 --------- 1 file changed, 9 deletions(-) diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c index 8fa2a2e90b9f..df793803f5ae 100644 --- a/drivers/block/cciss_scsi.c +++ b/drivers/block/cciss_scsi.c @@ -226,13 +226,6 @@ scsi_cmd_stack_setup(ctlr_info_t *h, struct cciss_scsi_adapter_data_t *sa) return -ENOMEM; } - stk->elem = kmalloc(sizeof(stk->elem[0]) * stk->nelems, GFP_KERNEL); - if (!stk->elem) { - pci_free_consistent(h->pdev, size, stk->pool, - stk->cmd_pool_handle); - return -1; - } - for (i=0; i<stk->nelems; i++) { stk->elem[i] = &stk->pool[i]; stk->elem[i]->busaddr = (__u32) (stk->cmd_pool_handle + @@ -262,8 +255,6 @@ scsi_cmd_stack_free(ctlr_info_t *h) pci_free_consistent(h->pdev, size, stk->pool, stk->cmd_pool_handle); stk->pool = NULL; cciss_free_sg_chain_blocks(sa->cmd_sg_list, CMD_STACK_SIZE); - kfree(stk->elem); - stk->elem = NULL; } #if 0 From f0ff1357ce391265edbf844792da7da9a694f4bd Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Thu, 17 Mar 2011 15:02:51 +0100 Subject: [PATCH 121/122] drbd: need include for bitops functions declarations Signed-off-by: Stephen Rothwell Signed-off-by: Jens Axboe --- drivers/block/drbd/drbd_bitmap.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index b62dd5f26c5d..f0ae63d2df65 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c @@ -28,6 +28,9 @@ #include #include #include + +#include + #include "drbd_int.h" From 1ddd5049545e0aa1a0ed19bca4d9c9c3ce1ac8a2 Mon Sep 17 00:00:00 2001 From: Bud Brown Date: Wed, 23 Mar 2011 20:47:11 +0100 Subject: [PATCH 122/122] cciss: fix lost command issue Under certain workloads a command may seem to get lost.
IOW, the Smart Array thinks all commands have been completed but we still have commands in our completion queue. This may lead to system instability, filesystems going read-only, or even panics depending on the affected filesystem. We add an extra read to force the write to complete. Testing shows this extra read avoids the problem. Signed-off-by: Mike Miller Cc: stable@kernel.org Signed-off-by: Jens Axboe --- drivers/block/cciss.h | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h index 579f74918493..554bbd907d14 100644 --- a/drivers/block/cciss.h +++ b/drivers/block/cciss.h @@ -222,6 +222,7 @@ static void SA5_submit_command( ctlr_info_t *h, CommandList_struct *c) h->ctlr, c->busaddr); #endif /* CCISS_DEBUG */ writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); + readl(h->vaddr + SA5_REQUEST_PORT_OFFSET); h->commands_outstanding++; if ( h->commands_outstanding > h->max_outstanding) h->max_outstanding = h->commands_outstanding;
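The one-line fix above relies on a standard MMIO property: writes through a PCI bridge may be posted, so the writel() that rings the doorbell can still be sitting in a buffer when the CPU moves on; reading a register in the same region forces the posted write out to the controller. The following is condensed from the SA5_submit_command() hunk above, with the CCISS_DEBUG printout omitted — a sketch of the patched path, not a drop-in replacement:

static void SA5_submit_command(ctlr_info_t *h, CommandList_struct *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
	/* Read back from the same BAR so the posted doorbell write is
	 * flushed to the controller before we count the command as
	 * outstanding. */
	readl(h->vaddr + SA5_REQUEST_PORT_OFFSET);
	h->commands_outstanding++;
	if (h->commands_outstanding > h->max_outstanding)
		h->max_outstanding = h->commands_outstanding;
}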