md/raid5: schedule_construction should abort if nothing to do.
Since commit 1ed850f356 ("md/raid5: make sure to_read and to_write
never go negative") it has been possible for handle_stripe_dirtying
to be called when there isn't actually any work to do.
It then calls schedule_reconstruction() which will set R5_LOCKED
on the parity block(s) even when nothing else is happening.
This then causes problems in do_release_stripe().
So add checks to schedule_reconstruction() so that if it doesn't
find anything to do, it just aborts.
This bug was introduced in v3.7, so the patch is suitable
for -stable kernels since then.
Cc: stable@vger.kernel.org (v3.7+)
Reported-by: majianpeng <majianpeng@gmail.com>
Signed-off-by: NeilBrown <neilb@suse.de>
This commit is contained in:
parent
f3378b4870
commit
ce7d363aaf
|
@ -2283,17 +2283,6 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
|
||||||
int level = conf->level;
|
int level = conf->level;
|
||||||
|
|
||||||
if (rcw) {
|
if (rcw) {
|
||||||
/* if we are not expanding this is a proper write request, and
|
|
||||||
* there will be bios with new data to be drained into the
|
|
||||||
* stripe cache
|
|
||||||
*/
|
|
||||||
if (!expand) {
|
|
||||||
sh->reconstruct_state = reconstruct_state_drain_run;
|
|
||||||
set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
|
|
||||||
} else
|
|
||||||
sh->reconstruct_state = reconstruct_state_run;
|
|
||||||
|
|
||||||
set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
|
|
||||||
|
|
||||||
for (i = disks; i--; ) {
|
for (i = disks; i--; ) {
|
||||||
struct r5dev *dev = &sh->dev[i];
|
struct r5dev *dev = &sh->dev[i];
|
||||||
|
@ -2306,6 +2295,21 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
|
||||||
s->locked++;
|
s->locked++;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
/* if we are not expanding this is a proper write request, and
|
||||||
|
* there will be bios with new data to be drained into the
|
||||||
|
* stripe cache
|
||||||
|
*/
|
||||||
|
if (!expand) {
|
||||||
|
if (!s->locked)
|
||||||
|
/* False alarm, nothing to do */
|
||||||
|
return;
|
||||||
|
sh->reconstruct_state = reconstruct_state_drain_run;
|
||||||
|
set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
|
||||||
|
} else
|
||||||
|
sh->reconstruct_state = reconstruct_state_run;
|
||||||
|
|
||||||
|
set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
|
||||||
|
|
||||||
if (s->locked + conf->max_degraded == disks)
|
if (s->locked + conf->max_degraded == disks)
|
||||||
if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
|
if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
|
||||||
atomic_inc(&conf->pending_full_writes);
|
atomic_inc(&conf->pending_full_writes);
|
||||||
|
@ -2314,11 +2318,6 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
|
||||||
BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
|
BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
|
||||||
test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
|
test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
|
||||||
|
|
||||||
sh->reconstruct_state = reconstruct_state_prexor_drain_run;
|
|
||||||
set_bit(STRIPE_OP_PREXOR, &s->ops_request);
|
|
||||||
set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
|
|
||||||
set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
|
|
||||||
|
|
||||||
for (i = disks; i--; ) {
|
for (i = disks; i--; ) {
|
||||||
struct r5dev *dev = &sh->dev[i];
|
struct r5dev *dev = &sh->dev[i];
|
||||||
if (i == pd_idx)
|
if (i == pd_idx)
|
||||||
|
@ -2333,6 +2332,13 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
|
||||||
s->locked++;
|
s->locked++;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if (!s->locked)
|
||||||
|
/* False alarm - nothing to do */
|
||||||
|
return;
|
||||||
|
sh->reconstruct_state = reconstruct_state_prexor_drain_run;
|
||||||
|
set_bit(STRIPE_OP_PREXOR, &s->ops_request);
|
||||||
|
set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
|
||||||
|
set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* keep the parity disk(s) locked while asynchronous operations
|
/* keep the parity disk(s) locked while asynchronous operations
|
||||||
|
|
Loading…
Reference in New Issue