md bugfixes for 3.17

- raid6 data corruption during recovery
- raid6 livelock
- raid10 memory leaks

Merge tag 'md/3.17-fixes' of git://neil.brown.name/md

Pull md bugfixes from Neil Brown:
 "Here are the bug-fixes I promised :-)

  Funny how you start looking for one and others start appearing.

   - raid6 data corruption during recovery
   - raid6 livelock
   - raid10 memory leaks"

* tag 'md/3.17-fixes' of git://neil.brown.name/md:
  md/raid10: always initialise ->state on newly allocated r10_bio
  md/raid10: avoid memory leak on error path during reshape.
  md/raid10: Fix memory leak when raid10 reshape completes.
  md/raid10: fix memory leak when reshaping a RAID10.
  md/raid6: avoid data corruption during recovery of double-degraded RAID6
  md/raid5: avoid livelock caused by non-aligned writes.
Linus Torvalds 2014-08-19 09:47:01 -05:00
commit 63d871cb0b
2 changed files with 9 additions and 2 deletions

--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2953,6 +2953,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
                  */
                 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
                         end_reshape(conf);
+                        close_sync(conf);
                         return 0;
                 }
@@ -3081,6 +3082,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
                         }
                         r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+                        r10_bio->state = 0;
                         raise_barrier(conf, rb2 != NULL);
                         atomic_set(&r10_bio->remaining, 0);
@@ -3269,6 +3271,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
                 if (sync_blocks < max_sync)
                         max_sync = sync_blocks;
                 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+                r10_bio->state = 0;
                 r10_bio->mddev = mddev;
                 atomic_set(&r10_bio->remaining, 0);
@@ -4384,6 +4387,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
 read_more:
         /* Now schedule reads for blocks from sector_nr to last */
         r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+        r10_bio->state = 0;
         raise_barrier(conf, sectors_done != 0);
         atomic_set(&r10_bio->remaining, 0);
         r10_bio->mddev = mddev;
@@ -4398,6 +4402,7 @@ read_more:
                  * on all the target devices.
                  */
                 // FIXME
+                mempool_free(r10_bio, conf->r10buf_pool);
                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
                 return sectors_done;
         }
@@ -4410,7 +4415,7 @@ read_more:
         read_bio->bi_private = r10_bio;
         read_bio->bi_end_io = end_sync_read;
         read_bio->bi_rw = READ;
-        read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
+        read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
         read_bio->bi_flags |= 1 << BIO_UPTODATE;
         read_bio->bi_vcnt = 0;
         read_bio->bi_iter.bi_size = 0;
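
A note on the repeated "r10_bio->state = 0" additions above: mempool_alloc() can hand back an object that was used and freed earlier, so any field the caller does not reset still holds whatever the previous user left in it. The following is a minimal userspace sketch of that hazard, not kernel code: the free-list "pool", the demo_bio type and its state field are invented stand-ins for the r10_bio case.

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for r10_bio and its ->state word; purely illustrative. */
struct demo_bio {
        unsigned long state;
        struct demo_bio *next;          /* free-list linkage */
};

static struct demo_bio *free_list;

/* Loosely modelled on a mempool: prefer recycling a previously freed object. */
static struct demo_bio *pool_alloc(void)
{
        if (free_list) {
                struct demo_bio *b = free_list;
                free_list = b->next;
                return b;               /* old field values survive! */
        }
        return calloc(1, sizeof(struct demo_bio));
}

static void pool_free(struct demo_bio *b)
{
        b->next = free_list;
        free_list = b;
}

int main(void)
{
        struct demo_bio *b = pool_alloc();

        b->state = 0xdead;              /* first user sets some state bits */
        pool_free(b);

        b = pool_alloc();               /* recycled object comes back dirty */
        printf("without reset: state=%#lx\n", b->state);

        b->state = 0;                   /* the pattern the patch adds */
        printf("with reset:    state=%#lx\n", b->state);
        return 0;
}

The mempool_free() added on the reshape error path is the other side of the same discipline: every r10_bio taken from the pool has to go back to it on every exit path, otherwise each aborted reshape attempt leaks one object.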

--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2922,7 +2922,7 @@ static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
              (!test_bit(R5_Insync, &dev->flags) || test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) &&
               !test_bit(R5_OVERWRITE, &fdev[0]->flags)) ||
              (sh->raid_conf->level == 6 && s->failed && s->to_write &&
-              s->to_write < sh->raid_conf->raid_disks - 2 &&
+              s->to_write - s->non_overwrite < sh->raid_conf->raid_disks - 2 &&
               (!test_bit(R5_Insync, &dev->flags) || test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))))) {
                 /* we would like to get this block, possibly by computing it,
                  * otherwise read it if the backing disk is insync
@@ -3817,6 +3817,8 @@ static void handle_stripe(struct stripe_head *sh)
                                 set_bit(R5_Wantwrite, &dev->flags);
                                 if (prexor)
                                         continue;
+                                if (s.failed > 1)
+                                        continue;
                                 if (!test_bit(R5_Insync, &dev->flags) ||
                                     ((i == sh->pd_idx || i == sh->qd_idx) &&
                                      s.failed == 0))
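
To see what the fetch_block() change buys, it helps to plug numbers into the old and new predicates. The sketch below is illustrative only: the array size and write counts are invented, and the reading of the condition (true means the block should be read or computed now) follows the comment preserved in the hunk above; it is not the kernel code itself.

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
        int raid_disks = 6;         /* hypothetical 6-device RAID6: 4 data + P + Q */
        int to_write = 4;           /* pending writes touch every data block...     */
        int non_overwrite = 4;      /* ...but all of them are non-aligned, partial  */

        /* Old test: writes covering all data blocks suppress the read entirely. */
        bool old_fetch = to_write < raid_disks - 2;
        /* New test: only full overwrites count; partial writes still need old data. */
        bool new_fetch = to_write - non_overwrite < raid_disks - 2;

        printf("old predicate: %s\n", old_fetch ? "read/compute block" : "skip");
        printf("new predicate: %s\n", new_fetch ? "read/compute block" : "skip");
        return 0;
}

With the old test the non-aligned writes never get the old data they depend on, so the stripe keeps being retried, which is the livelock named in the shortlog; the new test lets the read happen. The separate handle_stripe() hunk appears to belong to the double-degraded corruption fix instead: with more than one failed device, the in-sync bookkeeping that follows is now skipped for that block.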