Merge tag 'md/4.3-fixes' of git://neil.brown.name/md

Pull md fixes from Neil Brown:
 "Assorted fixes for md in 4.3-rc.

  Two tagged for -stable, and one is really a cleanup to match and
  improve the kmem_cache interface."

* tag 'md/4.3-fixes' of git://neil.brown.name/md:
  md/bitmap: don't pass -1 to bitmap_storage_alloc.
  md/raid1: Avoid raid1 resync getting stuck
  md: drop null test before destroy functions
  md: clear CHANGE_PENDING in readonly array
  md/raid0: apply base queue limits *before* disk_stack_limits
  md/raid5: don't index beyond end of array in need_this_block().
  raid5: update analysis state for failed stripe
  md: wait for pending superblock updates before switching to read-only
commit 15ecf9a986
Author: Linus Torvalds
Date:   2015-10-04 11:47:28 +01:00

7 changed files with 28 additions and 26 deletions

--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c

@@ -1997,7 +1997,8 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
 	if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file)
 		ret = bitmap_storage_alloc(&store, chunks,
 					   !bitmap->mddev->bitmap_info.external,
-					   bitmap->cluster_slot);
+					   mddev_is_clustered(bitmap->mddev)
+					   ? bitmap->cluster_slot : 0);
 	if (ret)
 		goto err;
 

--- a/drivers/md/md.c
+++ b/drivers/md/md.c

@@ -5409,9 +5409,13 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
 		 * which will now never happen */
 		wake_up_process(mddev->sync_thread->tsk);
 
+	if (mddev->external && test_bit(MD_CHANGE_PENDING, &mddev->flags))
+		return -EBUSY;
 	mddev_unlock(mddev);
 	wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
 					  &mddev->recovery));
+	wait_event(mddev->sb_wait,
+		   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
 	mddev_lock_nointr(mddev);
 
 	mutex_lock(&mddev->open_mutex);
@@ -8160,6 +8164,7 @@ void md_check_recovery(struct mddev *mddev)
 			md_reap_sync_thread(mddev);
 			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
 			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+			clear_bit(MD_CHANGE_PENDING, &mddev->flags);
 			goto unlock;
 		}
 
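For context on why the new wait in md_set_readonly() terminates: the path that finishes a superblock update clears MD_CHANGE_PENDING and wakes mddev->sb_wait, so the reader only has to sleep on that queue. A simplified sketch of the pairing, with locking and error paths omitted (not a verbatim excerpt of md.c):

	/* Writer side, at the end of a superblock update
	 * (cf. md_update_sb()): clear the flag, wake sleepers. */
	clear_bit(MD_CHANGE_PENDING, &mddev->flags);
	wake_up(&mddev->sb_wait);

	/* Sleeper side, as in the hunk above: block until no update is
	 * pending.  For external (userspace-managed) metadata the kernel
	 * cannot write the superblock itself, hence the -EBUSY return
	 * instead of a wait that might never finish. */
	wait_event(mddev->sb_wait,
		   !test_bit(MD_CHANGE_PENDING, &mddev->flags));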

--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c

@@ -470,7 +470,6 @@ static int multipath_run (struct mddev *mddev)
 	return 0;
 
 out_free_conf:
-	if (conf->pool)
-		mempool_destroy(conf->pool);
+	mempool_destroy(conf->pool);
 	kfree(conf->multipaths);
 	kfree(conf);
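This hunk, and the matching raid1, raid10 and raid5 hunks below, are the "drop null test before destroy functions" cleanup: as of 4.3, mempool_destroy() and kmem_cache_destroy() return immediately when passed NULL, so the guarding if tests are dead weight. A minimal sketch of the resulting error-path idiom, using hypothetical names (my_cleanup, my_pool, my_cache):

	#include <linux/mempool.h>
	#include <linux/slab.h>

	/* Hypothetical cleanup helper: both destroy functions are no-ops
	 * on NULL, so they are safe to call even when the corresponding
	 * allocation never happened. */
	static void my_cleanup(mempool_t *my_pool, struct kmem_cache *my_cache)
	{
		mempool_destroy(my_pool);	/* no-op if my_pool is NULL */
		kmem_cache_destroy(my_cache);	/* no-op if my_cache is NULL */
	}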

--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c

@@ -376,12 +376,6 @@ static int raid0_run(struct mddev *mddev)
 		struct md_rdev *rdev;
 		bool discard_supported = false;
 
-		rdev_for_each(rdev, mddev) {
-			disk_stack_limits(mddev->gendisk, rdev->bdev,
-					  rdev->data_offset << 9);
-			if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
-				discard_supported = true;
-		}
 		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
 		blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
 		blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
@@ -390,6 +384,12 @@ static int raid0_run(struct mddev *mddev)
 		blk_queue_io_opt(mddev->queue,
 				 (mddev->chunk_sectors << 9) * mddev->raid_disks);
 
+		rdev_for_each(rdev, mddev) {
+			disk_stack_limits(mddev->gendisk, rdev->bdev,
+					  rdev->data_offset << 9);
+			if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
+				discard_supported = true;
+		}
 		if (!discard_supported)
			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
 		else
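The point of the reordering: the blk_queue_max_*() setters assign their value outright, while disk_stack_limits() merges each member device's limits into the array's queue, keeping the more restrictive value for the max_* limits involved here. Stacking therefore has to run after the chunk-size base limits are in place; done the other way around, the setters would silently overwrite whatever the stacking computed. Distilled from the hunk above (simplified, not a verbatim excerpt):

	/* 1. Base limits first: these are plain assignments. */
	blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
	blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);

	/* 2. Then merge in every member device: disk_stack_limits()
	 * only tightens these limits, so the base values set above
	 * are respected rather than clobbered. */
	rdev_for_each(rdev, mddev)
		disk_stack_limits(mddev->gendisk, rdev->bdev,
				  rdev->data_offset << 9);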

--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c

@@ -881,8 +881,7 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
 	}
 
 	if (bio && bio_data_dir(bio) == WRITE) {
-		if (bio->bi_iter.bi_sector >=
-		    conf->mddev->curr_resync_completed) {
+		if (bio->bi_iter.bi_sector >= conf->next_resync) {
 			if (conf->start_next_window == MaxSector)
 				conf->start_next_window =
 					conf->next_resync +
@@ -1516,7 +1515,7 @@ static void close_sync(struct r1conf *conf)
 	conf->r1buf_pool = NULL;
 
 	spin_lock_irq(&conf->resync_lock);
-	conf->next_resync = 0;
+	conf->next_resync = MaxSector - 2 * NEXT_NORMALIO_DISTANCE;
 	conf->start_next_window = MaxSector;
 	conf->current_window_requests +=
 		conf->next_window_requests;
@@ -2843,7 +2842,6 @@ static struct r1conf *setup_conf(struct mddev *mddev)
 
  abort:
 	if (conf) {
-		if (conf->r1bio_pool)
-			mempool_destroy(conf->r1bio_pool);
+		mempool_destroy(conf->r1bio_pool);
 		kfree(conf->mirrors);
 		safe_put_page(conf->tmppage);
@@ -2946,7 +2944,6 @@ static void raid1_free(struct mddev *mddev, void *priv)
 {
 	struct r1conf *conf = priv;
 
-	if (conf->r1bio_pool)
-		mempool_destroy(conf->r1bio_pool);
+	mempool_destroy(conf->r1bio_pool);
 	kfree(conf->mirrors);
 	safe_put_page(conf->tmppage);

--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c

@@ -3486,7 +3486,6 @@ static struct r10conf *setup_conf(struct mddev *mddev)
 		printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
 		       mdname(mddev));
 	if (conf) {
-		if (conf->r10bio_pool)
-			mempool_destroy(conf->r10bio_pool);
+		mempool_destroy(conf->r10bio_pool);
 		kfree(conf->mirrors);
 		safe_put_page(conf->tmppage);
@@ -3682,7 +3681,6 @@ static int run(struct mddev *mddev)
 
 out_free_conf:
 	md_unregister_thread(&mddev->thread);
-	if (conf->r10bio_pool)
-		mempool_destroy(conf->r10bio_pool);
+	mempool_destroy(conf->r10bio_pool);
 	safe_put_page(conf->tmppage);
 	kfree(conf->mirrors);
@@ -3696,7 +3694,6 @@ static void raid10_free(struct mddev *mddev, void *priv)
 {
 	struct r10conf *conf = priv;
 
-	if (conf->r10bio_pool)
-		mempool_destroy(conf->r10bio_pool);
+	mempool_destroy(conf->r10bio_pool);
 	safe_put_page(conf->tmppage);
 	kfree(conf->mirrors);

--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c

@@ -2271,7 +2271,6 @@ static void shrink_stripes(struct r5conf *conf)
 	       drop_one_stripe(conf))
 		;
 
-	if (conf->slab_cache)
-		kmem_cache_destroy(conf->slab_cache);
+	kmem_cache_destroy(conf->slab_cache);
 	conf->slab_cache = NULL;
 }
@@ -3150,6 +3149,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 		spin_unlock_irq(&sh->stripe_lock);
 		if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
 			wake_up(&conf->wait_for_overlap);
+		if (bi)
+			s->to_read--;
 		while (bi && bi->bi_iter.bi_sector <
 		       sh->dev[i].sector + STRIPE_SECTORS) {
 			struct bio *nextbi =
@@ -3169,6 +3170,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 			 */
 			clear_bit(R5_LOCKED, &sh->dev[i].flags);
 	}
+	s->to_write = 0;
+	s->written = 0;
 
 	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
 		if (atomic_dec_and_test(&conf->pending_full_writes))
@@ -3300,7 +3303,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
 		 */
 		return 0;
 
-	for (i = 0; i < s->failed; i++) {
+	for (i = 0; i < s->failed && i < 2; i++) {
 		if (fdev[i]->towrite &&
 		    !test_bit(R5_UPTODATE, &fdev[i]->flags) &&
 		    !test_bit(R5_OVERWRITE, &fdev[i]->flags))
@@ -3324,7 +3327,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
 	    sh->sector < sh->raid_conf->mddev->recovery_cp)
 		/* reconstruct-write isn't being forced */
 		return 0;
-	for (i = 0; i < s->failed; i++) {
+	for (i = 0; i < s->failed && i < 2; i++) {
 		if (s->failed_num[i] != sh->pd_idx &&
 		    s->failed_num[i] != sh->qd_idx &&
 		    !test_bit(R5_UPTODATE, &fdev[i]->flags) &&
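The two need_this_block() hunks bound the loop as well as the failure count: s->failed can legitimately exceed two while a multi-device failure is being processed (the stripe is unrecoverable at that point, but the state machine still walks it), yet failed_num[] only has two slots. A sketch of the defensive pattern, assuming the 4.3 layout of struct stripe_head_state (int failed; int failed_num[2];) and a hypothetical helper name:

	#include <linux/kernel.h>	/* ARRAY_SIZE() */

	/* Hypothetical helper: s->failed counts every failed device, but
	 * only the first two are recorded in failed_num[], so any loop
	 * indexing that array must clamp to its size; this is what the
	 * literal "i < 2" in the hunks above does. */
	static bool is_recorded_failure(struct stripe_head_state *s, int disk)
	{
		int i;

		for (i = 0; i < s->failed && i < ARRAY_SIZE(s->failed_num); i++)
			if (s->failed_num[i] == disk)
				return true;
		return false;
	}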