commit c36591f682
Merge tag 'md-next-20230623' of https://git.kernel.org/pub/scm/linux/kernel/git/song/md into for-6.5/block-late

Pull MD fixes from Song:

* tag 'md-next-20230623' of https://git.kernel.org/pub/scm/linux/kernel/git/song/md:
  raid10: avoid spin_lock from fastpath from raid10_unplug()
  md: fix 'delete_mutex' deadlock
  md: use mddev->external to select holder in export_rdev()
  md/raid1-10: fix casting from randomized structure in raid1_submit_write()
  md/raid10: fix the condition to call bio_end_io_acct()
diff --git a/drivers/md/md.c b/drivers/md/md.c
@@ -643,7 +643,6 @@ void mddev_init(struct mddev *mddev)
 {
 	mutex_init(&mddev->open_mutex);
 	mutex_init(&mddev->reconfig_mutex);
-	mutex_init(&mddev->delete_mutex);
 	mutex_init(&mddev->bitmap_info.mutex);
 	INIT_LIST_HEAD(&mddev->disks);
 	INIT_LIST_HEAD(&mddev->all_mddevs);
@@ -749,26 +748,15 @@ static void mddev_free(struct mddev *mddev)
 
 static const struct attribute_group md_redundancy_group;
 
-static void md_free_rdev(struct mddev *mddev)
+void mddev_unlock(struct mddev *mddev)
 {
 	struct md_rdev *rdev;
 	struct md_rdev *tmp;
+	LIST_HEAD(delete);
 
-	mutex_lock(&mddev->delete_mutex);
-	if (list_empty(&mddev->deleting))
-		goto out;
+	if (!list_empty(&mddev->deleting))
+		list_splice_init(&mddev->deleting, &delete);
 
-	list_for_each_entry_safe(rdev, tmp, &mddev->deleting, same_set) {
-		list_del_init(&rdev->same_set);
-		kobject_del(&rdev->kobj);
-		export_rdev(rdev, mddev);
-	}
-out:
-	mutex_unlock(&mddev->delete_mutex);
-}
-
-void mddev_unlock(struct mddev *mddev)
-{
 	if (mddev->to_remove) {
 		/* These cannot be removed under reconfig_mutex as
 		 * an access to the files will try to take reconfig_mutex
@@ -808,7 +796,11 @@ void mddev_unlock(struct mddev *mddev)
 	} else
 		mutex_unlock(&mddev->reconfig_mutex);
 
-	md_free_rdev(mddev);
+	list_for_each_entry_safe(rdev, tmp, &delete, same_set) {
+		list_del_init(&rdev->same_set);
+		kobject_del(&rdev->kobj);
+		export_rdev(rdev, mddev);
+	}
 
 	md_wakeup_thread(mddev->thread);
 	wake_up(&mddev->sb_wait);
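The 'delete_mutex' deadlock fix in the hunks above boils down to a splice-then-drain pattern: while reconfig_mutex is still held, mddev_unlock() steals the whole deleting list onto a local list head, and only after the mutex is dropped does it do the heavyweight kobject_del()/export_rdev() work, so no second mutex is needed at all. A minimal userspace sketch of the same pattern, assuming pthreads and invented names (queue_deletion, drain_pending), not kernel API:

    #include <pthread.h>
    #include <stdlib.h>

    struct node { struct node *next; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *pending;    /* protected by lock, like mddev->deleting */

    /* Producer side: queue a node for deferred teardown. The caller already
     * holds lock, mirroring md, where reconfig_mutex is held at this point. */
    static void queue_deletion(struct node *n)
    {
            n->next = pending;
            pending = n;
    }

    /* Consumer side: steal the whole list while the lock is held, then do
     * the expensive work unlocked -- the analogue of list_splice_init()
     * followed by the list_for_each_entry_safe() loop in mddev_unlock(). */
    static void drain_pending(void)
    {
            struct node *batch;

            pthread_mutex_lock(&lock);
            batch = pending;            /* splice: take everything at once */
            pending = NULL;
            pthread_mutex_unlock(&lock);

            while (batch) {             /* process outside the lock */
                    struct node *next = batch->next;
                    free(batch);        /* stands in for kobject_del()/export_rdev() */
                    batch = next;
            }
    }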
@@ -2458,7 +2450,7 @@ static void export_rdev(struct md_rdev *rdev, struct mddev *mddev)
 	if (test_bit(AutoDetected, &rdev->flags))
 		md_autodetect_dev(rdev->bdev->bd_dev);
 #endif
-	blkdev_put(rdev->bdev, mddev->major_version == -2 ? &claim_rdev : rdev);
+	blkdev_put(rdev->bdev, mddev->external ? &claim_rdev : rdev);
 	rdev->bdev = NULL;
 	kobject_put(&rdev->kobj);
 }
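blkdev_put() must be handed the same holder cookie that claimed the device: rdevs belonging to externally managed metadata are claimed with the global claim_rdev cookie, everything else with the rdev itself. Because export_rdev() now runs deferred from mddev_unlock(), it can execute after md_clean() has already reset the mddev; testing mddev->external (and, in the md_clean() hunk below, no longer clearing it early) keeps the release-time holder computation in step with the claim-time one. A toy model of holder matching, with hypothetical bdev_claim()/bdev_release() names rather than the kernel API:

    #include <assert.h>
    #include <stdlib.h>

    /* Hypothetical holder-matched claim/release: whoever claims a device
     * passes a cookie, and the very same cookie must come back at release. */
    struct bdev { void *holder; };

    static struct bdev *bdev_claim(void *holder)
    {
            struct bdev *b = calloc(1, sizeof(*b));
            b->holder = holder;
            return b;
    }

    static void bdev_release(struct bdev *b, void *holder)
    {
            /* A mismatch here is the bug the fix avoids: the release-time
             * condition must reproduce the claim-time holder exactly. */
            assert(b->holder == holder);
            free(b);
    }

    int main(void)
    {
            int claim_cookie;                   /* stands in for claim_rdev */
            struct bdev *b = bdev_claim(&claim_cookie);
            bdev_release(b, &claim_cookie);     /* OK: same cookie both times */
            return 0;
    }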
@@ -2488,9 +2480,7 @@ static void md_kick_rdev_from_array(struct md_rdev *rdev)
 	 * reconfig_mutex is held, hence it can't be called under
 	 * reconfig_mutex and it's delayed to mddev_unlock().
 	 */
-	mutex_lock(&mddev->delete_mutex);
 	list_add(&rdev->same_set, &mddev->deleting);
-	mutex_unlock(&mddev->delete_mutex);
 }
 
 static void export_array(struct mddev *mddev)
@@ -6140,7 +6130,7 @@ static void md_clean(struct mddev *mddev)
 	mddev->resync_min = 0;
 	mddev->resync_max = MaxSector;
 	mddev->reshape_position = MaxSector;
-	mddev->external = 0;
+	/* we still need mddev->external in export_rdev, do not clear it yet */
 	mddev->persistent = 0;
 	mddev->level = LEVEL_NONE;
 	mddev->clevel[0] = 0;
diff --git a/drivers/md/md.h b/drivers/md/md.h
@@ -531,11 +531,9 @@ struct mddev {
 
 	/*
 	 * Temporarily store rdev that will be finally removed when
-	 * reconfig_mutex is unlocked.
+	 * reconfig_mutex is unlocked, protected by reconfig_mutex.
 	 */
 	struct list_head		deleting;
-	/* Protect the deleting list */
-	struct mutex			delete_mutex;
 
 	bool	has_superblocks:1;
 	bool	fail_last_dev:1;
diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
@@ -116,7 +116,7 @@ static void md_bio_reset_resync_pages(struct bio *bio, struct resync_pages *rp,
 
 static inline void raid1_submit_write(struct bio *bio)
 {
-	struct md_rdev *rdev = (struct md_rdev *)bio->bi_bdev;
+	struct md_rdev *rdev = (void *)bio->bi_bdev;
 
 	bio->bi_next = NULL;
 	bio_set_dev(bio, rdev->bdev);
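raid1/raid10 temporarily stash an md_rdev pointer in bio->bi_bdev before the bio is resubmitted. With CONFIG_RANDSTRUCT, struct block_device and struct md_rdev both have randomized layouts, and the randstruct GCC plugin rejects a direct cast between pointers to randomized struct types; going through void * expresses the same reinterpretation without tripping that check. A self-contained sketch of the round-trip, with illustrative types standing in for the kernel's:

    #include <assert.h>

    struct dev  { int id; };    /* stands in for struct block_device */
    struct rdev { int x;  };    /* stands in for struct md_rdev */

    /* The pointer field is typed for one struct but briefly reused as
     * scratch space for the other, exactly as raid1 does with bi_bdev. */
    struct bio_like { struct dev *bi_bdev; };

    int main(void)
    {
            struct rdev r = { .x = 42 };
            struct bio_like b;

            /* Store through void *: no direct rdev*-to-dev* cast appears. */
            b.bi_bdev = (void *)&r;

            /* Read back the same way -- this is the line the fix changed. */
            struct rdev *back = (void *)b.bi_bdev;
            assert(back->x == 42);
            return 0;
    }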
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
@@ -325,7 +325,7 @@ static void raid_end_bio_io(struct r10bio *r10_bio)
 	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
 		bio->bi_status = BLK_STS_IOERR;
 
-	if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
+	if (r10_bio->start_time)
 		bio_end_io_acct(bio, r10_bio->start_time);
 	bio_endio(bio);
 	/*
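The io-stat flag can change while an I/O is in flight, so end-side accounting must be keyed off whether start-side accounting actually ran (r10_bio->start_time is only set when it did), not off a re-read of the flag; otherwise bio_end_io_acct() can run with a never-initialized start time. A small self-contained sketch of that rule, with invented names:

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    static bool stats_enabled = true;   /* may be toggled at any moment */

    struct io {
            long start_ns;              /* 0: start-side accounting never ran */
    };

    static void io_start(struct io *io)
    {
            struct timespec ts;

            io->start_ns = 0;
            if (stats_enabled) {        /* like blk_queue_io_stat() at submit */
                    clock_gettime(CLOCK_MONOTONIC, &ts);
                    io->start_ns = ts.tv_sec * 1000000000L + ts.tv_nsec;
            }
    }

    static void io_end(struct io *io)
    {
            /* Test what io_start() recorded, not the current flag value. */
            if (io->start_ns)
                    printf("I/O accounted\n");
    }

    int main(void)
    {
            struct io io;

            io_start(&io);
            stats_enabled = false;      /* flag flips mid-flight */
            io_end(&io);                /* still paired correctly */
            return 0;
    }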
@@ -1118,7 +1118,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
 		spin_lock_irq(&conf->device_lock);
 		bio_list_merge(&conf->pending_bio_list, &plug->pending);
 		spin_unlock_irq(&conf->device_lock);
-		wake_up(&conf->wait_barrier);
+		wake_up_barrier(conf);
 		md_wakeup_thread(mddev->thread);
 		kfree(plug);
 		return;
@@ -1127,7 +1127,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
 	/* we aren't scheduling, so we can do the write-out directly. */
 	bio = bio_list_get(&plug->pending);
 	raid1_prepare_flush_writes(mddev->bitmap);
-	wake_up(&conf->wait_barrier);
+	wake_up_barrier(conf);
 
 	while (bio) { /* submit pending writes */
 		struct bio *next = bio->bi_next;
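wake_up() takes the waitqueue spinlock even when nobody is sleeping, which is pure overhead in raid10's unplug fast path. wake_up_barrier() is defined elsewhere in raid10.c and is not part of this diff; assuming it mirrors raid1's counterpart, its shape is roughly the following sketch, where wq_has_sleeper() supplies the memory barrier that pairs with the sleeper side before the lockless emptiness check:

    /* Likely shape of the helper (a sketch, not copied from this commit):
     * only fall through to the spinlock-taking wake_up() when a sleeper
     * can actually exist on the barrier waitqueue. */
    static inline void wake_up_barrier(struct r10conf *conf)
    {
            if (wq_has_sleeper(&conf->wait_barrier))
                    wake_up(&conf->wait_barrier);
    }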