Merge branch 'md-next' of https://git.kernel.org/pub/scm/linux/kernel/git/song/md into for-6.3/block
Pull MD updates from Song:

"Non-urgent fixes:
    md: don't update recovery_cp when curr_resync is ACTIVE
    md: Free writes_pending in md_stop

 Performance optimization:
    md: Change active_io to percpu"

* 'md-next' of https://git.kernel.org/pub/scm/linux/kernel/git/song/md:
  md: use MD_RESYNC_* whenever possible
  md: Free writes_pending in md_stop
  md: Change active_io to percpu
  md: Factor out is_md_suspended helper
  md: don't update recovery_cp when curr_resync is ACTIVE
commit 839c717bec
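The core of this series is converting mddev->active_io from an atomic_t (paired with RCU and explicit wake_up calls) to a percpu_ref, so the I/O hot path only pays a per-cpu get/put while mddev_suspend() still gets an exact drain point. Below is a minimal, self-contained sketch of that lifecycle; the dev_ctx/inflight names are illustrative and not the md code itself, but the percpu_ref calls are the same ones the diff uses (init with PERCPU_REF_ALLOW_REINIT, tryget_live/put on the fast path, kill plus wait-for-zero on suspend, resurrect on resume).

/*
 * Sketch only (hypothetical dev_ctx, not drivers/md code): the generic
 * percpu_ref pattern for counting in-flight I/O that the series applies
 * to mddev->active_io.
 */
#include <linux/percpu-refcount.h>
#include <linux/wait.h>

struct dev_ctx {
	struct percpu_ref	inflight;
	wait_queue_head_t	wait;
};

/* Release callback: runs once the count drops to zero after a kill. */
static void inflight_release(struct percpu_ref *ref)
{
	struct dev_ctx *ctx = container_of(ref, struct dev_ctx, inflight);

	wake_up(&ctx->wait);
}

static int ctx_init(struct dev_ctx *ctx)
{
	init_waitqueue_head(&ctx->wait);
	/* ALLOW_REINIT is what makes percpu_ref_resurrect() legal later. */
	return percpu_ref_init(&ctx->inflight, inflight_release,
			       PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
}

/* Fast path: per-cpu increment; fails once the ref has been killed. */
static bool ctx_start_io(struct dev_ctx *ctx)
{
	return percpu_ref_tryget_live(&ctx->inflight);
}

static void ctx_end_io(struct dev_ctx *ctx)
{
	percpu_ref_put(&ctx->inflight);
}

/* Suspend: stop new I/O, then wait for all in-flight I/O to drain. */
static void ctx_suspend(struct dev_ctx *ctx)
{
	percpu_ref_kill(&ctx->inflight);
	wait_event(ctx->wait, percpu_ref_is_zero(&ctx->inflight));
}

/* Resume: re-arm the counter for new I/O. */
static void ctx_resume(struct dev_ctx *ctx)
{
	percpu_ref_resurrect(&ctx->inflight);
}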
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -380,6 +380,10 @@ EXPORT_SYMBOL_GPL(md_new_event);
 static LIST_HEAD(all_mddevs);
 static DEFINE_SPINLOCK(all_mddevs_lock);
 
+static bool is_md_suspended(struct mddev *mddev)
+{
+	return percpu_ref_is_dying(&mddev->active_io);
+}
 /* Rather than calling directly into the personality make_request function,
  * IO requests come here first so that we can check if the device is
  * being suspended pending a reconfiguration.
@@ -389,7 +393,7 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
  */
 static bool is_suspended(struct mddev *mddev, struct bio *bio)
 {
-	if (mddev->suspended)
+	if (is_md_suspended(mddev))
 		return true;
 	if (bio_data_dir(bio) != WRITE)
 		return false;
@@ -405,12 +409,10 @@ static bool is_suspended(struct mddev *mddev, struct bio *bio)
 void md_handle_request(struct mddev *mddev, struct bio *bio)
 {
 check_suspended:
-	rcu_read_lock();
 	if (is_suspended(mddev, bio)) {
 		DEFINE_WAIT(__wait);
 		/* Bail out if REQ_NOWAIT is set for the bio */
 		if (bio->bi_opf & REQ_NOWAIT) {
-			rcu_read_unlock();
 			bio_wouldblock_error(bio);
 			return;
 		}
@@ -419,23 +421,19 @@ check_suspended:
 					TASK_UNINTERRUPTIBLE);
 			if (!is_suspended(mddev, bio))
 				break;
-			rcu_read_unlock();
 			schedule();
-			rcu_read_lock();
 		}
 		finish_wait(&mddev->sb_wait, &__wait);
 	}
-	atomic_inc(&mddev->active_io);
-	rcu_read_unlock();
+	if (!percpu_ref_tryget_live(&mddev->active_io))
+		goto check_suspended;
 
 	if (!mddev->pers->make_request(mddev, bio)) {
-		atomic_dec(&mddev->active_io);
-		wake_up(&mddev->sb_wait);
+		percpu_ref_put(&mddev->active_io);
 		goto check_suspended;
 	}
 
-	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
-		wake_up(&mddev->sb_wait);
+	percpu_ref_put(&mddev->active_io);
 }
 EXPORT_SYMBOL(md_handle_request);
 
@@ -483,11 +481,10 @@ void mddev_suspend(struct mddev *mddev)
 	lockdep_assert_held(&mddev->reconfig_mutex);
 	if (mddev->suspended++)
 		return;
-	synchronize_rcu();
 	wake_up(&mddev->sb_wait);
 	set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags);
-	smp_mb__after_atomic();
-	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
+	percpu_ref_kill(&mddev->active_io);
+	wait_event(mddev->sb_wait, percpu_ref_is_zero(&mddev->active_io));
 	mddev->pers->quiesce(mddev, 1);
 	clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags);
 	wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags));
@@ -505,6 +502,7 @@ void mddev_resume(struct mddev *mddev)
 	lockdep_assert_held(&mddev->reconfig_mutex);
 	if (--mddev->suspended)
 		return;
+	percpu_ref_resurrect(&mddev->active_io);
 	wake_up(&mddev->sb_wait);
 	mddev->pers->quiesce(mddev, 0);
 
@@ -683,7 +681,6 @@ void mddev_init(struct mddev *mddev)
 	timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0);
 	atomic_set(&mddev->active, 1);
 	atomic_set(&mddev->openers, 0);
-	atomic_set(&mddev->active_io, 0);
 	spin_lock_init(&mddev->lock);
 	atomic_set(&mddev->flush_pending, 0);
 	init_waitqueue_head(&mddev->sb_wait);
@@ -5760,6 +5757,12 @@ static void md_safemode_timeout(struct timer_list *t)
 }
 
 static int start_dirty_degraded;
+static void active_io_release(struct percpu_ref *ref)
+{
+	struct mddev *mddev = container_of(ref, struct mddev, active_io);
+
+	wake_up(&mddev->sb_wait);
+}
 
 int md_run(struct mddev *mddev)
 {
@@ -5840,10 +5843,15 @@ int md_run(struct mddev *mddev)
 		nowait = nowait && bdev_nowait(rdev->bdev);
 	}
 
+	err = percpu_ref_init(&mddev->active_io, active_io_release,
+				PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
+	if (err)
+		return err;
+
 	if (!bioset_initialized(&mddev->bio_set)) {
 		err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
 		if (err)
-			return err;
+			goto exit_active_io;
 	}
 	if (!bioset_initialized(&mddev->sync_set)) {
 		err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
@@ -6031,6 +6039,8 @@ abort:
 	bioset_exit(&mddev->sync_set);
 exit_bio_set:
 	bioset_exit(&mddev->bio_set);
+exit_active_io:
+	percpu_ref_exit(&mddev->active_io);
 	return err;
 }
 EXPORT_SYMBOL_GPL(md_run);
@@ -6156,7 +6166,7 @@ static void md_clean(struct mddev *mddev)
 	mddev->new_level = LEVEL_NONE;
 	mddev->new_layout = 0;
 	mddev->new_chunk_sectors = 0;
-	mddev->curr_resync = 0;
+	mddev->curr_resync = MD_RESYNC_NONE;
 	atomic64_set(&mddev->resync_mismatches, 0);
 	mddev->suspend_lo = mddev->suspend_hi = 0;
 	mddev->sync_speed_min = mddev->sync_speed_max = 0;
@@ -6219,7 +6229,7 @@ EXPORT_SYMBOL_GPL(md_stop_writes);
 static void mddev_detach(struct mddev *mddev)
 {
 	md_bitmap_wait_behind_writes(mddev);
-	if (mddev->pers && mddev->pers->quiesce && !mddev->suspended) {
+	if (mddev->pers && mddev->pers->quiesce && !is_md_suspended(mddev)) {
 		mddev->pers->quiesce(mddev, 1);
 		mddev->pers->quiesce(mddev, 0);
 	}
@@ -6255,6 +6265,8 @@ void md_stop(struct mddev *mddev)
 	 */
 	__md_stop_writes(mddev);
 	__md_stop(mddev);
+	percpu_ref_exit(&mddev->writes_pending);
+	percpu_ref_exit(&mddev->active_io);
 	bioset_exit(&mddev->bio_set);
 	bioset_exit(&mddev->sync_set);
 }
@@ -7828,6 +7840,7 @@ static void md_free_disk(struct gendisk *disk)
 	struct mddev *mddev = disk->private_data;
 
 	percpu_ref_exit(&mddev->writes_pending);
+	percpu_ref_exit(&mddev->active_io);
 	bioset_exit(&mddev->bio_set);
 	bioset_exit(&mddev->sync_set);
 
@@ -8531,7 +8544,7 @@ bool md_write_start(struct mddev *mddev, struct bio *bi)
 		return true;
 	wait_event(mddev->sb_wait,
 		   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) ||
-		   mddev->suspended);
+		   is_md_suspended(mddev));
 	if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
 		percpu_ref_put(&mddev->writes_pending);
 		return false;
@@ -8883,7 +8896,7 @@ void md_do_sync(struct md_thread *thread)
 	atomic_set(&mddev->recovery_active, 0);
 	last_check = 0;
 
-	if (j>2) {
+	if (j >= MD_RESYNC_ACTIVE) {
 		pr_debug("md: resuming %s of %s from checkpoint.\n",
 			 desc, mdname(mddev));
 		mddev->curr_resync = j;
@@ -8955,7 +8968,7 @@ void md_do_sync(struct md_thread *thread)
 		if (j > max_sectors)
 			/* when skipping, extra large numbers can be returned. */
 			j = max_sectors;
-		if (j > 2)
+		if (j >= MD_RESYNC_ACTIVE)
 			mddev->curr_resync = j;
 		mddev->curr_mark_cnt = io_sectors;
 		if (last_check == 0)
@@ -9030,7 +9043,7 @@ void md_do_sync(struct md_thread *thread)
 	mddev->pers->sync_request(mddev, max_sectors, &skipped);
 
 	if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
-	    mddev->curr_resync >= MD_RESYNC_ACTIVE) {
+	    mddev->curr_resync > MD_RESYNC_ACTIVE) {
 		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
 			if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
 				if (mddev->curr_resync >= mddev->recovery_cp) {
@@ -9259,7 +9272,7 @@ void md_check_recovery(struct mddev *mddev)
 			wake_up(&mddev->sb_wait);
 		}
 
-	if (mddev->suspended)
+	if (is_md_suspended(mddev))
 		return;
 
 	if (mddev->bitmap)
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -315,7 +315,7 @@ struct mddev {
 	unsigned long			sb_flags;
 
 	int				suspended;
-	atomic_t			active_io;
+	struct percpu_ref		active_io;
 	int				ro;
 	int				sysfs_active; /* set when sysfs deletes
 						       * are happening, so run/