dm: use fsleep() instead of msleep() for deterministic sleep duration

Signed-off-by: Heinz Mauelshagen <heinzm@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
Author: Heinz Mauelshagen, 2023-01-25 22:44:39 +01:00 (committed by Mike Snitzer)
parent 0d78954a2d
commit 238d991f05
4 changed files with 10 additions and 10 deletions
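
For context: fsleep() takes its argument in microseconds and selects the sleep
primitive best suited to the requested duration, rather than always rounding up
to jiffies the way msleep() does. A minimal sketch of its dispatch logic,
roughly as declared in include/linux/delay.h around the time of this commit
(the thresholds shown are approximate and may differ between kernel versions):

	/* Sketch only: fsleep() picks a delay mechanism based on duration. */
	static inline void fsleep(unsigned long usecs)
	{
		if (usecs <= 10)		/* too short to reschedule: busy-wait */
			udelay(usecs);
		else if (usecs <= 20000)	/* hrtimer-backed sleep with some slack */
			usleep_range(usecs, 2 * usecs);
		else				/* long sleeps: jiffy granularity is fine */
			msleep(DIV_ROUND_UP(usecs, 1000));
	}

Because fsleep() is specified in microseconds, every call site in this commit
simply multiplies the old msleep() argument by 1000 (msleep(5) becomes
fsleep(5000), msleep(1) becomes fsleep(1000), and so on) without changing the
intended sleep time.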

@@ -296,7 +296,7 @@ static int __init dm_init_init(void)
 		if (waitfor[i]) {
 			DMINFO("waiting for device %s ...", waitfor[i]);
 			while (!dm_get_dev_t(waitfor[i]))
-				msleep(5);
+				fsleep(5000);
 		}
 	}

@@ -110,7 +110,7 @@ static DEFINE_SPINLOCK(throttle_spinlock);
  * The reason for this is unknown but possibly due to jiffies rounding errors
  * or read/write cache inside the disk.
  */
-#define SLEEP_MSEC			100
+#define SLEEP_USEC			100000
 /*
  * Maximum number of sleep events. There is a theoretical livelock if more
@@ -158,7 +158,7 @@ try_again:
 	if (unlikely(skew > 0) && slept < MAX_SLEEPS) {
 		slept++;
 		spin_unlock_irq(&throttle_spinlock);
-		msleep(SLEEP_MSEC);
+		fsleep(SLEEP_USEC);
 		goto try_again;
 	}

@@ -298,12 +298,12 @@ static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
 /*
  * This conflicting I/O is extremely improbable in the caller,
- * so msleep(1) is sufficient and there is no need for a wait queue.
+ * so fsleep(1000) is sufficient and there is no need for a wait queue.
  */
 static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
 {
 	while (__chunk_is_tracked(s, chunk))
-		msleep(1);
+		fsleep(1000);
 }
 /*
@@ -1494,7 +1494,7 @@ static void snapshot_dtr(struct dm_target *ti)
 	unregister_snapshot(s);
 	while (atomic_read(&s->pending_exceptions_count))
-		msleep(1);
+		fsleep(1000);
 	/*
 	 * Ensure instructions in mempool_exit aren't reordered
 	 * before atomic_read.

@@ -436,7 +436,7 @@ retry:
 	r = ti->type->prepare_ioctl(ti, bdev);
 	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
 		dm_put_live_table(md, *srcu_idx);
-		msleep(10);
+		fsleep(10000);
 		goto retry;
 	}
@@ -2455,7 +2455,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
 		set_bit(DMF_POST_SUSPENDING, &md->flags);
 		dm_table_postsuspend_targets(map);
 	}
-	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
+	/* dm_put_live_table must be before fsleep, otherwise deadlock is possible */
 	dm_put_live_table(md, srcu_idx);
 	mutex_unlock(&md->suspend_lock);
@@ -2467,7 +2467,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
 	 */
 	if (wait)
 		while (atomic_read(&md->holders))
-			msleep(1);
+			fsleep(1000);
 	else if (atomic_read(&md->holders))
 		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
 		       dm_device_name(md), atomic_read(&md->holders));
@@ -2544,7 +2544,7 @@ static int dm_wait_for_completion(struct mapped_device *md, unsigned int task_st
 			break;
 		}
-		msleep(5);
+		fsleep(5000);
 	}
 	return r;
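
On the "deterministic sleep duration" rationale: msleep() converts milliseconds
to jiffies, rounding up, and then sleeps on a jiffy-granular timer, so small
arguments can oversleep badly (msleep(1) can block for roughly two jiffies,
i.e. on the order of 20 ms on a HZ=100 kernel). A simplified sketch, based on
the mainline implementation in kernel/time/timer.c:

	/* Sketch only: why msleep() of tiny values oversleeps. */
	void msleep(unsigned int msecs)
	{
		/* Rounds up to jiffies and adds one extra jiffy, so for small
		 * msecs the actual sleep is dominated by the tick period
		 * rather than by the requested duration. */
		unsigned long timeout = msecs_to_jiffies(msecs) + 1;

		while (timeout)
			timeout = schedule_timeout_uninterruptible(timeout);
	}

For the 1-10 ms sleeps converted in this commit, fsleep() ends up in
usleep_range(), which is backed by hrtimers and therefore tracks the requested
duration much more closely.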