super: wait for nascent superblocks
Recent patches experiment with making it possible to allocate a new superblock before opening the relevant block device. Naturally this has intricate side-effects that we get to learn about while developing this.

Superblock allocators such as sget{_fc}() return with s_umount of the new superblock held, and lock ordering currently requires that block level locks such as bdev_lock and open_mutex rank above s_umount.

Before aca740cecb ("fs: open block device after superblock creation") ordering was guaranteed to be correct as block devices were opened prior to superblock allocation and thus s_umount wasn't held. But now s_umount must be dropped before opening block devices to avoid locking violations.

This has consequences. The main one is that iterators over @super_blocks and @fs_supers that grab a temporary reference to the superblock can now also grab s_umount before the caller has managed to open block devices and call fill_super(). So whereas before such iterators or concurrent mounts would simply have slept on s_umount until SB_BORN was set or the superblock was discarded due to initialization failure, they can now needlessly spin through sget{_fc}(). If the caller is sleeping on bdev_lock or open_mutex, one caller waiting on SB_BORN will always spin somewhere, and potentially this can go on for quite a while.

It should be possible to drop s_umount while still allowing iterators to wait on a nascent superblock to either be born or discarded. This patch implements a wait_var_event() mechanism allowing iterators to sleep until they are woken when the superblock is born or discarded. This also allows us to avoid relooping through @fs_supers and @super_blocks if a superblock isn't yet born or dying.

Link: aca740cecb ("fs: open block device after superblock creation")
Reviewed-by: Jan Kara <jack@suse.cz>
Message-Id: <20230818-vfs-super-fixes-v3-v3-3-9f0b1876e46b@kernel.org>
Signed-off-by: Christian Brauner <brauner@kernel.org>
This commit is contained in:
parent d8ce82efde
commit 5e87491415

 fs/super.c         | 204
 include/linux/fs.h |   1
diff --git a/fs/super.c b/fs/super.c
@@ -50,7 +50,7 @@ static char *sb_writers_name[SB_FREEZE_LEVELS] = {
 	"sb_internal",
 };
 
-static inline void super_lock(struct super_block *sb, bool excl)
+static inline void __super_lock(struct super_block *sb, bool excl)
 {
 	if (excl)
 		down_write(&sb->s_umount);
@@ -66,14 +66,9 @@ static inline void super_unlock(struct super_block *sb, bool excl)
 		up_read(&sb->s_umount);
 }
 
-static inline void super_lock_excl(struct super_block *sb)
+static inline void __super_lock_excl(struct super_block *sb)
 {
-	super_lock(sb, true);
-}
-
-static inline void super_lock_shared(struct super_block *sb)
-{
-	super_lock(sb, false);
+	__super_lock(sb, true);
 }
 
 static inline void super_unlock_excl(struct super_block *sb)
@@ -86,6 +81,99 @@ static inline void super_unlock_shared(struct super_block *sb)
 	super_unlock(sb, false);
 }
 
+static inline bool wait_born(struct super_block *sb)
+{
+	unsigned int flags;
+
+	/*
+	 * Pairs with smp_store_release() in super_wake() and ensures
+	 * that we see SB_BORN or SB_DYING after we're woken.
+	 */
+	flags = smp_load_acquire(&sb->s_flags);
+	return flags & (SB_BORN | SB_DYING);
+}
+
+/**
+ * super_lock - wait for superblock to become ready and lock it
+ * @sb: superblock to wait for
+ * @excl: whether exclusive access is required
+ *
+ * If the superblock has neither passed through vfs_get_tree() nor
+ * generic_shutdown_super() yet wait for it to happen. Either superblock
+ * creation will succeed and SB_BORN is set by vfs_get_tree() or we're
+ * woken and we'll see SB_DYING.
+ *
+ * The caller must have acquired a temporary reference on @sb->s_count.
+ *
+ * Return: This returns true if SB_BORN was set, false if SB_DYING was
+ *         set. The function acquires s_umount and returns with it held.
+ */
+static __must_check bool super_lock(struct super_block *sb, bool excl)
+{
+
+	lockdep_assert_not_held(&sb->s_umount);
+
+relock:
+	__super_lock(sb, excl);
+
+	/*
+	 * Has gone through generic_shutdown_super() in the meantime.
+	 * @sb->s_root is NULL and @sb->s_active is 0. No one needs to
+	 * grab a reference to this. Tell them so.
+	 */
+	if (sb->s_flags & SB_DYING)
+		return false;
+
+	/* Has called ->get_tree() successfully. */
+	if (sb->s_flags & SB_BORN)
+		return true;
+
+	super_unlock(sb, excl);
+
+	/* wait until the superblock is ready or dying */
+	wait_var_event(&sb->s_flags, wait_born(sb));
+
+	/*
+	 * Neither SB_BORN nor SB_DYING are ever unset so we never loop.
+	 * Just reacquire @sb->s_umount for the caller.
+	 */
+	goto relock;
+}
+
+/* wait and acquire read-side of @sb->s_umount */
+static inline bool super_lock_shared(struct super_block *sb)
+{
+	return super_lock(sb, false);
+}
+
+/* wait and acquire write-side of @sb->s_umount */
+static inline bool super_lock_excl(struct super_block *sb)
+{
+	return super_lock(sb, true);
+}
+
+/* wake waiters */
+#define SUPER_WAKE_FLAGS (SB_BORN | SB_DYING)
+static void super_wake(struct super_block *sb, unsigned int flag)
+{
+	WARN_ON_ONCE((flag & ~SUPER_WAKE_FLAGS));
+	WARN_ON_ONCE(hweight32(flag & SUPER_WAKE_FLAGS) > 1);
+
+	/*
+	 * Pairs with smp_load_acquire() in super_lock() to make sure
+	 * all initializations in the superblock are seen by the user
+	 * seeing SB_BORN sent.
+	 */
+	smp_store_release(&sb->s_flags, sb->s_flags | flag);
+	/*
+	 * Pairs with the barrier in prepare_to_wait_event() to make sure
+	 * ___wait_var_event() either sees SB_BORN set or
+	 * waitqueue_active() check in wake_up_var() sees the waiter.
+	 */
+	smp_mb();
+	wake_up_var(&sb->s_flags);
+}
+
 /*
  * One thing we have to be careful of with a per-sb shrinker is that we don't
  * drop the last active reference to the superblock from within the shrinker.
@@ -393,7 +481,7 @@ EXPORT_SYMBOL(deactivate_locked_super);
 void deactivate_super(struct super_block *s)
 {
 	if (!atomic_add_unless(&s->s_active, -1, 1)) {
-		super_lock_excl(s);
+		__super_lock_excl(s);
 		deactivate_locked_super(s);
 	}
 }
@@ -415,10 +503,12 @@ EXPORT_SYMBOL(deactivate_super);
  */
 static int grab_super(struct super_block *s) __releases(sb_lock)
 {
+	bool born;
+
 	s->s_count++;
 	spin_unlock(&sb_lock);
-	super_lock_excl(s);
-	if ((s->s_flags & SB_BORN) && atomic_inc_not_zero(&s->s_active)) {
+	born = super_lock_excl(s);
+	if (born && atomic_inc_not_zero(&s->s_active)) {
 		put_super(s);
 		return 1;
 	}
@@ -447,8 +537,8 @@ static int grab_super(struct super_block *s) __releases(sb_lock)
 bool super_trylock_shared(struct super_block *sb)
 {
 	if (down_read_trylock(&sb->s_umount)) {
-		if (!hlist_unhashed(&sb->s_instances) &&
-		    sb->s_root && (sb->s_flags & SB_BORN))
+		if (!(sb->s_flags & SB_DYING) && sb->s_root &&
+		    (sb->s_flags & SB_BORN))
 			return true;
 		super_unlock_shared(sb);
 	}
@@ -475,7 +565,7 @@ bool super_trylock_shared(struct super_block *sb)
 void retire_super(struct super_block *sb)
 {
 	WARN_ON(!sb->s_bdev);
-	super_lock_excl(sb);
+	__super_lock_excl(sb);
 	if (sb->s_iflags & SB_I_PERSB_BDI) {
 		bdi_unregister(sb->s_bdi);
 		sb->s_iflags &= ~SB_I_PERSB_BDI;
@@ -557,6 +647,13 @@ void generic_shutdown_super(struct super_block *sb)
 	/* should be initialized for __put_super_and_need_restart() */
 	hlist_del_init(&sb->s_instances);
 	spin_unlock(&sb_lock);
+	/*
+	 * Broadcast to everyone that grabbed a temporary reference to this
+	 * superblock before we removed it from @fs_supers that the superblock
+	 * is dying. Every walker of @fs_supers outside of sget{_fc}() will now
+	 * discard this superblock and treat it as dead.
+	 */
+	super_wake(sb, SB_DYING);
 	super_unlock_excl(sb);
 	if (sb->s_bdi != &noop_backing_dev_info) {
 		if (sb->s_iflags & SB_I_PERSB_BDI)
@@ -631,6 +728,11 @@ retry:
 	s->s_type = fc->fs_type;
 	s->s_iflags |= fc->s_iflags;
 	strscpy(s->s_id, s->s_type->name, sizeof(s->s_id));
+	/*
+	 * Make the superblock visible on @super_blocks and @fs_supers.
+	 * It's in a nascent state and users should wait on SB_BORN or
+	 * SB_DYING to be set.
+	 */
 	list_add_tail(&s->s_list, &super_blocks);
 	hlist_add_head(&s->s_instances, &s->s_type->fs_supers);
 	spin_unlock(&sb_lock);
@@ -740,7 +842,8 @@ static void __iterate_supers(void (*f)(struct super_block *))
 
 	spin_lock(&sb_lock);
 	list_for_each_entry(sb, &super_blocks, s_list) {
-		if (hlist_unhashed(&sb->s_instances))
+		/* Pairs with memory barrier in super_wake(). */
+		if (smp_load_acquire(&sb->s_flags) & SB_DYING)
 			continue;
 		sb->s_count++;
 		spin_unlock(&sb_lock);
@@ -770,13 +873,13 @@ void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
 
 	spin_lock(&sb_lock);
 	list_for_each_entry(sb, &super_blocks, s_list) {
-		if (hlist_unhashed(&sb->s_instances))
-			continue;
+		bool born;
+
 		sb->s_count++;
 		spin_unlock(&sb_lock);
 
-		super_lock_shared(sb);
-		if (sb->s_root && (sb->s_flags & SB_BORN))
+		born = super_lock_shared(sb);
+		if (born && sb->s_root)
 			f(sb, arg);
 		super_unlock_shared(sb);
 
@@ -806,11 +909,13 @@ void iterate_supers_type(struct file_system_type *type,
 
 	spin_lock(&sb_lock);
 	hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
+		bool born;
+
 		sb->s_count++;
 		spin_unlock(&sb_lock);
 
-		super_lock_shared(sb);
-		if (sb->s_root && (sb->s_flags & SB_BORN))
+		born = super_lock_shared(sb);
+		if (born && sb->s_root)
 			f(sb, arg);
 		super_unlock_shared(sb);
 
@@ -841,14 +946,11 @@ struct super_block *get_active_super(struct block_device *bdev)
 	if (!bdev)
 		return NULL;
 
-restart:
 	spin_lock(&sb_lock);
 	list_for_each_entry(sb, &super_blocks, s_list) {
-		if (hlist_unhashed(&sb->s_instances))
-			continue;
 		if (sb->s_bdev == bdev) {
 			if (!grab_super(sb))
-				goto restart;
+				return NULL;
 			super_unlock_excl(sb);
 			return sb;
 		}
@@ -862,22 +964,21 @@ struct super_block *user_get_super(dev_t dev, bool excl)
 	struct super_block *sb;
 
 	spin_lock(&sb_lock);
-rescan:
 	list_for_each_entry(sb, &super_blocks, s_list) {
-		if (hlist_unhashed(&sb->s_instances))
-			continue;
 		if (sb->s_dev == dev) {
+			bool born;
+
 			sb->s_count++;
 			spin_unlock(&sb_lock);
-			super_lock(sb, excl);
 			/* still alive? */
-			if (sb->s_root && (sb->s_flags & SB_BORN))
+			born = super_lock(sb, excl);
+			if (born && sb->s_root)
 				return sb;
 			super_unlock(sb, excl);
 			/* nope, got unmounted */
 			spin_lock(&sb_lock);
 			__put_super(sb);
-			goto rescan;
+			break;
 		}
 	}
 	spin_unlock(&sb_lock);
@@ -921,7 +1022,7 @@ int reconfigure_super(struct fs_context *fc)
 	if (!hlist_empty(&sb->s_pins)) {
 		super_unlock_excl(sb);
 		group_pin_kill(&sb->s_pins);
-		super_lock_excl(sb);
+		__super_lock_excl(sb);
 		if (!sb->s_root)
 			return 0;
 		if (sb->s_writers.frozen != SB_UNFROZEN)
@@ -984,9 +1085,9 @@ cancel_readonly:
 
 static void do_emergency_remount_callback(struct super_block *sb)
 {
-	super_lock_excl(sb);
-	if (sb->s_root && sb->s_bdev && (sb->s_flags & SB_BORN) &&
-	    !sb_rdonly(sb)) {
+	bool born = super_lock_excl(sb);
+
+	if (born && sb->s_root && sb->s_bdev && !sb_rdonly(sb)) {
 		struct fs_context *fc;
 
 		fc = fs_context_for_reconfigure(sb->s_root,
@@ -1020,8 +1121,9 @@ void emergency_remount(void)
 
 static void do_thaw_all_callback(struct super_block *sb)
 {
-	super_lock_excl(sb);
-	if (sb->s_root && sb->s_flags & SB_BORN) {
+	bool born = super_lock_excl(sb);
+
+	if (born && sb->s_root) {
 		emergency_thaw_bdev(sb);
 		thaw_super_locked(sb);
 	} else {
@@ -1212,9 +1314,9 @@ EXPORT_SYMBOL(get_tree_keyed);
  */
 static bool super_lock_shared_active(struct super_block *sb)
 {
-	super_lock_shared(sb);
-	if (!sb->s_root ||
-	    (sb->s_flags & (SB_ACTIVE | SB_BORN)) != (SB_ACTIVE | SB_BORN)) {
+	bool born = super_lock_shared(sb);
+
+	if (!born || !sb->s_root || !(sb->s_flags & SB_ACTIVE)) {
 		super_unlock_shared(sb);
 		return false;
 	}
@@ -1374,7 +1476,7 @@ int get_tree_bdev(struct fs_context *fc,
 	 */
 	super_unlock_excl(s);
 	error = setup_bdev_super(s, fc->sb_flags, fc);
-	super_lock_excl(s);
+	__super_lock_excl(s);
 	if (!error)
 		error = fill_super(s, fc);
 	if (error) {
@@ -1426,7 +1528,7 @@ struct dentry *mount_bdev(struct file_system_type *fs_type,
 	 */
 	super_unlock_excl(s);
 	error = setup_bdev_super(s, flags, NULL);
-	super_lock_excl(s);
+	__super_lock_excl(s);
 	if (!error)
 		error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
 	if (error) {
@@ -1566,13 +1668,13 @@ int vfs_get_tree(struct fs_context *fc)
 	WARN_ON(!sb->s_bdi);
 
 	/*
-	 * Write barrier is for super_cache_count(). We place it before setting
-	 * SB_BORN as the data dependency between the two functions is the
-	 * superblock structure contents that we just set up, not the SB_BORN
-	 * flag.
+	 * super_wake() contains a memory barrier which also takes care of
+	 * ordering for super_cache_count(). We place it before setting
+	 * SB_BORN as the data dependency between the two functions is
+	 * the superblock structure contents that we just set up, not
+	 * the SB_BORN flag.
 	 */
-	smp_wmb();
-	sb->s_flags |= SB_BORN;
+	super_wake(sb, SB_BORN);
 
 	error = security_sb_set_mnt_opts(sb, fc->security, 0, NULL);
 	if (unlikely(error)) {
@@ -1715,7 +1817,7 @@ int freeze_super(struct super_block *sb)
 	int ret;
 
 	atomic_inc(&sb->s_active);
-	super_lock_excl(sb);
+	__super_lock_excl(sb);
 	if (sb->s_writers.frozen != SB_UNFROZEN) {
 		deactivate_locked_super(sb);
 		return -EBUSY;
@@ -1737,7 +1839,7 @@ int freeze_super(struct super_block *sb)
 	/* Release s_umount to preserve sb_start_write -> s_umount ordering */
 	super_unlock_excl(sb);
 	sb_wait_write(sb, SB_FREEZE_WRITE);
-	super_lock_excl(sb);
+	__super_lock_excl(sb);
 
 	/* Now we go and block page faults... */
 	sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
@@ -1820,7 +1922,7 @@ out:
  */
 int thaw_super(struct super_block *sb)
 {
-	super_lock_excl(sb);
+	__super_lock_excl(sb);
 	return thaw_super_locked(sb);
 }
 EXPORT_SYMBOL(thaw_super);
diff --git a/include/linux/fs.h b/include/linux/fs.h
@@ -1095,6 +1095,7 @@ extern int send_sigurg(struct fown_struct *fown);
 #define SB_LAZYTIME	BIT(25)	/* Update the on-disk [acm]times lazily */
 
 /* These sb flags are internal to the kernel */
+#define SB_DYING	BIT(24)
 #define SB_SUBMOUNT	BIT(26)
 #define SB_FORCE	BIT(27)
 #define SB_NOSEC	BIT(28)