fs/ufs: restore s_lock mutex
Commit 0244756edc ("ufs: sb mutex merge + mutex_destroy") introduced
deadlocks in read/write mode on mkdir.

This patch partially reverts it, keeping the fixes by Andrew Morton and
the mutex_destroy() call.
[AV: fixed a missing bit in ufs_remount()]
Signed-off-by: Fabian Frederick <fabf@skynet.be>
Reported-by: Ian Campbell <ian.campbell@citrix.com>
Suggested-by: Jan Kara <jack@suse.cz>
Cc: Ian Campbell <ian.campbell@citrix.com>
Cc: Evgeniy Dushistov <dushistov@mail.ru>
Cc: Alexey Khoroshilov <khoroshilov@ispras.ru>
Cc: Roger Pau Monne <roger.pau@citrix.com>
Cc: Ian Jackson <Ian.Jackson@eu.citrix.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
commit cdd9eefdf9 (parent 13b987ea27)
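For readers skimming the diff below, here is a minimal sketch of the locking pattern this change restores. The struct field and lock calls mirror the hunks that follow; the two functions (suffixed _sketch) are illustrative stand-ins, not the real bodies. The allocators in balloc.c/ialloc.c now serialize on the per-superblock s_lock alone, while ufs_sync_fs() and ufs_remount() take s_lock nested inside lock_ufs():

```c
/* Illustrative only: abridged from the hunks below, not the full functions. */
#include <linux/fs.h>
#include <linux/mutex.h>

/* The field restored by this commit lives in struct ufs_sb_info (fs/ufs/ufs.h):
 *	struct mutex s_lock;
 */

/* balloc.c / ialloc.c pattern: s_lock alone replaces lock_ufs(). */
static void ufs_alloc_path_sketch(struct super_block *sb)
{
	mutex_lock(&UFS_SB(sb)->s_lock);
	/* ... modify cylinder-group and superblock summary data ... */
	mutex_unlock(&UFS_SB(sb)->s_lock);
}

/* super.c pattern: s_lock is taken in addition to lock_ufs(),
 * giving the nesting order lock_ufs() -> s_lock.
 */
static int ufs_sync_path_sketch(struct super_block *sb)
{
	lock_ufs(sb);
	mutex_lock(&UFS_SB(sb)->s_lock);
	/* ... write back superblock state ... */
	mutex_unlock(&UFS_SB(sb)->s_lock);
	unlock_ufs(sb);
	return 0;
}
```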
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -52,7 +52,7 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
 	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
 		ufs_error (sb, "ufs_free_fragments", "internal error");
 
-	lock_ufs(sb);
+	mutex_lock(&UFS_SB(sb)->s_lock);
 
 	cgno = ufs_dtog(uspi, fragment);
 	bit = ufs_dtogd(uspi, fragment);
@@ -116,12 +116,12 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
 	ubh_sync_block(UCPI_UBH(ucpi));
 	ufs_mark_sb_dirty(sb);
 
-	unlock_ufs(sb);
+	mutex_unlock(&UFS_SB(sb)->s_lock);
 	UFSD("EXIT\n");
 	return;
 
 failed:
-	unlock_ufs(sb);
+	mutex_unlock(&UFS_SB(sb)->s_lock);
 	UFSD("EXIT (FAILED)\n");
 	return;
 }
@@ -151,7 +151,7 @@ void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
 		goto failed;
 	}
 
-	lock_ufs(sb);
+	mutex_lock(&UFS_SB(sb)->s_lock);
 
 do_more:
 	overflow = 0;
@@ -211,12 +211,12 @@ do_more:
 	}
 
 	ufs_mark_sb_dirty(sb);
-	unlock_ufs(sb);
+	mutex_unlock(&UFS_SB(sb)->s_lock);
 	UFSD("EXIT\n");
 	return;
 
 failed_unlock:
-	unlock_ufs(sb);
+	mutex_unlock(&UFS_SB(sb)->s_lock);
 failed:
 	UFSD("EXIT (FAILED)\n");
 	return;
@@ -357,7 +357,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
 	usb1 = ubh_get_usb_first(uspi);
 	*err = -ENOSPC;
 
-	lock_ufs(sb);
+	mutex_lock(&UFS_SB(sb)->s_lock);
 	tmp = ufs_data_ptr_to_cpu(sb, p);
 
 	if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
@@ -378,19 +378,19 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
 				  "fragment %llu, tmp %llu\n",
 				  (unsigned long long)fragment,
 				  (unsigned long long)tmp);
-			unlock_ufs(sb);
+			mutex_unlock(&UFS_SB(sb)->s_lock);
 			return INVBLOCK;
 		}
 		if (fragment < UFS_I(inode)->i_lastfrag) {
 			UFSD("EXIT (ALREADY ALLOCATED)\n");
-			unlock_ufs(sb);
+			mutex_unlock(&UFS_SB(sb)->s_lock);
 			return 0;
 		}
 	}
 	else {
 		if (tmp) {
 			UFSD("EXIT (ALREADY ALLOCATED)\n");
-			unlock_ufs(sb);
+			mutex_unlock(&UFS_SB(sb)->s_lock);
 			return 0;
 		}
 	}
@@ -399,7 +399,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
 	 * There is not enough space for user on the device
 	 */
 	if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
-		unlock_ufs(sb);
+		mutex_unlock(&UFS_SB(sb)->s_lock);
 		UFSD("EXIT (FAILED)\n");
 		return 0;
 	}
@@ -424,7 +424,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
 			ufs_clear_frags(inode, result + oldcount,
 					newcount - oldcount, locked_page != NULL);
 		}
-		unlock_ufs(sb);
+		mutex_unlock(&UFS_SB(sb)->s_lock);
 		UFSD("EXIT, result %llu\n", (unsigned long long)result);
 		return result;
 	}
@@ -439,7 +439,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
 						fragment + count);
 		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
 				locked_page != NULL);
-		unlock_ufs(sb);
+		mutex_unlock(&UFS_SB(sb)->s_lock);
 		UFSD("EXIT, result %llu\n", (unsigned long long)result);
 		return result;
 	}
@@ -477,7 +477,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
 		*err = 0;
 		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
 						fragment + count);
-		unlock_ufs(sb);
+		mutex_unlock(&UFS_SB(sb)->s_lock);
 		if (newcount < request)
 			ufs_free_fragments (inode, result + newcount, request - newcount);
 		ufs_free_fragments (inode, tmp, oldcount);
@@ -485,7 +485,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
 		return result;
 	}
 
-	unlock_ufs(sb);
+	mutex_unlock(&UFS_SB(sb)->s_lock);
 	UFSD("EXIT (FAILED)\n");
 	return 0;
 }
--- a/fs/ufs/ialloc.c
+++ b/fs/ufs/ialloc.c
@@ -69,11 +69,11 @@ void ufs_free_inode (struct inode * inode)
 
 	ino = inode->i_ino;
 
-	lock_ufs(sb);
+	mutex_lock(&UFS_SB(sb)->s_lock);
 
 	if (!((ino > 1) && (ino < (uspi->s_ncg * uspi->s_ipg )))) {
 		ufs_warning(sb, "ufs_free_inode", "reserved inode or nonexistent inode %u\n", ino);
-		unlock_ufs(sb);
+		mutex_unlock(&UFS_SB(sb)->s_lock);
 		return;
 	}
 
@@ -81,7 +81,7 @@ void ufs_free_inode (struct inode * inode)
 	bit = ufs_inotocgoff (ino);
 	ucpi = ufs_load_cylinder (sb, cg);
 	if (!ucpi) {
-		unlock_ufs(sb);
+		mutex_unlock(&UFS_SB(sb)->s_lock);
 		return;
 	}
 	ucg = ubh_get_ucg(UCPI_UBH(ucpi));
@@ -115,7 +115,7 @@ void ufs_free_inode (struct inode * inode)
 	ubh_sync_block(UCPI_UBH(ucpi));
 
 	ufs_mark_sb_dirty(sb);
-	unlock_ufs(sb);
+	mutex_unlock(&UFS_SB(sb)->s_lock);
 	UFSD("EXIT\n");
 }
 
@@ -193,7 +193,7 @@ struct inode *ufs_new_inode(struct inode *dir, umode_t mode)
 	sbi = UFS_SB(sb);
 	uspi = sbi->s_uspi;
 
-	lock_ufs(sb);
+	mutex_lock(&sbi->s_lock);
 
 	/*
 	 * Try to place the inode in its parent directory
@@ -331,21 +331,21 @@ cg_found:
 		sync_dirty_buffer(bh);
 		brelse(bh);
 	}
-	unlock_ufs(sb);
+	mutex_unlock(&sbi->s_lock);
 
 	UFSD("allocating inode %lu\n", inode->i_ino);
 	UFSD("EXIT\n");
 	return inode;
 
 fail_remove_inode:
-	unlock_ufs(sb);
+	mutex_unlock(&sbi->s_lock);
 	clear_nlink(inode);
 	unlock_new_inode(inode);
 	iput(inode);
 	UFSD("EXIT (FAILED): err %d\n", err);
 	return ERR_PTR(err);
 failed:
-	unlock_ufs(sb);
+	mutex_unlock(&sbi->s_lock);
 	make_bad_inode(inode);
 	iput (inode);
 	UFSD("EXIT (FAILED): err %d\n", err);
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -694,6 +694,7 @@ static int ufs_sync_fs(struct super_block *sb, int wait)
 	unsigned flags;
 
 	lock_ufs(sb);
+	mutex_lock(&UFS_SB(sb)->s_lock);
 
 	UFSD("ENTER\n");
 
@@ -711,6 +712,7 @@ static int ufs_sync_fs(struct super_block *sb, int wait)
 	ufs_put_cstotal(sb);
 
 	UFSD("EXIT\n");
+	mutex_unlock(&UFS_SB(sb)->s_lock);
 	unlock_ufs(sb);
 
 	return 0;
@@ -1277,6 +1279,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
 
 	sync_filesystem(sb);
 	lock_ufs(sb);
+	mutex_lock(&UFS_SB(sb)->s_lock);
 	uspi = UFS_SB(sb)->s_uspi;
 	flags = UFS_SB(sb)->s_flags;
 	usb1 = ubh_get_usb_first(uspi);
@@ -1290,6 +1293,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
 	new_mount_opt = 0;
 	ufs_set_opt (new_mount_opt, ONERROR_LOCK);
 	if (!ufs_parse_options (data, &new_mount_opt)) {
+		mutex_unlock(&UFS_SB(sb)->s_lock);
 		unlock_ufs(sb);
 		return -EINVAL;
 	}
@@ -1297,12 +1301,14 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
 		new_mount_opt |= ufstype;
 	} else if ((new_mount_opt & UFS_MOUNT_UFSTYPE) != ufstype) {
 		pr_err("ufstype can't be changed during remount\n");
+		mutex_unlock(&UFS_SB(sb)->s_lock);
 		unlock_ufs(sb);
 		return -EINVAL;
 	}
 
 	if ((*mount_flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) {
 		UFS_SB(sb)->s_mount_opt = new_mount_opt;
+		mutex_unlock(&UFS_SB(sb)->s_lock);
 		unlock_ufs(sb);
 		return 0;
 	}
@@ -1326,6 +1332,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
 	 */
 #ifndef CONFIG_UFS_FS_WRITE
 		pr_err("ufs was compiled with read-only support, can't be mounted as read-write\n");
+		mutex_unlock(&UFS_SB(sb)->s_lock);
 		unlock_ufs(sb);
 		return -EINVAL;
 #else
@@ -1335,11 +1342,13 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
 		    ufstype != UFS_MOUNT_UFSTYPE_SUNx86 &&
 		    ufstype != UFS_MOUNT_UFSTYPE_UFS2) {
 			pr_err("this ufstype is read-only supported\n");
+			mutex_unlock(&UFS_SB(sb)->s_lock);
 			unlock_ufs(sb);
 			return -EINVAL;
 		}
 		if (!ufs_read_cylinder_structures(sb)) {
 			pr_err("failed during remounting\n");
+			mutex_unlock(&UFS_SB(sb)->s_lock);
 			unlock_ufs(sb);
 			return -EPERM;
 		}
@@ -1347,6 +1356,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
 #endif
 	}
 	UFS_SB(sb)->s_mount_opt = new_mount_opt;
+	mutex_unlock(&UFS_SB(sb)->s_lock);
 	unlock_ufs(sb);
 	return 0;
 }
--- a/fs/ufs/ufs.h
+++ b/fs/ufs/ufs.h
@@ -30,6 +30,7 @@ struct ufs_sb_info {
 	int work_queued; /* non-zero if the delayed work is queued */
 	struct delayed_work sync_work; /* FS sync delayed work */
 	spinlock_t work_lock; /* protects sync_work and work_queued */
+	struct mutex s_lock;
 };
 
 struct ufs_inode_info {
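The diff as captured here is truncated and does not show how the restored mutex is set up and torn down. The commit message says the mutex_destroy() from the original cleanup is kept, so the lock presumably gets initialized when the superblock is filled and destroyed when it is put. Below is a minimal sketch of that lifecycle, assuming the init lives in ufs_fill_super() and the destroy in ufs_put_super(); the function bodies are illustrative, not the actual hunks:

```c
/* Illustrative lifecycle for the restored s_lock (assumed placement). */
static int ufs_fill_super_sketch(struct super_block *sb, void *data, int silent)
{
	struct ufs_sb_info *sbi;

	sbi = kzalloc(sizeof(struct ufs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;
	sb->s_fs_info = sbi;

	mutex_init(&sbi->s_lock);	/* must run before any allocator takes it */
	/* ... read the on-disk superblock, set up sbi->s_uspi, etc. ... */
	return 0;
}

static void ufs_put_super_sketch(struct super_block *sb)
{
	struct ufs_sb_info *sbi = UFS_SB(sb);

	/* ... flush superblock state ... */
	mutex_destroy(&sbi->s_lock);	/* kept from the original cleanup */
	kfree(sbi);
	sb->s_fs_info = NULL;
}
```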