-----BEGIN PGP SIGNATURE-----

iQEzBAABCAAdFiEEq1nRK9aeMoq1VSgcnJ2qBz9kQNkFAmKiO9UACgkQnJ2qBz9k
QNk9+Af/RjaJEozyj/He7nqj1xncN6bIJzeyOqQVJNkHBsKYt7oDFvSuYI1Kbzk+
x7/x8dRtVR3kRZCO6VarETkzGp6Nw10RdzFKqT2FRmQ66wVZaXPQeqVZqwXSKdtR
qgU892e9S2SqUH9EyUwk3D/HwLr1VNKKp6B0N+By7EwKmZdyTg5siFJ26+z+QpJQ
wo84nN/m6GgHSm+c8kMFa+cs635tMY3+vP4nviUKyuDTxW3Yu6maIa5973WLiFqo
EZSLtSfXYasjoOl5fN3AaO0dAl8fRJIh6wsgbeQI/NeUYMIqKWslW+5esq1SwreS
r1+Xig8MmxDJ/1I3i/L/aDM7FipY9A==
=kMe8
-----END PGP SIGNATURE-----

Merge tag 'fs_for_v5.19-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs

Pull ext2, writeback, and quota fixes and cleanups from Jan Kara:
 "A fix for a race in the writeback code and two cleanups in quota and ext2"

* tag 'fs_for_v5.19-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs:
  quota: Prevent memory allocation recursion while holding dq_lock
  writeback: Fix inode->i_io_list not be protected by inode->i_lock error
  fs: Fix syntax errors in comments
commit 3d9f55c57b
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -1549,7 +1549,7 @@ static int __ext2_write_inode(struct inode *inode, int do_sync)
 	if (IS_ERR(raw_inode))
 		return -EIO;
 
-	/* For fields not not tracking in the in-memory inode,
+	/* For fields not tracking in the in-memory inode,
 	 * initialise them to zero for new inodes. */
 	if (ei->i_state & EXT2_STATE_NEW)
 		memset(raw_inode, 0, EXT2_SB(sb)->s_inode_size);
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -120,6 +120,7 @@ static bool inode_io_list_move_locked(struct inode *inode,
 				      struct list_head *head)
 {
 	assert_spin_locked(&wb->list_lock);
+	assert_spin_locked(&inode->i_lock);
 
 	list_move(&inode->i_io_list, head);
 
@@ -1365,9 +1366,9 @@ static int move_expired_inodes(struct list_head *delaying_queue,
 		inode = wb_inode(delaying_queue->prev);
 		if (inode_dirtied_after(inode, dirtied_before))
 			break;
+		spin_lock(&inode->i_lock);
 		list_move(&inode->i_io_list, &tmp);
 		moved++;
-		spin_lock(&inode->i_lock);
 		inode->i_state |= I_SYNC_QUEUED;
 		spin_unlock(&inode->i_lock);
 		if (sb_is_blkdev_sb(inode->i_sb))
@@ -1383,7 +1384,12 @@ static int move_expired_inodes(struct list_head *delaying_queue,
 		goto out;
 	}
 
-	/* Move inodes from one superblock together */
+	/*
+	 * Although inode's i_io_list is moved from 'tmp' to 'dispatch_queue',
+	 * we don't take inode->i_lock here because it is just a pointless overhead.
+	 * Inode is already marked as I_SYNC_QUEUED so writeback list handling is
+	 * fully under our control.
+	 */
 	while (!list_empty(&tmp)) {
 		sb = wb_inode(tmp.prev)->i_sb;
 		list_for_each_prev_safe(pos, node, &tmp) {
@@ -1826,8 +1832,8 @@ static long writeback_sb_inodes(struct super_block *sb,
 			 * We'll have another go at writing back this inode
 			 * when we completed a full scan of b_io.
 			 */
-			spin_unlock(&inode->i_lock);
 			requeue_io(inode, wb);
+			spin_unlock(&inode->i_lock);
 			trace_writeback_sb_inodes_requeue(inode);
 			continue;
 		}
@@ -2358,6 +2364,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
 {
 	struct super_block *sb = inode->i_sb;
 	int dirtytime = 0;
+	struct bdi_writeback *wb = NULL;
 
 	trace_writeback_mark_inode_dirty(inode, flags);
 
@@ -2409,6 +2416,17 @@ void __mark_inode_dirty(struct inode *inode, int flags)
 			inode->i_state &= ~I_DIRTY_TIME;
 		inode->i_state |= flags;
 
+		/*
+		 * Grab inode's wb early because it requires dropping i_lock and we
+		 * need to make sure following checks happen atomically with dirty
+		 * list handling so that we don't move inodes under flush worker's
+		 * hands.
+		 */
+		if (!was_dirty) {
+			wb = locked_inode_to_wb_and_lock_list(inode);
+			spin_lock(&inode->i_lock);
+		}
+
 		/*
 		 * If the inode is queued for writeback by flush worker, just
 		 * update its dirty state. Once the flush worker is done with
@@ -2416,7 +2434,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
 		 * list, based upon its state.
 		 */
 		if (inode->i_state & I_SYNC_QUEUED)
-			goto out_unlock_inode;
+			goto out_unlock;
 
 		/*
 		 * Only add valid (hashed) inodes to the superblock's
@@ -2424,22 +2442,19 @@ void __mark_inode_dirty(struct inode *inode, int flags)
 		 */
 		if (!S_ISBLK(inode->i_mode)) {
 			if (inode_unhashed(inode))
-				goto out_unlock_inode;
+				goto out_unlock;
 		}
 		if (inode->i_state & I_FREEING)
-			goto out_unlock_inode;
+			goto out_unlock;
 
 		/*
 		 * If the inode was already on b_dirty/b_io/b_more_io, don't
 		 * reposition it (that would break b_dirty time-ordering).
 		 */
 		if (!was_dirty) {
-			struct bdi_writeback *wb;
 			struct list_head *dirty_list;
 			bool wakeup_bdi = false;
 
-			wb = locked_inode_to_wb_and_lock_list(inode);
-
 			inode->dirtied_when = jiffies;
 			if (dirtytime)
 				inode->dirtied_time_when = jiffies;
@@ -2453,6 +2468,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
 							       dirty_list);
 
 			spin_unlock(&wb->list_lock);
+			spin_unlock(&inode->i_lock);
 			trace_writeback_dirty_inode_enqueue(inode);
 
 			/*
@@ -2467,6 +2483,9 @@ void __mark_inode_dirty(struct inode *inode, int flags)
 			return;
 		}
 	}
+out_unlock:
+	if (wb)
+		spin_unlock(&wb->list_lock);
 out_unlock_inode:
 	spin_unlock(&inode->i_lock);
 }
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -27,7 +27,7 @@
  * Inode locking rules:
  *
  * inode->i_lock protects:
- *   inode->i_state, inode->i_hash, __iget()
+ *   inode->i_state, inode->i_hash, __iget(), inode->i_io_list
  * Inode LRU list locks protect:
  *   inode->i_sb->s_inode_lru, inode->i_lru
  * inode->i_sb->s_inode_list_lock protects:
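The writeback and fs/inode.c hunks above all enforce the rule that the locking-rules comment now documents: inode->i_io_list may only be manipulated while inode->i_lock is held, and where both locks are needed, wb->list_lock is taken before inode->i_lock. The following is a minimal illustrative sketch of that lock order, not taken from the patch; the helper name is hypothetical and the snippet only mirrors the discipline the fix establishes.

#include <linux/fs.h>
#include <linux/backing-dev.h>

/* Illustrative only: mirrors the lock order used in fs/fs-writeback.c. */
static void example_queue_for_writeback(struct inode *inode,
					struct bdi_writeback *wb,
					struct list_head *head)
{
	spin_lock(&wb->list_lock);		/* writeback list lock first */
	spin_lock(&inode->i_lock);		/* then the per-inode lock */

	list_move(&inode->i_io_list, head);	/* i_io_list only touched under i_lock */
	inode->i_state |= I_SYNC_QUEUED;	/* flush worker now owns list placement */

	spin_unlock(&inode->i_lock);
	spin_unlock(&wb->list_lock);
}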
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -79,6 +79,7 @@
 #include <linux/capability.h>
 #include <linux/quotaops.h>
 #include <linux/blkdev.h>
+#include <linux/sched/mm.h>
 #include "../internal.h" /* ugh */
 
 #include <linux/uaccess.h>
@@ -425,9 +426,11 @@ EXPORT_SYMBOL(mark_info_dirty);
 int dquot_acquire(struct dquot *dquot)
 {
 	int ret = 0, ret2 = 0;
+	unsigned int memalloc;
 	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
 
 	mutex_lock(&dquot->dq_lock);
+	memalloc = memalloc_nofs_save();
 	if (!test_bit(DQ_READ_B, &dquot->dq_flags)) {
 		ret = dqopt->ops[dquot->dq_id.type]->read_dqblk(dquot);
 		if (ret < 0)
@@ -458,6 +461,7 @@ int dquot_acquire(struct dquot *dquot)
 	smp_mb__before_atomic();
 	set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
 out_iolock:
+	memalloc_nofs_restore(memalloc);
 	mutex_unlock(&dquot->dq_lock);
 	return ret;
 }
@@ -469,9 +473,11 @@ EXPORT_SYMBOL(dquot_acquire);
 int dquot_commit(struct dquot *dquot)
 {
 	int ret = 0;
+	unsigned int memalloc;
 	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
 
 	mutex_lock(&dquot->dq_lock);
+	memalloc = memalloc_nofs_save();
 	if (!clear_dquot_dirty(dquot))
 		goto out_lock;
 	/* Inactive dquot can be only if there was error during read/init
@@ -481,6 +487,7 @@ int dquot_commit(struct dquot *dquot)
 	else
 		ret = -EIO;
 out_lock:
+	memalloc_nofs_restore(memalloc);
 	mutex_unlock(&dquot->dq_lock);
 	return ret;
 }
@@ -492,9 +499,11 @@ EXPORT_SYMBOL(dquot_commit);
 int dquot_release(struct dquot *dquot)
 {
 	int ret = 0, ret2 = 0;
+	unsigned int memalloc;
 	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
 
 	mutex_lock(&dquot->dq_lock);
+	memalloc = memalloc_nofs_save();
 	/* Check whether we are not racing with some other dqget() */
 	if (dquot_is_busy(dquot))
 		goto out_dqlock;
@@ -510,6 +519,7 @@ int dquot_release(struct dquot *dquot)
 	}
 	clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
 out_dqlock:
+	memalloc_nofs_restore(memalloc);
 	mutex_unlock(&dquot->dq_lock);
 	return ret;
 }
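The quota hunks above bracket everything done under dquot->dq_lock with memalloc_nofs_save()/memalloc_nofs_restore(), so any memory allocation made while the mutex is held implicitly loses __GFP_FS and direct reclaim cannot recurse back into the filesystem, which might need dq_lock again. Below is a minimal sketch of the same pattern with a hypothetical lock and allocation; it is not part of the patch and only illustrates the save/restore scoping.

#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

static DEFINE_MUTEX(example_lock);	/* stands in for dquot->dq_lock */

static void *example_alloc_under_lock(size_t size)
{
	unsigned int memalloc;
	void *p;

	mutex_lock(&example_lock);
	memalloc = memalloc_nofs_save();	/* allocations below behave as GFP_NOFS */
	p = kmalloc(size, GFP_KERNEL);		/* reclaim cannot re-enter the filesystem */
	memalloc_nofs_restore(memalloc);	/* restore the previous allocation context */
	mutex_unlock(&example_lock);
	return p;
}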