xfs: use iomap_valid method to detect stale cached iomaps

Now that iomap supports a mechanism to validate cached iomaps for
buffered write operations, hook it up to the XFS buffered write ops
so that we can avoid data corruptions that result from stale cached
iomaps. See:

https://lore.kernel.org/linux-xfs/20220817093627.GZ3600936@dread.disaster.area/

or the ->iomap_valid() introduction commit for exact details of the
corruption vector.
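
For reference, the core-side check that consumes this hook looks
roughly like the following sketch (paraphrased from the
->iomap_valid() introduction commit; it is not part of this patch).
Once iomap_write_begin() holds a locked folio, it revalidates the
cached mapping and flags it IOMAP_F_STALE if the cookie no longer
matches, forcing the operation to remap:

	/* sketch only - see the referenced commit for the exact code */
	if (page_ops && page_ops->iomap_valid) {
		if (!page_ops->iomap_valid(iter->inode, &iter->iomap)) {
			iter->iomap.flags |= IOMAP_F_STALE;
			status = 0;
			goto out_unlock;
		}
	}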

The validity cookie we store in the iomap is based on the type of
iomap we return. It is expected that the iomap->flags we set in
xfs_bmbt_to_iomap() are not perturbed by the iomap core and are
returned to us in the iomap passed via the .iomap_valid() callback.
This ensures that the validity cookie is always checking the correct
inode fork sequence numbers to detect potential changes that affect
the extent cached by the iomap.
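
Concretely, the cookie layout is (see xfs_iomap_inode_sequence() in
the diff below):

	/*
	 * Validity cookie layout (illustrative):
	 *
	 *	bits 63..32	COW fork if_seq (only for IOMAP_F_SHARED
	 *			mappings, else zero)
	 *	bits 31..0	data fork if_seq
	 *
	 * IOMAP_F_XATTR mappings store only the attr fork if_seq.
	 */

Note that every caller samples the cookie via
xfs_iomap_inode_sequence() while still holding the ILOCK and only then
drops the lock; sampling after unlock would leave a window in which a
fork could change without being reflected in the cookie.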

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
commit 304a68b9c6
parent d7b6404116
Author: Dave Chinner, 2022-11-29 09:09:17 +11:00 (committed by Dave Chinner)

 5 files changed, 87 insertions(+), 27 deletions(-)

diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c

@@ -4551,7 +4551,8 @@ xfs_bmapi_convert_delalloc(
 	 * the extent. Just return the real extent at this offset.
 	 */
 	if (!isnullstartblock(bma.got.br_startblock)) {
-		xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags);
+		xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags,
+				xfs_iomap_inode_sequence(ip, flags));
 		*seq = READ_ONCE(ifp->if_seq);
 		goto out_trans_cancel;
 	}
@@ -4599,7 +4600,8 @@ xfs_bmapi_convert_delalloc(
 	XFS_STATS_INC(mp, xs_xstrat_quick);
 
 	ASSERT(!isnullstartblock(bma.got.br_startblock));
-	xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags);
+	xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags,
+			xfs_iomap_inode_sequence(ip, flags));
 	*seq = READ_ONCE(ifp->if_seq);
 
 	if (whichfork == XFS_COW_FORK)

diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c

@@ -372,7 +372,7 @@ retry:
 	    isnullstartblock(imap.br_startblock))
 		goto allocate_blocks;
 
-	xfs_bmbt_to_iomap(ip, &wpc->iomap, &imap, 0, 0);
+	xfs_bmbt_to_iomap(ip, &wpc->iomap, &imap, 0, 0, XFS_WPC(wpc)->data_seq);
 	trace_xfs_map_blocks_found(ip, offset, count, whichfork, &imap);
 	return 0;
 allocate_blocks:

diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c

@@ -48,13 +48,45 @@ xfs_alert_fsblock_zero(
 	return -EFSCORRUPTED;
 }
 
+u64
+xfs_iomap_inode_sequence(
+	struct xfs_inode	*ip,
+	u16			iomap_flags)
+{
+	u64			cookie = 0;
+
+	if (iomap_flags & IOMAP_F_XATTR)
+		return READ_ONCE(ip->i_af.if_seq);
+	if ((iomap_flags & IOMAP_F_SHARED) && ip->i_cowfp)
+		cookie = (u64)READ_ONCE(ip->i_cowfp->if_seq) << 32;
+	return cookie | READ_ONCE(ip->i_df.if_seq);
+}
+
+/*
+ * Check that the iomap passed to us is still valid for the given offset and
+ * length.
+ */
+static bool
+xfs_iomap_valid(
+	struct inode		*inode,
+	const struct iomap	*iomap)
+{
+	return iomap->validity_cookie ==
+			xfs_iomap_inode_sequence(XFS_I(inode), iomap->flags);
+}
+
+const struct iomap_page_ops xfs_iomap_page_ops = {
+	.iomap_valid		= xfs_iomap_valid,
+};
+
 int
 xfs_bmbt_to_iomap(
 	struct xfs_inode	*ip,
 	struct iomap		*iomap,
 	struct xfs_bmbt_irec	*imap,
 	unsigned int		mapping_flags,
-	u16			iomap_flags)
+	u16			iomap_flags,
+	u64			sequence_cookie)
 {
 	struct xfs_mount	*mp = ip->i_mount;
 	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);
@@ -91,6 +123,9 @@ xfs_bmbt_to_iomap(
 	if (xfs_ipincount(ip) &&
 	    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
 		iomap->flags |= IOMAP_F_DIRTY;
+
+	iomap->validity_cookie = sequence_cookie;
+	iomap->page_ops = &xfs_iomap_page_ops;
 	return 0;
 }
@@ -195,7 +230,8 @@ xfs_iomap_write_direct(
 	xfs_fileoff_t		offset_fsb,
 	xfs_fileoff_t		count_fsb,
 	unsigned int		flags,
-	struct xfs_bmbt_irec	*imap)
+	struct xfs_bmbt_irec	*imap,
+	u64			*seq)
 {
 	struct xfs_mount	*mp = ip->i_mount;
 	struct xfs_trans	*tp;
@@ -285,6 +321,7 @@ xfs_iomap_write_direct(
 		error = xfs_alert_fsblock_zero(ip, imap);
 
 out_unlock:
+	*seq = xfs_iomap_inode_sequence(ip, 0);
 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 	return error;
 }
@@ -743,6 +780,7 @@ xfs_direct_write_iomap_begin(
 	bool			shared = false;
 	u16			iomap_flags = 0;
 	unsigned int		lockmode = XFS_ILOCK_SHARED;
+	u64			seq;
 
 	ASSERT(flags & (IOMAP_WRITE | IOMAP_ZERO));
@@ -811,9 +849,10 @@ xfs_direct_write_iomap_begin(
 			goto out_unlock;
 	}
 
+	seq = xfs_iomap_inode_sequence(ip, iomap_flags);
 	xfs_iunlock(ip, lockmode);
 	trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
-	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, iomap_flags);
+	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, iomap_flags, seq);
 
 allocate_blocks:
 	error = -EAGAIN;
@@ -839,24 +878,26 @@ allocate_blocks:
 	xfs_iunlock(ip, lockmode);
 
 	error = xfs_iomap_write_direct(ip, offset_fsb, end_fsb - offset_fsb,
-			flags, &imap);
+			flags, &imap, &seq);
 	if (error)
 		return error;
 
 	trace_xfs_iomap_alloc(ip, offset, length, XFS_DATA_FORK, &imap);
 	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags,
-				 iomap_flags | IOMAP_F_NEW);
+				 iomap_flags | IOMAP_F_NEW, seq);
 
 out_found_cow:
-	xfs_iunlock(ip, lockmode);
 	length = XFS_FSB_TO_B(mp, cmap.br_startoff + cmap.br_blockcount);
 	trace_xfs_iomap_found(ip, offset, length - offset, XFS_COW_FORK, &cmap);
 	if (imap.br_startblock != HOLESTARTBLOCK) {
-		error = xfs_bmbt_to_iomap(ip, srcmap, &imap, flags, 0);
+		seq = xfs_iomap_inode_sequence(ip, 0);
+		error = xfs_bmbt_to_iomap(ip, srcmap, &imap, flags, 0, seq);
 		if (error)
-			return error;
+			goto out_unlock;
 	}
-	return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, IOMAP_F_SHARED);
+	seq = xfs_iomap_inode_sequence(ip, IOMAP_F_SHARED);
+	xfs_iunlock(ip, lockmode);
+	return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, IOMAP_F_SHARED, seq);
 
 out_unlock:
 	if (lockmode)
@@ -915,6 +956,7 @@ xfs_buffered_write_iomap_begin(
 	int			allocfork = XFS_DATA_FORK;
 	int			error = 0;
 	unsigned int		lockmode = XFS_ILOCK_EXCL;
+	u64			seq;
 
 	if (xfs_is_shutdown(mp))
 		return -EIO;
@@ -1094,26 +1136,31 @@ retry:
 	 * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
 	 * them out if the write happens to fail.
 	 */
+	seq = xfs_iomap_inode_sequence(ip, IOMAP_F_NEW);
 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 	trace_xfs_iomap_alloc(ip, offset, count, allocfork, &imap);
-	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, IOMAP_F_NEW);
+	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, IOMAP_F_NEW, seq);
 
 found_imap:
+	seq = xfs_iomap_inode_sequence(ip, 0);
 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
-	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, 0);
+	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, 0, seq);
 
 found_cow:
-	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+	seq = xfs_iomap_inode_sequence(ip, 0);
 	if (imap.br_startoff <= offset_fsb) {
-		error = xfs_bmbt_to_iomap(ip, srcmap, &imap, flags, 0);
+		error = xfs_bmbt_to_iomap(ip, srcmap, &imap, flags, 0, seq);
 		if (error)
-			return error;
+			goto out_unlock;
+		seq = xfs_iomap_inode_sequence(ip, IOMAP_F_SHARED);
+		xfs_iunlock(ip, XFS_ILOCK_EXCL);
 		return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags,
-					 IOMAP_F_SHARED);
+					 IOMAP_F_SHARED, seq);
 	}
 
 	xfs_trim_extent(&cmap, offset_fsb, imap.br_startoff - offset_fsb);
-	return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, 0);
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+	return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, 0, seq);
 
 out_unlock:
 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
@@ -1193,6 +1240,7 @@ xfs_read_iomap_begin(
 	int			nimaps = 1, error = 0;
 	bool			shared = false;
 	unsigned int		lockmode = XFS_ILOCK_SHARED;
+	u64			seq;
 
 	ASSERT(!(flags & (IOMAP_WRITE | IOMAP_ZERO)));
@@ -1206,13 +1254,14 @@ xfs_read_iomap_begin(
 			       &nimaps, 0);
 	if (!error && (flags & IOMAP_REPORT))
 		error = xfs_reflink_trim_around_shared(ip, &imap, &shared);
+	seq = xfs_iomap_inode_sequence(ip, shared ? IOMAP_F_SHARED : 0);
 	xfs_iunlock(ip, lockmode);
 
 	if (error)
 		return error;
 	trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
 	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags,
-				 shared ? IOMAP_F_SHARED : 0);
+				 shared ? IOMAP_F_SHARED : 0, seq);
 }
 
 const struct iomap_ops xfs_read_iomap_ops = {
@@ -1237,6 +1286,7 @@ xfs_seek_iomap_begin(
 	struct xfs_bmbt_irec	imap, cmap;
 	int			error = 0;
 	unsigned		lockmode;
+	u64			seq;
 
 	if (xfs_is_shutdown(mp))
 		return -EIO;
@@ -1271,8 +1321,9 @@ xfs_seek_iomap_begin(
 		if (data_fsb < cow_fsb + cmap.br_blockcount)
 			end_fsb = min(end_fsb, data_fsb);
 		xfs_trim_extent(&cmap, offset_fsb, end_fsb);
+		seq = xfs_iomap_inode_sequence(ip, IOMAP_F_SHARED);
 		error = xfs_bmbt_to_iomap(ip, iomap, &cmap, flags,
-					  IOMAP_F_SHARED);
+					  IOMAP_F_SHARED, seq);
 		/*
 		 * This is a COW extent, so we must probe the page cache
 		 * because there could be dirty page cache being backed
@@ -1293,8 +1344,9 @@ xfs_seek_iomap_begin(
 	imap.br_startblock = HOLESTARTBLOCK;
 	imap.br_state = XFS_EXT_NORM;
 done:
+	seq = xfs_iomap_inode_sequence(ip, 0);
 	xfs_trim_extent(&imap, offset_fsb, end_fsb);
-	error = xfs_bmbt_to_iomap(ip, iomap, &imap, flags, 0);
+	error = xfs_bmbt_to_iomap(ip, iomap, &imap, flags, 0, seq);
 out_unlock:
 	xfs_iunlock(ip, lockmode);
 	return error;
@@ -1320,6 +1372,7 @@ xfs_xattr_iomap_begin(
 	struct xfs_bmbt_irec	imap;
 	int			nimaps = 1, error = 0;
 	unsigned		lockmode;
+	int			seq;
 
 	if (xfs_is_shutdown(mp))
 		return -EIO;
@@ -1336,12 +1389,14 @@ xfs_xattr_iomap_begin(
 	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
 			       &nimaps, XFS_BMAPI_ATTRFORK);
 out_unlock:
+
+	seq = xfs_iomap_inode_sequence(ip, IOMAP_F_XATTR);
 	xfs_iunlock(ip, lockmode);
 
 	if (error)
 		return error;
 	ASSERT(nimaps);
-	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, 0);
+	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, IOMAP_F_XATTR, seq);
 }
 
 const struct iomap_ops xfs_xattr_iomap_ops = {
const struct iomap_ops xfs_xattr_iomap_ops = { const struct iomap_ops xfs_xattr_iomap_ops = {

diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h

@@ -13,14 +13,15 @@ struct xfs_bmbt_irec;
 int xfs_iomap_write_direct(struct xfs_inode *ip, xfs_fileoff_t offset_fsb,
 		xfs_fileoff_t count_fsb, unsigned int flags,
-		struct xfs_bmbt_irec *imap);
+		struct xfs_bmbt_irec *imap, u64 *sequence);
 int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t, bool);
 xfs_fileoff_t xfs_iomap_eof_align_last_fsb(struct xfs_inode *ip,
 		xfs_fileoff_t end_fsb);
 
+u64 xfs_iomap_inode_sequence(struct xfs_inode *ip, u16 iomap_flags);
 int xfs_bmbt_to_iomap(struct xfs_inode *ip, struct iomap *iomap,
 		struct xfs_bmbt_irec *imap, unsigned int mapping_flags,
-		u16 iomap_flags);
+		u16 iomap_flags, u64 sequence_cookie);
 int xfs_zero_range(struct xfs_inode *ip, loff_t pos, loff_t len,
 		bool *did_zero);

diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
--- a/fs/xfs/xfs_pnfs.c
+++ b/fs/xfs/xfs_pnfs.c

@@ -125,6 +125,7 @@ xfs_fs_map_blocks(
 	int			nimaps = 1;
 	uint			lock_flags;
 	int			error = 0;
+	u64			seq;
 
 	if (xfs_is_shutdown(mp))
 		return -EIO;
@@ -176,6 +177,7 @@ xfs_fs_map_blocks(
 	lock_flags = xfs_ilock_data_map_shared(ip);
 	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
 				&imap, &nimaps, bmapi_flags);
+	seq = xfs_iomap_inode_sequence(ip, 0);
 
 	ASSERT(!nimaps || imap.br_startblock != DELAYSTARTBLOCK);
@@ -189,7 +191,7 @@ xfs_fs_map_blocks(
 		xfs_iunlock(ip, lock_flags);
 
 		error = xfs_iomap_write_direct(ip, offset_fsb,
-				end_fsb - offset_fsb, 0, &imap);
+				end_fsb - offset_fsb, 0, &imap, &seq);
 		if (error)
 			goto out_unlock;
@@ -209,7 +211,7 @@ xfs_fs_map_blocks(
 	}
 	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
 
-	error = xfs_bmbt_to_iomap(ip, iomap, &imap, 0, 0);
+	error = xfs_bmbt_to_iomap(ip, iomap, &imap, 0, 0, seq);
 	*device_generation = mp->m_generation;
 	return error;
 out_unlock: