// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2012 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"

/* Kernel only BMAP related definitions and functions */

/*
 * Convert the given file system block to a disk block. We have to treat it
 * differently based on whether the file is a real time file or not, because the
 * bmap code does.
 */
xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
	if (XFS_IS_REALTIME_INODE(ip))
		return XFS_FSB_TO_BB(ip->i_mount, fsb);
	return XFS_FSB_TO_DADDR(ip->i_mount, fsb);
}

/*
 * Routine to zero an extent on disk allocated to the specific inode.
 *
 * The VFS functions take a linearised filesystem block offset, so we have to
 * convert the sparse xfs fsb to the right format first.
 * VFS types are real funky, too.
 */
int
xfs_zero_extent(
	struct xfs_inode	*ip,
	xfs_fsblock_t		start_fsb,
	xfs_off_t		count_fsb)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);
	xfs_daddr_t		sector = xfs_fsb_to_db(ip, start_fsb);
	sector_t		block = XFS_BB_TO_FSBT(mp, sector);

	return blkdev_issue_zeroout(target->bt_bdev,
		block << (mp->m_super->s_blocksize_bits - 9),
		count_fsb << (mp->m_super->s_blocksize_bits - 9),
		GFP_NOFS, 0);
}

#ifdef CONFIG_XFS_RT
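/*
 * Allocate an extent from the realtime subvolume to satisfy a bmap allocation
 * request, retrying without alignment or locality hints if the first attempt
 * cannot be satisfied.
 */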
int
xfs_bmap_rtalloc(
	struct xfs_bmalloca	*ap)
{
	struct xfs_mount	*mp = ap->ip->i_mount;
	xfs_fileoff_t		orig_offset = ap->offset;
	xfs_rtblock_t		rtb;
	xfs_extlen_t		prod = 0;	/* product factor for allocators */
	xfs_extlen_t		mod = 0;	/* offset/extent size remainder */
	xfs_extlen_t		ralen = 0;	/* realtime allocation length */
	xfs_extlen_t		align;		/* minimum allocation alignment */
	xfs_extlen_t		orig_length = ap->length;
	xfs_extlen_t		minlen = mp->m_sb.sb_rextsize;
	xfs_extlen_t		raminlen;
	bool			rtlocked = false;
	bool			ignore_locality = false;
	int			error;

	align = xfs_get_extsz_hint(ap->ip);
retry:
	prod = align / mp->m_sb.sb_rextsize;
	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
					align, 1, ap->eof, 0,
					ap->conv, &ap->offset, &ap->length);
	if (error)
		return error;
	ASSERT(ap->length);
	ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);

	/*
	 * If we shifted the file offset downward to satisfy an extent size
	 * hint, increase minlen by that amount so that the allocator won't
	 * give us an allocation that's too short to cover at least one of the
	 * blocks that the caller asked for.
	 */
	if (ap->offset != orig_offset)
		minlen += orig_offset - ap->offset;

	/*
	 * If the offset & length are not perfectly aligned
	 * then kill prod, it will just get us in trouble.
	 */
	div_u64_rem(ap->offset, align, &mod);
	if (mod || ap->length % align)
		prod = 1;
	/*
	 * Set ralen to be the actual requested length in rtextents.
	 */
	ralen = ap->length / mp->m_sb.sb_rextsize;
	/*
	 * If the old value was close enough to XFS_BMBT_MAX_EXTLEN that
	 * we rounded up to it, cut it back so it's valid again.
	 * Note that if it's a really large request (bigger than
	 * XFS_BMBT_MAX_EXTLEN), we don't hear about that number, and can't
	 * adjust the starting point to match it.
	 */
	if (ralen * mp->m_sb.sb_rextsize >= XFS_MAX_BMBT_EXTLEN)
		ralen = XFS_MAX_BMBT_EXTLEN / mp->m_sb.sb_rextsize;

	/*
	 * Lock out modifications to both the RT bitmap and summary inodes
	 */
	if (!rtlocked) {
		xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
		xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
		xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
		xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);
		rtlocked = true;
	}

	/*
	 * If it's an allocation to an empty file at offset 0,
	 * pick an extent that will space things out in the rt area.
	 */
	if (ap->eof && ap->offset == 0) {
		xfs_rtblock_t rtx; /* realtime extent no */

		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
		if (error)
			return error;
		ap->blkno = rtx * mp->m_sb.sb_rextsize;
	} else {
		ap->blkno = 0;
	}

	xfs_bmap_adjacent(ap);

	/*
	 * Realtime allocation, done through xfs_rtallocate_extent.
	 */
	if (ignore_locality)
		ap->blkno = 0;
	else
		do_div(ap->blkno, mp->m_sb.sb_rextsize);
	rtb = ap->blkno;
	ap->length = ralen;
	raminlen = max_t(xfs_extlen_t, 1, minlen / mp->m_sb.sb_rextsize);
	error = xfs_rtallocate_extent(ap->tp, ap->blkno, raminlen, ap->length,
			&ralen, ap->wasdel, prod, &rtb);
	if (error)
		return error;

	if (rtb != NULLRTBLOCK) {
		ap->blkno = rtb * mp->m_sb.sb_rextsize;
		ap->length = ralen * mp->m_sb.sb_rextsize;
		ap->ip->i_nblocks += ap->length;
		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
		if (ap->wasdel)
			ap->ip->i_delayed_blks -= ap->length;
		/*
		 * Adjust the disk quota also. This was reserved
		 * earlier.
		 */
		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
					XFS_TRANS_DQ_RTBCOUNT, ap->length);
		return 0;
	}

	if (align > mp->m_sb.sb_rextsize) {
		/*
		 * We previously enlarged the request length to try to satisfy
		 * an extent size hint. The allocator didn't return anything,
		 * so reset the parameters to the original values and try again
		 * without alignment criteria.
		 */
		ap->offset = orig_offset;
		ap->length = orig_length;
		minlen = align = mp->m_sb.sb_rextsize;
		goto retry;
	}

	if (!ignore_locality && ap->blkno != 0) {
		/*
		 * If we can't allocate near a specific rt extent, try again
		 * without locality criteria.
		 */
		ignore_locality = true;
		goto retry;
	}

	ap->blkno = NULLFSBLOCK;
	ap->length = 0;
	return 0;
}
#endif /* CONFIG_XFS_RT */

/*
 * Extent tree block counting routines.
 */

/*
 * Count leaf blocks given a range of extent records. Delayed allocation
 * extents are not counted towards the totals.
 */
xfs_extnum_t
xfs_bmap_count_leaves(
	struct xfs_ifork	*ifp,
	xfs_filblks_t		*count)
{
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	got;
	xfs_extnum_t		numrecs = 0;

	for_each_xfs_iext(ifp, &icur, &got) {
		if (!isnullstartblock(got.br_startblock)) {
			*count += got.br_blockcount;
			numrecs++;
		}
	}

	return numrecs;
}

/*
 * Count fsblocks of the given fork. Delayed allocation extents are
 * not counted towards the totals.
 */
int
xfs_bmap_count_blocks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_extnum_t		*nextents,
	xfs_filblks_t		*count)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_cur	*cur;
	xfs_extlen_t		btblocks = 0;
	int			error;

	*nextents = 0;
	*count = 0;

	if (!ifp)
		return 0;

	switch (ifp->if_format) {
	case XFS_DINODE_FMT_BTREE:
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;

		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		error = xfs_btree_count_blocks(cur, &btblocks);
		xfs_btree_del_cursor(cur, error);
		if (error)
			return error;

		/*
		 * xfs_btree_count_blocks includes the root block contained in
		 * the inode fork in @btblocks, so subtract one because we're
		 * only interested in allocated disk blocks.
		 */
		*count += btblocks - 1;

		fallthrough;
	case XFS_DINODE_FMT_EXTENTS:
		*nextents = xfs_bmap_count_leaves(ifp, count);
		break;
	}

	return 0;
}
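
/*
 * Fill in one getbmap output record for the mapping in @got, trimming it
 * against any shared extents, and advance the request past it.
 */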
static int
xfs_getbmap_report_one(
	struct xfs_inode	*ip,
	struct getbmapx		*bmv,
	struct kgetbmap		*out,
	int64_t			bmv_end,
	struct xfs_bmbt_irec	*got)
{
	struct kgetbmap		*p = out + bmv->bmv_entries;
	bool			shared = false;
	int			error;

	error = xfs_reflink_trim_around_shared(ip, got, &shared);
	if (error)
		return error;

	if (isnullstartblock(got->br_startblock) ||
	    got->br_startblock == DELAYSTARTBLOCK) {
		/*
		 * Take the flush completion as being a point-in-time snapshot
		 * where there are no delalloc extents, and if any new ones
		 * have been created racily, just skip them as being 'after'
		 * the flush and so don't get reported.
		 */
		if (!(bmv->bmv_iflags & BMV_IF_DELALLOC))
			return 0;

		p->bmv_oflags |= BMV_OF_DELALLOC;
		p->bmv_block = -2;
	} else {
		p->bmv_block = xfs_fsb_to_db(ip, got->br_startblock);
	}

	if (got->br_state == XFS_EXT_UNWRITTEN &&
	    (bmv->bmv_iflags & BMV_IF_PREALLOC))
		p->bmv_oflags |= BMV_OF_PREALLOC;

	if (shared)
		p->bmv_oflags |= BMV_OF_SHARED;

	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, got->br_startoff);
	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, got->br_blockcount);

	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
	bmv->bmv_entries++;
	return 0;
}
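
/*
 * Emit a getbmap record describing the hole between @bno and @end, unless the
 * caller asked us to skip holes.
 */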
static void
xfs_getbmap_report_hole(
	struct xfs_inode	*ip,
	struct getbmapx		*bmv,
	struct kgetbmap		*out,
	int64_t			bmv_end,
	xfs_fileoff_t		bno,
	xfs_fileoff_t		end)
{
	struct kgetbmap		*p = out + bmv->bmv_entries;

	if (bmv->bmv_iflags & BMV_IF_NO_HOLES)
		return;

	p->bmv_block = -1;
	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, bno);
	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, end - bno);

	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
	bmv->bmv_entries++;
}
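
/*
 * Return true once the output array has no more room or the requested length
 * has been fully consumed.
 */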
static inline bool
xfs_getbmap_full(
	struct getbmapx		*bmv)
{
	return bmv->bmv_length == 0 || bmv->bmv_entries >= bmv->bmv_count - 1;
}
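
/*
 * Advance @rec past the portion that has already been reported.  Returns
 * false when the record has been consumed up to @total_end, true if there is
 * still a remaining piece to report.
 */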
static bool
xfs_getbmap_next_rec(
	struct xfs_bmbt_irec	*rec,
	xfs_fileoff_t		total_end)
{
	xfs_fileoff_t		end = rec->br_startoff + rec->br_blockcount;

	if (end == total_end)
		return false;

	rec->br_startoff += rec->br_blockcount;
	if (!isnullstartblock(rec->br_startblock) &&
	    rec->br_startblock != DELAYSTARTBLOCK)
		rec->br_startblock += rec->br_blockcount;
	rec->br_blockcount = total_end - end;
	return true;
}

/*
 * Get inode's extents as described in bmv, and format for output.
 * Calls formatter to fill the user's buffer until all extents
 * are mapped, until the passed-in bmv->bmv_count slots have
 * been filled, or until the formatter short-circuits the loop,
 * if it is tracking filled-in extents on its own.
 */
int						/* error code */
xfs_getbmap(
	struct xfs_inode	*ip,
	struct getbmapx		*bmv,		/* user bmap structure */
	struct kgetbmap		*out)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			iflags = bmv->bmv_iflags;
	int			whichfork, lock, error = 0;
	int64_t			bmv_end, max_len;
	xfs_fileoff_t		bno, first_bno;
	struct xfs_ifork	*ifp;
	struct xfs_bmbt_irec	got, rec;
	xfs_filblks_t		len;
	struct xfs_iext_cursor	icur;

	if (bmv->bmv_iflags & ~BMV_IF_VALID)
		return -EINVAL;
#ifndef DEBUG
	/* Only allow CoW fork queries if we're debugging. */
	if (iflags & BMV_IF_COWFORK)
		return -EINVAL;
#endif
	if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
		return -EINVAL;

	if (bmv->bmv_length < -1)
		return -EINVAL;
	bmv->bmv_entries = 0;
	if (bmv->bmv_length == 0)
		return 0;

	if (iflags & BMV_IF_ATTRFORK)
		whichfork = XFS_ATTR_FORK;
	else if (iflags & BMV_IF_COWFORK)
		whichfork = XFS_COW_FORK;
	else
		whichfork = XFS_DATA_FORK;

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	switch (whichfork) {
	case XFS_ATTR_FORK:
		lock = xfs_ilock_attr_map_shared(ip);
		if (!xfs_inode_has_attr_fork(ip))
			goto out_unlock_ilock;

		max_len = 1LL << 32;
		break;
	case XFS_COW_FORK:
		lock = XFS_ILOCK_SHARED;
		xfs_ilock(ip, lock);

		/* No CoW fork? Just return */
		if (!xfs_ifork_ptr(ip, whichfork))
			goto out_unlock_ilock;

		if (xfs_get_cowextsz_hint(ip))
			max_len = mp->m_super->s_maxbytes;
		else
			max_len = XFS_ISIZE(ip);
		break;
	case XFS_DATA_FORK:
		if (!(iflags & BMV_IF_DELALLOC) &&
		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_disk_size)) {
			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
			if (error)
				goto out_unlock_iolock;

			/*
			 * Even after flushing the inode, there can still be
			 * delalloc blocks on the inode beyond EOF due to
			 * speculative preallocation. These are not removed
			 * until the release function is called or the inode
			 * is inactivated. Hence we cannot assert here that
			 * ip->i_delayed_blks == 0.
			 */
		}

		if (xfs_get_extsz_hint(ip) ||
		    (ip->i_diflags &
		     (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))
			max_len = mp->m_super->s_maxbytes;
		else
			max_len = XFS_ISIZE(ip);

		lock = xfs_ilock_data_map_shared(ip);
		break;
	}

	ifp = xfs_ifork_ptr(ip, whichfork);

	switch (ifp->if_format) {
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		break;
	case XFS_DINODE_FMT_LOCAL:
		/* Local format inode forks report no extents. */
		goto out_unlock_ilock;
	default:
		error = -EINVAL;
		goto out_unlock_ilock;
	}

	if (bmv->bmv_length == -1) {
		max_len = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, max_len));
		bmv->bmv_length = max(0LL, max_len - bmv->bmv_offset);
	}

	bmv_end = bmv->bmv_offset + bmv->bmv_length;

	first_bno = bno = XFS_BB_TO_FSBT(mp, bmv->bmv_offset);
	len = XFS_BB_TO_FSB(mp, bmv->bmv_length);

	error = xfs_iread_extents(NULL, ip, whichfork);
	if (error)
		goto out_unlock_ilock;

	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
		/*
		 * Report a whole-file hole if the delalloc flag is set to
		 * stay compatible with the old implementation.
		 */
		if (iflags & BMV_IF_DELALLOC)
			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
					XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
		goto out_unlock_ilock;
	}

	while (!xfs_getbmap_full(bmv)) {
		xfs_trim_extent(&got, first_bno, len);

		/*
		 * Report an entry for a hole if this extent doesn't directly
		 * follow the previous one.
		 */
		if (got.br_startoff > bno) {
			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
					got.br_startoff);
			if (xfs_getbmap_full(bmv))
				break;
		}

		/*
		 * In order to report shared extents accurately, we report each
		 * distinct shared / unshared part of a single bmbt record with
		 * an individual getbmapx record.
		 */
		bno = got.br_startoff + got.br_blockcount;
		rec = got;
		do {
			error = xfs_getbmap_report_one(ip, bmv, out, bmv_end,
					&rec);
			if (error || xfs_getbmap_full(bmv))
				goto out_unlock_ilock;
		} while (xfs_getbmap_next_rec(&rec, bno));

		if (!xfs_iext_next_extent(ifp, &icur, &got)) {
			xfs_fileoff_t	end = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));

			if (bmv->bmv_entries > 0)
				out[bmv->bmv_entries - 1].bmv_oflags |=
								BMV_OF_LAST;

			if (whichfork != XFS_ATTR_FORK && bno < end &&
			    !xfs_getbmap_full(bmv)) {
				xfs_getbmap_report_hole(ip, bmv, out, bmv_end,
						bno, end);
			}
			break;
		}

		if (bno >= first_bno + len)
			break;
	}

out_unlock_ilock:
	xfs_iunlock(ip, lock);
out_unlock_iolock:
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return error;
}

/*
 * Dead simple method of punching delayed allocation blocks from a range in
 * the inode. This will always punch out both the start and end blocks, even
 * if the ranges only partially overlap them, so it is up to the caller to
 * ensure that partial blocks are not passed in.
 */
int
xfs_bmap_punch_delalloc_range(
	struct xfs_inode	*ip,
	xfs_off_t		start_byte,
	xfs_off_t		end_byte)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = &ip->i_df;
	xfs_fileoff_t		start_fsb = XFS_B_TO_FSBT(mp, start_byte);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, end_byte);
	struct xfs_bmbt_irec	got, del;
	struct xfs_iext_cursor	icur;
	int			error = 0;

	ASSERT(!xfs_need_iread_extents(ifp));

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
		goto out_unlock;

	while (got.br_startoff + got.br_blockcount > start_fsb) {
		del = got;
		xfs_trim_extent(&del, start_fsb, end_fsb - start_fsb);

		/*
		 * A delete can push the cursor forward. Step back to the
		 * previous extent on non-delalloc or extents outside the
		 * target range.
		 */
		if (!del.br_blockcount ||
		    !isnullstartblock(del.br_startblock)) {
			if (!xfs_iext_prev_extent(ifp, &icur, &got))
				break;
			continue;
		}

		error = xfs_bmap_del_extent_delay(ip, XFS_DATA_FORK, &icur,
						  &got, &del);
		if (error || !xfs_iext_get_extent(ifp, &icur, &got))
			break;
	}

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * Test whether it is appropriate to check an inode for and free post EOF
 * blocks.
 */
bool
xfs_can_free_eofblocks(
	struct xfs_inode	*ip)
{
	struct xfs_bmbt_irec	imap;
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		end_fsb;
	xfs_fileoff_t		last_fsb;
	int			nimaps = 1;
	int			error;

	/*
	 * Caller must either hold the exclusive io lock; or be inactivating
	 * the inode, which guarantees there are no other users of the inode.
	 */
	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL) ||
	       (VFS_I(ip)->i_state & I_FREEING));

	/* prealloc/delalloc exists only on regular files */
	if (!S_ISREG(VFS_I(ip)->i_mode))
		return false;

	/*
	 * Zero sized files with no cached pages and delalloc blocks will not
	 * have speculative prealloc/delalloc blocks to remove.
	 */
	if (VFS_I(ip)->i_size == 0 &&
	    VFS_I(ip)->i_mapping->nrpages == 0 &&
	    ip->i_delayed_blks == 0)
		return false;

	/* If we haven't read in the extent list, then don't do it now. */
	if (xfs_need_iread_extents(&ip->i_df))
		return false;

	/*
	 * Only free real extents for inodes with persistent preallocations or
	 * the append-only flag.
	 */
	if (ip->i_diflags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
		if (ip->i_delayed_blks == 0)
			return false;

	/*
	 * Do not try to free post-EOF blocks if EOF is beyond the end of the
	 * range supported by the page cache, because the truncation will loop
	 * forever.
	 */
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
	if (XFS_IS_REALTIME_INODE(ip) && mp->m_sb.sb_rextsize > 1)
		end_fsb = roundup_64(end_fsb, mp->m_sb.sb_rextsize);
	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	if (last_fsb <= end_fsb)
		return false;

	/*
	 * Look up the mapping for the first block past EOF. If we can't find
	 * it, there's nothing to free.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	error = xfs_bmapi_read(ip, end_fsb, last_fsb - end_fsb, &imap, &nimaps,
			0);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	if (error || nimaps == 0)
		return false;

	/*
	 * If there's a real mapping there or there are delayed allocation
	 * reservations, then we have post-EOF blocks to try to free.
	 */
	return imap.br_startblock != HOLESTARTBLOCK || ip->i_delayed_blks;
}

/*
 * This is called to free any blocks beyond eof. The caller must hold
 * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
 * reference to the inode.
 */
int
xfs_free_eofblocks(
	struct xfs_inode	*ip)
{
	struct xfs_trans	*tp;
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/* Attach the dquots to the inode up front. */
	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	/* Wait on dio to ensure i_size has settled. */
	inode_dio_wait(VFS_I(ip));

	/*
	 * For preallocated files only free delayed allocations.
	 *
	 * Note that this means we also leave speculative preallocations in
	 * place for preallocated files.
	 */
	if (ip->i_diflags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) {
		if (ip->i_delayed_blks) {
			xfs_bmap_punch_delalloc_range(ip,
				round_up(XFS_ISIZE(ip), mp->m_sb.sb_blocksize),
				LLONG_MAX);
		}
		xfs_inode_clear_eofblocks_tag(ip);
		return 0;
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
	if (error) {
		ASSERT(xfs_is_shutdown(mp));
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * Do not update the on-disk file size. If we update the on-disk file
	 * size and then the system crashes before the contents of the file are
	 * flushed to disk then the files may be full of holes (ie NULL files
	 * bug).
	 */
	error = xfs_itruncate_extents_flags(&tp, ip, XFS_DATA_FORK,
				XFS_ISIZE(ip), XFS_BMAPI_NODISCARD);
	if (error)
		goto err_cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto out_unlock;

	xfs_inode_clear_eofblocks_tag(ip);
	goto out_unlock;

err_cancel:
	/*
	 * If we get an error at this point we simply don't
	 * bother truncating the file.
	 */
	xfs_trans_cancel(tp);
out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
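
/*
 * Preallocate file space from @offset for @len bytes, allocating unwritten
 * extents and marking the inode with XFS_DIFLAG_PREALLOC.
 */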
int
xfs_alloc_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	xfs_mount_t		*mp = ip->i_mount;
	xfs_off_t		count;
	xfs_filblks_t		allocatesize_fsb;
	xfs_extlen_t		extsz, temp;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fileoff_t		endoffset_fsb;
	int			rt;
	xfs_trans_t		*tp;
	xfs_bmbt_irec_t		imaps[1], *imapp;
	int			error;

	trace_xfs_alloc_file_space(ip);

	if (xfs_is_shutdown(mp))
		return -EIO;

	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	if (len <= 0)
		return -EINVAL;

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);

	count = len;
	imapp = &imaps[0];
	startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
	endoffset_fsb = XFS_B_TO_FSB(mp, offset + count);
	allocatesize_fsb = endoffset_fsb - startoffset_fsb;

	/*
	 * Allocate file space until done or until there is an error
	 */
	while (allocatesize_fsb && !error) {
		xfs_fileoff_t	s, e;
		unsigned int	dblocks, rblocks, resblks;
		int		nimaps = 1;

		/*
		 * Determine space reservations for data/realtime.
		 */
		if (unlikely(extsz)) {
			s = startoffset_fsb;
			do_div(s, extsz);
			s *= extsz;
			e = startoffset_fsb + allocatesize_fsb;
			div_u64_rem(startoffset_fsb, extsz, &temp);
			if (temp)
				e += temp;
			div_u64_rem(e, extsz, &temp);
			if (temp)
				e += extsz - temp;
		} else {
			s = 0;
			e = allocatesize_fsb;
		}

		/*
		 * The transaction reservation is limited to a 32-bit block
		 * count, hence we need to limit the number of blocks we are
		 * trying to reserve to avoid an overflow. We can't allocate
		 * more than @nimaps extents, and an extent is limited on disk
		 * to XFS_BMBT_MAX_EXTLEN (21 bits), so use that to enforce the
		 * limit.
		 */
		resblks = min_t(xfs_fileoff_t, (e - s),
				(XFS_MAX_BMBT_EXTLEN * nimaps));
		if (unlikely(rt)) {
			dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
			rblocks = resblks;
		} else {
			dblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
			rblocks = 0;
		}

		error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write,
				dblocks, rblocks, false, &tp);
		if (error)
			break;

		error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
				XFS_IEXT_ADD_NOSPLIT_CNT);
		if (error == -EFBIG)
			error = xfs_iext_count_upgrade(tp, ip,
					XFS_IEXT_ADD_NOSPLIT_CNT);
		if (error)
			goto error;

		/*
		 * If the allocator cannot find a single free extent large
		 * enough to cover the start block of the requested range,
		 * xfs_bmapi_write will return -ENOSR.
		 *
		 * In that case we simply need to keep looping with the same
		 * startoffset_fsb so that one of the following allocations
		 * will eventually reach the requested range.
		 */
		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
				allocatesize_fsb, XFS_BMAPI_PREALLOC, 0, imapp,
				&nimaps);
		if (error) {
			if (error != -ENOSR)
				goto error;
			error = 0;
		} else {
			startoffset_fsb += imapp->br_blockcount;
			allocatesize_fsb -= imapp->br_blockcount;
		}

		ip->i_diflags |= XFS_DIFLAG_PREALLOC;
		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}

	return error;

error:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
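
/*
 * Unmap part of the given range in a single transaction, setting *done once
 * the whole range has been unmapped.
 */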
static int
xfs_unmap_extent(
	struct xfs_inode	*ip,
	xfs_fileoff_t		startoffset_fsb,
	xfs_filblks_t		len_fsb,
	int			*done)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
	int			error;

	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, resblks, 0,
			false, &tp);
	if (error)
		return error;

	error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
			XFS_IEXT_PUNCH_HOLE_CNT);
	if (error == -EFBIG)
		error = xfs_iext_count_upgrade(tp, ip, XFS_IEXT_PUNCH_HOLE_CNT);
	if (error)
		goto out_trans_cancel;

	error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, done);
	if (error)
		goto out_trans_cancel;

	error = xfs_trans_commit(tp);
out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}

/* Caller must first wait for the completion of any pending DIOs if required. */
int
xfs_flush_unmap_range(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	xfs_off_t		rounding, start, end;
	int			error;

	rounding = max_t(xfs_off_t, mp->m_sb.sb_blocksize, PAGE_SIZE);
	start = round_down(offset, rounding);
	end = round_up(offset + len, rounding) - 1;

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;
	truncate_pagecache_range(inode, start, end);
	return 0;
}
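
/*
 * Punch a hole in the file from @offset for @len bytes: unmap whole blocks
 * and zero any remaining partial blocks at either edge of the range.
 */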
int
xfs_free_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fileoff_t		endoffset_fsb;
	int			done = 0, error;

	trace_xfs_free_file_space(ip);

	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	if (len <= 0)	/* if nothing being freed */
		return 0;

	startoffset_fsb = XFS_B_TO_FSB(mp, offset);
	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);

	/* We can only free complete realtime extents. */
	if (XFS_IS_REALTIME_INODE(ip) && mp->m_sb.sb_rextsize > 1) {
		startoffset_fsb = roundup_64(startoffset_fsb,
					     mp->m_sb.sb_rextsize);
		endoffset_fsb = rounddown_64(endoffset_fsb,
					     mp->m_sb.sb_rextsize);
	}

	/*
	 * Need to zero the stuff we're not freeing, on disk.
	 */
	if (endoffset_fsb > startoffset_fsb) {
		while (!done) {
			error = xfs_unmap_extent(ip, startoffset_fsb,
					endoffset_fsb - startoffset_fsb, &done);
			if (error)
				return error;
		}
	}

	/*
	 * Now that we've unmapped all full blocks we'll have to zero out any
	 * partial block at the beginning and/or end. xfs_zero_range is smart
	 * enough to skip any holes, including those we just created, but we
	 * must take care not to zero beyond EOF and enlarge i_size.
	 */
	if (offset >= XFS_ISIZE(ip))
		return 0;
	if (offset + len > XFS_ISIZE(ip))
		len = XFS_ISIZE(ip) - offset;
	error = xfs_zero_range(ip, offset, len, NULL);
	if (error)
		return error;

	/*
	 * If we zeroed right up to EOF and EOF straddles a page boundary we
	 * must make sure that the post-EOF area is also zeroed because the
	 * page could be mmap'd and xfs_zero_range doesn't do that for us.
	 * Writeback of the eof page will do this, albeit clumsily.
	 */
	if (offset + len >= XFS_ISIZE(ip) && offset_in_page(offset + len) > 0) {
		error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
				round_down(offset + len, PAGE_SIZE), LLONG_MAX);
	}

	return error;
}
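
/*
 * Prepare the file for an extent shift: free post-EOF preallocations, flush
 * and invalidate the page cache from the stabilized start offset to EOF, and
 * cancel any lingering COW fork extents.
 */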
static int
xfs_prepare_shift(
	struct xfs_inode	*ip,
	loff_t			offset)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/*
	 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
	 * into the accessible region of the file.
	 */
	if (xfs_can_free_eofblocks(ip)) {
		error = xfs_free_eofblocks(ip);
		if (error)
			return error;
	}

	/*
	 * Shift operations must stabilize the start block offset boundary along
	 * with the full range of the operation. If we don't, a COW writeback
	 * completion could race with an insert, front merge with the start
	 * extent (after split) during the shift and corrupt the file. Start
	 * with the block just prior to the start to stabilize the boundary.
	 */
	offset = round_down(offset, mp->m_sb.sb_blocksize);
	if (offset)
		offset -= mp->m_sb.sb_blocksize;

	/*
	 * Writeback and invalidate cache for the remainder of the file as we're
	 * about to shift down every extent from offset to EOF.
	 */
	error = xfs_flush_unmap_range(ip, offset, XFS_ISIZE(ip));
	if (error)
		return error;

	/*
	 * Clean out anything hanging around in the cow fork now that
	 * we've flushed all the dirty data out to disk to avoid having
	 * CoW extents at the wrong offsets.
	 */
	if (xfs_inode_has_cow_data(ip)) {
		error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
				true);
		if (error)
			return error;
	}

	return 0;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* xfs_collapse_file_space()
|
|
|
|
* This routine frees disk space and shifts extents for the given file.
|
|
|
|
* The first thing we do is to free data blocks in the specified range
|
|
|
|
* by calling xfs_free_file_space(), which also syncs dirty data and
|
|
|
|
* invalidates the page cache over the region on which the collapse
|
|
|
|
* range is working. Extent records are then shifted left to cover the hole.
|
|
|
|
* RETURNS:
|
|
|
|
* 0 on success
|
|
|
|
* errno on error
|
|
|
|
*
|
|
|
|
*/
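/*
 * Rough example (assuming a 1k block size): collapsing 4k at offset 8k
 * punches out blocks 8-11 and then calls xfs_bmap_collapse_extents()
 * until every record from block 12 onwards has moved left by four
 * blocks, closing the hole.
 */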
|
|
|
|
int
|
|
|
|
xfs_collapse_file_space(
|
|
|
|
struct xfs_inode *ip,
|
|
|
|
xfs_off_t offset,
|
|
|
|
xfs_off_t len)
|
|
|
|
{
|
|
|
|
struct xfs_mount *mp = ip->i_mount;
|
|
|
|
struct xfs_trans *tp;
|
|
|
|
int error;
|
|
|
|
xfs_fileoff_t next_fsb = XFS_B_TO_FSB(mp, offset + len);
|
|
|
|
xfs_fileoff_t shift_fsb = XFS_B_TO_FSB(mp, len);
|
2017-10-20 02:07:11 +08:00
|
|
|
bool done = false;
|
2017-10-20 02:07:10 +08:00
|
|
|
|
|
|
|
ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
|
2017-10-24 07:32:38 +08:00
|
|
|
ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
|
|
|
|
|
2017-10-20 02:07:10 +08:00
|
|
|
trace_xfs_collapse_file_space(ip);
|
|
|
|
|
|
|
|
error = xfs_free_file_space(ip, offset, len);
|
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
|
|
|
|
error = xfs_prepare_shift(ip, offset);
|
|
|
|
if (error)
|
|
|
|
return error;
|
2015-03-25 12:08:56 +08:00
|
|
|
|
2020-02-27 01:43:16 +08:00
|
|
|
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
|
|
|
|
if (error)
|
|
|
|
return error;
|
2014-02-24 07:58:19 +08:00
|
|
|
|
2020-02-27 01:43:16 +08:00
|
|
|
xfs_ilock(ip, XFS_ILOCK_EXCL);
|
|
|
|
xfs_trans_ijoin(tp, ip, 0);
|
2014-02-24 07:58:19 +08:00
|
|
|
|
2020-02-27 01:43:16 +08:00
|
|
|
while (!done) {
|
2017-10-20 02:07:11 +08:00
|
|
|
error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
|
2018-07-12 13:26:27 +08:00
|
|
|
&done);
|
2014-02-24 07:58:19 +08:00
|
|
|
if (error)
|
2018-07-25 04:43:13 +08:00
|
|
|
goto out_trans_cancel;
|
2020-02-27 01:43:16 +08:00
|
|
|
if (done)
|
|
|
|
break;
|
2014-02-24 07:58:19 +08:00
|
|
|
|
2020-02-27 01:43:16 +08:00
|
|
|
/* finish any deferred frees and roll the transaction */
|
|
|
|
error = xfs_defer_finish(&tp);
|
|
|
|
if (error)
|
|
|
|
goto out_trans_cancel;
|
2014-02-24 07:58:19 +08:00
|
|
|
}
|
|
|
|
|
2020-02-27 01:43:16 +08:00
|
|
|
error = xfs_trans_commit(tp);
|
|
|
|
xfs_iunlock(ip, XFS_ILOCK_EXCL);
|
2014-02-24 07:58:19 +08:00
|
|
|
return error;
|
|
|
|
|
2015-08-19 08:01:40 +08:00
|
|
|
out_trans_cancel:
|
2015-06-04 11:47:56 +08:00
|
|
|
xfs_trans_cancel(tp);
|
2020-02-27 01:43:16 +08:00
|
|
|
xfs_iunlock(ip, XFS_ILOCK_EXCL);
|
2014-02-24 07:58:19 +08:00
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
2015-03-25 12:08:56 +08:00
|
|
|
/*
|
|
|
|
* xfs_insert_file_space()
|
|
|
|
* This routine creates hole space by shifting extents for the given file.
|
|
|
|
* The first thing we do is sync dirty data and invalidate the page cache
|
|
|
|
* over the region on which the insert range is working. We then split the
|
|
|
|
* extent at the given offset into two by calling xfs_bmap_split_extent,
|
|
|
|
* and shift all extent records lying between [offset, last allocated
|
|
|
|
* extent] to the right to make room for the hole.
|
|
|
|
* RETURNS:
|
|
|
|
* 0 on success
|
|
|
|
* errno on error
|
|
|
|
*/
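/*
 * Rough example (assuming a 1k block size): inserting 4k at offset 8k
 * splits any extent straddling block 8 and then shifts every record
 * from block 8 through the last allocated extent right by four blocks,
 * leaving a 4k hole for the caller to populate.
 */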
|
|
|
|
int
|
|
|
|
xfs_insert_file_space(
|
|
|
|
struct xfs_inode *ip,
|
|
|
|
loff_t offset,
|
|
|
|
loff_t len)
|
|
|
|
{
|
2017-10-20 02:07:10 +08:00
|
|
|
struct xfs_mount *mp = ip->i_mount;
|
|
|
|
struct xfs_trans *tp;
|
|
|
|
int error;
|
|
|
|
xfs_fileoff_t stop_fsb = XFS_B_TO_FSB(mp, offset);
|
|
|
|
xfs_fileoff_t next_fsb = NULLFSBLOCK;
|
|
|
|
xfs_fileoff_t shift_fsb = XFS_B_TO_FSB(mp, len);
|
2017-10-20 02:07:11 +08:00
|
|
|
bool done = false;
|
2017-10-20 02:07:10 +08:00
|
|
|
|
2015-03-25 12:08:56 +08:00
|
|
|
ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
|
2017-10-24 07:32:38 +08:00
|
|
|
ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
|
|
|
|
|
2015-03-25 12:08:56 +08:00
|
|
|
trace_xfs_insert_file_space(ip);
|
|
|
|
|
2018-06-22 14:26:57 +08:00
|
|
|
error = xfs_bmap_can_insert_extents(ip, stop_fsb, shift_fsb);
|
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
|
2017-10-20 02:07:10 +08:00
|
|
|
error = xfs_prepare_shift(ip, offset);
|
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
|
2020-02-27 01:43:15 +08:00
|
|
|
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
|
|
|
|
XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
|
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
|
|
|
|
xfs_ilock(ip, XFS_ILOCK_EXCL);
|
2020-02-27 01:43:16 +08:00
|
|
|
xfs_trans_ijoin(tp, ip, 0);
|
2020-02-27 01:43:15 +08:00
|
|
|
|
2021-01-23 08:48:11 +08:00
|
|
|
error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
|
|
|
|
XFS_IEXT_PUNCH_HOLE_CNT);
|
2022-03-09 15:49:36 +08:00
|
|
|
if (error == -EFBIG)
|
|
|
|
error = xfs_iext_count_upgrade(tp, ip, XFS_IEXT_PUNCH_HOLE_CNT);
|
2021-01-23 08:48:11 +08:00
|
|
|
if (error)
|
|
|
|
goto out_trans_cancel;
|
|
|
|
|
2020-02-27 01:43:16 +08:00
|
|
|
/*
|
|
|
|
* The extent shifting code works on extent granularity. So, if stop_fsb
|
|
|
|
* is not the starting block of an extent, we need to split the extent at
|
|
|
|
* stop_fsb.
|
|
|
|
*/
|
2020-02-27 01:43:15 +08:00
|
|
|
error = xfs_bmap_split_extent(tp, ip, stop_fsb);
|
|
|
|
if (error)
|
|
|
|
goto out_trans_cancel;
|
|
|
|
|
2020-02-27 01:43:16 +08:00
|
|
|
do {
|
2020-08-18 23:05:58 +08:00
|
|
|
error = xfs_defer_finish(&tp);
|
2017-10-20 02:07:10 +08:00
|
|
|
if (error)
|
2020-02-27 01:43:16 +08:00
|
|
|
goto out_trans_cancel;
|
2017-10-20 02:07:10 +08:00
|
|
|
|
2017-10-20 02:07:11 +08:00
|
|
|
error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
|
2018-07-12 13:26:27 +08:00
|
|
|
&done, stop_fsb);
|
2017-10-20 02:07:10 +08:00
|
|
|
if (error)
|
2018-07-25 04:43:13 +08:00
|
|
|
goto out_trans_cancel;
|
2020-02-27 01:43:16 +08:00
|
|
|
} while (!done);
|
2017-10-20 02:07:10 +08:00
|
|
|
|
2020-02-27 01:43:16 +08:00
|
|
|
error = xfs_trans_commit(tp);
|
|
|
|
xfs_iunlock(ip, XFS_ILOCK_EXCL);
|
2017-10-20 02:07:10 +08:00
|
|
|
return error;
|
|
|
|
|
2018-07-25 04:43:13 +08:00
|
|
|
out_trans_cancel:
|
2017-10-20 02:07:10 +08:00
|
|
|
xfs_trans_cancel(tp);
|
2020-02-27 01:43:16 +08:00
|
|
|
xfs_iunlock(ip, XFS_ILOCK_EXCL);
|
2017-10-20 02:07:10 +08:00
|
|
|
return error;
|
2015-03-25 12:08:56 +08:00
|
|
|
}
|
|
|
|
|
2013-08-12 18:49:48 +08:00
|
|
|
/*
|
|
|
|
* We need to check that the format of the data fork in the temporary inode is
|
|
|
|
* valid for the target inode before doing the swap. This is not a problem with
|
|
|
|
* attr1 because of the fixed fork offset, but attr2 has a dynamically sized
|
|
|
|
* data fork depending on the space the attribute fork is taking so we can get
|
|
|
|
* invalid formats on the target inode.
|
|
|
|
*
|
|
|
|
* E.g. target has space for 7 extents in extent format, temp inode only has
|
|
|
|
* space for 6. If we defragment down to 7 extents, then the tmp format is a
|
|
|
|
* btree, but when swapped it needs to be in extent format. Hence we can't just
|
|
|
|
* blindly swap data forks on attr2 filesystems.
|
|
|
|
*
|
|
|
|
* Note that we check the swap in both directions so that we don't end up with
|
|
|
|
* a corrupt temporary inode, either.
|
|
|
|
*
|
|
|
|
* Note that fixing the way xfs_fsr sets up the attribute fork in the source
|
|
|
|
* inode will prevent this situation from occurring, so all we do here is
|
|
|
|
* reject and log the attempt. Basically we are putting the responsibility on
|
|
|
|
* userspace to get this right.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
xfs_swap_extents_check_format(
|
2016-10-04 00:11:52 +08:00
|
|
|
struct xfs_inode *ip, /* target inode */
|
|
|
|
struct xfs_inode *tip) /* tmp inode */
|
2013-08-12 18:49:48 +08:00
|
|
|
{
|
2020-05-19 01:28:05 +08:00
|
|
|
struct xfs_ifork *ifp = &ip->i_df;
|
|
|
|
struct xfs_ifork *tifp = &tip->i_df;
|
2013-08-12 18:49:48 +08:00
|
|
|
|
2020-05-13 07:42:51 +08:00
|
|
|
/* User/group/project quota ids must match if quotas are enforced. */
|
|
|
|
if (XFS_IS_QUOTA_ON(ip->i_mount) &&
|
|
|
|
(!uid_eq(VFS_I(ip)->i_uid, VFS_I(tip)->i_uid) ||
|
|
|
|
!gid_eq(VFS_I(ip)->i_gid, VFS_I(tip)->i_gid) ||
|
2021-03-30 02:11:39 +08:00
|
|
|
ip->i_projid != tip->i_projid))
|
2020-05-13 07:42:51 +08:00
|
|
|
return -EINVAL;
|
|
|
|
|
2013-08-12 18:49:48 +08:00
|
|
|
/* Should never get a local format */
|
2020-05-19 01:28:05 +08:00
|
|
|
if (ifp->if_format == XFS_DINODE_FMT_LOCAL ||
|
|
|
|
tifp->if_format == XFS_DINODE_FMT_LOCAL)
|
2014-06-25 12:58:08 +08:00
|
|
|
return -EINVAL;
|
2013-08-12 18:49:48 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* if the target inode has fewer extents than the temporary inode then
|
|
|
|
* why did userspace call us?
|
|
|
|
*/
|
2020-05-19 01:28:05 +08:00
|
|
|
if (ifp->if_nextents < tifp->if_nextents)
|
2014-06-25 12:58:08 +08:00
|
|
|
return -EINVAL;
|
2013-08-12 18:49:48 +08:00
|
|
|
|
2016-10-04 00:11:53 +08:00
|
|
|
/*
|
|
|
|
* If we have to use the (expensive) rmap swap method, we can
|
|
|
|
* handle any number of extents and any format.
|
|
|
|
*/
|
2021-08-19 09:46:37 +08:00
|
|
|
if (xfs_has_rmapbt(ip->i_mount))
|
2016-10-04 00:11:53 +08:00
|
|
|
return 0;
|
|
|
|
|
2013-08-12 18:49:48 +08:00
|
|
|
/*
|
|
|
|
* if the target inode is in extent form and the temp inode is in btree
|
|
|
|
* form then we will end up with the target inode in the wrong format
|
|
|
|
* as we already know there are fewer extents in the temp inode.
|
|
|
|
*/
|
2020-05-19 01:28:05 +08:00
|
|
|
if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
|
|
|
|
tifp->if_format == XFS_DINODE_FMT_BTREE)
|
2014-06-25 12:58:08 +08:00
|
|
|
return -EINVAL;
|
2013-08-12 18:49:48 +08:00
|
|
|
|
|
|
|
/* Check temp in extent form to max in target */
|
2020-05-19 01:28:05 +08:00
|
|
|
if (tifp->if_format == XFS_DINODE_FMT_EXTENTS &&
|
|
|
|
tifp->if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
|
2014-06-25 12:58:08 +08:00
|
|
|
return -EINVAL;
|
2013-08-12 18:49:48 +08:00
|
|
|
|
|
|
|
/* Check target in extent form to max in temp */
|
2020-05-19 01:28:05 +08:00
|
|
|
if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
|
|
|
|
ifp->if_nextents > XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
|
2014-06-25 12:58:08 +08:00
|
|
|
return -EINVAL;
|
2013-08-12 18:49:48 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If we are in a btree format, check that the temp root block will fit
|
|
|
|
* in the target and that it has enough extents to be in btree format
|
|
|
|
* in the target.
|
|
|
|
*
|
|
|
|
* Note that we have to be careful to allow btree->extent conversions
|
|
|
|
* (a common defrag case) which will occur when the temp inode is in
|
|
|
|
* extent format...
|
|
|
|
*/
|
2020-05-19 01:28:05 +08:00
|
|
|
if (tifp->if_format == XFS_DINODE_FMT_BTREE) {
|
2022-07-10 01:56:06 +08:00
|
|
|
if (xfs_inode_has_attr_fork(ip) &&
|
2022-07-10 01:56:07 +08:00
|
|
|
XFS_BMAP_BMDR_SPACE(tifp->if_broot) > xfs_inode_fork_boff(ip))
|
2014-06-25 12:58:08 +08:00
|
|
|
return -EINVAL;
|
2020-05-19 01:28:05 +08:00
|
|
|
if (tifp->if_nextents <= XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
|
2014-06-25 12:58:08 +08:00
|
|
|
return -EINVAL;
|
2013-08-12 18:49:48 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Reciprocal target->temp btree format checks */
|
2020-05-19 01:28:05 +08:00
|
|
|
if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
|
2022-07-10 01:56:06 +08:00
|
|
|
if (xfs_inode_has_attr_fork(tip) &&
|
2022-07-10 01:56:07 +08:00
|
|
|
XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > xfs_inode_fork_boff(tip))
|
2014-06-25 12:58:08 +08:00
|
|
|
return -EINVAL;
|
2020-05-19 01:28:05 +08:00
|
|
|
if (ifp->if_nextents <= XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
|
2014-06-25 12:58:08 +08:00
|
|
|
return -EINVAL;
|
2013-08-12 18:49:48 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-09-23 14:20:11 +08:00
|
|
|
static int
|
2014-08-04 11:44:08 +08:00
|
|
|
xfs_swap_extent_flush(
|
|
|
|
struct xfs_inode *ip)
|
|
|
|
{
|
|
|
|
int error;
|
|
|
|
|
|
|
|
error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
|
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
truncate_pagecache_range(VFS_I(ip), 0, -1);
|
|
|
|
|
|
|
|
/* Verify O_DIRECT for ftmp */
|
|
|
|
if (VFS_I(ip)->i_mapping->nrpages)
|
|
|
|
return -EINVAL;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-10-04 00:11:53 +08:00
|
|
|
/*
|
|
|
|
* Move extents from one file to another, when rmap is enabled.
|
|
|
|
*/
|
|
|
|
STATIC int
|
|
|
|
xfs_swap_extent_rmap(
|
|
|
|
struct xfs_trans **tpp,
|
|
|
|
struct xfs_inode *ip,
|
|
|
|
struct xfs_inode *tip)
|
|
|
|
{
|
2018-07-12 13:26:17 +08:00
|
|
|
struct xfs_trans *tp = *tpp;
|
2016-10-04 00:11:53 +08:00
|
|
|
struct xfs_bmbt_irec irec;
|
|
|
|
struct xfs_bmbt_irec uirec;
|
|
|
|
struct xfs_bmbt_irec tirec;
|
|
|
|
xfs_fileoff_t offset_fsb;
|
|
|
|
xfs_fileoff_t end_fsb;
|
|
|
|
xfs_filblks_t count_fsb;
|
|
|
|
int error;
|
|
|
|
xfs_filblks_t ilen;
|
|
|
|
xfs_filblks_t rlen;
|
|
|
|
int nimaps;
|
2017-06-17 02:00:05 +08:00
|
|
|
uint64_t tip_flags2;
|
2016-10-04 00:11:53 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If the source file has shared blocks, we must flag the donor
|
|
|
|
* file as having shared blocks so that we get the shared-block
|
|
|
|
* rmap functions when we go to fix up the rmaps. The flags
|
|
|
|
* will be switched for real later.
|
|
|
|
*/
|
2021-03-30 02:11:45 +08:00
|
|
|
tip_flags2 = tip->i_diflags2;
|
|
|
|
if (ip->i_diflags2 & XFS_DIFLAG2_REFLINK)
|
|
|
|
tip->i_diflags2 |= XFS_DIFLAG2_REFLINK;
|
2016-10-04 00:11:53 +08:00
|
|
|
|
|
|
|
offset_fsb = 0;
|
|
|
|
end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
|
|
|
|
count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
|
|
|
|
|
|
|
|
while (count_fsb) {
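/*
 * Each pass reads the next mapping from the donor file and remaps
 * it piece by piece against the overlapping source mappings: trim
 * to the shorter of the two, queue unmap and map deferred ops in
 * both directions, finish the deferred work, then advance by the
 * number of blocks actually remapped.
 */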
|
|
|
|
/* Read extent from the donor file */
|
|
|
|
nimaps = 1;
|
|
|
|
error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
|
|
|
|
&nimaps, 0);
|
|
|
|
if (error)
|
|
|
|
goto out;
|
|
|
|
ASSERT(nimaps == 1);
|
|
|
|
ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);
|
|
|
|
|
|
|
|
trace_xfs_swap_extent_rmap_remap(tip, &tirec);
|
|
|
|
ilen = tirec.br_blockcount;
|
|
|
|
|
|
|
|
/* Unmap the old blocks in the source file. */
|
|
|
|
while (tirec.br_blockcount) {
|
2023-02-11 01:11:06 +08:00
|
|
|
ASSERT(tp->t_highest_agno == NULLAGNUMBER);
|
2016-10-04 00:11:53 +08:00
|
|
|
trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);
|
|
|
|
|
|
|
|
/* Read extent from the source file */
|
|
|
|
nimaps = 1;
|
|
|
|
error = xfs_bmapi_read(ip, tirec.br_startoff,
|
|
|
|
tirec.br_blockcount, &irec,
|
|
|
|
&nimaps, 0);
|
|
|
|
if (error)
|
2018-09-29 11:41:58 +08:00
|
|
|
goto out;
|
2016-10-04 00:11:53 +08:00
|
|
|
ASSERT(nimaps == 1);
|
|
|
|
ASSERT(tirec.br_startoff == irec.br_startoff);
|
|
|
|
trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);
|
|
|
|
|
|
|
|
/* Trim the extent. */
|
|
|
|
uirec = tirec;
|
|
|
|
uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
|
|
|
|
tirec.br_blockcount,
|
|
|
|
irec.br_blockcount);
|
|
|
|
trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);
|
|
|
|
|
2021-01-23 08:48:15 +08:00
|
|
|
if (xfs_bmap_is_real_extent(&uirec)) {
|
|
|
|
error = xfs_iext_count_may_overflow(ip,
|
|
|
|
XFS_DATA_FORK,
|
|
|
|
XFS_IEXT_SWAP_RMAP_CNT);
|
2022-03-09 15:49:36 +08:00
|
|
|
if (error == -EFBIG)
|
|
|
|
error = xfs_iext_count_upgrade(tp, ip,
|
|
|
|
XFS_IEXT_SWAP_RMAP_CNT);
|
2021-01-23 08:48:15 +08:00
|
|
|
if (error)
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (xfs_bmap_is_real_extent(&irec)) {
|
|
|
|
error = xfs_iext_count_may_overflow(tip,
|
|
|
|
XFS_DATA_FORK,
|
|
|
|
XFS_IEXT_SWAP_RMAP_CNT);
|
2022-03-09 15:49:36 +08:00
|
|
|
if (error == -EFBIG)
|
|
|
|
error = xfs_iext_count_upgrade(tp, ip,
|
|
|
|
XFS_IEXT_SWAP_RMAP_CNT);
|
2021-01-23 08:48:15 +08:00
|
|
|
if (error)
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2016-10-04 00:11:53 +08:00
|
|
|
/* Remove the mapping from the donor file. */
|
2019-08-27 08:06:04 +08:00
|
|
|
xfs_bmap_unmap_extent(tp, tip, &uirec);
|
2016-10-04 00:11:53 +08:00
|
|
|
|
|
|
|
/* Remove the mapping from the source file. */
|
2019-08-27 08:06:04 +08:00
|
|
|
xfs_bmap_unmap_extent(tp, ip, &irec);
|
2016-10-04 00:11:53 +08:00
|
|
|
|
|
|
|
/* Map the donor file's blocks into the source file. */
|
2019-08-27 08:06:04 +08:00
|
|
|
xfs_bmap_map_extent(tp, ip, &uirec);
|
2016-10-04 00:11:53 +08:00
|
|
|
|
|
|
|
/* Map the source file's blocks into the donor file. */
|
2019-08-27 08:06:04 +08:00
|
|
|
xfs_bmap_map_extent(tp, tip, &irec);
|
2016-10-04 00:11:53 +08:00
|
|
|
|
2018-07-25 04:43:15 +08:00
|
|
|
error = xfs_defer_finish(tpp);
|
2018-07-12 13:26:17 +08:00
|
|
|
tp = *tpp;
|
2016-10-04 00:11:53 +08:00
|
|
|
if (error)
|
2018-08-01 22:20:33 +08:00
|
|
|
goto out;
|
2016-10-04 00:11:53 +08:00
|
|
|
|
|
|
|
tirec.br_startoff += rlen;
|
|
|
|
if (tirec.br_startblock != HOLESTARTBLOCK &&
|
|
|
|
tirec.br_startblock != DELAYSTARTBLOCK)
|
|
|
|
tirec.br_startblock += rlen;
|
|
|
|
tirec.br_blockcount -= rlen;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Roll on... */
|
|
|
|
count_fsb -= ilen;
|
|
|
|
offset_fsb += ilen;
|
|
|
|
}
|
|
|
|
|
2021-03-30 02:11:45 +08:00
|
|
|
tip->i_diflags2 = tip_flags2;
|
2016-10-04 00:11:53 +08:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
out:
|
|
|
|
trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
|
2021-03-30 02:11:45 +08:00
|
|
|
tip->i_diflags2 = tip_flags2;
|
2016-10-04 00:11:53 +08:00
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
2016-10-04 00:11:53 +08:00
|
|
|
/* Swap the extents of two files by swapping data forks. */
|
|
|
|
STATIC int
|
|
|
|
xfs_swap_extent_forks(
|
|
|
|
struct xfs_trans *tp,
|
|
|
|
struct xfs_inode *ip,
|
|
|
|
struct xfs_inode *tip,
|
|
|
|
int *src_log_flags,
|
|
|
|
int *target_log_flags)
|
2013-08-12 18:49:48 +08:00
|
|
|
{
|
2017-06-17 02:00:12 +08:00
|
|
|
xfs_filblks_t aforkblks = 0;
|
|
|
|
xfs_filblks_t taforkblks = 0;
|
|
|
|
xfs_extnum_t junk;
|
2017-06-17 02:00:05 +08:00
|
|
|
uint64_t tmp;
|
2016-10-04 00:11:53 +08:00
|
|
|
int error;
|
2013-08-12 18:49:48 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Count the number of extended attribute blocks
|
|
|
|
*/
|
2022-07-10 01:56:06 +08:00
|
|
|
if (xfs_inode_has_attr_fork(ip) && ip->i_af.if_nextents > 0 &&
|
2022-07-10 01:56:06 +08:00
|
|
|
ip->i_af.if_format != XFS_DINODE_FMT_LOCAL) {
|
2017-06-17 02:00:12 +08:00
|
|
|
error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk,
|
2016-10-04 00:11:53 +08:00
|
|
|
&aforkblks);
|
2013-08-12 18:49:48 +08:00
|
|
|
if (error)
|
2016-10-04 00:11:53 +08:00
|
|
|
return error;
|
2013-08-12 18:49:48 +08:00
|
|
|
}
|
2022-07-10 01:56:06 +08:00
|
|
|
if (xfs_inode_has_attr_fork(tip) && tip->i_af.if_nextents > 0 &&
|
2022-07-10 01:56:06 +08:00
|
|
|
tip->i_af.if_format != XFS_DINODE_FMT_LOCAL) {
|
2017-06-17 02:00:12 +08:00
|
|
|
error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk,
|
2016-10-04 00:11:53 +08:00
|
|
|
&taforkblks);
|
2013-08-12 18:49:48 +08:00
|
|
|
if (error)
|
2016-10-04 00:11:53 +08:00
|
|
|
return error;
|
2013-08-12 18:49:48 +08:00
|
|
|
}
|
|
|
|
|
2013-08-30 08:23:44 +08:00
|
|
|
/*
|
2017-08-30 01:08:39 +08:00
|
|
|
* Btree format (v3) inodes have the inode number stamped in the bmbt
|
|
|
|
* block headers. We can't start changing the bmbt blocks until the
|
|
|
|
* inode owner change is logged so recovery does the right thing in the
|
|
|
|
* event of a crash. Set the owner change log flags now and leave the
|
|
|
|
* bmbt scan as the last step.
|
2013-08-30 08:23:44 +08:00
|
|
|
*/
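/*
 * The scan itself is driven from xfs_swap_extents() via
 * xfs_swap_change_owner() once the forks have been swapped and both
 * inodes have been logged.
 */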
|
2021-08-19 09:46:37 +08:00
|
|
|
if (xfs_has_v3inodes(ip->i_mount)) {
|
2020-05-19 01:28:05 +08:00
|
|
|
if (ip->i_df.if_format == XFS_DINODE_FMT_BTREE)
|
2020-03-18 23:15:11 +08:00
|
|
|
(*target_log_flags) |= XFS_ILOG_DOWNER;
|
2020-05-19 01:28:05 +08:00
|
|
|
if (tip->i_df.if_format == XFS_DINODE_FMT_BTREE)
|
2020-03-18 23:15:11 +08:00
|
|
|
(*src_log_flags) |= XFS_ILOG_DOWNER;
|
|
|
|
}
|
2013-08-30 08:23:44 +08:00
|
|
|
|
2013-08-12 18:49:48 +08:00
|
|
|
/*
|
|
|
|
* Swap the data forks of the inodes
|
|
|
|
*/
|
2018-07-12 13:26:38 +08:00
|
|
|
swap(ip->i_df, tip->i_df);
|
2013-08-12 18:49:48 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Fix the on-disk inode values
|
|
|
|
*/
|
2021-03-30 02:11:40 +08:00
|
|
|
tmp = (uint64_t)ip->i_nblocks;
|
|
|
|
ip->i_nblocks = tip->i_nblocks - taforkblks + aforkblks;
|
|
|
|
tip->i_nblocks = tmp + taforkblks - aforkblks;
|
2013-08-12 18:49:48 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* The extents in the source inode could still contain speculative
|
|
|
|
* preallocation beyond EOF (e.g. the file is open but not modified
|
|
|
|
* while defrag is in progress). In that case, we need to copy over the
|
|
|
|
* number of delalloc blocks the data fork in the source inode is
|
|
|
|
* tracking beyond EOF so that when the fork is truncated away when the
|
|
|
|
* temporary inode is unlinked we don't underrun the i_delayed_blks
|
|
|
|
* counter on that inode.
|
|
|
|
*/
|
|
|
|
ASSERT(tip->i_delayed_blks == 0);
|
|
|
|
tip->i_delayed_blks = ip->i_delayed_blks;
|
|
|
|
ip->i_delayed_blks = 0;
|
|
|
|
|
2020-05-19 01:28:05 +08:00
|
|
|
switch (ip->i_df.if_format) {
|
2013-08-12 18:49:48 +08:00
|
|
|
case XFS_DINODE_FMT_EXTENTS:
|
2016-10-04 00:11:53 +08:00
|
|
|
(*src_log_flags) |= XFS_ILOG_DEXT;
|
2013-08-12 18:49:48 +08:00
|
|
|
break;
|
|
|
|
case XFS_DINODE_FMT_BTREE:
|
2021-08-19 09:46:37 +08:00
|
|
|
ASSERT(!xfs_has_v3inodes(ip->i_mount) ||
|
2016-10-04 00:11:53 +08:00
|
|
|
(*src_log_flags & XFS_ILOG_DOWNER));
|
|
|
|
(*src_log_flags) |= XFS_ILOG_DBROOT;
|
2013-08-12 18:49:48 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2020-05-19 01:28:05 +08:00
|
|
|
switch (tip->i_df.if_format) {
|
2013-08-12 18:49:48 +08:00
|
|
|
case XFS_DINODE_FMT_EXTENTS:
|
2016-10-04 00:11:53 +08:00
|
|
|
(*target_log_flags) |= XFS_ILOG_DEXT;
|
2013-08-12 18:49:48 +08:00
|
|
|
break;
|
|
|
|
case XFS_DINODE_FMT_BTREE:
|
2016-10-04 00:11:53 +08:00
|
|
|
(*target_log_flags) |= XFS_ILOG_DBROOT;
|
2021-08-19 09:46:37 +08:00
|
|
|
ASSERT(!xfs_has_v3inodes(ip->i_mount) ||
|
2016-10-04 00:11:53 +08:00
|
|
|
(*target_log_flags & XFS_ILOG_DOWNER));
|
2013-08-12 18:49:48 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2016-10-04 00:11:53 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-08-30 01:08:40 +08:00
|
|
|
/*
|
|
|
|
* Fix up the owners of the bmbt blocks to refer to the current inode. The
|
|
|
|
* change owner scan attempts to order all modified buffers in the current
|
|
|
|
* transaction. In the event of ordered buffer failure, the offending buffer is
|
|
|
|
* physically logged as a fallback and the scan returns -EAGAIN. We must roll
|
|
|
|
* the transaction in this case to replenish the fallback log reservation and
|
|
|
|
* restart the scan. This process repeats until the scan completes.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
xfs_swap_change_owner(
|
|
|
|
struct xfs_trans **tpp,
|
|
|
|
struct xfs_inode *ip,
|
|
|
|
struct xfs_inode *tmpip)
|
|
|
|
{
|
|
|
|
int error;
|
|
|
|
struct xfs_trans *tp = *tpp;
|
|
|
|
|
|
|
|
do {
|
|
|
|
error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, ip->i_ino,
|
|
|
|
NULL);
|
|
|
|
/* success or fatal error */
|
|
|
|
if (error != -EAGAIN)
|
|
|
|
break;
|
|
|
|
|
|
|
|
error = xfs_trans_roll(tpp);
|
|
|
|
if (error)
|
|
|
|
break;
|
|
|
|
tp = *tpp;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Redirty both inodes so they can relog and keep the log tail
|
|
|
|
* moving forward.
|
|
|
|
*/
|
|
|
|
xfs_trans_ijoin(tp, ip, 0);
|
|
|
|
xfs_trans_ijoin(tp, tmpip, 0);
|
|
|
|
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
|
|
|
|
xfs_trans_log_inode(tp, tmpip, XFS_ILOG_CORE);
|
|
|
|
} while (true);
|
|
|
|
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
2016-10-04 00:11:53 +08:00
|
|
|
int
|
|
|
|
xfs_swap_extents(
|
|
|
|
struct xfs_inode *ip, /* target inode */
|
|
|
|
struct xfs_inode *tip, /* tmp inode */
|
|
|
|
struct xfs_swapext *sxp)
|
|
|
|
{
|
|
|
|
struct xfs_mount *mp = ip->i_mount;
|
|
|
|
struct xfs_trans *tp;
|
|
|
|
struct xfs_bstat *sbp = &sxp->sx_stat;
|
|
|
|
int src_log_flags, target_log_flags;
|
|
|
|
int error = 0;
|
2017-06-17 02:00:05 +08:00
|
|
|
uint64_t f;
|
2017-08-30 01:08:40 +08:00
|
|
|
int resblks = 0;
|
2020-06-30 05:44:36 +08:00
|
|
|
unsigned int flags = 0;
|
2023-07-06 03:01:47 +08:00
|
|
|
struct timespec64 ctime;
|
2016-10-04 00:11:53 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Lock the inodes against other IO, page faults and truncate to
|
|
|
|
* begin with. Then we can ensure the inodes are flushed and have no
|
|
|
|
* page cache safely. Once we have done this we can take the ilocks and
|
|
|
|
* do the rest of the checks.
|
|
|
|
*/
|
2016-11-30 11:33:25 +08:00
|
|
|
lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
|
2021-05-24 19:17:49 +08:00
|
|
|
filemap_invalidate_lock_two(VFS_I(ip)->i_mapping,
|
|
|
|
VFS_I(tip)->i_mapping);
|
2016-10-04 00:11:53 +08:00
|
|
|
|
|
|
|
/* Verify that both files have the same format */
|
|
|
|
if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
|
|
|
|
error = -EINVAL;
|
|
|
|
goto out_unlock;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Verify both files are either real-time or non-realtime */
|
|
|
|
if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
|
|
|
|
error = -EINVAL;
|
|
|
|
goto out_unlock;
|
|
|
|
}
|
|
|
|
|
2019-11-10 04:04:30 +08:00
|
|
|
error = xfs_qm_dqattach(ip);
|
|
|
|
if (error)
|
|
|
|
goto out_unlock;
|
|
|
|
|
|
|
|
error = xfs_qm_dqattach(tip);
|
|
|
|
if (error)
|
|
|
|
goto out_unlock;
|
|
|
|
|
2016-10-04 00:11:53 +08:00
|
|
|
error = xfs_swap_extent_flush(ip);
|
|
|
|
if (error)
|
|
|
|
goto out_unlock;
|
|
|
|
error = xfs_swap_extent_flush(tip);
|
|
|
|
if (error)
|
|
|
|
goto out_unlock;
|
|
|
|
|
2018-10-18 14:21:55 +08:00
|
|
|
if (xfs_inode_has_cow_data(tip)) {
|
|
|
|
error = xfs_reflink_cancel_cow_range(tip, 0, NULLFILEOFF, true);
|
|
|
|
if (error)
|
2020-05-05 05:06:27 +08:00
|
|
|
goto out_unlock;
|
2018-10-18 14:21:55 +08:00
|
|
|
}
|
|
|
|
|
2016-10-04 00:11:53 +08:00
|
|
|
/*
|
|
|
|
* Extent "swapping" with rmap requires a permanent reservation and
|
|
|
|
* a block reservation because it's really just a remap operation
|
|
|
|
* performed with log redo items!
|
|
|
|
*/
|
2021-08-19 09:46:37 +08:00
|
|
|
if (xfs_has_rmapbt(mp)) {
|
2020-05-19 01:27:22 +08:00
|
|
|
int w = XFS_DATA_FORK;
|
|
|
|
uint32_t ipnext = ip->i_df.if_nextents;
|
|
|
|
uint32_t tipnext = tip->i_df.if_nextents;
|
2018-03-10 06:01:58 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Conceptually this shouldn't affect the shape of either bmbt,
|
|
|
|
* but since we atomically move extents one by one, we reserve
|
|
|
|
* enough space to rebuild both trees.
|
|
|
|
*/
|
|
|
|
resblks = XFS_SWAP_RMAP_SPACE_RES(mp, ipnext, w);
|
|
|
|
resblks += XFS_SWAP_RMAP_SPACE_RES(mp, tipnext, w);
|
|
|
|
|
2016-10-04 00:11:53 +08:00
|
|
|
/*
|
2020-06-30 05:44:36 +08:00
|
|
|
* If either inode straddles a bmapbt block allocation boundary,
|
|
|
|
* the rmapbt algorithm triggers repeated allocs and frees as
|
|
|
|
* extents are remapped. This can exhaust the block reservation
|
|
|
|
* prematurely and cause shutdown. Return freed blocks to the
|
|
|
|
* transaction reservation to counter this behavior.
|
2016-10-04 00:11:53 +08:00
|
|
|
*/
|
2020-06-30 05:44:36 +08:00
|
|
|
flags |= XFS_TRANS_RES_FDBLKS;
|
2017-08-30 01:08:40 +08:00
|
|
|
}
|
2020-06-30 05:44:36 +08:00
|
|
|
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, flags,
|
|
|
|
&tp);
|
2016-10-04 00:11:53 +08:00
|
|
|
if (error)
|
|
|
|
goto out_unlock;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Lock and join the inodes to the transaction so that transaction commit
|
|
|
|
* or cancel will unlock the inodes from this point onwards.
|
|
|
|
*/
|
2018-01-27 07:27:33 +08:00
|
|
|
xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tip, XFS_ILOCK_EXCL);
|
2016-10-04 00:11:53 +08:00
|
|
|
xfs_trans_ijoin(tp, ip, 0);
|
|
|
|
xfs_trans_ijoin(tp, tip, 0);
|
|
|
|
|
|
|
|
|
|
|
|
/* Verify all data are being swapped */
|
|
|
|
if (sxp->sx_offset != 0 ||
|
2021-03-30 02:11:40 +08:00
|
|
|
sxp->sx_length != ip->i_disk_size ||
|
|
|
|
sxp->sx_length != tip->i_disk_size) {
|
2016-10-04 00:11:53 +08:00
|
|
|
error = -EFAULT;
|
|
|
|
goto out_trans_cancel;
|
|
|
|
}
|
|
|
|
|
|
|
|
trace_xfs_swap_extent_before(ip, 0);
|
|
|
|
trace_xfs_swap_extent_before(tip, 1);
|
|
|
|
|
|
|
|
/* check inode formats now that data is flushed */
|
|
|
|
error = xfs_swap_extents_check_format(ip, tip);
|
|
|
|
if (error) {
|
|
|
|
xfs_notice(mp,
|
|
|
|
"%s: inode 0x%llx format is incompatible for exchanging.",
|
|
|
|
__func__, ip->i_ino);
|
|
|
|
goto out_trans_cancel;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Compare the current change & modify times with that
|
|
|
|
* passed in. If they differ, we abort this swap.
|
|
|
|
* This is the mechanism used to assure the calling
|
|
|
|
* process that the file was not changed out from
|
|
|
|
* under it.
|
|
|
|
*/
|
2023-07-06 03:01:47 +08:00
|
|
|
ctime = inode_get_ctime(VFS_I(ip));
|
|
|
|
if ((sbp->bs_ctime.tv_sec != ctime.tv_sec) ||
|
|
|
|
(sbp->bs_ctime.tv_nsec != ctime.tv_nsec) ||
|
2016-10-04 00:11:53 +08:00
|
|
|
(sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
|
|
|
|
(sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
|
|
|
|
error = -EBUSY;
|
|
|
|
goto out_trans_cancel;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Note the trickiness in setting the log flags - we set the owner log
|
|
|
|
* flag on the opposite inode (i.e. the inode we are setting the new
|
|
|
|
* owner to be) because once we swap the forks and log that, log
|
|
|
|
* recovery is going to see the fork as owned by the swapped inode,
|
|
|
|
* not the pre-swapped inodes.
|
|
|
|
*/
|
|
|
|
src_log_flags = XFS_ILOG_CORE;
|
|
|
|
target_log_flags = XFS_ILOG_CORE;
|
|
|
|
|
2021-08-19 09:46:37 +08:00
|
|
|
if (xfs_has_rmapbt(mp))
|
2016-10-04 00:11:53 +08:00
|
|
|
error = xfs_swap_extent_rmap(&tp, ip, tip);
|
|
|
|
else
|
|
|
|
error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
|
|
|
|
&target_log_flags);
|
2016-10-04 00:11:53 +08:00
|
|
|
if (error)
|
|
|
|
goto out_trans_cancel;
|
|
|
|
|
2016-10-04 00:11:42 +08:00
|
|
|
/* Do we have to swap reflink flags? */
|
2021-03-30 02:11:45 +08:00
|
|
|
if ((ip->i_diflags2 & XFS_DIFLAG2_REFLINK) ^
|
|
|
|
(tip->i_diflags2 & XFS_DIFLAG2_REFLINK)) {
|
|
|
|
f = ip->i_diflags2 & XFS_DIFLAG2_REFLINK;
|
|
|
|
ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
|
|
|
|
ip->i_diflags2 |= tip->i_diflags2 & XFS_DIFLAG2_REFLINK;
|
|
|
|
tip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
|
|
|
|
tip->i_diflags2 |= f & XFS_DIFLAG2_REFLINK;
|
2017-09-19 00:41:18 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Swap the cow forks. */
|
2021-08-19 09:46:37 +08:00
|
|
|
if (xfs_has_reflink(mp)) {
|
2020-05-19 01:28:05 +08:00
|
|
|
ASSERT(!ip->i_cowfp ||
|
|
|
|
ip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);
|
|
|
|
ASSERT(!tip->i_cowfp ||
|
|
|
|
tip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);
|
2017-09-19 00:41:18 +08:00
|
|
|
|
2018-07-12 13:26:38 +08:00
|
|
|
swap(ip->i_cowfp, tip->i_cowfp);
|
2017-09-19 00:41:18 +08:00
|
|
|
|
2018-03-14 14:15:30 +08:00
|
|
|
if (ip->i_cowfp && ip->i_cowfp->if_bytes)
|
2017-09-19 00:41:18 +08:00
|
|
|
xfs_inode_set_cowblocks_tag(ip);
|
|
|
|
else
|
|
|
|
xfs_inode_clear_cowblocks_tag(ip);
|
2018-03-14 14:15:30 +08:00
|
|
|
if (tip->i_cowfp && tip->i_cowfp->if_bytes)
|
2017-09-19 00:41:18 +08:00
|
|
|
xfs_inode_set_cowblocks_tag(tip);
|
|
|
|
else
|
|
|
|
xfs_inode_clear_cowblocks_tag(tip);
|
2016-10-04 00:11:42 +08:00
|
|
|
}
|
|
|
|
|
2013-08-12 18:49:48 +08:00
|
|
|
xfs_trans_log_inode(tp, ip, src_log_flags);
|
|
|
|
xfs_trans_log_inode(tp, tip, target_log_flags);
|
|
|
|
|
2017-08-30 01:08:39 +08:00
|
|
|
/*
|
|
|
|
* The extent forks have been swapped, but crc=1,rmapbt=0 filesystems
|
|
|
|
* have inode number owner values in the bmbt blocks that still refer to
|
|
|
|
* the old inode. Scan each bmbt to fix up the owner values with the
|
|
|
|
* inode number of the current inode.
|
|
|
|
*/
|
|
|
|
if (src_log_flags & XFS_ILOG_DOWNER) {
|
2017-08-30 01:08:40 +08:00
|
|
|
error = xfs_swap_change_owner(&tp, ip, tip);
|
2017-08-30 01:08:39 +08:00
|
|
|
if (error)
|
|
|
|
goto out_trans_cancel;
|
|
|
|
}
|
|
|
|
if (target_log_flags & XFS_ILOG_DOWNER) {
|
2017-08-30 01:08:40 +08:00
|
|
|
error = xfs_swap_change_owner(&tp, tip, ip);
|
2017-08-30 01:08:39 +08:00
|
|
|
if (error)
|
|
|
|
goto out_trans_cancel;
|
|
|
|
}
|
|
|
|
|
2013-08-12 18:49:48 +08:00
|
|
|
/*
|
|
|
|
* If this is a synchronous mount, make sure that the
|
|
|
|
* transaction goes to disk before returning to the user.
|
|
|
|
*/
|
2021-08-19 09:46:52 +08:00
|
|
|
if (xfs_has_wsync(mp))
|
2013-08-12 18:49:48 +08:00
|
|
|
xfs_trans_set_sync(tp);
|
|
|
|
|
2015-06-04 11:48:08 +08:00
|
|
|
error = xfs_trans_commit(tp);
|
2013-08-12 18:49:48 +08:00
|
|
|
|
|
|
|
trace_xfs_swap_extent_after(ip, 0);
|
|
|
|
trace_xfs_swap_extent_after(tip, 1);
|
|
|
|
|
2021-05-24 19:17:49 +08:00
|
|
|
out_unlock_ilock:
|
|
|
|
xfs_iunlock(ip, XFS_ILOCK_EXCL);
|
|
|
|
xfs_iunlock(tip, XFS_ILOCK_EXCL);
|
2016-11-30 11:33:25 +08:00
|
|
|
out_unlock:
|
2021-05-24 19:17:49 +08:00
|
|
|
filemap_invalidate_unlock_two(VFS_I(ip)->i_mapping,
|
|
|
|
VFS_I(tip)->i_mapping);
|
2016-11-30 11:33:25 +08:00
|
|
|
unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
|
2016-10-04 00:11:53 +08:00
|
|
|
return error;
|
2013-08-12 18:49:48 +08:00
|
|
|
|
|
|
|
out_trans_cancel:
|
2015-06-04 11:47:56 +08:00
|
|
|
xfs_trans_cancel(tp);
|
2021-05-24 19:17:49 +08:00
|
|
|
goto out_unlock_ilock;
|
2013-08-12 18:49:48 +08:00
|
|
|
}
|