OpenCloudOS-Kernel/fs/xfs/libxfs/xfs_inode_buf.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_icache.h"
#include "xfs_trans.h"
#include "xfs_ialloc.h"
#include "xfs_dir2.h"
#include <linux/iversion.h>

/*
 * If we are doing readahead on an inode buffer, we might be in log recovery
 * reading an inode allocation buffer that hasn't yet been replayed, and hence
 * has not had the inode cores stamped into it. Hence for readahead, the buffer
 * may be potentially invalid.
 *
 * If the readahead buffer is invalid, we need to mark it with an error and
 * clear the DONE status of the buffer so that a followup read will re-read it
 * from disk. We don't report the error otherwise to avoid warnings during log
 * recovery and we don't get unnecessary panics on debug kernels. We use EIO here
 * because all we want to do is say readahead failed; there is no-one to report
 * the error to, so this will distinguish it from a non-ra verifier failure.
 * Changes to this readahead error behaviour also need to be reflected in
 * xfs_dquot_buf_readahead_verify().
 */
static void
xfs_inode_buf_verify(
        struct xfs_buf *bp,
        bool readahead)
{
        struct xfs_mount *mp = bp->b_mount;
        xfs_agnumber_t agno;
        int i;
        int ni;

        /*
         * Validate the magic number and version of every inode in the buffer
         */
        agno = xfs_daddr_to_agno(mp, XFS_BUF_ADDR(bp));
        ni = XFS_BB_TO_FSB(mp, bp->b_length) * mp->m_sb.sb_inopblock;
        for (i = 0; i < ni; i++) {
                int di_ok;
                xfs_dinode_t *dip;
                xfs_agino_t unlinked_ino;

                dip = xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog));
                unlinked_ino = be32_to_cpu(dip->di_next_unlinked);
                di_ok = xfs_verify_magic16(bp, dip->di_magic) &&
                        xfs_dinode_good_version(&mp->m_sb, dip->di_version) &&
                        xfs_verify_agino_or_null(mp, agno, unlinked_ino);
                if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
                                XFS_ERRTAG_ITOBP_INOTOBP))) {
                        if (readahead) {
                                bp->b_flags &= ~XBF_DONE;
                                xfs_buf_ioerror(bp, -EIO);
                                return;
                        }
#ifdef DEBUG
                        xfs_alert(mp,
                                "bad inode magic/vsn daddr %lld #%d (magic=%x)",
                                (unsigned long long)bp->b_bn, i,
                                be16_to_cpu(dip->di_magic));
#endif
                        xfs_buf_verifier_error(bp, -EFSCORRUPTED,
                                        __func__, dip, sizeof(*dip),
                                        NULL);
                        return;
                }
        }
}

static void
xfs_inode_buf_read_verify(
        struct xfs_buf *bp)
{
        xfs_inode_buf_verify(bp, false);
}

static void
xfs_inode_buf_readahead_verify(
        struct xfs_buf *bp)
{
        xfs_inode_buf_verify(bp, true);
}

static void
xfs_inode_buf_write_verify(
        struct xfs_buf *bp)
{
        xfs_inode_buf_verify(bp, false);
}

const struct xfs_buf_ops xfs_inode_buf_ops = {
        .name = "xfs_inode",
        .magic16 = { cpu_to_be16(XFS_DINODE_MAGIC),
                     cpu_to_be16(XFS_DINODE_MAGIC) },
        .verify_read = xfs_inode_buf_read_verify,
        .verify_write = xfs_inode_buf_write_verify,
};

const struct xfs_buf_ops xfs_inode_buf_ra_ops = {
        .name = "xfs_inode_ra",
        .magic16 = { cpu_to_be16(XFS_DINODE_MAGIC),
                     cpu_to_be16(XFS_DINODE_MAGIC) },
        .verify_read = xfs_inode_buf_readahead_verify,
        .verify_write = xfs_inode_buf_write_verify,
};
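
/*
 * Illustrative sketch (not part of the original file): the _ra_ops table
 * above is meant for readahead reads where verifier failures must stay
 * silent, e.g. inode buffer readahead during log recovery. A hypothetical
 * caller would look roughly like:
 *
 *      xfs_buf_readahead(mp->m_ddev_targp, daddr, length,
 *                        &xfs_inode_buf_ra_ops);
 *
 * where daddr and length are assumed to come from the recovery item,
 * whereas a normal read that should report corruption passes
 * &xfs_inode_buf_ops instead.
 */
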
/*
 * This routine is called to map an inode to the buffer containing the on-disk
 * version of the inode. It returns a pointer to the buffer containing the
 * on-disk inode in the bpp parameter, and in the dipp parameter it returns a
 * pointer to the on-disk inode within that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and dipp are
 * undefined.
 */
int
xfs_imap_to_bp(
        struct xfs_mount *mp,
        struct xfs_trans *tp,
        struct xfs_imap *imap,
        struct xfs_dinode **dipp,
        struct xfs_buf **bpp,
        uint buf_flags)
{
        struct xfs_buf *bp;
        int error;

        buf_flags |= XBF_UNMAPPED;
        error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
                        (int)imap->im_len, buf_flags, &bp,
                        &xfs_inode_buf_ops);
        if (error) {
                ASSERT(error != -EAGAIN || (buf_flags & XBF_TRYLOCK));
                return error;
        }

        *bpp = bp;
        if (dipp)
                *dipp = xfs_buf_offset(bp, imap->im_boffset);
        return 0;
}
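
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * maps the inode with xfs_imap(), reads the cluster buffer through
 * xfs_imap_to_bp(), decodes the on-disk inode and then releases the
 * buffer. Variable names (ip, tp, error) are assumptions for the sketch:
 *
 *      struct xfs_buf *bp;
 *      struct xfs_dinode *dip;
 *
 *      error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0);
 *      if (error)
 *              return error;
 *      error = xfs_inode_from_disk(ip, dip);
 *      xfs_trans_brelse(tp, bp);
 */
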
/* Convert an ondisk timestamp to an incore timestamp. */
struct timespec64
xfs_inode_from_disk_ts(
        const xfs_timestamp_t ts)
{
        struct timespec64 tv;
        struct xfs_legacy_timestamp *lts;

        lts = (struct xfs_legacy_timestamp *)&ts;
        tv.tv_sec = (int)be32_to_cpu(lts->t_sec);
        tv.tv_nsec = (int)be32_to_cpu(lts->t_nsec);
        return tv;
}

int
xfs_inode_from_disk(
        struct xfs_inode *ip,
        struct xfs_dinode *from)
{
        struct xfs_icdinode *to = &ip->i_d;
        struct inode *inode = VFS_I(ip);
        int error;
        xfs_failaddr_t fa;

        ASSERT(ip->i_cowfp == NULL);
        ASSERT(ip->i_afp == NULL);

        fa = xfs_dinode_verify(ip->i_mount, ip->i_ino, from);
        if (fa) {
                xfs_inode_verifier_error(ip, -EFSCORRUPTED, "dinode", from,
                                sizeof(*from), fa);
                return -EFSCORRUPTED;
        }

        /*
         * First get the permanent information that is needed to allocate an
         * inode. If the inode is unused, mode is zero and we shouldn't mess
         * with the uninitialized part of it.
         */
        to->di_flushiter = be16_to_cpu(from->di_flushiter);
        inode->i_generation = be32_to_cpu(from->di_gen);
        inode->i_mode = be16_to_cpu(from->di_mode);
        if (!inode->i_mode)
                return 0;

        /*
         * Convert v1 inodes immediately to v2 inode format as this is the
         * minimum inode version format we support in the rest of the code.
         * They will also be unconditionally written back to disk as v2 inodes.
         */
        if (unlikely(from->di_version == 1)) {
                set_nlink(inode, be16_to_cpu(from->di_onlink));
                to->di_projid = 0;
        } else {
                set_nlink(inode, be32_to_cpu(from->di_nlink));
                to->di_projid = (prid_t)be16_to_cpu(from->di_projid_hi) << 16 |
                                be16_to_cpu(from->di_projid_lo);
        }

        i_uid_write(inode, be32_to_cpu(from->di_uid));
        i_gid_write(inode, be32_to_cpu(from->di_gid));

        /*
         * Time is signed, so need to convert to signed 32 bit before
         * storing in inode timestamp which may be 64 bit. Otherwise
         * a time before epoch is converted to a time long after epoch
         * on 64 bit systems.
         */
        inode->i_atime = xfs_inode_from_disk_ts(from->di_atime);
        inode->i_mtime = xfs_inode_from_disk_ts(from->di_mtime);
        inode->i_ctime = xfs_inode_from_disk_ts(from->di_ctime);

        to->di_size = be64_to_cpu(from->di_size);
        to->di_nblocks = be64_to_cpu(from->di_nblocks);
        to->di_extsize = be32_to_cpu(from->di_extsize);
        to->di_forkoff = from->di_forkoff;
        to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
        to->di_dmstate = be16_to_cpu(from->di_dmstate);
        to->di_flags = be16_to_cpu(from->di_flags);

        if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) {
                inode_set_iversion_queried(inode,
                                be64_to_cpu(from->di_changecount));
                to->di_crtime = xfs_inode_from_disk_ts(from->di_crtime);
                to->di_flags2 = be64_to_cpu(from->di_flags2);
                to->di_cowextsize = be32_to_cpu(from->di_cowextsize);
        }

        error = xfs_iformat_data_fork(ip, from);
        if (error)
                return error;
        if (from->di_forkoff) {
                error = xfs_iformat_attr_fork(ip, from);
                if (error)
                        goto out_destroy_data_fork;
        }
        if (xfs_is_reflink_inode(ip))
                xfs_ifork_init_cow(ip);
        return 0;

out_destroy_data_fork:
        xfs_idestroy_fork(&ip->i_df);
        return error;
}

/* Convert an incore timestamp to an ondisk timestamp. */
static inline xfs_timestamp_t
xfs_inode_to_disk_ts(
        const struct timespec64 tv)
{
        struct xfs_legacy_timestamp *lts;
        xfs_timestamp_t ts;

        lts = (struct xfs_legacy_timestamp *)&ts;
        lts->t_sec = cpu_to_be32(tv.tv_sec);
        lts->t_nsec = cpu_to_be32(tv.tv_nsec);
        return ts;
}

void
xfs_inode_to_disk(
        struct xfs_inode *ip,
        struct xfs_dinode *to,
        xfs_lsn_t lsn)
{
        struct xfs_icdinode *from = &ip->i_d;
        struct inode *inode = VFS_I(ip);

        to->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
        to->di_onlink = 0;

        to->di_format = xfs_ifork_format(&ip->i_df);
        to->di_uid = cpu_to_be32(i_uid_read(inode));
        to->di_gid = cpu_to_be32(i_gid_read(inode));
        to->di_projid_lo = cpu_to_be16(from->di_projid & 0xffff);
        to->di_projid_hi = cpu_to_be16(from->di_projid >> 16);

        memset(to->di_pad, 0, sizeof(to->di_pad));
        to->di_atime = xfs_inode_to_disk_ts(inode->i_atime);
        to->di_mtime = xfs_inode_to_disk_ts(inode->i_mtime);
        to->di_ctime = xfs_inode_to_disk_ts(inode->i_ctime);
        to->di_nlink = cpu_to_be32(inode->i_nlink);
        to->di_gen = cpu_to_be32(inode->i_generation);
        to->di_mode = cpu_to_be16(inode->i_mode);

        to->di_size = cpu_to_be64(from->di_size);
        to->di_nblocks = cpu_to_be64(from->di_nblocks);
        to->di_extsize = cpu_to_be32(from->di_extsize);
        to->di_nextents = cpu_to_be32(xfs_ifork_nextents(&ip->i_df));
        to->di_anextents = cpu_to_be16(xfs_ifork_nextents(ip->i_afp));
        to->di_forkoff = from->di_forkoff;
        to->di_aformat = xfs_ifork_format(ip->i_afp);
        to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
        to->di_dmstate = cpu_to_be16(from->di_dmstate);
        to->di_flags = cpu_to_be16(from->di_flags);

        if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) {
                to->di_version = 3;
                to->di_changecount = cpu_to_be64(inode_peek_iversion(inode));
                to->di_crtime = xfs_inode_to_disk_ts(from->di_crtime);
                to->di_flags2 = cpu_to_be64(from->di_flags2);
                to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
                to->di_ino = cpu_to_be64(ip->i_ino);
                to->di_lsn = cpu_to_be64(lsn);
                memset(to->di_pad2, 0, sizeof(to->di_pad2));
                uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
                to->di_flushiter = 0;
        } else {
                to->di_version = 2;
                to->di_flushiter = cpu_to_be16(from->di_flushiter);
        }
}
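
/*
 * Illustrative sketch (not part of the original file): xfs_inode_to_disk()
 * is the flush-side counterpart of xfs_inode_from_disk(). A hypothetical
 * inode flush path would locate the on-disk inode inside its cluster
 * buffer and format the incore state back into it, roughly:
 *
 *      struct xfs_dinode *dip;
 *
 *      dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);
 *      xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);
 *      xfs_dinode_calc_crc(mp, dip);
 *
 * where iip is assumed to be the inode's log item; the CRC is recomputed
 * after the dinode contents change (see xfs_dinode_calc_crc() below).
 */
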
static xfs_failaddr_t
xfs_dinode_verify_fork(
        struct xfs_dinode *dip,
        struct xfs_mount *mp,
        int whichfork)
{
        uint32_t di_nextents = XFS_DFORK_NEXTENTS(dip, whichfork);

        switch (XFS_DFORK_FORMAT(dip, whichfork)) {
        case XFS_DINODE_FMT_LOCAL:
                /*
                 * no local regular files yet
                 */
                if (whichfork == XFS_DATA_FORK) {
                        if (S_ISREG(be16_to_cpu(dip->di_mode)))
                                return __this_address;
                        if (be64_to_cpu(dip->di_size) >
                                        XFS_DFORK_SIZE(dip, mp, whichfork))
                                return __this_address;
                }
                if (di_nextents)
                        return __this_address;
                break;
        case XFS_DINODE_FMT_EXTENTS:
                if (di_nextents > XFS_DFORK_MAXEXT(dip, mp, whichfork))
                        return __this_address;
                break;
        case XFS_DINODE_FMT_BTREE:
                if (whichfork == XFS_ATTR_FORK) {
                        if (di_nextents > MAXAEXTNUM)
                                return __this_address;
                } else if (di_nextents > MAXEXTNUM) {
                        return __this_address;
                }
                break;
        default:
                return __this_address;
        }
        return NULL;
}

static xfs_failaddr_t
xfs_dinode_verify_forkoff(
        struct xfs_dinode *dip,
        struct xfs_mount *mp)
{
        if (!dip->di_forkoff)
                return NULL;

        switch (dip->di_format) {
        case XFS_DINODE_FMT_DEV:
                if (dip->di_forkoff != (roundup(sizeof(xfs_dev_t), 8) >> 3))
                        return __this_address;
                break;
        case XFS_DINODE_FMT_LOCAL: /* fall through ... */
        case XFS_DINODE_FMT_EXTENTS: /* fall through ... */
        case XFS_DINODE_FMT_BTREE:
                if (dip->di_forkoff >= (XFS_LITINO(mp) >> 3))
                        return __this_address;
                break;
        default:
                return __this_address;
        }
        return NULL;
}

xfs_failaddr_t
xfs_dinode_verify(
        struct xfs_mount *mp,
        xfs_ino_t ino,
        struct xfs_dinode *dip)
{
        xfs_failaddr_t fa;
        uint16_t mode;
        uint16_t flags;
        uint64_t flags2;
        uint64_t di_size;

        if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))
                return __this_address;

        /* Verify v3 integrity information first */
        if (dip->di_version >= 3) {
                if (!xfs_sb_version_has_v3inode(&mp->m_sb))
                        return __this_address;
                if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
                                      XFS_DINODE_CRC_OFF))
                        return __this_address;
                if (be64_to_cpu(dip->di_ino) != ino)
                        return __this_address;
                if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_meta_uuid))
                        return __this_address;
        }

        /* don't allow invalid i_size */
        di_size = be64_to_cpu(dip->di_size);
        if (di_size & (1ULL << 63))
                return __this_address;

        mode = be16_to_cpu(dip->di_mode);
        if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN)
                return __this_address;

        /* No zero-length symlinks/dirs. */
        if ((S_ISLNK(mode) || S_ISDIR(mode)) && di_size == 0)
                return __this_address;

        /* Fork checks carried over from xfs_iformat_fork */
        if (mode &&
            be32_to_cpu(dip->di_nextents) + be16_to_cpu(dip->di_anextents) >
                        be64_to_cpu(dip->di_nblocks))
                return __this_address;
        if (mode && XFS_DFORK_BOFF(dip) > mp->m_sb.sb_inodesize)
                return __this_address;

        flags = be16_to_cpu(dip->di_flags);

        if (mode && (flags & XFS_DIFLAG_REALTIME) && !mp->m_rtdev_targp)
                return __this_address;

        /* check for illegal values of forkoff */
        fa = xfs_dinode_verify_forkoff(dip, mp);
        if (fa)
                return fa;

        /* Do we have appropriate data fork formats for the mode? */
        switch (mode & S_IFMT) {
        case S_IFIFO:
        case S_IFCHR:
        case S_IFBLK:
        case S_IFSOCK:
                if (dip->di_format != XFS_DINODE_FMT_DEV)
                        return __this_address;
                break;
        case S_IFREG:
        case S_IFLNK:
        case S_IFDIR:
                fa = xfs_dinode_verify_fork(dip, mp, XFS_DATA_FORK);
                if (fa)
                        return fa;
                break;
        case 0:
                /* Uninitialized inode ok. */
                break;
        default:
                return __this_address;
        }

        if (dip->di_forkoff) {
                fa = xfs_dinode_verify_fork(dip, mp, XFS_ATTR_FORK);
                if (fa)
                        return fa;
        } else {
                /*
                 * If there is no fork offset, this may be a freshly-made inode
                 * in a new disk cluster, in which case di_aformat is zeroed.
                 * Otherwise, such an inode must be in EXTENTS format; this goes
                 * for freed inodes as well.
                 */
                switch (dip->di_aformat) {
                case 0:
                case XFS_DINODE_FMT_EXTENTS:
                        break;
                default:
                        return __this_address;
                }
                if (dip->di_anextents)
                        return __this_address;
        }

        /* extent size hint validation */
        fa = xfs_inode_validate_extsize(mp, be32_to_cpu(dip->di_extsize),
                        mode, flags);
        if (fa)
                return fa;

        /* only version 3 or greater inodes are extensively verified here */
        if (dip->di_version < 3)
                return NULL;

        flags2 = be64_to_cpu(dip->di_flags2);

        /* don't allow reflink/cowextsize if we don't have reflink */
        if ((flags2 & (XFS_DIFLAG2_REFLINK | XFS_DIFLAG2_COWEXTSIZE)) &&
            !xfs_sb_version_hasreflink(&mp->m_sb))
                return __this_address;

        /* only regular files get reflink */
        if ((flags2 & XFS_DIFLAG2_REFLINK) && (mode & S_IFMT) != S_IFREG)
                return __this_address;

        /* don't let reflink and realtime mix */
        if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags & XFS_DIFLAG_REALTIME))
                return __this_address;

        /* don't let reflink and dax mix */
        if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags2 & XFS_DIFLAG2_DAX))
                return __this_address;

        /* COW extent size hint validation */
        fa = xfs_inode_validate_cowextsize(mp, be32_to_cpu(dip->di_cowextsize),
                        mode, flags, flags2);
        if (fa)
                return fa;

        return NULL;
}

void
xfs_dinode_calc_crc(
        struct xfs_mount *mp,
        struct xfs_dinode *dip)
{
        uint32_t crc;

        if (dip->di_version < 3)
                return;

        ASSERT(xfs_sb_version_hascrc(&mp->m_sb));
        crc = xfs_start_cksum_update((char *)dip, mp->m_sb.sb_inodesize,
                        XFS_DINODE_CRC_OFF);
        dip->di_crc = xfs_end_cksum(crc);
}

/*
 * Validate di_extsize hint.
 *
 * The rules are documented at xfs_ioctl_setattr_check_extsize().
 * These functions must be kept in sync with each other.
 */
xfs_failaddr_t
xfs_inode_validate_extsize(
        struct xfs_mount *mp,
        uint32_t extsize,
        uint16_t mode,
        uint16_t flags)
{
        bool rt_flag;
        bool hint_flag;
        bool inherit_flag;
        uint32_t extsize_bytes;
        uint32_t blocksize_bytes;

        rt_flag = (flags & XFS_DIFLAG_REALTIME);
        hint_flag = (flags & XFS_DIFLAG_EXTSIZE);
        inherit_flag = (flags & XFS_DIFLAG_EXTSZINHERIT);
        extsize_bytes = XFS_FSB_TO_B(mp, extsize);

        if (rt_flag)
                blocksize_bytes = mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog;
        else
                blocksize_bytes = mp->m_sb.sb_blocksize;

        if ((hint_flag || inherit_flag) && !(S_ISDIR(mode) || S_ISREG(mode)))
                return __this_address;

        if (hint_flag && !S_ISREG(mode))
                return __this_address;

        if (inherit_flag && !S_ISDIR(mode))
                return __this_address;

        if ((hint_flag || inherit_flag) && extsize == 0)
                return __this_address;

        /* free inodes get flags set to zero but extsize remains */
        if (mode && !(hint_flag || inherit_flag) && extsize != 0)
                return __this_address;

        if (extsize_bytes % blocksize_bytes)
                return __this_address;

        if (extsize > MAXEXTLEN)
                return __this_address;

        if (!rt_flag && extsize > mp->m_sb.sb_agblocks / 2)
                return __this_address;

        return NULL;
}
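
/*
 * Worked example (added for illustration, not part of the original file):
 * on a filesystem with 4 KiB blocks (sb_blocksize = 4096) and
 * XFS_DIFLAG_EXTSIZE set on a regular file, an extsize hint of 16
 * filesystem blocks is 64 KiB, which is a whole multiple of the block
 * size, below MAXEXTLEN and (normally) below half the AG size, so the
 * checks above return NULL. The same non-zero hint on an in-use inode
 * that carries neither XFS_DIFLAG_EXTSIZE nor XFS_DIFLAG_EXTSZINHERIT
 * trips the "free inodes" check and returns __this_address.
 */
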
/*
 * Validate di_cowextsize hint.
 *
 * The rules are documented at xfs_ioctl_setattr_check_cowextsize().
 * These functions must be kept in sync with each other.
 */
xfs_failaddr_t
xfs_inode_validate_cowextsize(
        struct xfs_mount *mp,
        uint32_t cowextsize,
        uint16_t mode,
        uint16_t flags,
        uint64_t flags2)
{
        bool rt_flag;
        bool hint_flag;
        uint32_t cowextsize_bytes;

        rt_flag = (flags & XFS_DIFLAG_REALTIME);
        hint_flag = (flags2 & XFS_DIFLAG2_COWEXTSIZE);
        cowextsize_bytes = XFS_FSB_TO_B(mp, cowextsize);

        if (hint_flag && !xfs_sb_version_hasreflink(&mp->m_sb))
                return __this_address;

        if (hint_flag && !(S_ISDIR(mode) || S_ISREG(mode)))
                return __this_address;

        if (hint_flag && cowextsize == 0)
                return __this_address;

        /* free inodes get flags set to zero but cowextsize remains */
        if (mode && !hint_flag && cowextsize != 0)
                return __this_address;

        if (hint_flag && rt_flag)
                return __this_address;

        if (cowextsize_bytes % mp->m_sb.sb_blocksize)
                return __this_address;

        if (cowextsize > MAXEXTLEN)
                return __this_address;

        if (cowextsize > mp->m_sb.sb_agblocks / 2)
                return __this_address;

        return NULL;
}