// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_icache.h"
#include "xfs_trans.h"
#include "xfs_ialloc.h"
#include "xfs_dir2.h"

#include <linux/iversion.h>

/*
 * If we are doing readahead on an inode buffer, we might be in log recovery
 * reading an inode allocation buffer that hasn't yet been replayed, and hence
 * has not had the inode cores stamped into it. Hence for readahead, the buffer
 * may be potentially invalid.
 *
 * If the readahead buffer is invalid, we need to mark it with an error and
 * clear the DONE status of the buffer so that a followup read will re-read it
 * from disk. We don't report the error otherwise to avoid warnings during log
 * recovery and we don't get unnecessary panics on debug kernels. We use EIO here
 * because all we want to do is say readahead failed; there is no-one to report
 * the error to, so this will distinguish it from a non-ra verifier failure.
 * Changes to this readahead error behaviour also need to be reflected in
 * xfs_dquot_buf_readahead_verify().
 */
static void
xfs_inode_buf_verify(
	struct xfs_buf	*bp,
	bool		readahead)
{
	struct xfs_mount *mp = bp->b_mount;
	xfs_agnumber_t	agno;
	int		i;
	int		ni;

	/*
	 * Validate the magic number and version of every inode in the buffer
	 */
	agno = xfs_daddr_to_agno(mp, XFS_BUF_ADDR(bp));
	ni = XFS_BB_TO_FSB(mp, bp->b_length) * mp->m_sb.sb_inopblock;
	for (i = 0; i < ni; i++) {
		int		di_ok;
		xfs_dinode_t	*dip;
		xfs_agino_t	unlinked_ino;

		dip = xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog));
		unlinked_ino = be32_to_cpu(dip->di_next_unlinked);
		di_ok = xfs_verify_magic16(bp, dip->di_magic) &&
			xfs_dinode_good_version(&mp->m_sb, dip->di_version) &&
			xfs_verify_agino_or_null(mp, agno, unlinked_ino);
		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
						XFS_ERRTAG_ITOBP_INOTOBP))) {
			if (readahead) {
				bp->b_flags &= ~XBF_DONE;
				xfs_buf_ioerror(bp, -EIO);
				return;
			}

#ifdef DEBUG
			xfs_alert(mp,
				"bad inode magic/vsn daddr %lld #%d (magic=%x)",
				(unsigned long long)bp->b_bn, i,
				be16_to_cpu(dip->di_magic));
#endif
			xfs_buf_verifier_error(bp, -EFSCORRUPTED,
					__func__, dip, sizeof(*dip),
					NULL);
			return;
		}
	}
}

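/*
 * Buffer verifier endpoints. Normal reads and writes run the full check
 * above; only the readahead path takes the tolerant branch that clears
 * XBF_DONE and marks the buffer with -EIO instead of reporting corruption.
 */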
static void
xfs_inode_buf_read_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, false);
}

static void
xfs_inode_buf_readahead_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, true);
}

static void
xfs_inode_buf_write_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, false);
}

const struct xfs_buf_ops xfs_inode_buf_ops = {
	.name = "xfs_inode",
	.magic16 = { cpu_to_be16(XFS_DINODE_MAGIC),
		     cpu_to_be16(XFS_DINODE_MAGIC) },
	.verify_read = xfs_inode_buf_read_verify,
	.verify_write = xfs_inode_buf_write_verify,
};

const struct xfs_buf_ops xfs_inode_buf_ra_ops = {
	.name = "xfs_inode_ra",
	.magic16 = { cpu_to_be16(XFS_DINODE_MAGIC),
		     cpu_to_be16(XFS_DINODE_MAGIC) },
	.verify_read = xfs_inode_buf_readahead_verify,
	.verify_write = xfs_inode_buf_write_verify,
};

/*
 * This routine is called to map an inode to the buffer containing the on-disk
 * version of the inode. It returns a pointer to the buffer containing the
 * on-disk inode in the bpp parameter, and in the dipp parameter it returns a
 * pointer to the on-disk inode within that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and dipp are
 * undefined.
 */
int
xfs_imap_to_bp(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_imap		*imap,
	struct xfs_dinode	**dipp,
	struct xfs_buf		**bpp,
	uint			buf_flags)
{
	struct xfs_buf		*bp;
	int			error;

	buf_flags |= XBF_UNMAPPED;
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
				   (int)imap->im_len, buf_flags, &bp,
				   &xfs_inode_buf_ops);
	if (error) {
		ASSERT(error != -EAGAIN || (buf_flags & XBF_TRYLOCK));
		return error;
	}

	*bpp = bp;
	if (dipp)
		*dipp = xfs_buf_offset(bp, imap->im_boffset);
	return 0;
}

/* Convert an ondisk timestamp to an incore timestamp. */
struct timespec64
xfs_inode_from_disk_ts(
	const xfs_timestamp_t	ts)
{
	struct timespec64	tv;
	struct xfs_legacy_timestamp *lts;

	lts = (struct xfs_legacy_timestamp *)&ts;
	tv.tv_sec = (int)be32_to_cpu(lts->t_sec);
	tv.tv_nsec = (int)be32_to_cpu(lts->t_nsec);

	return tv;
}

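/*
 * Pull the on-disk inode into the incore xfs_icdinode and VFS inode,
 * verifying it first and then setting up the data and attr forks.
 * Returns 0 on success or -EFSCORRUPTED if the dinode fails verification.
 */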
int
xfs_inode_from_disk(
	struct xfs_inode	*ip,
	struct xfs_dinode	*from)
{
	struct xfs_icdinode	*to = &ip->i_d;
	struct inode		*inode = VFS_I(ip);
	int			error;
	xfs_failaddr_t		fa;

	ASSERT(ip->i_cowfp == NULL);
	ASSERT(ip->i_afp == NULL);

	fa = xfs_dinode_verify(ip->i_mount, ip->i_ino, from);
	if (fa) {
		xfs_inode_verifier_error(ip, -EFSCORRUPTED, "dinode", from,
				sizeof(*from), fa);
		return -EFSCORRUPTED;
	}

	/*
	 * First get the permanent information that is needed to allocate an
	 * inode. If the inode is unused, mode is zero and we shouldn't mess
	 * with the uninitialized part of it.
	 */
	to->di_flushiter = be16_to_cpu(from->di_flushiter);
	inode->i_generation = be32_to_cpu(from->di_gen);
	inode->i_mode = be16_to_cpu(from->di_mode);
	if (!inode->i_mode)
		return 0;

	/*
	 * Convert v1 inodes immediately to v2 inode format as this is the
	 * minimum inode version format we support in the rest of the code.
	 * They will also be unconditionally written back to disk as v2 inodes.
	 */
	if (unlikely(from->di_version == 1)) {
		set_nlink(inode, be16_to_cpu(from->di_onlink));
		to->di_projid = 0;
	} else {
		set_nlink(inode, be32_to_cpu(from->di_nlink));
		to->di_projid = (prid_t)be16_to_cpu(from->di_projid_hi) << 16 |
					be16_to_cpu(from->di_projid_lo);
	}

	i_uid_write(inode, be32_to_cpu(from->di_uid));
	i_gid_write(inode, be32_to_cpu(from->di_gid));

	/*
	 * Time is signed, so need to convert to signed 32 bit before
	 * storing in inode timestamp which may be 64 bit. Otherwise
	 * a time before epoch is converted to a time long after epoch
	 * on 64 bit systems.
	 */
	inode->i_atime = xfs_inode_from_disk_ts(from->di_atime);
	inode->i_mtime = xfs_inode_from_disk_ts(from->di_mtime);
	inode->i_ctime = xfs_inode_from_disk_ts(from->di_ctime);

	to->di_size = be64_to_cpu(from->di_size);
	to->di_nblocks = be64_to_cpu(from->di_nblocks);
	to->di_extsize = be32_to_cpu(from->di_extsize);
	to->di_forkoff = from->di_forkoff;
	to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
	to->di_dmstate = be16_to_cpu(from->di_dmstate);
	to->di_flags = be16_to_cpu(from->di_flags);

	if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) {
		inode_set_iversion_queried(inode,
					   be64_to_cpu(from->di_changecount));
		to->di_crtime = xfs_inode_from_disk_ts(from->di_crtime);
		to->di_flags2 = be64_to_cpu(from->di_flags2);
		to->di_cowextsize = be32_to_cpu(from->di_cowextsize);
	}

	error = xfs_iformat_data_fork(ip, from);
	if (error)
		return error;
	if (from->di_forkoff) {
		error = xfs_iformat_attr_fork(ip, from);
		if (error)
			goto out_destroy_data_fork;
	}
	if (xfs_is_reflink_inode(ip))
		xfs_ifork_init_cow(ip);
	return 0;

out_destroy_data_fork:
	xfs_idestroy_fork(&ip->i_df);
	return error;
}

/* Convert an incore timestamp to an ondisk timestamp. */
static inline xfs_timestamp_t
xfs_inode_to_disk_ts(
	const struct timespec64	tv)
{
	struct xfs_legacy_timestamp *lts;
	xfs_timestamp_t		ts;

	lts = (struct xfs_legacy_timestamp *)&ts;
	lts->t_sec = cpu_to_be32(tv.tv_sec);
	lts->t_nsec = cpu_to_be32(tv.tv_nsec);

	return ts;
}

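/*
 * Copy the incore inode and VFS inode state back into the on-disk dinode,
 * stamping the version, inode number, LSN and metadata UUID for v3 inodes.
 */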
void
xfs_inode_to_disk(
	struct xfs_inode	*ip,
	struct xfs_dinode	*to,
	xfs_lsn_t		lsn)
{
	struct xfs_icdinode	*from = &ip->i_d;
	struct inode		*inode = VFS_I(ip);

	to->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
	to->di_onlink = 0;

	to->di_format = xfs_ifork_format(&ip->i_df);
	to->di_uid = cpu_to_be32(i_uid_read(inode));
	to->di_gid = cpu_to_be32(i_gid_read(inode));
	to->di_projid_lo = cpu_to_be16(from->di_projid & 0xffff);
	to->di_projid_hi = cpu_to_be16(from->di_projid >> 16);

	memset(to->di_pad, 0, sizeof(to->di_pad));
	to->di_atime = xfs_inode_to_disk_ts(inode->i_atime);
	to->di_mtime = xfs_inode_to_disk_ts(inode->i_mtime);
	to->di_ctime = xfs_inode_to_disk_ts(inode->i_ctime);
	to->di_nlink = cpu_to_be32(inode->i_nlink);
	to->di_gen = cpu_to_be32(inode->i_generation);
	to->di_mode = cpu_to_be16(inode->i_mode);

	to->di_size = cpu_to_be64(from->di_size);
	to->di_nblocks = cpu_to_be64(from->di_nblocks);
	to->di_extsize = cpu_to_be32(from->di_extsize);
	to->di_nextents = cpu_to_be32(xfs_ifork_nextents(&ip->i_df));
	to->di_anextents = cpu_to_be16(xfs_ifork_nextents(ip->i_afp));
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = xfs_ifork_format(ip->i_afp);
	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
	to->di_dmstate = cpu_to_be16(from->di_dmstate);
	to->di_flags = cpu_to_be16(from->di_flags);

	if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) {
		to->di_version = 3;
		to->di_changecount = cpu_to_be64(inode_peek_iversion(inode));
		to->di_crtime = xfs_inode_to_disk_ts(from->di_crtime);
		to->di_flags2 = cpu_to_be64(from->di_flags2);
		to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
		to->di_ino = cpu_to_be64(ip->i_ino);
		to->di_lsn = cpu_to_be64(lsn);
		memset(to->di_pad2, 0, sizeof(to->di_pad2));
		uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
		to->di_flushiter = 0;
	} else {
		to->di_version = 2;
		to->di_flushiter = cpu_to_be16(from->di_flushiter);
	}
}

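/*
 * Sanity check a single fork of an on-disk inode: local-format data forks
 * are not allowed for regular files and must fit within the fork area, and
 * extent/btree format forks must not claim more extents than the fork can
 * hold.
 */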
static xfs_failaddr_t
xfs_dinode_verify_fork(
	struct xfs_dinode	*dip,
	struct xfs_mount	*mp,
	int			whichfork)
{
	uint32_t		di_nextents = XFS_DFORK_NEXTENTS(dip, whichfork);

	switch (XFS_DFORK_FORMAT(dip, whichfork)) {
	case XFS_DINODE_FMT_LOCAL:
		/*
		 * no local regular files yet
		 */
		if (whichfork == XFS_DATA_FORK) {
			if (S_ISREG(be16_to_cpu(dip->di_mode)))
				return __this_address;
			if (be64_to_cpu(dip->di_size) >
					XFS_DFORK_SIZE(dip, mp, whichfork))
				return __this_address;
		}
		if (di_nextents)
			return __this_address;
		break;
	case XFS_DINODE_FMT_EXTENTS:
		if (di_nextents > XFS_DFORK_MAXEXT(dip, mp, whichfork))
			return __this_address;
		break;
	case XFS_DINODE_FMT_BTREE:
		if (whichfork == XFS_ATTR_FORK) {
			if (di_nextents > MAXAEXTNUM)
				return __this_address;
		} else if (di_nextents > MAXEXTNUM) {
			return __this_address;
		}
		break;
	default:
		return __this_address;
	}
	return NULL;
}

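/*
 * Check that di_forkoff is consistent with the data fork format: fixed at
 * the rounded device-number size for DEV inodes, and within the inode
 * literal area for local/extents/btree inodes.
 */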
static xfs_failaddr_t
xfs_dinode_verify_forkoff(
	struct xfs_dinode	*dip,
	struct xfs_mount	*mp)
{
	if (!dip->di_forkoff)
		return NULL;

	switch (dip->di_format) {
	case XFS_DINODE_FMT_DEV:
		if (dip->di_forkoff != (roundup(sizeof(xfs_dev_t), 8) >> 3))
			return __this_address;
		break;
	case XFS_DINODE_FMT_LOCAL:	/* fall through ... */
	case XFS_DINODE_FMT_EXTENTS:	/* fall through ... */
	case XFS_DINODE_FMT_BTREE:
		if (dip->di_forkoff >= (XFS_LITINO(mp) >> 3))
			return __this_address;
		break;
	default:
		return __this_address;
	}
	return NULL;
}

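/*
 * Verify an on-disk inode. Returns NULL if the inode passes all checks,
 * otherwise the address of the failing check for error reporting.
 */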
xfs_failaddr_t
xfs_dinode_verify(
	struct xfs_mount	*mp,
	xfs_ino_t		ino,
	struct xfs_dinode	*dip)
{
	xfs_failaddr_t		fa;
	uint16_t		mode;
	uint16_t		flags;
	uint64_t		flags2;
	uint64_t		di_size;

	if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))
		return __this_address;

	/* Verify v3 integrity information first */
	if (dip->di_version >= 3) {
		if (!xfs_sb_version_has_v3inode(&mp->m_sb))
			return __this_address;
		if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
				      XFS_DINODE_CRC_OFF))
			return __this_address;
		if (be64_to_cpu(dip->di_ino) != ino)
			return __this_address;
		if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_meta_uuid))
			return __this_address;
	}

	/* don't allow invalid i_size */
	di_size = be64_to_cpu(dip->di_size);
	if (di_size & (1ULL << 63))
		return __this_address;

	mode = be16_to_cpu(dip->di_mode);
	if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN)
		return __this_address;

	/* No zero-length symlinks/dirs. */
	if ((S_ISLNK(mode) || S_ISDIR(mode)) && di_size == 0)
		return __this_address;

	/* Fork checks carried over from xfs_iformat_fork */
	if (mode &&
	    be32_to_cpu(dip->di_nextents) + be16_to_cpu(dip->di_anextents) >
			be64_to_cpu(dip->di_nblocks))
		return __this_address;

	if (mode && XFS_DFORK_BOFF(dip) > mp->m_sb.sb_inodesize)
		return __this_address;

	flags = be16_to_cpu(dip->di_flags);

	if (mode && (flags & XFS_DIFLAG_REALTIME) && !mp->m_rtdev_targp)
		return __this_address;

	/* check for illegal values of forkoff */
	fa = xfs_dinode_verify_forkoff(dip, mp);
	if (fa)
		return fa;

	/* Do we have appropriate data fork formats for the mode? */
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		if (dip->di_format != XFS_DINODE_FMT_DEV)
			return __this_address;
		break;
	case S_IFREG:
	case S_IFLNK:
	case S_IFDIR:
		fa = xfs_dinode_verify_fork(dip, mp, XFS_DATA_FORK);
		if (fa)
			return fa;
		break;
	case 0:
		/* Uninitialized inode ok. */
		break;
	default:
		return __this_address;
	}

	if (dip->di_forkoff) {
		fa = xfs_dinode_verify_fork(dip, mp, XFS_ATTR_FORK);
		if (fa)
			return fa;
	} else {
		/*
		 * If there is no fork offset, this may be a freshly-made inode
		 * in a new disk cluster, in which case di_aformat is zeroed.
		 * Otherwise, such an inode must be in EXTENTS format; this goes
		 * for freed inodes as well.
		 */
		switch (dip->di_aformat) {
		case 0:
		case XFS_DINODE_FMT_EXTENTS:
			break;
		default:
			return __this_address;
		}
		if (dip->di_anextents)
			return __this_address;
	}

	/* extent size hint validation */
	fa = xfs_inode_validate_extsize(mp, be32_to_cpu(dip->di_extsize),
			mode, flags);
	if (fa)
		return fa;

	/* only version 3 or greater inodes are extensively verified here */
	if (dip->di_version < 3)
		return NULL;

	flags2 = be64_to_cpu(dip->di_flags2);

	/* don't allow reflink/cowextsize if we don't have reflink */
	if ((flags2 & (XFS_DIFLAG2_REFLINK | XFS_DIFLAG2_COWEXTSIZE)) &&
	    !xfs_sb_version_hasreflink(&mp->m_sb))
		return __this_address;

	/* only regular files get reflink */
	if ((flags2 & XFS_DIFLAG2_REFLINK) && (mode & S_IFMT) != S_IFREG)
		return __this_address;

	/* don't let reflink and realtime mix */
	if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags & XFS_DIFLAG_REALTIME))
		return __this_address;

	/* don't let reflink and dax mix */
	if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags2 & XFS_DIFLAG2_DAX))
		return __this_address;

	/* COW extent size hint validation */
	fa = xfs_inode_validate_cowextsize(mp, be32_to_cpu(dip->di_cowextsize),
			mode, flags, flags2);
	if (fa)
		return fa;

	return NULL;
}

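/*
 * Recompute the inode CRC over the whole on-disk inode. Pre-v3 inodes carry
 * no CRC, so this is a no-op for them.
 */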
void
xfs_dinode_calc_crc(
	struct xfs_mount	*mp,
	struct xfs_dinode	*dip)
{
	uint32_t		crc;

	if (dip->di_version < 3)
		return;

	ASSERT(xfs_sb_version_hascrc(&mp->m_sb));
	crc = xfs_start_cksum_update((char *)dip, mp->m_sb.sb_inodesize,
			      XFS_DINODE_CRC_OFF);
	dip->di_crc = xfs_end_cksum(crc);
}

/*
 * Validate di_extsize hint.
 *
 * The rules are documented at xfs_ioctl_setattr_check_extsize().
 * These functions must be kept in sync with each other.
 */
xfs_failaddr_t
xfs_inode_validate_extsize(
	struct xfs_mount		*mp,
	uint32_t			extsize,
	uint16_t			mode,
	uint16_t			flags)
{
	bool				rt_flag;
	bool				hint_flag;
	bool				inherit_flag;
	uint32_t			extsize_bytes;
	uint32_t			blocksize_bytes;

	rt_flag = (flags & XFS_DIFLAG_REALTIME);
	hint_flag = (flags & XFS_DIFLAG_EXTSIZE);
	inherit_flag = (flags & XFS_DIFLAG_EXTSZINHERIT);
	extsize_bytes = XFS_FSB_TO_B(mp, extsize);

	if (rt_flag)
		blocksize_bytes = mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog;
	else
		blocksize_bytes = mp->m_sb.sb_blocksize;

	if ((hint_flag || inherit_flag) && !(S_ISDIR(mode) || S_ISREG(mode)))
		return __this_address;

	if (hint_flag && !S_ISREG(mode))
		return __this_address;

	if (inherit_flag && !S_ISDIR(mode))
		return __this_address;

	if ((hint_flag || inherit_flag) && extsize == 0)
		return __this_address;

	/* free inodes get flags set to zero but extsize remains */
	if (mode && !(hint_flag || inherit_flag) && extsize != 0)
		return __this_address;

	if (extsize_bytes % blocksize_bytes)
		return __this_address;

	if (extsize > MAXEXTLEN)
		return __this_address;

	if (!rt_flag && extsize > mp->m_sb.sb_agblocks / 2)
		return __this_address;

	return NULL;
}

/*
 * Validate di_cowextsize hint.
 *
 * The rules are documented at xfs_ioctl_setattr_check_cowextsize().
 * These functions must be kept in sync with each other.
 */
xfs_failaddr_t
xfs_inode_validate_cowextsize(
	struct xfs_mount		*mp,
	uint32_t			cowextsize,
	uint16_t			mode,
	uint16_t			flags,
	uint64_t			flags2)
{
	bool				rt_flag;
	bool				hint_flag;
	uint32_t			cowextsize_bytes;

	rt_flag = (flags & XFS_DIFLAG_REALTIME);
	hint_flag = (flags2 & XFS_DIFLAG2_COWEXTSIZE);
	cowextsize_bytes = XFS_FSB_TO_B(mp, cowextsize);

	if (hint_flag && !xfs_sb_version_hasreflink(&mp->m_sb))
		return __this_address;

	if (hint_flag && !(S_ISDIR(mode) || S_ISREG(mode)))
		return __this_address;

	if (hint_flag && cowextsize == 0)
		return __this_address;

	/* free inodes get flags set to zero but cowextsize remains */
	if (mode && !hint_flag && cowextsize != 0)
		return __this_address;

	if (hint_flag && rt_flag)
		return __this_address;

	if (cowextsize_bytes % mp->m_sb.sb_blocksize)
		return __this_address;

	if (cowextsize > MAXEXTLEN)
		return __this_address;

	if (cowextsize > mp->m_sb.sb_agblocks / 2)
		return __this_address;

	return NULL;
}