// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_qm.h"
#include "xfs_error.h"

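/*
 * Return how many dquot records fit in nbblks basic (512 byte) blocks.
 */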
int
xfs_calc_dquots_per_chunk(
	unsigned int		nbblks)	/* basic block units */
{
	ASSERT(nbblks > 0);
	return BBTOB(nbblks) / sizeof(xfs_dqblk_t);
}

/*
 * Do some primitive error checking on ondisk dquot data structures.
 *
 * The xfs_dqblk structure /contains/ the xfs_disk_dquot structure;
 * we verify them separately because at some points we have only the
 * smaller xfs_disk_dquot structure available.
 */

xfs_failaddr_t
xfs_dquot_verify(
	struct xfs_mount	*mp,
	struct xfs_disk_dquot	*ddq,
	xfs_dqid_t		id,
	uint			type)	/* used only during quotacheck */
{
	/*
	 * We can encounter an uninitialized dquot buffer for 2 reasons:
	 * 1. If we crash while deleting the quotainode(s), and those blks got
	 *    used for user data. This is because we take the path of regular
	 *    file deletion; however, the size field of quotainodes is never
	 *    updated, so all the tricks that we play in itruncate_finish
	 *    don't quite matter.
	 *
	 * 2. We don't play the quota buffers when there's a quotaoff logitem.
	 *    But the allocation will be replayed so we'll end up with an
	 *    uninitialized quota block.
	 *
	 * This is all fine; things are still consistent, and we haven't lost
	 * any quota information. Just don't complain about bad dquot blks.
	 */
	if (ddq->d_magic != cpu_to_be16(XFS_DQUOT_MAGIC))
		return __this_address;
	if (ddq->d_version != XFS_DQUOT_VERSION)
		return __this_address;

	if (type && ddq->d_flags != type)
		return __this_address;
	if (ddq->d_flags != XFS_DQ_USER &&
	    ddq->d_flags != XFS_DQ_PROJ &&
	    ddq->d_flags != XFS_DQ_GROUP)
		return __this_address;

	if (id != -1 && id != be32_to_cpu(ddq->d_id))
		return __this_address;

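	/* dquot 0 holds the default limits; the limit/timer checks below don't apply to it */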
	if (!ddq->d_id)
		return NULL;

	if (ddq->d_blk_softlimit &&
	    be64_to_cpu(ddq->d_bcount) > be64_to_cpu(ddq->d_blk_softlimit) &&
	    !ddq->d_btimer)
		return __this_address;

	if (ddq->d_ino_softlimit &&
	    be64_to_cpu(ddq->d_icount) > be64_to_cpu(ddq->d_ino_softlimit) &&
	    !ddq->d_itimer)
		return __this_address;

	if (ddq->d_rtb_softlimit &&
	    be64_to_cpu(ddq->d_rtbcount) > be64_to_cpu(ddq->d_rtb_softlimit) &&
	    !ddq->d_rtbtimer)
		return __this_address;

	return NULL;
}

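/*
 * Verify a whole ondisk dquot block: check the metadata UUID on CRC-enabled
 * (v5) filesystems, then verify the embedded disk dquot itself.
 */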
xfs_failaddr_t
xfs_dqblk_verify(
	struct xfs_mount	*mp,
	struct xfs_dqblk	*dqb,
	xfs_dqid_t		id,
	uint			type)	/* used only during quotacheck */
{
	if (xfs_sb_version_hascrc(&mp->m_sb) &&
	    !uuid_equal(&dqb->dd_uuid, &mp->m_sb.sb_meta_uuid))
		return __this_address;

	return xfs_dquot_verify(mp, &dqb->dd_diskdq, id, type);
}

/*
 * Reset a corrupt ondisk dquot block to a sane, freshly initialised state.
 */
void
xfs_dqblk_repair(
	struct xfs_mount	*mp,
	struct xfs_dqblk	*dqb,
	xfs_dqid_t		id,
	uint			type)
{
	/*
	 * Typically, a repair is only requested by quotacheck.
	 */
	ASSERT(id != -1);
	memset(dqb, 0, sizeof(xfs_dqblk_t));

	dqb->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
	dqb->dd_diskdq.d_version = XFS_DQUOT_VERSION;
	dqb->dd_diskdq.d_flags = type;
	dqb->dd_diskdq.d_id = cpu_to_be32(id);

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		uuid_copy(&dqb->dd_uuid, &mp->m_sb.sb_meta_uuid);
		xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk),
				 XFS_DQUOT_CRC_OFF);
	}
}

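/*
 * Check the CRC of every dquot record in the buffer.  Readahead callers only
 * want a pass/fail answer, so they suppress the verifier error report.
 */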
STATIC bool
xfs_dquot_buf_verify_crc(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	bool			readahead)
{
	struct xfs_dqblk	*d = (struct xfs_dqblk *)bp->b_addr;
	int			ndquots;
	int			i;

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return true;

	/*
	 * if we are in log recovery, the quota subsystem has not been
	 * initialised so we have no quotainfo structure. In that case, we need
	 * to manually calculate the number of dquots in the buffer.
	 */
	if (mp->m_quotainfo)
		ndquots = mp->m_quotainfo->qi_dqperchunk;
	else
		ndquots = xfs_calc_dquots_per_chunk(bp->b_length);

	for (i = 0; i < ndquots; i++, d++) {
		if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
				      XFS_DQUOT_CRC_OFF)) {
			if (!readahead)
				xfs_buf_verifier_error(bp, -EFSBADCRC, __func__,
					d, sizeof(*d), __this_address);
			return false;
		}
	}
	return true;
}

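/*
 * Structurally verify every dquot record in the buffer; ids are expected to
 * increase monotonically from the first record.
 */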
STATIC xfs_failaddr_t
xfs_dquot_buf_verify(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	bool			readahead)
{
	struct xfs_dqblk	*dqb = bp->b_addr;
	xfs_failaddr_t		fa;
	xfs_dqid_t		id = 0;
	int			ndquots;
	int			i;

	/*
	 * if we are in log recovery, the quota subsystem has not been
	 * initialised so we have no quotainfo structure. In that case, we need
	 * to manually calculate the number of dquots in the buffer.
	 */
	if (mp->m_quotainfo)
		ndquots = mp->m_quotainfo->qi_dqperchunk;
	else
		ndquots = xfs_calc_dquots_per_chunk(bp->b_length);

	/*
	 * On the first read of the buffer, verify that each dquot is valid.
	 * We don't know what the id of the dquot is supposed to be, just that
	 * they should be increasing monotonically within the buffer. If the
	 * first id is corrupt, then it will fail on the second dquot in the
	 * buffer so corruptions could point to the wrong dquot in this case.
	 */
	for (i = 0; i < ndquots; i++) {
		struct xfs_disk_dquot	*ddq;

		ddq = &dqb[i].dd_diskdq;

		if (i == 0)
			id = be32_to_cpu(ddq->d_id);

		fa = xfs_dqblk_verify(mp, &dqb[i], id + i, 0);
		if (fa) {
			if (!readahead)
				xfs_buf_verifier_error(bp, -EFSCORRUPTED,
						__func__, &dqb[i],
						sizeof(struct xfs_dqblk), fa);
			return fa;
		}
	}

	return NULL;
}

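/*
 * ->verify_struct callback: structure checks only, no CRC verification.
 */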
static xfs_failaddr_t
xfs_dquot_buf_verify_struct(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;

	return xfs_dquot_buf_verify(mp, bp, false);
}

static void
xfs_dquot_buf_read_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;

	if (!xfs_dquot_buf_verify_crc(mp, bp, false))
		return;
	xfs_dquot_buf_verify(mp, bp, false);
}

/*
 * readahead errors are silent and simply leave the buffer as !done so a real
 * read will then be run with the xfs_dquot_buf_ops verifier. See
 * xfs_inode_buf_verify() for why we use EIO and ~XBF_DONE here rather than
 * reporting the failure.
 */
static void
xfs_dquot_buf_readahead_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount	*mp = bp->b_mount;

	if (!xfs_dquot_buf_verify_crc(mp, bp, true) ||
	    xfs_dquot_buf_verify(mp, bp, true) != NULL) {
		xfs_buf_ioerror(bp, -EIO);
		bp->b_flags &= ~XBF_DONE;
	}
}

/*
 * we don't calculate the CRC here as that is done when the dquot is flushed to
 * the buffer after the update is done. This ensures that the dquot in the
 * buffer always has an up-to-date CRC value.
 */
static void
xfs_dquot_buf_write_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;

	xfs_dquot_buf_verify(mp, bp, false);
}

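/*
 * Verifier vtables for dquot buffers.  verify_read/verify_write run when the
 * buffer is read in or written back (e.g. the dquot read path passes
 * &xfs_dquot_buf_ops to xfs_trans_read_buf()); the _ra_ops variant is used
 * for readahead and fails silently.
 */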
const struct xfs_buf_ops xfs_dquot_buf_ops = {
	.name = "xfs_dquot",
	.magic16 = { cpu_to_be16(XFS_DQUOT_MAGIC),
		     cpu_to_be16(XFS_DQUOT_MAGIC) },
	.verify_read = xfs_dquot_buf_read_verify,
	.verify_write = xfs_dquot_buf_write_verify,
	.verify_struct = xfs_dquot_buf_verify_struct,
};

const struct xfs_buf_ops xfs_dquot_buf_ra_ops = {
	.name = "xfs_dquot_ra",
	.magic16 = { cpu_to_be16(XFS_DQUOT_MAGIC),
		     cpu_to_be16(XFS_DQUOT_MAGIC) },
	.verify_read = xfs_dquot_buf_readahead_verify,
	.verify_write = xfs_dquot_buf_write_verify,
};