xfs: add support for free space btree staging cursors

Add support for btree staging cursors for the free space btrees.  This
is needed both for online repair and for converting xfs_repair to use
btree bulk loading.

Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Author: Darrick J. Wong <darrick.wong@oracle.com>
Date:   2020-03-11 10:52:49 -07:00
parent 60e3d70707
commit e6eb33d905
2 changed files with 90 additions and 22 deletions
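
For orientation, here is a rough sketch (not part of the patch) of how a caller such as online repair or xfs_repair might drive the two new entry points, assuming the bulk-loading interface added by the parent commit (struct xfs_btree_bload, xfs_btree_bload_compute_geometry, xfs_btree_bload) and an open transaction tp holding the AGF buffer agbp.  The repair_* callbacks, nr_records, and priv are hypothetical placeholders, and error handling and block reservation are omitted:

	struct xbtree_afakeroot	afake = { 0 };
	struct xfs_btree_bload	bload = {
		.get_record	= repair_get_record,	/* hypothetical record iterator */
		.claim_block	= repair_claim_block,	/* hypothetical block claimer */
	};
	struct xfs_btree_cur	*cur;
	int			error;

	/* The staging cursor points at the in-memory fake root, not the AGF. */
	cur = xfs_allocbt_stage_cursor(mp, &afake, agno, XFS_BTNUM_BNO);

	/* Size the replacement btree, then reserve that many blocks (not shown). */
	error = xfs_btree_bload_compute_geometry(cur, &bload, nr_records);

	/* Write the new btree blocks without touching the live bnobt. */
	error = xfs_btree_bload(cur, &bload, priv);

	/* Swap the new root into the AGF (logged in tp); freeing the old blocks is the caller's job. */
	xfs_allocbt_commit_staged_btree(cur, tp, agbp);
	xfs_btree_del_cursor(cur, 0);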

fs/xfs/libxfs/xfs_alloc_btree.c

@@ -12,6 +12,7 @@
 #include "xfs_sb.h"
 #include "xfs_mount.h"
 #include "xfs_btree.h"
+#include "xfs_btree_staging.h"
 #include "xfs_alloc_btree.h"
 #include "xfs_alloc.h"
 #include "xfs_extent_busy.h"
@@ -471,6 +472,43 @@ static const struct xfs_btree_ops xfs_cntbt_ops = {
 	.recs_inorder		= xfs_cntbt_recs_inorder,
 };
 
+/* Allocate most of a new allocation btree cursor. */
+STATIC struct xfs_btree_cur *
+xfs_allocbt_init_common(
+	struct xfs_mount	*mp,
+	struct xfs_trans	*tp,
+	xfs_agnumber_t		agno,
+	xfs_btnum_t		btnum)
+{
+	struct xfs_btree_cur	*cur;
+
+	ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);
+
+	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);
+
+	cur->bc_tp = tp;
+	cur->bc_mp = mp;
+	cur->bc_btnum = btnum;
+	cur->bc_blocklog = mp->m_sb.sb_blocklog;
+
+	if (btnum == XFS_BTNUM_CNT) {
+		cur->bc_ops = &xfs_cntbt_ops;
+		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtc_2);
+		cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
+	} else {
+		cur->bc_ops = &xfs_bnobt_ops;
+		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtb_2);
+	}
+
+	cur->bc_ag.agno = agno;
+	cur->bc_ag.abt.active = false;
+
+	if (xfs_sb_version_hascrc(&mp->m_sb))
+		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
+
+	return cur;
+}
+
 /*
  * Allocate a new allocation btree cursor.
  */
@@ -485,36 +523,59 @@ xfs_allocbt_init_cursor(
 	struct xfs_agf		*agf = agbp->b_addr;
 	struct xfs_btree_cur	*cur;
 
-	ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);
-
-	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);
-
-	cur->bc_tp = tp;
-	cur->bc_mp = mp;
-	cur->bc_btnum = btnum;
-	cur->bc_blocklog = mp->m_sb.sb_blocklog;
-
-	if (btnum == XFS_BTNUM_CNT) {
-		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtc_2);
-		cur->bc_ops = &xfs_cntbt_ops;
+	cur = xfs_allocbt_init_common(mp, tp, agno, btnum);
+	if (btnum == XFS_BTNUM_CNT)
 		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
-		cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
-	} else {
-		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtb_2);
-		cur->bc_ops = &xfs_bnobt_ops;
+	else
 		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
-	}
 
 	cur->bc_ag.agbp = agbp;
-	cur->bc_ag.agno = agno;
-	cur->bc_ag.abt.active = false;
-
-	if (xfs_sb_version_hascrc(&mp->m_sb))
-		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
 
 	return cur;
 }
 
+/* Create a free space btree cursor with a fake root for staging. */
+struct xfs_btree_cur *
+xfs_allocbt_stage_cursor(
+	struct xfs_mount	*mp,
+	struct xbtree_afakeroot	*afake,
+	xfs_agnumber_t		agno,
+	xfs_btnum_t		btnum)
+{
+	struct xfs_btree_cur	*cur;
+
+	cur = xfs_allocbt_init_common(mp, NULL, agno, btnum);
+	xfs_btree_stage_afakeroot(cur, afake);
+	return cur;
+}
+
+/*
+ * Install a new free space btree root.  Caller is responsible for invalidating
+ * and freeing the old btree blocks.
+ */
+void
+xfs_allocbt_commit_staged_btree(
+	struct xfs_btree_cur	*cur,
+	struct xfs_trans	*tp,
+	struct xfs_buf		*agbp)
+{
+	struct xfs_agf		*agf = agbp->b_addr;
+	struct xbtree_afakeroot	*afake = cur->bc_ag.afake;
+
+	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
+
+	agf->agf_roots[cur->bc_btnum] = cpu_to_be32(afake->af_root);
+	agf->agf_levels[cur->bc_btnum] = cpu_to_be32(afake->af_levels);
+	xfs_alloc_log_agf(tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
+
+	if (cur->bc_btnum == XFS_BTNUM_BNO) {
+		xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_bnobt_ops);
+	} else {
+		cur->bc_flags |= XFS_BTREE_LASTREC_UPDATE;
+		xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_cntbt_ops);
+	}
+}
+
 /*
  * Calculate number of records in an alloc btree block.
  */

fs/xfs/libxfs/xfs_alloc_btree.h

@@ -13,6 +13,7 @@
 struct xfs_buf;
 struct xfs_btree_cur;
 struct xfs_mount;
+struct xbtree_afakeroot;
 
 /*
  * Btree block header size depends on a superblock flag.
@@ -48,8 +49,14 @@ struct xfs_mount;
 extern struct xfs_btree_cur *xfs_allocbt_init_cursor(struct xfs_mount *,
 		struct xfs_trans *, struct xfs_buf *,
 		xfs_agnumber_t, xfs_btnum_t);
+struct xfs_btree_cur *xfs_allocbt_stage_cursor(struct xfs_mount *mp,
+		struct xbtree_afakeroot *afake, xfs_agnumber_t agno,
+		xfs_btnum_t btnum);
 extern int xfs_allocbt_maxrecs(struct xfs_mount *, int, int);
 extern xfs_extlen_t xfs_allocbt_calc_size(struct xfs_mount *mp,
 		unsigned long long len);
 
+void xfs_allocbt_commit_staged_btree(struct xfs_btree_cur *cur,
+		struct xfs_trans *tp, struct xfs_buf *agbp);
+
 #endif	/* __XFS_ALLOC_BTREE_H__ */