xfs: dynamically allocate cursors based on maxlevels

To support future btree code, we need to be able to size btree cursors
dynamically for very large btrees.  Switch the maxlevels computation to
use the precomputed values in the superblock, and create cursors that
can handle a certain height.  For now, we retain the btree cursor cache
that can handle up to 9-level btrees, though a subsequent patch
introduces separate caches for each btree type, where each cache's
objects will be exactly tall enough to handle the specific btree type.

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Dave Chinner <dchinner@redhat.com>

Author: Darrick J. Wong <djwong@kernel.org>
Date:   2021-09-16 12:27:24 -07:00
Commit: c940a0c54a (parent: c0643f6fdd)

7 files changed, 22 insertions(+), 9 deletions(-)
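
For context, the allocation pattern this patch moves toward can be sketched in
standalone C.  The demo_* names below are hypothetical and deliberately
simplified, not the kernel's actual definitions; the sketch only illustrates
recording a per-tree maxlevels in each cursor while still handing out
fixed-size objects from a single cache sized for the 9-level worst case, which
is the interim state this patch leaves XFS in.

/*
 * Standalone sketch, not kernel code: all demo_* names are hypothetical.
 * A cursor ends in a flexible array of per-level state, and callers pass in
 * the height of the specific btree they intend to walk.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

#define DEMO_CUR_CACHE_MAXLEVELS	9	/* cap for the shared cache */

struct demo_cur_level {
	void	*block;			/* block held at this level */
	int	ptr;			/* slot within that block */
};

struct demo_cur {
	uint8_t			nlevels;	/* height currently in use */
	uint8_t			maxlevels;	/* height this cursor can hold */
	struct demo_cur_level	levels[];	/* one entry per level */
};

/* Bytes needed for a cursor that can track @maxlevels levels. */
static inline size_t
demo_cur_sizeof(unsigned int maxlevels)
{
	return sizeof(struct demo_cur) +
	       maxlevels * sizeof(struct demo_cur_level);
}

/* Allocate a zeroed cursor tall enough for the given btree geometry. */
static struct demo_cur *
demo_alloc_cursor(uint8_t maxlevels)
{
	struct demo_cur	*cur;

	/* The shared cache only hands out objects of the maximum size. */
	assert(maxlevels <= DEMO_CUR_CACHE_MAXLEVELS);

	cur = calloc(1, demo_cur_sizeof(DEMO_CUR_CACHE_MAXLEVELS));
	if (cur)
		cur->maxlevels = maxlevels;
	return cur;
}

int main(void)
{
	/* A 5-level tree still gets a full-size object from the shared pool. */
	struct demo_cur	*cur = demo_alloc_cursor(5);

	free(cur);
	return 0;
}

Once per-btree-type caches exist, as the follow-up patch mentioned above will
add, the calloc() in demo_alloc_cursor() would size the object with
demo_cur_sizeof(maxlevels) instead of always using the cache-wide maximum.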

fs/xfs/libxfs/xfs_alloc_btree.c

@@ -477,7 +477,7 @@ xfs_allocbt_init_common(
 	ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);
-	cur = xfs_btree_alloc_cursor(mp, tp, btnum);
+	cur = xfs_btree_alloc_cursor(mp, tp, btnum, mp->m_ag_maxlevels);
 	cur->bc_ag.abt.active = false;
 	if (btnum == XFS_BTNUM_CNT) {

fs/xfs/libxfs/xfs_bmap_btree.c

@@ -552,7 +552,8 @@ xfs_bmbt_init_cursor(
 	struct xfs_btree_cur *cur;
 	ASSERT(whichfork != XFS_COW_FORK);
-	cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_BMAP);
+	cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_BMAP,
+			mp->m_bm_maxlevels[whichfork]);
 	cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
 	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_bmbt_2);

fs/xfs/libxfs/xfs_btree.h

@@ -94,6 +94,12 @@ uint32_t xfs_btree_magic(int crc, xfs_btnum_t btnum);
 #define XFS_BTREE_MAXLEVELS	9	/* max of all btrees */
+/*
+ * The btree cursor zone hands out cursors that can handle up to this many
+ * levels. This is the known maximum for all btree types.
+ */
+#define XFS_BTREE_CUR_CACHE_MAXLEVELS	(9)
 struct xfs_btree_ops {
 	/* size of the key and record structures */
 	size_t key_len;

@@ -583,15 +589,18 @@ static inline struct xfs_btree_cur *
 xfs_btree_alloc_cursor(
 	struct xfs_mount *mp,
 	struct xfs_trans *tp,
-	xfs_btnum_t btnum)
+	xfs_btnum_t btnum,
+	uint8_t maxlevels)
 {
 	struct xfs_btree_cur *cur;
+	ASSERT(maxlevels <= XFS_BTREE_CUR_CACHE_MAXLEVELS);
 	cur = kmem_cache_zalloc(xfs_btree_cur_zone, GFP_NOFS | __GFP_NOFAIL);
 	cur->bc_tp = tp;
 	cur->bc_mp = mp;
 	cur->bc_btnum = btnum;
-	cur->bc_maxlevels = XFS_BTREE_MAXLEVELS;
+	cur->bc_maxlevels = maxlevels;
 	return cur;
 }

fs/xfs/libxfs/xfs_ialloc_btree.c

@@ -432,7 +432,8 @@ xfs_inobt_init_common(
 {
 	struct xfs_btree_cur *cur;
-	cur = xfs_btree_alloc_cursor(mp, tp, btnum);
+	cur = xfs_btree_alloc_cursor(mp, tp, btnum,
+			M_IGEO(mp)->inobt_maxlevels);
 	if (btnum == XFS_BTNUM_INO) {
 		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_ibt_2);
 		cur->bc_ops = &xfs_inobt_ops;

fs/xfs/libxfs/xfs_refcount_btree.c

@@ -322,7 +322,8 @@ xfs_refcountbt_init_common(
 	ASSERT(pag->pag_agno < mp->m_sb.sb_agcount);
-	cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_REFC);
+	cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_REFC,
+			mp->m_refc_maxlevels);
 	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_refcbt_2);
 	cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

fs/xfs/libxfs/xfs_rmap_btree.c

@@ -452,7 +452,8 @@ xfs_rmapbt_init_common(
 	struct xfs_btree_cur *cur;
 	/* Overlapping btree; 2 keys per pointer. */
-	cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_RMAP);
+	cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_RMAP,
+			mp->m_rmap_maxlevels);
 	cur->bc_flags = XFS_BTREE_CRC_BLOCKS | XFS_BTREE_OVERLAPPING;
 	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2);
 	cur->bc_ops = &xfs_rmapbt_ops;

fs/xfs/xfs_super.c

@@ -1966,8 +1966,8 @@ xfs_init_zones(void)
 		goto out_destroy_log_ticket_zone;
 	xfs_btree_cur_zone = kmem_cache_create("xfs_btree_cur",
-			xfs_btree_cur_sizeof(XFS_BTREE_MAXLEVELS),
-			0, 0, NULL);
+			xfs_btree_cur_sizeof(XFS_BTREE_CUR_CACHE_MAXLEVELS),
+			0, 0, NULL);
 	if (!xfs_btree_cur_zone)
 		goto out_destroy_bmap_free_item_zone;