// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_BTREE_H__
#define	__XFS_BTREE_H__
|
struct xfs_buf;
|
|
|
|
struct xfs_inode;
|
|
|
|
struct xfs_mount;
|
|
|
|
struct xfs_trans;
|
|
|
|
|
2007-11-23 13:28:09 +08:00
|
|
|
extern kmem_zone_t *xfs_btree_cur_zone;
|
|
|
|
|
2008-10-30 13:54:12 +08:00
|
|
|
/*
|
|
|
|
* Generic key, ptr and record wrapper structures.
|
|
|
|
*
|
|
|
|
* These are disk format structures, and are converted where necessary
|
|
|
|
* by the btree specific code that needs to interpret them.
|
|
|
|
*/
|
|
|
|
union xfs_btree_ptr {
|
|
|
|
__be32 s; /* short form ptr */
|
|
|
|
__be64 l; /* long form ptr */
|
|
|
|
};
|
|
|
|
|
xfs: support btrees with overlapping intervals for keys
On a filesystem with both reflink and reverse mapping enabled, it's
possible to have multiple rmap records referring to the same blocks on
disk. When overlapping intervals are possible, querying a classic
btree to find all records intersecting a given interval is inefficient
because we cannot use the left side of the search interval to filter
out non-matching records the same way that we can use the existing
btree key to filter out records coming after the right side of the
search interval. This will become important once we want to use the
rmap btree to rebuild BMBTs, or implement the (future) fsmap ioctl.
(For the non-overlapping case, we can perform such queries trivially
by starting at the left side of the interval and walking the tree
until we pass the right side.)
Therefore, extend the btree code to come closer to supporting
intervals as a first-class record attribute. This involves widening
the btree node's key space to store both the lowest key reachable via
the node pointer (as the btree does now) and the highest key reachable
via the same pointer and teaching the btree modifying functions to
keep the highest-key records up to date.
This behavior can be turned on via a new btree ops flag so that btrees
that cannot store overlapping intervals don't pay the overhead costs
in terms of extra code and disk format changes.
When we're deleting a record in a btree that supports overlapped
interval records and the deletion results in two btree blocks being
joined, we defer updating the high/low keys until after all possible
joining (at higher levels in the tree) have finished. At this point,
the btree pointers at all levels have been updated to remove the empty
blocks and we can update the low and high keys.
When we're doing this, we must be careful to update the keys of all
node pointers up to the root instead of stopping at the first set of
keys that don't need updating. This is because it's possible for a
single deletion to cause joining of multiple levels of tree, and so
we need to update everything going back to the root.
The diff_two_keys functions return < 0, 0, or > 0 if key1 is less than,
equal to, or greater than key2, respectively. This is consistent
with the rest of the kernel and the C library.
In btree_updkeys(), we need to evaluate the force_all parameter before
running the key diff to avoid reading uninitialized memory when we're
forcing a key update. This happens when we've allocated an empty slot
at level N + 1 to point to a new block at level N and we're in the
process of filling out the new keys.
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Dave Chinner <david@fromorbit.com>
2016-08-03 09:08:36 +08:00
|
|
|
/*
|
2016-09-19 08:24:36 +08:00
|
|
|
* The in-core btree key. Overlapping btrees actually store two keys
|
|
|
|
* per pointer, so we reserve enough memory to hold both. The __*bigkey
|
|
|
|
* items should never be accessed directly.
|
xfs: support btrees with overlapping intervals for keys
On a filesystem with both reflink and reverse mapping enabled, it's
possible to have multiple rmap records referring to the same blocks on
disk. When overlapping intervals are possible, querying a classic
btree to find all records intersecting a given interval is inefficient
because we cannot use the left side of the search interval to filter
out non-matching records the same way that we can use the existing
btree key to filter out records coming after the right side of the
search interval. This will become important once we want to use the
rmap btree to rebuild BMBTs, or implement the (future) fsmap ioctl.
(For the non-overlapping case, we can perform such queries trivially
by starting at the left side of the interval and walking the tree
until we pass the right side.)
Therefore, extend the btree code to come closer to supporting
intervals as a first-class record attribute. This involves widening
the btree node's key space to store both the lowest key reachable via
the node pointer (as the btree does now) and the highest key reachable
via the same pointer and teaching the btree modifying functions to
keep the highest-key records up to date.
This behavior can be turned on via a new btree ops flag so that btrees
that cannot store overlapping intervals don't pay the overhead costs
in terms of extra code and disk format changes.
When we're deleting a record in a btree that supports overlapped
interval records and the deletion results in two btree blocks being
joined, we defer updating the high/low keys until after all possible
joining (at higher levels in the tree) have finished. At this point,
the btree pointers at all levels have been updated to remove the empty
blocks and we can update the low and high keys.
When we're doing this, we must be careful to update the keys of all
node pointers up to the root instead of stopping at the first set of
keys that don't need updating. This is because it's possible for a
single deletion to cause joining of multiple levels of tree, and so
we need to update everything going back to the root.
The diff_two_keys functions return < 0, 0, or > 0 if key1 is less than,
equal to, or greater than key2, respectively. This is consistent
with the rest of the kernel and the C library.
In btree_updkeys(), we need to evaluate the force_all parameter before
running the key diff to avoid reading uninitialized memory when we're
forcing a key update. This happens when we've allocated an empty slot
at level N + 1 to point to a new block at level N and we're in the
process of filling out the new keys.
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Dave Chinner <david@fromorbit.com>
2016-08-03 09:08:36 +08:00
|
|
|
*/
|
2016-09-19 08:24:36 +08:00
|
|
|
union xfs_btree_key {
|
xfs: support btrees with overlapping intervals for keys
On a filesystem with both reflink and reverse mapping enabled, it's
possible to have multiple rmap records referring to the same blocks on
disk. When overlapping intervals are possible, querying a classic
btree to find all records intersecting a given interval is inefficient
because we cannot use the left side of the search interval to filter
out non-matching records the same way that we can use the existing
btree key to filter out records coming after the right side of the
search interval. This will become important once we want to use the
rmap btree to rebuild BMBTs, or implement the (future) fsmap ioctl.
(For the non-overlapping case, we can perform such queries trivially
by starting at the left side of the interval and walking the tree
until we pass the right side.)
Therefore, extend the btree code to come closer to supporting
intervals as a first-class record attribute. This involves widening
the btree node's key space to store both the lowest key reachable via
the node pointer (as the btree does now) and the highest key reachable
via the same pointer and teaching the btree modifying functions to
keep the highest-key records up to date.
This behavior can be turned on via a new btree ops flag so that btrees
that cannot store overlapping intervals don't pay the overhead costs
in terms of extra code and disk format changes.
When we're deleting a record in a btree that supports overlapped
interval records and the deletion results in two btree blocks being
joined, we defer updating the high/low keys until after all possible
joining (at higher levels in the tree) have finished. At this point,
the btree pointers at all levels have been updated to remove the empty
blocks and we can update the low and high keys.
When we're doing this, we must be careful to update the keys of all
node pointers up to the root instead of stopping at the first set of
keys that don't need updating. This is because it's possible for a
single deletion to cause joining of multiple levels of tree, and so
we need to update everything going back to the root.
The diff_two_keys functions return < 0, 0, or > 0 if key1 is less than,
equal to, or greater than key2, respectively. This is consistent
with the rest of the kernel and the C library.
In btree_updkeys(), we need to evaluate the force_all parameter before
running the key diff to avoid reading uninitialized memory when we're
forcing a key update. This happens when we've allocated an empty slot
at level N + 1 to point to a new block at level N and we're in the
process of filling out the new keys.
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Dave Chinner <david@fromorbit.com>
2016-08-03 09:08:36 +08:00
|
|
|
struct xfs_bmbt_key bmbt;
|
|
|
|
xfs_bmdr_key_t bmbr; /* bmbt root block */
|
|
|
|
xfs_alloc_key_t alloc;
|
|
|
|
struct xfs_inobt_key inobt;
|
2016-09-19 08:24:36 +08:00
|
|
|
struct xfs_rmap_key rmap;
|
|
|
|
struct xfs_rmap_key __rmap_bigkey[2];
|
2016-10-04 00:11:18 +08:00
|
|
|
struct xfs_refcount_key refc;
|
xfs: support btrees with overlapping intervals for keys
On a filesystem with both reflink and reverse mapping enabled, it's
possible to have multiple rmap records referring to the same blocks on
disk. When overlapping intervals are possible, querying a classic
btree to find all records intersecting a given interval is inefficient
because we cannot use the left side of the search interval to filter
out non-matching records the same way that we can use the existing
btree key to filter out records coming after the right side of the
search interval. This will become important once we want to use the
rmap btree to rebuild BMBTs, or implement the (future) fsmap ioctl.
(For the non-overlapping case, we can perform such queries trivially
by starting at the left side of the interval and walking the tree
until we pass the right side.)
Therefore, extend the btree code to come closer to supporting
intervals as a first-class record attribute. This involves widening
the btree node's key space to store both the lowest key reachable via
the node pointer (as the btree does now) and the highest key reachable
via the same pointer and teaching the btree modifying functions to
keep the highest-key records up to date.
This behavior can be turned on via a new btree ops flag so that btrees
that cannot store overlapping intervals don't pay the overhead costs
in terms of extra code and disk format changes.
When we're deleting a record in a btree that supports overlapped
interval records and the deletion results in two btree blocks being
joined, we defer updating the high/low keys until after all possible
joining (at higher levels in the tree) have finished. At this point,
the btree pointers at all levels have been updated to remove the empty
blocks and we can update the low and high keys.
When we're doing this, we must be careful to update the keys of all
node pointers up to the root instead of stopping at the first set of
keys that don't need updating. This is because it's possible for a
single deletion to cause joining of multiple levels of tree, and so
we need to update everything going back to the root.
The diff_two_keys functions return < 0, 0, or > 0 if key1 is less than,
equal to, or greater than key2, respectively. This is consistent
with the rest of the kernel and the C library.
In btree_updkeys(), we need to evaluate the force_all parameter before
running the key diff to avoid reading uninitialized memory when we're
forcing a key update. This happens when we've allocated an empty slot
at level N + 1 to point to a new block at level N and we're in the
process of filling out the new keys.
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Dave Chinner <david@fromorbit.com>
2016-08-03 09:08:36 +08:00
|
|
|
};
|
|
|
|
|
2008-10-30 13:54:12 +08:00
|
|
|
union xfs_btree_rec {
|
2016-08-03 09:36:07 +08:00
|
|
|
struct xfs_bmbt_rec bmbt;
|
|
|
|
xfs_bmdr_rec_t bmbr; /* bmbt root block */
|
|
|
|
struct xfs_alloc_rec alloc;
|
|
|
|
struct xfs_inobt_rec inobt;
|
|
|
|
struct xfs_rmap_rec rmap;
|
2016-10-04 00:11:18 +08:00
|
|
|
struct xfs_refcount_rec refc;
|
2008-10-30 13:54:12 +08:00
|
|
|
};
|
|
|
|
|
2013-10-23 07:51:50 +08:00
|
|
|
/*
|
|
|
|
* This nonsense is to make -wlint happy.
|
|
|
|
*/
|
|
|
|
#define XFS_LOOKUP_EQ ((xfs_lookup_t)XFS_LOOKUP_EQi)
|
|
|
|
#define XFS_LOOKUP_LE ((xfs_lookup_t)XFS_LOOKUP_LEi)
|
|
|
|
#define XFS_LOOKUP_GE ((xfs_lookup_t)XFS_LOOKUP_GEi)
|
|
|
|
|
|
|
|
#define XFS_BTNUM_BNO ((xfs_btnum_t)XFS_BTNUM_BNOi)
|
|
|
|
#define XFS_BTNUM_CNT ((xfs_btnum_t)XFS_BTNUM_CNTi)
|
|
|
|
#define XFS_BTNUM_BMAP ((xfs_btnum_t)XFS_BTNUM_BMAPi)
|
|
|
|
#define XFS_BTNUM_INO ((xfs_btnum_t)XFS_BTNUM_INOi)
|
2014-04-24 14:00:52 +08:00
|
|
|
#define XFS_BTNUM_FINO ((xfs_btnum_t)XFS_BTNUM_FINOi)
|
2016-08-03 09:30:32 +08:00
|
|
|
#define XFS_BTNUM_RMAP ((xfs_btnum_t)XFS_BTNUM_RMAPi)
|
2016-10-04 00:11:16 +08:00
|
|
|
#define XFS_BTNUM_REFC ((xfs_btnum_t)XFS_BTNUM_REFCi)
|
2013-10-23 07:51:50 +08:00
|
|
|
|
2017-06-17 02:00:05 +08:00
|
|
|
uint32_t xfs_btree_magic(int crc, xfs_btnum_t btnum);
|
2017-01-28 15:16:38 +08:00
|
|
|
|
/*
 * For logging record fields.  Bit positions match the field order in
 * struct xfs_btree_block; the *_CRC variants cover the extended header.
 */
#define	XFS_BB_MAGIC		(1 << 0)
#define	XFS_BB_LEVEL		(1 << 1)
#define	XFS_BB_NUMRECS		(1 << 2)
#define	XFS_BB_LEFTSIB		(1 << 3)
#define	XFS_BB_RIGHTSIB		(1 << 4)
#define	XFS_BB_BLKNO		(1 << 5)
#define	XFS_BB_LSN		(1 << 6)
#define	XFS_BB_UUID		(1 << 7)
#define	XFS_BB_OWNER		(1 << 8)
#define	XFS_BB_NUM_BITS		5
#define	XFS_BB_ALL_BITS		((1 << XFS_BB_NUM_BITS) - 1)
#define	XFS_BB_NUM_BITS_CRC	9
#define	XFS_BB_ALL_BITS_CRC	((1 << XFS_BB_NUM_BITS_CRC) - 1)
/*
 * Generic stats interface
 */
#define XFS_BTREE_STATS_INC(cur, stat)	\
	XFS_STATS_INC_OFF((cur)->bc_mp, (cur)->bc_statoff + __XBTS_ ## stat)
#define XFS_BTREE_STATS_ADD(cur, stat, val)	\
	XFS_STATS_ADD_OFF((cur)->bc_mp, (cur)->bc_statoff + __XBTS_ ## stat, val)
/*
 * Maximum btree height over all btree types.  Sized for the rmapbt:
 * a 2^30-block AG with 1k blocks, half-full nodes (20 recs/11 ptrs
 * per block) needs ~54M leaf blocks and a tree of height 9.
 */
#define	XFS_BTREE_MAXLEVELS	9	/* max of all btrees */
2008-10-30 13:53:59 +08:00
|
|
|
struct xfs_btree_ops {
|
2008-10-30 13:55:34 +08:00
|
|
|
/* size of the key and record structures */
|
|
|
|
size_t key_len;
|
|
|
|
size_t rec_len;
|
|
|
|
|
2008-10-30 13:53:59 +08:00
|
|
|
/* cursor operations */
|
|
|
|
struct xfs_btree_cur *(*dup_cursor)(struct xfs_btree_cur *);
|
2008-10-30 13:57:40 +08:00
|
|
|
void (*update_cursor)(struct xfs_btree_cur *src,
|
|
|
|
struct xfs_btree_cur *dst);
|
2008-10-30 13:55:13 +08:00
|
|
|
|
2008-10-30 13:57:16 +08:00
|
|
|
/* update btree root pointer */
|
|
|
|
void (*set_root)(struct xfs_btree_cur *cur,
|
2010-09-08 07:34:07 +08:00
|
|
|
union xfs_btree_ptr *nptr, int level_change);
|
2008-10-30 13:57:16 +08:00
|
|
|
|
2008-10-30 13:57:03 +08:00
|
|
|
/* block allocation / freeing */
|
|
|
|
int (*alloc_block)(struct xfs_btree_cur *cur,
|
|
|
|
union xfs_btree_ptr *start_bno,
|
|
|
|
union xfs_btree_ptr *new_bno,
|
2014-04-14 17:03:53 +08:00
|
|
|
int *stat);
|
2008-10-30 13:57:51 +08:00
|
|
|
int (*free_block)(struct xfs_btree_cur *cur, struct xfs_buf *bp);
|
2008-10-30 13:57:03 +08:00
|
|
|
|
2008-10-30 13:56:32 +08:00
|
|
|
/* update last record information */
|
|
|
|
void (*update_lastrec)(struct xfs_btree_cur *cur,
|
|
|
|
struct xfs_btree_block *block,
|
|
|
|
union xfs_btree_rec *rec,
|
|
|
|
int ptr, int reason);
|
|
|
|
|
2008-10-30 13:55:23 +08:00
|
|
|
/* records in block/level */
|
2008-10-30 13:58:01 +08:00
|
|
|
int (*get_minrecs)(struct xfs_btree_cur *cur, int level);
|
2008-10-30 13:55:23 +08:00
|
|
|
int (*get_maxrecs)(struct xfs_btree_cur *cur, int level);
|
|
|
|
|
2008-10-30 13:57:40 +08:00
|
|
|
/* records on disk. Matter for the root in inode case. */
|
|
|
|
int (*get_dmaxrecs)(struct xfs_btree_cur *cur, int level);
|
|
|
|
|
2008-10-30 13:56:09 +08:00
|
|
|
/* init values of btree structures */
|
|
|
|
void (*init_key_from_rec)(union xfs_btree_key *key,
|
|
|
|
union xfs_btree_rec *rec);
|
2008-10-30 13:57:40 +08:00
|
|
|
void (*init_rec_from_cur)(struct xfs_btree_cur *cur,
|
|
|
|
union xfs_btree_rec *rec);
|
2008-10-30 13:56:09 +08:00
|
|
|
void (*init_ptr_from_cur)(struct xfs_btree_cur *cur,
|
|
|
|
union xfs_btree_ptr *ptr);
|
xfs: support btrees with overlapping intervals for keys
On a filesystem with both reflink and reverse mapping enabled, it's
possible to have multiple rmap records referring to the same blocks on
disk. When overlapping intervals are possible, querying a classic
btree to find all records intersecting a given interval is inefficient
because we cannot use the left side of the search interval to filter
out non-matching records the same way that we can use the existing
btree key to filter out records coming after the right side of the
search interval. This will become important once we want to use the
rmap btree to rebuild BMBTs, or implement the (future) fsmap ioctl.
(For the non-overlapping case, we can perform such queries trivially
by starting at the left side of the interval and walking the tree
until we pass the right side.)
Therefore, extend the btree code to come closer to supporting
intervals as a first-class record attribute. This involves widening
the btree node's key space to store both the lowest key reachable via
the node pointer (as the btree does now) and the highest key reachable
via the same pointer and teaching the btree modifying functions to
keep the highest-key records up to date.
This behavior can be turned on via a new btree ops flag so that btrees
that cannot store overlapping intervals don't pay the overhead costs
in terms of extra code and disk format changes.
When we're deleting a record in a btree that supports overlapped
interval records and the deletion results in two btree blocks being
joined, we defer updating the high/low keys until after all possible
joining (at higher levels in the tree) have finished. At this point,
the btree pointers at all levels have been updated to remove the empty
blocks and we can update the low and high keys.
When we're doing this, we must be careful to update the keys of all
node pointers up to the root instead of stopping at the first set of
keys that don't need updating. This is because it's possible for a
single deletion to cause joining of multiple levels of tree, and so
we need to update everything going back to the root.
The diff_two_keys functions return < 0, 0, or > 0 if key1 is less than,
equal to, or greater than key2, respectively. This is consistent
with the rest of the kernel and the C library.
In btree_updkeys(), we need to evaluate the force_all parameter before
running the key diff to avoid reading uninitialized memory when we're
forcing a key update. This happens when we've allocated an empty slot
at level N + 1 to point to a new block at level N and we're in the
process of filling out the new keys.
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Dave Chinner <david@fromorbit.com>
2016-08-03 09:08:36 +08:00
|
|
|
void (*init_high_key_from_rec)(union xfs_btree_key *key,
|
|
|
|
union xfs_btree_rec *rec);
|
2008-10-30 13:56:09 +08:00
|
|
|
|
|
|
|
/* difference between key value and cursor value */
|
2017-06-17 02:00:05 +08:00
|
|
|
int64_t (*key_diff)(struct xfs_btree_cur *cur,
|
2008-10-30 13:56:09 +08:00
|
|
|
union xfs_btree_key *key);
|
|
|
|
|
xfs: support btrees with overlapping intervals for keys
On a filesystem with both reflink and reverse mapping enabled, it's
possible to have multiple rmap records referring to the same blocks on
disk. When overlapping intervals are possible, querying a classic
btree to find all records intersecting a given interval is inefficient
because we cannot use the left side of the search interval to filter
out non-matching records the same way that we can use the existing
btree key to filter out records coming after the right side of the
search interval. This will become important once we want to use the
rmap btree to rebuild BMBTs, or implement the (future) fsmap ioctl.
(For the non-overlapping case, we can perform such queries trivially
by starting at the left side of the interval and walking the tree
until we pass the right side.)
Therefore, extend the btree code to come closer to supporting
intervals as a first-class record attribute. This involves widening
the btree node's key space to store both the lowest key reachable via
the node pointer (as the btree does now) and the highest key reachable
via the same pointer and teaching the btree modifying functions to
keep the highest-key records up to date.
This behavior can be turned on via a new btree ops flag so that btrees
that cannot store overlapping intervals don't pay the overhead costs
in terms of extra code and disk format changes.
When we're deleting a record in a btree that supports overlapped
interval records and the deletion results in two btree blocks being
joined, we defer updating the high/low keys until after all possible
joining (at higher levels in the tree) have finished. At this point,
the btree pointers at all levels have been updated to remove the empty
blocks and we can update the low and high keys.
When we're doing this, we must be careful to update the keys of all
node pointers up to the root instead of stopping at the first set of
keys that don't need updating. This is because it's possible for a
single deletion to cause joining of multiple levels of tree, and so
we need to update everything going back to the root.
The diff_two_keys functions return < 0, 0, or > 0 if key1 is less than,
equal to, or greater than key2, respectively. This is consistent
with the rest of the kernel and the C library.
In btree_updkeys(), we need to evaluate the force_all parameter before
running the key diff to avoid reading uninitialized memory when we're
forcing a key update. This happens when we've allocated an empty slot
at level N + 1 to point to a new block at level N and we're in the
process of filling out the new keys.
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Dave Chinner <david@fromorbit.com>
2016-08-03 09:08:36 +08:00
|
|
|
/*
|
|
|
|
* Difference between key2 and key1 -- positive if key1 > key2,
|
|
|
|
* negative if key1 < key2, and zero if equal.
|
|
|
|
*/
|
2017-06-17 02:00:05 +08:00
|
|
|
int64_t (*diff_two_keys)(struct xfs_btree_cur *cur,
|
xfs: support btrees with overlapping intervals for keys
On a filesystem with both reflink and reverse mapping enabled, it's
possible to have multiple rmap records referring to the same blocks on
disk. When overlapping intervals are possible, querying a classic
btree to find all records intersecting a given interval is inefficient
because we cannot use the left side of the search interval to filter
out non-matching records the same way that we can use the existing
btree key to filter out records coming after the right side of the
search interval. This will become important once we want to use the
rmap btree to rebuild BMBTs, or implement the (future) fsmap ioctl.
(For the non-overlapping case, we can perform such queries trivially
by starting at the left side of the interval and walking the tree
until we pass the right side.)
Therefore, extend the btree code to come closer to supporting
intervals as a first-class record attribute. This involves widening
the btree node's key space to store both the lowest key reachable via
the node pointer (as the btree does now) and the highest key reachable
via the same pointer and teaching the btree modifying functions to
keep the highest-key records up to date.
This behavior can be turned on via a new btree ops flag so that btrees
that cannot store overlapping intervals don't pay the overhead costs
in terms of extra code and disk format changes.
When we're deleting a record in a btree that supports overlapped
interval records and the deletion results in two btree blocks being
joined, we defer updating the high/low keys until after all possible
joining (at higher levels in the tree) have finished. At this point,
the btree pointers at all levels have been updated to remove the empty
blocks and we can update the low and high keys.
When we're doing this, we must be careful to update the keys of all
node pointers up to the root instead of stopping at the first set of
keys that don't need updating. This is because it's possible for a
single deletion to cause joining of multiple levels of tree, and so
we need to update everything going back to the root.
The diff_two_keys functions return < 0, 0, or > 0 if key1 is less than,
equal to, or greater than key2, respectively. This is consistent
with the rest of the kernel and the C library.
In btree_updkeys(), we need to evaluate the force_all parameter before
running the key diff to avoid reading uninitialized memory when we're
forcing a key update. This happens when we've allocated an empty slot
at level N + 1 to point to a new block at level N and we're in the
process of filling out the new keys.
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Dave Chinner <david@fromorbit.com>
2016-08-03 09:08:36 +08:00
|
|
|
union xfs_btree_key *key1,
|
|
|
|
union xfs_btree_key *key2);
|
|
|
|
|
2012-11-14 14:54:40 +08:00
|
|
|
const struct xfs_buf_ops *buf_ops;
|
2012-11-14 14:53:49 +08:00
|
|
|
|
2008-10-30 13:58:32 +08:00
|
|
|
/* check that k1 is lower than k2 */
|
|
|
|
int (*keys_inorder)(struct xfs_btree_cur *cur,
|
|
|
|
union xfs_btree_key *k1,
|
|
|
|
union xfs_btree_key *k2);
|
|
|
|
|
|
|
|
/* check that r1 is lower than r2 */
|
|
|
|
int (*recs_inorder)(struct xfs_btree_cur *cur,
|
|
|
|
union xfs_btree_rec *r1,
|
|
|
|
union xfs_btree_rec *r2);
|
2008-10-30 13:53:59 +08:00
|
|
|
};
|
|
|
|
|
/*
 * Reasons for the update_lastrec method to be called.
 */
#define	LASTREC_UPDATE	0
#define	LASTREC_INSREC	1
#define	LASTREC_DELREC	2
|
2016-08-03 09:10:21 +08:00
|
|
|
union xfs_btree_irec {
|
|
|
|
struct xfs_alloc_rec_incore a;
|
|
|
|
struct xfs_bmbt_irec b;
|
|
|
|
struct xfs_inobt_rec_incore i;
|
2016-08-03 09:39:05 +08:00
|
|
|
struct xfs_rmap_irec r;
|
2016-10-04 00:11:18 +08:00
|
|
|
struct xfs_refcount_irec rc;
|
|
|
|
};
|
|
|
|
|
/* Per-AG btree private information. */
union xfs_btree_cur_private {
	struct {
		unsigned long	nr_ops;		/* # record updates */
		int		shape_changes;	/* # of extent splits */
	} refc;
};
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Btree cursor structure.
|
|
|
|
* This collects all information needed by the btree code in one place.
|
|
|
|
*/
|
|
|
|
typedef struct xfs_btree_cur
|
|
|
|
{
|
|
|
|
struct xfs_trans *bc_tp; /* transaction we're in, if any */
|
|
|
|
struct xfs_mount *bc_mp; /* file system mount struct */
|
2008-10-30 13:53:59 +08:00
|
|
|
const struct xfs_btree_ops *bc_ops;
|
2008-10-30 13:54:22 +08:00
|
|
|
uint bc_flags; /* btree features - below */
|
2016-08-03 09:10:21 +08:00
|
|
|
union xfs_btree_irec bc_rec; /* current insert/search record value */
|
2005-04-17 06:20:36 +08:00
|
|
|
struct xfs_buf *bc_bufs[XFS_BTREE_MAXLEVELS]; /* buf ptr per level */
|
|
|
|
int bc_ptrs[XFS_BTREE_MAXLEVELS]; /* key/record # */
|
2017-06-17 02:00:05 +08:00
|
|
|
uint8_t bc_ra[XFS_BTREE_MAXLEVELS]; /* readahead bits */
|
2005-04-17 06:20:36 +08:00
|
|
|
#define XFS_BTCUR_LEFTRA 1 /* left sibling has been read-ahead */
|
|
|
|
#define XFS_BTCUR_RIGHTRA 2 /* right sibling has been read-ahead */
|
2017-06-17 02:00:05 +08:00
|
|
|
uint8_t bc_nlevels; /* number of levels in the tree */
|
|
|
|
uint8_t bc_blocklog; /* log2(blocksize) of btree blocks */
|
2005-04-17 06:20:36 +08:00
|
|
|
xfs_btnum_t bc_btnum; /* identifies which btree type */
|
2016-12-05 11:38:58 +08:00
|
|
|
int bc_statoff; /* offset of btre stats array */
|
2005-04-17 06:20:36 +08:00
|
|
|
union {
|
2008-08-13 14:25:27 +08:00
|
|
|
struct { /* needed for BNO, CNT, INO */
|
|
|
|
struct xfs_buf *agbp; /* agf/agi buffer pointer */
|
2005-04-17 06:20:36 +08:00
|
|
|
xfs_agnumber_t agno; /* ag number */
|
2016-10-04 00:11:18 +08:00
|
|
|
union xfs_btree_cur_private priv;
|
2005-04-17 06:20:36 +08:00
|
|
|
} a;
|
|
|
|
struct { /* needed for BMAP */
|
|
|
|
struct xfs_inode *ip; /* pointer to our inode */
|
|
|
|
int allocated; /* count of alloced */
|
|
|
|
short forksize; /* fork's inode space */
|
|
|
|
char whichfork; /* data or attr fork */
|
|
|
|
char flags; /* flags */
|
2017-08-30 01:08:39 +08:00
|
|
|
#define XFS_BTCUR_BPRV_WASDEL (1<<0) /* was delayed */
|
|
|
|
#define XFS_BTCUR_BPRV_INVALID_OWNER (1<<1) /* for ext swap */
|
2005-04-17 06:20:36 +08:00
|
|
|
} b;
|
|
|
|
} bc_private; /* per-btree type data */
|
|
|
|
} xfs_btree_cur_t;
|
|
|
|
|
/* cursor flags */
#define XFS_BTREE_LONG_PTRS		(1<<0)	/* pointers are 64bits long */
#define XFS_BTREE_ROOT_IN_INODE		(1<<1)	/* root may be variable size */
#define XFS_BTREE_LASTREC_UPDATE	(1<<2)	/* track last rec externally */
#define XFS_BTREE_CRC_BLOCKS		(1<<3)	/* uses extended btree blocks */
xfs: support btrees with overlapping intervals for keys
On a filesystem with both reflink and reverse mapping enabled, it's
possible to have multiple rmap records referring to the same blocks on
disk. When overlapping intervals are possible, querying a classic
btree to find all records intersecting a given interval is inefficient
because we cannot use the left side of the search interval to filter
out non-matching records the same way that we can use the existing
btree key to filter out records coming after the right side of the
search interval. This will become important once we want to use the
rmap btree to rebuild BMBTs, or implement the (future) fsmap ioctl.
(For the non-overlapping case, we can perform such queries trivially
by starting at the left side of the interval and walking the tree
until we pass the right side.)
Therefore, extend the btree code to come closer to supporting
intervals as a first-class record attribute. This involves widening
the btree node's key space to store both the lowest key reachable via
the node pointer (as the btree does now) and the highest key reachable
via the same pointer and teaching the btree modifying functions to
keep the highest-key records up to date.
This behavior can be turned on via a new btree ops flag so that btrees
that cannot store overlapping intervals don't pay the overhead costs
in terms of extra code and disk format changes.
When we're deleting a record in a btree that supports overlapped
interval records and the deletion results in two btree blocks being
joined, we defer updating the high/low keys until after all possible
joining (at higher levels in the tree) have finished. At this point,
the btree pointers at all levels have been updated to remove the empty
blocks and we can update the low and high keys.
When we're doing this, we must be careful to update the keys of all
node pointers up to the root instead of stopping at the first set of
keys that don't need updating. This is because it's possible for a
single deletion to cause joining of multiple levels of tree, and so
we need to update everything going back to the root.
The diff_two_keys functions return < 0, 0, or > 0 if key1 is less than,
equal to, or greater than key2, respectively. This is consistent
with the rest of the kernel and the C library.
In btree_updkeys(), we need to evaluate the force_all parameter before
running the key diff to avoid reading uninitialized memory when we're
forcing a key update. This happens when we've allocated an empty slot
at level N + 1 to point to a new block at level N and we're in the
process of filling out the new keys.
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Dave Chinner <david@fromorbit.com>
2016-08-03 09:08:36 +08:00
|
|
|
#define XFS_BTREE_OVERLAPPING (1<<4) /* overlapping intervals */
|
2008-10-30 13:54:22 +08:00
|
|
|
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
#define XFS_BTREE_NOERROR 0
|
|
|
|
#define XFS_BTREE_ERROR 1
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Convert from buffer to btree block header.
|
|
|
|
*/
|
2011-07-23 07:40:15 +08:00
|
|
|
#define XFS_BUF_TO_BLOCK(bp) ((struct xfs_btree_block *)((bp)->b_addr))
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2017-10-18 12:37:33 +08:00
|
|
|
/*
|
|
|
|
* Internal long and short btree block checks. They return NULL if the
|
|
|
|
* block is ok or the address of the failed check otherwise.
|
|
|
|
*/
|
|
|
|
xfs_failaddr_t __xfs_btree_check_lblock(struct xfs_btree_cur *cur,
|
|
|
|
struct xfs_btree_block *block, int level, struct xfs_buf *bp);
|
|
|
|
xfs_failaddr_t __xfs_btree_check_sblock(struct xfs_btree_cur *cur,
|
|
|
|
struct xfs_btree_block *block, int level, struct xfs_buf *bp);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
|
2008-10-30 13:54:53 +08:00
|
|
|
* Check that block header is ok.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2008-10-30 13:54:53 +08:00
|
|
|
int
|
|
|
|
xfs_btree_check_block(
|
|
|
|
struct xfs_btree_cur *cur, /* btree cursor */
|
|
|
|
struct xfs_btree_block *block, /* generic btree block pointer */
|
2005-04-17 06:20:36 +08:00
|
|
|
int level, /* level of the btree block */
|
|
|
|
struct xfs_buf *bp); /* buffer containing block, if any */
|
|
|
|
|
|
|
|
/*
|
2008-10-30 13:54:53 +08:00
|
|
|
* Check that (long) pointer is ok.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2017-10-18 12:37:33 +08:00
|
|
|
bool /* error (0 or EFSCORRUPTED) */
|
2005-04-17 06:20:36 +08:00
|
|
|
xfs_btree_check_lptr(
|
2008-10-30 13:54:53 +08:00
|
|
|
struct xfs_btree_cur *cur, /* btree cursor */
|
2017-10-18 12:37:33 +08:00
|
|
|
xfs_fsblock_t fsbno, /* btree block disk address */
|
|
|
|
int level); /* btree block level */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check that (short) pointer is ok.
|
|
|
|
*/
|
|
|
|
bool /* error (0 or EFSCORRUPTED) */
|
|
|
|
xfs_btree_check_sptr(
|
|
|
|
struct xfs_btree_cur *cur, /* btree cursor */
|
|
|
|
xfs_agblock_t agbno, /* btree block disk address */
|
2005-04-17 06:20:36 +08:00
|
|
|
int level); /* btree block level */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Delete the btree cursor.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
xfs_btree_del_cursor(
|
|
|
|
xfs_btree_cur_t *cur, /* btree cursor */
|
|
|
|
int error); /* del because of error */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Duplicate the btree cursor.
|
|
|
|
* Allocate a new one, copy the record, re-get the buffers.
|
|
|
|
*/
|
|
|
|
int /* error */
|
|
|
|
xfs_btree_dup_cursor(
|
|
|
|
xfs_btree_cur_t *cur, /* input cursor */
|
|
|
|
xfs_btree_cur_t **ncur);/* output cursor */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Get a buffer for the block, return it with no data read.
|
|
|
|
* Long-form addressing.
|
|
|
|
*/
|
|
|
|
struct xfs_buf * /* buffer for fsbno */
|
|
|
|
xfs_btree_get_bufl(
|
|
|
|
struct xfs_mount *mp, /* file system mount point */
|
|
|
|
struct xfs_trans *tp, /* transaction pointer */
|
|
|
|
xfs_fsblock_t fsbno, /* file system block number */
|
|
|
|
uint lock); /* lock flags for get_buf */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Get a buffer for the block, return it with no data read.
|
|
|
|
* Short-form addressing.
|
|
|
|
*/
|
|
|
|
struct xfs_buf * /* buffer for agno/agbno */
|
|
|
|
xfs_btree_get_bufs(
|
|
|
|
struct xfs_mount *mp, /* file system mount point */
|
|
|
|
struct xfs_trans *tp, /* transaction pointer */
|
|
|
|
xfs_agnumber_t agno, /* allocation group number */
|
|
|
|
xfs_agblock_t agbno, /* allocation group block number */
|
|
|
|
uint lock); /* lock flags for get_buf */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check for the cursor referring to the last block at the given level.
|
|
|
|
*/
|
|
|
|
int /* 1=is last block, 0=not last block */
|
|
|
|
xfs_btree_islastblock(
|
|
|
|
xfs_btree_cur_t *cur, /* btree cursor */
|
|
|
|
int level); /* level to check */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Compute first and last byte offsets for the fields given.
|
|
|
|
* Interprets the offsets table, which contains struct field offsets.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
xfs_btree_offsets(
|
2017-06-17 02:00:05 +08:00
|
|
|
int64_t fields, /* bitmask of fields */
|
2005-04-17 06:20:36 +08:00
|
|
|
const short *offsets,/* table of field offsets */
|
|
|
|
int nbits, /* number of bits to inspect */
|
|
|
|
int *first, /* output: first byte offset */
|
|
|
|
int *last); /* output: last byte offset */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Get a buffer for the block, return it read in.
|
|
|
|
* Long-form addressing.
|
|
|
|
*/
|
|
|
|
int /* error */
|
|
|
|
xfs_btree_read_bufl(
|
|
|
|
struct xfs_mount *mp, /* file system mount point */
|
|
|
|
struct xfs_trans *tp, /* transaction pointer */
|
|
|
|
xfs_fsblock_t fsbno, /* file system block number */
|
|
|
|
uint lock, /* lock flags for read_buf */
|
|
|
|
struct xfs_buf **bpp, /* buffer for fsbno */
|
2012-11-12 19:54:08 +08:00
|
|
|
int refval, /* ref count value for buffer */
|
2012-11-14 14:54:40 +08:00
|
|
|
const struct xfs_buf_ops *ops);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Read-ahead the block, don't wait for it, don't return a buffer.
|
|
|
|
* Long-form addressing.
|
|
|
|
*/
|
|
|
|
void /* error */
|
|
|
|
xfs_btree_reada_bufl(
|
|
|
|
struct xfs_mount *mp, /* file system mount point */
|
|
|
|
xfs_fsblock_t fsbno, /* file system block number */
|
2012-11-12 19:54:08 +08:00
|
|
|
xfs_extlen_t count, /* count of filesystem blocks */
|
2012-11-14 14:54:40 +08:00
|
|
|
const struct xfs_buf_ops *ops);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Read-ahead the block, don't wait for it, don't return a buffer.
|
|
|
|
* Short-form addressing.
|
|
|
|
*/
|
|
|
|
void /* error */
|
|
|
|
xfs_btree_reada_bufs(
|
|
|
|
struct xfs_mount *mp, /* file system mount point */
|
|
|
|
xfs_agnumber_t agno, /* allocation group number */
|
|
|
|
xfs_agblock_t agbno, /* allocation group block number */
|
2012-11-12 19:54:08 +08:00
|
|
|
xfs_extlen_t count, /* count of filesystem blocks */
|
2012-11-14 14:54:40 +08:00
|
|
|
const struct xfs_buf_ops *ops);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2012-11-14 06:40:27 +08:00
|
|
|
/*
|
|
|
|
* Initialise a new btree block header
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
xfs_btree_init_block(
|
|
|
|
struct xfs_mount *mp,
|
|
|
|
struct xfs_buf *bp,
|
2017-01-28 15:16:39 +08:00
|
|
|
xfs_btnum_t btnum,
|
2012-11-14 06:40:27 +08:00
|
|
|
__u16 level,
|
|
|
|
__u16 numrecs,
|
2013-04-22 03:53:46 +08:00
|
|
|
__u64 owner,
|
2012-11-14 06:40:27 +08:00
|
|
|
unsigned int flags);
|
2008-10-30 13:55:34 +08:00
|
|
|
|
2013-04-22 03:53:46 +08:00
|
|
|
void
|
|
|
|
xfs_btree_init_block_int(
|
|
|
|
struct xfs_mount *mp,
|
|
|
|
struct xfs_btree_block *buf,
|
|
|
|
xfs_daddr_t blkno,
|
2017-01-28 15:16:39 +08:00
|
|
|
xfs_btnum_t btnum,
|
2013-04-22 03:53:46 +08:00
|
|
|
__u16 level,
|
|
|
|
__u16 numrecs,
|
|
|
|
__u64 owner,
|
|
|
|
unsigned int flags);
|
|
|
|
|
2008-10-30 13:55:45 +08:00
|
|
|
/*
|
|
|
|
* Common btree core entry points.
|
|
|
|
*/
|
|
|
|
int xfs_btree_increment(struct xfs_btree_cur *, int, int *);
|
2008-10-30 13:55:58 +08:00
|
|
|
int xfs_btree_decrement(struct xfs_btree_cur *, int, int *);
|
2008-10-30 13:56:09 +08:00
|
|
|
int xfs_btree_lookup(struct xfs_btree_cur *, xfs_lookup_t, int *);
|
2008-10-30 13:56:32 +08:00
|
|
|
int xfs_btree_update(struct xfs_btree_cur *, union xfs_btree_rec *);
|
2008-10-30 13:57:28 +08:00
|
|
|
int xfs_btree_new_iroot(struct xfs_btree_cur *, int *, int *);
|
2008-10-30 13:57:40 +08:00
|
|
|
int xfs_btree_insert(struct xfs_btree_cur *, int *);
|
2008-10-30 13:58:01 +08:00
|
|
|
int xfs_btree_delete(struct xfs_btree_cur *, int *);
|
2008-10-30 13:58:11 +08:00
|
|
|
int xfs_btree_get_rec(struct xfs_btree_cur *, union xfs_btree_rec **, int *);
|
2017-06-17 02:00:05 +08:00
|
|
|
int xfs_btree_change_owner(struct xfs_btree_cur *cur, uint64_t new_owner,
|
xfs: recovery of swap extents operations for CRC filesystems
This is the recovery side of the btree block owner change operation
performed by swapext on CRC enabled filesystems. We detect that an
owner change is needed by the flag that has been placed on the inode
log format flag field. Because the inode recovery is being replayed
after the buffers that make up the BMBT in the given checkpoint, we
can walk all the buffers and directly modify them when we see the
flag set on an inode.
Because the inode can be relogged and hence present in multiple
chekpoints with the "change owner" flag set, we could do multiple
passes across the inode to do this change. While this isn't optimal,
we can't directly ignore the flag as there may be multiple
independent swap extent operations being replayed on the same inode
in different checkpoints so we can't ignore them.
Further, because the owner change operation uses ordered buffers, we
might have buffers that are newer on disk than the current
checkpoint and so already have the owner changed in them. Hence we
cannot just peek at a buffer in the tree and check that it has the
correct owner and assume that the change was completed.
So, for the moment just brute force the owner change every time we
see an inode with the flag set. Note that we have to be careful here
because the owner of the buffers may point to either the old owner
or the new owner. Currently the verifier can't verify the owner
directly, so there is no failure case here right now. If we verify
the owner exactly in future, then we'll have to take this into
account.
This was tested in terms of normal operation via xfstests - all of
the fsr tests now pass without failure. however, we really need to
modify xfs/227 to stress v3 inodes correctly to ensure we fully
cover this case for v5 filesystems.
In terms of recovery testing, I used a hacked version of xfs_fsr
that held the temp inode open for a few seconds before exiting so
that the filesystem could be shut down with an open owner change
recovery flags set on at least the temp inode. fsr leaves the temp
inode unlinked and in btree format, so this was necessary for the
owner change to be reliably replayed.
logprint confirmed the tmp inode in the log had the correct flag set:
INO: cnt:3 total:3 a:0x69e9e0 len:56 a:0x69ea20 len:176 a:0x69eae0 len:88
INODE: #regs:3 ino:0x44 flags:0x209 dsize:88
^^^^^
0x200 is set, indicating a data fork owner change needed to be
replayed on inode 0x44. A printk in the revoery code confirmed that
the inode change was recovered:
XFS (vdc): Mounting Filesystem
XFS (vdc): Starting recovery (logdev: internal)
recovering owner change ino 0x44
XFS (vdc): Version 5 superblock detected. This kernel has EXPERIMENTAL support enabled!
Use of these features in this kernel is at your own risk!
XFS (vdc): Ending recovery (logdev: internal)
The script used to test this was:
$ cat ./recovery-fsr.sh
#!/bin/bash
dev=/dev/vdc
mntpt=/mnt/scratch
testfile=$mntpt/testfile
umount $mntpt
mkfs.xfs -f -m crc=1 $dev
mount $dev $mntpt
chmod 777 $mntpt
for i in `seq 10000 -1 0`; do
xfs_io -f -d -c "pwrite $(($i * 4096)) 4096" $testfile > /dev/null 2>&1
done
xfs_bmap -vp $testfile |head -20
xfs_fsr -d -v $testfile &
sleep 10
/home/dave/src/xfstests-dev/src/godown -f $mntpt
wait
umount $mntpt
xfs_logprint -t $dev |tail -20
time mount $dev $mntpt
xfs_bmap -vp $testfile
umount $mntpt
$
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Mark Tinguely <tinguely@sgi.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
2013-08-30 08:23:45 +08:00
|
|
|
struct list_head *buffer_list);
|
2008-10-30 13:55:45 +08:00
|
|
|
|
2013-04-22 03:53:46 +08:00
|
|
|
/*
|
|
|
|
* btree block CRC helpers
|
|
|
|
*/
|
|
|
|
void xfs_btree_lblock_calc_crc(struct xfs_buf *);
|
|
|
|
bool xfs_btree_lblock_verify_crc(struct xfs_buf *);
|
|
|
|
void xfs_btree_sblock_calc_crc(struct xfs_buf *);
|
|
|
|
bool xfs_btree_sblock_verify_crc(struct xfs_buf *);
|
|
|
|
|
2008-10-30 13:58:21 +08:00
|
|
|
/*
|
|
|
|
* Internal btree helpers also used by xfs_bmap.c.
|
|
|
|
*/
|
|
|
|
void xfs_btree_log_block(struct xfs_btree_cur *, struct xfs_buf *, int);
|
|
|
|
void xfs_btree_log_recs(struct xfs_btree_cur *, struct xfs_buf *, int, int);
|
|
|
|
|
2008-10-30 13:55:34 +08:00
|
|
|
/*
|
|
|
|
* Helpers.
|
|
|
|
*/
|
2008-10-30 13:55:45 +08:00
|
|
|
static inline int xfs_btree_get_numrecs(struct xfs_btree_block *block)
|
|
|
|
{
|
|
|
|
return be16_to_cpu(block->bb_numrecs);
|
|
|
|
}
|
|
|
|
|
2008-10-30 13:56:43 +08:00
|
|
|
static inline void xfs_btree_set_numrecs(struct xfs_btree_block *block,
|
2017-06-17 02:00:05 +08:00
|
|
|
uint16_t numrecs)
|
2008-10-30 13:56:43 +08:00
|
|
|
{
|
|
|
|
block->bb_numrecs = cpu_to_be16(numrecs);
|
|
|
|
}
|
|
|
|
|
2008-10-30 13:55:34 +08:00
|
|
|
static inline int xfs_btree_get_level(struct xfs_btree_block *block)
|
|
|
|
{
|
|
|
|
return be16_to_cpu(block->bb_level);
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Min and max functions for extlen, agblock, fileoff, and filblks types.
|
|
|
|
*/
|
2007-06-28 14:43:39 +08:00
|
|
|
#define XFS_EXTLEN_MIN(a,b) min_t(xfs_extlen_t, (a), (b))
|
|
|
|
#define XFS_EXTLEN_MAX(a,b) max_t(xfs_extlen_t, (a), (b))
|
|
|
|
#define XFS_AGBLOCK_MIN(a,b) min_t(xfs_agblock_t, (a), (b))
|
|
|
|
#define XFS_AGBLOCK_MAX(a,b) max_t(xfs_agblock_t, (a), (b))
|
|
|
|
#define XFS_FILEOFF_MIN(a,b) min_t(xfs_fileoff_t, (a), (b))
|
|
|
|
#define XFS_FILEOFF_MAX(a,b) max_t(xfs_fileoff_t, (a), (b))
|
|
|
|
#define XFS_FILBLKS_MIN(a,b) min_t(xfs_filblks_t, (a), (b))
|
|
|
|
#define XFS_FILBLKS_MAX(a,b) max_t(xfs_filblks_t, (a), (b))
|
2005-11-02 11:38:42 +08:00
|
|
|
|
2018-01-09 02:51:03 +08:00
|
|
|
xfs_failaddr_t xfs_btree_sblock_v5hdr_verify(struct xfs_buf *bp);
|
|
|
|
xfs_failaddr_t xfs_btree_sblock_verify(struct xfs_buf *bp,
|
|
|
|
unsigned int max_recs);
|
|
|
|
xfs_failaddr_t xfs_btree_lblock_v5hdr_verify(struct xfs_buf *bp,
|
|
|
|
uint64_t owner);
|
|
|
|
xfs_failaddr_t xfs_btree_lblock_verify(struct xfs_buf *bp,
|
|
|
|
unsigned int max_recs);
|
2018-01-09 02:51:00 +08:00
|
|
|
|
2018-04-07 01:09:42 +08:00
|
|
|
uint xfs_btree_compute_maxlevels(uint *limits, unsigned long len);
|
2018-05-10 01:02:01 +08:00
|
|
|
unsigned long long xfs_btree_calc_size(uint *limits, unsigned long long len);
|
2016-01-04 13:13:21 +08:00
|
|
|
|
2016-08-03 09:10:21 +08:00
|
|
|
/* return codes */
|
|
|
|
#define XFS_BTREE_QUERY_RANGE_CONTINUE 0 /* keep iterating */
|
|
|
|
#define XFS_BTREE_QUERY_RANGE_ABORT 1 /* stop iterating */
|
|
|
|
typedef int (*xfs_btree_query_range_fn)(struct xfs_btree_cur *cur,
|
|
|
|
union xfs_btree_rec *rec, void *priv);
|
|
|
|
|
|
|
|
int xfs_btree_query_range(struct xfs_btree_cur *cur,
|
|
|
|
union xfs_btree_irec *low_rec, union xfs_btree_irec *high_rec,
|
|
|
|
xfs_btree_query_range_fn fn, void *priv);
|
2017-03-29 05:56:35 +08:00
|
|
|
int xfs_btree_query_all(struct xfs_btree_cur *cur, xfs_btree_query_range_fn fn,
|
|
|
|
void *priv);
|
2016-08-03 09:10:21 +08:00
|
|
|
|
2016-08-03 09:10:55 +08:00
|
|
|
typedef int (*xfs_btree_visit_blocks_fn)(struct xfs_btree_cur *cur, int level,
|
|
|
|
void *data);
|
|
|
|
int xfs_btree_visit_blocks(struct xfs_btree_cur *cur,
|
|
|
|
xfs_btree_visit_blocks_fn fn, void *data);
|
|
|
|
|
2016-09-19 08:25:20 +08:00
|
|
|
int xfs_btree_count_blocks(struct xfs_btree_cur *cur, xfs_extlen_t *blocks);
|
|
|
|
|
2017-06-17 02:00:07 +08:00
|
|
|
union xfs_btree_rec *xfs_btree_rec_addr(struct xfs_btree_cur *cur, int n,
|
|
|
|
struct xfs_btree_block *block);
|
|
|
|
union xfs_btree_key *xfs_btree_key_addr(struct xfs_btree_cur *cur, int n,
|
|
|
|
struct xfs_btree_block *block);
|
|
|
|
union xfs_btree_key *xfs_btree_high_key_addr(struct xfs_btree_cur *cur, int n,
|
|
|
|
struct xfs_btree_block *block);
|
|
|
|
union xfs_btree_ptr *xfs_btree_ptr_addr(struct xfs_btree_cur *cur, int n,
|
|
|
|
struct xfs_btree_block *block);
|
|
|
|
int xfs_btree_lookup_get_block(struct xfs_btree_cur *cur, int level,
|
|
|
|
union xfs_btree_ptr *pp, struct xfs_btree_block **blkp);
|
|
|
|
struct xfs_btree_block *xfs_btree_get_block(struct xfs_btree_cur *cur,
|
|
|
|
int level, struct xfs_buf **bpp);
|
2017-10-18 12:37:37 +08:00
|
|
|
bool xfs_btree_ptr_is_null(struct xfs_btree_cur *cur, union xfs_btree_ptr *ptr);
|
|
|
|
int64_t xfs_btree_diff_two_ptrs(struct xfs_btree_cur *cur,
|
|
|
|
const union xfs_btree_ptr *a,
|
|
|
|
const union xfs_btree_ptr *b);
|
|
|
|
void xfs_btree_get_sibling(struct xfs_btree_cur *cur,
|
|
|
|
struct xfs_btree_block *block,
|
|
|
|
union xfs_btree_ptr *ptr, int lr);
|
2017-10-26 06:03:46 +08:00
|
|
|
void xfs_btree_get_keys(struct xfs_btree_cur *cur,
|
|
|
|
struct xfs_btree_block *block, union xfs_btree_key *key);
|
|
|
|
union xfs_btree_key *xfs_btree_high_key_from_key(struct xfs_btree_cur *cur,
|
|
|
|
union xfs_btree_key *key);
|
2018-01-17 10:52:12 +08:00
|
|
|
int xfs_btree_has_record(struct xfs_btree_cur *cur, union xfs_btree_irec *low,
|
|
|
|
union xfs_btree_irec *high, bool *exists);
|
2018-05-10 01:02:03 +08:00
|
|
|
bool xfs_btree_has_more_records(struct xfs_btree_cur *cur);
|
2017-06-17 02:00:07 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
#endif /* __XFS_BTREE_H__ */
|