btrfs: reloc: refactor direct tree backref processing into its own function

For BTRFS_SHARED_BLOCK_REF_KEY, its processing is straightforward, as we
know the parent node bytenr directly.

If the parent is already cached, or is a root, call it a day.
If the parent is not cached, add it to the pending list.

This patch refactors this part into its own function,
handle_direct_tree_backref(), and adds a comment explaining the
@ref_key parameter.

Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Qu Wenruo 2020-03-05 14:06:29 +08:00 committed by David Sterba
parent 2433bea592
commit 4007ea87d9
1 changed file with 78 additions and 52 deletions


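To summarize the flow described above: a SHARED_BLOCK_REF whose objectid equals its offset points back at itself (only a reloc root does that); otherwise the offset is the parent bytenr, which is either already cached or gets queued on the pending list. The following is a minimal stand-alone sketch of that decision flow using made-up types and helpers (sketch_node, sketch_cache_has, ...), not the kernel's backref cache API; the real handle_direct_tree_backref() is in the diff below.

#include <stdbool.h>
#include <stdio.h>

struct sketch_node {
	unsigned long long bytenr;
	int level;
};

/* Stand-in for the rb-tree lookup (tree_search() in the real code). */
static bool sketch_cache_has(unsigned long long bytenr)
{
	return bytenr == 30507008ULL;	/* pretend this block is already cached */
}

/* Mirrors the three-way decision of the new helper, nothing more. */
static void sketch_handle_direct_backref(unsigned long long objectid,
					 unsigned long long offset,
					 const struct sketch_node *cur)
{
	if (objectid == offset) {
		/* Backref points to itself: reloc root self-reference. */
		printf("block %llu: reloc root self-reference\n", cur->bytenr);
	} else if (sketch_cache_has(offset)) {
		/* Parent already cached: just link an edge to it. */
		printf("parent %llu already cached, link the edge\n", offset);
	} else {
		/* Parent unknown: create it and put it on the pending list. */
		printf("parent %llu not cached, add it to the pending list\n",
		       offset);
	}
}

int main(void)
{
	const struct sketch_node cur = { .bytenr = 30408704ULL, .level = 0 };

	sketch_handle_direct_backref(cur.bytenr, cur.bytenr, &cur);	/* self */
	sketch_handle_direct_backref(cur.bytenr, 30507008ULL, &cur);	/* cached */
	sketch_handle_direct_backref(cur.bytenr, 30605312ULL, &cur);	/* pending */
	return 0;
}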
@@ -698,6 +698,81 @@ static struct btrfs_root *read_fs_root(struct btrfs_fs_info *fs_info,
 	return btrfs_get_fs_root(fs_info, &key, false);
 }
 
+/*
+ * Handle direct tree backref
+ *
+ * Direct tree backref means, the backref item shows its parent bytenr
+ * directly. This is for SHARED_BLOCK_REF backref (keyed or inlined).
+ *
+ * @ref_key:	The converted backref key.
+ *		For keyed backref, it's the item key.
+ *		For inlined backref, objectid is the bytenr,
+ *		type is btrfs_inline_ref_type, offset is
+ *		btrfs_inline_ref_offset.
+ */
+static int handle_direct_tree_backref(struct backref_cache *cache,
+				      struct btrfs_key *ref_key,
+				      struct backref_node *cur)
+{
+	struct backref_edge *edge;
+	struct backref_node *upper;
+	struct rb_node *rb_node;
+
+	ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY);
+
+	/* Only reloc root uses backref pointing to itself */
+	if (ref_key->objectid == ref_key->offset) {
+		struct btrfs_root *root;
+
+		cur->is_reloc_root = 1;
+		/* Only reloc backref cache cares about a specific root */
+		if (cache->is_reloc) {
+			root = find_reloc_root(cache->fs_info, cur->bytenr);
+			if (WARN_ON(!root))
+				return -ENOENT;
+			cur->root = root;
+		} else {
+			/*
+			 * For generic purpose backref cache, reloc root node
+			 * is useless.
+			 */
+			list_add(&cur->list, &cache->useless_node);
+		}
+		return 0;
+	}
+
+	edge = alloc_backref_edge(cache);
+	if (!edge)
+		return -ENOMEM;
+
+	rb_node = tree_search(&cache->rb_root, ref_key->offset);
+	if (!rb_node) {
+		/* Parent node not yet cached */
+		upper = alloc_backref_node(cache);
+		if (!upper) {
+			free_backref_edge(cache, edge);
+			return -ENOMEM;
+		}
+		upper->bytenr = ref_key->offset;
+		upper->level = cur->level + 1;
+
+		/*
+		 * Backrefs for the upper level block isn't cached, add the
+		 * block to pending list
+		 */
+		list_add_tail(&edge->list[UPPER], &cache->pending_edge);
+	} else {
+		/* Parent node already cached */
+		upper = rb_entry(rb_node, struct backref_node, rb_node);
+		ASSERT(upper->checked);
+		INIT_LIST_HEAD(&edge->list[UPPER]);
+	}
+	list_add_tail(&edge->list[LOWER], &cur->upper);
+	edge->node[LOWER] = cur;
+	edge->node[UPPER] = upper;
+	return 0;
+}
+
 /*
  * build backref tree for a given tree block. root of the backref tree
  * corresponds the tree block, leaves of the backref tree correspond
@@ -719,7 +794,6 @@ struct backref_node *build_backref_tree(struct reloc_control *rc,
 {
 	struct btrfs_backref_iter *iter;
 	struct backref_cache *cache = &rc->backref_cache;
-	struct btrfs_fs_info *fs_info = cache->fs_info;
 	/* For searching parent of TREE_BLOCK_REF */
 	struct btrfs_path *path;
 	struct btrfs_root *root;
@@ -841,59 +915,11 @@ again:
 
 		/* SHARED_BLOCK_REF means key.offset is the parent bytenr */
 		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
-			if (key.objectid == key.offset) {
-				cur->is_reloc_root = 1;
-				/* Only reloc backref cache cares exact root */
-				if (cache->is_reloc) {
-					root = find_reloc_root(fs_info,
-							       cur->bytenr);
-					if (WARN_ON(!root)) {
-						err = -ENOENT;
+			ret = handle_direct_tree_backref(cache, &key, cur);
+			if (ret < 0) {
+				err = ret;
 				goto out;
 			}
-					cur->root = root;
-				} else {
-					/*
-					 * For generic purpose backref cache,
-					 * reloc root node is useless.
-					 */
-					list_add(&cur->list,
-						 &cache->useless_node);
-				}
-				break;
-			}
-
-			edge = alloc_backref_edge(cache);
-			if (!edge) {
-				err = -ENOMEM;
-				goto out;
-			}
-			rb_node = tree_search(&cache->rb_root, key.offset);
-			if (!rb_node) {
-				upper = alloc_backref_node(cache);
-				if (!upper) {
-					free_backref_edge(cache, edge);
-					err = -ENOMEM;
-					goto out;
-				}
-				upper->bytenr = key.offset;
-				upper->level = cur->level + 1;
-
-				/*
-				 * backrefs for the upper level block isn't
-				 * cached, add the block to pending list
-				 */
-				list_add_tail(&edge->list[UPPER],
-					      &cache->pending_edge);
-			} else {
-				upper = rb_entry(rb_node, struct backref_node,
-						 rb_node);
-				ASSERT(upper->checked);
-				INIT_LIST_HEAD(&edge->list[UPPER]);
-			}
-			list_add_tail(&edge->list[LOWER], &cur->upper);
-			edge->node[LOWER] = cur;
-			edge->node[UPPER] = upper;
 			continue;
 		} else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
 			err = -EINVAL;