btrfs: reloc: move error handling of build_backref_tree() to backref.c

The error cleanup will be extracted as a new function, btrfs_backref_error_cleanup(), moved to backref.c, and exported for later use.

Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
commit 1b23ea180b
parent fc997ed05a
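
For orientation, a minimal sketch of the caller pattern this export enables, assuming a build_backref_tree()-style error path where cache->useless_node and cache->pending_edge may still hold partially linked entries (illustrative only; it mirrors the relocation.c hunk below):

	/*
	 * Error path of a backref-tree builder: tear down everything that was
	 * partially built.  btrfs_backref_error_cleanup() drains the cache's
	 * useless_node and pending_edge lists, frees the orphaned nodes and
	 * edges, and finally releases @node itself.
	 */
	if (err) {
		btrfs_backref_error_cleanup(cache, node);
		return ERR_PTR(err);
	}
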
fs/btrfs/backref.c
@@ -3063,3 +3063,57 @@ int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
 	}
 	return 0;
 }
+
+void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
+				 struct btrfs_backref_node *node)
+{
+	struct btrfs_backref_node *lower;
+	struct btrfs_backref_node *upper;
+	struct btrfs_backref_edge *edge;
+
+	while (!list_empty(&cache->useless_node)) {
+		lower = list_first_entry(&cache->useless_node,
+					 struct btrfs_backref_node, list);
+		list_del_init(&lower->list);
+	}
+	while (!list_empty(&cache->pending_edge)) {
+		edge = list_first_entry(&cache->pending_edge,
+					struct btrfs_backref_edge, list[UPPER]);
+		list_del(&edge->list[UPPER]);
+		list_del(&edge->list[LOWER]);
+		lower = edge->node[LOWER];
+		upper = edge->node[UPPER];
+		btrfs_backref_free_edge(cache, edge);
+
+		/*
+		 * Lower is no longer linked to any upper backref nodes and
+		 * isn't in the cache, we can free it ourselves.
+		 */
+		if (list_empty(&lower->upper) &&
+		    RB_EMPTY_NODE(&lower->rb_node))
+			list_add(&lower->list, &cache->useless_node);
+
+		if (!RB_EMPTY_NODE(&upper->rb_node))
+			continue;
+
+		/* Add this guy's upper edges to the list to process */
+		list_for_each_entry(edge, &upper->upper, list[LOWER])
+			list_add_tail(&edge->list[UPPER],
+				      &cache->pending_edge);
+		if (list_empty(&upper->upper))
+			list_add(&upper->list, &cache->useless_node);
+	}
+
+	while (!list_empty(&cache->useless_node)) {
+		lower = list_first_entry(&cache->useless_node,
+					 struct btrfs_backref_node, list);
+		list_del_init(&lower->list);
+		if (lower == node)
+			node = NULL;
+		btrfs_backref_free_node(cache, lower);
+	}
+
+	btrfs_backref_cleanup_node(cache, node);
+	ASSERT(list_empty(&cache->useless_node) &&
+	       list_empty(&cache->pending_edge));
+}
fs/btrfs/backref.h
@@ -372,4 +372,7 @@ int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
 int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
 				     struct btrfs_backref_node *start);
 
+void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
+				 struct btrfs_backref_node *node);
+
 #endif
fs/btrfs/relocation.c
@@ -474,8 +474,6 @@ static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
 	/* For searching parent of TREE_BLOCK_REF */
 	struct btrfs_path *path;
 	struct btrfs_backref_node *cur;
-	struct btrfs_backref_node *upper;
-	struct btrfs_backref_node *lower;
 	struct btrfs_backref_node *node = NULL;
 	struct btrfs_backref_edge *edge;
 	int ret;
@@ -532,51 +530,7 @@ out:
 	btrfs_backref_iter_free(iter);
 	btrfs_free_path(path);
 	if (err) {
-		while (!list_empty(&cache->useless_node)) {
-			lower = list_first_entry(&cache->useless_node,
-					   struct btrfs_backref_node, list);
-			list_del_init(&lower->list);
-		}
-		while (!list_empty(&cache->pending_edge)) {
-			edge = list_first_entry(&cache->pending_edge,
-					struct btrfs_backref_edge, list[UPPER]);
-			list_del(&edge->list[UPPER]);
-			list_del(&edge->list[LOWER]);
-			lower = edge->node[LOWER];
-			upper = edge->node[UPPER];
-			btrfs_backref_free_edge(cache, edge);
-
-			/*
-			 * Lower is no longer linked to any upper backref nodes
-			 * and isn't in the cache, we can free it ourselves.
-			 */
-			if (list_empty(&lower->upper) &&
-			    RB_EMPTY_NODE(&lower->rb_node))
-				list_add(&lower->list, &cache->useless_node);
-
-			if (!RB_EMPTY_NODE(&upper->rb_node))
-				continue;
-
-			/* Add this guy's upper edges to the list to process */
-			list_for_each_entry(edge, &upper->upper, list[LOWER])
-				list_add_tail(&edge->list[UPPER],
-					      &cache->pending_edge);
-			if (list_empty(&upper->upper))
-				list_add(&upper->list, &cache->useless_node);
-		}
-
-		while (!list_empty(&cache->useless_node)) {
-			lower = list_first_entry(&cache->useless_node,
-					   struct btrfs_backref_node, list);
-			list_del_init(&lower->list);
-			if (lower == node)
-				node = NULL;
-			btrfs_backref_free_node(cache, lower);
-		}
-
-		btrfs_backref_cleanup_node(cache, node);
-		ASSERT(list_empty(&cache->useless_node) &&
-		       list_empty(&cache->pending_edge));
+		btrfs_backref_error_cleanup(cache, node);
 		return ERR_PTR(err);
 	}
 	ASSERT(!node || !node->detached);