btrfs: backref: introduce the skeleton of btrfs_backref_iter
Due to the complex nature of the btrfs extent tree, when we want to iterate all backrefs of one extent, this involves quite a lot of work, like searching the EXTENT_ITEM/METADATA_ITEM, and iteration through inline and keyed backrefs. Normally this would result in complex code, something like:

  btrfs_search_slot()
  /* Ensure we are at EXTENT_ITEM/METADATA_ITEM */
  while (1) {            /* Loop for extent tree items */
          while (ptr < end) {     /* Loop for inlined items */
                  /* Real work here */
          }
  next:
          ret = btrfs_next_item()
          /* Ensure we're still at keyed item for specified bytenr */
  }

The idea of btrfs_backref_iter is to avoid such a complex and hard to read code structure, replacing it with something like the following:

  iter = btrfs_backref_iter_alloc();
  ret = btrfs_backref_iter_start(iter, bytenr);
  if (ret < 0)
          goto out;
  for (; ; ret = btrfs_backref_iter_next(iter)) {
          /* Real work here */
  }
  out:
  btrfs_backref_iter_free(iter);

This patch is just the skeleton + btrfs_backref_iter_start() code. Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com> Reviewed-by: Josef Bacik <josef@toxicpanda.com> Signed-off-by: Qu Wenruo <wqu@suse.com> Reviewed-by: David Sterba <dsterba@suse.com> Signed-off-by: David Sterba <dsterba@suse.com>
This commit is contained in:
parent
78d933c79c
commit
a37f232b7b
|
@ -2295,3 +2295,113 @@ void free_ipath(struct inode_fs_paths *ipath)
|
|||
kvfree(ipath->fspath);
|
||||
kfree(ipath);
|
||||
}
|
||||
|
||||
/*
 * Allocate a backref iterator for @fs_info.
 *
 * The iterator only supports iteration in the commit root, so the
 * embedded path is set up with search_commit_root and skip_locking.
 *
 * Returns the new iterator, or NULL on allocation failure.  The caller
 * must release it with btrfs_backref_iter_free().
 */
struct btrfs_backref_iter *btrfs_backref_iter_alloc(
		struct btrfs_fs_info *fs_info, gfp_t gfp_flag)
{
	struct btrfs_backref_iter *ret;

	ret = kzalloc(sizeof(*ret), gfp_flag);
	if (!ret)
		return NULL;

	ret->path = btrfs_alloc_path();
	/*
	 * Bug fix: the original checked "if (!ret)" here, which is always
	 * false at this point (ret was just verified non-NULL above), so a
	 * failed btrfs_alloc_path() went undetected and left ret->path NULL,
	 * crashing later users such as btrfs_backref_iter_start().
	 */
	if (!ret->path) {
		kfree(ret);
		return NULL;
	}

	/* Current backref iterator only supports iteration in commit root */
	ret->path->search_commit_root = 1;
	ret->path->skip_locking = 1;
	ret->fs_info = fs_info;

	return ret;
}
|
||||
|
||||
/*
 * Start the backref iteration for extent at @bytenr.
 *
 * Locates the EXTENT_ITEM/METADATA_ITEM of the extent in the commit root of
 * the extent tree and positions the iterator cursor at its first backref
 * (either the first inline backref, or the first keyed backref item if no
 * inline backrefs exist).
 *
 * Returns 0 on success, -ENOENT if the extent or any tree backref for it
 * cannot be found, -ENOTSUPP for data extents (only tree-block backrefs are
 * supported), -EUCLEAN on a corrupted extent tree, or other negative errno
 * from the tree search.  On any error the path is released.
 */
int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
{
	struct btrfs_fs_info *fs_info = iter->fs_info;
	struct btrfs_path *path = iter->path;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	int ret;

	/*
	 * Search for (bytenr, METADATA_ITEM, (u64)-1).  No item can have
	 * offset == (u64)-1, so the search lands one slot past the wanted
	 * item and we step back below; this finds either key type
	 * (EXTENT_ITEM or METADATA_ITEM) for @bytenr.
	 */
	key.objectid = bytenr;
	key.type = BTRFS_METADATA_ITEM_KEY;
	key.offset = (u64)-1;
	iter->bytenr = bytenr;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	if (ret == 0) {
		/* An exact match on offset (u64)-1 should be impossible */
		ret = -EUCLEAN;
		goto release;
	}
	if (path->slots[0] == 0) {
		/* Nothing before the search position: corrupted tree */
		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
		ret = -EUCLEAN;
		goto release;
	}
	/* Step back to the item that is <= the search key */
	path->slots[0]--;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	if ((key.type != BTRFS_EXTENT_ITEM_KEY &&
	     key.type != BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) {
		ret = -ENOENT;
		goto release;
	}
	memcpy(&iter->cur_key, &key, sizeof(key));
	/* Record the item's byte range inside the leaf for inline iteration */
	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
	iter->end_ptr = (u32)(iter->item_ptr +
			      btrfs_item_size_nr(path->nodes[0], path->slots[0]));
	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_extent_item);

	/*
	 * Only support iteration on tree backref yet.
	 *
	 * This is an extra precaution for non skinny-metadata, where
	 * EXTENT_ITEM is also used for tree blocks, that we can only use
	 * extent flags to determine if it's a tree block.
	 */
	if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
		ret = -ENOTSUPP;
		goto release;
	}
	/* First inline backref (if any) starts right after the extent item */
	iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));

	/* If there is no inline backref, go search for keyed backref */
	if (iter->cur_ptr >= iter->end_ptr) {
		ret = btrfs_next_item(fs_info->extent_root, path);

		/* No inline nor keyed ref */
		if (ret > 0) {
			ret = -ENOENT;
			goto release;
		}
		if (ret < 0)
			goto release;

		btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
				      path->slots[0]);
		/* The next item must be a tree backref for the same bytenr */
		if (iter->cur_key.objectid != bytenr ||
		    (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
		     iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
			ret = -ENOENT;
			goto release;
		}
		/* Re-point the cursor at the keyed backref item */
		iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
							   path->slots[0]);
		iter->item_ptr = iter->cur_ptr;
		iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size_nr(
					path->nodes[0], path->slots[0]));
	}

	return 0;
release:
	btrfs_backref_iter_release(iter);
	return ret;
}
|
||||
|
|
|
@ -78,4 +78,42 @@ struct prelim_ref {
|
|||
u64 wanted_disk_byte;
|
||||
};
|
||||
|
||||
/*
 * Iterate backrefs of one extent.
 *
 * Now it only supports iteration of tree block in commit root.
 */
struct btrfs_backref_iter {
	/* Logical bytenr of the extent whose backrefs are being iterated */
	u64 bytenr;
	/* Path positioned at the current extent tree item; commit root only */
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info;
	/* Key of the item the cursor currently points into */
	struct btrfs_key cur_key;
	/* Offset (within the leaf) of the current item's data start */
	u32 item_ptr;
	/* Offset of the current backref inside the item */
	u32 cur_ptr;
	/* Offset one past the end of the current item's data */
	u32 end_ptr;
};
|
||||
|
||||
struct btrfs_backref_iter *btrfs_backref_iter_alloc(
|
||||
struct btrfs_fs_info *fs_info, gfp_t gfp_flag);
|
||||
|
||||
static inline void btrfs_backref_iter_free(struct btrfs_backref_iter *iter)
|
||||
{
|
||||
if (!iter)
|
||||
return;
|
||||
btrfs_free_path(iter->path);
|
||||
kfree(iter);
|
||||
}
|
||||
|
||||
int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr);
|
||||
|
||||
static inline void btrfs_backref_iter_release(struct btrfs_backref_iter *iter)
|
||||
{
|
||||
iter->bytenr = 0;
|
||||
iter->item_ptr = 0;
|
||||
iter->cur_ptr = 0;
|
||||
iter->end_ptr = 0;
|
||||
btrfs_release_path(iter->path);
|
||||
memset(&iter->cur_key, 0, sizeof(iter->cur_key));
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
Loading…
Reference in New Issue