btrfs: add new helper btrfs_lock_and_flush_ordered_range
There is a certain idiom used in multiple places in btrfs' codebase, dealing with flushing an ordered range. Factor it out into a separate function that can be reused. Future patches will replace the existing open-coded instances with calls to this function.

Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Nikolay Borisov <nborisov@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
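The idiom in question, sketched from the helper introduced below (the variable setup is illustrative; the calls are exactly the ones the diff factors out):

	/* Repeated open-coded at each call site today (illustrative sketch): */
	struct btrfs_ordered_extent *ordered;

	while (1) {
		lock_extent_bits(io_tree, start, end, &cached_state);
		ordered = btrfs_lookup_ordered_range(inode, start,
						     end - start + 1);
		if (!ordered)
			break;
		unlock_extent_cached(io_tree, start, end, &cached_state);
		btrfs_start_ordered_extent(&inode->vfs_inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
	}

	/* After this patch, call sites collapse to a single call: */
	btrfs_lock_and_flush_ordered_range(io_tree, inode, start, end,
					   &cached_state);

The loop is needed because a new ordered extent may be created for the range while the extent lock is dropped to wait, so the lookup has to be retried under the lock until none is found.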
commit ffa87214c1
parent 1200b51f57
fs/btrfs/ordered-data.c:

@@ -962,6 +962,39 @@ out:
 	return index;
 }
 
+/*
+ * btrfs_lock_and_flush_ordered_range - Lock the passed range and ensure all
+ * pending ordered extents in it are run to completion.
+ *
+ * @tree:         IO tree used for locking out other users of the range
+ * @inode:        Inode whose ordered tree is to be searched
+ * @start:        Beginning of range to flush
+ * @end:          Last byte of range to lock
+ * @cached_state: If passed, will return the extent state responsible for the
+ * locked range. It's the caller's responsibility to free the cached state.
+ *
+ * This function always returns with the given range locked, ensuring that
+ * after it's called no ordered extent can be pending.
+ */
+void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
+					struct btrfs_inode *inode, u64 start,
+					u64 end,
+					struct extent_state **cached_state)
+{
+	struct btrfs_ordered_extent *ordered;
+
+	while (1) {
+		lock_extent_bits(tree, start, end, cached_state);
+		ordered = btrfs_lookup_ordered_range(inode, start,
+						     end - start + 1);
+		if (!ordered)
+			break;
+		unlock_extent_cached(tree, start, end, cached_state);
+		btrfs_start_ordered_extent(&inode->vfs_inode, ordered, 1);
+		btrfs_put_ordered_extent(ordered);
+	}
+}
+
 int __init ordered_data_init(void)
 {
 	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
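A minimal usage sketch (hypothetical caller, not part of this patch; it assumes the inode's io_tree is the tree being locked):

	struct extent_state *cached_state = NULL;

	/* Returns with [start, end] locked and no ordered extents pending. */
	btrfs_lock_and_flush_ordered_range(&inode->io_tree, inode, start, end,
					   &cached_state);

	/* ... operate on the flushed, locked range ... */

	unlock_extent_cached(&inode->io_tree, start, end, &cached_state);

Passing the cached_state through lets the final unlock reuse the extent state found during locking instead of searching the tree again.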
fs/btrfs/ordered-data.h:

@@ -188,6 +188,10 @@ u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
 			       const u64 range_start, const u64 range_len);
 u64 btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
 			     const u64 range_start, const u64 range_len);
+void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
+					struct btrfs_inode *inode, u64 start,
+					u64 end,
+					struct extent_state **cached_state);
 int __init ordered_data_init(void);
 void __cold ordered_data_exit(void);