btrfs: refactor btrfs_dec_test_* functions for ordered extents
The refactoring involves the following modifications:

- Return bool instead of int

- Parameter update for @cached of btrfs_dec_test_first_ordered_pending()
  For btrfs_dec_test_first_ordered_pending(), @cached is only used to
  return the finished ordered extent. Rename it to @finished_ret.

- Comment updates
  * Change one stale comment
    It still refers to btrfs_dec_test_ordered_pending(), but the context
    is calling btrfs_dec_test_first_ordered_pending().
  * Follow the common comment style for both functions
    Add more detailed descriptions for parameters and the return value.
  * Move the reason why test_and_set_bit() is used into the call sites

- Change how the return value is calculated
  The most anti-human part of the return value is:

	if (...)
		ret = 1;
	...
	return ret == 0;

  This means that when we set ret to 1, the function returns 0.
  Change the local variable name to @finished, and directly return its
  value.

Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
commit 58f74b2203
parent 523929f1ca
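To make the "anti-human" pattern from the message concrete outside the diff context, here is a minimal standalone C sketch (hypothetical names, not btrfs code) contrasting the old inverted `ret == 0` return with the new direct bool return. Both behave identically; the refactor is purely about readability.

/*
 * Minimal sketch, assuming a toy "finished?" check. The names are
 * hypothetical and only illustrate the two return-value styles.
 */
#include <stdbool.h>
#include <stdio.h>

/*
 * Old style: ret doubles as an early-exit flag, and "return ret == 0"
 * inverts it, so setting ret = 1 makes the function return 0.
 */
static int dec_test_old(unsigned long long bytes_left,
			unsigned long long io_size)
{
	int ret = 0;

	if (bytes_left != io_size)
		ret = 1;	/* bytes remain: NOT finished */

	return ret == 0;	/* inverted on return */
}

/* New style: the local variable says what it means and is returned directly. */
static bool dec_test_new(unsigned long long bytes_left,
			 unsigned long long io_size)
{
	bool finished = false;

	if (bytes_left == io_size)
		finished = true;	/* all bytes accounted: finished */

	return finished;
}

int main(void)
{
	/* Both styles agree on the result; only readability differs. */
	printf("old: %d, new: %d\n", dec_test_old(4096, 4096),
	       dec_test_new(4096, 4096));
	return 0;
}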
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
@@ -7797,10 +7797,8 @@ static void __endio_write_update_ordered(struct btrfs_inode *inode,
 					NULL);
 			btrfs_queue_work(wq, &ordered->work);
 		}
-		/*
-		 * If btrfs_dec_test_ordered_pending does not find any ordered
-		 * extent in the range, we can exit.
-		 */
+
+		/* No ordered extent found in the range, exit */
 		if (ordered_offset == last_offset)
 			return;
 		/*
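The hunk above is the walk-forward call site that the @file_offset contract serves: each call finishes at most one ordered extent and advances the offset past it. A standalone toy simulation of that iteration pattern (stand-in names and stub logic, not kernel code):

/*
 * Sketch of the walk-forward calling pattern, assuming a stub that
 * finishes one fixed-size "extent" per call. Hypothetical names only.
 */
#include <stdbool.h>
#include <stdio.h>

#define EXTENT_SIZE 4096ULL
#define NUM_EXTENTS 3ULL

/*
 * Stub: finish at most one extent per call, advancing *file_offset to one
 * byte past the finished range; return false once the range is exhausted.
 */
static bool dec_test_first_pending(unsigned long long *file_offset)
{
	if (*file_offset >= NUM_EXTENTS * EXTENT_SIZE)
		return false;
	*file_offset += EXTENT_SIZE;
	return true;
}

int main(void)
{
	unsigned long long offset = 0;
	unsigned long long last = NUM_EXTENTS * EXTENT_SIZE;

	/* The caller is responsible for iterating all extents in the range. */
	while (offset < last) {
		if (!dec_test_first_pending(&offset))
			break;	/* nothing (more) finished in the range */
		printf("finished one extent, next offset %llu\n", offset);
	}
	return 0;
}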
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
@@ -297,26 +297,33 @@ void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
 }
 
 /*
- * this is used to account for finished IO across a given range
- * of the file.  The IO may span ordered extents.  If
- * a given ordered_extent is completely done, 1 is returned, otherwise
- * 0.
+ * Finish IO for one ordered extent across a given range.  The range can
+ * contain several ordered extents.
  *
- * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
- * to make sure this function only returns 1 once for a given ordered extent.
+ * @found_ret:	 Return the finished ordered extent
+ * @file_offset: File offset for the finished IO
+ *		 Will also be updated to one byte past the range that is
+ *		 recorded as finished.  This allows the caller to walk forward.
+ * @io_size:	 Length of the finished IO range
+ * @uptodate:	 If the IO finished without problem
  *
- * file_offset is updated to one byte past the range that is recorded as
- * complete.  This allows you to walk forward in the file.
+ * Return true if any ordered extent is finished in the range, and update
+ * @found_ret and @file_offset.
+ * Return false otherwise.
+ *
+ * NOTE: Although the range can cross multiple ordered extents, only one
+ * ordered extent will be updated during one call.  The caller is responsible
+ * for iterating all ordered extents in the range.
  */
-int btrfs_dec_test_first_ordered_pending(struct btrfs_inode *inode,
-				   struct btrfs_ordered_extent **cached,
+bool btrfs_dec_test_first_ordered_pending(struct btrfs_inode *inode,
+				   struct btrfs_ordered_extent **finished_ret,
 				   u64 *file_offset, u64 io_size, int uptodate)
 {
 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
 	struct rb_node *node;
 	struct btrfs_ordered_extent *entry = NULL;
-	int ret;
+	bool finished = false;
 	unsigned long flags;
 	u64 dec_end;
 	u64 dec_start;
@@ -324,16 +331,12 @@ int btrfs_dec_test_first_ordered_pending(struct btrfs_inode *inode,
 
 	spin_lock_irqsave(&tree->lock, flags);
 	node = tree_search(tree, *file_offset);
-	if (!node) {
-		ret = 1;
+	if (!node)
 		goto out;
-	}
 
 	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
-	if (!offset_in_entry(entry, *file_offset)) {
-		ret = 1;
+	if (!offset_in_entry(entry, *file_offset))
 		goto out;
-	}
 
 	dec_start = max(*file_offset, entry->file_offset);
 	dec_end = min(*file_offset + io_size,
@@ -354,39 +357,50 @@ int btrfs_dec_test_first_ordered_pending(struct btrfs_inode *inode,
 		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);
 
 	if (entry->bytes_left == 0) {
-		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
+		/*
+		 * Ensure only one caller can set the flag and update
+		 * @finished_ret accordingly
+		 */
+		finished = !test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
 		/* test_and_set_bit implies a barrier */
 		cond_wake_up_nomb(&entry->wait);
-	} else {
-		ret = 1;
 	}
 out:
-	if (!ret && cached && entry) {
-		*cached = entry;
+	if (finished && finished_ret && entry) {
+		*finished_ret = entry;
 		refcount_inc(&entry->refs);
 	}
 	spin_unlock_irqrestore(&tree->lock, flags);
-	return ret == 0;
+	return finished;
 }
 
 /*
- * this is used to account for finished IO across a given range
- * of the file.  The IO should not span ordered extents.  If
- * a given ordered_extent is completely done, 1 is returned, otherwise
- * 0.
+ * Finish IO for one ordered extent across a given range.  The range can only
+ * contain one ordered extent.
  *
- * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
- * to make sure this function only returns 1 once for a given ordered extent.
+ * @cached:	 The cached ordered extent.  If not NULL, we can skip the tree
+ *		 search and use the ordered extent directly.
+ *		 Will also be used to store the finished ordered extent.
+ * @file_offset: File offset for the finished IO
+ * @io_size:	 Length of the finished IO range
+ * @uptodate:	 If the IO finished without problem
+ *
+ * Return true if the ordered extent is finished in the range, and update
+ * @cached.
+ * Return false otherwise.
+ *
+ * NOTE: The range can NOT cross multiple ordered extents; the caller must
+ * ensure the range stays within one ordered extent.
  */
-int btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
+bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
 				   struct btrfs_ordered_extent **cached,
 				   u64 file_offset, u64 io_size, int uptodate)
 {
 	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
 	struct rb_node *node;
 	struct btrfs_ordered_extent *entry = NULL;
 	unsigned long flags;
-	int ret;
+	bool finished = false;
 
 	spin_lock_irqsave(&tree->lock, flags);
 	if (cached && *cached) {
@@ -395,41 +409,39 @@ int btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
 	}
 
 	node = tree_search(tree, file_offset);
-	if (!node) {
-		ret = 1;
+	if (!node)
 		goto out;
-	}
 
 	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 have_entry:
-	if (!offset_in_entry(entry, file_offset)) {
-		ret = 1;
+	if (!offset_in_entry(entry, file_offset))
 		goto out;
-	}
 
-	if (io_size > entry->bytes_left) {
+	if (io_size > entry->bytes_left)
 		btrfs_crit(inode->root->fs_info,
 			   "bad ordered accounting left %llu size %llu",
 			   entry->bytes_left, io_size);
-	}
+
 	entry->bytes_left -= io_size;
 	if (!uptodate)
 		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);
 
 	if (entry->bytes_left == 0) {
-		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
+		/*
+		 * Ensure only one caller can set the flag and update
+		 * @cached accordingly
+		 */
+		finished = !test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
 		/* test_and_set_bit implies a barrier */
 		cond_wake_up_nomb(&entry->wait);
-	} else {
-		ret = 1;
 	}
 out:
-	if (!ret && cached && entry) {
+	if (finished && cached && entry) {
 		*cached = entry;
 		refcount_inc(&entry->refs);
 	}
 	spin_unlock_irqrestore(&tree->lock, flags);
-	return ret == 0;
+	return finished;
 }
 
 /*
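Both functions rely on test_and_set_bit() so that only the caller that performs the 0 -> 1 transition of BTRFS_ORDERED_IO_DONE observes "finished" and returns true; everyone else sees the bit already set. A minimal userspace sketch of that "exactly once" idiom, using the GCC/Clang __atomic_fetch_or() builtin as a stand-in for the kernel's atomic bitop (hypothetical names):

/*
 * Sketch of the "report completion exactly once" idiom. __atomic_fetch_or()
 * returns the old value, so only the first caller sees the bit clear.
 */
#include <stdbool.h>
#include <stdio.h>

#define ORDERED_IO_DONE (1UL << 0)

static unsigned long flags;	/* stand-in for the per-extent flags word */

/* Returns true only for the caller that performs the 0 -> 1 transition. */
static bool mark_io_done(void)
{
	unsigned long old = __atomic_fetch_or(&flags, ORDERED_IO_DONE,
					      __ATOMIC_SEQ_CST);
	return !(old & ORDERED_IO_DONE);
}

int main(void)
{
	/* Only the first call wins; later calls see the bit already set. */
	printf("first:  %d\n", mark_io_done());	/* prints 1 */
	printf("second: %d\n", mark_io_done());	/* prints 0 */
	return 0;
}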
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
@@ -152,11 +152,11 @@ btrfs_ordered_inode_tree_init(struct btrfs_ordered_inode_tree *t)
 void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry);
 void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
 				 struct btrfs_ordered_extent *entry);
-int btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
+bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
 				   struct btrfs_ordered_extent **cached,
 				   u64 file_offset, u64 io_size, int uptodate);
-int btrfs_dec_test_first_ordered_pending(struct btrfs_inode *inode,
-				   struct btrfs_ordered_extent **cached,
+bool btrfs_dec_test_first_ordered_pending(struct btrfs_inode *inode,
+				   struct btrfs_ordered_extent **finished_ret,
 				   u64 *file_offset, u64 io_size,
 				   int uptodate);
 int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
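As a usage note on the @cached parameter of btrfs_dec_test_ordered_pending(): a caller may pass in an extent it already holds to skip the tree search, and on a true return it receives the finished extent back through the same pointer with an extra reference taken. A standalone toy simulation of that contract (stand-in types and stub logic, not kernel code):

/*
 * Sketch of the @cached in/out contract. The structure and lookup are
 * hypothetical stand-ins for the real rb-tree and refcounting.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct ordered_extent {
	unsigned long long bytes_left;
	int refs;
};

static struct ordered_extent the_extent = { .bytes_left = 8192, .refs = 1 };

static bool dec_test_pending(struct ordered_extent **cached,
			     unsigned long long io_size)
{
	struct ordered_extent *entry;
	bool finished = false;

	if (cached && *cached)
		entry = *cached;	/* skip the (simulated) tree search */
	else
		entry = &the_extent;	/* tree-search stand-in */

	entry->bytes_left -= io_size;
	if (entry->bytes_left == 0)
		finished = true;

	if (finished && cached) {
		*cached = entry;	/* hand back the finished extent ... */
		entry->refs++;		/* ... with an extra reference */
	}
	return finished;
}

int main(void)
{
	struct ordered_extent *cached = NULL;

	printf("first half:  %d\n", dec_test_pending(&cached, 4096)); /* 0 */
	printf("second half: %d\n", dec_test_pending(&cached, 4096)); /* 1 */
	printf("refs now:    %d\n", the_extent.refs);		      /* 2 */
	return 0;
}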