Btrfs: use fastpath in extent state ops as much as possible
Fully utilize our extent state's new helper functions to use the fastpath as much as possible.

Signed-off-by: Liu Bo <liubo2009@cn.fujitsu.com>
Reviewed-by: Josef Bacik <josef@redhat.com>
This commit is contained in:
parent
f8c5d0b443
commit
d1ac6e41d5
|
@ -569,10 +569,8 @@ hit_next:
|
|||
if (err)
|
||||
goto out;
|
||||
if (state->end <= end) {
|
||||
clear_state_bit(tree, state, &bits, wake);
|
||||
if (last_end == (u64)-1)
|
||||
goto out;
|
||||
start = last_end + 1;
|
||||
state = clear_state_bit(tree, state, &bits, wake);
|
||||
goto next;
|
||||
}
|
||||
goto search_again;
|
||||
}
|
||||
|
@ -780,7 +778,6 @@ hit_next:
|
|||
* Just lock what we found and keep going
|
||||
*/
|
||||
if (state->start == start && state->end <= end) {
|
||||
struct rb_node *next_node;
|
||||
if (state->state & exclusive_bits) {
|
||||
*failed_start = state->start;
|
||||
err = -EEXIST;
|
||||
|
@ -788,20 +785,15 @@ hit_next:
|
|||
}
|
||||
|
||||
set_state_bits(tree, state, &bits);
|
||||
|
||||
cache_state(state, cached_state);
|
||||
merge_state(tree, state);
|
||||
if (last_end == (u64)-1)
|
||||
goto out;
|
||||
|
||||
start = last_end + 1;
|
||||
next_node = rb_next(&state->rb_node);
|
||||
if (next_node && start < end && prealloc && !need_resched()) {
|
||||
state = rb_entry(next_node, struct extent_state,
|
||||
rb_node);
|
||||
if (state->start == start)
|
||||
goto hit_next;
|
||||
}
|
||||
state = next_state(state);
|
||||
if (start < end && state && state->start == start &&
|
||||
!need_resched())
|
||||
goto hit_next;
|
||||
goto search_again;
|
||||
}
|
||||
|
||||
|
@ -844,6 +836,10 @@ hit_next:
|
|||
if (last_end == (u64)-1)
|
||||
goto out;
|
||||
start = last_end + 1;
|
||||
state = next_state(state);
|
||||
if (start < end && state && state->start == start &&
|
||||
!need_resched())
|
||||
goto hit_next;
|
||||
}
|
||||
goto search_again;
|
||||
}
|
||||
|
@ -993,21 +989,14 @@ hit_next:
|
|||
* Just lock what we found and keep going
|
||||
*/
|
||||
if (state->start == start && state->end <= end) {
|
||||
struct rb_node *next_node;
|
||||
|
||||
set_state_bits(tree, state, &bits);
|
||||
clear_state_bit(tree, state, &clear_bits, 0);
|
||||
state = clear_state_bit(tree, state, &clear_bits, 0);
|
||||
if (last_end == (u64)-1)
|
||||
goto out;
|
||||
|
||||
start = last_end + 1;
|
||||
next_node = rb_next(&state->rb_node);
|
||||
if (next_node && start < end && prealloc && !need_resched()) {
|
||||
state = rb_entry(next_node, struct extent_state,
|
||||
rb_node);
|
||||
if (state->start == start)
|
||||
goto hit_next;
|
||||
}
|
||||
if (start < end && state && state->start == start &&
|
||||
!need_resched())
|
||||
goto hit_next;
|
||||
goto search_again;
|
||||
}
|
||||
|
||||
|
@ -1041,10 +1030,13 @@ hit_next:
|
|||
goto out;
|
||||
if (state->end <= end) {
|
||||
set_state_bits(tree, state, &bits);
|
||||
clear_state_bit(tree, state, &clear_bits, 0);
|
||||
state = clear_state_bit(tree, state, &clear_bits, 0);
|
||||
if (last_end == (u64)-1)
|
||||
goto out;
|
||||
start = last_end + 1;
|
||||
if (start < end && state && state->start == start &&
|
||||
!need_resched())
|
||||
goto hit_next;
|
||||
}
|
||||
goto search_again;
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue