ext4: Abstract out overlap fix/check logic in ext4_mb_normalize_request()
Abstract out the logic of fixing PA overlaps in ext4_mb_normalize_request() to improve the readability of the code. This also makes it easier to change the overlap logic in the future. There are no functional changes in this patch.

Signed-off-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
Reviewed-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Link: https://lore.kernel.org/r/9b35f3955a1d7b66bbd713eca1e63026e01f78c1.1679731817.git.ojaswin@linux.ibm.com
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
commit 0830344c95
parent 7692094ac5
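To make the intent of the new helper easier to see at a glance, here is a minimal, self-contained userspace sketch of the trimming rule it applies to the normalized window. All names and types below (adjust_window, lblk_t, the sample ranges in main) are illustrative only and are not part of the kernel code or this patch.

/*
 * Standalone illustration of the window-trimming rule: shrink the
 * normalized window [*start, *end) so it no longer overlaps an existing
 * preallocated range [pa_start, pa_end), while keeping the original goal
 * block inside the window. Simplified demo code, not kernel code.
 */
#include <assert.h>
#include <stdio.h>

typedef unsigned int lblk_t;

static void adjust_window(lblk_t goal, lblk_t *start, lblk_t *end,
			  lblk_t pa_start, lblk_t pa_end)
{
	/* The PA is assumed not to cover the goal block itself. */
	assert(goal >= pa_end || goal < pa_start);

	/* No overlap with the window: nothing to do. */
	if (pa_start >= *end || pa_end <= *start)
		return;

	if (pa_end <= goal)
		*start = pa_end;	/* PA lies below the goal: raise start */
	else if (pa_start > goal)
		*end = pa_start;	/* PA lies above the goal: lower end */
}

int main(void)
{
	/* Window [0, 64) around goal block 40, with an existing PA at [8, 16). */
	lblk_t start = 0, end = 64, goal = 40;

	adjust_window(goal, &start, &end, 8, 16);
	printf("window after adjustment: [%u, %u)\n", start, end);	/* [16, 64) */

	/* A second PA above the goal, at [48, 56), trims the other side. */
	adjust_window(goal, &start, &end, 48, 56);
	printf("window after adjustment: [%u, %u)\n", start, end);	/* [16, 48) */
	return 0;
}

As in the kernel helper, the window only ever shrinks toward the goal block, so the original request always remains inside the adjusted range.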
fs/ext4/mballoc.c
@@ -4001,6 +4001,73 @@ ext4_mb_pa_assert_overlap(struct ext4_allocation_context *ac,
 	rcu_read_unlock();
 }
 
+/*
+ * Given an allocation context "ac" and a range "start", "end", check
+ * and adjust boundaries if the range overlaps with any of the existing
+ * preallocations stored in the corresponding inode of the allocation context.
+ *
+ * Parameters:
+ *	ac			allocation context
+ *	start			start of the new range
+ *	end			end of the new range
+ */
+static inline void
+ext4_mb_pa_adjust_overlap(struct ext4_allocation_context *ac,
+			  ext4_lblk_t *start, ext4_lblk_t *end)
+{
+	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
+	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
+	struct ext4_prealloc_space *tmp_pa;
+	ext4_lblk_t new_start, new_end;
+	ext4_lblk_t tmp_pa_start, tmp_pa_end;
+
+	new_start = *start;
+	new_end = *end;
+
+	/* check we don't cross already preallocated blocks */
+	rcu_read_lock();
+	list_for_each_entry_rcu(tmp_pa, &ei->i_prealloc_list, pa_inode_list) {
+		if (tmp_pa->pa_deleted)
+			continue;
+		spin_lock(&tmp_pa->pa_lock);
+		if (tmp_pa->pa_deleted) {
+			spin_unlock(&tmp_pa->pa_lock);
+			continue;
+		}
+
+		tmp_pa_start = tmp_pa->pa_lstart;
+		tmp_pa_end = tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len);
+
+		/* PA must not overlap original request */
+		BUG_ON(!(ac->ac_o_ex.fe_logical >= tmp_pa_end ||
+			ac->ac_o_ex.fe_logical < tmp_pa_start));
+
+		/* skip PAs this normalized request doesn't overlap with */
+		if (tmp_pa_start >= new_end || tmp_pa_end <= new_start) {
+			spin_unlock(&tmp_pa->pa_lock);
+			continue;
+		}
+		BUG_ON(tmp_pa_start <= new_start && tmp_pa_end >= new_end);
+
+		/* adjust start or end to be adjacent to this pa */
+		if (tmp_pa_end <= ac->ac_o_ex.fe_logical) {
+			BUG_ON(tmp_pa_end < new_start);
+			new_start = tmp_pa_end;
+		} else if (tmp_pa_start > ac->ac_o_ex.fe_logical) {
+			BUG_ON(tmp_pa_start > new_end);
+			new_end = tmp_pa_start;
+		}
+		spin_unlock(&tmp_pa->pa_lock);
+	}
+	rcu_read_unlock();
+
+	/* XXX: extra loop to check we really don't overlap preallocations */
+	ext4_mb_pa_assert_overlap(ac, new_start, new_end);
+
+	*start = new_start;
+	*end = new_end;
+}
+
 /*
  * Normalization means making request better in terms of
  * size and alignment
@@ -4016,9 +4083,6 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
 	loff_t size, start_off;
 	loff_t orig_size __maybe_unused;
 	ext4_lblk_t start;
-	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
-	struct ext4_prealloc_space *tmp_pa;
-	ext4_lblk_t tmp_pa_start, tmp_pa_end;
 
 	/* do normalize only data requests, metadata requests
 	   do not need preallocation */
@@ -4119,47 +4183,10 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
 
 	end = start + size;
 
-	/* check we don't cross already preallocated blocks */
-	rcu_read_lock();
-	list_for_each_entry_rcu(tmp_pa, &ei->i_prealloc_list, pa_inode_list) {
-		if (tmp_pa->pa_deleted)
-			continue;
-		spin_lock(&tmp_pa->pa_lock);
-		if (tmp_pa->pa_deleted) {
-			spin_unlock(&tmp_pa->pa_lock);
-			continue;
-		}
+	ext4_mb_pa_adjust_overlap(ac, &start, &end);
 
-		tmp_pa_start = tmp_pa->pa_lstart;
-		tmp_pa_end = tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len);
-
-		/* PA must not overlap original request */
-		BUG_ON(!(ac->ac_o_ex.fe_logical >= tmp_pa_end ||
-			ac->ac_o_ex.fe_logical < tmp_pa_start));
-
-		/* skip PAs this normalized request doesn't overlap with */
-		if (tmp_pa_start >= end || tmp_pa_end <= start) {
-			spin_unlock(&tmp_pa->pa_lock);
-			continue;
-		}
-		BUG_ON(tmp_pa_start <= start && tmp_pa_end >= end);
-
-		/* adjust start or end to be adjacent to this pa */
-		if (tmp_pa_end <= ac->ac_o_ex.fe_logical) {
-			BUG_ON(tmp_pa_end < start);
-			start = tmp_pa_end;
-		} else if (tmp_pa_start > ac->ac_o_ex.fe_logical) {
-			BUG_ON(tmp_pa_start > end);
-			end = tmp_pa_start;
-		}
-		spin_unlock(&tmp_pa->pa_lock);
-	}
-	rcu_read_unlock();
 	size = end - start;
 
-	/* XXX: extra loop to check we really don't overlap preallocations */
-	ext4_mb_pa_assert_overlap(ac, start, end);
-
 	/*
 	 * In this function "start" and "size" are normalized for better
 	 * alignment and length such that we could preallocate more blocks.