btrfs: zoned: calculate free space from zone capacity

Now that we have introduced capacity in a block group, we need to calculate
free space using the capacity instead of the length. Thus, we account
capacity - alloc_pointer as free space, and account the bytes in the range
[capacity, length] as zone unusable.
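
As an illustration only (not part of the patch), here is a minimal userspace
sketch of that accounting; the struct and the example numbers are made up and
only mirror the relevant members of struct btrfs_block_group:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the relevant fields of struct btrfs_block_group. */
struct bg {
	uint64_t length;        /* total size of the block group */
	uint64_t zone_capacity; /* usable bytes; <= length on zoned devices */
	uint64_t alloc_offset;  /* write pointer inside the zone */
	uint64_t used;          /* bytes still referenced */
};

int main(void)
{
	const struct bg bg = {
		.length        = 256 << 20, /* 256 MiB zone size */
		.zone_capacity = 192 << 20, /* 192 MiB usable capacity */
		.alloc_offset  =  64 << 20,
		.used          =  48 << 20,
	};

	/* Free space is measured against the capacity, not the length. */
	uint64_t free = bg.zone_capacity - bg.alloc_offset;
	/* Unusable = freed-but-unreclaimed bytes plus the [capacity, length) tail. */
	uint64_t zone_unusable = (bg.alloc_offset - bg.used) +
				 (bg.length - bg.zone_capacity);

	printf("free %llu, zone_unusable %llu\n",
	       (unsigned long long)free, (unsigned long long)zone_unusable);
	return 0;
}

With these example numbers, free space is 128 MiB and zone_unusable is
16 MiB + 64 MiB = 80 MiB; before this change, the 64 MiB between capacity and
length would have been counted as free.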

Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Naohiro Aota, 2021-08-19 21:19:10 +09:00, committed by David Sterba
commit 98173255bd (parent c46c4247ab)
4 changed files with 16 additions and 6 deletions

fs/btrfs/block-group.c

@@ -2484,7 +2484,8 @@ struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *tran
 	 */
 	trace_btrfs_add_block_group(fs_info, cache, 1);
 	btrfs_update_space_info(fs_info, cache->flags, size, bytes_used,
-				cache->bytes_super, 0, &cache->space_info);
+				cache->bytes_super, cache->zone_unusable,
+				&cache->space_info);
 	btrfs_update_global_block_rsv(fs_info);
 
 	link_block_group(cache);
@@ -2599,7 +2600,9 @@ void btrfs_dec_block_group_ro(struct btrfs_block_group *cache)
 	if (!--cache->ro) {
 		if (btrfs_is_zoned(cache->fs_info)) {
 			/* Migrate zone_unusable bytes back */
-			cache->zone_unusable = cache->alloc_offset - cache->used;
+			cache->zone_unusable =
+				(cache->alloc_offset - cache->used) +
+				(cache->length - cache->zone_capacity);
 			sinfo->bytes_zone_unusable += cache->zone_unusable;
 			sinfo->bytes_readonly -= cache->zone_unusable;
 		}

fs/btrfs/extent-tree.c

@@ -3796,7 +3796,8 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
 		goto out;
 	}
 
-	avail = block_group->length - block_group->alloc_offset;
+	WARN_ON_ONCE(block_group->alloc_offset > block_group->zone_capacity);
+	avail = block_group->zone_capacity - block_group->alloc_offset;
 	if (avail < num_bytes) {
 		if (ffe_ctl->max_extent_size < avail) {
 			/*
fs/btrfs/free-space-cache.c

@@ -2539,10 +2539,15 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
 	u64 offset = bytenr - block_group->start;
 	u64 to_free, to_unusable;
 	const int bg_reclaim_threshold = READ_ONCE(fs_info->bg_reclaim_threshold);
+	bool initial = (size == block_group->length);
+
+	WARN_ON(!initial && offset + size > block_group->zone_capacity);
 
 	spin_lock(&ctl->tree_lock);
 	if (!used)
 		to_free = size;
+	else if (initial)
+		to_free = block_group->zone_capacity;
 	else if (offset >= block_group->alloc_offset)
 		to_free = size;
 	else if (offset + size <= block_group->alloc_offset)
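
For readability, here is a standalone sketch of the to_free decision in the
hunk above; struct bg and bg_to_free() are illustrative names, not kernel API,
and the branches past the point where the hunk is cut off are assumptions
based on the surrounding function (the kernel then accounts the remainder,
size - to_free, as zone_unusable):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the fields __btrfs_add_free_space_zoned() consults. */
struct bg {
	uint64_t start;          /* logical start of the block group */
	uint64_t length;         /* total size of the block group */
	uint64_t zone_capacity;  /* usable bytes, <= length */
	uint64_t alloc_offset;   /* current write pointer */
};

/* How many bytes of the range [bytenr, bytenr + size) return to free space. */
static uint64_t bg_to_free(const struct bg *bg, uint64_t bytenr, uint64_t size,
			   bool used)
{
	uint64_t offset = bytenr - bg->start;
	bool initial = (size == bg->length);

	if (!used)
		return size;              /* range was never allocated */
	if (initial)
		return bg->zone_capacity; /* whole group added: only the capacity is free */
	if (offset >= bg->alloc_offset)
		return size;              /* entirely beyond the write pointer */
	if (offset + size <= bg->alloc_offset)
		return 0;                 /* entirely behind the write pointer */
	/* Assumed final case: the range straddles the write pointer. */
	return offset + size - bg->alloc_offset;
}

int main(void)
{
	const struct bg bg = {
		.start = 0,
		.length = 256 << 20,
		.zone_capacity = 192 << 20,
		.alloc_offset = 64 << 20,
	};

	/* The whole block group: only zone_capacity becomes free space. */
	printf("initial: to_free %llu\n",
	       (unsigned long long)bg_to_free(&bg, 0, bg.length, true));
	/* A 1 MiB extent behind the write pointer stays zone_unusable. */
	printf("behind wp: to_free %llu\n",
	       (unsigned long long)bg_to_free(&bg, 16 << 20, 1 << 20, true));
	return 0;
}
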
@@ -2755,7 +2760,7 @@ void btrfs_dump_free_space(struct btrfs_block_group *block_group,
 	 */
 	if (btrfs_is_zoned(fs_info)) {
 		btrfs_info(fs_info, "free space %llu",
-			   block_group->length - block_group->alloc_offset);
+			   block_group->zone_capacity - block_group->alloc_offset);
 		return;
 	}
 

fs/btrfs/zoned.c

@@ -1265,8 +1265,9 @@ void btrfs_calc_zone_unusable(struct btrfs_block_group *cache)
 		return;
 
 	WARN_ON(cache->bytes_super != 0);
-	unusable = cache->alloc_offset - cache->used;
-	free = cache->length - cache->alloc_offset;
+	unusable = (cache->alloc_offset - cache->used) +
+		   (cache->length - cache->zone_capacity);
+	free = cache->zone_capacity - cache->alloc_offset;
 
 	/* We only need ->free_space in ALLOC_SEQ block groups */
 	cache->last_byte_to_unpin = (u64)-1;