btrfs: zoned: introduce block group context to btrfs_eb_write_context
For metadata write out on the zoned mode, we call btrfs_check_meta_write_pointer() to check if an extent buffer to be written is aligned to the write pointer. We look up a block group containing the extent buffer for every extent buffer, which takes unnecessary effort as the writing extent buffers are mostly contiguous.

Introduce "zoned_bg" to cache the block group being worked on. Also, while at it, rename "cache" to "block_group".

Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
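To illustrate the pattern the patch introduces, below is a minimal user-space sketch of caching the last looked-up block group in a write context so consecutive, contiguous extent buffers skip the lookup. All names, types and helpers here are illustrative stand-ins, not the kernel API.

/*
 * Sketch only: a write context keeps the last "block group" it found so
 * the next check reuses it when the buffer still falls inside that group.
 */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

struct block_group {
        uint64_t start;
        uint64_t length;
        uint64_t meta_write_pointer;
};

struct eb_write_context {
        struct block_group *zoned_bg;   /* cached group, NULL if none */
};

/* Stand-in for a block group lookup: find the group covering @start. */
static struct block_group *lookup_block_group(struct block_group *groups,
                                              size_t nr, uint64_t start)
{
        for (size_t i = 0; i < nr; i++) {
                if (start >= groups[i].start &&
                    start < groups[i].start + groups[i].length)
                        return &groups[i];
        }
        return NULL;
}

/*
 * Check that a buffer at @eb_start/@eb_len sits exactly at the group's
 * write pointer, reusing ctx->zoned_bg when it still covers @eb_start
 * and looking up a new group only otherwise.
 */
static bool check_meta_write_pointer(struct eb_write_context *ctx,
                                     struct block_group *groups, size_t nr,
                                     uint64_t eb_start, uint64_t eb_len)
{
        struct block_group *bg = ctx->zoned_bg;

        /* Drop the cached group if the buffer falls outside of it. */
        if (bg && (bg->start > eb_start || bg->start + bg->length <= eb_start))
                ctx->zoned_bg = bg = NULL;

        if (!bg) {
                bg = lookup_block_group(groups, nr, eb_start);
                if (!bg)
                        return true;    /* nothing to enforce */
                ctx->zoned_bg = bg;
        }

        if (bg->meta_write_pointer != eb_start)
                return false;           /* out-of-order write */
        bg->meta_write_pointer = eb_start + eb_len;
        return true;
}

int main(void)
{
        struct block_group groups[] = {
                { .start = 0, .length = 1 << 20, .meta_write_pointer = 0 },
        };
        struct eb_write_context ctx = { .zoned_bg = NULL };

        /* Two contiguous 16K buffers: the second check reuses the cache. */
        printf("%d\n", check_meta_write_pointer(&ctx, groups, 1, 0, 16384));
        printf("%d\n", check_meta_write_pointer(&ctx, groups, 1, 16384, 16384));
        return 0;
}

As in the patch, the caller owns the cached reference for the whole writeback pass and releases it once at the end, instead of taking and dropping a reference per extent buffer.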
parent 861093eff4
commit 7db94301a9
fs/btrfs/extent_io.c
@@ -1809,7 +1809,6 @@ static int submit_eb_page(struct page *page, struct btrfs_eb_write_context *ctx)
 {
 	struct writeback_control *wbc = ctx->wbc;
 	struct address_space *mapping = page->mapping;
-	struct btrfs_block_group *cache = NULL;
 	struct extent_buffer *eb;
 	int ret;
 
@@ -1847,7 +1846,7 @@ static int submit_eb_page(struct page *page, struct btrfs_eb_write_context *ctx)
 
 	ctx->eb = eb;
 
-	if (!btrfs_check_meta_write_pointer(eb->fs_info, eb, &cache)) {
+	if (!btrfs_check_meta_write_pointer(eb->fs_info, ctx)) {
 		/*
 		 * If for_sync, this hole will be filled with
 		 * trasnsaction commit.
@@ -1861,18 +1860,15 @@ static int submit_eb_page(struct page *page, struct btrfs_eb_write_context *ctx)
 	}
 
 	if (!lock_extent_buffer_for_io(eb, wbc)) {
-		btrfs_revert_meta_write_pointer(cache, eb);
-		if (cache)
-			btrfs_put_block_group(cache);
+		btrfs_revert_meta_write_pointer(ctx->zoned_bg, eb);
 		free_extent_buffer(eb);
 		return 0;
 	}
-	if (cache) {
+	if (ctx->zoned_bg) {
 		/*
 		 * Implies write in zoned mode. Mark the last eb in a block group.
 		 */
-		btrfs_schedule_zone_finish_bg(cache, eb);
-		btrfs_put_block_group(cache);
+		btrfs_schedule_zone_finish_bg(ctx->zoned_bg, eb);
 	}
 	write_one_eb(eb, wbc);
 	free_extent_buffer(eb);
@@ -1985,6 +1981,9 @@ retry:
 		ret = 0;
 	if (!ret && BTRFS_FS_ERROR(fs_info))
 		ret = -EROFS;
+
+	if (ctx.zoned_bg)
+		btrfs_put_block_group(ctx.zoned_bg);
 	btrfs_zoned_meta_io_unlock(fs_info);
 	return ret;
 }
fs/btrfs/extent_io.h
@@ -96,6 +96,8 @@ struct extent_buffer {
 struct btrfs_eb_write_context {
 	struct writeback_control *wbc;
 	struct extent_buffer *eb;
+	/* Block group @eb resides in. Only used for zoned mode. */
+	struct btrfs_block_group *zoned_bg;
 };
 
 /*
fs/btrfs/zoned.c
@@ -1748,30 +1748,35 @@ out:
 }
 
 bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
-				    struct extent_buffer *eb,
-				    struct btrfs_block_group **cache_ret)
+				    struct btrfs_eb_write_context *ctx)
 {
-	struct btrfs_block_group *cache;
-	bool ret = true;
+	const struct extent_buffer *eb = ctx->eb;
+	struct btrfs_block_group *block_group = ctx->zoned_bg;
 
 	if (!btrfs_is_zoned(fs_info))
 		return true;
 
-	cache = btrfs_lookup_block_group(fs_info, eb->start);
-	if (!cache)
-		return true;
-
-	if (cache->meta_write_pointer != eb->start) {
-		btrfs_put_block_group(cache);
-		cache = NULL;
-		ret = false;
-	} else {
-		cache->meta_write_pointer = eb->start + eb->len;
+	if (block_group) {
+		if (block_group->start > eb->start ||
+		    block_group->start + block_group->length <= eb->start) {
+			btrfs_put_block_group(block_group);
+			block_group = NULL;
+			ctx->zoned_bg = NULL;
+		}
 	}
 
-	*cache_ret = cache;
+	if (!block_group) {
+		block_group = btrfs_lookup_block_group(fs_info, eb->start);
+		if (!block_group)
+			return true;
+		ctx->zoned_bg = block_group;
+	}
 
-	return ret;
+	if (block_group->meta_write_pointer != eb->start)
+		return false;
+	block_group->meta_write_pointer = eb->start + eb->len;
+
+	return true;
 }
 
 void btrfs_revert_meta_write_pointer(struct btrfs_block_group *cache,
fs/btrfs/zoned.h
@@ -59,8 +59,7 @@ void btrfs_redirty_list_add(struct btrfs_transaction *trans,
 bool btrfs_use_zone_append(struct btrfs_bio *bbio);
 void btrfs_record_physical_zoned(struct btrfs_bio *bbio);
 bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
-				    struct extent_buffer *eb,
-				    struct btrfs_block_group **cache_ret);
+				    struct btrfs_eb_write_context *ctx);
 void btrfs_revert_meta_write_pointer(struct btrfs_block_group *cache,
 				     struct extent_buffer *eb);
 int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length);
@@ -190,8 +189,7 @@ static inline void btrfs_record_physical_zoned(struct btrfs_bio *bbio)
 }
 
 static inline bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
-						  struct extent_buffer *eb,
-						  struct btrfs_block_group **cache_ret)
+						  struct btrfs_eb_write_context *ctx)
 {
 	return true;
 }