btrfs: track ordered bytes instead of just dio ordered bytes
We track dio_bytes because the shrink delalloc code needs to know if we have more DIO in flight than we have normal buffered IO. The reason for this is that we can't "flush" DIO, we have to just wait on the ordered extents to finish. However, this is true of all ordered extents. If we have more ordered space outstanding than dirty pages we should be waiting on ordered extents. We are technically already OK on this front, because we always do a FLUSH_DELALLOC_WAIT loop, but I want to use the ordered counter in the preemptive flushing code as well, so change this to count all ordered bytes instead of just DIO ordered bytes.

Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: David Sterba <dsterba@suse.com>
commit 5deb17e18e
parent ac1ea10e75
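The decision this counter feeds in shrink_delalloc() is worth spelling out: dirty pages can be flushed, but space tied up in ordered extents can only be waited on. Below is a minimal userspace sketch, not the kernel code, of the flush-versus-wait heuristic the commit message describes; the struct, helper name, and example numbers are illustrative assumptions.

/*
 * Minimal userspace sketch (not the kernel implementation) of the
 * flush-vs-wait decision once all ordered bytes are tracked: if more
 * space is tied up in ordered extents than in delalloc, flushing
 * delalloc is unlikely to return the space we need, so wait instead.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical snapshot of the two counters. */
struct space_snapshot {
	uint64_t delalloc_bytes;	/* dirty, still-flushable buffered IO */
	uint64_t ordered_bytes;		/* ordered extents already submitted */
};

static bool should_wait_on_ordered(const struct space_snapshot *s)
{
	/* Nothing outstanding: no point flushing or waiting. */
	if (s->delalloc_bytes == 0 && s->ordered_bytes == 0)
		return false;

	/* More bytes stuck in ordered extents than in delalloc. */
	return s->ordered_bytes > s->delalloc_bytes;
}

int main(void)
{
	struct space_snapshot s = { .delalloc_bytes = 4 << 20,
				    .ordered_bytes = 16 << 20 };

	printf("wait on ordered extents: %s\n",
	       should_wait_on_ordered(&s) ? "yes" : "no");
	return 0;
}

With the counter covering all ordered extents rather than only DIO ones, this comparison accounts for buffered writes whose ordered extents are still in flight as well.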
@@ -797,7 +797,7 @@ struct btrfs_fs_info {
 	/* used to keep from writing metadata until there is a nice batch */
 	struct percpu_counter dirty_metadata_bytes;
 	struct percpu_counter delalloc_bytes;
-	struct percpu_counter dio_bytes;
+	struct percpu_counter ordered_bytes;
 	s32 dirty_metadata_batch;
 	s32 delalloc_batch;
@@ -1469,7 +1469,7 @@ void btrfs_free_fs_info(struct btrfs_fs_info *fs_info)
 {
 	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
 	percpu_counter_destroy(&fs_info->delalloc_bytes);
-	percpu_counter_destroy(&fs_info->dio_bytes);
+	percpu_counter_destroy(&fs_info->ordered_bytes);
 	percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
 	btrfs_free_csum_hash(fs_info);
 	btrfs_free_stripe_hash_table(fs_info);
@@ -2802,7 +2802,7 @@ static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block
 	sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE;
 	sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE);
 
-	ret = percpu_counter_init(&fs_info->dio_bytes, 0, GFP_KERNEL);
+	ret = percpu_counter_init(&fs_info->ordered_bytes, 0, GFP_KERNEL);
 	if (ret)
 		return ret;
@@ -4163,9 +4163,9 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
 			   percpu_counter_sum(&fs_info->delalloc_bytes));
 	}
 
-	if (percpu_counter_sum(&fs_info->dio_bytes))
+	if (percpu_counter_sum(&fs_info->ordered_bytes))
 		btrfs_info(fs_info, "at unmount dio bytes count %lld",
-			   percpu_counter_sum(&fs_info->dio_bytes));
+			   percpu_counter_sum(&fs_info->ordered_bytes));
 
 	btrfs_sysfs_remove_mounted(fs_info);
 	btrfs_sysfs_remove_fsid(fs_info->fs_devices);
@@ -206,11 +206,11 @@ static int __btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset
 	       type == BTRFS_ORDERED_COMPRESSED);
 	set_bit(type, &entry->flags);
 
-	if (dio) {
-		percpu_counter_add_batch(&fs_info->dio_bytes, num_bytes,
-					 fs_info->delalloc_batch);
+	percpu_counter_add_batch(&fs_info->ordered_bytes, num_bytes,
+				 fs_info->delalloc_batch);
+
+	if (dio)
 		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);
-	}
 
 	/* one ref for the tree */
 	refcount_set(&entry->refs, 1);
@@ -503,9 +503,8 @@ void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
 		btrfs_delalloc_release_metadata(btrfs_inode, entry->num_bytes,
 						false);
 
-	if (test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
-		percpu_counter_add_batch(&fs_info->dio_bytes, -entry->num_bytes,
-					 fs_info->delalloc_batch);
+	percpu_counter_add_batch(&fs_info->ordered_bytes, -entry->num_bytes,
+				 fs_info->delalloc_batch);
 
 	tree = &btrfs_inode->ordered_tree;
 	spin_lock_irq(&tree->lock);
@@ -489,7 +489,7 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info,
 {
 	struct btrfs_trans_handle *trans;
 	u64 delalloc_bytes;
-	u64 dio_bytes;
+	u64 ordered_bytes;
 	u64 items;
 	long time_left;
 	int loops;
@@ -513,25 +513,20 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info,
 
 	delalloc_bytes = percpu_counter_sum_positive(
 						&fs_info->delalloc_bytes);
-	dio_bytes = percpu_counter_sum_positive(&fs_info->dio_bytes);
-	if (delalloc_bytes == 0 && dio_bytes == 0) {
-		if (trans)
-			return;
-		if (wait_ordered)
-			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
+	ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes);
+	if (delalloc_bytes == 0 && ordered_bytes == 0)
 		return;
-	}
 
 	/*
 	 * If we are doing more ordered than delalloc we need to just wait on
 	 * ordered extents, otherwise we'll waste time trying to flush delalloc
 	 * that likely won't give us the space back we need.
 	 */
-	if (dio_bytes > delalloc_bytes)
+	if (ordered_bytes > delalloc_bytes)
 		wait_ordered = true;
 
 	loops = 0;
-	while ((delalloc_bytes || dio_bytes) && loops < 3) {
+	while ((delalloc_bytes || ordered_bytes) && loops < 3) {
 		u64 temp = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
 		long nr_pages = min_t(u64, temp, LONG_MAX);
@@ -556,7 +551,8 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info,
 
 		delalloc_bytes = percpu_counter_sum_positive(
 						&fs_info->delalloc_bytes);
-		dio_bytes = percpu_counter_sum_positive(&fs_info->dio_bytes);
+		ordered_bytes = percpu_counter_sum_positive(
+						&fs_info->ordered_bytes);
 	}
 }