commit a9e6d15356
Merge branch 'allocator-fixes' into for-linus-4.4

Signed-off-by: Chris Mason <clm@fb.com>
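This is a merge commit with no body text of its own, so as a reading aid: the allocator-fixes branch below (1) records the largest free extent seen per space_info and per free-space bitmap (max_extent_size) so hopeless searches can fail fast instead of looping, (2) marks free clusters fragmented after an unclustered allocation and clears the flag once enough space is unpinned, (3) adds CONFIG_BTRFS_DEBUG-only fragment=data / fragment=metadata mount options that deliberately checkerboard free space to exercise those paths, and (4) replaces the transaction's pending_ordered list and the have_free_bgs / dirty_bg_run ints with an atomic counter plus wait queue and a flags bit field.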
@@ -362,6 +362,12 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
 		goto out;
 	}
 
+	if (btrfs_test_is_dummy_root(root)) {
+		srcu_read_unlock(&fs_info->subvol_srcu, index);
+		ret = -ENOENT;
+		goto out;
+	}
+
 	if (path->search_commit_root)
 		root_level = btrfs_header_level(root->commit_root);
 	else if (time_seq == (u64)-1)
@@ -1154,6 +1154,10 @@ struct btrfs_space_info {
 					   delalloc/allocations */
 	u64 bytes_readonly;	/* total bytes that are read only */
 
+	u64 max_extent_size;	/* This will hold the maximum extent size of
+				   the space info if we had an ENOSPC in the
+				   allocator. */
+
 	unsigned int full:1;	/* indicates that we cannot allocate any more
 				   chunks for this space */
 	unsigned int chunk_alloc:1;	/* set if we are allocating a chunk */

@@ -1228,6 +1232,9 @@ struct btrfs_free_cluster {
 	/* first extent starting offset */
 	u64 window_start;
 
+	/* We did a full search and couldn't create a cluster */
+	bool fragmented;
+
 	struct btrfs_block_group_cache *block_group;
 	/*
 	 * when a cluster is allocated from a block group, we put the

@@ -2148,6 +2155,8 @@ struct btrfs_ioctl_defrag_range_args {
 #define BTRFS_MOUNT_CHECK_INTEGRITY_INCLUDING_EXTENT_DATA (1 << 21)
 #define BTRFS_MOUNT_PANIC_ON_FATAL_ERROR	(1 << 22)
 #define BTRFS_MOUNT_RESCAN_UUID_TREE	(1 << 23)
+#define BTRFS_MOUNT_FRAGMENT_DATA	(1 << 24)
+#define BTRFS_MOUNT_FRAGMENT_METADATA	(1 << 25)
 
 #define BTRFS_DEFAULT_COMMIT_INTERVAL	(30)
 #define BTRFS_DEFAULT_MAX_INLINE	(8192)

@@ -2172,6 +2181,18 @@ struct btrfs_ioctl_defrag_range_args {
 	btrfs_clear_opt(root->fs_info->mount_opt, opt);			\
 }
 
+#ifdef CONFIG_BTRFS_DEBUG
+static inline int
+btrfs_should_fragment_free_space(struct btrfs_root *root,
+				 struct btrfs_block_group_cache *block_group)
+{
+	return (btrfs_test_opt(root, FRAGMENT_METADATA) &&
+		block_group->flags & BTRFS_BLOCK_GROUP_METADATA) ||
+	       (btrfs_test_opt(root, FRAGMENT_DATA) &&
+		block_group->flags & BTRFS_BLOCK_GROUP_DATA);
+}
+#endif
+
 /*
  * Requests for changes that need to be done during transaction commit.
  *
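btrfs_should_fragment_free_space() is the gate for all of the debug fragmentation code in the later hunks: it only exists under CONFIG_BTRFS_DEBUG and only returns true when the new FRAGMENT_DATA or FRAGMENT_METADATA mount flag (defined a few lines up) matches the block group's type, so production builds and undecorated mounts are unaffected.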
@@ -4327,25 +4327,6 @@ again:
 	return 0;
 }
 
-static void btrfs_free_pending_ordered(struct btrfs_transaction *cur_trans,
-				       struct btrfs_fs_info *fs_info)
-{
-	struct btrfs_ordered_extent *ordered;
-
-	spin_lock(&fs_info->trans_lock);
-	while (!list_empty(&cur_trans->pending_ordered)) {
-		ordered = list_first_entry(&cur_trans->pending_ordered,
-					   struct btrfs_ordered_extent,
-					   trans_list);
-		list_del_init(&ordered->trans_list);
-		spin_unlock(&fs_info->trans_lock);
-
-		btrfs_put_ordered_extent(ordered);
-		spin_lock(&fs_info->trans_lock);
-	}
-	spin_unlock(&fs_info->trans_lock);
-}
-
 void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
 				   struct btrfs_root *root)
 {

@@ -4357,7 +4338,6 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
 	cur_trans->state = TRANS_STATE_UNBLOCKED;
 	wake_up(&root->fs_info->transaction_wait);
 
-	btrfs_free_pending_ordered(cur_trans, root->fs_info);
 	btrfs_destroy_delayed_inodes(root);
 	btrfs_assert_delayed_root_empty(root);
 
@@ -332,6 +332,27 @@ static void put_caching_control(struct btrfs_caching_control *ctl)
 	kfree(ctl);
 }
 
+#ifdef CONFIG_BTRFS_DEBUG
+static void fragment_free_space(struct btrfs_root *root,
+				struct btrfs_block_group_cache *block_group)
+{
+	u64 start = block_group->key.objectid;
+	u64 len = block_group->key.offset;
+	u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
+		root->nodesize : root->sectorsize;
+	u64 step = chunk << 1;
+
+	while (len > chunk) {
+		btrfs_remove_free_space(block_group, start, chunk);
+		start += step;
+		if (len < step)
+			len = 0;
+		else
+			len -= step;
+	}
+}
+#endif
+
 /*
  * this is only called by cache_block_group, since we could have freed extents
 * we need to check the pinned_extents for any extents that can't be used yet
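fragment_free_space() produces a worst-case layout by deleting every other chunk of a block group's free space. A minimal userspace sketch of the same stepping logic (made-up sizes, printf standing in for btrfs_remove_free_space()):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t start = 0;
	uint64_t len = 16 * 4096;	/* pretend block group of 16 chunks */
	uint64_t chunk = 4096;		/* nodesize or sectorsize in btrfs */
	uint64_t step = chunk << 1;	/* skip one chunk between removals */

	while (len > chunk) {
		/* remove one chunk, keep the next one free */
		printf("remove free space [%llu, %llu)\n",
		       (unsigned long long)start,
		       (unsigned long long)(start + chunk));
		start += step;
		len = (len < step) ? 0 : len - step;
	}
	return 0;
}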
@@ -388,6 +409,7 @@ static noinline void caching_thread(struct btrfs_work *work)
 	u64 last = 0;
 	u32 nritems;
 	int ret = -ENOMEM;
+	bool wakeup = true;
 
 	caching_ctl = container_of(work, struct btrfs_caching_control, work);
 	block_group = caching_ctl->block_group;

@@ -400,6 +422,15 @@ static noinline void caching_thread(struct btrfs_work *work)
 
 	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
 
+#ifdef CONFIG_BTRFS_DEBUG
+	/*
+	 * If we're fragmenting we don't want to make anybody think we can
+	 * allocate from this block group until we've had a chance to fragment
+	 * the free space.
+	 */
+	if (btrfs_should_fragment_free_space(extent_root, block_group))
+		wakeup = false;
+#endif
 	/*
 	 * We don't want to deadlock with somebody trying to allocate a new
 	 * extent for the extent root while also trying to search the extent

@@ -441,7 +472,8 @@ next:
 
 		if (need_resched() ||
 		    rwsem_is_contended(&fs_info->commit_root_sem)) {
-			caching_ctl->progress = last;
+			if (wakeup)
+				caching_ctl->progress = last;
 			btrfs_release_path(path);
 			up_read(&fs_info->commit_root_sem);
 			mutex_unlock(&caching_ctl->mutex);

@@ -464,7 +496,8 @@ next:
 			key.offset = 0;
 			key.type = BTRFS_EXTENT_ITEM_KEY;
 
-			caching_ctl->progress = last;
+			if (wakeup)
+				caching_ctl->progress = last;
 			btrfs_release_path(path);
 			goto next;
 		}

@@ -491,7 +524,8 @@ next:
 
 			if (total_found > (1024 * 1024 * 2)) {
 				total_found = 0;
-				wake_up(&caching_ctl->wait);
+				if (wakeup)
+					wake_up(&caching_ctl->wait);
 			}
 		}
 		path->slots[0]++;

@@ -501,13 +535,27 @@ next:
 	total_found += add_new_free_space(block_group, fs_info, last,
 					  block_group->key.objectid +
 					  block_group->key.offset);
-	caching_ctl->progress = (u64)-1;
-
 	spin_lock(&block_group->lock);
 	block_group->caching_ctl = NULL;
 	block_group->cached = BTRFS_CACHE_FINISHED;
 	spin_unlock(&block_group->lock);
 
+#ifdef CONFIG_BTRFS_DEBUG
+	if (btrfs_should_fragment_free_space(extent_root, block_group)) {
+		u64 bytes_used;
+
+		spin_lock(&block_group->space_info->lock);
+		spin_lock(&block_group->lock);
+		bytes_used = block_group->key.offset -
+			btrfs_block_group_used(&block_group->item);
+		block_group->space_info->bytes_used += bytes_used >> 1;
+		spin_unlock(&block_group->lock);
+		spin_unlock(&block_group->space_info->lock);
+		fragment_free_space(extent_root, block_group);
+	}
+#endif
+
+	caching_ctl->progress = (u64)-1;
 err:
 	btrfs_free_path(path);
 	up_read(&fs_info->commit_root_sem);
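Note the bytes_used >> 1 accounting in the debug block above (and in the cache_block_group() hunk that follows): fragment_free_space() deletes every other chunk, so half of the block group's free space really is gone from the free-space ctl, and the space_info counters are bumped by that same half to stay consistent with what is still allocatable.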
@@ -607,6 +655,22 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 		}
 	}
 	spin_unlock(&cache->lock);
+#ifdef CONFIG_BTRFS_DEBUG
+	if (ret == 1 &&
+	    btrfs_should_fragment_free_space(fs_info->extent_root,
+					     cache)) {
+		u64 bytes_used;
+
+		spin_lock(&cache->space_info->lock);
+		spin_lock(&cache->lock);
+		bytes_used = cache->key.offset -
+			btrfs_block_group_used(&cache->item);
+		cache->space_info->bytes_used += bytes_used >> 1;
+		spin_unlock(&cache->lock);
+		spin_unlock(&cache->space_info->lock);
+		fragment_free_space(fs_info->extent_root, cache);
+	}
+#endif
 	mutex_unlock(&caching_ctl->mutex);
 
 	wake_up(&caching_ctl->wait);

@@ -3343,6 +3407,15 @@ again:
 	}
 	spin_unlock(&block_group->lock);
 
+	/*
+	 * We hit an ENOSPC when setting up the cache in this transaction, just
+	 * skip doing the setup, we've already cleared the cache so we're safe.
+	 */
+	if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) {
+		ret = -ENOSPC;
+		goto out_put;
+	}
+
 	/*
 	 * Try to preallocate enough space based on how big the block group is.
 	 * Keep in mind this has to include any pinned space which could end up

@@ -3363,8 +3436,18 @@ again:
 	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
 					      num_pages, num_pages,
 					      &alloc_hint);
+	/*
+	 * Our cache requires contiguous chunks so that we don't modify a bunch
+	 * of metadata or split extents when writing the cache out, which means
+	 * we can enospc if we are heavily fragmented in addition to just normal
+	 * out of space conditions.  So if we hit this just skip setting up any
+	 * other block groups for this transaction, maybe we'll unpin enough
+	 * space the next time around.
+	 */
 	if (!ret)
 		dcs = BTRFS_DC_SETUP;
+	else if (ret == -ENOSPC)
+		set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);
 	btrfs_free_reserved_data_space(inode, 0, num_pages);
 
 out_put:

@@ -3751,6 +3834,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
 	found->bytes_readonly = 0;
 	found->bytes_may_use = 0;
 	found->full = 0;
+	found->max_extent_size = 0;
 	found->force_alloc = CHUNK_ALLOC_NO_FORCE;
 	found->chunk_alloc = 0;
 	found->flush = 0;

@@ -4003,7 +4087,8 @@ commit_trans:
 		if (IS_ERR(trans))
 			return PTR_ERR(trans);
 		if (have_pinned_space >= 0 ||
-		    trans->transaction->have_free_bgs ||
+		    test_bit(BTRFS_TRANS_HAVE_FREE_BGS,
+			     &trans->transaction->flags) ||
 		    need_commit > 0) {
 			ret = btrfs_commit_transaction(trans, root);
 			if (ret)

@@ -6112,6 +6197,34 @@ void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
 	update_global_block_rsv(fs_info);
 }
 
+/*
+ * Returns the free cluster for the given space info and sets empty_cluster to
+ * what it should be based on the mount options.
+ */
+static struct btrfs_free_cluster *
+fetch_cluster_info(struct btrfs_root *root, struct btrfs_space_info *space_info,
+		   u64 *empty_cluster)
+{
+	struct btrfs_free_cluster *ret = NULL;
+	bool ssd = btrfs_test_opt(root, SSD);
+
+	*empty_cluster = 0;
+	if (btrfs_mixed_space_info(space_info))
+		return ret;
+
+	if (ssd)
+		*empty_cluster = 2 * 1024 * 1024;
+	if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
+		ret = &root->fs_info->meta_alloc_cluster;
+		if (!ssd)
+			*empty_cluster = 64 * 1024;
+	} else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) && ssd) {
+		ret = &root->fs_info->data_alloc_cluster;
+	}
+
+	return ret;
+}
+
 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
 			      const bool return_free_space)
 {
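fetch_cluster_info() centralizes policy that find_free_extent() used to open-code: mixed space_infos get no cluster, metadata uses meta_alloc_cluster with a 64K empty_cluster (2M on SSD), and data only clusters on SSD. Having one helper lets unpin_extent_range() in the next hunk ask the same question when deciding whether enough contiguous space has come back to clear a cluster's fragmented flag.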
@@ -6119,7 +6232,10 @@ static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
 	struct btrfs_block_group_cache *cache = NULL;
 	struct btrfs_space_info *space_info;
 	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
+	struct btrfs_free_cluster *cluster = NULL;
 	u64 len;
+	u64 total_unpinned = 0;
+	u64 empty_cluster = 0;
 	bool readonly;
 
 	while (start <= end) {

@@ -6128,8 +6244,14 @@ static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
 		    start >= cache->key.objectid + cache->key.offset) {
 			if (cache)
 				btrfs_put_block_group(cache);
+			total_unpinned = 0;
 			cache = btrfs_lookup_block_group(fs_info, start);
 			BUG_ON(!cache); /* Logic error */
+
+			cluster = fetch_cluster_info(root,
+						     cache->space_info,
+						     &empty_cluster);
+			empty_cluster <<= 1;
 		}
 
 		len = cache->key.objectid + cache->key.offset - start;

@@ -6142,12 +6264,27 @@ static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
 		}
 
 		start += len;
+		total_unpinned += len;
 		space_info = cache->space_info;
 
+		/*
+		 * If this space cluster has been marked as fragmented and we've
+		 * unpinned enough in this block group to potentially allow a
+		 * cluster to be created inside of it go ahead and clear the
+		 * fragmented check.
+		 */
+		if (cluster && cluster->fragmented &&
+		    total_unpinned > empty_cluster) {
+			spin_lock(&cluster->lock);
+			cluster->fragmented = 0;
+			spin_unlock(&cluster->lock);
+		}
+
 		spin_lock(&space_info->lock);
 		spin_lock(&cache->lock);
 		cache->pinned -= len;
 		space_info->bytes_pinned -= len;
+		space_info->max_extent_size = 0;
 		percpu_counter_add(&space_info->total_bytes_pinned, -len);
 		if (cache->ro) {
 			space_info->bytes_readonly += len;
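The empty_cluster <<= 1 in the previous hunk sets the bar: cluster->fragmented is only cleared once the block group has returned more than twice the normal cluster window's worth of pinned space, so clustered allocation is not retried on marginal gains.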
@@ -6880,7 +7017,7 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
 	struct btrfs_block_group_cache *block_group = NULL;
 	u64 search_start = 0;
 	u64 max_extent_size = 0;
-	int empty_cluster = 2 * 1024 * 1024;
+	u64 empty_cluster = 0;
 	struct btrfs_space_info *space_info;
 	int loop = 0;
 	int index = __get_raid_index(flags);

@@ -6890,6 +7027,7 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
 	bool failed_alloc = false;
 	bool use_cluster = true;
 	bool have_caching_bg = false;
+	bool full_search = false;
 
 	WARN_ON(num_bytes < root->sectorsize);
 	ins->type = BTRFS_EXTENT_ITEM_KEY;

@@ -6905,36 +7043,47 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
 	}
 
 	/*
-	 * If the space info is for both data and metadata it means we have a
-	 * small filesystem and we can't use the clustering stuff.
+	 * If our free space is heavily fragmented we may not be able to make
+	 * big contiguous allocations, so instead of doing the expensive search
+	 * for free space, simply return ENOSPC with our max_extent_size so we
+	 * can go ahead and search for a more manageable chunk.
+	 *
+	 * If our max_extent_size is large enough for our allocation simply
+	 * disable clustering since we will likely not be able to find enough
+	 * space to create a cluster and induce latency trying.
 	 */
-	if (btrfs_mixed_space_info(space_info))
-		use_cluster = false;
-
-	if (flags & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
-		last_ptr = &root->fs_info->meta_alloc_cluster;
-		if (!btrfs_test_opt(root, SSD))
-			empty_cluster = 64 * 1024;
-	}
-
-	if ((flags & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
-	    btrfs_test_opt(root, SSD)) {
-		last_ptr = &root->fs_info->data_alloc_cluster;
+	if (unlikely(space_info->max_extent_size)) {
+		spin_lock(&space_info->lock);
+		if (space_info->max_extent_size &&
+		    num_bytes > space_info->max_extent_size) {
+			ins->offset = space_info->max_extent_size;
+			spin_unlock(&space_info->lock);
+			return -ENOSPC;
+		} else if (space_info->max_extent_size) {
+			use_cluster = false;
+		}
+		spin_unlock(&space_info->lock);
 	}
 
+	last_ptr = fetch_cluster_info(orig_root, space_info, &empty_cluster);
 	if (last_ptr) {
 		spin_lock(&last_ptr->lock);
 		if (last_ptr->block_group)
 			hint_byte = last_ptr->window_start;
+		if (last_ptr->fragmented) {
+			/*
+			 * We still set window_start so we can keep track of the
+			 * last place we found an allocation to try and save
+			 * some time.
+			 */
+			hint_byte = last_ptr->window_start;
+			use_cluster = false;
+		}
 		spin_unlock(&last_ptr->lock);
 	}
 
 	search_start = max(search_start, first_logical_byte(root, 0));
 	search_start = max(search_start, hint_byte);
-
-	if (!last_ptr)
-		empty_cluster = 0;
-
 	if (search_start == hint_byte) {
 		block_group = btrfs_lookup_block_group(root->fs_info,
 						       search_start);
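This hunk is the consumer of space_info->max_extent_size: if a previous pass recorded a best-case extent smaller than the current request, find_free_extent() fails fast with -ENOSPC and reports that size via ins->offset so the caller can shrink its next request (see the sketch after the btrfs_reserve_extent() hunk below); if the request would still fit, only clustering is disabled. A fragmented cluster likewise keeps its window_start purely as a search hint.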
@@ -6969,6 +7118,8 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
 	}
 search:
 	have_caching_bg = false;
+	if (index == 0 || index == __get_raid_index(flags))
+		full_search = true;
 	down_read(&space_info->groups_sem);
 	list_for_each_entry(block_group, &space_info->block_groups[index],
 			    list) {

@@ -7002,6 +7153,7 @@ search:
 have_block_group:
 		cached = block_group_cache_done(block_group);
 		if (unlikely(!cached)) {
+			have_caching_bg = true;
 			ret = cache_block_group(block_group, 0);
 			BUG_ON(ret < 0);
 			ret = 0;

@@ -7016,7 +7168,7 @@ have_block_group:
 		 * Ok we want to try and use the cluster allocator, so
 		 * lets look there
 		 */
-		if (last_ptr) {
+		if (last_ptr && use_cluster) {
 			struct btrfs_block_group_cache *used_block_group;
 			unsigned long aligned_cluster;
 			/*

@@ -7142,6 +7294,16 @@ refill_cluster:
 		}
 
 unclustered_alloc:
+		/*
+		 * We are doing an unclustered alloc, set the fragmented flag so
+		 * we don't bother trying to setup a cluster again until we get
+		 * more space.
+		 */
+		if (unlikely(last_ptr)) {
+			spin_lock(&last_ptr->lock);
+			last_ptr->fragmented = 1;
+			spin_unlock(&last_ptr->lock);
+		}
 		spin_lock(&block_group->free_space_ctl->tree_lock);
 		if (cached &&
 		    block_group->free_space_ctl->free_space <

@@ -7174,8 +7336,6 @@ unclustered_alloc:
 			failed_alloc = true;
 			goto have_block_group;
 		} else if (!offset) {
-			if (!cached)
-				have_caching_bg = true;
 			goto loop;
 		}
checks:
@@ -7232,7 +7392,20 @@ loop:
 	 */
 	if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
 		index = 0;
-		loop++;
+		if (loop == LOOP_CACHING_NOWAIT) {
+			/*
+			 * We want to skip the LOOP_CACHING_WAIT step if we
+			 * don't have any uncached bgs and we've already done a
+			 * full search through.
+			 */
+			if (have_caching_bg || !full_search)
+				loop = LOOP_CACHING_WAIT;
+			else
+				loop = LOOP_ALLOC_CHUNK;
+		} else {
+			loop++;
+		}
 
 		if (loop == LOOP_ALLOC_CHUNK) {
 			struct btrfs_trans_handle *trans;
 			int exist = 0;
@@ -7250,6 +7423,15 @@ loop:
 
 			ret = do_chunk_alloc(trans, root, flags,
 					     CHUNK_ALLOC_FORCE);
+
+			/*
+			 * If we can't allocate a new chunk we've already looped
+			 * through at least once, move on to the NO_EMPTY_SIZE
+			 * case.
+			 */
+			if (ret == -ENOSPC)
+				loop = LOOP_NO_EMPTY_SIZE;
+
 			/*
 			 * Do not bail out on ENOSPC since we
 			 * can do more things.

@@ -7266,6 +7448,15 @@ loop:
 		}
 
 		if (loop == LOOP_NO_EMPTY_SIZE) {
+			/*
+			 * Don't loop again if we already have no empty_size and
+			 * no empty_cluster.
+			 */
+			if (empty_size == 0 &&
+			    empty_cluster == 0) {
+				ret = -ENOSPC;
+				goto out;
+			}
 			empty_size = 0;
 			empty_cluster = 0;
 		}

@@ -7274,11 +7465,20 @@ loop:
 	} else if (!ins->objectid) {
 		ret = -ENOSPC;
 	} else if (ins->objectid) {
+		if (!use_cluster && last_ptr) {
+			spin_lock(&last_ptr->lock);
+			last_ptr->window_start = ins->objectid;
+			spin_unlock(&last_ptr->lock);
+		}
 		ret = 0;
 	}
 out:
-	if (ret == -ENOSPC)
+	if (ret == -ENOSPC) {
+		spin_lock(&space_info->lock);
+		space_info->max_extent_size = max_extent_size;
+		spin_unlock(&space_info->lock);
 		ins->offset = max_extent_size;
+	}
 	return ret;
 }
 

@@ -7327,7 +7527,7 @@ int btrfs_reserve_extent(struct btrfs_root *root,
 			 u64 empty_size, u64 hint_byte,
 			 struct btrfs_key *ins, int is_data, int delalloc)
 {
-	bool final_tried = false;
+	bool final_tried = num_bytes == min_alloc_size;
 	u64 flags;
 	int ret;
 
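With final_tried initialized to num_bytes == min_alloc_size, a minimum-size request now fails on the first ENOSPC instead of pointlessly retrying at the same size. The shrink-and-retry contract built around ins->offset looks roughly like this self-contained sketch (toy allocator, made-up sizes; not the kernel's code):

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

struct key { uint64_t objectid, offset; };

static uint64_t largest_free = 64 * 1024;	/* pretend worst fragmentation */

/* toy allocator: on failure, reports its best contiguous size in ins->offset */
static int find_free_extent_sim(uint64_t num_bytes, struct key *ins)
{
	if (num_bytes > largest_free) {
		ins->offset = largest_free;
		return -ENOSPC;
	}
	ins->objectid = 0x1000;			/* fake bytenr */
	ins->offset = num_bytes;
	return 0;
}

int main(void)
{
	uint64_t num_bytes = 8 * 1024 * 1024, min_alloc = 4096;
	int final_tried = (num_bytes == min_alloc);
	struct key ins = {0, 0};
	int ret;

	while ((ret = find_free_extent_sim(num_bytes, &ins)) == -ENOSPC &&
	       !final_tried) {
		/* shrink toward the reported max extent size */
		num_bytes = ins.offset ? ins.offset : num_bytes >> 1;
		if (num_bytes <= min_alloc) {
			num_bytes = min_alloc;
			final_tried = 1;
		}
		printf("retrying with %llu bytes\n",
		       (unsigned long long)num_bytes);
	}
	printf("ret=%d got %llu bytes\n", ret,
	       (unsigned long long)(ret ? 0 : ins.offset));
	return 0;
}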
@@ -8929,7 +9129,7 @@ again:
 	 * back off and let this transaction commit
 	 */
 	mutex_lock(&root->fs_info->ro_block_group_mutex);
-	if (trans->transaction->dirty_bg_run) {
+	if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
 		u64 transid = trans->transid;
 
 		mutex_unlock(&root->fs_info->ro_block_group_mutex);

@@ -9679,6 +9879,14 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 
 	free_excluded_extents(root, cache);
 
+#ifdef CONFIG_BTRFS_DEBUG
+	if (btrfs_should_fragment_free_space(root, cache)) {
+		u64 new_bytes_used = size - bytes_used;
+
+		bytes_used += new_bytes_used >> 1;
+		fragment_free_space(root, cache);
+	}
+#endif
 	/*
 	 * Call to ensure the corresponding space_info object is created and
 	 * assigned to our block group, but don't update its counters just yet.
@@ -1730,7 +1730,7 @@ static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
  */
 static int search_bitmap(struct btrfs_free_space_ctl *ctl,
 			 struct btrfs_free_space *bitmap_info, u64 *offset,
-			 u64 *bytes)
+			 u64 *bytes, bool for_alloc)
 {
 	unsigned long found_bits = 0;
 	unsigned long max_bits = 0;

@@ -1738,11 +1738,26 @@ static int search_bitmap(struct btrfs_free_space_ctl *ctl,
 	unsigned long next_zero;
 	unsigned long extent_bits;
 
+	/*
+	 * Skip searching the bitmap if we don't have a contiguous section that
+	 * is large enough for this allocation.
+	 */
+	if (for_alloc &&
+	    bitmap_info->max_extent_size &&
+	    bitmap_info->max_extent_size < *bytes) {
+		*bytes = bitmap_info->max_extent_size;
+		return -1;
+	}
+
 	i = offset_to_bit(bitmap_info->offset, ctl->unit,
 			  max_t(u64, *offset, bitmap_info->offset));
 	bits = bytes_to_bits(*bytes, ctl->unit);
 
 	for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) {
+		if (for_alloc && bits == 1) {
+			found_bits = 1;
+			break;
+		}
 		next_zero = find_next_zero_bit(bitmap_info->bitmap,
 					       BITS_PER_BITMAP, i);
 		extent_bits = next_zero - i;
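The new bool for_alloc matters because search_bitmap() is also called from trim and sanity-check paths, where the cached-skip and the bits == 1 shortcut would be wrong. A toy, runnable version of the caching idea (hypothetical 64-bit bitmap; set bits mean free space, and only a failed full scan records the max run):

#include <stdio.h>
#include <stdint.h>

#define NBITS 64

struct bitmap { uint64_t bits; unsigned max_run; };

static int search(struct bitmap *b, unsigned want, int for_alloc)
{
	unsigned run = 0, max_run = 0;

	if (for_alloc && b->max_run && b->max_run < want)
		return -1;			/* cached: cannot satisfy */

	for (unsigned i = 0; i < NBITS; i++) {
		if (b->bits & (1ULL << i)) {	/* set bit == free space */
			if (++run >= want)
				return (int)(i + 1 - want);
		} else {
			if (run > max_run)
				max_run = run;
			run = 0;
		}
	}
	if (run > max_run)
		max_run = run;
	b->max_run = max_run;			/* remember for next time */
	return -1;
}

int main(void)
{
	struct bitmap b = { 0x0f0f0f0f0f0f0f0fULL, 0 };	/* runs of 4 */

	printf("want 8 -> %d (records max_run=%u)\n", search(&b, 8, 1), b.max_run);
	printf("want 8 again -> %d (skipped via cache)\n", search(&b, 8, 1));
	printf("want 4 -> %d\n", search(&b, 4, 1));
	return 0;
}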
@@ -1762,6 +1777,7 @@ static int search_bitmap(struct btrfs_free_space_ctl *ctl,
 	}
 
 	*bytes = (u64)(max_bits) * ctl->unit;
+	bitmap_info->max_extent_size = *bytes;
 	return -1;
 }
 

@@ -1813,7 +1829,7 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
 		if (entry->bitmap) {
 			u64 size = *bytes;
 
-			ret = search_bitmap(ctl, entry, &tmp, &size);
+			ret = search_bitmap(ctl, entry, &tmp, &size, true);
 			if (!ret) {
 				*offset = tmp;
 				*bytes = size;

@@ -1874,7 +1890,8 @@ again:
 	search_start = *offset;
 	search_bytes = ctl->unit;
 	search_bytes = min(search_bytes, end - search_start + 1);
-	ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
+	ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes,
+			    false);
 	if (ret < 0 || search_start != *offset)
 		return -EINVAL;
 

@@ -1919,7 +1936,7 @@ again:
 		search_start = *offset;
 		search_bytes = ctl->unit;
 		ret = search_bitmap(ctl, bitmap_info, &search_start,
-				    &search_bytes);
+				    &search_bytes, false);
 		if (ret < 0 || search_start != *offset)
 			return -EAGAIN;
 

@@ -1943,6 +1960,12 @@ static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
 
 	bitmap_set_bits(ctl, info, offset, bytes_to_set);
 
+	/*
+	 * We set some bytes, we have no idea what the max extent size is
+	 * anymore.
+	 */
+	info->max_extent_size = 0;
+
 	return bytes_to_set;
 
 }
@@ -1951,12 +1974,19 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
 			struct btrfs_free_space *info)
 {
 	struct btrfs_block_group_cache *block_group = ctl->private;
+	bool forced = false;
+
+#ifdef CONFIG_BTRFS_DEBUG
+	if (btrfs_should_fragment_free_space(block_group->fs_info->extent_root,
+					     block_group))
+		forced = true;
+#endif
 
 	/*
 	 * If we are below the extents threshold then we can add this as an
 	 * extent, and don't have to deal with the bitmap
 	 */
-	if (ctl->free_extents < ctl->extents_thresh) {
+	if (!forced && ctl->free_extents < ctl->extents_thresh) {
 		/*
 		 * If this block group has some small extents we don't want to
 		 * use up all of our free slots in the cache with them, we want
@@ -2661,7 +2691,7 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
 	search_start = min_start;
 	search_bytes = bytes;
 
-	err = search_bitmap(ctl, entry, &search_start, &search_bytes);
+	err = search_bitmap(ctl, entry, &search_start, &search_bytes, true);
 	if (err) {
 		if (search_bytes > *max_extent_size)
 			*max_extent_size = search_bytes;

@@ -2775,6 +2805,7 @@ static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
 	unsigned long want_bits;
 	unsigned long min_bits;
 	unsigned long found_bits;
+	unsigned long max_bits = 0;
 	unsigned long start = 0;
 	unsigned long total_found = 0;
 	int ret;

@@ -2784,6 +2815,13 @@ static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
 	want_bits = bytes_to_bits(bytes, ctl->unit);
 	min_bits = bytes_to_bits(min_bytes, ctl->unit);
 
+	/*
+	 * Don't bother looking for a cluster in this bitmap if it's heavily
+	 * fragmented.
+	 */
+	if (entry->max_extent_size &&
+	    entry->max_extent_size < cont1_bytes)
+		return -ENOSPC;
again:
 	found_bits = 0;
 	for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) {

@@ -2791,13 +2829,19 @@ again:
 					       BITS_PER_BITMAP, i);
 		if (next_zero - i >= min_bits) {
 			found_bits = next_zero - i;
+			if (found_bits > max_bits)
+				max_bits = found_bits;
 			break;
 		}
+		if (next_zero - i > max_bits)
+			max_bits = next_zero - i;
 		i = next_zero;
 	}
 
-	if (!found_bits)
+	if (!found_bits) {
+		entry->max_extent_size = (u64)max_bits * ctl->unit;
 		return -ENOSPC;
+	}
 
 	if (!total_found) {
 		start = i;
@@ -3056,6 +3100,7 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
 	spin_lock_init(&cluster->refill_lock);
 	cluster->root = RB_ROOT;
 	cluster->max_size = 0;
+	cluster->fragmented = false;
 	INIT_LIST_HEAD(&cluster->block_group_list);
 	cluster->block_group = NULL;
 }

@@ -3223,7 +3268,7 @@ static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
 		}
 
 		bytes = minlen;
-		ret2 = search_bitmap(ctl, entry, &start, &bytes);
+		ret2 = search_bitmap(ctl, entry, &start, &bytes, false);
 		if (ret2 || start >= end) {
 			spin_unlock(&ctl->tree_lock);
 			mutex_unlock(&ctl->cache_writeout_mutex);

@@ -3376,7 +3421,7 @@ u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
 	u64 count = 1;
 	int ret;
 
-	ret = search_bitmap(ctl, entry, &offset, &count);
+	ret = search_bitmap(ctl, entry, &offset, &count, true);
 	/* Logic error; Should be empty if it can't find anything */
 	ASSERT(!ret);
 

@@ -3532,6 +3577,7 @@ again:
 	spin_lock(&ctl->tree_lock);
 	info->offset = offset;
 	info->bytes = bytes;
+	info->max_extent_size = 0;
 	ret = link_free_space(ctl, info);
 	spin_unlock(&ctl->tree_lock);
 	if (ret)

@@ -3559,6 +3605,7 @@ again:
 	}
 
 	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
+
 	bytes -= bytes_added;
 	offset += bytes_added;
 	spin_unlock(&ctl->tree_lock);

@@ -3602,7 +3649,7 @@ have_info:
 
 	bit_off = offset;
 	bit_bytes = ctl->unit;
-	ret = search_bitmap(ctl, info, &bit_off, &bit_bytes);
+	ret = search_bitmap(ctl, info, &bit_off, &bit_bytes, false);
 	if (!ret) {
 		if (bit_off == offset) {
 			ret = 1;
@@ -23,6 +23,7 @@ struct btrfs_free_space {
 	struct rb_node offset_index;
 	u64 offset;
 	u64 bytes;
+	u64 max_extent_size;
 	unsigned long *bitmap;
 	struct list_head list;
 };
@@ -9745,6 +9745,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
 	u64 cur_offset = start;
 	u64 i_size;
 	u64 cur_bytes;
+	u64 last_alloc = (u64)-1;
 	int ret = 0;
 	bool own_trans = true;
 

@@ -9761,6 +9762,13 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
 
 		cur_bytes = min(num_bytes, 256ULL * 1024 * 1024);
 		cur_bytes = max(cur_bytes, min_size);
+		/*
+		 * If we are severely fragmented we could end up with really
+		 * small allocations, so if the allocator is returning small
+		 * chunks lets make its job easier by only searching for those
+		 * sized chunks.
+		 */
+		cur_bytes = min(cur_bytes, last_alloc);
 		ret = btrfs_reserve_extent(root, cur_bytes, min_size, 0,
 					   *alloc_hint, &ins, 1, 0);
 		if (ret) {

@@ -9769,6 +9777,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
 			break;
 		}
 
+		last_alloc = ins.offset;
 		ret = insert_reserved_file_extent(trans, inode,
 						  cur_offset, ins.objectid,
 						  ins.offset, ins.offset,
@@ -490,15 +490,16 @@ void btrfs_wait_logged_extents(struct btrfs_trans_handle *trans,
 
 	spin_lock_irq(&log->log_extents_lock[index]);
 	while (!list_empty(&log->logged_list[index])) {
+		struct inode *inode;
 		ordered = list_first_entry(&log->logged_list[index],
 					   struct btrfs_ordered_extent,
 					   log_list);
 		list_del_init(&ordered->log_list);
+		inode = ordered->inode;
 		spin_unlock_irq(&log->log_extents_lock[index]);
 
 		if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
 		    !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
-			struct inode *inode = ordered->inode;
 			u64 start = ordered->file_offset;
 			u64 end = ordered->file_offset + ordered->len - 1;
 
@@ -509,20 +510,25 @@ void btrfs_wait_logged_extents(struct btrfs_trans_handle *trans,
 					   &ordered->flags));
 
 		/*
-		 * If our ordered extent completed it means it updated the
-		 * fs/subvol and csum trees already, so no need to make the
-		 * current transaction's commit wait for it, as we end up
-		 * holding memory unnecessarily and delaying the inode's iput
-		 * until the transaction commit (we schedule an iput for the
-		 * inode when the ordered extent's refcount drops to 0), which
-		 * prevents it from being evictable until the transaction
-		 * commits.
+		 * In order to keep us from losing our ordered extent
+		 * information when committing the transaction we have to make
+		 * sure that any logged extents are completed when we go to
+		 * commit the transaction.  To do this we simply increase the
+		 * current transactions pending_ordered counter and decrement it
+		 * when the ordered extent completes.
 		 */
-		if (test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags))
-			btrfs_put_ordered_extent(ordered);
-		else
-			list_add_tail(&ordered->trans_list, &trans->ordered);
+		if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
+			struct btrfs_ordered_inode_tree *tree;
+
+			tree = &BTRFS_I(inode)->ordered_tree;
+			spin_lock_irq(&tree->lock);
+			if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
+				set_bit(BTRFS_ORDERED_PENDING, &ordered->flags);
+				atomic_inc(&trans->transaction->pending_ordered);
+			}
+			spin_unlock_irq(&tree->lock);
+		}
+		btrfs_put_ordered_extent(ordered);
 		spin_lock_irq(&log->log_extents_lock[index]);
 	}
 	spin_unlock_irq(&log->log_extents_lock[index]);
@@ -584,6 +590,7 @@ void btrfs_remove_ordered_extent(struct inode *inode,
 	struct btrfs_ordered_inode_tree *tree;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct rb_node *node;
+	bool dec_pending_ordered = false;
 
 	tree = &BTRFS_I(inode)->ordered_tree;
 	spin_lock_irq(&tree->lock);

@@ -593,8 +600,37 @@ void btrfs_remove_ordered_extent(struct inode *inode,
 	if (tree->last == node)
 		tree->last = NULL;
 	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
+	if (test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags))
+		dec_pending_ordered = true;
 	spin_unlock_irq(&tree->lock);
 
+	/*
+	 * The current running transaction is waiting on us, we need to let it
+	 * know that we're complete and wake it up.
+	 */
+	if (dec_pending_ordered) {
+		struct btrfs_transaction *trans;
+
+		/*
+		 * The checks for trans are just a formality, it should be set,
+		 * but if it isn't we don't want to deref/assert under the spin
+		 * lock, so be nice and check if trans is set, but ASSERT() so
+		 * if it isn't set a developer will notice.
+		 */
+		spin_lock(&root->fs_info->trans_lock);
+		trans = root->fs_info->running_transaction;
+		if (trans)
+			atomic_inc(&trans->use_count);
+		spin_unlock(&root->fs_info->trans_lock);
+
+		ASSERT(trans);
+		if (trans) {
+			if (atomic_dec_and_test(&trans->pending_ordered))
+				wake_up(&trans->pending_wait);
+			btrfs_put_transaction(trans);
+		}
+	}
+
 	spin_lock(&root->ordered_extent_lock);
 	list_del_init(&entry->root_extent_list);
 	root->nr_ordered_extents--;
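Together with the transaction.c changes further down, this replaces the old scheme of splicing completed ordered extents onto a per-transaction list: logging marks a still-running ordered extent BTRFS_ORDERED_PENDING and bumps pending_ordered, and completion here decrements the counter and wakes the committer. A userspace analogue of the wait/wake shape (pthreads instead of the kernel's wait_event()/atomic_t):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t pending_wait = PTHREAD_COND_INITIALIZER;
static int pending_ordered;

static void *ordered_complete(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	if (--pending_ordered == 0)	/* last completion wakes the committer */
		pthread_cond_broadcast(&pending_wait);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t threads[4];

	pending_ordered = 4;		/* four in-flight ordered extents */
	for (int i = 0; i < 4; i++)
		pthread_create(&threads[i], NULL, ordered_complete, NULL);

	pthread_mutex_lock(&lock);	/* btrfs_wait_pending_ordered() analogue */
	while (pending_ordered != 0)
		pthread_cond_wait(&pending_wait, &lock);
	pthread_mutex_unlock(&lock);
	printf("all pending ordered extents completed\n");

	for (int i = 0; i < 4; i++)
		pthread_join(threads[i], NULL);
	return 0;
}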
@@ -73,6 +73,8 @@ struct btrfs_ordered_sum {
 
 #define BTRFS_ORDERED_LOGGED 10 /* Set when we've waited on this ordered extent
 				 * in the logging code. */
+#define BTRFS_ORDERED_PENDING 11 /* We are waiting for this ordered extent to
+				  * complete in the current transaction. */
 struct btrfs_ordered_extent {
 	/* logical offset in the file */
 	u64 file_offset;
@@ -303,6 +303,9 @@ enum {
 	Opt_commit_interval, Opt_barrier, Opt_nodefrag, Opt_nodiscard,
 	Opt_noenospc_debug, Opt_noflushoncommit, Opt_acl, Opt_datacow,
 	Opt_datasum, Opt_treelog, Opt_noinode_cache,
+#ifdef CONFIG_BTRFS_DEBUG
+	Opt_fragment_data, Opt_fragment_metadata, Opt_fragment_all,
+#endif
 	Opt_err,
 };
 

@@ -355,6 +358,11 @@ static match_table_t tokens = {
 	{Opt_rescan_uuid_tree, "rescan_uuid_tree"},
 	{Opt_fatal_errors, "fatal_errors=%s"},
 	{Opt_commit_interval, "commit=%d"},
+#ifdef CONFIG_BTRFS_DEBUG
+	{Opt_fragment_data, "fragment=data"},
+	{Opt_fragment_metadata, "fragment=metadata"},
+	{Opt_fragment_all, "fragment=all"},
+#endif
 	{Opt_err, NULL},
 };
 
@@ -721,6 +729,22 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
 				info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
 			}
 			break;
+#ifdef CONFIG_BTRFS_DEBUG
+		case Opt_fragment_all:
+			btrfs_info(root->fs_info, "fragmenting all space");
+			btrfs_set_opt(info->mount_opt, FRAGMENT_DATA);
+			btrfs_set_opt(info->mount_opt, FRAGMENT_METADATA);
+			break;
+		case Opt_fragment_metadata:
+			btrfs_info(root->fs_info, "fragmenting metadata");
+			btrfs_set_opt(info->mount_opt,
+				      FRAGMENT_METADATA);
+			break;
+		case Opt_fragment_data:
+			btrfs_info(root->fs_info, "fragmenting data");
+			btrfs_set_opt(info->mount_opt, FRAGMENT_DATA);
+			break;
+#endif
 		case Opt_err:
 			btrfs_info(root->fs_info, "unrecognized mount option '%s'", p);
 			ret = -EINVAL;

@@ -1172,6 +1196,12 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
 		seq_puts(seq, ",fatal_errors=panic");
 	if (info->commit_interval != BTRFS_DEFAULT_COMMIT_INTERVAL)
 		seq_printf(seq, ",commit=%d", info->commit_interval);
+#ifdef CONFIG_BTRFS_DEBUG
+	if (btrfs_test_opt(root, FRAGMENT_DATA))
+		seq_puts(seq, ",fragment=data");
+	if (btrfs_test_opt(root, FRAGMENT_METADATA))
+		seq_puts(seq, ",fragment=metadata");
+#endif
 	seq_printf(seq, ",subvolid=%llu",
 		   BTRFS_I(d_inode(dentry))->root->root_key.objectid);
 	seq_puts(seq, ",subvol=");
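With CONFIG_BTRFS_DEBUG enabled these parse like any other option, e.g. mount -o fragment=all /dev/sdX /mnt (device name illustrative), and btrfs_show_options() echoes them back so they are visible in /proc/mounts and preserved across remounts.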
@@ -19,6 +19,7 @@
 #include <linux/slab.h>
 #include "btrfs-tests.h"
 #include "../ctree.h"
+#include "../disk-io.h"
 #include "../free-space-cache.h"
 
 #define BITS_PER_BITMAP		(PAGE_CACHE_SIZE * 8)

@@ -35,6 +36,12 @@ static struct btrfs_block_group_cache *init_test_block_group(void)
 		kfree(cache);
 		return NULL;
 	}
+	cache->fs_info = btrfs_alloc_dummy_fs_info();
+	if (!cache->fs_info) {
+		kfree(cache->free_space_ctl);
+		kfree(cache);
+		return NULL;
+	}
 
 	cache->key.objectid = 0;
 	cache->key.offset = 1024 * 1024 * 1024;
@@ -879,7 +886,8 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
 int btrfs_test_free_space_cache(void)
 {
 	struct btrfs_block_group_cache *cache;
-	int ret;
+	struct btrfs_root *root = NULL;
+	int ret = -ENOMEM;
 
 	test_msg("Running btrfs free space cache tests\n");
 

@@ -889,6 +897,17 @@ int btrfs_test_free_space_cache(void)
 		return 0;
 	}
 
+	root = btrfs_alloc_dummy_root();
+	if (!root)
+		goto out;
+
+	root->fs_info = btrfs_alloc_dummy_fs_info();
+	if (!root->fs_info)
+		goto out;
+
+	root->fs_info->extent_root = root;
+	cache->fs_info = root->fs_info;
+
 	ret = test_extents(cache);
 	if (ret)
 		goto out;

@@ -904,6 +923,7 @@ out:
 	__btrfs_remove_free_space_cache(cache->free_space_ctl);
 	kfree(cache->free_space_ctl);
 	kfree(cache);
+	btrfs_free_dummy_root(root);
 	test_msg("Free space cache tests finished\n");
 	return ret;
 }
@@ -232,15 +232,16 @@ loop:
 	extwriter_counter_init(cur_trans, type);
 	init_waitqueue_head(&cur_trans->writer_wait);
 	init_waitqueue_head(&cur_trans->commit_wait);
+	init_waitqueue_head(&cur_trans->pending_wait);
 	cur_trans->state = TRANS_STATE_RUNNING;
 	/*
 	 * One for this trans handle, one so it will live on until we
 	 * commit the transaction.
 	 */
 	atomic_set(&cur_trans->use_count, 2);
-	cur_trans->have_free_bgs = 0;
+	atomic_set(&cur_trans->pending_ordered, 0);
+	cur_trans->flags = 0;
 	cur_trans->start_time = get_seconds();
-	cur_trans->dirty_bg_run = 0;
 
 	memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs));
 

@@ -266,7 +267,6 @@ loop:
 	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
 	INIT_LIST_HEAD(&cur_trans->pending_chunks);
 	INIT_LIST_HEAD(&cur_trans->switch_commits);
-	INIT_LIST_HEAD(&cur_trans->pending_ordered);
 	INIT_LIST_HEAD(&cur_trans->dirty_bgs);
 	INIT_LIST_HEAD(&cur_trans->io_bgs);
 	INIT_LIST_HEAD(&cur_trans->dropped_roots);

@@ -549,7 +549,6 @@ again:
 	h->can_flush_pending_bgs = true;
 	INIT_LIST_HEAD(&h->qgroup_ref_list);
 	INIT_LIST_HEAD(&h->new_bgs);
-	INIT_LIST_HEAD(&h->ordered);
 
 	smp_mb();
 	if (cur_trans->state >= TRANS_STATE_BLOCKED &&

@@ -780,12 +779,6 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
 	if (!list_empty(&trans->new_bgs))
 		btrfs_create_pending_block_groups(trans, root);
 
-	if (!list_empty(&trans->ordered)) {
-		spin_lock(&info->trans_lock);
-		list_splice_init(&trans->ordered, &cur_trans->pending_ordered);
-		spin_unlock(&info->trans_lock);
-	}
-
 	trans->delayed_ref_updates = 0;
 	if (!trans->sync) {
 		must_run_delayed_refs =
@@ -1776,25 +1769,10 @@ static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
 }
 
 static inline void
-btrfs_wait_pending_ordered(struct btrfs_transaction *cur_trans,
-			   struct btrfs_fs_info *fs_info)
+btrfs_wait_pending_ordered(struct btrfs_transaction *cur_trans)
 {
-	struct btrfs_ordered_extent *ordered;
-
-	spin_lock(&fs_info->trans_lock);
-	while (!list_empty(&cur_trans->pending_ordered)) {
-		ordered = list_first_entry(&cur_trans->pending_ordered,
-					   struct btrfs_ordered_extent,
-					   trans_list);
-		list_del_init(&ordered->trans_list);
-		spin_unlock(&fs_info->trans_lock);
-
-		wait_event(ordered->wait, test_bit(BTRFS_ORDERED_COMPLETE,
-						   &ordered->flags));
-		btrfs_put_ordered_extent(ordered);
-		spin_lock(&fs_info->trans_lock);
-	}
-	spin_unlock(&fs_info->trans_lock);
+	wait_event(cur_trans->pending_wait,
+		   atomic_read(&cur_trans->pending_ordered) == 0);
 }
 
 int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
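btrfs_wait_pending_ordered() accordingly collapses from a lock-juggling list drain into a single wait_event() on the counter that btrfs_remove_ordered_extent() decrements (see the ordered-data.c hunks and the sketch there); the disk-io.c hunk earlier removed btrfs_free_pending_ordered() for the same reason.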
@@ -1842,7 +1820,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 		return ret;
 	}
 
-	if (!cur_trans->dirty_bg_run) {
+	if (!test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &cur_trans->flags)) {
 		int run_it = 0;
 
 		/* this mutex is also taken before trying to set

@@ -1851,18 +1829,17 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 		 * after a extents from that block group have been
 		 * allocated for cache files.  btrfs_set_block_group_ro
 		 * will wait for the transaction to commit if it
-		 * finds dirty_bg_run = 1
+		 * finds BTRFS_TRANS_DIRTY_BG_RUN set.
 		 *
-		 * The dirty_bg_run flag is also used to make sure only
-		 * one process starts all the block group IO.  It wouldn't
+		 * The BTRFS_TRANS_DIRTY_BG_RUN flag is also used to make sure
+		 * only one process starts all the block group IO.  It wouldn't
 		 * hurt to have more than one go through, but there's no
 		 * real advantage to it either.
 		 */
 		mutex_lock(&root->fs_info->ro_block_group_mutex);
-		if (!cur_trans->dirty_bg_run) {
+		if (!test_and_set_bit(BTRFS_TRANS_DIRTY_BG_RUN,
+				      &cur_trans->flags))
 			run_it = 1;
-			cur_trans->dirty_bg_run = 1;
-		}
 		mutex_unlock(&root->fs_info->ro_block_group_mutex);
 
 		if (run_it)

@@ -1874,7 +1851,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 	}
 
 	spin_lock(&root->fs_info->trans_lock);
-	list_splice_init(&trans->ordered, &cur_trans->pending_ordered);
 	if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
 		spin_unlock(&root->fs_info->trans_lock);
 		atomic_inc(&cur_trans->use_count);

@@ -1933,7 +1909,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 
 	btrfs_wait_delalloc_flush(root->fs_info);
 
-	btrfs_wait_pending_ordered(cur_trans, root->fs_info);
+	btrfs_wait_pending_ordered(cur_trans);
 
 	btrfs_scrub_pause(root);
 	/*

@@ -2133,7 +2109,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 
 	btrfs_finish_extent_commit(trans, root);
 
-	if (cur_trans->have_free_bgs)
+	if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &cur_trans->flags))
 		btrfs_clear_space_info_full(root->fs_info);
 
 	root->fs_info->last_trans_committed = cur_trans->transid;
@@ -32,6 +32,10 @@ enum btrfs_trans_state {
 	TRANS_STATE_MAX		= 6,
 };
 
+#define BTRFS_TRANS_HAVE_FREE_BGS	0
+#define BTRFS_TRANS_DIRTY_BG_RUN	1
+#define BTRFS_TRANS_CACHE_ENOSPC	2
+
 struct btrfs_transaction {
 	u64 transid;
 	/*
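Folding HAVE_FREE_BGS, DIRTY_BG_RUN and the new CACHE_ENOSPC state into one unsigned long of flag bits replaces two int fields and lets the commit path claim the block-group IO with a single test_and_set_bit() (see the transaction.c hunk above) rather than a separate test and store.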
@@ -46,11 +50,9 @@ struct btrfs_transaction {
 	 */
 	atomic_t num_writers;
 	atomic_t use_count;
+	atomic_t pending_ordered;
 
-	/*
-	 * true if there is free bgs operations in this transaction
-	 */
-	int have_free_bgs;
+	unsigned long flags;
 
 	/* Be protected by fs_info->trans_lock when we want to change it. */
 	enum btrfs_trans_state state;

@@ -59,9 +61,9 @@ struct btrfs_transaction {
 	unsigned long start_time;
 	wait_queue_head_t writer_wait;
 	wait_queue_head_t commit_wait;
+	wait_queue_head_t pending_wait;
 	struct list_head pending_snapshots;
 	struct list_head pending_chunks;
-	struct list_head pending_ordered;
 	struct list_head switch_commits;
 	struct list_head dirty_bgs;
 	struct list_head io_bgs;

@@ -80,7 +82,6 @@ struct btrfs_transaction {
 	spinlock_t dropped_roots_lock;
 	struct btrfs_delayed_ref_root delayed_refs;
 	int aborted;
-	int dirty_bg_run;
 };
 
 #define __TRANS_FREEZABLE	(1U << 0)

@@ -128,7 +129,6 @@ struct btrfs_trans_handle {
 	 */
 	struct btrfs_root *root;
 	struct seq_list delayed_ref_elem;
-	struct list_head ordered;
 	struct list_head qgroup_ref_list;
 	struct list_head new_bgs;
 };
@@ -1462,7 +1462,7 @@ again:
 			btrfs_std_error(root->fs_info, ret,
 				    "Failed to remove dev extent item");
 	} else {
-		trans->transaction->have_free_bgs = 1;
+		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
 	}
 out:
 	btrfs_free_path(path);