btrfs: Rewrite retry logic in do_chunk_alloc
do_chunk_alloc implements logic to detect whether there is currently a pending chunk allocation (by means of space_info->chunk_alloc being set) and, if so, it loops around to the 'again' label. Additionally, based on the state of the space_info (e.g. whether it's full or not) and the return value of should_alloc_chunk(), it decides whether this is a "hard" error (ENOSPC) or we can just return 0.

This patch refactors all of this:

1. Puts order to the scattered ifs handling the various cases into easy-to-read if {} else if {} branches. This makes clear which cases we are interested in handling.

2. Calls should_alloc_chunk() only once and uses the result in the if/else if constructs. All of this is done under space_info->lock, so even before this change the multiple calls to should_alloc_chunk were unnecessary.

3. Rewrites the "do {} while ()" loop, currently implemented via a label, into an explicit loop construct.

4. Moves the mutex locking for the case where the caller is the one doing the allocation. For the case where the caller needs to wait for a concurrent allocation, introduces a pair of mutex_lock/mutex_unlock calls to act as a barrier and rewords the comment.

5. Switches local variables to bool type where pertinent.

All in all this shouldn't introduce any functional changes.

Signed-off-by: Nikolay Borisov <nborisov@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent dec59fa3a7
commit 2556fbb0be
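Before the diff itself, a condensed userspace mock may help to see the control flow the patch converges on. This is a hypothetical sketch using POSIX threads: the names space_info, should_alloc_chunk, chunk_alloc and chunk_mutex mirror the kernel code below, while the pthread primitives, the stubbed allocation policy and main() are illustrative placeholders rather than btrfs internals.

/*
 * Hypothetical userspace mock of the reworked retry loop (compile with
 * -pthread).  Only the control flow is meant to match the diff below.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct space_info {
        pthread_mutex_t lock;   /* a spinlock in the kernel code */
        bool full;              /* no more free physical space */
        bool chunk_alloc;       /* an allocation is already in flight */
};

static pthread_mutex_t chunk_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for should_alloc_chunk(): here simply "allocate when forced". */
static bool should_alloc_chunk(struct space_info *info, int force)
{
        (void)info;
        return force > 0;
}

static int do_chunk_alloc_mock(struct space_info *info, int force)
{
        bool wait_for_alloc = false;
        bool should_alloc;

        do {
                pthread_mutex_lock(&info->lock);
                should_alloc = should_alloc_chunk(info, force);
                if (info->full) {
                        /* No more free physical space. */
                        pthread_mutex_unlock(&info->lock);
                        return should_alloc ? -1 : 0;   /* -1 stands in for -ENOSPC */
                } else if (!should_alloc) {
                        pthread_mutex_unlock(&info->lock);
                        return 0;
                } else if (info->chunk_alloc) {
                        /*
                         * Someone else is allocating: use chunk_mutex as a
                         * barrier, then loop and re-evaluate the state.
                         */
                        wait_for_alloc = true;
                        pthread_mutex_unlock(&info->lock);
                        pthread_mutex_lock(&chunk_mutex);
                        pthread_mutex_unlock(&chunk_mutex);
                } else {
                        /* Proceed with the allocation ourselves. */
                        info->chunk_alloc = true;
                        wait_for_alloc = false;
                        pthread_mutex_unlock(&info->lock);
                }
        } while (wait_for_alloc);

        pthread_mutex_lock(&chunk_mutex);
        printf("allocating a chunk\n");         /* the real work goes here */
        pthread_mutex_unlock(&chunk_mutex);

        pthread_mutex_lock(&info->lock);
        info->chunk_alloc = false;
        pthread_mutex_unlock(&info->lock);
        return 0;
}

int main(void)
{
        struct space_info info = { .full = false, .chunk_alloc = false };

        pthread_mutex_init(&info.lock, NULL);
        return do_chunk_alloc_mock(&info, 1);
}

The property the patch is after, also visible in the diff, is that the allocation decision is taken exactly once per iteration under space_info->lock, and the only way to iterate again is the wait_for_alloc case.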
@@ -4450,7 +4450,8 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
 {
 	struct btrfs_fs_info *fs_info = trans->fs_info;
 	struct btrfs_space_info *space_info;
-	int wait_for_alloc = 0;
+	bool wait_for_alloc = false;
+	bool should_alloc = false;
 	int ret = 0;
 
 	/* Don't re-enter if we're already allocating a chunk */
@@ -4460,45 +4461,44 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
 	space_info = __find_space_info(fs_info, flags);
 	ASSERT(space_info);
 
-again:
-	spin_lock(&space_info->lock);
-	if (force < space_info->force_alloc)
-		force = space_info->force_alloc;
-	if (space_info->full) {
-		if (should_alloc_chunk(fs_info, space_info, force))
-			ret = -ENOSPC;
-		else
-			ret = 0;
-		spin_unlock(&space_info->lock);
-		return ret;
-	}
+	do {
+		spin_lock(&space_info->lock);
+		if (force < space_info->force_alloc)
+			force = space_info->force_alloc;
+		should_alloc = should_alloc_chunk(fs_info, space_info, force);
+		if (space_info->full) {
+			/* No more free physical space */
+			if (should_alloc)
+				ret = -ENOSPC;
+			else
+				ret = 0;
+			spin_unlock(&space_info->lock);
+			return ret;
+		} else if (!should_alloc) {
+			spin_unlock(&space_info->lock);
+			return 0;
+		} else if (space_info->chunk_alloc) {
+			/*
+			 * Someone is already allocating, so we need to block
+			 * until this someone is finished and then loop to
+			 * recheck if we should continue with our allocation
+			 * attempt.
+			 */
+			wait_for_alloc = true;
+			spin_unlock(&space_info->lock);
+			mutex_lock(&fs_info->chunk_mutex);
+			mutex_unlock(&fs_info->chunk_mutex);
+		} else {
+			/* Proceed with allocation */
+			space_info->chunk_alloc = 1;
+			wait_for_alloc = false;
+			spin_unlock(&space_info->lock);
+		}
 
-	if (!should_alloc_chunk(fs_info, space_info, force)) {
-		spin_unlock(&space_info->lock);
-		return 0;
-	} else if (space_info->chunk_alloc) {
-		wait_for_alloc = 1;
-	} else {
-		space_info->chunk_alloc = 1;
-	}
-
-	spin_unlock(&space_info->lock);
+		cond_resched();
+	} while (wait_for_alloc);
 
 	mutex_lock(&fs_info->chunk_mutex);
-
-	/*
-	 * The chunk_mutex is held throughout the entirety of a chunk
-	 * allocation, so once we've acquired the chunk_mutex we know that the
-	 * other guy is done and we need to recheck and see if we should
-	 * allocate.
-	 */
-	if (wait_for_alloc) {
-		mutex_unlock(&fs_info->chunk_mutex);
-		wait_for_alloc = 0;
-		cond_resched();
-		goto again;
-	}
-
 	trans->allocating_chunk = true;
 
 	/*
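Point 4 of the changelog relies on a small idiom: since chunk_mutex is held for the whole duration of a chunk allocation, acquiring and immediately releasing it is enough to wait for a concurrent allocator to finish. A minimal standalone illustration of that idiom with POSIX threads follows; the names and the sleep are assumptions for the demo, not kernel code.

/* Compile with -pthread.  main() plays the concurrent allocator. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t chunk_mutex = PTHREAD_MUTEX_INITIALIZER;

static void *waiter(void *arg)
{
        (void)arg;
        /*
         * The lock/unlock pair acts purely as a barrier: block until the
         * current holder releases chunk_mutex, drop it right away and go
         * back to re-evaluate whether we still need to allocate.
         */
        pthread_mutex_lock(&chunk_mutex);
        pthread_mutex_unlock(&chunk_mutex);
        puts("waiter: concurrent allocation finished, rechecking");
        return NULL;
}

int main(void)
{
        pthread_t w;

        pthread_mutex_lock(&chunk_mutex);       /* allocation in progress */
        pthread_create(&w, NULL, waiter, NULL);
        puts("main: allocating under chunk_mutex");
        sleep(1);                               /* pretend to do the work */
        pthread_mutex_unlock(&chunk_mutex);     /* allocation done */
        pthread_join(w, NULL);
        return 0;
}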