Btrfs: nuke fs wide allocation mutex V2
This patch removes the giant fs_info->alloc_mutex and replaces it with a bunch of little locks. There is now a pinned_mutex, which is used when messing with the pinned_extents extent io tree, and the extent_ins_mutex which is used with the pending_del and extent_ins extent io trees. The locking for the extent tree stuff was inspired by a patch that Yan Zheng wrote to fix a race condition; I cleaned it up some and changed the locking around a little bit, but the idea remains the same. Basically, instead of holding the extent_ins_mutex throughout the processing of an extent on the extent_ins or pending_del trees, we just hold it while we're searching and when we clear the bits on those trees, and lock the extent for the duration of the operations on the extent. Also, to keep from getting hung up waiting to lock an extent, I've added a try_lock_extent, so that if we cannot lock the extent we move on to the next one in the tree and come back to that one later. I have tested this heavily and it does not appear to break anything. This has to be applied on top of my find_free_extent redo patch. I tested this patch on top of Yan's space rebalancing code and it worked fine. The only thing that has changed since the last version is that I pulled out all my debugging stuff; apparently I forgot to run guilt refresh before I sent the last patch out. Thank you, Signed-off-by: Josef Bacik <jbacik@redhat.com>
This commit is contained in:
parent
80eb234af0
commit
2517920135
|
@ -1387,8 +1387,7 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
|
|||
lowest_level = p->lowest_level;
|
||||
WARN_ON(lowest_level && ins_len > 0);
|
||||
WARN_ON(p->nodes[0] != NULL);
|
||||
WARN_ON(cow && root == root->fs_info->extent_root &&
|
||||
!mutex_is_locked(&root->fs_info->alloc_mutex));
|
||||
|
||||
if (ins_len < 0)
|
||||
lowest_unlock = 2;
|
||||
|
||||
|
|
|
@ -558,6 +558,7 @@ struct btrfs_block_group_cache {
|
|||
struct btrfs_key key;
|
||||
struct btrfs_block_group_item item;
|
||||
spinlock_t lock;
|
||||
struct mutex alloc_mutex;
|
||||
u64 pinned;
|
||||
u64 reserved;
|
||||
u64 flags;
|
||||
|
@ -635,7 +636,8 @@ struct btrfs_fs_info {
|
|||
struct mutex tree_log_mutex;
|
||||
struct mutex transaction_kthread_mutex;
|
||||
struct mutex cleaner_mutex;
|
||||
struct mutex alloc_mutex;
|
||||
struct mutex extent_ins_mutex;
|
||||
struct mutex pinned_mutex;
|
||||
struct mutex chunk_mutex;
|
||||
struct mutex drop_mutex;
|
||||
struct mutex volume_mutex;
|
||||
|
@ -1941,8 +1943,12 @@ int btrfs_acl_chmod(struct inode *inode);
|
|||
/* free-space-cache.c */
|
||||
int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
|
||||
u64 bytenr, u64 size);
|
||||
int btrfs_add_free_space_lock(struct btrfs_block_group_cache *block_group,
|
||||
u64 offset, u64 bytes);
|
||||
int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
|
||||
u64 bytenr, u64 size);
|
||||
int btrfs_remove_free_space_lock(struct btrfs_block_group_cache *block_group,
|
||||
u64 offset, u64 bytes);
|
||||
void btrfs_remove_free_space_cache(struct btrfs_block_group_cache
|
||||
*block_group);
|
||||
struct btrfs_free_space *btrfs_find_free_space(struct btrfs_block_group_cache
|
||||
|
|
|
@ -1460,7 +1460,8 @@ struct btrfs_root *open_ctree(struct super_block *sb,
|
|||
mutex_init(&fs_info->trans_mutex);
|
||||
mutex_init(&fs_info->tree_log_mutex);
|
||||
mutex_init(&fs_info->drop_mutex);
|
||||
mutex_init(&fs_info->alloc_mutex);
|
||||
mutex_init(&fs_info->extent_ins_mutex);
|
||||
mutex_init(&fs_info->pinned_mutex);
|
||||
mutex_init(&fs_info->chunk_mutex);
|
||||
mutex_init(&fs_info->transaction_kthread_mutex);
|
||||
mutex_init(&fs_info->cleaner_mutex);
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -938,6 +938,20 @@ int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
|
|||
}
|
||||
EXPORT_SYMBOL(lock_extent);
|
||||
|
||||
int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
|
||||
gfp_t mask)
|
||||
{
|
||||
int err;
|
||||
u64 failed_start;
|
||||
|
||||
err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
|
||||
&failed_start, mask);
|
||||
if (err == -EEXIST)
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
EXPORT_SYMBOL(try_lock_extent);
|
||||
|
||||
int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
|
||||
gfp_t mask)
|
||||
{
|
||||
|
|
|
@ -128,6 +128,8 @@ int try_release_extent_state(struct extent_map_tree *map,
|
|||
gfp_t mask);
|
||||
int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask);
|
||||
int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask);
|
||||
int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
|
||||
gfp_t mask);
|
||||
int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
|
||||
get_extent_t *get_extent);
|
||||
int __init extent_io_init(void);
|
||||
|
|
|
@ -184,8 +184,8 @@ static int link_free_space(struct btrfs_block_group_cache *block_group,
|
|||
return ret;
|
||||
}
|
||||
|
||||
int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
|
||||
u64 offset, u64 bytes)
|
||||
static int __btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
|
||||
u64 offset, u64 bytes)
|
||||
{
|
||||
struct btrfs_free_space *right_info;
|
||||
struct btrfs_free_space *left_info;
|
||||
|
@ -202,8 +202,6 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
|
|||
* are adding, if there is remove that struct and add a new one to
|
||||
* cover the entire range
|
||||
*/
|
||||
spin_lock(&block_group->lock);
|
||||
|
||||
right_info = tree_search_offset(&block_group->free_space_offset,
|
||||
offset+bytes, 0, 1);
|
||||
left_info = tree_search_offset(&block_group->free_space_offset,
|
||||
|
@ -261,7 +259,6 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
|
|||
if (ret)
|
||||
kfree(info);
|
||||
out:
|
||||
spin_unlock(&block_group->lock);
|
||||
if (ret) {
|
||||
printk(KERN_ERR "btrfs: unable to add free space :%d\n", ret);
|
||||
if (ret == -EEXIST)
|
||||
|
@ -274,13 +271,13 @@ out:
|
|||
return ret;
|
||||
}
|
||||
|
||||
int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
|
||||
u64 offset, u64 bytes)
|
||||
static int
|
||||
__btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
|
||||
u64 offset, u64 bytes)
|
||||
{
|
||||
struct btrfs_free_space *info;
|
||||
int ret = 0;
|
||||
|
||||
spin_lock(&block_group->lock);
|
||||
info = tree_search_offset(&block_group->free_space_offset, offset, 0,
|
||||
1);
|
||||
|
||||
|
@ -334,17 +331,63 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
|
|||
/* step two, insert a new info struct to cover anything
|
||||
* before the hole
|
||||
*/
|
||||
spin_unlock(&block_group->lock);
|
||||
ret = btrfs_add_free_space(block_group, old_start,
|
||||
offset - old_start);
|
||||
ret = __btrfs_add_free_space(block_group, old_start,
|
||||
offset - old_start);
|
||||
BUG_ON(ret);
|
||||
goto out_nolock;
|
||||
} else {
|
||||
WARN_ON(1);
|
||||
}
|
||||
out:
|
||||
spin_unlock(&block_group->lock);
|
||||
out_nolock:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
 * Add the free space range [offset, offset + bytes) to the block
 * group's free space cache.
 *
 * Takes block_group->alloc_mutex around the update; callers that
 * already hold the mutex must use btrfs_add_free_space_lock() instead.
 *
 * Returns the result of __btrfs_add_free_space() (0 on success,
 * negative errno on failure).
 */
int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
			 u64 offset, u64 bytes)
{
	int ret;
	struct btrfs_free_space *sp;

	mutex_lock(&block_group->alloc_mutex);
	ret = __btrfs_add_free_space(block_group, offset, bytes);
	/*
	 * Sanity check: the range we just inserted (possibly merged
	 * with its neighbours) must be findable in the offset-indexed
	 * tree; a miss means the cache is corrupt, so crash hard.
	 */
	sp = tree_search_offset(&block_group->free_space_offset, offset, 0, 1);
	BUG_ON(!sp);
	mutex_unlock(&block_group->alloc_mutex);

	return ret;
}
|
||||
|
||||
/*
 * Locked variant of btrfs_add_free_space(): performs the identical
 * update, but the caller is expected to already hold
 * block_group->alloc_mutex, so no locking is done here.
 *
 * Returns the result of __btrfs_add_free_space() (0 on success,
 * negative errno on failure).
 */
int btrfs_add_free_space_lock(struct btrfs_block_group_cache *block_group,
			      u64 offset, u64 bytes)
{
	int ret;
	struct btrfs_free_space *sp;

	ret = __btrfs_add_free_space(block_group, offset, bytes);
	/* sanity check: the freshly added range must be in the tree */
	sp = tree_search_offset(&block_group->free_space_offset, offset, 0, 1);
	BUG_ON(!sp);

	return ret;
}
|
||||
|
||||
/*
 * Remove the range [offset, offset + bytes) from the block group's
 * free space cache, taking block_group->alloc_mutex for the duration.
 * Callers already holding the mutex must use
 * btrfs_remove_free_space_lock() instead.
 *
 * Returns the result of __btrfs_remove_free_space().
 */
int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
			    u64 offset, u64 bytes)
{
	int err;

	mutex_lock(&block_group->alloc_mutex);
	err = __btrfs_remove_free_space(block_group, offset, bytes);
	mutex_unlock(&block_group->alloc_mutex);
	return err;
}
|
||||
|
||||
/*
 * Same as btrfs_remove_free_space(), but assumes the caller already
 * holds block_group->alloc_mutex, so the range is removed without
 * taking any lock here.
 */
int btrfs_remove_free_space_lock(struct btrfs_block_group_cache *block_group,
				 u64 offset, u64 bytes)
{
	return __btrfs_remove_free_space(block_group, offset, bytes);
}
|
||||
|
||||
|
@ -386,18 +429,18 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
|
|||
struct btrfs_free_space *info;
|
||||
struct rb_node *node;
|
||||
|
||||
spin_lock(&block_group->lock);
|
||||
mutex_lock(&block_group->alloc_mutex);
|
||||
while ((node = rb_last(&block_group->free_space_bytes)) != NULL) {
|
||||
info = rb_entry(node, struct btrfs_free_space, bytes_index);
|
||||
unlink_free_space(block_group, info);
|
||||
kfree(info);
|
||||
if (need_resched()) {
|
||||
spin_unlock(&block_group->lock);
|
||||
mutex_unlock(&block_group->alloc_mutex);
|
||||
cond_resched();
|
||||
spin_lock(&block_group->lock);
|
||||
mutex_lock(&block_group->alloc_mutex);
|
||||
}
|
||||
}
|
||||
spin_unlock(&block_group->lock);
|
||||
mutex_unlock(&block_group->alloc_mutex);
|
||||
}
|
||||
|
||||
struct btrfs_free_space *btrfs_find_free_space_offset(struct
|
||||
|
@ -407,10 +450,10 @@ struct btrfs_free_space *btrfs_find_free_space_offset(struct
|
|||
{
|
||||
struct btrfs_free_space *ret;
|
||||
|
||||
spin_lock(&block_group->lock);
|
||||
mutex_lock(&block_group->alloc_mutex);
|
||||
ret = tree_search_offset(&block_group->free_space_offset, offset,
|
||||
bytes, 0);
|
||||
spin_unlock(&block_group->lock);
|
||||
mutex_unlock(&block_group->alloc_mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -422,10 +465,10 @@ struct btrfs_free_space *btrfs_find_free_space_bytes(struct
|
|||
{
|
||||
struct btrfs_free_space *ret;
|
||||
|
||||
spin_lock(&block_group->lock);
|
||||
mutex_lock(&block_group->alloc_mutex);
|
||||
|
||||
ret = tree_search_bytes(&block_group->free_space_bytes, offset, bytes);
|
||||
spin_unlock(&block_group->lock);
|
||||
mutex_unlock(&block_group->alloc_mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -434,16 +477,13 @@ struct btrfs_free_space *btrfs_find_free_space(struct btrfs_block_group_cache
|
|||
*block_group, u64 offset,
|
||||
u64 bytes)
|
||||
{
|
||||
struct btrfs_free_space *ret;
|
||||
struct btrfs_free_space *ret = NULL;
|
||||
|
||||
spin_lock(&block_group->lock);
|
||||
ret = tree_search_offset(&block_group->free_space_offset, offset,
|
||||
bytes, 0);
|
||||
if (!ret)
|
||||
ret = tree_search_bytes(&block_group->free_space_bytes,
|
||||
offset, bytes);
|
||||
|
||||
spin_unlock(&block_group->lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -670,7 +670,6 @@ static noinline int drop_dirty_roots(struct btrfs_root *tree_root,
|
|||
atomic_dec(&root->fs_info->throttles);
|
||||
wake_up(&root->fs_info->transaction_throttle);
|
||||
|
||||
mutex_lock(&root->fs_info->alloc_mutex);
|
||||
num_bytes -= btrfs_root_used(&dirty->root->root_item);
|
||||
bytes_used = btrfs_root_used(&root->root_item);
|
||||
if (num_bytes) {
|
||||
|
@ -678,7 +677,6 @@ static noinline int drop_dirty_roots(struct btrfs_root *tree_root,
|
|||
btrfs_set_root_used(&root->root_item,
|
||||
bytes_used - num_bytes);
|
||||
}
|
||||
mutex_unlock(&root->fs_info->alloc_mutex);
|
||||
|
||||
ret = btrfs_del_root(trans, tree_root, &dirty->root->root_key);
|
||||
if (ret) {
|
||||
|
|
|
@ -125,9 +125,6 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
|
|||
if (is_extent)
|
||||
btrfs_extent_post_op(trans, root);
|
||||
out:
|
||||
if (is_extent)
|
||||
mutex_unlock(&root->fs_info->alloc_mutex);
|
||||
|
||||
if (path)
|
||||
btrfs_free_path(path);
|
||||
if (ret == -EAGAIN) {
|
||||
|
|
|
@ -271,10 +271,10 @@ static int process_one_buffer(struct btrfs_root *log,
|
|||
struct walk_control *wc, u64 gen)
|
||||
{
|
||||
if (wc->pin) {
|
||||
mutex_lock(&log->fs_info->alloc_mutex);
|
||||
mutex_lock(&log->fs_info->pinned_mutex);
|
||||
btrfs_update_pinned_extents(log->fs_info->extent_root,
|
||||
eb->start, eb->len, 1);
|
||||
mutex_unlock(&log->fs_info->alloc_mutex);
|
||||
mutex_unlock(&log->fs_info->pinned_mutex);
|
||||
}
|
||||
|
||||
if (btrfs_buffer_uptodate(eb, gen)) {
|
||||
|
|
|
@ -58,14 +58,12 @@ void btrfs_unlock_volumes(void)
|
|||
|
||||
static void lock_chunks(struct btrfs_root *root)
|
||||
{
|
||||
mutex_lock(&root->fs_info->alloc_mutex);
|
||||
mutex_lock(&root->fs_info->chunk_mutex);
|
||||
}
|
||||
|
||||
static void unlock_chunks(struct btrfs_root *root)
|
||||
{
|
||||
mutex_unlock(&root->fs_info->chunk_mutex);
|
||||
mutex_unlock(&root->fs_info->alloc_mutex);
|
||||
}
|
||||
|
||||
int btrfs_cleanup_fs_uuids(void)
|
||||
|
|
Loading…
Reference in New Issue