btrfs: move extent state init and alloc functions to their own file

Start cleaning up extent_io.c by moving the extent state code out of it.
This patch starts with the extent state allocation code and the
extent_io_tree init code.

Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
commit 83cf709a89 (parent c45379a20f)
Josef Bacik, 2022-09-09 17:53:21 -04:00; committed by David Sterba
4 changed files with 166 additions and 156 deletions

fs/btrfs/Makefile

@@ -31,7 +31,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
 	   backref.o ulist.o qgroup.o send.o dev-replace.o raid56.o \
 	   uuid-tree.o props.o free-space-tree.o tree-checker.o space-info.o \
 	   block-rsv.o delalloc-space.o block-group.o discard.o reflink.o \
-	   subpage.o tree-mod-log.o
+	   subpage.o tree-mod-log.o extent-io-tree.o
 
 btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
 btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o

fs/btrfs/extent-io-tree.c (new file, 160 lines)

@@ -0,0 +1,160 @@
// SPDX-License-Identifier: GPL-2.0

#include <linux/slab.h>
#include <trace/events/btrfs.h>
#include "ctree.h"
#include "extent-io-tree.h"

static struct kmem_cache *extent_state_cache;

#ifdef CONFIG_BTRFS_DEBUG
static LIST_HEAD(states);
static DEFINE_SPINLOCK(leak_lock);

static inline void btrfs_leak_debug_add_state(struct extent_state *state)
{
	unsigned long flags;

	spin_lock_irqsave(&leak_lock, flags);
	list_add(&state->leak_list, &states);
	spin_unlock_irqrestore(&leak_lock, flags);
}

static inline void btrfs_leak_debug_del_state(struct extent_state *state)
{
	unsigned long flags;

	spin_lock_irqsave(&leak_lock, flags);
	list_del(&state->leak_list);
	spin_unlock_irqrestore(&leak_lock, flags);
}

static inline void btrfs_extent_state_leak_debug_check(void)
{
	struct extent_state *state;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, leak_list);
		pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n",
		       state->start, state->end, state->state,
		       extent_state_in_tree(state),
		       refcount_read(&state->refs));
		list_del(&state->leak_list);
		kmem_cache_free(extent_state_cache, state);
	}
}
#else
#define btrfs_leak_debug_add_state(state)		do {} while (0)
#define btrfs_leak_debug_del_state(state)		do {} while (0)
#define btrfs_extent_state_leak_debug_check()		do {} while (0)
#endif

/*
 * For the file_extent_tree, we want to hold the inode lock when we lookup and
 * update the disk_i_size, but lockdep will complain because our io_tree we hold
 * the tree lock and get the inode lock when setting delalloc. These two things
 * are unrelated, so make a class for the file_extent_tree so we don't get the
 * two locking patterns mixed up.
 */
static struct lock_class_key file_extent_tree_class;

void extent_io_tree_init(struct btrfs_fs_info *fs_info,
			 struct extent_io_tree *tree, unsigned int owner,
			 void *private_data)
{
	tree->fs_info = fs_info;
	tree->state = RB_ROOT;
	tree->dirty_bytes = 0;
	spin_lock_init(&tree->lock);
	tree->private_data = private_data;
	tree->owner = owner;
	if (owner == IO_TREE_INODE_FILE_EXTENT)
		lockdep_set_class(&tree->lock, &file_extent_tree_class);
}

void extent_io_tree_release(struct extent_io_tree *tree)
{
	spin_lock(&tree->lock);
	/*
	 * Do a single barrier for the waitqueue_active check here, the state
	 * of the waitqueue should not change once extent_io_tree_release is
	 * called.
	 */
	smp_mb();
	while (!RB_EMPTY_ROOT(&tree->state)) {
		struct rb_node *node;
		struct extent_state *state;

		node = rb_first(&tree->state);
		state = rb_entry(node, struct extent_state, rb_node);
		rb_erase(&state->rb_node, &tree->state);
		RB_CLEAR_NODE(&state->rb_node);
		/*
		 * btree io trees aren't supposed to have tasks waiting for
		 * changes in the flags of extent states ever.
		 */
		ASSERT(!waitqueue_active(&state->wq));
		free_extent_state(state);

		cond_resched_lock(&tree->lock);
	}
	spin_unlock(&tree->lock);
}

struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;

	/*
	 * The given mask might be not appropriate for the slab allocator,
	 * drop the unsupported bits
	 */
	mask &= ~(__GFP_DMA32|__GFP_HIGHMEM);
	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state)
		return state;
	state->state = 0;
	RB_CLEAR_NODE(&state->rb_node);
	btrfs_leak_debug_add_state(state);
	refcount_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	trace_alloc_extent_state(state, mask, _RET_IP_);
	return state;
}

struct extent_state *alloc_extent_state_atomic(struct extent_state *prealloc)
{
	if (!prealloc)
		prealloc = alloc_extent_state(GFP_ATOMIC);

	return prealloc;
}

void free_extent_state(struct extent_state *state)
{
	if (!state)
		return;
	if (refcount_dec_and_test(&state->refs)) {
		WARN_ON(extent_state_in_tree(state));
		btrfs_leak_debug_del_state(state);
		trace_free_extent_state(state, _RET_IP_);
		kmem_cache_free(extent_state_cache, state);
	}
}

void __cold extent_state_free_cachep(void)
{
	btrfs_extent_state_leak_debug_check();
	kmem_cache_destroy(extent_state_cache);
}

int __init extent_state_init_cachep(void)
{
	extent_state_cache = kmem_cache_create("btrfs_extent_state",
					       sizeof(struct extent_state), 0,
					       SLAB_MEM_SPREAD, NULL);
	if (!extent_state_cache)
		return -ENOMEM;

	return 0;
}
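
For reference, a minimal lifecycle sketch of the API that now lives in this
file (illustrative only, not part of the patch; the example function is
hypothetical):

static void example_io_tree_lifecycle(struct btrfs_fs_info *fs_info)
{
	struct extent_io_tree tree;

	/*
	 * IO_TREE_INODE_FILE_EXTENT is the one owner that gets the
	 * dedicated lockdep class in extent_io_tree_init().
	 */
	extent_io_tree_init(fs_info, &tree, IO_TREE_INODE_FILE_EXTENT, NULL);

	/* ... set and clear bits on ranges of the tree ... */

	/* Frees any extent_state records still linked into the tree. */
	extent_io_tree_release(&tree);
}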

fs/btrfs/extent-io-tree.h

@@ -253,4 +253,9 @@ int btrfs_clean_io_failure(struct btrfs_inode *inode, u64 start,
 struct extent_state *alloc_extent_state_atomic(struct extent_state *prealloc);
 struct extent_state *alloc_extent_state(gfp_t mask);
 
+static inline bool extent_state_in_tree(const struct extent_state *state)
+{
+	return !RB_EMPTY_NODE(&state->rb_node);
+}
+
 #endif /* BTRFS_EXTENT_IO_TREE_H */
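
Moving extent_state_in_tree() into the header as a static inline lets both
extent_io.c and the new extent-io-tree.c use it. A small illustration of the
invariant it tests (hypothetical example, not code from the patch):

static void example_state_in_tree_invariant(void)
{
	struct extent_state *state = alloc_extent_state(GFP_NOFS);

	if (!state)
		return;
	/* alloc_extent_state() did RB_CLEAR_NODE(), so this never fires. */
	WARN_ON(extent_state_in_tree(state));
	/* Drops the last reference and returns the state to the slab. */
	free_extent_state(state);
}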

fs/btrfs/extent_io.c

@@ -31,18 +31,9 @@
 #include "block-group.h"
 #include "compression.h"
 
-static struct kmem_cache *extent_state_cache;
 static struct kmem_cache *extent_buffer_cache;
 
-static inline bool extent_state_in_tree(const struct extent_state *state)
-{
-	return !RB_EMPTY_NODE(&state->rb_node);
-}
-
 #ifdef CONFIG_BTRFS_DEBUG
-static LIST_HEAD(states);
-static DEFINE_SPINLOCK(leak_lock);
-
 static inline void btrfs_leak_debug_add_eb(struct extent_buffer *eb)
 {
 	struct btrfs_fs_info *fs_info = eb->fs_info;
@@ -53,15 +44,6 @@ static inline void btrfs_leak_debug_add_eb(struct extent_buffer *eb)
 	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
 }
 
-static inline void btrfs_leak_debug_add_state(struct extent_state *state)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&leak_lock, flags);
-	list_add(&state->leak_list, &states);
-	spin_unlock_irqrestore(&leak_lock, flags);
-}
-
 static inline void btrfs_leak_debug_del_eb(struct extent_buffer *eb)
 {
 	struct btrfs_fs_info *fs_info = eb->fs_info;
@@ -72,15 +54,6 @@ static inline void btrfs_leak_debug_del_eb(struct extent_buffer *eb)
 	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
 }
 
-static inline void btrfs_leak_debug_del_state(struct extent_state *state)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&leak_lock, flags);
-	list_del(&state->leak_list);
-	spin_unlock_irqrestore(&leak_lock, flags);
-}
-
 void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
 {
 	struct extent_buffer *eb;
@@ -108,21 +81,6 @@ void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
 	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
 }
 
-static inline void btrfs_extent_state_leak_debug_check(void)
-{
-	struct extent_state *state;
-
-	while (!list_empty(&states)) {
-		state = list_entry(states.next, struct extent_state, leak_list);
-		pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n",
-		       state->start, state->end, state->state,
-		       extent_state_in_tree(state),
-		       refcount_read(&state->refs));
-		list_del(&state->leak_list);
-		kmem_cache_free(extent_state_cache, state);
-	}
-}
-
 #define btrfs_debug_check_extent_io_range(tree, start, end)	\
 	__btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
 static inline void __btrfs_debug_check_extent_io_range(const char *caller,
@@ -143,10 +101,7 @@ static inline void __btrfs_debug_check_extent_io_range(const char *caller,
 }
 #else
 #define btrfs_leak_debug_add_eb(eb)			do {} while (0)
-#define btrfs_leak_debug_add_state(state)		do {} while (0)
 #define btrfs_leak_debug_del_eb(eb)			do {} while (0)
-#define btrfs_leak_debug_del_state(state)		do {} while (0)
-#define btrfs_extent_state_leak_debug_check()		do {} while (0)
 #define btrfs_debug_check_extent_io_range(c, s, e)	do {} while (0)
 #endif
@@ -249,17 +204,6 @@ static void submit_write_bio(struct extent_page_data *epd, int ret)
 	}
 }
 
-int __init extent_state_init_cachep(void)
-{
-	extent_state_cache = kmem_cache_create("btrfs_extent_state",
-					       sizeof(struct extent_state), 0,
-					       SLAB_MEM_SPREAD, NULL);
-	if (!extent_state_cache)
-		return -ENOMEM;
-
-	return 0;
-}
-
 int __init extent_buffer_init_cachep(void)
 {
 	extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
@@ -271,12 +215,6 @@ int __init extent_buffer_init_cachep(void)
 	return 0;
 }
 
-void __cold extent_state_free_cachep(void)
-{
-	btrfs_extent_state_leak_debug_check();
-	kmem_cache_destroy(extent_state_cache);
-}
-
 void __cold extent_buffer_free_cachep(void)
 {
 	/*
@@ -287,91 +225,6 @@ void __cold extent_buffer_free_cachep(void)
 	kmem_cache_destroy(extent_buffer_cache);
 }
 
-/*
- * For the file_extent_tree, we want to hold the inode lock when we lookup and
- * update the disk_i_size, but lockdep will complain because our io_tree we hold
- * the tree lock and get the inode lock when setting delalloc. These two things
- * are unrelated, so make a class for the file_extent_tree so we don't get the
- * two locking patterns mixed up.
- */
-static struct lock_class_key file_extent_tree_class;
-
-void extent_io_tree_init(struct btrfs_fs_info *fs_info,
-			 struct extent_io_tree *tree, unsigned int owner,
-			 void *private_data)
-{
-	tree->fs_info = fs_info;
-	tree->state = RB_ROOT;
-	tree->dirty_bytes = 0;
-	spin_lock_init(&tree->lock);
-	tree->private_data = private_data;
-	tree->owner = owner;
-	if (owner == IO_TREE_INODE_FILE_EXTENT)
-		lockdep_set_class(&tree->lock, &file_extent_tree_class);
-}
-
-void extent_io_tree_release(struct extent_io_tree *tree)
-{
-	spin_lock(&tree->lock);
-	/*
-	 * Do a single barrier for the waitqueue_active check here, the state
-	 * of the waitqueue should not change once extent_io_tree_release is
-	 * called.
-	 */
-	smp_mb();
-	while (!RB_EMPTY_ROOT(&tree->state)) {
-		struct rb_node *node;
-		struct extent_state *state;
-
-		node = rb_first(&tree->state);
-		state = rb_entry(node, struct extent_state, rb_node);
-		rb_erase(&state->rb_node, &tree->state);
-		RB_CLEAR_NODE(&state->rb_node);
-		/*
-		 * btree io trees aren't supposed to have tasks waiting for
-		 * changes in the flags of extent states ever.
-		 */
-		ASSERT(!waitqueue_active(&state->wq));
-		free_extent_state(state);
-
-		cond_resched_lock(&tree->lock);
-	}
-	spin_unlock(&tree->lock);
-}
-
-struct extent_state *alloc_extent_state(gfp_t mask)
-{
-	struct extent_state *state;
-
-	/*
-	 * The given mask might be not appropriate for the slab allocator,
-	 * drop the unsupported bits
-	 */
-	mask &= ~(__GFP_DMA32|__GFP_HIGHMEM);
-	state = kmem_cache_alloc(extent_state_cache, mask);
-	if (!state)
-		return state;
-	state->state = 0;
-	RB_CLEAR_NODE(&state->rb_node);
-	btrfs_leak_debug_add_state(state);
-	refcount_set(&state->refs, 1);
-	init_waitqueue_head(&state->wq);
-	trace_alloc_extent_state(state, mask, _RET_IP_);
-	return state;
-}
-
-void free_extent_state(struct extent_state *state)
-{
-	if (!state)
-		return;
-	if (refcount_dec_and_test(&state->refs)) {
-		WARN_ON(extent_state_in_tree(state));
-		btrfs_leak_debug_del_state(state);
-		trace_free_extent_state(state, _RET_IP_);
-		kmem_cache_free(extent_state_cache, state);
-	}
-}
-
 /**
  * Search @tree for an entry that contains @offset. Such entry would have
  * entry->start <= offset && entry->end >= offset.
@@ -710,14 +563,6 @@ static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
 	return next;
 }
 
-struct extent_state *alloc_extent_state_atomic(struct extent_state *prealloc)
-{
-	if (!prealloc)
-		prealloc = alloc_extent_state(GFP_ATOMIC);
-
-	return prealloc;
-}
-
 static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
 {
 	btrfs_panic(tree->fs_info, err,
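
alloc_extent_state_atomic() exists for the set/clear paths that may need a
new extent_state while already holding tree->lock. A simplified sketch of
that caller-side pattern (hypothetical function, insert step elided; not
code from this patch):

static int example_set_bit(struct extent_io_tree *tree, gfp_t mask)
{
	struct extent_state *prealloc = NULL;

again:
	/* Preallocate with a sleeping mask while no lock is held. */
	if (!prealloc && gfpflags_allow_blocking(mask))
		prealloc = alloc_extent_state(mask);

	spin_lock(&tree->lock);
	/* Under the lock, fall back to GFP_ATOMIC if still needed. */
	prealloc = alloc_extent_state_atomic(prealloc);
	if (!prealloc) {
		spin_unlock(&tree->lock);
		goto again;
	}

	/* ... link prealloc into tree->state and set its bits ... */
	prealloc = NULL;

	spin_unlock(&tree->lock);
	return 0;
}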