Btrfs: Do delalloc accounting via hooks in the extent_state code
Signed-off-by: Chris Mason <chris.mason@oracle.com>
parent 9c58309d6c
commit 291d673e6a

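The hunks below drop the open-coded delalloc_bytes updates from the callers (relocate_inode_pages, dirty_and_release_pages, run_delalloc_range, btrfs_cow_one_page, btrfs_defrag_file) and instead have the extent_state code invoke per-tree set_bit/clear_bit hooks whenever extent bits change; btrfs_set_bit_hook() and btrfs_clear_bit_hook() then do the accounting in one place. A minimal userspace sketch of that control flow, using illustrative demo_* names rather than the real btrfs types:

/*
 * Minimal sketch (not kernel code) of the hook-driven accounting this
 * commit introduces.  All demo_* names are illustrative assumptions.
 */
#include <stdio.h>

#define DEMO_DELALLOC (1UL << 0)

struct demo_fs_info {
	unsigned long long delalloc_bytes;	/* fs-wide delalloc byte count */
};

struct demo_tree;

struct demo_ops {
	void (*set_bit_hook)(struct demo_tree *t, unsigned long long start,
			     unsigned long long end, unsigned long bits);
	void (*clear_bit_hook)(struct demo_tree *t, unsigned long long start,
			       unsigned long long end, unsigned long bits);
};

struct demo_tree {
	struct demo_fs_info *fs_info;
	const struct demo_ops *ops;
};

/* Stands in for set_state_cb(): the tree code notifies the hook, if any,
 * whenever bits are set on a range. */
static void demo_set_bits(struct demo_tree *t, unsigned long long start,
			  unsigned long long end, unsigned long bits)
{
	if (t->ops && t->ops->set_bit_hook)
		t->ops->set_bit_hook(t, start, end, bits);
}

/* Stands in for clear_state_cb(): same idea for clearing bits. */
static void demo_clear_bits(struct demo_tree *t, unsigned long long start,
			    unsigned long long end, unsigned long bits)
{
	if (t->ops && t->ops->clear_bit_hook)
		t->ops->clear_bit_hook(t, start, end, bits);
}

/* The filesystem's hooks: the only place the counter is touched. */
static void demo_set_hook(struct demo_tree *t, unsigned long long start,
			  unsigned long long end, unsigned long bits)
{
	if (bits & DEMO_DELALLOC)
		t->fs_info->delalloc_bytes += end - start + 1;
}

static void demo_clear_hook(struct demo_tree *t, unsigned long long start,
			    unsigned long long end, unsigned long bits)
{
	if (bits & DEMO_DELALLOC)
		t->fs_info->delalloc_bytes -= end - start + 1;
}

static const struct demo_ops demo_ops = {
	.set_bit_hook	= demo_set_hook,
	.clear_bit_hook	= demo_clear_hook,
};

int main(void)
{
	struct demo_fs_info fs = { 0 };
	struct demo_tree tree = { .fs_info = &fs, .ops = &demo_ops };

	/* A caller just marks a 4 KiB range delalloc; no manual accounting. */
	demo_set_bits(&tree, 0, 4095, DEMO_DELALLOC);
	printf("after set:   %llu bytes\n", fs.delalloc_bytes);

	/* Writeback clears the range; the hook subtracts the bytes again. */
	demo_clear_bits(&tree, 0, 4095, DEMO_DELALLOC);
	printf("after clear: %llu bytes\n", fs.delalloc_bytes);
	return 0;
}

Centralizing the arithmetic in the two hooks means a call site only needs set_extent_delalloc() or clear_extent_bit(); the duplicated spin_lock/+=/spin_unlock blocks and the count_range_bits() bookkeeping at each caller go away, as the hunks below show.
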
@@ -2180,7 +2180,6 @@ static int noinline relocate_inode_pages(struct inode *inode, u64 start,
	unsigned long last_index;
	unsigned long i;
	struct page *page;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct file_ra_state *ra;

@@ -2220,11 +2219,6 @@ static int noinline relocate_inode_pages(struct inode *inode, u64 start,
		set_extent_delalloc(io_tree, page_start,
				    page_end, GFP_NOFS);

		spin_lock(&root->fs_info->delalloc_lock);
		root->fs_info->delalloc_bytes += PAGE_CACHE_SIZE -
			existing_delalloc;
		spin_unlock(&root->fs_info->delalloc_lock);

		unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
		set_page_dirty(page);
		unlock_page(page);

@@ -256,6 +256,26 @@ static int merge_state(struct extent_io_tree *tree,
	return 0;
}

static void set_state_cb(struct extent_io_tree *tree,
			 struct extent_state *state,
			 unsigned long bits)
{
	if (tree->ops && tree->ops->set_bit_hook) {
		tree->ops->set_bit_hook(tree->mapping->host, state->start,
					state->end, bits);
	}
}

static void clear_state_cb(struct extent_io_tree *tree,
			   struct extent_state *state,
			   unsigned long bits)
{
	if (tree->ops && tree->ops->clear_bit_hook) {
		tree->ops->clear_bit_hook(tree->mapping->host, state->start,
					  state->end, bits);
	}
}

/*
 * insert an extent_state struct into the tree. 'bits' are set on the
 * struct before it is inserted.

@@ -281,6 +301,7 @@ static int insert_state(struct extent_io_tree *tree,
	state->state |= bits;
	state->start = start;
	state->end = end;
	set_state_cb(tree, state, bits);
	node = tree_insert(&tree->state, end, &state->rb_node);
	if (node) {
		struct extent_state *found;

@@ -349,6 +370,7 @@ static int clear_state_bit(struct extent_io_tree *tree,
		tree->dirty_bytes -= range;
	}
	state->state &= ~bits;
	clear_state_cb(tree, state, bits);
	if (wake)
		wake_up(&state->wq);
	if (delete || state->state == 0) {

@@ -553,6 +575,7 @@ static void set_state_bits(struct extent_io_tree *tree,
		tree->dirty_bytes += range;
	}
	state->state |= bits;
	set_state_cb(tree, state, bits);
}

/*

@@ -975,6 +998,7 @@ search_again:
			goto search_again;
		}
		state->state |= EXTENT_LOCKED;
		set_state_cb(tree, state, EXTENT_LOCKED);
		if (!found)
			*start = state->start;
		found++;

@@ -1474,6 +1498,7 @@ static int end_bio_extent_readpage(struct bio *bio,
				state = NULL;
			}
			clear->state |= EXTENT_UPTODATE;
			set_state_cb(tree, clear, EXTENT_UPTODATE);
			clear_state_bit(tree, clear, EXTENT_LOCKED,
					1, 0);
			if (cur == start)

@@ -33,6 +33,10 @@ struct extent_io_ops {
				      struct extent_state *state);
	void (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
				      struct extent_state *state);
	int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
			    unsigned long bits);
	int (*clear_bit_hook)(struct inode *inode, u64 start, u64 end,
			      unsigned long bits);
};

struct extent_io_tree {

@@ -323,10 +323,6 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
		}
		set_extent_delalloc(io_tree, start_pos, end_of_last_block,
				    GFP_NOFS);
		spin_lock(&root->fs_info->delalloc_lock);
		root->fs_info->delalloc_bytes += (end_of_last_block + 1 -
						  start_pos) - existing_delalloc;
		spin_unlock(&root->fs_info->delalloc_lock);
		btrfs_add_ordered_inode(inode);
	} else {
		u64 aligned_end;

@@ -80,8 +80,6 @@ int btrfs_check_free_space(struct btrfs_root *root, u64 num_required,
	u64 thresh;
	int ret = 0;

	return 0;

	if (for_del)
		thresh = total * 90;
	else

@@ -249,7 +247,6 @@ not_found:
static int run_delalloc_range(struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 num_bytes;
	int ret;
	mutex_lock(&root->fs_info->fs_mutex);
	if (btrfs_test_opt(root, NODATACOW) ||

@@ -258,20 +255,34 @@ static int run_delalloc_range(struct inode *inode, u64 start, u64 end)
	else
		ret = cow_file_range(inode, start, end);

	spin_lock(&root->fs_info->delalloc_lock);
	num_bytes = end + 1 - start;
	if (root->fs_info->delalloc_bytes < num_bytes) {
		printk("delalloc accounting error total %llu sub %llu\n",
		       root->fs_info->delalloc_bytes, num_bytes);
	} else {
		root->fs_info->delalloc_bytes -= num_bytes;
	}
	spin_unlock(&root->fs_info->delalloc_lock);

	mutex_unlock(&root->fs_info->fs_mutex);
	return ret;
}

int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
		       unsigned long bits)
{
	if ((bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		spin_lock(&root->fs_info->delalloc_lock);
		root->fs_info->delalloc_bytes += end - start + 1;
		spin_unlock(&root->fs_info->delalloc_lock);
	}
	return 0;
}

int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end,
			 unsigned long bits)
{
	if ((bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		spin_lock(&root->fs_info->delalloc_lock);
		root->fs_info->delalloc_bytes -= end - start + 1;
		spin_unlock(&root->fs_info->delalloc_lock);
	}
	return 0;
}

int btrfs_writepage_io_hook(struct page *page, u64 start, u64 end)
{
	struct inode *inode = page->mapping->host;

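The two hooks above add or subtract end - start + 1 bytes for an inclusive range, whereas the removed call sites added PAGE_CACHE_SIZE - existing_delalloc per page. For a page that carried no delalloc bits yet, both forms account the same number of bytes; a tiny userspace check of that arithmetic, with an assumed 4 KiB page size and illustrative DEMO_* names:

#include <assert.h>
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12			/* assumption: 4 KiB pages */
#define DEMO_PAGE_SIZE  (1ULL << DEMO_PAGE_SHIFT)

int main(void)
{
	unsigned long long index = 42;		/* arbitrary page->index */
	unsigned long long page_start = index << DEMO_PAGE_SHIFT;
	unsigned long long page_end = page_start + DEMO_PAGE_SIZE - 1;

	/* Old call sites: delalloc_bytes += PAGE_CACHE_SIZE - existing_delalloc
	 * (existing_delalloc is 0 when the range had no delalloc bits yet).
	 * New hook:       delalloc_bytes += end - start + 1.                 */
	assert(page_end - page_start + 1 == DEMO_PAGE_SIZE);
	printf("range [%llu, %llu] accounts %llu bytes\n",
	       page_start, page_end, page_end - page_start + 1);
	return 0;
}

Because the hooks fire from set_state_bits() and clear_state_bit(), every path that flips EXTENT_DELALLOC goes through the same arithmetic instead of each caller repeating it.
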
@@ -908,28 +919,17 @@ static int btrfs_cow_one_page(struct inode *inode, struct page *page,
{
	char *kaddr;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
	u64 existing_delalloc;
	u64 delalloc_start;
	int ret = 0;

	WARN_ON(!PageLocked(page));
	set_page_extent_mapped(page);

	lock_extent(io_tree, page_start, page_end, GFP_NOFS);
	delalloc_start = page_start;
	existing_delalloc = count_range_bits(&BTRFS_I(inode)->io_tree,
					     &delalloc_start, page_end,
					     PAGE_CACHE_SIZE, EXTENT_DELALLOC);
	set_extent_delalloc(&BTRFS_I(inode)->io_tree, page_start,
			    page_end, GFP_NOFS);

	spin_lock(&root->fs_info->delalloc_lock);
	root->fs_info->delalloc_bytes += PAGE_CACHE_SIZE - existing_delalloc;
	spin_unlock(&root->fs_info->delalloc_lock);

	if (zero_start != PAGE_CACHE_SIZE) {
		kaddr = kmap(page);
		memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);

@@ -2456,8 +2456,6 @@ int btrfs_defrag_file(struct file *file) {
	unsigned long ra_index = 0;
	u64 page_start;
	u64 page_end;
	u64 delalloc_start;
	u64 existing_delalloc;
	unsigned long i;
	int ret;

@@ -2491,19 +2489,9 @@ int btrfs_defrag_file(struct file *file) {
		page_end = page_start + PAGE_CACHE_SIZE - 1;

		lock_extent(io_tree, page_start, page_end, GFP_NOFS);
		delalloc_start = page_start;
		existing_delalloc =
			count_range_bits(&BTRFS_I(inode)->io_tree,
					 &delalloc_start, page_end,
					 PAGE_CACHE_SIZE, EXTENT_DELALLOC);
		set_extent_delalloc(io_tree, page_start,
				    page_end, GFP_NOFS);

		spin_lock(&root->fs_info->delalloc_lock);
		root->fs_info->delalloc_bytes += PAGE_CACHE_SIZE -
			existing_delalloc;
		spin_unlock(&root->fs_info->delalloc_lock);

		unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
		set_page_dirty(page);
		unlock_page(page);