btrfs: add debug check for extent_io range alignment
The 'end' value must exactly cover the end of the interval, which means one byte less than the expected block alignment, or in case of a file smaller than one block, one byte less than the inode size.

Signed-off-by: David Sterba <dsterba@suse.cz>
Signed-off-by: Josef Bacik <jbacik@fusionio.com>
commit 8d599ae1bf
parent 15b0a89d71
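As a quick illustration of the rule stated in the commit message (not part of the patch; the helper name and the 4096-byte block size are made up for the example), a valid 'end' is either one byte short of a block boundary, or one byte short of the inode size for a file smaller than one block:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical helper, not in the patch: does 'end' follow the rule
 * described in the commit message for a given block size and inode size? */
static bool end_is_valid(unsigned long long end,
			 unsigned long long blocksize,
			 unsigned long long isize)
{
	/* One byte less than a block boundary: 4095, 8191, ... */
	if ((end + 1) % blocksize == 0)
		return true;
	/* ...or, for a file smaller than one block, one byte less than
	 * the inode size. */
	return isize < blocksize && end == isize - 1;
}

int main(void)
{
	printf("%d\n", end_is_valid(4095, 4096, 1 << 20));	/* 1: block end */
	printf("%d\n", end_is_valid(4096, 4096, 1 << 20));	/* 0: off by one */
	printf("%d\n", end_is_valid(99, 4096, 100));		/* 1: 100-byte file */
	return 0;
}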
@@ -77,10 +77,29 @@ void btrfs_leak_debug_check(void)
 		kmem_cache_free(extent_buffer_cache, eb);
 	}
 }
+
+#define btrfs_debug_check_extent_io_range(inode, start, end)		\
+	__btrfs_debug_check_extent_io_range(__func__, (inode), (start), (end))
+static inline void __btrfs_debug_check_extent_io_range(const char *caller,
+		struct inode *inode, u64 start, u64 end)
+{
+	u64 isize = i_size_read(inode);
+
+	if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
+		printk_ratelimited(KERN_DEBUG
+		    "btrfs: %s: ino %llu isize %llu odd range [%llu,%llu]\n",
+				caller,
+				(unsigned long long)btrfs_ino(inode),
+				(unsigned long long)isize,
+				(unsigned long long)start,
+				(unsigned long long)end);
+	}
+}
 #else
 #define btrfs_leak_debug_add(new, head)	do {} while (0)
 #define btrfs_leak_debug_del(entry)	do {} while (0)
 #define btrfs_leak_debug_check()	do {} while (0)
+#define btrfs_debug_check_extent_io_range(c, s, e)	do {} while (0)
 #endif
 
 #define BUFFER_LRU_MAX 64
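For reference, a minimal userspace rendering of the predicate added above, assuming a PAGE_SIZE of 4096; the standalone function is illustrative only, not kernel code. Since properly aligned 'end' values are always odd (4095, 8191, ...), an even 'end' that is not the last byte of the file is treated as suspicious:

#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096ULL	/* stand-in for the kernel's PAGE_SIZE */

/* Mirrors the condition in __btrfs_debug_check_extent_io_range(): flag a
 * range whose 'end' is even (block-aligned ends are odd) and is not the
 * last byte of the file. */
static bool range_looks_odd(unsigned long long end, unsigned long long isize)
{
	return end >= EXAMPLE_PAGE_SIZE && (end % 2) == 0 && end != isize - 1;
}

int main(void)
{
	unsigned long long isize = 3 * 4096;	/* 12 KiB file */

	printf("%d\n", range_looks_odd(4095, isize));		/* 0: block-aligned end */
	printf("%d\n", range_looks_odd(4096, isize));		/* 1: flagged */
	printf("%d\n", range_looks_odd(isize - 1, isize));	/* 0: end of file */
	return 0;
}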
@@ -522,6 +541,8 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	int err;
 	int clear = 0;
 
+	btrfs_debug_check_extent_io_range(tree->mapping->host, start, end);
+
 	if (delete)
 		bits |= ~EXTENT_CTLBITS;
 	bits |= EXTENT_FIRST_DELALLOC;
@@ -677,6 +698,8 @@ static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	struct extent_state *state;
 	struct rb_node *node;
 
+	btrfs_debug_check_extent_io_range(tree->mapping->host, start, end);
+
 	spin_lock(&tree->lock);
 again:
 	while (1) {
@@ -769,6 +792,8 @@ __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	u64 last_start;
 	u64 last_end;
 
+	btrfs_debug_check_extent_io_range(tree->mapping->host, start, end);
+
 	bits |= EXTENT_FIRST_DELALLOC;
 again:
 	if (!prealloc && (mask & __GFP_WAIT)) {
@@ -989,6 +1014,8 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	u64 last_start;
 	u64 last_end;
 
+	btrfs_debug_check_extent_io_range(tree->mapping->host, start, end);
+
 again:
 	if (!prealloc && (mask & __GFP_WAIT)) {
 		prealloc = alloc_extent_state(mask);