From 308d9800b2c4f1fb344dbf055912d3140438bac0 Mon Sep 17 00:00:00 2001
From: Filipe Manana
Date: Tue, 11 Mar 2014 13:56:15 +0000
Subject: [PATCH] Btrfs: cache extent states in defrag code path

When locking file ranges in the inode's io_tree, cache the first
extent state that belongs to the target range, so that when unlocking
the range we don't need to search in the io_tree again, reducing cpu
time and therefore holding the io_tree's lock for a shorter period.

Signed-off-by: Filipe David Borba Manana
Signed-off-by: Chris Mason
---
 fs/btrfs/ioctl.c | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index e1747701f520..3ad5c10d3704 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -986,10 +986,13 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start)
 	read_unlock(&em_tree->lock);
 
 	if (!em) {
+		struct extent_state *cached = NULL;
+		u64 end = start + len - 1;
+
 		/* get the big lock and read metadata off disk */
-		lock_extent(io_tree, start, start + len - 1);
+		lock_extent_bits(io_tree, start, end, 0, &cached);
 		em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
-		unlock_extent(io_tree, start, start + len - 1);
+		unlock_extent_cached(io_tree, start, end, &cached, GFP_NOFS);
 
 		if (IS_ERR(em))
 			return NULL;
@@ -1128,10 +1131,12 @@ again:
 		page_start = page_offset(page);
 		page_end = page_start + PAGE_CACHE_SIZE - 1;
 		while (1) {
-			lock_extent(tree, page_start, page_end);
+			lock_extent_bits(tree, page_start, page_end,
+					 0, &cached_state);
 			ordered = btrfs_lookup_ordered_extent(inode,
 							      page_start);
-			unlock_extent(tree, page_start, page_end);
+			unlock_extent_cached(tree, page_start, page_end,
+					     &cached_state, GFP_NOFS);
 
 			if (!ordered)
 				break;
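
Note: below is a minimal sketch of the caching pattern this patch adopts, using the
lock_extent_bits()/unlock_extent_cached() calls exactly as they appear in the diff
above. The helper name and the standalone framing are illustrative only; real callers
live in fs/btrfs and build against the kernel's btrfs headers.

/* Sketch only: assumes the in-kernel btrfs extent_io API shown in the diff above. */
#include "ctree.h"
#include "extent_io.h"

static void defrag_style_locked_range(struct extent_io_tree *io_tree,
				      u64 start, u64 end)
{
	struct extent_state *cached = NULL;

	/*
	 * While taking the range lock, the io_tree code remembers the first
	 * extent state covering [start, end] in 'cached'.
	 */
	lock_extent_bits(io_tree, start, end, 0, &cached);

	/* ... read or modify metadata for the locked range ... */

	/*
	 * Unlocking with the cached state can start from 'cached' instead of
	 * searching the io_tree again, so its lock is held for a shorter period.
	 */
	unlock_extent_cached(io_tree, start, end, &cached, GFP_NOFS);
}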