mm: filemap: move radix tree hole searching here
The radix tree hole searching code is only used for page cache, for example the readahead code trying to get a picture of the area surrounding a fault. It sufficed to rely on the radix tree definition of holes, which is "empty tree slot". But this is about to change, though, as shadow page descriptors will be stored in the page cache after the actual pages get evicted from memory. Move the functions over to mm/filemap.c and make them native page cache operations, where they can later be adapted to handle the new definition of "page cache hole". Signed-off-by: Johannes Weiner <hannes@cmpxchg.org> Reviewed-by: Rik van Riel <riel@redhat.com> Reviewed-by: Minchan Kim <minchan@kernel.org> Acked-by: Mel Gorman <mgorman@suse.de> Cc: Andrea Arcangeli <aarcange@redhat.com> Cc: Bob Liu <bob.liu@oracle.com> Cc: Christoph Hellwig <hch@infradead.org> Cc: Dave Chinner <david@fromorbit.com> Cc: Greg Thelen <gthelen@google.com> Cc: Hugh Dickins <hughd@google.com> Cc: Jan Kara <jack@suse.cz> Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Cc: Luigi Semenzato <semenzato@google.com> Cc: Metin Doslu <metin@citusdata.com> Cc: Michel Lespinasse <walken@google.com> Cc: Ozgun Erdogan <ozgun@citusdata.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Roman Gushchin <klamm@yandex-team.ru> Cc: Ryan Mallon <rmallon@gmail.com> Cc: Tejun Heo <tj@kernel.org> Cc: Vlastimil Babka <vbabka@suse.cz> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
6dbaf22ce1
commit
e7b563bb2a
|
@ -1213,7 +1213,7 @@ static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx)
|
|||
end = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE);
|
||||
if (end != NFS_I(inode)->npages) {
|
||||
rcu_read_lock();
|
||||
end = radix_tree_next_hole(&mapping->page_tree, idx + 1, ULONG_MAX);
|
||||
end = page_cache_next_hole(mapping, idx + 1, ULONG_MAX);
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
|
|
|
@ -243,6 +243,11 @@ static inline struct page *page_cache_alloc_readahead(struct address_space *x)
|
|||
|
||||
typedef int filler_t(void *, struct page *);
|
||||
|
||||
pgoff_t page_cache_next_hole(struct address_space *mapping,
|
||||
pgoff_t index, unsigned long max_scan);
|
||||
pgoff_t page_cache_prev_hole(struct address_space *mapping,
|
||||
pgoff_t index, unsigned long max_scan);
|
||||
|
||||
extern struct page * find_get_page(struct address_space *mapping,
|
||||
pgoff_t index);
|
||||
extern struct page * find_lock_page(struct address_space *mapping,
|
||||
|
|
|
@ -227,10 +227,6 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
|
|||
unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root,
|
||||
void ***results, unsigned long *indices,
|
||||
unsigned long first_index, unsigned int max_items);
|
||||
unsigned long radix_tree_next_hole(struct radix_tree_root *root,
|
||||
unsigned long index, unsigned long max_scan);
|
||||
unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
|
||||
unsigned long index, unsigned long max_scan);
|
||||
int radix_tree_preload(gfp_t gfp_mask);
|
||||
int radix_tree_maybe_preload(gfp_t gfp_mask);
|
||||
void radix_tree_init(void);
|
||||
|
|
|
@ -946,81 +946,6 @@ next:
|
|||
}
|
||||
EXPORT_SYMBOL(radix_tree_range_tag_if_tagged);
|
||||
|
||||
|
||||
/**
|
||||
* radix_tree_next_hole - find the next hole (not-present entry)
|
||||
* @root: tree root
|
||||
* @index: index key
|
||||
* @max_scan: maximum range to search
|
||||
*
|
||||
* Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the lowest
|
||||
* indexed hole.
|
||||
*
|
||||
* Returns: the index of the hole if found, otherwise returns an index
|
||||
* outside of the set specified (in which case 'return - index >= max_scan'
|
||||
* will be true). In rare cases of index wrap-around, 0 will be returned.
|
||||
*
|
||||
* radix_tree_next_hole may be called under rcu_read_lock. However, like
|
||||
* radix_tree_gang_lookup, this will not atomically search a snapshot of
|
||||
* the tree at a single point in time. For example, if a hole is created
|
||||
* at index 5, then subsequently a hole is created at index 10,
|
||||
* radix_tree_next_hole covering both indexes may return 10 if called
|
||||
* under rcu_read_lock.
|
||||
*/
|
||||
unsigned long radix_tree_next_hole(struct radix_tree_root *root,
|
||||
unsigned long index, unsigned long max_scan)
|
||||
{
|
||||
unsigned long i;
|
||||
|
||||
for (i = 0; i < max_scan; i++) {
|
||||
if (!radix_tree_lookup(root, index))
|
||||
break;
|
||||
index++;
|
||||
if (index == 0)
|
||||
break;
|
||||
}
|
||||
|
||||
return index;
|
||||
}
|
||||
EXPORT_SYMBOL(radix_tree_next_hole);
|
||||
|
||||
/**
|
||||
* radix_tree_prev_hole - find the prev hole (not-present entry)
|
||||
* @root: tree root
|
||||
* @index: index key
|
||||
* @max_scan: maximum range to search
|
||||
*
|
||||
* Search backwards in the range [max(index-max_scan+1, 0), index]
|
||||
* for the first hole.
|
||||
*
|
||||
* Returns: the index of the hole if found, otherwise returns an index
|
||||
* outside of the set specified (in which case 'index - return >= max_scan'
|
||||
* will be true). In rare cases of wrap-around, ULONG_MAX will be returned.
|
||||
*
|
||||
* radix_tree_next_hole may be called under rcu_read_lock. However, like
|
||||
* radix_tree_gang_lookup, this will not atomically search a snapshot of
|
||||
* the tree at a single point in time. For example, if a hole is created
|
||||
* at index 10, then subsequently a hole is created at index 5,
|
||||
* radix_tree_prev_hole covering both indexes may return 5 if called under
|
||||
* rcu_read_lock.
|
||||
*/
|
||||
unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
|
||||
unsigned long index, unsigned long max_scan)
|
||||
{
|
||||
unsigned long i;
|
||||
|
||||
for (i = 0; i < max_scan; i++) {
|
||||
if (!radix_tree_lookup(root, index))
|
||||
break;
|
||||
index--;
|
||||
if (index == ULONG_MAX)
|
||||
break;
|
||||
}
|
||||
|
||||
return index;
|
||||
}
|
||||
EXPORT_SYMBOL(radix_tree_prev_hole);
|
||||
|
||||
/**
|
||||
* radix_tree_gang_lookup - perform multiple lookup on a radix tree
|
||||
* @root: radix tree root
|
||||
|
|
76
mm/filemap.c
76
mm/filemap.c
|
@ -685,6 +685,82 @@ int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* page_cache_next_hole - find the next hole (not-present entry)
|
||||
* @mapping: mapping
|
||||
* @index: index
|
||||
* @max_scan: maximum range to search
|
||||
*
|
||||
* Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the
|
||||
* lowest indexed hole.
|
||||
*
|
||||
* Returns: the index of the hole if found, otherwise returns an index
|
||||
* outside of the set specified (in which case 'return - index >=
|
||||
* max_scan' will be true). In rare cases of index wrap-around, 0 will
|
||||
* be returned.
|
||||
*
|
||||
* page_cache_next_hole may be called under rcu_read_lock. However,
|
||||
* like radix_tree_gang_lookup, this will not atomically search a
|
||||
* snapshot of the tree at a single point in time. For example, if a
|
||||
* hole is created at index 5, then subsequently a hole is created at
|
||||
* index 10, page_cache_next_hole covering both indexes may return 10
|
||||
* if called under rcu_read_lock.
|
||||
*/
|
||||
pgoff_t page_cache_next_hole(struct address_space *mapping,
|
||||
pgoff_t index, unsigned long max_scan)
|
||||
{
|
||||
unsigned long i;
|
||||
|
||||
for (i = 0; i < max_scan; i++) {
|
||||
if (!radix_tree_lookup(&mapping->page_tree, index))
|
||||
break;
|
||||
index++;
|
||||
if (index == 0)
|
||||
break;
|
||||
}
|
||||
|
||||
return index;
|
||||
}
|
||||
EXPORT_SYMBOL(page_cache_next_hole);
|
||||
|
||||
/**
|
||||
* page_cache_prev_hole - find the prev hole (not-present entry)
|
||||
* @mapping: mapping
|
||||
* @index: index
|
||||
* @max_scan: maximum range to search
|
||||
*
|
||||
* Search backwards in the range [max(index-max_scan+1, 0), index] for
|
||||
* the first hole.
|
||||
*
|
||||
* Returns: the index of the hole if found, otherwise returns an index
|
||||
* outside of the set specified (in which case 'index - return >=
|
||||
* max_scan' will be true). In rare cases of wrap-around, ULONG_MAX
|
||||
* will be returned.
|
||||
*
|
||||
* page_cache_prev_hole may be called under rcu_read_lock. However,
|
||||
* like radix_tree_gang_lookup, this will not atomically search a
|
||||
* snapshot of the tree at a single point in time. For example, if a
|
||||
* hole is created at index 10, then subsequently a hole is created at
|
||||
* index 5, page_cache_prev_hole covering both indexes may return 5 if
|
||||
* called under rcu_read_lock.
|
||||
*/
|
||||
pgoff_t page_cache_prev_hole(struct address_space *mapping,
|
||||
pgoff_t index, unsigned long max_scan)
|
||||
{
|
||||
unsigned long i;
|
||||
|
||||
for (i = 0; i < max_scan; i++) {
|
||||
if (!radix_tree_lookup(&mapping->page_tree, index))
|
||||
break;
|
||||
index--;
|
||||
if (index == ULONG_MAX)
|
||||
break;
|
||||
}
|
||||
|
||||
return index;
|
||||
}
|
||||
EXPORT_SYMBOL(page_cache_prev_hole);
|
||||
|
||||
/**
|
||||
* find_get_page - find and get a page reference
|
||||
* @mapping: the address_space to search
|
||||
|
|
|
@ -347,7 +347,7 @@ static pgoff_t count_history_pages(struct address_space *mapping,
|
|||
pgoff_t head;
|
||||
|
||||
rcu_read_lock();
|
||||
head = radix_tree_prev_hole(&mapping->page_tree, offset - 1, max);
|
||||
head = page_cache_prev_hole(mapping, offset - 1, max);
|
||||
rcu_read_unlock();
|
||||
|
||||
return offset - 1 - head;
|
||||
|
@ -427,7 +427,7 @@ ondemand_readahead(struct address_space *mapping,
|
|||
pgoff_t start;
|
||||
|
||||
rcu_read_lock();
|
||||
start = radix_tree_next_hole(&mapping->page_tree, offset+1,max);
|
||||
start = page_cache_next_hole(mapping, offset + 1, max);
|
||||
rcu_read_unlock();
|
||||
|
||||
if (!start || start - offset > max)
|
||||
|
|
Loading…
Reference in New Issue