mm/rmap: calculate page offset when needed
Call page_to_pgoff() to get the page offset only once we are sure we actually need it, i.e. after the obvious initial function checks have passed. This is a trivial micro-optimization that potentially saves some cycles. Signed-off-by: Davidlohr Bueso <dbueso@suse.de> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
2847cf95c6
commit
b258d86065
|
@ -1635,7 +1635,7 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
|
||||||
static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
|
static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
|
||||||
{
|
{
|
||||||
struct anon_vma *anon_vma;
|
struct anon_vma *anon_vma;
|
||||||
pgoff_t pgoff = page_to_pgoff(page);
|
pgoff_t pgoff;
|
||||||
struct anon_vma_chain *avc;
|
struct anon_vma_chain *avc;
|
||||||
int ret = SWAP_AGAIN;
|
int ret = SWAP_AGAIN;
|
||||||
|
|
||||||
|
@ -1643,6 +1643,7 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
|
||||||
if (!anon_vma)
|
if (!anon_vma)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
|
pgoff = page_to_pgoff(page);
|
||||||
anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
|
anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
|
||||||
struct vm_area_struct *vma = avc->vma;
|
struct vm_area_struct *vma = avc->vma;
|
||||||
unsigned long address = vma_address(page, vma);
|
unsigned long address = vma_address(page, vma);
|
||||||
|
@ -1676,7 +1677,7 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
|
||||||
static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
|
static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
|
||||||
{
|
{
|
||||||
struct address_space *mapping = page->mapping;
|
struct address_space *mapping = page->mapping;
|
||||||
pgoff_t pgoff = page_to_pgoff(page);
|
pgoff_t pgoff;
|
||||||
struct vm_area_struct *vma;
|
struct vm_area_struct *vma;
|
||||||
int ret = SWAP_AGAIN;
|
int ret = SWAP_AGAIN;
|
||||||
|
|
||||||
|
@ -1691,6 +1692,7 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
|
||||||
if (!mapping)
|
if (!mapping)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
|
pgoff = page_to_pgoff(page);
|
||||||
i_mmap_lock_read(mapping);
|
i_mmap_lock_read(mapping);
|
||||||
vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
|
vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
|
||||||
unsigned long address = vma_address(page, vma);
|
unsigned long address = vma_address(page, vma);
|
||||||
|
|
Loading…
Reference in New Issue