mm/mlock.c: use page_zone() instead of page_zone_id()
page_zone_id() is a specialized function for comparing the zone of pages that lie within the same section. If the pages belong to different sections, page_zone_id() can differ even though their zone is the same. This incorrect usage does not cause any actual problem, since __munlock_pagevec_fill() would simply be called again with the failed index, but it is better to use the more appropriate function here.

Link: http://lkml.kernel.org/r/1503559211-10259-1-git-send-email-iamjoonsoo.kim@lge.com
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 638032224e
commit 9472f23c9e

 mm/mlock.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -365,8 +365,8 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
  * @start + PAGE_SIZE when no page could be added by the pte walk.
  */
 static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
-		struct vm_area_struct *vma, int zoneid, unsigned long start,
-		unsigned long end)
+		struct vm_area_struct *vma, struct zone *zone,
+		unsigned long start, unsigned long end)
 {
 	pte_t *pte;
 	spinlock_t *ptl;
@@ -394,7 +394,7 @@ static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
 		 * Break if page could not be obtained or the page's node+zone does not
 		 * match
 		 */
-		if (!page || page_zone_id(page) != zoneid)
+		if (!page || page_zone(page) != zone)
 			break;
 
 		/*
@@ -446,7 +446,6 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
 		unsigned long page_increm;
 		struct pagevec pvec;
 		struct zone *zone;
-		int zoneid;
 
 		pagevec_init(&pvec, 0);
 		/*
@@ -481,7 +480,6 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
 				 */
 				pagevec_add(&pvec, page);
 				zone = page_zone(page);
-				zoneid = page_zone_id(page);
 
 				/*
 				 * Try to fill the rest of pagevec using fast
@@ -490,7 +488,7 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
 				 * pagevec.
 				 */
 				start = __munlock_pagevec_fill(&pvec, vma,
-						zoneid, start, end);
+						zone, start, end);
 				__munlock_pagevec(&pvec, zone);
 				goto next;
 			}
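For illustration only (not part of the commit): a minimal sketch, in ordinary kernel C, of the batching pattern the changed code relies on. The helper name collect_same_zone_pages() is hypothetical; page_zone(), pagevec_space() and pagevec_add() are the real kernel APIs assumed here. The point is that comparing page_zone() is valid for any two pages, whereas page_zone_id() is only meant for pages within the same section, as the changelog explains.

#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/pagevec.h>

/*
 * Hypothetical helper, for illustration: batch consecutive pages that
 * belong to the given zone into a pagevec, stopping at the first page
 * from a different zone (or a missing page).  Returns the number of
 * pages batched.
 */
static int collect_same_zone_pages(struct page **pages, int nr,
				   struct zone *zone, struct pagevec *pvec)
{
	int i;

	for (i = 0; i < nr; i++) {
		/* Stop when the pagevec has no free slot left. */
		if (!pagevec_space(pvec))
			break;
		/* page_zone() identifies the zone regardless of section. */
		if (!pages[i] || page_zone(pages[i]) != zone)
			break;
		pagevec_add(pvec, pages[i]);
	}

	return i;
}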