thp: scale nr_rotated to balance memory pressure
Make sure we scale up nr_rotated when we encounter a referenced
transparent huge page.  This ensures pageout scanning balance is not
distorted when there are huge pages on the LRU.

Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 9992af1029
parent 2c888cfbc1
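The scaling factor used in both hunks comes from hpage_nr_pages(), which
reports how many base pages a page covers. As a rough sketch of that helper
in kernels of this era (the !CONFIG_TRANSPARENT_HUGEPAGE build simply
defines it to 1):

static inline int hpage_nr_pages(struct page *page)
{
	/* A transparent huge page covers HPAGE_PMD_NR base pages. */
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	/* An ordinary page counts as one. */
	return 1;
}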
@@ -1276,7 +1276,8 @@ putback_lru_pages(struct zone *zone, struct scan_control *sc,
 		add_page_to_lru_list(zone, page, lru);
 		if (is_active_lru(lru)) {
 			int file = is_file_lru(lru);
-			reclaim_stat->recent_rotated[file]++;
+			int numpages = hpage_nr_pages(page);
+			reclaim_stat->recent_rotated[file] += numpages;
 		}
 		if (!pagevec_add(&pvec, page)) {
 			spin_unlock_irq(&zone->lru_lock);
@@ -1552,7 +1553,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 		}
 
 		if (page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
-			nr_rotated++;
+			nr_rotated += hpage_nr_pages(page);
 			/*
 			 * Identify referenced, file-backed active pages and
 			 * give them one more trip around the active list. So
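For context (not part of this patch), shrink_active_list() in kernels of
this vintage later folds nr_rotated into the per-zone reclaim statistics,
roughly as below; counting a referenced THP as hpage_nr_pages(page)
rotations instead of one keeps that statistic in units of base pages, so
file/anon scan pressure stays balanced:

	/*
	 * Referenced pages are counted as rotated to balance scan pressure
	 * between the file and anonymous LRU lists; after this patch,
	 * nr_rotated is measured in base pages.
	 */
	reclaim_stat->recent_rotated[file] += nr_rotated;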