mm/memory-failure.c: fix page isolated count mismatch
Pages isolated for migration are accounted with the vmstat counters NR_ISOLATED_[ANON|FILE]. Callers of migrate_pages() are expected to increment these counters when pages are isolated from the LRU. Once the pages have been migrated, they are put back on the LRU or freed and the isolated count is decremented.

Memory failure is not properly accounting for pages it isolates, causing the NR_ISOLATED counters to go negative. On SMP builds this goes unnoticed, as negative counters are treated as 0 due to expected per-cpu drift. On UP builds the counter is treated by too_many_isolated() as a large value, causing processes to enter D state during page reclaim or compaction.

This patch accounts for pages isolated by memory failure correctly.

[mel@csn.ul.ie: rewrote changelog]
Reviewed-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Minchan Kim <minchan.kim@gmail.com>
Cc: Andi Kleen <andi@firstfloor.org>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d2c3225879
commit 5db8a73a8d
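For context, here is a minimal sketch of the mechanism the changelog describes, paraphrased from the vmstat/vmscan code of this era; the exact shapes are illustrative and not part of this patch. zone_page_state() clamps a negative counter to zero only under CONFIG_SMP, so on UP a missed decrement is read back as a huge unsigned value by a too_many_isolated()-style check and direct reclaimers stall:

/* Illustrative sketch, not part of this patch. */
static inline unsigned long zone_page_state(struct zone *zone,
					    enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	/*
	 * Per-cpu drift can drive the global sum transiently negative,
	 * so SMP builds clamp it to 0 here.  UP builds have no drift
	 * and skip the clamp, which is why a leaked isolation shows up
	 * there as a huge unsigned value.
	 */
	if (x < 0)
		x = 0;
#endif
	return x;
}

/* Simplified too_many_isolated()-style check used during reclaim. */
static int too_many_isolated(struct zone *zone, int file)
{
	unsigned long inactive, isolated;

	if (file) {
		inactive = zone_page_state(zone, NR_INACTIVE_FILE);
		isolated = zone_page_state(zone, NR_ISOLATED_FILE);
	} else {
		inactive = zone_page_state(zone, NR_INACTIVE_ANON);
		isolated = zone_page_state(zone, NR_ISOLATED_ANON);
	}

	/*
	 * With a leaked isolation this comparison is effectively
	 * "huge number > inactive", so reclaimers keep waiting in
	 * D state instead of making progress.
	 */
	return isolated > inactive;
}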
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -52,6 +52,7 @@
 #include <linux/swapops.h>
 #include <linux/hugetlb.h>
 #include <linux/memory_hotplug.h>
+#include <linux/mm_inline.h>
 #include "internal.h"
 
 int sysctl_memory_failure_early_kill __read_mostly = 0;
@@ -1468,7 +1469,8 @@ int soft_offline_page(struct page *page, int flags)
 	put_page(page);
 	if (!ret) {
 		LIST_HEAD(pagelist);
-
+		inc_zone_page_state(page, NR_ISOLATED_ANON +
+					    page_is_file_cache(page));
 		list_add(&page->lru, &pagelist);
 		ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
 								0, true);
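The inc_zone_page_state() added above pairs with the decrement performed once migration finishes and the pages go back to the LRU or are freed. For reference, a sketch of that side, paraphrased from the putback_lru_pages() in mm/migrate.c of this era (not part of this patch):

/* Paraphrased sketch of putback_lru_pages(); not part of this patch. */
void putback_lru_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;

	list_for_each_entry_safe(page, page2, l, lru) {
		list_del(&page->lru);
		/* Undo the isolation accounting done by the caller. */
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		putback_lru_page(page);
	}
}

With both sides in place, soft offlining leaves the NR_ISOLATED_[ANON|FILE] counters balanced instead of permanently negative.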