mm, hugetlb: schedule when potentially allocating many hugepages
A few hugetlb allocators loop while calling the page allocator and can
potentially prevent rescheduling if the page allocator slowpath is not
utilized.

Conditionally schedule when large numbers of hugepages can be allocated.

Anshuman: "Fixes a task which was getting hung while writing like 10000
hugepages (16MB on POWER8) into /proc/sys/vm/nr_hugepages."

Link: http://lkml.kernel.org/r/alpine.DEB.2.10.1706091535300.66176@chino.kir.corp.google.com
Signed-off-by: David Rientjes <rientjes@google.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Tested-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 8b91323889
commit 69ed779a14
@@ -1781,6 +1781,7 @@ retry:
 			break;
 		}
 		list_add(&page->lru, &surplus_list);
+		cond_resched();
 	}
 	allocated += i;
 
@@ -2249,6 +2250,7 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
 		} else if (!alloc_fresh_huge_page(h,
 					 &node_states[N_MEMORY]))
 			break;
+		cond_resched();
 	}
 	if (i < h->max_huge_pages) {
 		char buf[32];
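For illustration, here is a minimal kernel-style sketch of the pattern both
hunks apply. The function alloc_many_pages_example() is hypothetical, not
part of mm/hugetlb.c: it shows a tight loop of page allocations that, when
every allocation is satisfied from the fast path, never sleeps, so an
explicit cond_resched() in the loop body is what gives other tasks a chance
to run on a non-preemptible kernel.

/*
 * Illustrative sketch only -- alloc_many_pages_example() is a made-up
 * function, not the patched hugetlb code.
 */
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/sched.h>

static void alloc_many_pages_example(unsigned long nr_pages)
{
	unsigned long i;

	for (i = 0; i < nr_pages; i++) {
		/*
		 * Fast-path allocations return without sleeping, so this
		 * loop by itself may never yield the CPU.
		 */
		struct page *page = alloc_page(GFP_KERNEL);

		if (!page)
			break;
		__free_page(page);	/* placeholder for real bookkeeping */

		/* Voluntarily yield if a reschedule is pending. */
		cond_resched();
	}
}

As in the two hunks above, cond_resched() is placed once per iteration,
after the iteration's work completes, so the cost is a cheap check when no
reschedule is pending.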