mm: hugetlb: defer freeing pages when gathering surplus pages
When gathering surplus pages, the number of needed pages is recomputed after reacquiring hugetlb_lock, to catch changes in resv_huge_pages and free_huge_pages. It is also recomputed with the newly allocated pages taken into account. Freeing pages can therefore be deferred until we know whether the final page request is satisfied, even when fewer pages were allocated than needed.

Signed-off-by: Hillf Danton <dhillf@gmail.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 28073b02bf
parent cc715d99e5
 mm/hugetlb.c | 28 ++++++++++++++++------------
 1 file changed, 16 insertions(+), 12 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -852,6 +852,7 @@ static int gather_surplus_pages(struct hstate *h, int delta)
 	struct page *page, *tmp;
 	int ret, i;
 	int needed, allocated;
+	bool alloc_ok = true;
 
 	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
 	if (needed <= 0) {
@@ -867,17 +868,13 @@ retry:
 	spin_unlock(&hugetlb_lock);
 	for (i = 0; i < needed; i++) {
 		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
-		if (!page)
-			/*
-			 * We were not able to allocate enough pages to
-			 * satisfy the entire reservation so we free what
-			 * we've allocated so far.
-			 */
-			goto free;
-
+		if (!page) {
+			alloc_ok = false;
+			break;
+		}
 		list_add(&page->lru, &surplus_list);
 	}
-	allocated += needed;
+	allocated += i;
 
 	/*
 	 * After retaking hugetlb_lock, we need to recalculate 'needed'
@@ -886,9 +883,16 @@ retry:
 	spin_lock(&hugetlb_lock);
 	needed = (h->resv_huge_pages + delta) -
 			(h->free_huge_pages + allocated);
-	if (needed > 0)
-		goto retry;
+	if (needed > 0) {
+		if (alloc_ok)
+			goto retry;
+		/*
+		 * We were not able to allocate enough pages to
+		 * satisfy the entire reservation so we free what
+		 * we've allocated so far.
+		 */
+		goto free;
+	}
 	/*
 	 * The surplus_list now contains _at_least_ the number of extra pages
 	 * needed to accommodate the reservation.  Add the appropriate number
 	 * of pages to our reserved count and adjust the persistent record of
@@ -914,10 +918,10 @@ retry:
 		VM_BUG_ON(page_count(page));
 		enqueue_huge_page(h, page);
 	}
+free:
 	spin_unlock(&hugetlb_lock);
 
 	/* Free unnecessary surplus pages to the buddy allocator */
-free:
 	if (!list_empty(&surplus_list)) {
 		list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
 			list_del(&page->lru);
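For readers who want the resulting control flow in isolation: below is a minimal, compilable userspace sketch of the pattern this patch introduces, with a pthread mutex standing in for hugetlb_lock and malloc'd blocks standing in for huge pages. All names here (gather_surplus, resv_blocks, free_blocks, MAX_BATCH) are hypothetical stand-ins, not kernel API.

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

#define MAX_BATCH 64
#define BLOCK_SZ  4096

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static int resv_blocks;  /* stand-in for h->resv_huge_pages */
static int free_blocks;  /* stand-in for h->free_huge_pages */

/* Grow the pool so it can back 'delta' more reserved blocks.
 * Returns 0 on success, -1 if allocation ultimately failed. */
static int gather_surplus(int delta)
{
	void *batch[MAX_BATCH];
	int allocated = 0, needed, i;
	bool alloc_ok = true;
	int ret = -1;

	pthread_mutex_lock(&pool_lock);
	needed = (resv_blocks + delta) - free_blocks;
	if (needed <= 0) {
		resv_blocks += delta;
		pthread_mutex_unlock(&pool_lock);
		return 0;
	}

retry:
	/* Allocate with the lock dropped, as the kernel code does. */
	pthread_mutex_unlock(&pool_lock);
	for (i = 0; i < needed; i++) {
		void *p = (allocated + i < MAX_BATCH) ? malloc(BLOCK_SZ) : NULL;
		if (!p) {
			/* Key point of the patch: don't free anything yet;
			 * the shortfall may have shrunk while we allocated. */
			alloc_ok = false;
			break;
		}
		batch[allocated + i] = p;
	}
	allocated += i;	/* count what we actually got, not 'needed' */

	/* Retake the lock and recompute the shortfall: other threads
	 * may have returned blocks to the pool in the meantime. */
	pthread_mutex_lock(&pool_lock);
	needed = (resv_blocks + delta) - (free_blocks + allocated);
	if (needed > 0) {
		if (alloc_ok)
			goto retry;
		/* The last round failed and we are still short: give up
		 * and release everything gathered so far. */
		goto free;
	}

	/* Satisfied: commit the reservation. The kernel enqueues the pages
	 * and frees any excess; this sketch only tracks counts, so the
	 * block pointers are deliberately dropped into the "pool" here. */
	resv_blocks += delta;
	free_blocks += allocated;
	allocated = 0;
	ret = 0;
free:
	pthread_mutex_unlock(&pool_lock);
	while (allocated > 0)
		free(batch[--allocated]);
	return ret;
}

The detail mirrored from the patch is the placement of the failure handling: the free path runs only after 'needed' has been recomputed under the lock, so a concurrent release of pages can rescue an allocation round that partially failed.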