hugetlb: decrease hugetlb_lock cycling in gather_surplus_huge_pages
To reduce hugetlb_lock acquisitions and releases when freeing excess surplus
pages, scan the page list in two parts.  First, transfer the needed pages to
the hugetlb pool.  Then drop the lock and free the remaining pages back to
the buddy allocator.  In the common case there are zero excess pages and no
lock operations are required.

Thanks to Mel Gorman for this improvement.

Signed-off-by: Adam Litke <agl@us.ibm.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Dave Hansen <haveblue@us.ibm.com>
Cc: William Lee Irwin III <wli@holomorphy.com>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 797df57490
commit 19fc3f0acd

mm/hugetlb.c | 17 ++++++++++++-----
1 file changed, 12 insertions(+), 5 deletions(-)
mm/hugetlb.c

@@ -372,11 +372,19 @@ retry:
 	resv_huge_pages += delta;
 	ret = 0;
 free:
+	/* Free the needed pages to the hugetlb pool */
 	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
+		if ((--needed) < 0)
+			break;
 		list_del(&page->lru);
-		if ((--needed) >= 0)
-			enqueue_huge_page(page);
-		else {
+		enqueue_huge_page(page);
+	}
+
+	/* Free unnecessary surplus pages to the buddy allocator */
+	if (!list_empty(&surplus_list)) {
+		spin_unlock(&hugetlb_lock);
+		list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
+			list_del(&page->lru);
 			/*
 			 * The page has a reference count of zero already, so
 			 * call free_huge_page directly instead of using
@@ -384,10 +392,9 @@ free:
 			 * unlocked which is safe because free_huge_page takes
 			 * hugetlb_lock before deciding how to free the page.
 			 */
-			spin_unlock(&hugetlb_lock);
 			free_huge_page(page);
-			spin_lock(&hugetlb_lock);
 		}
+		spin_lock(&hugetlb_lock);
 	}
 
 	return ret;
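For reference, a consolidated sketch of the tail of gather_surplus_huge_pages()
as it reads after this patch, assembled from the hunks above rather than taken
verbatim from the tree.  The allocation and retry logic before the free: label
and the declarations of page, tmp, needed, and ret are elided; hugetlb_lock is
assumed to be held on entry to this code, consistent with the unlock/relock
pairing in the diff.

free:
	/* Phase one, lock held: move exactly the needed number of
	 * pages from surplus_list into the hugetlb pool. */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		if ((--needed) < 0)
			break;
		list_del(&page->lru);
		enqueue_huge_page(page);
	}

	/* Phase two, reached only when excess pages remain: cycle the
	 * lock once for the whole batch, not once per page, and hand
	 * each page back to the buddy allocator. */
	if (!list_empty(&surplus_list)) {
		spin_unlock(&hugetlb_lock);
		list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
			list_del(&page->lru);
			/* refcount is already zero; free_huge_page() takes
			 * hugetlb_lock itself, so call it unlocked */
			free_huge_page(page);
		}
		spin_lock(&hugetlb_lock);
	}

	return ret;

In the common case needed covers the whole list, phase two sees an empty list,
and the lock is never dropped at all; previously every excess page cost one
unlock/lock pair inside the else branch of the single loop.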