mm: add folio dtor and order setter functions
Patch series "convert core hugetlb functions to folios", v5. ============== OVERVIEW =========================== Now that many hugetlb helper functions that deal with hugetlb specific flags[1] and hugetlb cgroups[2] are converted to folios, higher level allocation, prep, and freeing functions within hugetlb can also be converted to operate in folios. Patch 1 of this series implements the wrapper functions around setting the compound destructor and compound order for a folio. Besides the user added in patch 1, patch 2 and patch 9 also use these helper functions. Patches 2-10 convert the higher level hugetlb functions to folios. ============== TESTING =========================== LTP: Ran 10 back-to-back rounds of the LTP hugetlb test suite. Gigantic Huge Pages: Test allocation and freeing via hugeadm commands: hugeadm --pool-pages-min 1GB:10 hugeadm --pool-pages-min 1GB:0 Demote: Demote one 1GB hugepage to 512 2MB hugepages echo 1 > /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages echo 1 > /sys/kernel/mm/hugepages/hugepages-1048576kB/demote cat /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages # 512 cat /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages # 0 [1] https://lore.kernel.org/lkml/20220922154207.1575343-1-sidhartha.kumar@oracle.com/ [2] https://lore.kernel.org/linux-mm/20221101223059.460937-1-sidhartha.kumar@oracle.com/ This patch (of 10): Add folio equivalents for set_compound_order() and set_compound_page_dtor(). Also remove extra new-lines introduced by mm/hugetlb: convert move_hugetlb_state() to folios and mm/hugetlb_cgroup: convert hugetlb_cgroup_uncharge_page() to folios.
[sidhartha.kumar@oracle.com: clarify folio_set_compound_order() zero support] Link: https://lkml.kernel.org/r/20221207223731.32784-1-sidhartha.kumar@oracle.com Link: https://lkml.kernel.org/r/20221129225039.82257-1-sidhartha.kumar@oracle.com Link: https://lkml.kernel.org/r/20221129225039.82257-2-sidhartha.kumar@oracle.com Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com> Suggested-by: Mike Kravetz <mike.kravetz@oracle.com> Suggested-by: Muchun Song <songmuchun@bytedance.com> Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com> Cc: David Hildenbrand <david@redhat.com> Cc: John Hubbard <jhubbard@nvidia.com> Cc: Matthew Wilcox <willy@infradead.org> Cc: Miaohe Lin <linmiaohe@huawei.com> Cc: Mina Almasry <almasrymina@google.com> Cc: Tarun Sahu <tsahu@linux.ibm.com> Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk> Cc: Wei Chen <harperchen1110@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
6e1ca48d06
commit
9fd330582b
|
@ -997,6 +997,13 @@ static inline void set_compound_page_dtor(struct page *page,
|
|||
page[1].compound_dtor = compound_dtor;
|
||||
}
|
||||
|
||||
static inline void folio_set_compound_dtor(struct folio *folio,
|
||||
enum compound_dtor_id compound_dtor)
|
||||
{
|
||||
VM_BUG_ON_FOLIO(compound_dtor >= NR_COMPOUND_DTORS, folio);
|
||||
folio->_folio_dtor = compound_dtor;
|
||||
}
|
||||
|
||||
void destroy_large_folio(struct folio *folio);
|
||||
|
||||
static inline int head_compound_pincount(struct page *head)
|
||||
|
@ -1012,6 +1019,22 @@ static inline void set_compound_order(struct page *page, unsigned int order)
|
|||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
* folio_set_compound_order is generally passed a non-zero order to
|
||||
* initialize a large folio. However, hugetlb code abuses this by
|
||||
* passing in zero when 'dissolving' a large folio.
|
||||
*/
|
||||
static inline void folio_set_compound_order(struct folio *folio,
|
||||
unsigned int order)
|
||||
{
|
||||
VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
|
||||
|
||||
folio->_folio_order = order;
|
||||
#ifdef CONFIG_64BIT
|
||||
folio->_folio_nr_pages = order ? 1U << order : 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
/* Returns the number of pages in this potentially compound page. */
|
||||
static inline unsigned long compound_nr(struct page *page)
|
||||
{
|
||||
|
|
|
@ -1780,7 +1780,7 @@ static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio)
|
|||
{
|
||||
hugetlb_vmemmap_optimize(h, &folio->page);
|
||||
INIT_LIST_HEAD(&folio->lru);
|
||||
folio->_folio_dtor = HUGETLB_PAGE_DTOR;
|
||||
folio_set_compound_dtor(folio, HUGETLB_PAGE_DTOR);
|
||||
hugetlb_set_folio_subpool(folio, NULL);
|
||||
set_hugetlb_cgroup(folio, NULL);
|
||||
set_hugetlb_cgroup_rsvd(folio, NULL);
|
||||
|
@ -2938,7 +2938,6 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
|
|||
* a reservation exists for the allocation.
|
||||
*/
|
||||
page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
|
||||
|
||||
if (!page) {
|
||||
spin_unlock_irq(&hugetlb_lock);
|
||||
page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
|
||||
|
@ -7343,7 +7342,6 @@ void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int re
|
|||
int old_nid = folio_nid(old_folio);
|
||||
int new_nid = folio_nid(new_folio);
|
||||
|
||||
|
||||
folio_set_hugetlb_temporary(old_folio);
|
||||
folio_clear_hugetlb_temporary(new_folio);
|
||||
|
||||
|
|
Loading…
Reference in New Issue