mm: always initialise folio->_deferred_list
commit b7b098cf00a2b65d5654a86dc8edf82f125289c1 upstream.

Patch series "Various significant MM patches".

These patches all interact in annoying ways which make it tricky to send
them out in any way other than a big batch, even though there's not
really an overarching theme to connect them.

The big effects of this patch series are:

 - folio_test_hugetlb() becomes reliable, even when called without a
   page reference
 - We free up PG_slab, and we could always use more page flags
 - We no longer need to check PageSlab before calling page_mapcount()

This patch (of 9):

For compound pages which are at least order-2 (and hence have a
deferred_list), initialise it and then we can check at free that the
page is not part of a deferred list.  We recently found this useful to
rule out a source of corruption.

[peterx@redhat.com: always initialise folio->_deferred_list]
  Link: https://lkml.kernel.org/r/20240417211836.2742593-2-peterx@redhat.com
Link: https://lkml.kernel.org/r/20240321142448.1645400-1-willy@infradead.org
Link: https://lkml.kernel.org/r/20240321142448.1645400-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Oscar Salvador <osalvador@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
[ Include three small changes from the upstream commit, for backport
  safety: replace list_del() by list_del_init() in
  split_huge_page_to_list(), like c010d47f107f ("mm: thp: split huge
  page to any lower order pages"); replace list_del() by
  list_del_init() in folio_undo_large_rmappable(), like 9bcef5973e31
  ("mm: memcg: fix split queue list crash when large folio migration");
  keep __free_pages() instead of folio_put() in
  __update_and_free_hugetlb_folio(). ]
Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
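The invariant the series establishes is easiest to see in miniature:
every folio of order >= 2 gets an initialised _deferred_list when the
compound page is prepared, so the free paths can assert that the folio
is not sitting on a deferred split queue. Below is a minimal user-space
sketch of that idea, with simplified stand-ins for the kernel's list and
folio types (illustrative only, not the real prep_compound_head() or
free-path code):

#include <assert.h>
#include <stdio.h>

/* Kernel-style circular doubly-linked list node. */
struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *list)
{
	list->next = list;
	list->prev = list;
}

static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

struct folio {
	unsigned int order;
	struct list_head _deferred_list;	/* only valid for order > 1 */
};

/* Mirrors the prep_compound_head() change: initialise at allocation. */
static void prep_folio(struct folio *folio, unsigned int order)
{
	folio->order = order;
	if (order > 1)
		INIT_LIST_HEAD(&folio->_deferred_list);
}

/* Mirrors the free-time check: a freed folio must not be queued. */
static void free_folio(struct folio *folio)
{
	if (folio->order > 1)
		assert(list_empty(&folio->_deferred_list));
}

int main(void)
{
	struct folio f;

	prep_folio(&f, 4);	/* order-4 compound page */
	free_folio(&f);		/* passes: never put on a deferred list */
	printf("invariant holds\n");
	return 0;
}

Order-0 and order-1 folios have no room for the list (it overlaps
fields of the second tail page), which is why both the initialisation
and the checks in the diff below are gated on order > 1.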
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -571,8 +571,6 @@ void folio_prep_large_rmappable(struct folio *folio)
 {
 	if (!folio || !folio_test_large(folio))
 		return;
-	if (folio_order(folio) > 1)
-		INIT_LIST_HEAD(&folio->_deferred_list);
 	folio_set_large_rmappable(folio);
 }
 
@@ -2725,7 +2723,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 		if (folio_order(folio) > 1 &&
 		    !list_empty(&folio->_deferred_list)) {
 			ds_queue->split_queue_len--;
-			list_del(&folio->_deferred_list);
+			list_del_init(&folio->_deferred_list);
 		}
 		spin_unlock(&ds_queue->split_queue_lock);
 		if (mapping) {
@@ -2789,7 +2787,7 @@ void folio_undo_large_rmappable(struct folio *folio)
 	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
 	if (!list_empty(&folio->_deferred_list)) {
 		ds_queue->split_queue_len--;
-		list_del(&folio->_deferred_list);
+		list_del_init(&folio->_deferred_list);
 	}
 	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
 }
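The two hunks above are where the backport deliberately diverges from
plain list_del(): after list_del_init() the node points back at itself,
so a later list_empty() on the folio remains a valid "not on any list"
test, whereas the kernel's list_del() poisons the node's next/prev
pointers, which would make that test report the folio as still queued.
A small user-space sketch of the semantic difference (stand-in helpers,
not the real <linux/list.h>):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *list)
{
	list->next = list->prev = list;
}

static bool list_empty(const struct list_head *head)
{
	return head->next == head;
}

static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

/* Unlink only; the kernel version then poisons next/prev. */
static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	entry->next = entry->prev = NULL;	/* stand-in for LIST_POISON */
}

/* Unlink and reinitialise, so list_empty(entry) is true afterwards. */
static void list_del_init(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	INIT_LIST_HEAD(entry);
}

int main(void)
{
	struct list_head queue, node;

	INIT_LIST_HEAD(&queue);
	list_add(&node, &queue);

	list_del_init(&node);
	assert(list_empty(&node));	/* safe "not queued" check */

	/* After plain list_del(), list_empty(&node) would report false,
	 * so a later free-path check would wrongly treat the folio as
	 * still queued -- the hazard the backport note guards against. */
	printf("list_del_init keeps list_empty() meaningful\n");
	return 0;
}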
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1795,6 +1795,7 @@ static void __update_and_free_hugetlb_folio(struct hstate *h,
 		destroy_compound_gigantic_folio(folio, huge_page_order(h));
 		free_gigantic_folio(folio, huge_page_order(h));
 	} else {
+		INIT_LIST_HEAD(&folio->_deferred_list);
 		__free_pages(&folio->page, huge_page_order(h));
 	}
 }
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -431,6 +431,8 @@ static inline void prep_compound_head(struct page *page, unsigned int order)
 	atomic_set(&folio->_entire_mapcount, -1);
 	atomic_set(&folio->_nr_pages_mapped, 0);
 	atomic_set(&folio->_pincount, 0);
+	if (order > 1)
+		INIT_LIST_HEAD(&folio->_deferred_list);
 }
 
 static inline void prep_compound_tail(struct page *head, int tail_idx)
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -7153,6 +7153,9 @@ static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
 	struct obj_cgroup *objcg;
 
 	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
+	VM_BUG_ON_FOLIO(folio_order(folio) > 1 &&
+			!folio_test_hugetlb(folio) &&
+			!list_empty(&folio->_deferred_list), folio);
 
 	/*
 	 * Nobody should be changing or seriously looking at
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1002,10 +1002,11 @@ static int free_tail_page_prepare(struct page *head_page, struct page *page)
 		}
 		break;
 	case 2:
-		/*
-		 * the second tail page: ->mapping is
-		 * deferred_list.next -- ignore value.
-		 */
+		/* the second tail page: deferred_list overlaps ->mapping */
+		if (unlikely(!list_empty(&folio->_deferred_list))) {
+			bad_page(page, "on deferred list");
+			goto out;
+		}
 		break;
 	default:
 		if (page->mapping != TAIL_MAPPING) {
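The rewritten comment in free_tail_page_prepare() points at why the
check lands on the second tail page: in struct folio, _deferred_list
occupies storage that the second tail page would otherwise expose as
->mapping. A schematic union (illustrative layout only; the real one is
in include/linux/mm_types.h) shows how initialising the list writes a
self-pointer into that shared word:

#include <assert.h>

struct list_head {
	struct list_head *next, *prev;
};

/* Stand-in for one struct page worth of fields. */
struct fake_page {
	unsigned long flags;
	void *mapping;		/* what free_tail_page_prepare() inspects */
	unsigned long pad[2];
};

/* Stand-in for a folio view over three consecutive "pages". */
union folio_view {
	struct fake_page pages[3];
	struct {
		struct fake_page head;
		struct fake_page tail1;
		/* second tail page: deferred_list overlaps ->mapping */
		unsigned long _flags2;
		struct list_head _deferred_list;
	};
};

int main(void)
{
	union folio_view f = { 0 };

	/* INIT_LIST_HEAD on _deferred_list writes a self-pointer ... */
	f._deferred_list.next = &f._deferred_list;
	f._deferred_list.prev = &f._deferred_list;

	/* ... into the word the second tail page calls ->mapping,
	 * which is why that page needs a special case here. */
	assert(f.pages[2].mapping == (void *)&f._deferred_list);
	return 0;
}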