mm: convert total_compound_mapcount() to folio_total_mapcount()
Instead of enforcing that the argument must be a head page by naming, enforce it with the compiler by making it a folio. Also rename the counter in struct folio from _compound_mapcount to _entire_mapcount.

Link: https://lkml.kernel.org/r/20230111142915.1001531-5-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
6eee1a0062
commit
b14224fbea
|
@@ -871,7 +871,7 @@ static inline int page_mapcount(struct page *page)
|
||||||
return head_compound_mapcount(page) + mapcount;
|
return head_compound_mapcount(page) + mapcount;
|
||||||
}
|
}
|
||||||
|
|
||||||
int total_compound_mapcount(struct page *head);
|
int folio_total_mapcount(struct folio *folio);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* folio_mapcount() - Calculate the number of mappings of this folio.
|
* folio_mapcount() - Calculate the number of mappings of this folio.
|
||||||
|
@@ -888,14 +888,14 @@ static inline int folio_mapcount(struct folio *folio)
|
||||||
{
|
{
|
||||||
if (likely(!folio_test_large(folio)))
|
if (likely(!folio_test_large(folio)))
|
||||||
return atomic_read(&folio->_mapcount) + 1;
|
return atomic_read(&folio->_mapcount) + 1;
|
||||||
return total_compound_mapcount(&folio->page);
|
return folio_total_mapcount(folio);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int total_mapcount(struct page *page)
|
static inline int total_mapcount(struct page *page)
|
||||||
{
|
{
|
||||||
if (likely(!PageCompound(page)))
|
if (likely(!PageCompound(page)))
|
||||||
return atomic_read(&page->_mapcount) + 1;
|
return atomic_read(&page->_mapcount) + 1;
|
||||||
return total_compound_mapcount(compound_head(page));
|
return folio_total_mapcount(page_folio(page));
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline bool folio_large_is_mapped(struct folio *folio)
|
static inline bool folio_large_is_mapped(struct folio *folio)
|
||||||
|
|
|
@@ -306,7 +306,7 @@ static inline struct page *encoded_page_ptr(struct encoded_page *page)
|
||||||
* @_head_1: Points to the folio. Do not use.
|
* @_head_1: Points to the folio. Do not use.
|
||||||
* @_folio_dtor: Which destructor to use for this folio.
|
* @_folio_dtor: Which destructor to use for this folio.
|
||||||
* @_folio_order: Do not use directly, call folio_order().
|
* @_folio_order: Do not use directly, call folio_order().
|
||||||
* @_compound_mapcount: Do not use directly, call folio_entire_mapcount().
|
* @_entire_mapcount: Do not use directly, call folio_entire_mapcount().
|
||||||
* @_nr_pages_mapped: Do not use directly, call folio_mapcount().
|
* @_nr_pages_mapped: Do not use directly, call folio_mapcount().
|
||||||
* @_pincount: Do not use directly, call folio_maybe_dma_pinned().
|
* @_pincount: Do not use directly, call folio_maybe_dma_pinned().
|
||||||
* @_folio_nr_pages: Do not use directly, call folio_nr_pages().
|
* @_folio_nr_pages: Do not use directly, call folio_nr_pages().
|
||||||
|
@@ -360,7 +360,7 @@ struct folio {
|
||||||
unsigned long _head_1;
|
unsigned long _head_1;
|
||||||
unsigned char _folio_dtor;
|
unsigned char _folio_dtor;
|
||||||
unsigned char _folio_order;
|
unsigned char _folio_order;
|
||||||
atomic_t _compound_mapcount;
|
atomic_t _entire_mapcount;
|
||||||
atomic_t _nr_pages_mapped;
|
atomic_t _nr_pages_mapped;
|
||||||
atomic_t _pincount;
|
atomic_t _pincount;
|
||||||
#ifdef CONFIG_64BIT
|
#ifdef CONFIG_64BIT
|
||||||
|
@@ -403,7 +403,7 @@ FOLIO_MATCH(flags, _flags_1);
|
||||||
FOLIO_MATCH(compound_head, _head_1);
|
FOLIO_MATCH(compound_head, _head_1);
|
||||||
FOLIO_MATCH(compound_dtor, _folio_dtor);
|
FOLIO_MATCH(compound_dtor, _folio_dtor);
|
||||||
FOLIO_MATCH(compound_order, _folio_order);
|
FOLIO_MATCH(compound_order, _folio_order);
|
||||||
FOLIO_MATCH(compound_mapcount, _compound_mapcount);
|
FOLIO_MATCH(compound_mapcount, _entire_mapcount);
|
||||||
FOLIO_MATCH(subpages_mapcount, _nr_pages_mapped);
|
FOLIO_MATCH(subpages_mapcount, _nr_pages_mapped);
|
||||||
FOLIO_MATCH(compound_pincount, _pincount);
|
FOLIO_MATCH(compound_pincount, _pincount);
|
||||||
#ifdef CONFIG_64BIT
|
#ifdef CONFIG_64BIT
|
||||||
|
|
21
mm/rmap.c
21
mm/rmap.c
|
@@ -1078,27 +1078,26 @@ int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
|
||||||
return page_vma_mkclean_one(&pvmw);
|
return page_vma_mkclean_one(&pvmw);
|
||||||
}
|
}
|
||||||
|
|
||||||
int total_compound_mapcount(struct page *head)
|
int folio_total_mapcount(struct folio *folio)
|
||||||
{
|
{
|
||||||
struct folio *folio = (struct folio *)head;
|
int mapcount = folio_entire_mapcount(folio);
|
||||||
int mapcount = head_compound_mapcount(head);
|
int nr_pages;
|
||||||
int nr_subpages;
|
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
/* In the common case, avoid the loop when no subpages mapped by PTE */
|
/* In the common case, avoid the loop when no pages mapped by PTE */
|
||||||
if (folio_nr_pages_mapped(folio) == 0)
|
if (folio_nr_pages_mapped(folio) == 0)
|
||||||
return mapcount;
|
return mapcount;
|
||||||
/*
|
/*
|
||||||
* Add all the PTE mappings of those subpages mapped by PTE.
|
* Add all the PTE mappings of those pages mapped by PTE.
|
||||||
* Limit the loop, knowing that only subpages_mapcount are mapped?
|
* Limit the loop to folio_nr_pages_mapped()?
|
||||||
* Perhaps: given all the raciness, that may be a good or a bad idea.
|
* Perhaps: given all the raciness, that may be a good or a bad idea.
|
||||||
*/
|
*/
|
||||||
nr_subpages = thp_nr_pages(head);
|
nr_pages = folio_nr_pages(folio);
|
||||||
for (i = 0; i < nr_subpages; i++)
|
for (i = 0; i < nr_pages; i++)
|
||||||
mapcount += atomic_read(&head[i]._mapcount);
|
mapcount += atomic_read(&folio_page(folio, i)->_mapcount);
|
||||||
|
|
||||||
/* But each of those _mapcounts was based on -1 */
|
/* But each of those _mapcounts was based on -1 */
|
||||||
mapcount += nr_subpages;
|
mapcount += nr_pages;
|
||||||
return mapcount;
|
return mapcount;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue