mm: move page zone helpers from mm.h to mmzone.h
It makes more sense to have these helpers in the zone-specific header file than in the generic mm.h.

Link: https://lkml.kernel.org/r/20220715150521.18165-3-alex.sierra@amd.com
Signed-off-by: Alex Sierra <alex.sierra@amd.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: David Hildenbrand <david@redhat.com>
Cc: Felix Kuehling <Felix.Kuehling@amd.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 5bb88dc571
parent 6077c943be
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -2,7 +2,7 @@
 #ifndef _LINUX_MEMREMAP_H_
 #define _LINUX_MEMREMAP_H_
 
-#include <linux/mm.h>
+#include <linux/mmzone.h>
 #include <linux/range.h>
 #include <linux/ioport.h>
 #include <linux/percpu-refcount.h>
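Why the include swap is safe: the only things memremap.h needed from mm.h are the page zone helpers, which the hunks below relocate to mmzone.h, so users of this header keep compiling against the lighter include. A hypothetical consumer to illustrate (sketch only, not part of the patch; the function name is invented):

#include <linux/memremap.h>	/* now brings in <linux/mmzone.h>, not <linux/mm.h> */

/* Hypothetical helper: decide whether a page takes the device-memory path. */
static bool my_uses_device_path(const struct page *page)
{
	/* is_zone_device_page() is now provided via <linux/mmzone.h> */
	return is_zone_device_page(page);
}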
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1045,84 +1045,6 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
  * back into memory.
  */
 
-/*
- * The zone field is never updated after free_area_init_core()
- * sets it, so none of the operations on it need to be atomic.
- */
-
-/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
-#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
-#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
-#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
-#define LAST_CPUPID_PGOFF	(ZONES_PGOFF - LAST_CPUPID_WIDTH)
-#define KASAN_TAG_PGOFF		(LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)
-
-/*
- * Define the bit shifts to access each section. For non-existent
- * sections we define the shift as 0; that plus a 0 mask ensures
- * the compiler will optimise away reference to them.
- */
-#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
-#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
-#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
-#define LAST_CPUPID_PGSHIFT	(LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
-#define KASAN_TAG_PGSHIFT	(KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0))
-
-/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
-#ifdef NODE_NOT_IN_PAGE_FLAGS
-#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
-#define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF)? \
-					SECTIONS_PGOFF : ZONES_PGOFF)
-#else
-#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
-#define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF)? \
-					NODES_PGOFF : ZONES_PGOFF)
-#endif
-
-#define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))
-
-#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
-#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
-#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
-#define LAST_CPUPID_MASK	((1UL << LAST_CPUPID_SHIFT) - 1)
-#define KASAN_TAG_MASK		((1UL << KASAN_TAG_WIDTH) - 1)
-#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)
-
-static inline enum zone_type page_zonenum(const struct page *page)
-{
-	ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK << ZONES_PGSHIFT);
-	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
-}
-
-static inline enum zone_type folio_zonenum(const struct folio *folio)
-{
-	return page_zonenum(&folio->page);
-}
-
-#ifdef CONFIG_ZONE_DEVICE
-static inline bool is_zone_device_page(const struct page *page)
-{
-	return page_zonenum(page) == ZONE_DEVICE;
-}
-extern void memmap_init_zone_device(struct zone *, unsigned long,
-				    unsigned long, struct dev_pagemap *);
-#else
-static inline bool is_zone_device_page(const struct page *page)
-{
-	return false;
-}
-#endif
-
-static inline bool folio_is_zone_device(const struct folio *folio)
-{
-	return is_zone_device_page(&folio->page);
-}
-
-static inline bool is_zone_movable_page(const struct page *page)
-{
-	return page_zonenum(page) == ZONE_MOVABLE;
-}
-
 #if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_FS_DAX)
 DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
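What the removed block computes, in miniature: the zone number lives in a bitfield packed down from the top of page->flags, and page_zonenum() recovers it with a shift and a mask. A minimal userspace sketch of the same scheme (the widths here are invented examples; in the kernel they are config-dependent):

#include <assert.h>
#include <stdio.h>

/* Example widths; the real SECTIONS/NODES/ZONES_WIDTH are config-dependent. */
#define BITS_PER_LONG	(sizeof(unsigned long) * 8)
#define SECTIONS_WIDTH	0
#define NODES_WIDTH	6
#define ZONES_WIDTH	3

/* Fields are packed downward from the top bit, as in the moved block. */
#define SECTIONS_PGOFF	(BITS_PER_LONG - SECTIONS_WIDTH)
#define NODES_PGOFF	(SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF	(NODES_PGOFF - ZONES_WIDTH)

#define ZONES_PGSHIFT	(ZONES_PGOFF * (ZONES_WIDTH != 0))
#define ZONES_MASK	((1UL << ZONES_WIDTH) - 1)

/* Userspace analogue of page_zonenum(): shift the field down, mask it off. */
static unsigned int zonenum(unsigned long flags)
{
	return (flags >> ZONES_PGSHIFT) & ZONES_MASK;
}

int main(void)
{
	/* Pack zone 2 the way free_area_init_core() would set it once. */
	unsigned long flags = 2UL << ZONES_PGSHIFT;

	assert(zonenum(flags) == 2);
	printf("zone field starts at bit %u, decoded zone = %u\n",
	       (unsigned)ZONES_PGSHIFT, zonenum(flags));
	return 0;
}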
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -730,6 +730,86 @@ static inline bool zone_is_empty(struct zone *zone)
 	return zone->spanned_pages == 0;
 }
 
+#ifndef BUILD_VDSO32_64
+/*
+ * The zone field is never updated after free_area_init_core()
+ * sets it, so none of the operations on it need to be atomic.
+ */
+
+/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
+#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
+#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
+#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
+#define LAST_CPUPID_PGOFF	(ZONES_PGOFF - LAST_CPUPID_WIDTH)
+#define KASAN_TAG_PGOFF		(LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)
+
+/*
+ * Define the bit shifts to access each section. For non-existent
+ * sections we define the shift as 0; that plus a 0 mask ensures
+ * the compiler will optimise away reference to them.
+ */
+#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
+#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
+#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
+#define LAST_CPUPID_PGSHIFT	(LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
+#define KASAN_TAG_PGSHIFT	(KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0))
+
+/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
+#ifdef NODE_NOT_IN_PAGE_FLAGS
+#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
+#define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF) ? \
+					SECTIONS_PGOFF : ZONES_PGOFF)
+#else
+#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
+#define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF) ? \
+					NODES_PGOFF : ZONES_PGOFF)
+#endif
+
+#define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))
+
+#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
+#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
+#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
+#define LAST_CPUPID_MASK	((1UL << LAST_CPUPID_SHIFT) - 1)
+#define KASAN_TAG_MASK		((1UL << KASAN_TAG_WIDTH) - 1)
+#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)
+
+static inline enum zone_type page_zonenum(const struct page *page)
+{
+	ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK << ZONES_PGSHIFT);
+	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
+}
+
+static inline enum zone_type folio_zonenum(const struct folio *folio)
+{
+	return page_zonenum(&folio->page);
+}
+
+#ifdef CONFIG_ZONE_DEVICE
+static inline bool is_zone_device_page(const struct page *page)
+{
+	return page_zonenum(page) == ZONE_DEVICE;
+}
+extern void memmap_init_zone_device(struct zone *, unsigned long,
+				    unsigned long, struct dev_pagemap *);
+#else
+static inline bool is_zone_device_page(const struct page *page)
+{
+	return false;
+}
+#endif
+
+static inline bool folio_is_zone_device(const struct folio *folio)
+{
+	return is_zone_device_page(&folio->page);
+}
+
+static inline bool is_zone_movable_page(const struct page *page)
+{
+	return page_zonenum(page) == ZONE_MOVABLE;
+}
+#endif
+
 /*
  * Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
  * intersection with the given zone
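The ZONEID machinery this hunk adds gives the buddy allocator a single comparable NODE:ZONE (or SECTION:ZONE) identifier spanning both adjacent bitfields. A userspace mirror of that packing, again with invented example widths (the real page_zone_id() operates on page->flags):

#include <assert.h>

/* Example widths for the non-sparsemem case, where NODE:ZONE forms the ID. */
#define BITS_PER_LONG	64
#define NODES_SHIFT	6
#define ZONES_SHIFT	3
#define NODES_PGOFF	(BITS_PER_LONG - NODES_SHIFT)
#define ZONES_PGOFF	(NODES_PGOFF - ZONES_SHIFT)

/* The ID spans both adjacent fields, starting at the lower offset. */
#define ZONEID_SHIFT	(NODES_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF	((NODES_PGOFF < ZONES_PGOFF) ? NODES_PGOFF : ZONES_PGOFF)
#define ZONEID_PGSHIFT	(ZONEID_PGOFF * (ZONEID_SHIFT != 0))
#define ZONEID_MASK	((1UL << ZONEID_SHIFT) - 1)

/* Userspace mirror of page_zone_id(): extract node and zone as one value. */
static unsigned long zone_id(unsigned long flags)
{
	return (flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}

int main(void)
{
	/* Two pages on node 1, in zones 0 and 2: their IDs must differ. */
	unsigned long a = (1UL << NODES_PGOFF) | (0UL << ZONES_PGOFF);
	unsigned long b = (1UL << NODES_PGOFF) | (2UL << ZONES_PGOFF);

	assert(zone_id(a) != zone_id(b));
	/* Same node and same zone yield equal IDs, which is what lets the
	 * buddy allocator compare zone membership with one mask-and-shift. */
	assert(zone_id(b) == zone_id((1UL << NODES_PGOFF) | (2UL << ZONES_PGOFF)));
	return 0;
}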