Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "9 patches.

  Subsystems affected by this patch series: mm (thp, memcg, gup,
  migration, memory-hotplug), lib, and x86"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm: don't rely on system state to detect hot-plug operations
  mm: replace memmap_context by meminit_context
  arch/x86/lib/usercopy_64.c: fix __copy_user_flushcache() cache writeback
  lib/memregion.c: include memregion.h
  lib/string.c: implement stpcpy
  mm/migrate: correct thp migration stats
  mm/gup: fix gup_fast with dynamic page table folding
  mm: memcontrol: fix missing suffix of workingset_restore
  mm, THP, swap: fix allocating cluster for swapfile by mistake
commit 8fb1e91033
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -1324,15 +1324,26 @@ PAGE_SIZE multiple when read back.
 	  pgmajfault
 		Number of major page faults incurred
 
-	  workingset_refault
-		Number of refaults of previously evicted pages
+	  workingset_refault_anon
+		Number of refaults of previously evicted anonymous pages.
 
-	  workingset_activate
-		Number of refaulted pages that were immediately activated
+	  workingset_refault_file
+		Number of refaults of previously evicted file pages.
 
-	  workingset_restore
-		Number of restored pages which have been detected as an active
-		workingset before they got reclaimed.
+	  workingset_activate_anon
+		Number of refaulted anonymous pages that were immediately
+		activated.
+
+	  workingset_activate_file
+		Number of refaulted file pages that were immediately activated.
+
+	  workingset_restore_anon
+		Number of restored anonymous pages which have been detected as
+		an active workingset before they got reclaimed.
+
+	  workingset_restore_file
+		Number of restored file pages which have been detected as an
+		active workingset before they got reclaimed.
 
 	  workingset_nodereclaim
 		Number of times a shadow node has been reclaimed
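With the split applied, a cgroup's memory.stat exposes separate anon/file
workingset counters. A minimal user-space reader sketch, in C; the cgroup2
mount point and the group path are assumptions, not part of the patch:

/* Print the workingset_* lines from a v2 cgroup's memory.stat. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* assumed path: cgroup2 mounted at /sys/fs/cgroup, group "user.slice" */
	FILE *f = fopen("/sys/fs/cgroup/user.slice/memory.stat", "r");
	char line[256];

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		if (strncmp(line, "workingset_", 11) == 0)
			fputs(line, stdout);	/* e.g. "workingset_restore_anon 0" */
	fclose(f);
	return 0;
}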
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -538,7 +538,7 @@ virtual_memmap_init(u64 start, u64 end, void *arg)
 	if (map_start < map_end)
 		memmap_init_zone((unsigned long)(map_end - map_start),
 				 args->nid, args->zone, page_to_pfn(map_start),
-				 MEMMAP_EARLY, NULL);
+				 MEMINIT_EARLY, NULL);
 	return 0;
 }
 
@@ -547,8 +547,8 @@ memmap_init (unsigned long size, int nid, unsigned long zone,
 	     unsigned long start_pfn)
 {
 	if (!vmem_map) {
-		memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY,
-				 NULL);
+		memmap_init_zone(size, nid, zone, start_pfn,
+				 MEMINIT_EARLY, NULL);
 	} else {
 		struct page *start;
 		struct memmap_init_callback_data args;
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1260,26 +1260,44 @@ static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
 
 #define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)
 
-static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
+static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address)
 {
-	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
-		return (p4d_t *) pgd_deref(*pgd) + p4d_index(address);
-	return (p4d_t *) pgd;
+	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
+		return (p4d_t *) pgd_deref(pgd) + p4d_index(address);
+	return (p4d_t *) pgdp;
+}
+#define p4d_offset_lockless p4d_offset_lockless
+
+static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address)
+{
+	return p4d_offset_lockless(pgdp, *pgdp, address);
 }
 
-static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
+static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address)
 {
-	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
-		return (pud_t *) p4d_deref(*p4d) + pud_index(address);
-	return (pud_t *) p4d;
+	if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
+		return (pud_t *) p4d_deref(p4d) + pud_index(address);
+	return (pud_t *) p4dp;
+}
+#define pud_offset_lockless pud_offset_lockless
+
+static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address)
+{
+	return pud_offset_lockless(p4dp, *p4dp, address);
 }
 #define pud_offset pud_offset
 
-static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
+static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address)
 {
-	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
-		return (pmd_t *) pud_deref(*pud) + pmd_index(address);
-	return (pmd_t *) pud;
+	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
+		return (pmd_t *) pud_deref(pud) + pmd_index(address);
+	return (pmd_t *) pudp;
+}
+#define pmd_offset_lockless pmd_offset_lockless
+
+static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address)
+{
+	return pmd_offset_lockless(pudp, *pudp, address);
 }
 #define pmd_offset pmd_offset
 
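s390 folds page-table levels dynamically per process, so a helper that
re-dereferences the upper-level slot after gup_fast has already sampled it can
observe a different layout and walk the wrong table. The fix is to pass both
the pointer and the sampled value down and decide everything on the sample. A
user-space analog of that pattern in C; the entry type and the "present" bit
below are toy assumptions, not the kernel's definitions:

#include <stdatomic.h>
#include <stdio.h>

typedef struct { _Atomic unsigned long val; } entry_t;

#define ENTRY_PRESENT	1UL	/* hypothetical "points to a lower table" bit */

static unsigned long *offset_lockless(entry_t *slotp, unsigned long snapshot,
				      unsigned long index)
{
	/* all decisions come from the snapshot, never from *slotp again */
	if (snapshot & ENTRY_PRESENT)
		return (unsigned long *)(snapshot & ~ENTRY_PRESENT) + index;
	return (unsigned long *)slotp;	/* folded level: reuse the slot */
}

int main(void)
{
	unsigned long table[512] = { 0 };
	entry_t slot;

	atomic_init(&slot.val, (unsigned long)table | ENTRY_PRESENT);

	unsigned long snap = atomic_load(&slot.val);	/* READ_ONCE() analog */
	printf("entry at %p\n", (void *)offset_lockless(&slot, snap, 3));
	return 0;
}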
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -120,7 +120,7 @@ long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
 	 */
 	if (size < 8) {
 		if (!IS_ALIGNED(dest, 4) || size != 4)
-			clean_cache_range(dst, 1);
+			clean_cache_range(dst, size);
 	} else {
 		if (!IS_ALIGNED(dest, 8)) {
 			dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
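The one-character bug flushed a single byte instead of the full copy length,
so a short tail that straddles a cache-line boundary left its second line
dirty. A user-space sketch of the line arithmetic; the 64-byte line size is an
assumption here, while the kernel derives it from boot_cpu_data:

#include <stdint.h>
#include <stdio.h>

#define LINE_SIZE 64UL	/* assumed cache-line size */

static size_t lines_covered(uintptr_t addr, size_t size)
{
	uintptr_t start = addr & ~(LINE_SIZE - 1);
	uintptr_t end = (addr + size + LINE_SIZE - 1) & ~(LINE_SIZE - 1);

	return (end - start) / LINE_SIZE;
}

int main(void)
{
	/* a 4-byte copy landing at offset 62 spans two cache lines */
	printf("flush length 1: %zu line(s)\n", lines_covered(62, 1));
	printf("flush length 4: %zu line(s)\n", lines_covered(62, 4));
	return 0;
}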
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -761,41 +761,10 @@ static int __ref get_nid_for_pfn(unsigned long pfn)
 	return pfn_to_nid(pfn);
 }
 
-/* register memory section under specified node if it spans that node */
-static int register_mem_sect_under_node(struct memory_block *mem_blk,
-					 void *arg)
+static int do_register_memory_block_under_node(int nid,
+					       struct memory_block *mem_blk)
 {
-	unsigned long memory_block_pfns = memory_block_size_bytes() / PAGE_SIZE;
-	unsigned long start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
-	unsigned long end_pfn = start_pfn + memory_block_pfns - 1;
-	int ret, nid = *(int *)arg;
-	unsigned long pfn;
-
-	for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
-		int page_nid;
-
-		/*
-		 * memory block could have several absent sections from start.
-		 * skip pfn range from absent section
-		 */
-		if (!pfn_in_present_section(pfn)) {
-			pfn = round_down(pfn + PAGES_PER_SECTION,
-					 PAGES_PER_SECTION) - 1;
-			continue;
-		}
-
-		/*
-		 * We need to check if page belongs to nid only for the boot
-		 * case, during hotplug we know that all pages in the memory
-		 * block belong to the same node.
-		 */
-		if (system_state == SYSTEM_BOOTING) {
-			page_nid = get_nid_for_pfn(pfn);
-			if (page_nid < 0)
-				continue;
-			if (page_nid != nid)
-				continue;
-		}
+	int ret;
 
 	/*
 	 * If this memory block spans multiple nodes, we only indicate
@@ -813,10 +782,58 @@ static int register_mem_sect_under_node(struct memory_block *mem_blk,
 				&node_devices[nid]->dev.kobj,
 				kobject_name(&node_devices[nid]->dev.kobj));
 }
 
+/* register memory section under specified node if it spans that node */
+static int register_mem_block_under_node_early(struct memory_block *mem_blk,
+					       void *arg)
+{
+	unsigned long memory_block_pfns = memory_block_size_bytes() / PAGE_SIZE;
+	unsigned long start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
+	unsigned long end_pfn = start_pfn + memory_block_pfns - 1;
+	int nid = *(int *)arg;
+	unsigned long pfn;
+
+	for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
+		int page_nid;
+
+		/*
+		 * memory block could have several absent sections from start.
+		 * skip pfn range from absent section
+		 */
+		if (!pfn_in_present_section(pfn)) {
+			pfn = round_down(pfn + PAGES_PER_SECTION,
+					 PAGES_PER_SECTION) - 1;
+			continue;
+		}
+
+		/*
+		 * We need to check if page belongs to nid only at the boot
+		 * case because node's ranges can be interleaved.
+		 */
+		page_nid = get_nid_for_pfn(pfn);
+		if (page_nid < 0)
+			continue;
+		if (page_nid != nid)
+			continue;
+
+		return do_register_memory_block_under_node(nid, mem_blk);
+	}
 	/* mem section does not span the specified node */
 	return 0;
 }
 
+/*
+ * During hotplug we know that all pages in the memory block belong to the same
+ * node.
+ */
+static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk,
+						 void *arg)
+{
+	int nid = *(int *)arg;
+
+	return do_register_memory_block_under_node(nid, mem_blk);
+}
+
 /*
  * Unregister a memory block device under the node it spans. Memory blocks
  * with multiple nodes cannot be offlined and therefore also never be removed.
@@ -832,11 +849,19 @@ void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
 			  kobject_name(&node_devices[mem_blk->nid]->dev.kobj));
 }
 
-int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn)
+int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn,
+		      enum meminit_context context)
 {
+	walk_memory_blocks_func_t func;
+
+	if (context == MEMINIT_HOTPLUG)
+		func = register_mem_block_under_node_hotplug;
+	else
+		func = register_mem_block_under_node_early;
+
 	return walk_memory_blocks(PFN_PHYS(start_pfn),
 				  PFN_PHYS(end_pfn - start_pfn), (void *)&nid,
-				  register_mem_sect_under_node);
+				  func);
 }
 
 #ifdef CONFIG_HUGETLBFS
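The net effect: the walk callback is now chosen from a context the caller
states explicitly, rather than inferred from system_state, which can
mis-classify hot-plug operations that run while the system is still booting. A
toy C analog of the dispatch; all names and types below are simplified
stand-ins, not the kernel's:

#include <stdio.h>

enum meminit_context { MEMINIT_EARLY, MEMINIT_HOTPLUG };

typedef int (*walk_func_t)(int nid);

static int register_early(int nid)
{
	printf("early registration, nid=%d\n", nid);	/* checks page nids */
	return 0;
}

static int register_hotplug(int nid)
{
	printf("hotplug registration, nid=%d\n", nid);	/* trusts caller's nid */
	return 0;
}

static int link_mem_sections(int nid, enum meminit_context context)
{
	walk_func_t func = (context == MEMINIT_HOTPLUG) ?
			   register_hotplug : register_early;

	return func(nid);	/* stand-in for walk_memory_blocks() */
}

int main(void)
{
	link_mem_sections(0, MEMINIT_EARLY);	/* register_one_node() path */
	link_mem_sections(1, MEMINIT_HOTPLUG);	/* add_memory_resource() path */
	return 0;
}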
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2416,7 +2416,7 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn,
 
 extern void set_dma_reserve(unsigned long new_dma_reserve);
 extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long,
-		enum memmap_context, struct vmem_altmap *);
+		enum meminit_context, struct vmem_altmap *);
 extern void setup_per_zone_wmarks(void);
 extern int __meminit init_per_zone_wmark_min(void);
 extern void mem_init(void);
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -824,10 +824,15 @@ bool zone_watermark_ok(struct zone *z, unsigned int order,
 		unsigned int alloc_flags);
 bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
 		unsigned long mark, int highest_zoneidx);
-enum memmap_context {
-	MEMMAP_EARLY,
-	MEMMAP_HOTPLUG,
+/*
+ * Memory initialization context, use to differentiate memory added by
+ * the platform statically or via memory hotplug interface.
+ */
+enum meminit_context {
+	MEMINIT_EARLY,
+	MEMINIT_HOTPLUG,
 };
 
 extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
 				      unsigned long size);
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -99,11 +99,13 @@ extern struct node *node_devices[];
 typedef void (*node_registration_func_t)(struct node *);
 
 #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_NUMA)
-extern int link_mem_sections(int nid, unsigned long start_pfn,
-			     unsigned long end_pfn);
+int link_mem_sections(int nid, unsigned long start_pfn,
+		      unsigned long end_pfn,
+		      enum meminit_context context);
 #else
 static inline int link_mem_sections(int nid, unsigned long start_pfn,
-				    unsigned long end_pfn)
+				    unsigned long end_pfn,
+				    enum meminit_context context)
 {
 	return 0;
 }
@@ -128,7 +130,8 @@ static inline int register_one_node(int nid)
 		if (error)
 			return error;
 		/* link memory sections under this node */
-		error = link_mem_sections(nid, start_pfn, end_pfn);
+		error = link_mem_sections(nid, start_pfn, end_pfn,
+					  MEMINIT_EARLY);
 	}
 
 	return error;
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -1427,6 +1427,16 @@ typedef unsigned int pgtbl_mod_mask;
 #define mm_pmd_folded(mm)	__is_defined(__PAGETABLE_PMD_FOLDED)
 #endif
 
+#ifndef p4d_offset_lockless
+#define p4d_offset_lockless(pgdp, pgd, address) p4d_offset(&(pgd), address)
+#endif
+#ifndef pud_offset_lockless
+#define pud_offset_lockless(p4dp, p4d, address) pud_offset(&(p4d), address)
+#endif
+#ifndef pmd_offset_lockless
+#define pmd_offset_lockless(pudp, pud, address) pmd_offset(&(pud), address)
+#endif
+
 /*
  * p?d_leaf() - true if this entry is a final mapping to a physical address.
  * This differs from p?d_huge() by the fact that they are always available (if
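For architectures without dynamic folding, the fallbacks reduce to the
existing helpers applied to the caller's local snapshot, so behaviour is
unchanged there. A small compile-time toy of that expansion in C; the entry
types and the "entry holds a pointer" encoding are stand-ins:

#include <stdio.h>

typedef struct { unsigned long val; } pud_t;
typedef struct { unsigned long val; } pmd_t;

static pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	(void)address;
	return (pmd_t *)pud->val;	/* toy: entry holds a table pointer */
}

/* same shape as the include/linux/pgtable.h fallback above */
#define pmd_offset_lockless(pudp, pud, address) pmd_offset(&(pud), address)

int main(void)
{
	pmd_t table[4] = { { 0 } };
	pud_t slot = { .val = (unsigned long)table };
	pud_t snapshot = slot;			/* READ_ONCE() analog */

	/* expands to pmd_offset(&snapshot, 0): only the local copy is read */
	printf("%p\n", (void *)pmd_offset_lockless(&slot, snapshot, 0));
	return 0;
}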
--- a/lib/memregion.c
+++ b/lib/memregion.c
@@ -2,6 +2,7 @@
 /* identifiers for device / performance-differentiated memory regions */
 #include <linux/idr.h>
 #include <linux/types.h>
+#include <linux/memregion.h>
 
 static DEFINE_IDA(memregion_ids);
--- a/lib/string.c
+++ b/lib/string.c
@@ -272,6 +272,30 @@ ssize_t strscpy_pad(char *dest, const char *src, size_t count)
 }
 EXPORT_SYMBOL(strscpy_pad);
 
+/**
+ * stpcpy - copy a string from src to dest returning a pointer to the new end
+ *          of dest, including src's %NUL-terminator. May overrun dest.
+ * @dest: pointer to end of string being copied into. Must be large enough
+ *        to receive copy.
+ * @src: pointer to the beginning of string being copied from. Must not overlap
+ *       dest.
+ *
+ * stpcpy differs from strcpy in a key way: the return value is a pointer
+ * to the new %NUL-terminating character in @dest. (For strcpy, the return
+ * value is a pointer to the start of @dest). This interface is considered
+ * unsafe as it doesn't perform bounds checking of the inputs. As such it's
+ * not recommended for usage. Instead, its definition is provided in case
+ * the compiler lowers other libcalls to stpcpy.
+ */
+char *stpcpy(char *__restrict__ dest, const char *__restrict__ src);
+char *stpcpy(char *__restrict__ dest, const char *__restrict__ src)
+{
+	while ((*dest++ = *src++) != '\0')
+		/* nothing */;
+	return --dest;
+}
+EXPORT_SYMBOL(stpcpy);
+
 #ifndef __HAVE_ARCH_STRCAT
 /**
  * strcat - Append one %NUL-terminated string to another
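A usage sketch of what the returned end pointer buys: repeated concatenation
stays linear in total length, unlike the quadratic "strcat in a loop" pattern.
User space, reusing the same loop body as above; the 64-byte buffer is an
unchecked assumption, exactly as the kerneldoc warns:

#include <stdio.h>

/* same body as the lib/string.c implementation above */
static char *stpcpy_local(char *dest, const char *src)
{
	while ((*dest++ = *src++) != '\0')
		/* nothing */;
	return --dest;
}

int main(void)
{
	char buf[64];	/* assumed large enough; stpcpy does no bounds check */
	char *p = buf;

	p = stpcpy_local(p, "lib");	/* p now points at the new NUL */
	p = stpcpy_local(p, "/");
	p = stpcpy_local(p, "string.c");
	printf("%s (%ld bytes)\n", buf, (long)(p - buf));
	return 0;
}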
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2485,13 +2485,13 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
 	return 1;
 }
 
-static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
+static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned long end,
 		unsigned int flags, struct page **pages, int *nr)
 {
 	unsigned long next;
 	pmd_t *pmdp;
 
-	pmdp = pmd_offset(&pud, addr);
+	pmdp = pmd_offset_lockless(pudp, pud, addr);
 	do {
 		pmd_t pmd = READ_ONCE(*pmdp);
 
@@ -2528,13 +2528,13 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
 	return 1;
 }
 
-static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end,
+static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned long end,
 		unsigned int flags, struct page **pages, int *nr)
 {
 	unsigned long next;
 	pud_t *pudp;
 
-	pudp = pud_offset(&p4d, addr);
+	pudp = pud_offset_lockless(p4dp, p4d, addr);
 	do {
 		pud_t pud = READ_ONCE(*pudp);
 
@@ -2549,20 +2549,20 @@ static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end,
 			if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
 					 PUD_SHIFT, next, flags, pages, nr))
 				return 0;
-		} else if (!gup_pmd_range(pud, addr, next, flags, pages, nr))
+		} else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr))
 			return 0;
 	} while (pudp++, addr = next, addr != end);
 
 	return 1;
 }
 
-static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
+static int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, unsigned long end,
 		unsigned int flags, struct page **pages, int *nr)
 {
 	unsigned long next;
 	p4d_t *p4dp;
 
-	p4dp = p4d_offset(&pgd, addr);
+	p4dp = p4d_offset_lockless(pgdp, pgd, addr);
 	do {
 		p4d_t p4d = READ_ONCE(*p4dp);
 
@@ -2574,7 +2574,7 @@ static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
 			if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
 					 P4D_SHIFT, next, flags, pages, nr))
 				return 0;
-		} else if (!gup_pud_range(p4d, addr, next, flags, pages, nr))
+		} else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr))
 			return 0;
 	} while (p4dp++, addr = next, addr != end);
 
@@ -2602,7 +2602,7 @@ static void gup_pgd_range(unsigned long addr, unsigned long end,
 			if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
 					 PGDIR_SHIFT, next, flags, pages, nr))
 				return;
-		} else if (!gup_p4d_range(pgd, addr, next, flags, pages, nr))
+		} else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr))
 			return;
 	} while (pgdp++, addr = next, addr != end);
 }
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1538,9 +1538,9 @@ static char *memory_stat_format(struct mem_cgroup *memcg)
 		       memcg_page_state(memcg, WORKINGSET_ACTIVATE_ANON));
 	seq_buf_printf(&s, "workingset_activate_file %lu\n",
 		       memcg_page_state(memcg, WORKINGSET_ACTIVATE_FILE));
-	seq_buf_printf(&s, "workingset_restore %lu\n",
+	seq_buf_printf(&s, "workingset_restore_anon %lu\n",
 		       memcg_page_state(memcg, WORKINGSET_RESTORE_ANON));
-	seq_buf_printf(&s, "workingset_restore %lu\n",
+	seq_buf_printf(&s, "workingset_restore_file %lu\n",
 		       memcg_page_state(memcg, WORKINGSET_RESTORE_FILE));
 	seq_buf_printf(&s, "workingset_nodereclaim %lu\n",
 		       memcg_page_state(memcg, WORKINGSET_NODERECLAIM));
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -729,7 +729,7 @@ void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
 	 * are reserved so nobody should be touching them so we should be safe
 	 */
 	memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn,
-			 MEMMAP_HOTPLUG, altmap);
+			 MEMINIT_HOTPLUG, altmap);
 
 	set_zone_contiguous(zone);
 }
@@ -1080,7 +1080,8 @@ int __ref add_memory_resource(int nid, struct resource *res)
 	}
 
 	/* link memory sections under this node.*/
-	ret = link_mem_sections(nid, PFN_DOWN(start), PFN_UP(start + size - 1));
+	ret = link_mem_sections(nid, PFN_DOWN(start), PFN_UP(start + size - 1),
+				MEMINIT_HOTPLUG);
 	BUG_ON(ret);
 
 	/* create new memmap entry */
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1446,7 +1446,7 @@ retry:
 			 * Capture required information that might get lost
 			 * during migration.
 			 */
-			is_thp = PageTransHuge(page);
+			is_thp = PageTransHuge(page) && !PageHuge(page);
 			nr_subpages = thp_nr_pages(page);
 			cond_resched();
 
@@ -1472,7 +1472,7 @@ retry:
 				 * we encounter them after the rest of the list
 				 * is processed.
 				 */
-				if (PageTransHuge(page) && !PageHuge(page)) {
+				if (is_thp) {
 					lock_page(page);
 					rc = split_huge_page_to_list(page, from);
 					unlock_page(page);
@@ -1481,8 +1481,7 @@ retry:
 						nr_thp_split++;
 						goto retry;
 					}
-				}
-				if (is_thp) {
+
 					nr_thp_failed++;
 					nr_failed += nr_subpages;
 					goto out;
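The stats bug being fixed: PageTransHuge() is true for any compound head page,
including hugetlbfs ones, so hugetlb migrations were being counted in the THP
statistics. A toy C illustration; the struct and its fields are stand-ins for
the real page flags:

#include <stdbool.h>
#include <stdio.h>

struct page { bool head; bool hugetlb; };	/* stand-in flags */

static bool is_thp_before(const struct page *p)
{
	return p->head;			/* PageTransHuge() alone */
}

static bool is_thp_after(const struct page *p)
{
	return p->head && !p->hugetlb;	/* ... && !PageHuge() */
}

int main(void)
{
	struct page hugetlb_head = { .head = true, .hugetlb = true };

	printf("counted as THP before: %d, after: %d\n",
	       is_thp_before(&hugetlb_head), is_thp_after(&hugetlb_head));
	return 0;
}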
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5975,7 +5975,7 @@ overlap_memmap_init(unsigned long zone, unsigned long *pfn)
  * done. Non-atomic initialization, single-pass.
  */
 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
-		unsigned long start_pfn, enum memmap_context context,
+		unsigned long start_pfn, enum meminit_context context,
 		struct vmem_altmap *altmap)
 {
 	unsigned long pfn, end_pfn = start_pfn + size;
@@ -6007,7 +6007,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		 * There can be holes in boot-time mem_map[]s handed to this
 		 * function. They do not exist on hotplugged memory.
 		 */
-		if (context == MEMMAP_EARLY) {
+		if (context == MEMINIT_EARLY) {
 			if (overlap_memmap_init(zone, &pfn))
 				continue;
 			if (defer_init(nid, pfn, end_pfn))
@@ -6016,7 +6016,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 
 		page = pfn_to_page(pfn);
 		__init_single_page(page, pfn, zone, nid);
-		if (context == MEMMAP_HOTPLUG)
+		if (context == MEMINIT_HOTPLUG)
 			__SetPageReserved(page);
 
 		/*
@@ -6099,7 +6099,7 @@ void __ref memmap_init_zone_device(struct zone *zone,
 		 * check here not to call set_pageblock_migratetype() against
 		 * pfn out of zone.
 		 *
-		 * Please note that MEMMAP_HOTPLUG path doesn't clear memmap
+		 * Please note that MEMINIT_HOTPLUG path doesn't clear memmap
 		 * because this is done early in section_activate()
 		 */
 		if (!(pfn & (pageblock_nr_pages - 1))) {
@@ -6137,7 +6137,7 @@ void __meminit __weak memmap_init(unsigned long size, int nid,
 		if (end_pfn > start_pfn) {
 			size = end_pfn - start_pfn;
 			memmap_init_zone(size, nid, zone, start_pfn,
-					 MEMMAP_EARLY, NULL);
+					 MEMINIT_EARLY, NULL);
 		}
 	}
 }
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1078,7 +1078,7 @@ start_over:
 			goto nextsi;
 		}
 		if (size == SWAPFILE_CLUSTER) {
-			if (!(si->flags & SWP_FS))
+			if (si->flags & SWP_BLKDEV)
 				n_ret = swap_alloc_cluster(si, swp_entries);
 		} else
 			n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,