mm: memory hotplug: Check if pages are correctly reserved on a per-section basis
(Resending as I am not seeing it in -next so maybe it got lost) mm: memory hotplug: Check if pages are correctly reserved on a per-section basis It is expected that memory being brought online is PageReserved similar to what happens when the page allocator is being brought up. Memory is onlined in "memory blocks" which consist of one or more sections. Unfortunately, the code that verifies PageReserved is currently assuming that the memmap backing all these pages is virtually contiguous which is only the case when CONFIG_SPARSEMEM_VMEMMAP is set. As a result, memory hot-add is failing on those configurations with the message: kernel: section number XXX page number 256 not reserved, was it already online? This patch updates the PageReserved check to look up the struct page once per section to guarantee the correct struct page is being checked. [Check pages within sections properly: rientjes@google.com] [original patch by: nfont@linux.vnet.ibm.com] Signed-off-by: Mel Gorman <mgorman@suse.de> Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Tested-by: Nathan Fontenot <nfont@linux.vnet.ibm.com> Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
This commit is contained in:
parent
de0ed36a3e
commit
2bbcb87883
|
@ -223,6 +223,42 @@ int memory_isolate_notify(unsigned long val, void *v)
|
|||
return atomic_notifier_call_chain(&memory_isolate_chain, val, v);
|
||||
}
|
||||
|
||||
/*
|
||||
* The probe routines leave the pages reserved, just as the bootmem code does.
|
||||
* Make sure they're still that way.
|
||||
*/
|
||||
static bool pages_correctly_reserved(unsigned long start_pfn,
|
||||
unsigned long nr_pages)
|
||||
{
|
||||
int i, j;
|
||||
struct page *page;
|
||||
unsigned long pfn = start_pfn;
|
||||
|
||||
/*
|
||||
* memmap between sections is not contiguous except with
|
||||
* SPARSEMEM_VMEMMAP. We lookup the page once per section
|
||||
* and assume memmap is contiguous within each section
|
||||
*/
|
||||
for (i = 0; i < sections_per_block; i++, pfn += PAGES_PER_SECTION) {
|
||||
if (WARN_ON_ONCE(!pfn_valid(pfn)))
|
||||
return false;
|
||||
page = pfn_to_page(pfn);
|
||||
|
||||
for (j = 0; j < PAGES_PER_SECTION; j++) {
|
||||
if (PageReserved(page + j))
|
||||
continue;
|
||||
|
||||
printk(KERN_WARNING "section number %ld page number %d "
|
||||
"not reserved, was it already online?\n",
|
||||
pfn_to_section_nr(pfn), j);
|
||||
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
|
||||
* OK to have direct references to sparsemem variables in here.
|
||||
|
@ -230,7 +266,6 @@ int memory_isolate_notify(unsigned long val, void *v)
|
|||
static int
|
||||
memory_block_action(unsigned long phys_index, unsigned long action)
|
||||
{
|
||||
int i;
|
||||
unsigned long start_pfn, start_paddr;
|
||||
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
|
||||
struct page *first_page;
|
||||
|
@ -238,26 +273,13 @@ memory_block_action(unsigned long phys_index, unsigned long action)
|
|||
|
||||
first_page = pfn_to_page(phys_index << PFN_SECTION_SHIFT);
|
||||
|
||||
/*
|
||||
* The probe routines leave the pages reserved, just
|
||||
* as the bootmem code does. Make sure they're still
|
||||
* that way.
|
||||
*/
|
||||
if (action == MEM_ONLINE) {
|
||||
for (i = 0; i < nr_pages; i++) {
|
||||
if (PageReserved(first_page+i))
|
||||
continue;
|
||||
|
||||
printk(KERN_WARNING "section number %ld page number %d "
|
||||
"not reserved, was it already online?\n",
|
||||
phys_index, i);
|
||||
return -EBUSY;
|
||||
}
|
||||
}
|
||||
|
||||
switch (action) {
|
||||
case MEM_ONLINE:
|
||||
start_pfn = page_to_pfn(first_page);
|
||||
|
||||
if (!pages_correctly_reserved(start_pfn, nr_pages))
|
||||
return -EBUSY;
|
||||
|
||||
ret = online_pages(start_pfn, nr_pages);
|
||||
break;
|
||||
case MEM_OFFLINE:
|
||||
|
|
Loading…
Reference in New Issue