x86, mm: Add alloc_low_pages(num)

The 32-bit kmap mapping needs its page-table pages to be used from low to high.
At this point those pages still come from the pgt_buf_* area in BRK, so this
is fine for now.
But we want to move early_ioremap_page_table_range_init() out of
init_memory_mapping() and call it only once, later; that will make
page_table_range_init()/page_table_kmap_check()/alloc_low_page() get
their pages from memblock instead.

memblock allocates pages from high to low. That would trip the BUG_ON in
page_table_kmap_check(), which checks that successive pte pages are handed
out in ascending order.

This patch adds alloc_low_pages() to make it possible to allocate several
pages up front and then hand them out one by one, from low to high.
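
To illustrate the ordering problem, here is a minimal standalone sketch
(hypothetical userspace C, not kernel code; the addresses and sizes are made
up): a top-down allocator returns a lower address on every call, so per-page
allocations come out descending, while one batched reservation handed out
front-to-back yields ascending pages.

    #include <stdio.h>

    /* simulate a memblock-style top-down allocator */
    static unsigned long top = 0x38000000UL;    /* highest free address */

    static unsigned long alloc_top_down(unsigned long size)
    {
            top -= size;    /* each call returns a lower address */
            return top;
    }

    int main(void)
    {
            unsigned long p0, p1, batch, i;

            /* two independent 4k allocations: descending order */
            p0 = alloc_top_down(0x1000);
            p1 = alloc_top_down(0x1000);
            printf("per-page: %#lx then %#lx (descending)\n", p0, p1);

            /* one batched reservation, pages handed out low to high */
            batch = alloc_top_down(4 * 0x1000);
            for (i = 0; i < 4; i++)
                    printf("batched:  page %lu at %#lx (ascending)\n",
                           i, batch + i * 0x1000);
            return 0;
    }

The kmap ordering check requires each new pte page to directly follow the
previous one; the batched path satisfies that, the per-page path does not.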

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/1353123563-3103-28-git-send-email-yinghai@kernel.org
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>

diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c

@@ -25,36 +25,45 @@ unsigned long __meminitdata pgt_buf_top;
 
 static unsigned long min_pfn_mapped;
 
-__ref void *alloc_low_page(void)
+__ref void *alloc_low_pages(unsigned int num)
 {
 	unsigned long pfn;
-	void *adr;
+	int i;
 
 #ifdef CONFIG_X86_64
 	if (after_bootmem) {
-		adr = (void *)get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
+		unsigned int order;
 
-		return adr;
+		order = get_order((unsigned long)num << PAGE_SHIFT);
+		return (void *)__get_free_pages(GFP_ATOMIC | __GFP_NOTRACK |
+						__GFP_ZERO, order);
 	}
 #endif
 
-	if ((pgt_buf_end + 1) >= pgt_buf_top) {
+	if ((pgt_buf_end + num) >= pgt_buf_top) {
 		unsigned long ret;
 		if (min_pfn_mapped >= max_pfn_mapped)
 			panic("alloc_low_page: ran out of memory");
 		ret = memblock_find_in_range(min_pfn_mapped << PAGE_SHIFT,
 					max_pfn_mapped << PAGE_SHIFT,
-					PAGE_SIZE, PAGE_SIZE);
+					PAGE_SIZE * num, PAGE_SIZE);
 		if (!ret)
 			panic("alloc_low_page: can not alloc memory");
-		memblock_reserve(ret, PAGE_SIZE);
+		memblock_reserve(ret, PAGE_SIZE * num);
 		pfn = ret >> PAGE_SHIFT;
-	} else
-		pfn = pgt_buf_end++;
+	} else {
+		pfn = pgt_buf_end;
+		pgt_buf_end += num;
+	}
 
-	adr = __va(pfn * PAGE_SIZE);
-	clear_page(adr);
-	return adr;
+	for (i = 0; i < num; i++) {
+		void *adr;
+
+		adr = __va((pfn + i) << PAGE_SHIFT);
+		clear_page(adr);
+	}
+
+	return __va(pfn << PAGE_SHIFT);
 }
 
 /* need 4 4k for initial PMD_SIZE, 4k for 0-ISA_END_ADDRESS */
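
A side note on the after_bootmem path above (my observation, not from the
changelog): get_order() rounds the request up to the next power of two, so a
non-power-of-two num over-allocates slightly there. For example, with 4k
pages:

    order = get_order((unsigned long)3 << PAGE_SHIFT);  /* == 2 */
    /* __get_free_pages(..., 2) returns 4 pages for a 3-page request */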

diff --git a/arch/x86/mm/mm_internal.h b/arch/x86/mm/mm_internal.h
--- a/arch/x86/mm/mm_internal.h
+++ b/arch/x86/mm/mm_internal.h

@@ -1,6 +1,10 @@
 #ifndef __X86_MM_INTERNAL_H
 #define __X86_MM_INTERNAL_H
 
-void *alloc_low_page(void);
+void *alloc_low_pages(unsigned int num);
+static inline void *alloc_low_page(void)
+{
+	return alloc_low_pages(1);
+}
 
 #endif	/* __X86_MM_INTERNAL_H */
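
With the static inline wrapper, existing alloc_low_page() callers compile
unchanged while new code can batch. A hedged usage sketch (the caller below
is illustrative, not part of this patch):

    pte_t *pte    = alloc_low_page();      /* existing callers: one zeroed page */
    pte_t *base   = alloc_low_pages(4);    /* one reservation: 4 contiguous zeroed pages */
    pte_t *second = base + PTRS_PER_PTE;   /* hand out page by page, low to high */

Because the pages come from a single reservation, consecutive pte pages are
contiguous and ascending, which is exactly what page_table_kmap_check()'s
ordering BUG_ON expects.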