hugetlbfs: handle pages higher order than MAX_ORDER
When working with hugepages, hugetlbfs assumes that those hugepages are smaller than MAX_ORDER.  Specifically, it assumes that the mem_map is contiguous and uses that to optimise access to the elements of the mem_map that represent the hugepage.  Gigantic pages (such as 16GB pages on powerpc) by definition are of greater order than MAX_ORDER (larger than MAX_ORDER_NR_PAGES in size).  This means that we can no longer make use of the buddy allocator guarantees for the contiguity of the mem_map, which ensure that the mem_map is at least contiguous for maximally aligned areas of MAX_ORDER_NR_PAGES pages.

This patch adds new mem_map accessors and iterator helpers which handle any discontiguity at MAX_ORDER_NR_PAGES boundaries.  It then uses these to implement gigantic page versions of copy_huge_page and clear_huge_page, and to allow follow_hugetlb_page to handle gigantic pages.

Signed-off-by: Andy Whitcroft <apw@shadowen.org>
Cc: Jon Tollefson <kniht@linux.vnet.ibm.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: <stable@kernel.org>		[2.6.27.x]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 69d177c2fc
parent 22bece00dc
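For orientation before the diff: the patch adds two helpers to mm/internal.h, mem_map_offset() and mem_map_next(), which let callers index or walk the subpages of a gigantic page without assuming a contiguous mem_map.  The sketch below is illustrative only and is not part of the patch; for_each_subpage_example() and its visit_subpage callback are hypothetical names, and the include lines are an assumption about where such a caller would live.  It mirrors the iteration pattern of clear_gigantic_page() and copy_gigantic_page() in the hunks that follow.

#include <linux/mm.h>
#include "internal.h"	/* mem_map_offset()/mem_map_next(), added by this patch */

/*
 * Illustrative only: visit every base page of a (possibly gigantic)
 * huge page without relying on a contiguous mem_map.
 */
static void for_each_subpage_example(struct page *head, unsigned long nr_pages,
				     void (*visit_subpage)(struct page *))
{
	unsigned long i;
	struct page *p = head;

	for (i = 0; i < nr_pages; i++, p = mem_map_next(p, head, i)) {
		/*
		 * Inside a maximally aligned MAX_ORDER_NR_PAGES run the
		 * mem_map is contiguous and mem_map_next() is just p + 1;
		 * at a run boundary it re-derives the struct page from
		 * the pfn, because the next entry may not be adjacent.
		 */
		visit_subpage(p);
	}
}

The point of the design is that the common case stays a plain pointer increment; only every MAX_ORDER_NR_PAGES-th step pays for a pfn_to_page() lookup.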
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -354,11 +354,26 @@ static int vma_has_reserves(struct vm_area_struct *vma)
 	return 0;
 }
 
+static void clear_gigantic_page(struct page *page,
+			unsigned long addr, unsigned long sz)
+{
+	int i;
+	struct page *p = page;
+
+	might_sleep();
+	for (i = 0; i < sz/PAGE_SIZE; i++, p = mem_map_next(p, page, i)) {
+		cond_resched();
+		clear_user_highpage(p, addr + i * PAGE_SIZE);
+	}
+}
 static void clear_huge_page(struct page *page,
 			unsigned long addr, unsigned long sz)
 {
 	int i;
 
+	if (unlikely(sz > MAX_ORDER_NR_PAGES))
+		return clear_gigantic_page(page, addr, sz);
+
 	might_sleep();
 	for (i = 0; i < sz/PAGE_SIZE; i++) {
 		cond_resched();
@@ -366,12 +381,32 @@ static void clear_huge_page(struct page *page,
 	}
 }
 
+static void copy_gigantic_page(struct page *dst, struct page *src,
+			   unsigned long addr, struct vm_area_struct *vma)
+{
+	int i;
+	struct hstate *h = hstate_vma(vma);
+	struct page *dst_base = dst;
+	struct page *src_base = src;
+	might_sleep();
+	for (i = 0; i < pages_per_huge_page(h); ) {
+		cond_resched();
+		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
+
+		i++;
+		dst = mem_map_next(dst, dst_base, i);
+		src = mem_map_next(src, src_base, i);
+	}
+}
 static void copy_huge_page(struct page *dst, struct page *src,
 			   unsigned long addr, struct vm_area_struct *vma)
 {
 	int i;
 	struct hstate *h = hstate_vma(vma);
 
+	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES))
+		return copy_gigantic_page(dst, src, addr, vma);
+
 	might_sleep();
 	for (i = 0; i < pages_per_huge_page(h); i++) {
 		cond_resched();
@@ -2130,7 +2165,7 @@ same_page:
 			if (zeropage_ok)
 				pages[i] = ZERO_PAGE(0);
 			else
-				pages[i] = page + pfn_offset;
+				pages[i] = mem_map_offset(page, pfn_offset);
 			get_page(pages[i]);
 		}
 
diff --git a/mm/internal.h b/mm/internal.h
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -175,6 +175,34 @@ static inline void free_page_mlock(struct page *page) { }
 
 #endif /* CONFIG_UNEVICTABLE_LRU */
 
+/*
+ * Return the mem_map entry representing the 'offset' subpage within
+ * the maximally aligned gigantic page 'base'. Handle any discontiguity
+ * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
+ */
+static inline struct page *mem_map_offset(struct page *base, int offset)
+{
+	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
+		return pfn_to_page(page_to_pfn(base) + offset);
+	return base + offset;
+}
+
+/*
+ * Iterator over all subpages withing the maximally aligned gigantic
+ * page 'base'. Handle any discontiguity in the mem_map.
+ */
+static inline struct page *mem_map_next(struct page *iter,
+						struct page *base, int offset)
+{
+	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
+		unsigned long pfn = page_to_pfn(base) + offset;
+		if (!pfn_valid(pfn))
+			return NULL;
+		return pfn_to_page(pfn);
+	}
+	return iter + 1;
+}
+
 /*
  * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node,
  * so all functions starting at paging_init should be marked __init
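A rough sense of scale, using assumed values rather than anything taken from the patch: with 4KB base pages and the common default MAX_ORDER of 11, MAX_ORDER_NR_PAGES is 1024, so the mem_map is only guaranteed contiguous across maximally aligned 4MB ranges.  A 16GB gigantic page then covers roughly four million base pages and crosses thousands of such boundaries, which is why both the offset accessor used in the follow_hugetlb_page() hunk and the iterator used by the clear/copy paths must be able to fall back to pfn_to_page().  The userspace snippet below just works that arithmetic through; the constants are assumptions (powerpc configurations with 64KB pages would differ).

/* Userspace illustration only -- not kernel code.  Assumed values:
 * 4KB base pages and MAX_ORDER = 11; real configurations differ. */
#include <stdio.h>

#define PAGE_SIZE		4096UL
#define MAX_ORDER		11
#define MAX_ORDER_NR_PAGES	(1UL << (MAX_ORDER - 1))

int main(void)
{
	unsigned long huge_sz = 16UL << 30;		/* one 16GB gigantic page */
	unsigned long nr_pages = huge_sz / PAGE_SIZE;	/* base pages it covers */
	unsigned long i, crossings = 0;

	/* Count the subpage offsets at which a mem_map_next()-style walk
	 * must fall back to pfn_to_page(): every maximally aligned
	 * MAX_ORDER_NR_PAGES boundary inside the gigantic page. */
	for (i = 1; i < nr_pages; i++)
		if ((i & (MAX_ORDER_NR_PAGES - 1)) == 0)
			crossings++;

	printf("%lu base pages, %lu MAX_ORDER-run boundary crossings\n",
	       nr_pages, crossings);
	return 0;
}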