/*
 * IA-64 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002-2004 Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 2003-2004 Ken Chen <kenneth.w.chen@intel.com>
 *
 * Sep, 2003: add numa support
 * Feb, 2004: dynamic hugetlb page size via boot parameter
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

unsigned int hpage_shift = HPAGE_SHIFT_DEFAULT;

pte_t *
huge_pte_alloc (struct mm_struct *mm, unsigned long addr)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	pud = pud_alloc(mm, pgd, taddr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, taddr);
		if (pmd)
			pte = pte_alloc_map(mm, pmd, taddr);
	}
	return pte;
}

pte_t *
huge_pte_offset (struct mm_struct *mm, unsigned long addr)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	if (pgd_present(*pgd)) {
		pud = pud_offset(pgd, taddr);
		if (pud_present(*pud)) {
			pmd = pmd_offset(pud, taddr);
			if (pmd_present(*pmd))
				pte = pte_offset_map(pmd, taddr);
		}
	}

	return pte;
}
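
/*
 * Both walkers above reuse the ordinary page-table layout for huge
 * mappings: htlbpage_to_page() keeps the RGN_HPAGE region bits but
 * shifts the region offset right by HPAGE_SHIFT - PAGE_SHIFT.  Worked
 * example (assuming the default HPAGE_SHIFT of 28, i.e. 256MB pages,
 * and a 16KB PAGE_SIZE): a huge address at region offset 0x10000000
 * is walked as if it were at offset 0x10000000 >> 14 == 0x4000, so a
 * single pte backs each huge page.
 */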

#define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }

/*
 * Don't actually need to do any preparation, but need to make sure
 * the address is in the right region.
 */
int prepare_hugepage_range(unsigned long addr, unsigned long len, pgoff_t pgoff)
{
	if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
		return -EINVAL;
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	if (REGION_NUMBER(addr) != RGN_HPAGE)
		return -EINVAL;

	return 0;
}
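
/*
 * Worked example for the pgoff check above (assuming the default 256MB
 * huge pages and 16KB base pages): ~HPAGE_MASK >> PAGE_SHIFT is 0x3fff,
 * and pgoff counts PAGE_SIZE units, so pgoff must be a multiple of
 * 0x4000 (16384 base pages == one huge page); in other words the file
 * offset itself has to be huge-page aligned.
 */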

struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int write)
{
	struct page *page;
	pte_t *ptep;

	if (REGION_NUMBER(addr) != RGN_HPAGE)
		return ERR_PTR(-EINVAL);

	ptep = huge_pte_offset(mm, addr);
	if (!ptep || pte_none(*ptep))
		return NULL;
	page = pte_page(*ptep);
	page += ((addr & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}
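
/*
 * Huge mappings on ia64 live in RGN_HPAGE and are reached through the
 * scaled page-table walk above, never through huge pmd entries, so the
 * two helpers below are stubs.
 */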

int pmd_huge(pmd_t pmd)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
{
	return NULL;
}

void hugetlb_free_pgd_range(struct mmu_gather **tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	/*
	 * This is called to free hugetlb page tables.
	 *
	 * The offset of these addresses from the base of the hugetlb
	 * region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that
	 * the standard free_pgd_range will free the right page tables.
	 *
	 * If floor and ceiling are also in the hugetlb region, they
	 * must likewise be scaled down; but if outside, left unchanged.
	 */
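	/*
	 * Concretely (with the default 256MB huge pages and 16KB base
	 * pages): unmapping the first huge page gives addr .. end ==
	 * HPAGE_REGION_BASE .. HPAGE_REGION_BASE + HPAGE_SIZE, which is
	 * scaled below to a single 16KB page's span before
	 * free_pgd_range() frees the backing page tables.
	 */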

	addr = htlbpage_to_page(addr);
	end = htlbpage_to_page(end);
	if (REGION_NUMBER(floor) == RGN_HPAGE)
		floor = htlbpage_to_page(floor);
	if (REGION_NUMBER(ceiling) == RGN_HPAGE)
		ceiling = htlbpage_to_page(ceiling);

	free_pgd_range(tlb, addr, end, floor, ceiling);
}

unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct vm_area_struct *vmm;

	if (len > RGN_MAP_LIMIT)
		return -ENOMEM;
	if (len & ~HPAGE_MASK)
		return -EINVAL;

	/* This code assumes that RGN_HPAGE != 0. */
	if ((REGION_NUMBER(addr) != RGN_HPAGE) || (addr & (HPAGE_SIZE - 1)))
		addr = HPAGE_REGION_BASE;
	else
		addr = ALIGN(addr, HPAGE_SIZE);
	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
		/* At this point:  (!vmm || addr < vmm->vm_end). */
		if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
			return -ENOMEM;
		if (!vmm || (addr + len) <= vmm->vm_start)
			return addr;
		addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
	}
}
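
/*
 * Illustrative userspace sketch (not part of this file; the mount
 * point and sizes are assumptions): an mmap() of a hugetlbfs file is
 * routed here to pick an address in RGN_HPAGE.
 *
 *	int fd = open("/mnt/hugetlbfs/buf", O_CREAT | O_RDWR, 0644);
 *	size_t len = 2 * (256UL << 20);		(two 256MB huge pages)
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *
 * With a NULL hint the search above starts at HPAGE_REGION_BASE and
 * returns the first HPAGE_SIZE-aligned hole large enough for len.
 */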

static int __init hugetlb_setup_sz(char *str)
{
	u64 tr_pages;	/* bitmap of page sizes supported for TRs, from PAL */
	unsigned long long size;

	if (ia64_pal_vm_page_size(&tr_pages, NULL) != 0)
		/* shouldn't happen, but just in case. */
		tr_pages = 0x15557000UL;

	size = memparse(str, &str);
	if (*str || (size & (size-1)) || !(tr_pages & size) ||
		size <= PAGE_SIZE ||
		size >= (1UL << PAGE_SHIFT << MAX_ORDER)) {
		printk(KERN_WARNING "Invalid huge page size specified\n");
		return 1;
	}

	hpage_shift = __ffs(size);
	/*
	 * The boot cpu already executed ia64_mmu_init() and programmed
	 * HPAGE_SHIFT_DEFAULT; override that here with the new page shift.
	 */
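	/*
	 * The region register keeps its preferred page size in the
	 * rr.ps field (bits 7:2), so shifting hpage_shift left by two
	 * places the new size directly into that field.
	 */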
	ia64_set_rr(HPAGE_REGION_BASE, hpage_shift << 2);
	return 1;
}
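
/*
 * Boot-time usage (example value; the size must be a power of two that
 * the CPU's translation registers support, e.g. the 256MB default):
 *
 *	hugepagesz=256M
 */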
__setup("hugepagesz=", hugetlb_setup_sz);