[PATCH] hugepage: Make {alloc,free}_huge_page() local
Originally, mm/hugetlb.c just handled the hugepage physical allocation path, and its {alloc,free}_huge_page() functions were used from the arch-specific hugepage code.  These days those functions are only used within mm/hugetlb.c itself.  Therefore, this patch makes them static and removes their prototypes from hugetlb.h.  This requires a small rearrangement of code in mm/hugetlb.c to avoid a forward declaration.

This patch causes no regressions on the libhugetlbfs testsuite (ppc64, POWER5).

Signed-off-by: David Gibson <dwg@au1.ibm.com>
Cc: William Lee Irwin III <wli@holomorphy.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit 27a85ef1b8
parent b45b5bd65f
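The "rearrangement ... to avoid a forward declaration" comes down to a basic C visibility rule: a function with internal linkage must be defined (or at least declared) before its first use in the translation unit, so defining the now-static helper above its callers removes the need for a separate prototype.  The following standalone sketch uses made-up names (helper/caller), not the kernel code, and assumes nothing beyond standard C:

	/*
	 * Sketch only: once helper() is static, defining it ahead of
	 * caller() means no separate "static void helper(void);"
	 * forward declaration is needed -- the same reason
	 * free_huge_page() is moved above its users in mm/hugetlb.c.
	 */
	#include <stdio.h>

	static void helper(void)	/* defined before its first use */
	{
		printf("helper called\n");
	}

	static void caller(void)
	{
		helper();		/* already visible, no prototype required */
	}

	int main(void)
	{
		caller();
		return 0;
	}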
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -21,8 +21,6 @@ int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
 int hugetlb_report_meminfo(char *);
 int hugetlb_report_node_meminfo(int, char *);
 unsigned long hugetlb_total_pages(void);
-struct page *alloc_huge_page(struct vm_area_struct *, unsigned long);
-void free_huge_page(struct page *);
 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long address, int write_access);
 
@@ -97,8 +95,6 @@ static inline unsigned long hugetlb_total_pages(void)
 #define is_hugepage_only_range(mm, addr, len)	0
 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \
 						do { } while (0)
-#define alloc_huge_page(vma, addr)	({ NULL; })
-#define free_huge_page(p)	({ (void)(p); BUG(); })
 #define hugetlb_fault(mm, vma, addr, write)	({ BUG(); 0; })
 
 #define hugetlb_change_protection(vma, address, end, newprot)
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -88,6 +88,17 @@ static struct page *dequeue_huge_page(struct vm_area_struct *vma,
 	return page;
 }
 
+static void free_huge_page(struct page *page)
+{
+	BUG_ON(page_count(page));
+
+	INIT_LIST_HEAD(&page->lru);
+
+	spin_lock(&hugetlb_lock);
+	enqueue_huge_page(page);
+	spin_unlock(&hugetlb_lock);
+}
+
 static int alloc_fresh_huge_page(void)
 {
 	static int nid = 0;
@@ -107,18 +118,8 @@ static int alloc_fresh_huge_page(void)
 	return 0;
 }
 
-void free_huge_page(struct page *page)
-{
-	BUG_ON(page_count(page));
-
-	INIT_LIST_HEAD(&page->lru);
-
-	spin_lock(&hugetlb_lock);
-	enqueue_huge_page(page);
-	spin_unlock(&hugetlb_lock);
-}
-
-struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr)
+static struct page *alloc_huge_page(struct vm_area_struct *vma,
+				    unsigned long addr)
 {
 	struct inode *inode = vma->vm_file->f_dentry->d_inode;
 	struct page *page;
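For context on the second hugetlb.h hunk above: when CONFIG_HUGETLB_PAGE is not set, the header provides macro stubs so generic callers still compile; once alloc_huge_page()/free_huge_page() have no callers outside mm/hugetlb.c, the prototypes and their stubs can be deleted together.  Below is a minimal, self-contained illustration of that config-stub pattern, using hypothetical names (CONFIG_FEATURE_X, feature_alloc) and GCC statement expressions like the kernel's, not the kernel code itself:

	/* Sketch of the config-stub pattern with made-up names; not kernel code. */
	#include <stdio.h>
	#include <stdlib.h>

	#define CONFIG_FEATURE_X 1	/* set to 0 to compile the stub path instead */

	#if CONFIG_FEATURE_X
	static void *feature_alloc(unsigned long addr)
	{
		printf("real allocation for address %lu\n", addr);
		return malloc(16);
	}
	#else
	/* Stub for callers compiled without the feature; evaluates to NULL. */
	#define feature_alloc(addr)	({ (void)(addr); (void *)NULL; })
	#endif

	int main(void)
	{
		void *p = feature_alloc(42UL);
		free(p);	/* free(NULL) is a no-op on the stub path */
		return 0;
	}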