[PATCH] hugetlb: fix linked list corruption in unmap_hugepage_range()

Commit fe1668ae5b causes the kernel to oops with the libhugetlbfs test
suite.  The problem is that hugetlb pages can be shared by multiple
mappings.  Multiple threads can fight over page->lru in the unmap path
and bad things happen.  We now serialize __unmap_hugepage_range to avoid
concurrent linked list manipulation.  Such serialization is also needed
for the shared page table page on hugetlb areas.  This patch fixes the
bug and also serves as a prepatch for shared page tables.
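
To illustrate the pattern (this sketch is not part of the patch): route
every unmapper through a wrapper that takes the mapping-wide lock before
the shared lists are touched.  The snippet below is a minimal userspace
analogue using pthreads; struct mapping, unmap_range() and __unmap_range()
are hypothetical stand-ins for the kernel's address_space,
unmap_hugepage_range() and __unmap_hugepage_range().

/* Illustrative userspace sketch of the locking pattern (not kernel code). */
#include <pthread.h>
#include <stdio.h>

struct mapping {
	pthread_mutex_t i_mmap_lock;	/* stand-in for f_mapping->i_mmap_lock */
};

/*
 * Worker that walks the range and manipulates shared linked lists
 * (stand-in for __unmap_hugepage_range()); it must never run
 * concurrently for the same mapping.
 */
static void __unmap_range(struct mapping *m, unsigned long start, unsigned long end)
{
	(void)m;
	printf("unmapping [%#lx, %#lx)\n", start, end);
}

/*
 * Public entry point: serialize all callers on the per-mapping lock,
 * mirroring the new unmap_hugepage_range() wrapper added by the patch.
 */
static void unmap_range(struct mapping *m, unsigned long start, unsigned long end)
{
	pthread_mutex_lock(&m->i_mmap_lock);
	__unmap_range(m, start, end);
	pthread_mutex_unlock(&m->i_mmap_lock);
}

int main(void)
{
	struct mapping m;

	pthread_mutex_init(&m.i_mmap_lock, NULL);
	unmap_range(&m, 0x400000UL, 0x600000UL);
	pthread_mutex_destroy(&m.i_mmap_lock);
	return 0;
}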

Signed-off-by: Ken Chen <kenneth.w.chen@intel.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Author:    Chen, Kenneth W <kenneth.w.chen@intel.com>
Date:      2006-10-11 01:20:46 -07:00
Committer: Linus Torvalds
commit 502717f4e1 (parent 97c7801cd5)
3 changed files with 22 additions and 3 deletions

--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -293,7 +293,7 @@ hugetlb_vmtruncate_list(struct prio_tree_root *root, unsigned long h_pgoff)
 		if (h_vm_pgoff >= h_pgoff)
 			v_offset = 0;
 
-		unmap_hugepage_range(vma,
+		__unmap_hugepage_range(vma,
 				vma->vm_start + v_offset, vma->vm_end);
 	}
 }

--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -17,6 +17,7 @@ int hugetlb_sysctl_handler(struct ctl_table *, int, struct file *, void __user *
 int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
 int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, unsigned long *, int *, int);
 void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long);
+void __unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long);
 int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
 int hugetlb_report_meminfo(char *);
 int hugetlb_report_node_meminfo(int, char *);

--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -356,8 +356,8 @@ nomem:
 	return -ENOMEM;
 }
 
-void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
-			  unsigned long end)
+void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
+			    unsigned long end)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long address;
@@ -398,6 +398,24 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 	}
 }
 
+void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
+			  unsigned long end)
+{
+	/*
+	 * It is undesirable to test vma->vm_file as it should be non-null
+	 * for valid hugetlb area. However, vm_file will be NULL in the error
+	 * cleanup path of do_mmap_pgoff. When hugetlbfs ->mmap method fails,
+	 * do_mmap_pgoff() nullifies vma->vm_file before calling this function
+	 * to clean up. Since no pte has actually been setup, it is safe to
+	 * do nothing in this case.
+	 */
+	if (vma->vm_file) {
+		spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
+		__unmap_hugepage_range(vma, start, end);
+		spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
+	}
+}
+
 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 		       unsigned long address, pte_t *ptep, pte_t pte)
 {
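
A design note on the first hunk, again using the hypothetical userspace
stand-ins from the sketch above: the hugetlbfs truncate path already holds
the mapping's i_mmap_lock while it walks the prio tree (hugetlb_vmtruncate()
takes the lock before calling hugetlb_vmtruncate_list()), so that caller is
switched to the unlocked __unmap_hugepage_range() variant rather than the new
wrapper, which would otherwise deadlock on the non-recursive spinlock.

/*
 * Illustrative sketch only (userspace pthread analogue, not kernel code).
 * struct mapping and __unmap_range() are the same hypothetical stand-ins
 * used above; the point here is the truncate-style caller.
 */
#include <pthread.h>

struct mapping {
	pthread_mutex_t i_mmap_lock;	/* stand-in for i_mmap_lock */
};

void __unmap_range(struct mapping *m, unsigned long start, unsigned long end);

/*
 * A caller that already holds the per-mapping lock for its whole tree
 * walk (as the hugetlbfs truncate path does) must use the unlocked
 * __ variant directly; calling the locking wrapper here would deadlock.
 */
void truncate_range(struct mapping *m, unsigned long start, unsigned long end)
{
	pthread_mutex_lock(&m->i_mmap_lock);
	/* ... walk every VMA that maps this range of the file ... */
	__unmap_range(m, start, end);
	pthread_mutex_unlock(&m->i_mmap_lock);
}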