mm/hugetlb.c: undo change to page mapcount in fault handler
Page mapcount should be updated only if we are sure that the page ends
up in the page table; otherwise we would leak a mapcount if we couldn't
COW due to reservations or if idx is out of bounds.

Signed-off-by: Hillf Danton <dhillf@gmail.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 409eb8c261
parent 6568d4a9c9
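For illustration, here is a minimal standalone sketch of the pattern this
patch applies: record which rmap update is needed up front, but perform it
only after every bail-out path has been passed, so an error exit can no
longer leave a leaked mapcount. All types and helper names below are
invented for the example; the real code lives in hugetlb_no_page().

	/*
	 * Standalone sketch of the defer-the-mapcount pattern used by
	 * this patch.  Everything here is hypothetical illustration,
	 * not kernel code.
	 */
	#include <stdio.h>

	struct page_sketch {
		int mapcount;
	};

	enum fault_result { FAULT_OK, FAULT_SIGBUS };

	static enum fault_result fault_sketch(struct page_sketch *page,
					      int reservation_ok,
					      int pte_already_set,
					      int anon)
	{
		int anon_rmap = 0;

		if (anon)
			anon_rmap = 1;	/* remember the choice; touch nothing yet */

		if (!reservation_ok)
			return FAULT_SIGBUS;	/* bail out, mapcount still consistent */

		if (pte_already_set)
			return FAULT_OK;	/* lost a race to another fault; also safe */

		/* Only here is the page certain to reach the page table. */
		if (anon_rmap)
			page->mapcount = 1;	/* stands in for hugepage_add_new_anon_rmap() */
		else
			page->mapcount++;	/* stands in for page_dup_rmap() */
		return FAULT_OK;
	}

	int main(void)
	{
		struct page_sketch page = { .mapcount = 0 };

		/* A failed reservation no longer bumps the count ... */
		fault_sketch(&page, 0, 0, 0);
		printf("after failed fault:     mapcount=%d\n", page.mapcount);

		/* ... while a successful fault still does. */
		fault_sketch(&page, 1, 0, 0);
		printf("after successful fault: mapcount=%d\n", page.mapcount);
		return 0;
	}

Before this patch, the equivalent of the mapcount bump sat above the
reservation and huge_pte_none() checks, so either failure path returned
with the count already raised.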
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2508,6 +2508,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 {
 	struct hstate *h = hstate_vma(vma);
 	int ret = VM_FAULT_SIGBUS;
+	int anon_rmap = 0;
 	pgoff_t idx;
 	unsigned long size;
 	struct page *page;
@@ -2562,14 +2563,13 @@ retry:
 			spin_lock(&inode->i_lock);
 			inode->i_blocks += blocks_per_huge_page(h);
 			spin_unlock(&inode->i_lock);
-			page_dup_rmap(page);
 		} else {
 			lock_page(page);
 			if (unlikely(anon_vma_prepare(vma))) {
 				ret = VM_FAULT_OOM;
 				goto backout_unlocked;
 			}
-			hugepage_add_new_anon_rmap(page, vma, address);
+			anon_rmap = 1;
 		}
 	} else {
 		/*
@@ -2582,7 +2582,6 @@ retry:
 				VM_FAULT_SET_HINDEX(h - hstates);
 			goto backout_unlocked;
 		}
-		page_dup_rmap(page);
 	}
 
 	/*
@@ -2606,6 +2605,10 @@ retry:
 	if (!huge_pte_none(huge_ptep_get(ptep)))
 		goto backout;
 
+	if (anon_rmap)
+		hugepage_add_new_anon_rmap(page, vma, address);
+	else
+		page_dup_rmap(page);
 	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
 				&& (vma->vm_flags & VM_SHARED)));
 	set_huge_pte_at(mm, address, ptep, new_pte);