Fix ZERO_PAGE breakage with vmware
Commit 89f5b7da2a ("Reinstate ZERO_PAGE optimization in
'get_user_pages()' and fix XIP") broke vmware, as reported by Jeff
Chua:
"This broke vmware 6.0.4.
Jun 22 14:53:03.845: vmx| NOT_IMPLEMENTED
/build/mts/release/bora-93057/bora/vmx/main/vmmonPosix.c:774"
and the reason seems to be that there's an old bug in how we handle
FOLL_ANON on VM_SHARED areas in get_user_pages(), but since it only
triggered if the whole page table was missing, nobody had apparently
hit it before.
The recent changes to 'follow_page()' made the FOLL_ANON logic trigger
not just for whole missing page tables, but for individual pages as
well, and exposed this problem.
This fixes it by making the test for when FOLL_ANON is used more
careful, and also makes the code easier to read and understand by moving
the logic to a separate inline function.
Reported-and-tested-by: Jeff Chua <jeff.chua.linux@gmail.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
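
[Editorial aside, not part of the original commit.] The constraint the new
use_zero_page() helper enforces is the one spelled out in its comment: a
VM_SHARED mapping must be resolved through the page tables so that every
user of the mapping sees the same page. The sketch below is illustrative
only (the file path and variable names are made up) and shows the
coherence requirement from userspace; the actual breakage hit the same
requirement when vmware's vmmon driver pinned such pages with a read-only
get_user_pages(), where FOLL_ANON could wrongly substitute ZERO_PAGE.

/*
 * Illustrative sketch only: two MAP_SHARED mappings of the same file
 * must stay coherent.  The file path is arbitrary.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/tmp/zero_page_demo", O_RDWR | O_CREAT | O_TRUNC, 0600);
        if (fd < 0 || ftruncate(fd, 4096) < 0)
                return 1;

        /* Two independent shared mappings of the same page. */
        char *writer = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        char *reader = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
        if (writer == MAP_FAILED || reader == MAP_FAILED)
                return 1;

        strcpy(writer, "hello");        /* populate the shared page */

        /*
         * 'reader' has never been touched, so its page table is still empty.
         * A plain load faults the page in correctly; the bug fixed by this
         * commit hit the same requirement when a driver pinned this page
         * with a read-only get_user_pages(): handing back ZERO_PAGE instead
         * of the real page-cache page would yield zeroes, not "hello".
         */
        printf("%s\n", reader);         /* must print "hello" */

        munmap(reader, 4096);
        munmap(writer, 4096);
        close(fd);
        return 0;
}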
parent 96a331b1d6
commit 672ca28e30

 mm/memory.c | 23 +++++++++++++++++++++--
 1 file changed, 21 insertions(+), 2 deletions(-)
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1045,6 +1045,26 @@ no_page_table:
         return page;
 }
 
+/* Can we do the FOLL_ANON optimization? */
+static inline int use_zero_page(struct vm_area_struct *vma)
+{
+        /*
+         * We don't want to optimize FOLL_ANON for make_pages_present()
+         * when it tries to page in a VM_LOCKED region. As to VM_SHARED,
+         * we want to get the page from the page tables to make sure
+         * that we serialize and update with any other user of that
+         * mapping.
+         */
+        if (vma->vm_flags & (VM_LOCKED | VM_SHARED))
+                return 0;
+        /*
+         * And if we have a fault or a nopfn routine, it's not an
+         * anonymous region.
+         */
+        return !vma->vm_ops ||
+                (!vma->vm_ops->fault && !vma->vm_ops->nopfn);
+}
+
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                 unsigned long start, int len, int write, int force,
                 struct page **pages, struct vm_area_struct **vmas)
@@ -1119,8 +1139,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                         foll_flags = FOLL_TOUCH;
                         if (pages)
                                 foll_flags |= FOLL_GET;
-                        if (!write && !(vma->vm_flags & VM_LOCKED) &&
-                            (!vma->vm_ops || !vma->vm_ops->fault))
+                        if (!write && use_zero_page(vma))
                                 foll_flags |= FOLL_ANON;
 
                         do {
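
[Editorial aside, not part of the original commit.] For context on the call
site changed in the second hunk, the fragment below is a hedged sketch of a
driver-side caller (the function name is made up, and it is not taken from
vmware or this commit) using the 2.6.26-era get_user_pages() signature shown
above; write == 0 is exactly the case in which use_zero_page() now decides
whether FOLL_ANON may be set.

/* Hedged sketch of a read-only pin; pin_user_page_for_read() is invented. */
#include <linux/mm.h>
#include <linux/sched.h>

static struct page *pin_user_page_for_read(unsigned long uaddr)
{
        struct page *page = NULL;
        int ret;

        down_read(&current->mm->mmap_sem);
        ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK,
                             1,         /* len: one page */
                             0,         /* write == 0: the FOLL_ANON-relevant case */
                             0,         /* force == 0 */
                             &page, NULL);
        up_read(&current->mm->mmap_sem);

        /* Caller must put_page() when done with the pinned page. */
        return ret == 1 ? page : NULL;
}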