[PATCH] Pass vma argument to copy_user_highpage().

To allow a more effective copy_user_highpage() on certain architectures,
a vma argument is added to the function and cow_user_page() allowing
the implementation of these functions to check for the VM_EXEC bit.

The main part of this patch was originally written by Ralf Baechle;
Atsushi Nemoto did the debugging.

Signed-off-by: Atsushi Nemoto <anemo@mba.ocn.ne.jp>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
Atsushi Nemoto 2006-12-12 17:14:55 +00:00 committed by Linus Torvalds
parent 77fff4ae2b
commit 9de455b207
3 changed files with 10 additions and 9 deletions

View File

@@ -98,7 +98,8 @@ static inline void memclear_highpage_flush(struct page *page, unsigned int offset)
 #ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
-static inline void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr)
+static inline void copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr, struct vm_area_struct *vma)
 {
 	char *vfrom, *vto;

View File

@@ -44,14 +44,14 @@ static void clear_huge_page(struct page *page, unsigned long addr)
 }
 
 static void copy_huge_page(struct page *dst, struct page *src,
-			   unsigned long addr)
+			   unsigned long addr, struct vm_area_struct *vma)
 {
 	int i;
 
 	might_sleep();
 	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
 		cond_resched();
-		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE);
+		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
 	}
 }
@@ -442,7 +442,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 
 	spin_unlock(&mm->page_table_lock);
-	copy_huge_page(new_page, old_page, address);
+	copy_huge_page(new_page, old_page, address, vma);
 	spin_lock(&mm->page_table_lock);
 	ptep = huge_pte_offset(mm, address & HPAGE_MASK);

View File

@@ -1441,7 +1441,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 	return pte;
 }
 
-static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va)
+static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
 {
 	/*
 	 * If the source page was a PFN mapping, we don't have
@@ -1466,7 +1466,7 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
 		return;
 	}
 
-	copy_user_highpage(dst, src, va);
+	copy_user_highpage(dst, src, va, vma);
 }
 
 /*
@@ -1577,7 +1577,7 @@ gotten:
 		new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
 		if (!new_page)
 			goto oom;
-		cow_user_page(new_page, old_page, address);
+		cow_user_page(new_page, old_page, address, vma);
 	}
 
 	/*
@@ -2200,7 +2200,7 @@ retry:
 			page = alloc_page_vma(GFP_HIGHUSER, vma, address);
 			if (!page)
 				goto oom;
-			copy_user_highpage(page, new_page, address);
+			copy_user_highpage(page, new_page, address, vma);
 			page_cache_release(new_page);
 			new_page = page;
 			anon = 1;