Don't lock guardpage if the stack is growing up
The Linux kernel excludes the guard page when performing mlock on a VMA with a down-growing stack. However, some architectures have an up-growing stack, and the guard page should be excluded from locking in that case too. This patch fixes lvm2 on PA-RISC (and possibly other architectures with an up-growing stack): lvm2 calculates the number of used pages when locking and again when unlocking, and reports an internal error if the numbers mismatch.

[ Patch changed fairly extensively to also fix /proc/<pid>/maps for the grows-up case, and to move things around a bit to clean it all up and share the infrastructure with the /proc bits. Tested on ia64, which has both grow-up and grow-down segments - Linus ]

Signed-off-by: Mikulas Patocka <mikulas@artax.karlin.mff.cuni.cz>
Tested-by: Tony Luck <tony.luck@gmail.com>
Cc: stable@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 26822eebb2
commit a09a79f668
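To make the lvm2 failure mode concrete, here is a rough userspace sketch (not lvm2's actual code; the accounting granularity and use of mlockall() are assumptions for illustration) of the before/after page accounting the commit message describes. On a kernel without this fix, locking an up-growing stack could fault in the guard page and grow the stack VMA between the two counts, so they disagree:

/*
 * Rough sketch only -- not lvm2's actual code. Count the pages mapped
 * before locking and after unlocking, and complain on a mismatch,
 * mimicking lvm2's internal consistency check described above.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>

static unsigned long mapped_pages(void)
{
	unsigned long start, end, pages = 0;
	long psz = sysconf(_SC_PAGESIZE);
	char line[512];
	FILE *f = fopen("/proc/self/maps", "r");

	if (!f)
		return 0;
	while (fgets(line, sizeof(line), f))
		if (sscanf(line, "%lx-%lx", &start, &end) == 2)
			pages += (end - start) / psz;
	fclose(f);
	return pages;
}

int main(void)
{
	unsigned long before = mapped_pages();

	if (mlockall(MCL_CURRENT) != 0) {
		perror("mlockall");	/* may need CAP_IPC_LOCK or a larger RLIMIT_MEMLOCK */
		return 1;
	}
	munlockall();

	unsigned long after = mapped_pages();
	if (before != after)
		fprintf(stderr, "internal error: %lu pages before, %lu after\n",
			before, after);
	return before != after;
}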
fs/proc/task_mmu.c

@@ -214,7 +214,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
 	int flags = vma->vm_flags;
 	unsigned long ino = 0;
 	unsigned long long pgoff = 0;
-	unsigned long start;
+	unsigned long start, end;
 	dev_t dev = 0;
 	int len;
 
@@ -227,13 +227,15 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
 
 	/* We don't show the stack guard page in /proc/maps */
 	start = vma->vm_start;
-	if (vma->vm_flags & VM_GROWSDOWN)
-		if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
-			start += PAGE_SIZE;
+	if (stack_guard_page_start(vma, start))
+		start += PAGE_SIZE;
+	end = vma->vm_end;
+	if (stack_guard_page_end(vma, end))
+		end -= PAGE_SIZE;
 
 	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
 			start,
-			vma->vm_end,
+			end,
 			flags & VM_READ ? 'r' : '-',
 			flags & VM_WRITE ? 'w' : '-',
 			flags & VM_EXEC ? 'x' : '-',
include/linux/mm.h

@@ -1011,11 +1011,33 @@ int set_page_dirty_lock(struct page *page);
 int clear_page_dirty_for_io(struct page *page);
 
 /* Is the vma a continuation of the stack vma above it? */
-static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
+static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
 {
 	return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
 }
 
+static inline int stack_guard_page_start(struct vm_area_struct *vma,
+					 unsigned long addr)
+{
+	return (vma->vm_flags & VM_GROWSDOWN) &&
+		(vma->vm_start == addr) &&
+		!vma_growsdown(vma->vm_prev, addr);
+}
+
+/* Is the vma a continuation of the stack vma below it? */
+static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
+{
+	return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
+}
+
+static inline int stack_guard_page_end(struct vm_area_struct *vma,
+				       unsigned long addr)
+{
+	return (vma->vm_flags & VM_GROWSUP) &&
+		(vma->vm_end == addr) &&
+		!vma_growsup(vma->vm_next, addr);
+}
+
 extern unsigned long move_page_tables(struct vm_area_struct *vma,
 		unsigned long old_addr, struct vm_area_struct *new_vma,
 		unsigned long new_addr, unsigned long len);
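To see how the new predicates pick out the guard page at either end, here is a minimal userspace mock (the struct layout, flag values, and addresses are made-up stand-ins, not the kernel's; only the predicate logic mirrors the code above). A lone grows-down stack has its guard page at vm_start, a lone grows-up stack at vm_end minus one page, and a VMA that merely continues an adjacent stack VMA has none. The rewritten stack_guard_page() in mm/memory.c below simply ORs the two checks:

/*
 * Minimal userspace mock of the helpers above -- types, flag values
 * and addresses are made up for illustration.
 */
#include <assert.h>

#define PAGE_SIZE	4096UL
#define VM_GROWSDOWN	0x1UL
#define VM_GROWSUP	0x2UL

struct vm_area_struct {
	unsigned long vm_start, vm_end, vm_flags;
	struct vm_area_struct *vm_prev, *vm_next;
};

static int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
{
	return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
}

static int stack_guard_page_start(struct vm_area_struct *vma, unsigned long addr)
{
	return (vma->vm_flags & VM_GROWSDOWN) && (vma->vm_start == addr) &&
		!vma_growsdown(vma->vm_prev, addr);
}

static int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
{
	return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
}

static int stack_guard_page_end(struct vm_area_struct *vma, unsigned long addr)
{
	return (vma->vm_flags & VM_GROWSUP) && (vma->vm_end == addr) &&
		!vma_growsup(vma->vm_next, addr);
}

int main(void)
{
	/* A lone grows-down stack: the guard page is its first page. */
	struct vm_area_struct down = {
		.vm_start = 0xbffdf000, .vm_end = 0xc0000000,
		.vm_flags = VM_GROWSDOWN,
	};
	/* A lone grows-up stack (e.g. PA-RISC): the guard page is its last page. */
	struct vm_area_struct up = {
		.vm_start = 0x40000000, .vm_end = 0x40021000,
		.vm_flags = VM_GROWSUP,
	};
	/* Two abutting grows-down VMAs: the upper one continues the stack,
	 * so its first page is not a guard page. */
	struct vm_area_struct lower = down, upper = down;

	assert(stack_guard_page_start(&down, down.vm_start));
	assert(!stack_guard_page_end(&down, down.vm_end));
	assert(!stack_guard_page_start(&up, up.vm_start));
	assert(stack_guard_page_end(&up, up.vm_end));

	lower.vm_start = 0xbffcf000;
	lower.vm_end = upper.vm_start = 0xbffdf000;
	upper.vm_prev = &lower;
	assert(!stack_guard_page_start(&upper, upper.vm_start));
	return 0;
}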
mm/memory.c
@@ -1412,9 +1412,8 @@ no_page_table:
 
 static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
 {
-	return (vma->vm_flags & VM_GROWSDOWN) &&
-		(vma->vm_start == addr) &&
-		!vma_stack_continue(vma->vm_prev, addr);
+	return stack_guard_page_start(vma, addr) ||
+	       stack_guard_page_end(vma, addr+PAGE_SIZE);
 }
 
 /**
@@ -1551,12 +1550,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 			continue;
 		}
 
-		/*
-		 * For mlock, just skip the stack guard page.
-		 */
-		if ((gup_flags & FOLL_MLOCK) && stack_guard_page(vma, start))
-			goto next_page;
-
 		do {
 			struct page *page;
 			unsigned int foll_flags = gup_flags;
@@ -1573,6 +1566,11 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 				int ret;
 				unsigned int fault_flags = 0;
 
+				/* For mlock, just skip the stack guard page. */
+				if (foll_flags & FOLL_MLOCK) {
+					if (stack_guard_page(vma, start))
+						goto next_page;
+				}
 				if (foll_flags & FOLL_WRITE)
 					fault_flags |= FAULT_FLAG_WRITE;
 				if (nonblocking)
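As a usage-level sanity check (a sketch only; it assumes the kernel labels the stack mapping "[stack]" in /proc/self/maps and that RLIMIT_MEMLOCK permits the lock), one can mlock() the stack range /proc reports. After this change, the reported range and the pages the FOLL_MLOCK path faults in agree for both stack directions, because /proc and the mlock path now share the same guard-page helpers:

/*
 * Sketch: lock the [stack] range reported by /proc/self/maps. With
 * this patch that range excludes the guard page on both grows-down
 * and grows-up stacks, matching what FOLL_MLOCK will actually lock.
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	unsigned long start = 0, end = 0;
	char line[512];
	FILE *f = fopen("/proc/self/maps", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		if (strstr(line, "[stack]")) {
			sscanf(line, "%lx-%lx", &start, &end);
			break;
		}
	}
	fclose(f);

	if (!start || mlock((void *)start, end - start) != 0) {
		perror("mlock");
		return 1;
	}
	printf("locked stack %#lx-%#lx\n", start, end);
	return 0;
}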