x86/vdso: Add mremap hook to vm_special_mapping
Add possibility for 32-bit user-space applications to move the vDSO mapping. Previously, when a user-space app called mremap() for the vDSO address, in the syscall return path it would land on the previous address of the vDSO page, resulting in segmentation violation. Now it lands fine and returns to userspace with a remapped vDSO. This will also fix the context.vdso pointer for 64-bit, which does not affect the user of vDSO after mremap() currently, but this may change in the future. As suggested by Andy, return -EINVAL for mremap() that would split the vDSO image: that operation cannot possibly result in a working system so reject it. Renamed and moved the text_mapping structure declaration inside map_vdso(), as it is used only there and now it complements the vvar_mapping variable. There is still a problem for remapping the vDSO in glibc applications: the linker relocates addresses for syscalls on the vDSO page, so you need to relink with the new addresses. Without that the next syscall through glibc may fail: Program received signal SIGSEGV, Segmentation fault. #0 0xf7fd9b80 in __kernel_vsyscall () #1 0xf7ec8238 in _exit () from /usr/lib32/libc.so.6 Signed-off-by: Dmitry Safonov <dsafonov@virtuozzo.com> Acked-by: Andy Lutomirski <luto@kernel.org> Cc: 0x7f454c46@gmail.com Cc: Borislav Petkov <bp@alien8.de> Cc: Brian Gerst <brgerst@gmail.com> Cc: Denys Vlasenko <dvlasenk@redhat.com> Cc: H. Peter Anvin <hpa@zytor.com> Cc: Josh Poimboeuf <jpoimboe@redhat.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20160628113539.13606-2-dsafonov@virtuozzo.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent
39380b80d7
commit
b059a453b1
|
@ -12,6 +12,7 @@
|
||||||
#include <linux/random.h>
|
#include <linux/random.h>
|
||||||
#include <linux/elf.h>
|
#include <linux/elf.h>
|
||||||
#include <linux/cpu.h>
|
#include <linux/cpu.h>
|
||||||
|
#include <linux/ptrace.h>
|
||||||
#include <asm/pvclock.h>
|
#include <asm/pvclock.h>
|
||||||
#include <asm/vgtod.h>
|
#include <asm/vgtod.h>
|
||||||
#include <asm/proto.h>
|
#include <asm/proto.h>
|
||||||
|
@ -97,10 +98,40 @@ static int vdso_fault(const struct vm_special_mapping *sm,
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
 * After an mremap() of the vDSO, a 32-bit task may still have its saved
 * user IP parked on the int80 landing pad inside the old mapping; point
 * it into the moved mapping so the syscall return path does not fault.
 * No-op for tasks that are not in a 32-bit syscall on the 32-bit image.
 */
static void vdso_fix_landing(const struct vdso_image *image,
			     struct vm_area_struct *new_vma)
{
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	struct pt_regs *regs;
	unsigned long landing_pad, old_landing;

	if (!in_ia32_syscall() || image != &vdso_image_32)
		return;

	regs = current_pt_regs();
	landing_pad = image->sym_int80_landing_pad;
	old_landing = (unsigned long)current->mm->context.vdso + landing_pad;

	/* See do_fast_syscall_32: user IP sits on the landing pad. */
	if (regs->ip == old_landing)
		regs->ip = new_vma->vm_start + landing_pad;
#endif
}
|
||||||
|
|
||||||
|
static int vdso_mremap(const struct vm_special_mapping *sm,
|
||||||
|
struct vm_area_struct *new_vma)
|
||||||
|
{
|
||||||
|
unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
|
||||||
|
const struct vdso_image *image = current->mm->context.vdso_image;
|
||||||
|
|
||||||
|
if (image->size != new_size)
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
|
||||||
|
return -EFAULT;
|
||||||
|
|
||||||
|
vdso_fix_landing(image, new_vma);
|
||||||
|
current->mm->context.vdso = (void __user *)new_vma->vm_start;
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
static int vvar_fault(const struct vm_special_mapping *sm,
|
static int vvar_fault(const struct vm_special_mapping *sm,
|
||||||
struct vm_area_struct *vma, struct vm_fault *vmf)
|
struct vm_area_struct *vma, struct vm_fault *vmf)
|
||||||
|
@ -151,6 +182,12 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
|
||||||
struct vm_area_struct *vma;
|
struct vm_area_struct *vma;
|
||||||
unsigned long addr, text_start;
|
unsigned long addr, text_start;
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
|
|
||||||
|
static const struct vm_special_mapping vdso_mapping = {
|
||||||
|
.name = "[vdso]",
|
||||||
|
.fault = vdso_fault,
|
||||||
|
.mremap = vdso_mremap,
|
||||||
|
};
|
||||||
static const struct vm_special_mapping vvar_mapping = {
|
static const struct vm_special_mapping vvar_mapping = {
|
||||||
.name = "[vvar]",
|
.name = "[vvar]",
|
||||||
.fault = vvar_fault,
|
.fault = vvar_fault,
|
||||||
|
@ -185,7 +222,7 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
|
||||||
image->size,
|
image->size,
|
||||||
VM_READ|VM_EXEC|
|
VM_READ|VM_EXEC|
|
||||||
VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
|
VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
|
||||||
&text_mapping);
|
&vdso_mapping);
|
||||||
|
|
||||||
if (IS_ERR(vma)) {
|
if (IS_ERR(vma)) {
|
||||||
ret = PTR_ERR(vma);
|
ret = PTR_ERR(vma);
|
||||||
|
|
|
@ -594,6 +594,9 @@ struct vm_special_mapping {
|
||||||
int (*fault)(const struct vm_special_mapping *sm,
|
int (*fault)(const struct vm_special_mapping *sm,
|
||||||
struct vm_area_struct *vma,
|
struct vm_area_struct *vma,
|
||||||
struct vm_fault *vmf);
|
struct vm_fault *vmf);
|
||||||
|
|
||||||
|
int (*mremap)(const struct vm_special_mapping *sm,
|
||||||
|
struct vm_area_struct *new_vma);
|
||||||
};
|
};
|
||||||
|
|
||||||
enum tlb_flush_reason {
|
enum tlb_flush_reason {
|
||||||
|
|
10
mm/mmap.c
10
mm/mmap.c
|
@ -2943,9 +2943,19 @@ static const char *special_mapping_name(struct vm_area_struct *vma)
|
||||||
return ((struct vm_special_mapping *)vma->vm_private_data)->name;
|
return ((struct vm_special_mapping *)vma->vm_private_data)->name;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int special_mapping_mremap(struct vm_area_struct *new_vma)
|
||||||
|
{
|
||||||
|
struct vm_special_mapping *sm = new_vma->vm_private_data;
|
||||||
|
|
||||||
|
if (sm->mremap)
|
||||||
|
return sm->mremap(sm, new_vma);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
/* VMA operations shared by all special mappings (vDSO, vvar, ...). */
static const struct vm_operations_struct special_mapping_vmops = {
	.close = special_mapping_close,
	.fault = special_mapping_fault,
	.mremap = special_mapping_mremap,	/* lets owners veto/track moves */
	.name = special_mapping_name,
};
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue