x86: vdso: Expose sigreturn address on vdso to the kernel
Syscall user redirection requires that the signal trampoline code not be intercepted, in order to support returning with a locked selector while avoiding recursion back into the signal handler. For IA-32, which has the trampoline in the vDSO, expose the trampoline entry points to the kernel, so that it can avoid dispatching syscalls issued from that region back to userspace. Suggested-by: Andy Lutomirski <luto@kernel.org> Signed-off-by: Gabriel Krisman Bertazi <krisman@collabora.com> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Reviewed-by: Kees Cook <keescook@chromium.org> Reviewed-by: Andy Lutomirski <luto@kernel.org> Acked-by: Andy Lutomirski <luto@kernel.org> Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org> Link: https://lore.kernel.org/r/20201127193238.821364-2-krisman@collabora.com
This commit is contained in:
parent
01fe185d95
commit
c5c878125a
|
@ -101,6 +101,8 @@ struct vdso_sym required_syms[] = {
|
|||
{"__kernel_sigreturn", true},
|
||||
{"__kernel_rt_sigreturn", true},
|
||||
{"int80_landing_pad", true},
|
||||
{"vdso32_rt_sigreturn_landing_pad", true},
|
||||
{"vdso32_sigreturn_landing_pad", true},
|
||||
};
|
||||
|
||||
__attribute__((format(printf, 1, 2))) __attribute__((noreturn))
|
||||
|
|
|
@ -18,6 +18,7 @@ __kernel_sigreturn:
|
|||
movl $__NR_sigreturn, %eax
|
||||
SYSCALL_ENTER_KERNEL
|
||||
.LEND_sigreturn:
|
||||
SYM_INNER_LABEL(vdso32_sigreturn_landing_pad, SYM_L_GLOBAL)
|
||||
nop
|
||||
.size __kernel_sigreturn,.-.LSTART_sigreturn
|
||||
|
||||
|
@ -29,6 +30,7 @@ __kernel_rt_sigreturn:
|
|||
movl $__NR_rt_sigreturn, %eax
|
||||
SYSCALL_ENTER_KERNEL
|
||||
.LEND_rt_sigreturn:
|
||||
SYM_INNER_LABEL(vdso32_rt_sigreturn_landing_pad, SYM_L_GLOBAL)
|
||||
nop
|
||||
.size __kernel_rt_sigreturn,.-.LSTART_rt_sigreturn
|
||||
.previous
|
||||
|
|
|
@ -436,6 +436,21 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
|
|||
}
|
||||
#endif
|
||||
|
||||
/*
 * arch_syscall_is_vdso_sigreturn - check whether the syscall currently being
 * dispatched was issued from one of the 32-bit vDSO sigreturn trampolines.
 *
 * Per the commit message, syscall user redirection must not capture the
 * signal trampoline's sigreturn, so the kernel uses this to exempt syscalls
 * whose IP sits on a sigreturn landing pad in the 32-bit vDSO image.
 *
 * @regs: register state at syscall entry; only regs->ip is examined.
 *
 * Returns true iff this is a 32-bit syscall and regs->ip matches the
 * sigreturn or rt_sigreturn landing pad inside the mapped 32-bit vDSO.
 * Always returns false when neither CONFIG_X86_32 nor CONFIG_IA32_EMULATION
 * is enabled (no 32-bit vDSO exists in that configuration).
 */
bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs)
{
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
	const struct vdso_image *image = current->mm->context.vdso_image;
	/* Base address where the vDSO is mapped in this process. */
	unsigned long vdso = (unsigned long) current->mm->context.vdso;

	/* Only the 32-bit vDSO image carries the sigreturn trampolines. */
	if (in_ia32_syscall() && image == &vdso_image_32) {
		/*
		 * sym_* offsets are image-relative; rebase them against the
		 * per-process mapping address before comparing with regs->ip.
		 */
		if (regs->ip == vdso + image->sym_vdso32_sigreturn_landing_pad ||
		    regs->ip == vdso + image->sym_vdso32_rt_sigreturn_landing_pad)
			return true;
	}
#endif
	return false;
}
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
static __init int vdso_setup(char *s)
|
||||
{
|
||||
|
|
|
@ -388,6 +388,8 @@ extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
|
|||
compat_arch_setup_additional_pages(bprm, interpreter, \
|
||||
(ex->e_machine == EM_X86_64))
|
||||
|
||||
extern bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs);
|
||||
|
||||
/* Do not change the values. See get_align_mask() */
|
||||
enum align_flags {
|
||||
ALIGN_VA_32 = BIT(0),
|
||||
|
|
|
@ -27,6 +27,8 @@ struct vdso_image {
|
|||
long sym___kernel_rt_sigreturn;
|
||||
long sym___kernel_vsyscall;
|
||||
long sym_int80_landing_pad;
|
||||
long sym_vdso32_sigreturn_landing_pad;
|
||||
long sym_vdso32_rt_sigreturn_landing_pad;
|
||||
};
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
|
|
Loading…
Reference in New Issue