/*
 * Compatibility mode system call entry point for x86-64.
 *
 * Copyright 2000-2002 Andi Kleen, SuSE Labs.
 */
#include "calling.h"
#include <asm/asm-offsets.h>
#include <asm/current.h>
#include <asm/errno.h>
#include <asm/ia32_unistd.h>
#include <asm/thread_info.h>
#include <asm/segment.h>
#include <asm/irqflags.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <linux/linkage.h>
#include <linux/err.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
#include <linux/elf-em.h>
#define AUDIT_ARCH_I386		(EM_386|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_LE		0x40000000

#ifndef CONFIG_AUDITSYSCALL
# define sysexit_audit		ia32_ret_from_sys_call_irqs_off
# define sysretl_audit		ia32_ret_from_sys_call_irqs_off
#endif
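/*
 * With auditing compiled out, the audit "exit" labels are plain aliases:
 * the jnz sysexit_audit / jnz sysretl_audit checks further down then go
 * straight to the common ia32_ret_from_sys_call path (entered with IRQs
 * still off) whenever any work flags are set.
 */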

	.section .entry.text, "ax"

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret32)
	swapgs
	sysretl
ENDPROC(native_usergs_sysret32)
#endif

/*
 * 32-bit SYSENTER instruction entry.
 *
 * SYSENTER loads ss, rsp, cs, and rip from previously programmed MSRs.
 * IF and VM in rflags are cleared (IOW: interrupts are off).
 * SYSENTER does not save anything on the stack,
 * and does not save old rip (!!!) and rflags.
 *
 * Arguments:
 * eax  system call number
 * ebx  arg1
 * ecx  arg2
 * edx  arg3
 * esi  arg4
 * edi  arg5
 * ebp  user stack
 * 0(%ebp) arg6
 *
 * This is purely a fast path. For anything complicated we use the int 0x80
 * path below. We set up a complete hardware stack frame to share code
 * with the int 0x80 path.
*/
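
/*
 * For orientation only -- a sketch, not the authoritative vDSO source:
 * a 32-bit task normally reaches this entry through the vDSO's
 * __kernel_vsyscall, roughly:
 *
 *	push	%ecx
 *	push	%edx
 *	push	%ebp
 *	movl	%esp, %ebp	# arg6 is then reachable at 0(%ebp)
 *	sysenter
 *
 * which is why arg6 is fetched from (%rbp) below, and why the saved user
 * IP points at VDSO32_SYSENTER_RETURN, whose "pop %ebp; pop %edx;
 * pop %ecx" sequence undoes these pushes (see the comment above
 * USERGS_SYSRET32 further down).
 */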
ENTRY(entry_SYSENTER_compat)
	/* Interrupts are off on entry. */
	SWAPGS_UNSAFE_STACK
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	/* Zero-extending 32-bit regs, do not remove */
	movl	%ebp, %ebp
	movl	%eax, %eax

movl ASM_THREAD_INFO(TI_sysenter_return, %rsp, 0), %r10d
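	/*
	 * %r10d now holds thread_info->sysenter_return, which points at
	 * VDSO32_SYSENTER_RETURN in the user's vDSO; it is pushed as
	 * pt_regs->ip below, so the eventual return to user mode lands
	 * back in the vDSO stub.
	 */
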
/* Construct struct pt_regs on stack */
	pushq	$__USER32_DS		/* pt_regs->ss */
	pushq	%rbp			/* pt_regs->sp */

	/*
	 * Push flags.  This is nasty.  First, interrupts are currently
	 * off, but we need pt_regs->flags to have IF set.  Second, even
	 * if TF was set when SYSENTER started, it's clear by now.  We fix
	 * that later using TIF_SINGLESTEP.
	 */
	pushfq				/* pt_regs->flags (except IF = 0) */
	orl	$X86_EFLAGS_IF, (%rsp)	/* Fix saved flags */

	pushq	$__USER32_CS		/* pt_regs->cs */
	pushq	%r10			/* pt_regs->ip = thread_info->sysenter_return */
	pushq	%rax			/* pt_regs->orig_ax */
	pushq	%rdi			/* pt_regs->di */
	pushq	%rsi			/* pt_regs->si */
	pushq	%rdx			/* pt_regs->dx */
	pushq	%rcx			/* pt_regs->cx */
	pushq	$-ENOSYS		/* pt_regs->ax */
	cld
	sub	$(10*8), %rsp		/* pt_regs->r8-11, bp, bx, r12-15 not saved */

	/*
	 * Sysenter doesn't filter flags, so we need to clear NT
	 * ourselves.  To save a few cycles, we can check whether
	 * NT was set instead of doing an unconditional popfq.
	 * This needs to happen before enabling interrupts so that
	 * we don't get preempted with NT set.
	 */
	testl	$X86_EFLAGS_NT, EFLAGS(%rsp)
	jnz	sysenter_fix_flags
sysenter_flags_fixed:

	/*
	 * Re-enable interrupts.  IRQ tracing already thinks that IRQs are
	 * on (since we treat user mode as having IRQs on), and the
	 * prologue above is too short for it to be worth adding a
	 * tracing round trip.
	 */
	ENABLE_INTERRUPTS(CLBR_NONE)

	/*
	 * No need to do an access_ok() check here because RBP has been
	 * 32-bit zero extended:
	 */
	ASM_STAC
1:	movl	(%rbp), %ebp
	_ASM_EXTABLE(1b, ia32_badarg)
	ASM_CLAC

	orl	$TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
	testl	$_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
	jnz	sysenter_tracesys

sysenter_do_call:
	/* 32-bit syscall -> 64-bit C ABI argument conversion */
	movl	%edi, %r8d		/* arg5 */
	movl	%ebp, %r9d		/* arg6 */
	xchg	%ecx, %esi		/* rsi:arg2, rcx:arg4 */
	movl	%ebx, %edi		/* arg1 */
movl %edx, %edx /* arg3 (zero extension) */
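	/*
	 * Net effect of the moves above (32-bit syscall ABI -> 64-bit C ABI):
	 *	ebx -> rdi (arg1),  ecx -> rsi (arg2),  edx -> rdx (arg3),
	 *	esi -> rcx (arg4),  edi -> r8  (arg5),  ebp -> r9  (arg6)
	 */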
sysenter_dispatch:
	cmpq	$(IA32_NR_syscalls-1), %rax
ja 1f
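	/*
	 * Note: %rax was zero-extended from %eax on entry, so this single
	 * unsigned compare rejects every out-of-range syscall number,
	 * including 32-bit values with the high bit set.
	 */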
	call	*ia32_sys_call_table(, %rax, 8)
	movq	%rax, RAX(%rsp)
1:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	testl	$_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
	jnz	sysexit_audit
sysexit_from_sys_call:
	/*
	 * NB: SYSEXIT is not obviously safe for 64-bit kernels -- an
	 * NMI between STI and SYSEXIT has poorly specified behavior,
	 * and an NMI followed by an IRQ with usergs is fatal.  So
	 * we just pretend we're using SYSEXIT but we really use
	 * SYSRETL instead.
	 *
	 * This code path is still called 'sysexit' because it pairs
	 * with 'sysenter' and it uses the SYSENTER calling convention.
	 */
	andl	$~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
	movl	RIP(%rsp), %ecx		/* User %eip */
	movq	RAX(%rsp), %rax
	movl	RSI(%rsp), %esi
	movl	RDI(%rsp), %edi
	xorl	%edx, %edx		/* Do not leak kernel information */
	xorq	%r8, %r8
	xorq	%r9, %r9
	xorq	%r10, %r10
	movl	EFLAGS(%rsp), %r11d	/* User eflags */
	TRACE_IRQS_ON

	/*
	 * SYSRETL works even on Intel CPUs.  Use it in preference to SYSEXIT,
	 * since it avoids a dicey window with interrupts enabled.
	 */
	movl	RSP(%rsp), %esp

	/*
	 * USERGS_SYSRET32 does:
	 *  gsbase = user's gs base
	 *  eip = ecx
	 *  rflags = r11
	 *  cs = __USER32_CS
	 *  ss = __USER_DS
	 *
	 * The prologue set RIP(%rsp) to VDSO32_SYSENTER_RETURN, which does:
	 *
	 *  pop %ebp
	 *  pop %edx
	 *  pop %ecx
	 *
	 * Therefore, we invoke SYSRETL with EDX and R8-R10 zeroed to
	 * avoid info leaks.  R11 ends up with VDSO32_SYSENTER_RETURN's
	 * address (already known to user code), and R12-R15 are
	 * callee-saved and therefore don't contain any interesting
	 * kernel data.
	 */
	USERGS_SYSRET32

#ifdef CONFIG_AUDITSYSCALL
	.macro auditsys_entry_common
	/*
	 * At this point, registers hold syscall args in the 32-bit syscall ABI:
	 * EAX is syscall number, the 6 args are in EBX,ECX,EDX,ESI,EDI,EBP.
	 *
	 * We want to pass them to __audit_syscall_entry(), which is a 64-bit
	 * C function with 5 parameters, so shuffle them to match what
	 * the function expects: RDI,RSI,RDX,RCX,R8.
*/
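	/*
	 * (If the audit API matches this description -- an assumption, not
	 * verified here -- the prototype being shuffled into is roughly:
	 * void __audit_syscall_entry(int major, unsigned long a0, a1, a2, a3).)
	 */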
	movl	%esi, %r8d		/* arg5 (R8 ) <= 4th syscall arg (ESI) */
	xchg	%ecx, %edx		/* arg4 (RCX) <= 3rd syscall arg (EDX) */
					/* arg3 (RDX) <= 2nd syscall arg (ECX) */
	movl	%ebx, %esi		/* arg2 (RSI) <= 1st syscall arg (EBX) */
	movl	%eax, %edi		/* arg1 (RDI) <= syscall number (EAX) */
	call	__audit_syscall_entry

	/*
	 * We are going to jump back to the syscall dispatch code.
	 * Prepare syscall args as required by the 64-bit C ABI.
	 * Registers clobbered by __audit_syscall_entry() are
	 * loaded from pt_regs on stack:
	 */
	movl	ORIG_RAX(%rsp), %eax	/* syscall number */
	movl	%ebx, %edi		/* arg1 */
	movl	RCX(%rsp), %esi		/* arg2 */
	movl	RDX(%rsp), %edx		/* arg3 */
	movl	RSI(%rsp), %ecx		/* arg4 */
	movl	RDI(%rsp), %r8d		/* arg5 */
	.endm

	.macro auditsys_exit exit
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	testl	$(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
	jnz	ia32_ret_from_sys_call
	movl	%eax, %esi		/* second arg, syscall return value */
	cmpl	$-MAX_ERRNO, %eax	/* is it an error ? */
	jbe	1f
	movslq	%eax, %rsi		/* if error sign extend to 64 bits */
1:	setbe	%al			/* 1 if error, 0 if not */
	movzbl	%al, %edi		/* zero-extend that into %edi */
	call	__audit_syscall_exit
	movl	$(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %edi
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	testl	%edi, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
	jz	\exit
	xorl	%eax, %eax		/* Do not leak kernel information */
	movq	%rax, R11(%rsp)
	movq	%rax, R10(%rsp)
	movq	%rax, R9(%rsp)
	movq	%rax, R8(%rsp)
	jmp	int_ret_from_sys_call_irqs_off
	.endm

sysenter_auditsys:
	auditsys_entry_common
	movl	%ebp, %r9d		/* reload 6th syscall arg */
	jmp	sysenter_dispatch

sysexit_audit:
	auditsys_exit sysexit_from_sys_call
#endif

sysenter_fix_flags:
	pushq	$X86_EFLAGS_FIXED
	popfq
	jmp	sysenter_flags_fixed

sysenter_tracesys:
#ifdef CONFIG_AUDITSYSCALL
	testl	$(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
	jz	sysenter_auditsys
#endif
	SAVE_EXTRA_REGS
	xorl	%eax, %eax		/* Do not leak kernel information */
	movq	%rax, R11(%rsp)
	movq	%rax, R10(%rsp)
	movq	%rax, R9(%rsp)
	movq	%rax, R8(%rsp)
	movq	%rsp, %rdi		/* &pt_regs -> arg1 */
	call	syscall_trace_enter

	/* Reload arg registers from stack. (see sysenter_tracesys) */
	movl	RCX(%rsp), %ecx
	movl	RDX(%rsp), %edx
	movl	RSI(%rsp), %esi
	movl	RDI(%rsp), %edi
	movl	%eax, %eax		/* zero extension */

	RESTORE_EXTRA_REGS
	jmp	sysenter_do_call
ENDPROC(entry_SYSENTER_compat)

/*
 * 32-bit SYSCALL instruction entry.
 *
 * 32-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
 * then loads new ss, cs, and rip from previously programmed MSRs.
 * rflags gets masked by a value from another MSR (so CLD and CLAC
 * are not needed). SYSCALL does not save anything on the stack
 * and does not change rsp.
 *
 * Note: rflags saving+masking-with-MSR happens only in Long mode
 * (in legacy 32-bit mode, IF, RF and VM bits are cleared and that's it).
 * Don't get confused: rflags saving+masking depends on Long Mode Active bit
 * (EFER.LMA=1), NOT on bitness of userspace where SYSCALL executes
 * or target CS descriptor's L bit (SYSCALL does not read segment descriptors).
 *
 * Arguments:
 * eax  system call number
 * ecx  return address
 * ebx  arg1
 * ebp  arg2	(note: not saved in the stack frame, should not be touched)
 * edx  arg3
 * esi  arg4
 * edi  arg5
 * esp  user stack
 * 0(%esp) arg6
 *
 * This is purely a fast path. For anything complicated we use the int 0x80
 * path below. We set up a complete hardware stack frame to share code
 * with the int 0x80 path.
*/
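
/*
 * For orientation only -- a sketch, not the authoritative vDSO source:
 * on CPUs that take this path, the 32-bit vDSO's __kernel_vsyscall is
 * roughly:
 *
 *	push	%ebp
 *	movl	%ecx, %ebp	# free %ecx: SYSCALL clobbers it with the return IP
 *	syscall
 *	movl	%ebp, %ecx
 *	popl	%ebp
 *	ret
 *
 * which is why arg2 arrives here in %ebp and why the prologue below
 * stores %rbp into the pt_regs->cx slot and copies it back into %ecx.
 */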
ENTRY(entry_SYSCALL_compat)
	/*
	 * Interrupts are off on entry.
	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
	 * it is too small to ever cause noticeable irq latency.
	 */
	SWAPGS_UNSAFE_STACK
	movl	%esp, %r8d
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
	ENABLE_INTERRUPTS(CLBR_NONE)

	/* Zero-extending 32-bit regs, do not remove */
	movl	%eax, %eax

	/* Construct struct pt_regs on stack */
	pushq	$__USER32_DS		/* pt_regs->ss */
	pushq	%r8			/* pt_regs->sp */
	pushq	%r11			/* pt_regs->flags */
	pushq	$__USER32_CS		/* pt_regs->cs */
	pushq	%rcx			/* pt_regs->ip */
	pushq	%rax			/* pt_regs->orig_ax */
	pushq	%rdi			/* pt_regs->di */
	pushq	%rsi			/* pt_regs->si */
	pushq	%rdx			/* pt_regs->dx */
	pushq	%rbp			/* pt_regs->cx */
	movl	%ebp, %ecx
	pushq	$-ENOSYS		/* pt_regs->ax */
	sub	$(10*8), %rsp		/* pt_regs->r8-11, bp, bx, r12-15 not saved */

	/*
	 * No need to do an access_ok check here because r8 has been
	 * 32-bit zero extended:
	 */
	ASM_STAC
1:	movl	(%r8), %r9d
	_ASM_EXTABLE(1b, ia32_badarg)
	ASM_CLAC
	orl	$TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
	testl	$_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
	jnz	cstar_tracesys

cstar_do_call:
	/* 32-bit syscall -> 64-bit C ABI argument conversion */
	movl	%edi, %r8d		/* arg5 */
	/* r9 already loaded */		/* arg6 */
	xchg	%ecx, %esi		/* rsi:arg2, rcx:arg4 */
	movl	%ebx, %edi		/* arg1 */
	movl	%edx, %edx		/* arg3 (zero extension) */

cstar_dispatch:
	cmpq	$(IA32_NR_syscalls-1), %rax
	ja	1f

	call	*ia32_sys_call_table(, %rax, 8)
	movq	%rax, RAX(%rsp)
1:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	testl	$_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
	jnz	sysretl_audit

sysretl_from_sys_call:
	andl	$~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
	movl	RDX(%rsp), %edx
	movl	RSI(%rsp), %esi
	movl	RDI(%rsp), %edi
	movl	RIP(%rsp), %ecx
	movl	EFLAGS(%rsp), %r11d
	movq	RAX(%rsp), %rax
	xorq	%r10, %r10
	xorq	%r9, %r9
	xorq	%r8, %r8
	TRACE_IRQS_ON
	movl	RSP(%rsp), %esp
	/*
	 * 64-bit->32-bit SYSRET restores eip from ecx,
	 * eflags from r11 (but RF and VM bits are forced to 0),
	 * cs and ss are loaded from MSRs.
	 * (Note: 32-bit->32-bit SYSRET is different: since r11
	 * does not exist, it merely sets eflags.IF=1).
	 *
	 * NB: On AMD CPUs with the X86_BUG_SYSRET_SS_ATTRS bug, the ss
	 * descriptor is not reinitialized.  This means that we must
	 * avoid SYSRET with SS == NULL, which could happen if we schedule,
	 * exit the kernel, and re-enter using an interrupt vector.  (All
	 * interrupt entries on x86_64 set SS to NULL.)  We prevent that
	 * from happening by reloading SS in __switch_to.
	 */
	USERGS_SYSRET32

#ifdef CONFIG_AUDITSYSCALL
cstar_auditsys:
	movl	%r9d, R9(%rsp)		/* register to be clobbered by call */
	auditsys_entry_common
	movl	R9(%rsp), %r9d		/* reload 6th syscall arg */
	jmp	cstar_dispatch

sysretl_audit:
	auditsys_exit sysretl_from_sys_call
#endif

cstar_tracesys:
#ifdef CONFIG_AUDITSYSCALL
	testl	$(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
	jz	cstar_auditsys
#endif
xchgl %r9d, %ebp
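	/*
	 * The xchg above puts the user's 6th argument (currently in %r9d)
	 * into %ebp, so SAVE_EXTRA_REGS records it in pt_regs->bp where
	 * ptrace/seccomp expect the 6th arg of a 32-bit syscall; the user's
	 * original %ebp value (arg2) is parked in %r9d, preserved in the R9
	 * slot across syscall_trace_enter(), and swapped back afterwards.
	 */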
	SAVE_EXTRA_REGS
	xorl	%eax, %eax		/* Do not leak kernel information */
	movq	%rax, R11(%rsp)
	movq	%rax, R10(%rsp)
	movq	%r9, R9(%rsp)
	movq	%rax, R8(%rsp)
	movq	%rsp, %rdi		/* &pt_regs -> arg1 */
	call	syscall_trace_enter
	movl	R9(%rsp), %r9d
|
2015-06-03 03:04:02 +08:00
|
|
|
|
|
|
|
/* Reload arg registers from stack. (see sysenter_tracesys) */
|
|
|
|
movl RCX(%rsp), %ecx
|
|
|
|
movl RDX(%rsp), %edx
|
|
|
|
movl RSI(%rsp), %esi
|
|
|
|
movl RDI(%rsp), %edi
|
2015-06-05 19:02:28 +08:00
|
|
|
movl %eax, %eax /* zero extension */

	RESTORE_EXTRA_REGS
	xchgl	%ebp, %r9d
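	/* Undo the %r9d/%ebp swap done at the top of this slow path. */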
	jmp	cstar_do_call
END(entry_SYSCALL_compat)

ia32_badarg:
	/*
	 * So far, we've entered kernel mode, set AC, turned on IRQs, and
	 * saved C regs except r8-r11. We haven't done any of the other
	 * standard entry work, though. We want to bail, but we shouldn't
	 * treat this as a syscall entry since we don't even know what the
	 * args are. Instead, treat this as a non-syscall entry, finish
	 * the entry work, and immediately exit after setting AX = -EFAULT.
	 *
	 * We're really just being polite here. Killing the task outright
	 * would be a reasonable action, too. Given that the only valid
	 * way to have gotten here is through the vDSO, and we already know
	 * that the stack pointer is bad, the task isn't going to survive
	 * for long no matter what we do.
	 */
	ASM_CLAC			/* undo STAC */
	movq	$-EFAULT, RAX(%rsp)	/* return -EFAULT if possible */

	/* Fill in the rest of pt_regs */
	xorl	%eax, %eax
	movq	%rax, R11(%rsp)
	movq	%rax, R10(%rsp)
	movq	%rax, R9(%rsp)
	movq	%rax, R8(%rsp)
	SAVE_EXTRA_REGS
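	/*
	 * pt_regs is now complete: the C-clobbered registers were saved on
	 * entry, r8-r11 were zeroed above, and SAVE_EXTRA_REGS filled in the
	 * callee-saved ones.
	 */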

	/* Turn IRQs back off. */
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF

	/* Now finish entering normal kernel mode. */
#ifdef CONFIG_CONTEXT_TRACKING
	call	enter_from_user_mode
#endif

	/* And exit again. */
	jmp	retint_user

ia32_ret_from_sys_call_irqs_off:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)

ia32_ret_from_sys_call:
	xorl	%eax, %eax		/* Do not leak kernel information */
	movq	%rax, R11(%rsp)
	movq	%rax, R10(%rsp)
	movq	%rax, R9(%rsp)
	movq	%rax, R8(%rsp)
	jmp	int_ret_from_sys_call

/*
 * Emulated IA32 system calls via int 0x80.
 *
 * Arguments:
 * eax	system call number
 * ebx	arg1
 * ecx	arg2
 * edx	arg3
 * esi	arg4
 * edi	arg5
 * ebp	arg6	(note: not saved in the stack frame, should not be touched)
 *
 * Notes:
 * Uses the same stack frame as the x86-64 version.
 * All registers except eax must be saved (but ptrace may violate that).
 * Arguments are zero extended. For system calls that want sign extension and
 * take long arguments a wrapper is needed. Most calls can just be called
 * directly.
 * Assumes it is only called from user space and entered with interrupts off.
 */

ENTRY(entry_INT80_compat)
	/*
	 * Interrupts are off on entry.
	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
	 * it is too small to ever cause noticeable irq latency.
	 */
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	SWAPGS
	ENABLE_INTERRUPTS(CLBR_NONE)

	/* Zero-extending 32-bit regs, do not remove */
	movl	%eax, %eax

	/* Construct struct pt_regs on stack (iret frame is already on stack) */
	pushq	%rax			/* pt_regs->orig_ax */
	pushq	%rdi			/* pt_regs->di */
	pushq	%rsi			/* pt_regs->si */
	pushq	%rdx			/* pt_regs->dx */
	pushq	%rcx			/* pt_regs->cx */
	pushq	$-ENOSYS		/* pt_regs->ax */
	pushq	$0			/* pt_regs->r8 */
	pushq	$0			/* pt_regs->r9 */
	pushq	$0			/* pt_regs->r10 */
	pushq	$0			/* pt_regs->r11 */
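	/*
	 * r8-r11 are not reachable from 32-bit user code, so push zeros
	 * rather than whatever the registers happen to contain; pt_regs then
	 * holds well-defined values for these slots.
	 */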
	cld
	sub	$(6*8), %rsp		/* pt_regs->bp, bx, r12-15 not saved */

	orl	$TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
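	/*
	 * TS_COMPAT marks this task as being inside a 32-bit syscall, so that
	 * code such as ptrace and signal delivery treats the frame accordingly.
	 */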
	testl	$_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
	jnz	ia32_tracesys
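	/*
	 * Any pending entry work (ptrace, seccomp, audit, syscall tracepoints)
	 * is handled on the ia32_tracesys slow path below.
	 */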

ia32_do_call:
	/* 32-bit syscall -> 64-bit C ABI argument conversion */
	movl	%edi, %r8d		/* arg5 */
	movl	%ebp, %r9d		/* arg6 */
	xchg	%ecx, %esi		/* rsi:arg2, rcx:arg4 */
	movl	%ebx, %edi		/* arg1 */
	movl	%edx, %edx		/* arg3 (zero extension) */
	cmpq	$(IA32_NR_syscalls-1), %rax
	ja	1f
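	/*
	 * Out-of-range syscall numbers skip the call entirely; pt_regs->ax was
	 * pre-loaded with -ENOSYS above, so that is what the caller sees.
	 */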

	call	*ia32_sys_call_table(, %rax, 8)
	movq	%rax, RAX(%rsp)
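	/* Store the handler's return value where the exit path will read it. */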
1:
	jmp	int_ret_from_sys_call

ia32_tracesys:
	SAVE_EXTRA_REGS
	movq	%rsp, %rdi		/* &pt_regs -> arg1 */
	call	syscall_trace_enter
	/*
	 * Reload arg registers from stack in case ptrace changed them.
	 * Don't reload %eax because syscall_trace_enter() returned
	 * the %rax value we should see. But do truncate it to 32 bits.
	 * If it's -1 to make us punt the syscall, then (u32)-1 is still
	 * an appropriately invalid value.
	 */
	movl	RCX(%rsp), %ecx
	movl	RDX(%rsp), %edx
	movl	RSI(%rsp), %esi
	movl	RDI(%rsp), %edi
	movl	%eax, %eax		/* zero extension */
	RESTORE_EXTRA_REGS
	jmp	ia32_do_call
END(entry_INT80_compat)

	.macro PTREGSCALL label, func
	ALIGN
GLOBAL(\label)
	leaq	\func(%rip), %rax
	jmp	ia32_ptregs_common
	.endm
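
	/*
	 * Stubs for 32-bit syscalls that need the full pt_regs: each stub
	 * loads its C handler into %rax and lets ia32_ptregs_common (below)
	 * save the extra registers, make the call, and restore them.
	 */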
	PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn
	PTREGSCALL stub32_sigreturn, sys32_sigreturn
	PTREGSCALL stub32_fork, sys_fork
	PTREGSCALL stub32_vfork, sys_vfork

	ALIGN
GLOBAL(stub32_clone)
	leaq	sys_clone(%rip), %rax
	/*
	 * The 32-bit clone ABI is: clone(..., int tls_val, int *child_tidptr).
	 * The 64-bit clone ABI is: clone(..., int *child_tidptr, int tls_val).
	 *
	 * The native 64-bit kernel's sys_clone() implements the latter,
	 * so we need to swap arguments here before calling it:
	 */
	xchg	%r8, %rcx
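	/*
	 * In the 64-bit C calling convention %rcx is the 4th and %r8 the 5th
	 * argument, so this exchanges clone()'s last two parameters.
	 */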
	jmp	ia32_ptregs_common

	ALIGN
ia32_ptregs_common:
	SAVE_EXTRA_REGS 8
	call	*%rax
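	/* %rax holds the handler loaded by a PTREGSCALL stub or stub32_clone. */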
	RESTORE_EXTRA_REGS 8
	ret
END(ia32_ptregs_common)