x86/asm/entry/32: Clean up entry_32.S
Make the 32-bit syscall entry code a bit more readable:

 - use consistent assembly coding style similar to entry_64.S

 - remove old comments that are not true anymore

 - eliminate whitespace noise

 - use consistent vertical spacing

 - fix various comments

No code changed:

  # arch/x86/entry/entry_32.o:

   text    data     bss     dec     hex filename
   6025       0       0    6025    1789 entry_32.o.before
   6025       0       0    6025    1789 entry_32.o.after

md5:
   f3fa16b2b0dca804f052deb6b30ba6cb  entry_32.o.before.asm
   f3fa16b2b0dca804f052deb6b30ba6cb  entry_32.o.after.asm

Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
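The size/md5 comparison quoted above is how the "no code changed" claim is
verified: build the object before and after the patch, then compare section
sizes and the md5 of the disassembly. A sketch of the kind of commands
involved (the exact invocation is an assumption, not part of the commit):

   size arch/x86/entry/entry_32.o                        # prints the text/data/bss table above
   objdump -d entry_32.o.before > entry_32.o.before.asm  # disassemble both objects
   objdump -d entry_32.o.after  > entry_32.o.after.asm
   md5sum entry_32.o.before.asm entry_32.o.after.asm     # identical md5 => identical code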
parent b2502b418e
commit a49976d14f
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -1,23 +1,12 @@
 /*
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- */
-
-/*
- * entry.S contains the system-call and fault low-level handling routines.
- * This also contains the timer-interrupt handler, as well as all interrupts
- * and faults that can result in a task-switch.
+ *  Copyright (C) 1991,1992  Linus Torvalds
  *
- * NOTE: This code handles signal-recognition, which happens every time
- * after a timer-interrupt and after each system call.
- *
- * I changed all the .align's to 4 (16 byte alignment), as that's faster
- * on a 486.
+ * entry_32.S contains the system-call and low-level fault and trap handling routines.
  *
  * Stack layout in 'syscall_exit':
- *	ptrace needs to have all regs on the stack.
- *	if the order here is changed, it needs to be
- *	updated in fork.c:copy_process, signal.c:do_signal,
+ *	ptrace needs to have all registers on the stack.
+ *	If the order here is changed, it needs to be
+ *	updated in fork.c:copy_process(), signal.c:do_signal(),
  *	ptrace.c and ptrace.h
  *
  *	 0(%esp) - %ebx
@@ -37,8 +26,6 @@
  * 38(%esp) - %eflags
  * 3C(%esp) - %oldesp
  * 40(%esp) - %oldss
- *
- * "current" is in register %ebx during any slow entries.
  */

 #include <linux/linkage.h>
@@ -64,8 +51,8 @@
 #define __AUDIT_ARCH_LE	0x40000000

 #ifndef CONFIG_AUDITSYSCALL
-#define sysenter_audit	syscall_trace_entry
-#define sysexit_audit	syscall_exit_work
+# define sysenter_audit		syscall_trace_entry
+# define sysexit_audit		syscall_exit_work
 #endif

 .section .entry.text, "ax"
@@ -84,15 +71,15 @@
  */

 #ifdef CONFIG_PREEMPT
-#define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
+# define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
 #else
-#define preempt_stop(clobbers)
-#define resume_kernel		restore_all
+# define preempt_stop(clobbers)
+# define resume_kernel		restore_all
 #endif

 .macro TRACE_IRQS_IRET
 #ifdef CONFIG_TRACE_IRQFLAGS
-	testl	$X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off?
+	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off?
 	jz	1f
 	TRACE_IRQS_ON
 1:
@@ -149,7 +136,7 @@
 99:	movl	$0, (%esp)
 	jmp	98b
 .popsection
-	_ASM_EXTABLE(98b,99b)
+	_ASM_EXTABLE(98b, 99b)
 .endm

 .macro PTGS_TO_GS
@@ -160,7 +147,7 @@
 99:	movl	$0, PT_GS(%esp)
 	jmp	98b
 .popsection
-	_ASM_EXTABLE(98b,99b)
+	_ASM_EXTABLE(98b, 99b)
 .endm

 .macro GS_TO_REG reg
@@ -221,9 +208,9 @@
 6:	movl	$0, (%esp)
 	jmp	3b
 .popsection
-	_ASM_EXTABLE(1b,4b)
-	_ASM_EXTABLE(2b,5b)
-	_ASM_EXTABLE(3b,6b)
+	_ASM_EXTABLE(1b, 4b)
+	_ASM_EXTABLE(2b, 5b)
+	_ASM_EXTABLE(3b, 6b)
 	POP_GS_EX
 .endm

@@ -244,9 +231,9 @@ ENTRY(ret_from_kernel_thread)
 	popl	%eax
 	pushl	$0x0202			# Reset kernel eflags
 	popfl
-	movl	PT_EBP(%esp),%eax
+	movl	PT_EBP(%esp), %eax
 	call	*PT_EBX(%esp)
-	movl	$0,PT_EAX(%esp)
+	movl	$0, PT_EAX(%esp)
 	jmp	syscall_exit
 ENDPROC(ret_from_kernel_thread)

@@ -294,21 +281,24 @@ END(ret_from_exception)
 ENTRY(resume_kernel)
 	DISABLE_INTERRUPTS(CLBR_ANY)
 need_resched:
-	cmpl	$0,PER_CPU_VAR(__preempt_count)
+	cmpl	$0, PER_CPU_VAR(__preempt_count)
 	jnz	restore_all
-	testl	$X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off (exception path) ?
+	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
 	jz	restore_all
 	call	preempt_schedule_irq
 	jmp	need_resched
 END(resume_kernel)
 #endif

-/* SYSENTER_RETURN points to after the "sysenter" instruction in
-   the vsyscall page.  See vsyscall-sysentry.S, which defines the symbol.  */
+/*
+ * SYSENTER_RETURN points to after the SYSENTER instruction
+ * in the vsyscall page. See vsyscall-sysentry.S, which defines
+ * the symbol.
+ */

-	# sysenter call handler stub
+	# SYSENTER call handler stub
 ENTRY(entry_SYSENTER_32)
-	movl	TSS_sysenter_sp0(%esp),%esp
+	movl	TSS_sysenter_sp0(%esp), %esp
 sysenter_past_esp:
 	/*
 	 * Interrupts are disabled here, but we can't trace it until
@@ -338,24 +328,24 @@ sysenter_past_esp:
 	 * Load the potential sixth argument from user stack.
 	 * Careful about security.
 	 */
-	cmpl	$__PAGE_OFFSET-3,%ebp
+	cmpl	$__PAGE_OFFSET-3, %ebp
 	jae	syscall_fault
 	ASM_STAC
-1:	movl	(%ebp),%ebp
+1:	movl	(%ebp), %ebp
 	ASM_CLAC
-	movl	%ebp,PT_EBP(%esp)
-	_ASM_EXTABLE(1b,syscall_fault)
+	movl	%ebp, PT_EBP(%esp)
+	_ASM_EXTABLE(1b, syscall_fault)

 	GET_THREAD_INFO(%ebp)

-	testl	$_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
+	testl	$_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp)
 	jnz	sysenter_audit
 sysenter_do_call:
 	cmpl	$(NR_syscalls), %eax
 	jae	sysenter_badsys
-	call	*sys_call_table(,%eax,4)
+	call	*sys_call_table(, %eax, 4)
 sysenter_after_call:
-	movl	%eax,PT_EAX(%esp)
+	movl	%eax, PT_EAX(%esp)
 	LOCKDEP_SYS_EXIT
 	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
@@ -366,7 +356,7 @@ sysenter_exit:
 /* if something modifies registers it must also disable sysexit */
 	movl	PT_EIP(%esp), %edx
 	movl	PT_OLDESP(%esp), %ecx
-	xorl	%ebp,%ebp
+	xorl	%ebp, %ebp
 	TRACE_IRQS_ON
 1:	mov	PT_FS(%esp), %fs
 	PTGS_TO_GS
@@ -374,7 +364,7 @@ sysenter_exit:

 #ifdef CONFIG_AUDITSYSCALL
 sysenter_audit:
-	testl	$(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
+	testl	$(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), TI_flags(%ebp)
 	jnz	syscall_trace_entry
 	/* movl PT_EAX(%esp), %eax already set, syscall number: 1st arg to audit */
 	movl	PT_EBX(%esp), %edx		/* ebx/a0: 2nd arg to audit */
@@ -384,7 +374,7 @@ sysenter_audit:
 	call	__audit_syscall_entry
 	popl	%ecx	/* get that remapped edx off the stack */
 	popl	%ecx	/* get that remapped esi off the stack */
-	movl	PT_EAX(%esp),%eax	/* reload syscall number */
+	movl	PT_EAX(%esp), %eax	/* reload syscall number */
 	jmp	sysenter_do_call

 sysexit_audit:
@@ -392,25 +382,25 @@ sysexit_audit:
 	jnz	syscall_exit_work
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_ANY)
-	movl	%eax,%edx		/* second arg, syscall return value */
-	cmpl	$-MAX_ERRNO,%eax	/* is it an error ? */
+	movl	%eax, %edx		/* second arg, syscall return value */
+	cmpl	$-MAX_ERRNO, %eax	/* is it an error ? */
 	setbe	%al			/* 1 if so, 0 if not */
-	movzbl	%al,%eax		/* zero-extend that */
+	movzbl	%al, %eax		/* zero-extend that */
 	call	__audit_syscall_exit
 	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
 	movl	TI_flags(%ebp), %ecx
 	testl	$(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
 	jnz	syscall_exit_work
-	movl	PT_EAX(%esp),%eax	/* reload syscall return value */
+	movl	PT_EAX(%esp), %eax	/* reload syscall return value */
 	jmp	sysenter_exit
 #endif

-.pushsection .fixup,"ax"
-2:	movl	$0,PT_FS(%esp)
+.pushsection .fixup, "ax"
+2:	movl	$0, PT_FS(%esp)
 	jmp	1b
 .popsection
-	_ASM_EXTABLE(1b,2b)
+	_ASM_EXTABLE(1b, 2b)
 	PTGS_TO_GS_EX
 ENDPROC(entry_SYSENTER_32)

@@ -421,14 +411,14 @@ ENTRY(entry_INT80_32)
 	SAVE_ALL
 	GET_THREAD_INFO(%ebp)
					# system call tracing in operation / emulation
-	testl	$_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
+	testl	$_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp)
 	jnz	syscall_trace_entry
 	cmpl	$(NR_syscalls), %eax
 	jae	syscall_badsys
 syscall_call:
-	call	*sys_call_table(,%eax,4)
+	call	*sys_call_table(, %eax, 4)
 syscall_after_call:
-	movl	%eax,PT_EAX(%esp)	# store the return value
+	movl	%eax, PT_EAX(%esp)	# store the return value
 syscall_exit:
 	LOCKDEP_SYS_EXIT
 	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
@@ -444,9 +434,11 @@ restore_all:
 restore_all_notrace:
 #ifdef CONFIG_X86_ESPFIX32
 	movl	PT_EFLAGS(%esp), %eax	# mix EFLAGS, SS and CS
-	# Warning: PT_OLDSS(%esp) contains the wrong/random values if we
-	# are returning to the kernel.
-	# See comments in process.c:copy_thread() for details.
+	/*
+	 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
+	 * are returning to the kernel.
+	 * See comments in process.c:copy_thread() for details.
+	 */
 	movb	PT_OLDSS(%esp), %ah
 	movb	PT_CS(%esp), %al
 	andl	$(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
@@ -457,13 +449,13 @@ restore_nocheck:
 	RESTORE_REGS 4			# skip orig_eax/error_code
 irq_return:
 	INTERRUPT_RETURN
-.section .fixup,"ax"
-ENTRY(iret_exc)
+.section .fixup, "ax"
+ENTRY(iret_exc )
 	pushl	$0			# no error code
 	pushl	$do_iret_error
 	jmp	error_code
 .previous
-	_ASM_EXTABLE(irq_return,iret_exc)
+	_ASM_EXTABLE(irq_return, iret_exc)

 #ifdef CONFIG_X86_ESPFIX32
 ldt_ss:
@@ -501,9 +493,11 @@ ldt_ss:
 	mov	%dh, GDT_ESPFIX_SS + 7	/* bits 24..31 */
 	pushl	$__ESPFIX_SS
 	pushl	%eax			/* new kernel esp */
-	/* Disable interrupts, but do not irqtrace this section: we
+	/*
+	 * Disable interrupts, but do not irqtrace this section: we
 	 * will soon execute iret and the tracer was already set to
-	 * the irqstate after the iret */
+	 * the irqstate after the IRET:
+	 */
 	DISABLE_INTERRUPTS(CLBR_EAX)
 	lss	(%esp), %esp		/* switch to espfix segment */
 	jmp	restore_nocheck
@@ -564,7 +558,7 @@ END(work_pending)
 	# perform syscall exit tracing
 	ALIGN
 syscall_trace_entry:
-	movl	$-ENOSYS,PT_EAX(%esp)
+	movl	$-ENOSYS, PT_EAX(%esp)
 	movl	%esp, %eax
 	call	syscall_trace_enter
 	/* What it returned is what we'll actually use. */
@@ -589,17 +583,17 @@ END(syscall_exit_work)
 syscall_fault:
 	ASM_CLAC
 	GET_THREAD_INFO(%ebp)
-	movl	$-EFAULT,PT_EAX(%esp)
+	movl	$-EFAULT, PT_EAX(%esp)
 	jmp	resume_userspace
 END(syscall_fault)

 syscall_badsys:
-	movl	$-ENOSYS,%eax
+	movl	$-ENOSYS, %eax
 	jmp	syscall_after_call
 END(syscall_badsys)

 sysenter_badsys:
-	movl	$-ENOSYS,%eax
+	movl	$-ENOSYS, %eax
 	jmp	sysenter_after_call
 END(sysenter_badsys)

@@ -659,10 +653,10 @@ END(irq_entries_start)
 	.p2align CONFIG_X86_L1_CACHE_SHIFT
 common_interrupt:
 	ASM_CLAC
-	addl	$-0x80,(%esp)	/* Adjust vector into the [-256,-1] range */
+	addl	$-0x80, (%esp)	/* Adjust vector into the [-256, -1] range */
 	SAVE_ALL
 	TRACE_IRQS_OFF
-	movl	%esp,%eax
+	movl	%esp, %eax
 	call	do_IRQ
 	jmp	ret_from_intr
 ENDPROC(common_interrupt)
@@ -673,17 +667,16 @@ ENTRY(name)	\
 	pushl	$~(nr);		\
 	SAVE_ALL;		\
 	TRACE_IRQS_OFF		\
-	movl	%esp,%eax;	\
+	movl	%esp, %eax;	\
 	call	fn;		\
 	jmp	ret_from_intr;	\
 ENDPROC(name)


 #ifdef CONFIG_TRACING
-#define TRACE_BUILD_INTERRUPT(name, nr)		\
-	BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name)
+# define TRACE_BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name)
 #else
-#define TRACE_BUILD_INTERRUPT(name, nr)
+# define TRACE_BUILD_INTERRUPT(name, nr)
 #endif

 #define BUILD_INTERRUPT(name, nr)		\
@@ -809,8 +802,10 @@ ENTRY(spurious_interrupt_bug)
 END(spurious_interrupt_bug)

 #ifdef CONFIG_XEN
-/* Xen doesn't set %esp to be precisely what the normal sysenter
-   entrypoint expects, so fix it up before using the normal path. */
+/*
+ * Xen doesn't set %esp to be precisely what the normal SYSENTER
+ * entry point expects, so fix it up before using the normal path.
+ */
 ENTRY(xen_sysenter_target)
 	addl	$5*4, %esp		/* remove xen-provided frame */
 	jmp	sysenter_past_esp
@@ -820,15 +815,17 @@ ENTRY(xen_hypervisor_callback)
 	SAVE_ALL
 	TRACE_IRQS_OFF

-	/* Check to see if we got the event in the critical
-	   region in xen_iret_direct, after we've reenabled
-	   events and checked for pending events.  This simulates
-	   iret instruction's behaviour where it delivers a
-	   pending interrupt when enabling interrupts. */
-	movl	PT_EIP(%esp),%eax
-	cmpl	$xen_iret_start_crit,%eax
+	/*
+	 * Check to see if we got the event in the critical
+	 * region in xen_iret_direct, after we've reenabled
+	 * events and checked for pending events. This simulates
+	 * iret instruction's behaviour where it delivers a
+	 * pending interrupt when enabling interrupts:
+	 */
+	movl	PT_EIP(%esp), %eax
+	cmpl	$xen_iret_start_crit, %eax
 	jb	1f
-	cmpl	$xen_iret_end_crit,%eax
+	cmpl	$xen_iret_end_crit, %eax
 	jae	1f

 	jmp	xen_iret_crit_fixup
@@ -842,52 +839,54 @@ ENTRY(xen_do_upcall)
 	jmp	ret_from_intr
 ENDPROC(xen_hypervisor_callback)

-# Hypervisor uses this for application faults while it executes.
-# We get here for two reasons:
-#  1. Fault while reloading DS, ES, FS or GS
-#  2. Fault while executing IRET
-# Category 1 we fix up by reattempting the load, and zeroing the segment
-# register if the load fails.
-# Category 2 we fix up by jumping to do_iret_error. We cannot use the
-# normal Linux return path in this case because if we use the IRET hypercall
-# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
-# We distinguish between categories by maintaining a status value in EAX.
+/*
+ * Hypervisor uses this for application faults while it executes.
+ * We get here for two reasons:
+ *  1. Fault while reloading DS, ES, FS or GS
+ *  2. Fault while executing IRET
+ * Category 1 we fix up by reattempting the load, and zeroing the segment
+ * register if the load fails.
+ * Category 2 we fix up by jumping to do_iret_error. We cannot use the
+ * normal Linux return path in this case because if we use the IRET hypercall
+ * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
+ * We distinguish between categories by maintaining a status value in EAX.
+ */
 ENTRY(xen_failsafe_callback)
 	pushl	%eax
-	movl	$1,%eax
-1:	mov	4(%esp),%ds
-2:	mov	8(%esp),%es
-3:	mov	12(%esp),%fs
-4:	mov	16(%esp),%gs
+	movl	$1, %eax
+1:	mov	4(%esp), %ds
+2:	mov	8(%esp), %es
+3:	mov	12(%esp), %fs
+4:	mov	16(%esp), %gs
 	/* EAX == 0 => Category 1 (Bad segment)
 	   EAX != 0 => Category 2 (Bad IRET) */
-	testl	%eax,%eax
+	testl	%eax, %eax
 	popl	%eax
-	lea	16(%esp),%esp
+	lea	16(%esp), %esp
 	jz	5f
 	jmp	iret_exc
 5:	pushl	$-1			/* orig_ax = -1 => not a system call */
 	SAVE_ALL
 	jmp	ret_from_exception

-.section .fixup,"ax"
-6:	xorl	%eax,%eax
-	movl	%eax,4(%esp)
+.section .fixup, "ax"
+6:	xorl	%eax, %eax
+	movl	%eax, 4(%esp)
 	jmp	1b
-7:	xorl	%eax,%eax
-	movl	%eax,8(%esp)
+7:	xorl	%eax, %eax
+	movl	%eax, 8(%esp)
 	jmp	2b
-8:	xorl	%eax,%eax
-	movl	%eax,12(%esp)
+8:	xorl	%eax, %eax
+	movl	%eax, 12(%esp)
 	jmp	3b
-9:	xorl	%eax,%eax
-	movl	%eax,16(%esp)
+9:	xorl	%eax, %eax
+	movl	%eax, 16(%esp)
 	jmp	4b
 .previous
-	_ASM_EXTABLE(1b,6b)
-	_ASM_EXTABLE(2b,7b)
-	_ASM_EXTABLE(3b,8b)
-	_ASM_EXTABLE(4b,9b)
+	_ASM_EXTABLE(1b, 6b)
+	_ASM_EXTABLE(2b, 7b)
+	_ASM_EXTABLE(3b, 8b)
+	_ASM_EXTABLE(4b, 9b)
 ENDPROC(xen_failsafe_callback)

 BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
@@ -923,7 +922,7 @@ ENTRY(ftrace_caller)
 ftrace_call:
 	call	ftrace_stub

-	addl	$4,%esp			/* skip NULL pointer */
+	addl	$4, %esp		/* skip NULL pointer */
 	popl	%edx
 	popl	%ecx
 	popl	%eax
@@ -967,7 +966,7 @@ ENTRY(ftrace_regs_caller)
 	movl	13*4(%esp), %eax	/* Get the saved flags */
 	movl	%eax, 14*4(%esp)	/* Move saved flags into regs->flags location */
					/* clobbering return ip */
-	movl	$__KERNEL_CS,13*4(%esp)
+	movl	$__KERNEL_CS, 13*4(%esp)

 	movl	12*4(%esp), %eax	/* Load ip (1st parameter) */
 	subl	$MCOUNT_INSN_SIZE, %eax	/* Adjust ip */
@@ -1105,7 +1104,7 @@ error_code:
 	movl	%ecx, %ds
 	movl	%ecx, %es
 	TRACE_IRQS_OFF
-	movl	%esp,%eax		# pt_regs pointer
+	movl	%esp, %eax		# pt_regs pointer
 	call	*%edi
 	jmp	ret_from_exception
 END(page_fault)
@@ -1135,15 +1134,15 @@ END(page_fault)

 ENTRY(debug)
 	ASM_CLAC
-	cmpl	$entry_SYSENTER_32,(%esp)
+	cmpl	$entry_SYSENTER_32, (%esp)
 	jne	debug_stack_correct
 	FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
 debug_stack_correct:
 	pushl	$-1			# mark this as an int
 	SAVE_ALL
 	TRACE_IRQS_OFF
-	xorl	%edx,%edx		# error code 0
-	movl	%esp,%eax		# pt_regs pointer
+	xorl	%edx, %edx		# error code 0
+	movl	%esp, %eax		# pt_regs pointer
 	call	do_debug
 	jmp	ret_from_exception
 END(debug)
@@ -1165,24 +1164,25 @@ ENTRY(nmi)
 	popl	%eax
 	je	nmi_espfix_stack
 #endif
-	cmpl	$entry_SYSENTER_32,(%esp)
+	cmpl	$entry_SYSENTER_32, (%esp)
 	je	nmi_stack_fixup
 	pushl	%eax
-	movl	%esp,%eax
-	/* Do not access memory above the end of our stack page,
+	movl	%esp, %eax
+	/*
+	 * Do not access memory above the end of our stack page,
 	 * it might not exist.
 	 */
-	andl	$(THREAD_SIZE-1),%eax
-	cmpl	$(THREAD_SIZE-20),%eax
+	andl	$(THREAD_SIZE-1), %eax
+	cmpl	$(THREAD_SIZE-20), %eax
 	popl	%eax
 	jae	nmi_stack_correct
-	cmpl	$entry_SYSENTER_32,12(%esp)
+	cmpl	$entry_SYSENTER_32, 12(%esp)
 	je	nmi_debug_stack_check
 nmi_stack_correct:
 	pushl	%eax
 	SAVE_ALL
-	xorl	%edx,%edx		# zero error code
-	movl	%esp,%eax		# pt_regs pointer
+	xorl	%edx, %edx		# zero error code
+	movl	%esp, %eax		# pt_regs pointer
 	call	do_nmi
 	jmp	restore_all_notrace

@@ -1191,11 +1191,11 @@ nmi_stack_fixup:
 	jmp	nmi_stack_correct

 nmi_debug_stack_check:
-	cmpw	$__KERNEL_CS,16(%esp)
+	cmpw	$__KERNEL_CS, 16(%esp)
 	jne	nmi_stack_correct
-	cmpl	$debug,(%esp)
+	cmpl	$debug, (%esp)
 	jb	nmi_stack_correct
-	cmpl	$debug_esp_fix_insn,(%esp)
+	cmpl	$debug_esp_fix_insn, (%esp)
 	ja	nmi_stack_correct
 	FIX_STACK 24, nmi_stack_correct, 1
 	jmp	nmi_stack_correct
@@ -1215,7 +1215,7 @@ nmi_espfix_stack:
 	pushl	%eax
 	SAVE_ALL
 	FIXUP_ESPFIX_STACK		# %eax == %esp
-	xorl	%edx,%edx		# zero error code
+	xorl	%edx, %edx		# zero error code
 	call	do_nmi
 	RESTORE_REGS
 	lss	12+4(%esp), %esp	# back to espfix stack
@@ -1228,8 +1228,8 @@ ENTRY(int3)
 	pushl	$-1			# mark this as an int
 	SAVE_ALL
 	TRACE_IRQS_OFF
-	xorl	%edx,%edx		# zero error code
-	movl	%esp,%eax		# pt_regs pointer
+	xorl	%edx, %edx		# zero error code
+	movl	%esp, %eax		# pt_regs pointer
 	call	do_int3
 	jmp	ret_from_exception
 END(int3)
@@ -1246,4 +1246,3 @@ ENTRY(async_page_fault)
 	jmp	error_code
 END(async_page_fault)
 #endif
-