x86-64: Cleanup some assembly entry points

system_call_after_swapgs doesn't really benefit from having alignment
forced onto it - quite the opposite: the native code path falls
through into this label, so the forced alignment merely got a big NOP
instruction needlessly inserted (and executed) in front of it. Xen,
the only user of the separate entry point, can well live with the
branch target lying three bytes into a cache line.
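
For reference, the difference boils down to the ENTRY() and GLOBAL()
assembler macros. A sketch from memory of that era's
include/linux/linkage.h and arch/x86/include/asm/linkage.h follows;
the exact ALIGN directive is per-architecture, but in .text the
padding is emitted as NOPs, which a fall-through path executes:

	/* GLOBAL(): a global label, no alignment padding. */
	#define GLOBAL(name)	\
		.globl name;	\
	name:

	/* ENTRY(): a global label preceded by ALIGN - the source of */
	/* the NOP padding the native fall-through path had to run.  */
	#define ENTRY(name)	\
		.globl name;	\
		ALIGN;		\
	name: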

The compatibility-mode ptregs entry points, for one, can make use of
the GLOBAL() macro and should be suitably aligned. Their shared
continuation point (ia32_ptregs_common), on the other hand, doesn't
need to be global at all, but should remain properly aligned.
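
To illustrate, with the new macro body an invocation such as
PTREGSCALL stub32_vfork, sys_vfork, %rdi (taken from the first hunk
below) expands to roughly:

	ALIGN				/* pad to the alignment boundary */
GLOBAL(stub32_vfork)			/* .globl stub32_vfork; stub32_vfork: */
	leaq	sys_vfork(%rip),%rax	/* RIP-relative address of the handler */
	leaq	-ARGOFFSET+8(%rsp),%rdi	/* pt_regs pointer; 8 for return address */
	jmp	ia32_ptregs_common	/* shared tail: aligned, no longer global */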

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Link: http://lkml.kernel.org/r/4ED4CEEA020000780006407D@nat28.tlf.novell.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit f6b2bc8476
parent 46db09d3fd
Author:    Jan Beulich <jbeulich@suse.com>
Date:      2011-11-29 11:24:10 +00:00
Committer: Ingo Molnar <mingo@elte.hu>

 2 files changed, 5 insertions(+), 4 deletions(-)

--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S

@@ -453,8 +453,8 @@ quiet_ni_syscall:
 	CFI_ENDPROC
 
 	.macro PTREGSCALL label, func, arg
-	.globl \label
-\label:
+	ALIGN
+GLOBAL(\label)
 	leaq \func(%rip),%rax
 	leaq -ARGOFFSET+8(%rsp),\arg	/* 8 for return address */
 	jmp  ia32_ptregs_common
@@ -471,7 +471,8 @@ quiet_ni_syscall:
 	PTREGSCALL stub32_vfork, sys_vfork, %rdi
 	PTREGSCALL stub32_iopl, sys_iopl, %rsi
 
-ENTRY(ia32_ptregs_common)
+	ALIGN
+ia32_ptregs_common:
 	popq %r11
 	CFI_ENDPROC
 	CFI_STARTPROC32 simple

--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S

@@ -465,7 +465,7 @@ ENTRY(system_call)
 	 * after the swapgs, so that it can do the swapgs
 	 * for the guest and jump here on syscall.
 	 */
-ENTRY(system_call_after_swapgs)
+GLOBAL(system_call_after_swapgs)
 	movq	%rsp,PER_CPU_VAR(old_rsp)
 	movq	PER_CPU_VAR(kernel_stack),%rsp
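
For context on the single external user mentioned above: Xen's 64-bit
syscall entry branches to this label. A sketch of the contemporaneous
code in arch/x86/xen/xen-asm_64.S (from memory, not part of this
patch; undo_xen_syscall is a local macro there that restores the
frame layout the native path expects):

ENTRY(xen_syscall_target)
	undo_xen_syscall
	jmp system_call_after_swapgs	/* now GLOBAL(), possibly unaligned */
ENDPROC(xen_syscall_target)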