x86/fpu: Micro-optimize the copy_xregs_to_kernel*() and copy_kernel_to_xregs*() functions

The copy_xregs_to_kernel*() and copy_kernel_to_xregs*() functions are used
to copy FPU registers to kernel memory and vice versa.

They are never expected to fail, yet they have a return code, mostly so that
they can share the assembly macros with the copy*user*() functions.

This error code is then silently ignored by the context switching code and
other callers, which made the bug fixed in:

  b8c1b8ea7b ("x86/fpu: Fix FPU state save area alignment bug")

harder to fix than necessary.

So remove the return values and check for faults only when FPU debugging
is enabled in the .config.
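
For reference, the debug check relies on WARN_ON_FPU() compiling away in
non-debug builds. A minimal sketch of such a config-guarded macro (assuming
the usual CONFIG_X86_DEBUG_FPU guard; the real definition in the FPU headers
may differ in detail):

   /* Sketch only: debug builds warn on an unexpected fault, others compile the check away: */
   #ifdef CONFIG_X86_DEBUG_FPU
   # define WARN_ON_FPU(x)  WARN_ON_ONCE(x)
   #else
   # define WARN_ON_FPU(x)  ({ (void)(x); 0; })
   #endif

With FPU debugging disabled the argument is still evaluated but no test or
branch is emitted.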

This improves the eagerfpu context switching fast path by a couple of
instructions, when FPU debugging is disabled:

   ffffffff810407fa:      89 c2                   mov    %eax,%edx
   ffffffff810407fc:      48 0f ae 2f             xrstor64 (%rdi)
   ffffffff81040800:      31 c0                   xor    %eax,%eax
  -ffffffff81040802:      eb 0a                   jmp    ffffffff8104080e <__switch_to+0x321>
  +ffffffff81040802:      eb 16                   jmp    ffffffff8104081a <__switch_to+0x32d>
   ffffffff81040804:      31 c0                   xor    %eax,%eax
   ffffffff81040806:      48 0f ae 8b c0 05 00    fxrstor64 0x5c0(%rbx)
   ffffffff8104080d:      00

Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar 2015-05-24 09:23:25 +02:00
parent 685c961624
commit 8c05f05edb
1 changed file with 23 additions and 14 deletions

@@ -232,7 +232,7 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
  * This function is called only during boot time when x86 caps are not set
  * up and alternative can not be used yet.
  */
-static inline int copy_xregs_to_kernel_booting(struct xregs_state *xstate)
+static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
 {
         u64 mask = -1;
         u32 lmask = mask;
@@ -253,14 +253,16 @@ static inline int copy_xregs_to_kernel_booting(struct xregs_state *xstate)
                 xstate_fault(err)
                 : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
                 : "memory");
-        return err;
+        /* We should never fault when copying to a kernel buffer: */
+        WARN_ON_FPU(err);
 }
 /*
  * This function is called only during boot time when x86 caps are not set
  * up and alternative can not be used yet.
  */
-static inline int copy_kernel_to_xregs_booting(struct xregs_state *xstate, u64 mask)
+static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate, u64 mask)
 {
         u32 lmask = mask;
         u32 hmask = mask >> 32;
@@ -280,13 +282,15 @@ static inline int copy_kernel_to_xregs_booting(struct xregs_state *xstate, u64 mask)
                 xstate_fault(err)
                 : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
                 : "memory");
-        return err;
+        /* We should never fault when copying from a kernel buffer: */
+        WARN_ON_FPU(err);
 }
 /*
  * Save processor xstate to xsave area.
  */
-static inline int copy_xregs_to_kernel(struct xregs_state *xstate)
+static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
 {
         u64 mask = -1;
         u32 lmask = mask;
@@ -319,13 +323,14 @@ static inline int copy_xregs_to_kernel(struct xregs_state *xstate)
                 : "0" (err)
                 : "memory");
-        return err;
+        /* We should never fault when copying to a kernel buffer: */
+        WARN_ON_FPU(err);
 }
 /*
  * Restore processor xstate from xsave area.
  */
-static inline int copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
+static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
 {
         u32 lmask = mask;
         u32 hmask = mask >> 32;
@@ -347,7 +352,8 @@ static inline int copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
                 : "0" (err)
                 : "memory");
-        return err;
+        /* We should never fault when copying from a kernel buffer: */
+        WARN_ON_FPU(err);
 }
 /*
@@ -433,12 +439,15 @@ static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
 static inline int __copy_fpstate_to_fpregs(struct fpu *fpu)
 {
-        if (use_xsave())
-                return copy_kernel_to_xregs(&fpu->state.xsave, -1);
-        else if (use_fxsr())
-                return copy_kernel_to_fxregs(&fpu->state.fxsave);
-        else
-                return copy_kernel_to_fregs(&fpu->state.fsave);
+        if (use_xsave()) {
+                copy_kernel_to_xregs(&fpu->state.xsave, -1);
+                return 0;
+        } else {
+                if (use_fxsr())
+                        return copy_kernel_to_fxregs(&fpu->state.fxsave);
+                else
+                        return copy_kernel_to_fregs(&fpu->state.fsave);
+        }
 }
 static inline int copy_fpstate_to_fpregs(struct fpu *fpu)
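
As a usage illustration (hypothetical call site, not part of this patch): with
the void variants a restore path simply invokes the helper and relies on the
WARN_ON_FPU() check inside it, instead of receiving and discarding an error
code:

   /* Hypothetical caller, for illustration only: */
   static inline void fpregs_restore_xstate_example(struct fpu *fpu)
   {
           /* Unexpected faults are flagged inside the helper (debug builds only): */
           copy_kernel_to_xregs(&fpu->state.xsave, -1);
   }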