Merge branch 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fpu updates from Ingo Molnar:
 "This cleans up the FPU fault handling methods to be more robust, and
  moves eligible variables to .init.data"

* 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/fpu: Put a few variables in .init.data
  x86/fpu: Get rid of xstate_fault()
  x86/fpu: Add an XSTATE_OP() macro
commit 6896d9f7e7
@@ -224,18 +224,67 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
 #define XRSTOR		".byte " REX_PREFIX "0x0f,0xae,0x2f"
 #define XRSTORS		".byte " REX_PREFIX "0x0f,0xc7,0x1f"
 
-/* xstate instruction fault handler: */
-#define xstate_fault(__err)		\
-					\
-	".section .fixup,\"ax\"\n"	\
-					\
-	"3:  movl $-2,%[_err]\n"	\
-	"    jmp  2b\n"			\
-					\
-	".previous\n"			\
-					\
-	_ASM_EXTABLE(1b, 3b)		\
-	: [_err] "=r" (__err)
+#define XSTATE_OP(op, st, lmask, hmask, err)				\
+	asm volatile("1:" op "\n\t"					\
+		     "xor %[err], %[err]\n"				\
+		     "2:\n\t"						\
+		     ".pushsection .fixup,\"ax\"\n\t"			\
+		     "3: movl $-2,%[err]\n\t"				\
+		     "jmp 2b\n\t"					\
+		     ".popsection\n\t"					\
+		     _ASM_EXTABLE(1b, 3b)				\
+		     : [err] "=r" (err)					\
+		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
+		     : "memory")
+
+/*
+ * If XSAVES is enabled, it replaces XSAVEOPT because it supports a compact
+ * format and supervisor states in addition to modified optimization in
+ * XSAVEOPT.
+ *
+ * Otherwise, if XSAVEOPT is enabled, XSAVEOPT replaces XSAVE because XSAVEOPT
+ * supports modified optimization which is not supported by XSAVE.
+ *
+ * We use XSAVE as a fallback.
+ *
+ * The 661 label is defined in the ALTERNATIVE* macros as the address of the
+ * original instruction which gets replaced. We need to use it here as the
+ * address of the instruction where we might get an exception at.
+ */
+#define XSTATE_XSAVE(st, lmask, hmask, err)				\
+	asm volatile(ALTERNATIVE_2(XSAVE,				\
+				   XSAVEOPT, X86_FEATURE_XSAVEOPT,	\
+				   XSAVES,   X86_FEATURE_XSAVES)	\
+		     "\n"						\
+		     "xor %[err], %[err]\n"				\
+		     "3:\n"						\
+		     ".pushsection .fixup,\"ax\"\n"			\
+		     "4: movl $-2, %[err]\n"				\
+		     "jmp 3b\n"						\
+		     ".popsection\n"					\
+		     _ASM_EXTABLE(661b, 4b)				\
+		     : [err] "=r" (err)					\
+		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
+		     : "memory")
+
+/*
+ * Use XRSTORS to restore context if it is enabled. XRSTORS supports compact
+ * XSAVE area format.
+ */
+#define XSTATE_XRESTORE(st, lmask, hmask, err)				\
+	asm volatile(ALTERNATIVE(XRSTOR,				\
+				 XRSTORS, X86_FEATURE_XSAVES)		\
+		     "\n"						\
+		     "xor %[err], %[err]\n"				\
+		     "3:\n"						\
+		     ".pushsection .fixup,\"ax\"\n"			\
+		     "4: movl $-2, %[err]\n"				\
+		     "jmp 3b\n"						\
+		     ".popsection\n"					\
+		     _ASM_EXTABLE(661b, 4b)				\
+		     : [err] "=r" (err)					\
+		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
+		     : "memory")
 
 /*
  * This function is called only during boot time when x86 caps are not set
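
The call-site shape of the new XSTATE_OP() macro can be tried outside the
kernel. Below is a minimal user-space sketch (an illustration, not the kernel
code): it keeps the same operand contract as the kernel macro (save area in
%rdi via the "D" constraint, component mask split across %eax/%edx, err
cleared after the instruction), but drops the .fixup/_ASM_EXTABLE machinery,
since in user space a faulting XSAVE simply raises a signal. It assumes an
x86-64 CPU with XSAVE/OSXSAVE enabled and builds with gcc -O2.

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* User-space stand-in for the kernel's XSAVE mnemonic define. */
	#define XSAVE	"xsave (%%rdi)"

	/* Same shape as the kernel macro, minus the fixup/extable parts. */
	#define XSTATE_OP(op, st, lmask, hmask, err)			\
		asm volatile("1: " op "\n\t"				\
			     "xor %[err], %[err]\n"			\
			     "2:\n\t"					\
			     : [err] "=r" (err)				\
			     : "D" (st), "a" (lmask), "d" (hmask)	\
			     : "memory")

	int main(void)
	{
		/* XSAVE requires a 64-byte-aligned save area. */
		void *buf = aligned_alloc(64, 4096);
		int err = -1;

		if (!buf)
			return 1;
		memset(buf, 0, 4096);

		/* Save x87 + SSE state: mask bits 0 and 1. */
		XSTATE_OP(XSAVE, buf, 0x3, 0x0, err);

		/* XSTATE_BV is the first u64 of the XSAVE header at offset 512. */
		printf("err=%d xstate_bv=%#llx\n", err,
		       (unsigned long long)*(uint64_t *)((char *)buf + 512));
		free(buf);
		return err;
	}

Note that err is cleared unconditionally after the instruction; the kernel
version relies on the .fixup path jumping over that clear's label to leave
-2 in err when the instruction faults.
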
@@ -246,22 +295,14 @@ static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
 	u64 mask = -1;
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
-	int err = 0;
+	int err;
 
 	WARN_ON(system_state != SYSTEM_BOOTING);
 
-	if (boot_cpu_has(X86_FEATURE_XSAVES))
-		asm volatile("1:"XSAVES"\n\t"
-			"2:\n\t"
-			     xstate_fault(err)
-			: "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
-			: "memory");
+	if (static_cpu_has_safe(X86_FEATURE_XSAVES))
+		XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
 	else
-		asm volatile("1:"XSAVE"\n\t"
-			"2:\n\t"
-			     xstate_fault(err)
-			: "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
-			: "memory");
+		XSTATE_OP(XSAVE, xstate, lmask, hmask, err);
 
 	/* We should never fault when copying to a kernel buffer: */
 	WARN_ON_FPU(err);
@@ -276,22 +317,14 @@ static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
 	u64 mask = -1;
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
-	int err = 0;
+	int err;
 
 	WARN_ON(system_state != SYSTEM_BOOTING);
 
-	if (boot_cpu_has(X86_FEATURE_XSAVES))
-		asm volatile("1:"XRSTORS"\n\t"
-			"2:\n\t"
-			     xstate_fault(err)
-			: "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
-			: "memory");
+	if (static_cpu_has_safe(X86_FEATURE_XSAVES))
+		XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
 	else
-		asm volatile("1:"XRSTOR"\n\t"
-			"2:\n\t"
-			     xstate_fault(err)
-			: "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
-			: "memory");
+		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
 
 	/* We should never fault when copying from a kernel buffer: */
 	WARN_ON_FPU(err);
@@ -305,33 +338,11 @@ static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
 	u64 mask = -1;
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
-	int err = 0;
+	int err;
 
 	WARN_ON(!alternatives_patched);
 
-	/*
-	 * If xsaves is enabled, xsaves replaces xsaveopt because
-	 * it supports compact format and supervisor states in addition to
-	 * modified optimization in xsaveopt.
-	 *
-	 * Otherwise, if xsaveopt is enabled, xsaveopt replaces xsave
-	 * because xsaveopt supports modified optimization which is not
-	 * supported by xsave.
-	 *
-	 * If none of xsaves and xsaveopt is enabled, use xsave.
-	 */
-	alternative_input_2(
-		"1:"XSAVE,
-		XSAVEOPT,
-		X86_FEATURE_XSAVEOPT,
-		XSAVES,
-		X86_FEATURE_XSAVES,
-		[xstate] "D" (xstate), "a" (lmask), "d" (hmask) :
-		"memory");
-	asm volatile("2:\n\t"
-		     xstate_fault(err)
-		     : "0" (err)
-		     : "memory");
+	XSTATE_XSAVE(xstate, lmask, hmask, err);
 
 	/* We should never fault when copying to a kernel buffer: */
 	WARN_ON_FPU(err);
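
For readers unfamiliar with the alternatives machinery: the XSTATE_XSAVE()
site above is patched once at boot (hence the WARN_ON(!alternatives_patched)
guard), so there is no runtime branch. A rough logical equivalent, using
hypothetical cpu_has() and do_op() helpers purely for illustration:

	/* Sketch only: cpu_has() and do_op() are hypothetical stand-ins.
	 * The real mechanism rewrites the instruction bytes in place at
	 * boot instead of branching on every call. */
	int xsave_dispatch(void *st, u32 lmask, u32 hmask)
	{
		if (cpu_has(X86_FEATURE_XSAVES))	/* compact + supervisor */
			return do_op(XSAVES, st, lmask, hmask);
		if (cpu_has(X86_FEATURE_XSAVEOPT))	/* modified optimization */
			return do_op(XSAVEOPT, st, lmask, hmask);
		return do_op(XSAVE, st, lmask, hmask);	/* baseline fallback */
	}
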
@@ -344,23 +355,9 @@ static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
 {
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
-	int err = 0;
+	int err;
 
-	/*
-	 * Use xrstors to restore context if it is enabled. xrstors supports
-	 * compacted format of xsave area which is not supported by xrstor.
-	 */
-	alternative_input(
-		"1: " XRSTOR,
-		XRSTORS,
-		X86_FEATURE_XSAVES,
-		"D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask)
-		: "memory");
-
-	asm volatile("2:\n"
-		     xstate_fault(err)
-		     : "0" (err)
-		     : "memory");
+	XSTATE_XRESTORE(xstate, lmask, hmask, err);
 
 	/* We should never fault when copying from a kernel buffer: */
 	WARN_ON_FPU(err);
@@ -388,12 +385,10 @@ static inline int copy_xregs_to_user(struct xregs_state __user *buf)
 	if (unlikely(err))
 		return -EFAULT;
 
-	__asm__ __volatile__(ASM_STAC "\n"
-			     "1:"XSAVE"\n"
-			     "2: " ASM_CLAC "\n"
-			     xstate_fault(err)
-			     : "D" (buf), "a" (-1), "d" (-1), "0" (err)
-			     : "memory");
+	stac();
+	XSTATE_OP(XSAVE, buf, -1, -1, err);
+	clac();
 
 	return err;
 }
@@ -405,14 +400,12 @@ static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
 	struct xregs_state *xstate = ((__force struct xregs_state *)buf);
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
-	int err = 0;
+	int err;
 
-	__asm__ __volatile__(ASM_STAC "\n"
-			     "1:"XRSTOR"\n"
-			     "2: " ASM_CLAC "\n"
-			     xstate_fault(err)
-			     : "D" (xstate), "a" (lmask), "d" (hmask), "0" (err)
-			     : "memory");	/* memory required? */
+	stac();
+	XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
+	clac();
+
 	return err;
 }
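
The stac()/clac() pairs in the two hunks above replace the ASM_STAC/ASM_CLAC
strings that used to be spliced into each asm template: they toggle EFLAGS.AC
to open a SMAP window around the user-memory access, bracketing the new
XSTATE_OP() call instead of living inside it. A minimal sketch of what such
wrappers amount to (an assumption for illustration; the kernel's versions in
arch/x86/include/asm/smap.h are alternatives-patched to no-ops on CPUs
without SMAP, and STAC/CLAC only execute at ring 0):

	/* Sketch only: STAC/CLAC are privileged instructions, so this
	 * compiles anywhere but can only execute in kernel context. */
	static inline void stac(void)
	{
		asm volatile("stac" ::: "memory");	/* set EFLAGS.AC: allow user access */
	}

	static inline void clac(void)
	{
		asm volatile("clac" ::: "memory");	/* clear EFLAGS.AC: forbid user access */
	}
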
@@ -197,7 +197,7 @@ static void __init fpu__init_task_struct_size(void)
  */
 static void __init fpu__init_system_xstate_size_legacy(void)
 {
-	static int on_boot_cpu = 1;
+	static int on_boot_cpu __initdata = 1;
 
 	WARN_ON_FPU(!on_boot_cpu);
 	on_boot_cpu = 0;
@@ -287,7 +287,7 @@ __setup("eagerfpu=", eager_fpu_setup);
  */
 static void __init fpu__init_system_ctx_switch(void)
 {
-	static bool on_boot_cpu = 1;
+	static bool on_boot_cpu __initdata = 1;
 
 	WARN_ON_FPU(!on_boot_cpu);
 	on_boot_cpu = 0;
@@ -297,7 +297,7 @@ static void __init setup_xstate_comp(void)
  */
 static void __init setup_init_fpu_buf(void)
 {
-	static int on_boot_cpu = 1;
+	static int on_boot_cpu __initdata = 1;
 
 	WARN_ON_FPU(!on_boot_cpu);
 	on_boot_cpu = 0;
@@ -608,7 +608,7 @@ static void fpu__init_disable_system_xstate(void)
 void __init fpu__init_system_xstate(void)
 {
 	unsigned int eax, ebx, ecx, edx;
-	static int on_boot_cpu = 1;
+	static int on_boot_cpu __initdata = 1;
 	int err;
 
 	WARN_ON_FPU(!on_boot_cpu);
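
The .init.data hunks above all follow the same pattern: boot-only guard
variables gain the __initdata attribute so they are discarded when the kernel
frees init memory after boot ("Freeing unused kernel memory"). A minimal
sketch of what the attribute amounts to, assuming the usual
include/linux/init.h definition:

	/* __initdata places the variable in the .init.data section, which
	 * the kernel releases once booting is complete.  Write-once boot
	 * guards like on_boot_cpu therefore cost nothing at runtime. */
	#define __section(S)	__attribute__((__section__(#S)))
	#define __initdata	__section(.init.data)

	static int on_boot_cpu __initdata = 1;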