x86/fpu: Factor out fpu__flush_thread() from flush_thread()
flush_thread() open codes a lot of FPU internals - create a separate
function for it in fpu/core.c.

Turns out that this does not hurt performance:

   text       data     bss      dec       hex     filename
   11843039   1884440  1130496  14857975  e2b6f7  vmlinux.before
   11843039   1884440  1130496  14857975  e2b6f7  vmlinux.after

and since this is a slowpath clarity comes first anyway.

We can reconsider inlining decisions after the FPU code has been
cleaned up.

Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 11ad19277e
commit 81683cc827
@@ -20,6 +20,7 @@ struct user_i387_struct;
 
 extern int fpstate_alloc_init(struct task_struct *curr);
 extern void fpstate_init(struct fpu *fpu);
+extern void fpu__flush_thread(struct task_struct *tsk);
 
 extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
 extern void math_state_restore(void);
@@ -227,6 +227,23 @@ static int fpu__unlazy_stopped(struct task_struct *child)
 	return 0;
 }
 
+void fpu__flush_thread(struct task_struct *tsk)
+{
+	if (!use_eager_fpu()) {
+		/* FPU state will be reallocated lazily at the first use. */
+		drop_fpu(tsk);
+		fpstate_free(&tsk->thread.fpu);
+	} else {
+		if (!tsk_used_math(tsk)) {
+			/* kthread execs. TODO: cleanup this horror. */
+			if (WARN_ON(fpstate_alloc_init(tsk)))
+				force_sig(SIGKILL, tsk);
+			user_fpu_begin();
+		}
+		restore_init_xstate();
+	}
+}
+
 /*
  * The xstateregs_active() routine is the same as the fpregs_active() routine,
  * as the "regset->n" for the xstate regset will be updated based on the feature
@@ -146,19 +146,7 @@ void flush_thread(void)
 	flush_ptrace_hw_breakpoint(tsk);
 	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
 
-	if (!use_eager_fpu()) {
-		/* FPU state will be reallocated lazily at the first use. */
-		drop_fpu(tsk);
-		fpstate_free(&tsk->thread.fpu);
-	} else {
-		if (!tsk_used_math(tsk)) {
-			/* kthread execs. TODO: cleanup this horror. */
-			if (WARN_ON(fpstate_alloc_init(tsk)))
-				force_sig(SIGKILL, tsk);
-			user_fpu_begin();
-		}
-		restore_init_xstate();
-	}
+	fpu__flush_thread(tsk);
 }
 
 static void hard_disable_TSC(void)