powerpc: Move part of giveup_vsx into c
Move the MSR modification into C. Removing it from the assembly
function will allow us to avoid costly MSR writes by batching them
up.

Check the FP and VMX bits before calling the relevant giveup_*()
function. This makes giveup_vsx() and flush_vsx_to_thread() behave
more like their sister functions, and allows us to use
flush_vsx_to_thread() in the signal code.

Move the check_if_tm_restore_required() check in as well.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
commit a7d623d4d0
parent 98da581e08
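To make the batching idea concrete, here is a minimal sketch (not part
of the commit) of the idiom the new C giveup_vsx() follows: read the
MSR once, OR in every facility bit needed, and issue the expensive
MSR write only when something actually changed. The helper name
msr_batch_enable() is made up for illustration; mfmsr() and
mtmsr_isync() are the real helpers the diff below relies on.

	/* Hypothetical helper, for illustration only */
	static inline void msr_batch_enable(u64 bits)
	{
		u64 oldmsr = mfmsr();		/* one MSR read */
		u64 newmsr = oldmsr | bits;	/* batch FP|VEC|VSX together */

		/* skip the costly write when the bits are already set */
		if (oldmsr != newmsr)
			mtmsr_isync(newmsr);
	}

With the MSR handled in C, giveup_vsx() can make one combined update
covering MSR_FP, MSR_VEC and MSR_VSX, instead of each assembly helper
doing its own mfmsr/mtmsrd/isync sequence.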
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -205,6 +205,25 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
 #endif /* CONFIG_ALTIVEC */
 
 #ifdef CONFIG_VSX
+void giveup_vsx(struct task_struct *tsk)
+{
+	u64 oldmsr = mfmsr();
+	u64 newmsr;
+
+	check_if_tm_restore_required(tsk);
+
+	newmsr = oldmsr | (MSR_FP|MSR_VEC|MSR_VSX);
+	if (oldmsr != newmsr)
+		mtmsr_isync(newmsr);
+
+	if (tsk->thread.regs->msr & MSR_FP)
+		__giveup_fpu(tsk);
+	if (tsk->thread.regs->msr & MSR_VEC)
+		__giveup_altivec(tsk);
+	__giveup_vsx(tsk);
+}
+EXPORT_SYMBOL(giveup_vsx);
+
 void enable_kernel_vsx(void)
 {
 	WARN_ON(preemptible());
@@ -220,15 +239,6 @@ void enable_kernel_vsx(void)
 }
 EXPORT_SYMBOL(enable_kernel_vsx);
 
-void giveup_vsx(struct task_struct *tsk)
-{
-	check_if_tm_restore_required(tsk);
-	giveup_fpu(tsk);
-	giveup_altivec(tsk);
-	__giveup_vsx(tsk);
-}
-EXPORT_SYMBOL(giveup_vsx);
-
 void flush_vsx_to_thread(struct task_struct *tsk)
 {
 	if (tsk->thread.regs) {
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -458,7 +458,7 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
 	 * contains valid data
 	 */
 	if (current->thread.used_vsr && ctx_has_vsx_region) {
-		__giveup_vsx(current);
+		flush_vsx_to_thread(current);
 		if (copy_vsx_to_user(&frame->mc_vsregs, current))
 			return 1;
 		msr |= MSR_VSX;
@@ -606,7 +606,7 @@ static int save_tm_user_regs(struct pt_regs *regs,
 	 * contains valid data
 	 */
 	if (current->thread.used_vsr) {
-		__giveup_vsx(current);
+		flush_vsx_to_thread(current);
 		if (copy_vsx_to_user(&frame->mc_vsregs, current))
 			return 1;
 		if (msr & MSR_VSX) {
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -147,7 +147,7 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
 	 * VMX data.
 	 */
 	if (current->thread.used_vsr && ctx_has_vsx_region) {
-		__giveup_vsx(current);
+		flush_vsx_to_thread(current);
 		v_regs += ELF_NVRREG;
 		err |= copy_vsx_to_user(v_regs, current);
 		/* set MSR_VSX in the MSR value in the frame to
@@ -270,7 +270,7 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
 	 * VMX data.
 	 */
 	if (current->thread.used_vsr) {
-		__giveup_vsx(current);
+		flush_vsx_to_thread(current);
 		v_regs += ELF_NVRREG;
 		tm_v_regs += ELF_NVRREG;
 
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -177,14 +177,8 @@ _GLOBAL(load_up_vsx)
  * __giveup_vsx(tsk)
  * Disable VSX for the task given as the argument.
  * Does NOT save vsx registers.
- * Enables the VSX for use in the kernel on return.
  */
 _GLOBAL(__giveup_vsx)
-	mfmsr	r5
-	oris	r5,r5,MSR_VSX@h
-	mtmsrd	r5			/* enable use of VSX now */
-	isync
-
 	addi	r3,r3,THREAD		/* want THREAD of task */
 	ld	r5,PT_REGS(r3)
 	cmpdi	0,r5,0