powerpc: Create mtmsrd_isync()
mtmsrd_isync() will do an mtmsrd followed by an isync on older
processors. On newer processors we avoid the isync via a feature fixup.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
commit 611b0e5c19
parent b86fd2bd03
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1193,12 +1193,20 @@
 #define __mtmsrd(v, l)	asm volatile("mtmsrd %0," __stringify(l) \
 				     : : "r" (v) : "memory")
 #define mtmsr(v)	__mtmsrd((v), 0)
+#define __MTMSR		"mtmsrd"
 #else
 #define mtmsr(v)	asm volatile("mtmsr %0" : \
 			     : "r" ((unsigned long)(v)) \
 			     : "memory")
+#define __MTMSR		"mtmsr"
 #endif
 
+static inline void mtmsr_isync(unsigned long val)
+{
+	asm volatile(__MTMSR " %0; " ASM_FTR_IFCLR("isync", "nop", %1) : :
+			"r" (val), "i" (CPU_FTR_ARCH_206) : "memory");
+}
+
 #define mfspr(rn)	({unsigned long rval; \
 				asm volatile("mfspr %0," __stringify(rn) \
 					: "=r" (rval)); rval;})
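Note: the new helper leans on ASM_FTR_IFCLR(), so the isync is assembled into the instruction stream and the boot-time feature fixup patches it into a nop on CPUs that advertise CPU_FTR_ARCH_206 (ISA 2.06 and later), which is what the commit message means by avoiding the isync on newer processors. As a rough, non-authoritative sketch, the same effect written with a runtime feature test instead of code patching would look like the following; mtmsr_isync_sketch() is an invented name for illustration, while mtmsr(), cpu_has_feature() and CPU_FTR_ARCH_206 are the usual powerpc kernel definitions.

	/* Illustration only, not part of the commit: a plain-C view of
	 * what mtmsr_isync() achieves, using a runtime feature check
	 * rather than the boot-time patching done by ASM_FTR_IFCLR(). */
	#include <asm/reg.h>
	#include <asm/cputable.h>

	static inline void mtmsr_isync_sketch(unsigned long val)
	{
		mtmsr(val);	/* expands to mtmsrd on 64-bit, mtmsr on 32-bit */

		/* Pre-ISA 2.06 CPUs need a context-synchronising isync;
		 * the real helper has this patched to a nop instead of
		 * taking a branch. */
		if (!cpu_has_feature(CPU_FTR_ARCH_206))
			asm volatile("isync" : : : "memory");
	}

The real helper pays no runtime branch at all: the fixup is applied once during early boot.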
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -130,7 +130,10 @@ void enable_kernel_fp(void)
 		check_if_tm_restore_required(current);
 		giveup_fpu(current);
 	} else {
-		giveup_fpu(NULL);	/* just enables FP for kernel */
+		u64 oldmsr = mfmsr();
+
+		if (!(oldmsr & MSR_FP))
+			mtmsr_isync(oldmsr | MSR_FP);
 	}
 }
 EXPORT_SYMBOL(enable_kernel_fp);
@@ -144,7 +147,10 @@ void enable_kernel_altivec(void)
 		check_if_tm_restore_required(current);
 		giveup_altivec(current);
 	} else {
-		giveup_altivec_notask();
+		u64 oldmsr = mfmsr();
+
+		if (!(oldmsr & MSR_VEC))
+			mtmsr_isync(oldmsr | MSR_VEC);
 	}
 }
 EXPORT_SYMBOL(enable_kernel_altivec);
@@ -173,10 +179,14 @@ void enable_kernel_vsx(void)
 {
 	WARN_ON(preemptible());
 
-	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
+	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) {
 		giveup_vsx(current);
-	else
-		giveup_vsx(NULL);	/* just enable vsx for kernel - force */
+	} else {
+		u64 oldmsr = mfmsr();
+
+		if (!(oldmsr & MSR_VSX))
+			mtmsr_isync(oldmsr | MSR_VSX);
+	}
 }
 EXPORT_SYMBOL(enable_kernel_vsx);
 
@@ -209,10 +219,14 @@ void enable_kernel_spe(void)
 {
 	WARN_ON(preemptible());
 
-	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
+	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
 		giveup_spe(current);
-	else
-		giveup_spe(NULL);	/* just enable SPE for kernel - force */
+	} else {
+		u64 oldmsr = mfmsr();
+
+		if (!(oldmsr & MSR_SPE))
+			mtmsr_isync(oldmsr | MSR_SPE);
+	}
 }
 EXPORT_SYMBOL(enable_kernel_spe);
 
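For reference, a hedged usage sketch (not part of this commit) of how kernel code typically reaches these paths: the enable_kernel_*() helpers WARN when called preemptible, so callers pin the CPU first. do_kernel_fp_work() is a made-up name for illustration; preempt_disable()/preempt_enable() and enable_kernel_fp() are the standard kernel APIs.

	#include <linux/preempt.h>
	#include <asm/switch_to.h>

	/* Hypothetical caller, for illustration only. */
	static void do_kernel_fp_work(void)
	{
		preempt_disable();	/* enable_kernel_fp() WARNs if preemptible */
		enable_kernel_fp();	/* sets MSR_FP via mtmsr_isync() only when needed */

		/* ... kernel FP code runs here ... */

		preempt_enable();
	}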