x86/fpu: Remove struct fpu::counter
With the lazy FPU code gone, we no longer use the counter field
in struct fpu for anything. Get rid of it.

Signed-off-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: pbonzini@redhat.com
Link: http://lkml.kernel.org/r/1475627678-20788-6-git-send-email-riel@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 3913cc3507
parent c592b57347
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -581,16 +581,13 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
 		/* Don't change CR0.TS if we just switch! */
 		if (fpu.preload) {
-			new_fpu->counter++;
 			__fpregs_activate(new_fpu);
 			trace_x86_fpu_regs_activated(new_fpu);
 			prefetch(&new_fpu->state);
 		}
 	} else {
-		old_fpu->counter = 0;
 		old_fpu->last_cpu = -1;
 		if (fpu.preload) {
-			new_fpu->counter++;
 			if (fpu_want_lazy_restore(new_fpu, cpu))
 				fpu.preload = 0;
 			else
 				prefetch(&new_fpu->state);
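For context: fpu_want_lazy_restore(), referenced in the hunk above but not changed
by it, asks whether this task's FPU registers are still live on the current CPU, so
the restore can be skipped. A minimal standalone sketch of that ownership check; all
demo_* names and the two-CPU toy model are illustrative, not kernel code:

	#include <stdbool.h>
	#include <stdio.h>

	struct demo_fpu {
		int last_cpu;	/* CPU that last held this context's registers */
	};

	/* Toy per-CPU "owner" table; the kernel uses a per-CPU variable. */
	static struct demo_fpu *demo_fpregs_owner[2];

	/*
	 * Registers are still ours if this CPU's owner slot points at this
	 * context and the context also last ran on this CPU.
	 */
	static bool demo_want_lazy_restore(struct demo_fpu *fpu, int cpu)
	{
		return demo_fpregs_owner[cpu] == fpu && fpu->last_cpu == cpu;
	}

	int main(void)
	{
		struct demo_fpu task = { .last_cpu = 0 };

		demo_fpregs_owner[0] = &task;
		printf("same CPU:  %d\n", demo_want_lazy_restore(&task, 0)); /* 1 */
		printf("other CPU: %d\n", demo_want_lazy_restore(&task, 1)); /* 0 */
		return 0;
	}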
--- a/arch/x86/include/asm/fpu/types.h
+++ b/arch/x86/include/asm/fpu/types.h
@@ -321,17 +321,6 @@ struct fpu {
 	 */
 	unsigned char			fpregs_active;
 
-	/*
-	 * @counter:
-	 *
-	 * This counter contains the number of consecutive context switches
-	 * during which the FPU stays used. If this is over a threshold, the
-	 * lazy FPU restore logic becomes eager, to save the trap overhead.
-	 * This is an unsigned char so that after 256 iterations the counter
-	 * wraps and the context switch behavior turns lazy again; this is to
-	 * deal with bursty apps that only use the FPU for a short time:
-	 */
-	unsigned char			counter;
 	/*
 	 * @state:
 	 *
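The removed comment leans on unsigned char wraparound: after 256 increments the
counter is back at 0, so a long-running FPU user periodically looks "fresh" again
and the switch behavior turns lazy. A standalone illustration of just that wrap
(illustrative, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		unsigned char counter = 0;	/* same width as the removed field */
		int i;

		for (i = 0; i < 256; i++)
			counter++;

		/* An 8-bit counter wraps modulo 256, so this prints 0. */
		printf("counter after 256 increments: %u\n", (unsigned)counter);
		return 0;
	}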
--- a/arch/x86/include/asm/trace/fpu.h
+++ b/arch/x86/include/asm/trace/fpu.h
@@ -14,7 +14,6 @@ DECLARE_EVENT_CLASS(x86_fpu,
 		__field(struct fpu *, fpu)
 		__field(bool, fpregs_active)
 		__field(bool, fpstate_active)
-		__field(int, counter)
 		__field(u64, xfeatures)
 		__field(u64, xcomp_bv)
 	),
@@ -23,17 +22,15 @@ DECLARE_EVENT_CLASS(x86_fpu,
 		__entry->fpu		= fpu;
 		__entry->fpregs_active	= fpu->fpregs_active;
 		__entry->fpstate_active	= fpu->fpstate_active;
-		__entry->counter	= fpu->counter;
 		if (boot_cpu_has(X86_FEATURE_OSXSAVE)) {
 			__entry->xfeatures = fpu->state.xsave.header.xfeatures;
 			__entry->xcomp_bv  = fpu->state.xsave.header.xcomp_bv;
 		}
 	),
-	TP_printk("x86/fpu: %p fpregs_active: %d fpstate_active: %d counter: %d xfeatures: %llx xcomp_bv: %llx",
+	TP_printk("x86/fpu: %p fpregs_active: %d fpstate_active: %d xfeatures: %llx xcomp_bv: %llx",
 		__entry->fpu,
 		__entry->fpregs_active,
 		__entry->fpstate_active,
-		__entry->counter,
 		__entry->xfeatures,
 		__entry->xcomp_bv
 	)
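With the field dropped from TP_printk(), a line from this event class now renders
like the following (the pointer and mask values here are made up for illustration):

	x86/fpu: ffff88003d2b4a00 fpregs_active: 1 fpstate_active: 1 xfeatures: 7 xcomp_bv: 0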
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -222,7 +222,6 @@ EXPORT_SYMBOL_GPL(fpstate_init);
 
 int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 {
-	dst_fpu->counter = 0;
 	dst_fpu->fpregs_active = 0;
 	dst_fpu->last_cpu = -1;
 
@@ -430,7 +429,6 @@ void fpu__restore(struct fpu *fpu)
 	trace_x86_fpu_before_restore(fpu);
 	fpregs_activate(fpu);
 	copy_kernel_to_fpregs(&fpu->state);
-	fpu->counter++;
 	trace_x86_fpu_after_restore(fpu);
 	kernel_fpu_enable();
 }
@@ -448,7 +446,6 @@ EXPORT_SYMBOL_GPL(fpu__restore);
 void fpu__drop(struct fpu *fpu)
 {
 	preempt_disable();
-	fpu->counter = 0;
 
 	if (fpu->fpregs_active) {
 		/* Ignore delayed exceptions from user space */