tracing: Kill the dead code in probe_sched_switch() and probe_sched_wakeup()
After the previous patch it is clear that "tracer_enabled" can never be
true, so we can remove the "if (tracer_enabled)" code in
probe_sched_switch() and probe_sched_wakeup(). Plus we can obviously
remove tracer_enabled, ctx_trace, and sched_stopped as well.

Link: http://lkml.kernel.org/p/20140723193503.GA30217@redhat.com

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
commit 458faf0b88 (parent 632537256e)
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -14,12 +14,8 @@
 
 #include "trace.h"
 
-static struct trace_array	*ctx_trace;
-static int __read_mostly	tracer_enabled;
 static int			sched_ref;
 static DEFINE_MUTEX(sched_register_mutex);
-static int			sched_stopped;
-
 
 void
 tracing_sched_switch_trace(struct trace_array *tr,
@@ -52,29 +48,11 @@ tracing_sched_switch_trace(struct trace_array *tr,
 static void
 probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *next)
 {
-	struct trace_array_cpu *data;
-	unsigned long flags;
-	int cpu;
-	int pc;
-
 	if (unlikely(!sched_ref))
 		return;
 
 	tracing_record_cmdline(prev);
 	tracing_record_cmdline(next);
-
-	if (!tracer_enabled || sched_stopped)
-		return;
-
-	pc = preempt_count();
-	local_irq_save(flags);
-	cpu = raw_smp_processor_id();
-	data = per_cpu_ptr(ctx_trace->trace_buffer.data, cpu);
-
-	if (likely(!atomic_read(&data->disabled)))
-		tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);
-
-	local_irq_restore(flags);
 }
 
 void
@@ -108,28 +86,10 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 static void
 probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success)
 {
-	struct trace_array_cpu *data;
-	unsigned long flags;
-	int cpu, pc;
-
 	if (unlikely(!sched_ref))
 		return;
 
 	tracing_record_cmdline(current);
-
-	if (!tracer_enabled || sched_stopped)
-		return;
-
-	pc = preempt_count();
-	local_irq_save(flags);
-	cpu = raw_smp_processor_id();
-	data = per_cpu_ptr(ctx_trace->trace_buffer.data, cpu);
-
-	if (likely(!atomic_read(&data->disabled)))
-		tracing_sched_wakeup_trace(ctx_trace, wakee, current,
-					   flags, pc);
-
-	local_irq_restore(flags);
 }
 
 static int tracing_sched_register(void)
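For reference, here is what the two probes reduce to once this patch is
applied. This is a sketch reconstructed from the kept (context) lines of
the hunks above, not a verbatim copy of the post-patch file:

static void
probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *next)
{
	/* The probe stays registered while some tracer holds a sched_ref;
	 * all that is left to do is record the comm of both tasks. */
	if (unlikely(!sched_ref))
		return;

	tracing_record_cmdline(prev);
	tracing_record_cmdline(next);
}

static void
probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success)
{
	/* Same story: only the cmdline recording survives. */
	if (unlikely(!sched_ref))
		return;

	tracing_record_cmdline(current);
}

With the always-false "if (!tracer_enabled || sched_stopped)" guard gone,
everything it protected (the per-cpu data lookup, the irq save/restore,
and the tracing_sched_*_trace() calls) goes with it, along with the now
unused ctx_trace, tracer_enabled, and sched_stopped variables.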