tracing: Move tracing_sched_{switch,wakeup}() into wakeup tracer
The only code that references tracing_sched_switch_trace() and tracing_sched_wakeup_trace() is the wakeup latency tracer. Those two functions used to belong to the sched_switch tracer, which has long been removed. These functions were left behind because the wakeup latency tracer used them. But since the wakeup latency tracer is the only one to use them, they should be static functions inside that code. Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
This commit is contained in:
parent
458faf0b88
commit
243f7610a6
|
@@ -569,15 +569,6 @@ void trace_init_global_iter(struct trace_iterator *iter);
|
||||||
|
|
||||||
void tracing_iter_reset(struct trace_iterator *iter, int cpu);
|
void tracing_iter_reset(struct trace_iterator *iter, int cpu);
|
||||||
|
|
||||||
void tracing_sched_switch_trace(struct trace_array *tr,
|
|
||||||
struct task_struct *prev,
|
|
||||||
struct task_struct *next,
|
|
||||||
unsigned long flags, int pc);
|
|
||||||
|
|
||||||
void tracing_sched_wakeup_trace(struct trace_array *tr,
|
|
||||||
struct task_struct *wakee,
|
|
||||||
struct task_struct *cur,
|
|
||||||
unsigned long flags, int pc);
|
|
||||||
void trace_function(struct trace_array *tr,
|
void trace_function(struct trace_array *tr,
|
||||||
unsigned long ip,
|
unsigned long ip,
|
||||||
unsigned long parent_ip,
|
unsigned long parent_ip,
|
||||||
|
|
|
@@ -17,34 +17,6 @@
|
||||||
static int sched_ref;
|
static int sched_ref;
|
||||||
static DEFINE_MUTEX(sched_register_mutex);
|
static DEFINE_MUTEX(sched_register_mutex);
|
||||||
|
|
||||||
void
|
|
||||||
tracing_sched_switch_trace(struct trace_array *tr,
|
|
||||||
struct task_struct *prev,
|
|
||||||
struct task_struct *next,
|
|
||||||
unsigned long flags, int pc)
|
|
||||||
{
|
|
||||||
struct ftrace_event_call *call = &event_context_switch;
|
|
||||||
struct ring_buffer *buffer = tr->trace_buffer.buffer;
|
|
||||||
struct ring_buffer_event *event;
|
|
||||||
struct ctx_switch_entry *entry;
|
|
||||||
|
|
||||||
event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
|
|
||||||
sizeof(*entry), flags, pc);
|
|
||||||
if (!event)
|
|
||||||
return;
|
|
||||||
entry = ring_buffer_event_data(event);
|
|
||||||
entry->prev_pid = prev->pid;
|
|
||||||
entry->prev_prio = prev->prio;
|
|
||||||
entry->prev_state = prev->state;
|
|
||||||
entry->next_pid = next->pid;
|
|
||||||
entry->next_prio = next->prio;
|
|
||||||
entry->next_state = next->state;
|
|
||||||
entry->next_cpu = task_cpu(next);
|
|
||||||
|
|
||||||
if (!call_filter_check_discard(call, entry, buffer, event))
|
|
||||||
trace_buffer_unlock_commit(buffer, event, flags, pc);
|
|
||||||
}
|
|
||||||
|
|
||||||
static void
|
static void
|
||||||
probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *next)
|
probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *next)
|
||||||
{
|
{
|
||||||
|
@@ -55,34 +27,6 @@ probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *n
|
||||||
tracing_record_cmdline(next);
|
tracing_record_cmdline(next);
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
|
||||||
tracing_sched_wakeup_trace(struct trace_array *tr,
|
|
||||||
struct task_struct *wakee,
|
|
||||||
struct task_struct *curr,
|
|
||||||
unsigned long flags, int pc)
|
|
||||||
{
|
|
||||||
struct ftrace_event_call *call = &event_wakeup;
|
|
||||||
struct ring_buffer_event *event;
|
|
||||||
struct ctx_switch_entry *entry;
|
|
||||||
struct ring_buffer *buffer = tr->trace_buffer.buffer;
|
|
||||||
|
|
||||||
event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
|
|
||||||
sizeof(*entry), flags, pc);
|
|
||||||
if (!event)
|
|
||||||
return;
|
|
||||||
entry = ring_buffer_event_data(event);
|
|
||||||
entry->prev_pid = curr->pid;
|
|
||||||
entry->prev_prio = curr->prio;
|
|
||||||
entry->prev_state = curr->state;
|
|
||||||
entry->next_pid = wakee->pid;
|
|
||||||
entry->next_prio = wakee->prio;
|
|
||||||
entry->next_state = wakee->state;
|
|
||||||
entry->next_cpu = task_cpu(wakee);
|
|
||||||
|
|
||||||
if (!call_filter_check_discard(call, entry, buffer, event))
|
|
||||||
trace_buffer_unlock_commit(buffer, event, flags, pc);
|
|
||||||
}
|
|
||||||
|
|
||||||
static void
|
static void
|
||||||
probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success)
|
probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success)
|
||||||
{
|
{
|
||||||
|
|
|
@@ -365,6 +365,62 @@ probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
|
||||||
wakeup_current_cpu = cpu;
|
wakeup_current_cpu = cpu;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void
|
||||||
|
tracing_sched_switch_trace(struct trace_array *tr,
|
||||||
|
struct task_struct *prev,
|
||||||
|
struct task_struct *next,
|
||||||
|
unsigned long flags, int pc)
|
||||||
|
{
|
||||||
|
struct ftrace_event_call *call = &event_context_switch;
|
||||||
|
struct ring_buffer *buffer = tr->trace_buffer.buffer;
|
||||||
|
struct ring_buffer_event *event;
|
||||||
|
struct ctx_switch_entry *entry;
|
||||||
|
|
||||||
|
event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
|
||||||
|
sizeof(*entry), flags, pc);
|
||||||
|
if (!event)
|
||||||
|
return;
|
||||||
|
entry = ring_buffer_event_data(event);
|
||||||
|
entry->prev_pid = prev->pid;
|
||||||
|
entry->prev_prio = prev->prio;
|
||||||
|
entry->prev_state = prev->state;
|
||||||
|
entry->next_pid = next->pid;
|
||||||
|
entry->next_prio = next->prio;
|
||||||
|
entry->next_state = next->state;
|
||||||
|
entry->next_cpu = task_cpu(next);
|
||||||
|
|
||||||
|
if (!call_filter_check_discard(call, entry, buffer, event))
|
||||||
|
trace_buffer_unlock_commit(buffer, event, flags, pc);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void
|
||||||
|
tracing_sched_wakeup_trace(struct trace_array *tr,
|
||||||
|
struct task_struct *wakee,
|
||||||
|
struct task_struct *curr,
|
||||||
|
unsigned long flags, int pc)
|
||||||
|
{
|
||||||
|
struct ftrace_event_call *call = &event_wakeup;
|
||||||
|
struct ring_buffer_event *event;
|
||||||
|
struct ctx_switch_entry *entry;
|
||||||
|
struct ring_buffer *buffer = tr->trace_buffer.buffer;
|
||||||
|
|
||||||
|
event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
|
||||||
|
sizeof(*entry), flags, pc);
|
||||||
|
if (!event)
|
||||||
|
return;
|
||||||
|
entry = ring_buffer_event_data(event);
|
||||||
|
entry->prev_pid = curr->pid;
|
||||||
|
entry->prev_prio = curr->prio;
|
||||||
|
entry->prev_state = curr->state;
|
||||||
|
entry->next_pid = wakee->pid;
|
||||||
|
entry->next_prio = wakee->prio;
|
||||||
|
entry->next_state = wakee->state;
|
||||||
|
entry->next_cpu = task_cpu(wakee);
|
||||||
|
|
||||||
|
if (!call_filter_check_discard(call, entry, buffer, event))
|
||||||
|
trace_buffer_unlock_commit(buffer, event, flags, pc);
|
||||||
|
}
|
||||||
|
|
||||||
static void notrace
|
static void notrace
|
||||||
probe_wakeup_sched_switch(void *ignore,
|
probe_wakeup_sched_switch(void *ignore,
|
||||||
struct task_struct *prev, struct task_struct *next)
|
struct task_struct *prev, struct task_struct *next)
|
||||||
|
|
Loading…
Reference in New Issue