tracing: Inject seq_print_userip_objs() into its only user
seq_print_userip_objs() is used only in one location, in one file. Instead of having it as an external function, go one further than making it static: inject its code into its only user. It doesn't make the calling function much more complex.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
commit 6b1032d53c
parent ca475e831f
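The change is a straightforward inlining: a function with external linkage that has exactly one caller is folded into that caller, and its declaration is dropped from the header. Below is a minimal standalone sketch of the same pattern; all names in it (print_banner(), report_before(), report_after()) are hypothetical illustrations, not kernel code.

#include <stdio.h>

/*
 * Before: print_banner() has external linkage (declared in some
 * header) even though report_before() is its only caller.
 */
int print_banner(const char *title)
{
	return printf("== %s ==\n", title);
}

int report_before(const char *title, int value)
{
	print_banner(title);
	return printf("value = %d\n", value);
}

/*
 * After: the helper's body is injected into its only user, so both
 * the function and its header declaration can be deleted.
 */
int report_after(const char *title, int value)
{
	printf("== %s ==\n", title);	/* was print_banner(title) */
	return printf("value = %d\n", value);
}

int main(void)
{
	report_before("before", 1);
	report_after("after", 2);
	return 0;
}

The trade-off is the one the commit message notes: the caller grows a little, but one external function and its header declaration disappear.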
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -354,50 +354,6 @@ int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
 	return !trace_seq_has_overflowed(s);
 }
 
-int
-seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
-		      unsigned long sym_flags)
-{
-	struct mm_struct *mm = NULL;
-	unsigned int i;
-
-	if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
-		struct task_struct *task;
-		/*
-		 * we do the lookup on the thread group leader,
-		 * since individual threads might have already quit!
-		 */
-		rcu_read_lock();
-		task = find_task_by_vpid(entry->tgid);
-		if (task)
-			mm = get_task_mm(task);
-		rcu_read_unlock();
-	}
-
-	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
-		unsigned long ip = entry->caller[i];
-
-		if (ip == ULONG_MAX || trace_seq_has_overflowed(s))
-			break;
-
-		trace_seq_puts(s, " => ");
-
-		if (!ip) {
-			trace_seq_puts(s, "??");
-			trace_seq_putc(s, '\n');
-			continue;
-		}
-
-		seq_print_user_ip(s, mm, ip, sym_flags);
-		trace_seq_putc(s, '\n');
-	}
-
-	if (mm)
-		mmput(mm);
-
-	return !trace_seq_has_overflowed(s);
-}
-
 int
 seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
 {
@@ -1081,11 +1037,46 @@ static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
 {
 	struct userstack_entry *field;
 	struct trace_seq *s = &iter->seq;
+	struct mm_struct *mm = NULL;
+	unsigned int i;
 
 	trace_assign_type(field, iter->ent);
 
 	trace_seq_puts(s, "<user stack trace>\n");
-	seq_print_userip_objs(field, s, flags);
+
+	if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
+		struct task_struct *task;
+		/*
+		 * we do the lookup on the thread group leader,
+		 * since individual threads might have already quit!
+		 */
+		rcu_read_lock();
+		task = find_task_by_vpid(field->tgid);
+		if (task)
+			mm = get_task_mm(task);
+		rcu_read_unlock();
+	}
+
+	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
+		unsigned long ip = field->caller[i];
+
+		if (ip == ULONG_MAX || trace_seq_has_overflowed(s))
+			break;
+
+		trace_seq_puts(s, " => ");
+
+		if (!ip) {
+			trace_seq_puts(s, "??");
+			trace_seq_putc(s, '\n');
+			continue;
+		}
+
+		seq_print_user_ip(s, mm, ip, flags);
+		trace_seq_putc(s, '\n');
+	}
+
+	if (mm)
+		mmput(mm);
 
 	return trace_handle_return(s);
 }
diff --git a/kernel/trace/trace_output.h b/kernel/trace/trace_output.h
--- a/kernel/trace/trace_output.h
+++ b/kernel/trace/trace_output.h
@@ -14,8 +14,6 @@ trace_print_printk_msg_only(struct trace_iterator *iter);
 extern int
 seq_print_ip_sym(struct trace_seq *s, unsigned long ip,
 		 unsigned long sym_flags);
-extern int seq_print_userip_objs(const struct userstack_entry *entry,
-				 struct trace_seq *s, unsigned long sym_flags);
 extern int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
 			     unsigned long ip, unsigned long sym_flags);
 