tracing: Rename ftrace_event_file to trace_event_file
The name "ftrace" really refers to the function hook infrastructure; it is not about trace events. The ftrace_event_file structure is about trace events, not "ftrace", so rename it to trace_event_file.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
parent 9023c93090
commit 7f1d2f8210
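The rename is purely mechanical; no behavior changes. As a rough, hypothetical sketch (not part of this commit) of what a caller looks like after the change, the helper below uses two signatures visible in this diff, find_event_file() and trace_event_enable_disable(), which are internal tracing declarations; the wrapper function itself and the "sched"/"sched_switch" arguments are made up for illustration:

/*
 * Hypothetical example only -- not part of this commit. find_event_file()
 * and trace_event_enable_disable() are internal tracing helpers whose
 * declarations are updated by this diff; this wrapper and the event names
 * are invented for illustration.
 */
static int example_enable_event(struct trace_array *tr)
{
        struct trace_event_file *file;  /* was: struct ftrace_event_file */

        file = find_event_file(tr, "sched", "sched_switch");
        if (!file)
                return -ENODEV;

        /* Same call as before the rename; only the type of 'file' changed. */
        return trace_event_enable_disable(file, 1, 0);
}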
@@ -157,11 +157,11 @@ static inline enum print_line_t trace_handle_return(struct trace_seq *s)
 void tracing_generic_entry_update(struct trace_entry *entry,
 unsigned long flags,
 int pc);
- struct ftrace_event_file;
+ struct trace_event_file;

 struct ring_buffer_event *
 trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer,
- struct ftrace_event_file *ftrace_file,
+ struct trace_event_file *trace_file,
 int type, unsigned long len,
 unsigned long flags, int pc);
 struct ring_buffer_event *
@@ -222,14 +222,14 @@ extern int trace_event_reg(struct ftrace_event_call *event,
 struct ftrace_event_buffer {
 struct ring_buffer *buffer;
 struct ring_buffer_event *event;
- struct ftrace_event_file *ftrace_file;
+ struct trace_event_file *trace_file;
 void *entry;
 unsigned long flags;
 int pc;
 };

 void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer,
- struct ftrace_event_file *ftrace_file,
+ struct trace_event_file *trace_file,
 unsigned long len);

 void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer);
@@ -349,7 +349,7 @@ enum {
 FTRACE_EVENT_FL_TRIGGER_COND = (1 << FTRACE_EVENT_FL_TRIGGER_COND_BIT),
 };

- struct ftrace_event_file {
+ struct trace_event_file {
 struct list_head list;
 struct ftrace_event_call *event_call;
 struct event_filter *filter;
@@ -414,15 +414,15 @@ enum event_trigger_type {

 extern int filter_match_preds(struct event_filter *filter, void *rec);

- extern int filter_check_discard(struct ftrace_event_file *file, void *rec,
+ extern int filter_check_discard(struct trace_event_file *file, void *rec,
 struct ring_buffer *buffer,
 struct ring_buffer_event *event);
 extern int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
 struct ring_buffer *buffer,
 struct ring_buffer_event *event);
- extern enum event_trigger_type event_triggers_call(struct ftrace_event_file *file,
+ extern enum event_trigger_type event_triggers_call(struct trace_event_file *file,
 void *rec);
- extern void event_triggers_post_call(struct ftrace_event_file *file,
+ extern void event_triggers_post_call(struct trace_event_file *file,
 enum event_trigger_type tt);

 /**
@@ -435,7 +435,7 @@ extern void event_triggers_post_call(struct ftrace_event_file *file,
 * otherwise false.
 */
 static inline bool
- ftrace_trigger_soft_disabled(struct ftrace_event_file *file)
+ ftrace_trigger_soft_disabled(struct trace_event_file *file)
 {
 unsigned long eflags = file->flags;

@@ -462,7 +462,7 @@ ftrace_trigger_soft_disabled(struct ftrace_event_file *file)
 * Returns true if the event is discarded, false otherwise.
 */
 static inline bool
- __event_trigger_test_discard(struct ftrace_event_file *file,
+ __event_trigger_test_discard(struct trace_event_file *file,
 struct ring_buffer *buffer,
 struct ring_buffer_event *event,
 void *entry,
@@ -495,7 +495,7 @@ __event_trigger_test_discard(struct ftrace_event_file *file,
 * if the event is soft disabled and should be discarded.
 */
 static inline void
- event_trigger_unlock_commit(struct ftrace_event_file *file,
+ event_trigger_unlock_commit(struct trace_event_file *file,
 struct ring_buffer *buffer,
 struct ring_buffer_event *event,
 void *entry, unsigned long irq_flags, int pc)
@@ -526,7 +526,7 @@ event_trigger_unlock_commit(struct ftrace_event_file *file,
 * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
 */
 static inline void
- event_trigger_unlock_commit_regs(struct ftrace_event_file *file,
+ event_trigger_unlock_commit_regs(struct trace_event_file *file,
 struct ring_buffer *buffer,
 struct ring_buffer_event *event,
 void *entry, unsigned long irq_flags, int pc,

@@ -9,10 +9,10 @@
 *
 * static void ftrace_raw_event_<call>(void *__data, proto)
 * {
- * struct ftrace_event_file *ftrace_file = __data;
- * struct ftrace_event_call *event_call = ftrace_file->event_call;
+ * struct trace_event_file *trace_file = __data;
+ * struct ftrace_event_call *event_call = trace_file->event_call;
 * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
- * unsigned long eflags = ftrace_file->flags;
+ * unsigned long eflags = trace_file->flags;
 * enum event_trigger_type __tt = ETT_NONE;
 * struct ring_buffer_event *event;
 * struct ftrace_raw_<call> *entry; <-- defined in stage 1
@@ -23,7 +23,7 @@
 *
 * if (!(eflags & FTRACE_EVENT_FL_TRIGGER_COND)) {
 * if (eflags & FTRACE_EVENT_FL_TRIGGER_MODE)
- * event_triggers_call(ftrace_file, NULL);
+ * event_triggers_call(trace_file, NULL);
 * if (eflags & FTRACE_EVENT_FL_SOFT_DISABLED)
 * return;
 * }
@@ -33,7 +33,7 @@
 *
 * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
- * event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
+ * event = trace_event_buffer_lock_reserve(&buffer, trace_file,
 * event_<call>->event.type,
 * sizeof(*entry) + __data_size,
 * irq_flags, pc);
@@ -45,16 +45,16 @@
 * __array macros.
 *
 * if (eflags & FTRACE_EVENT_FL_TRIGGER_COND)
- * __tt = event_triggers_call(ftrace_file, entry);
+ * __tt = event_triggers_call(trace_file, entry);
 *
 * if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
- * &ftrace_file->flags))
+ * &trace_file->flags))
 * ring_buffer_discard_commit(buffer, event);
- * else if (!filter_check_discard(ftrace_file, entry, buffer, event))
+ * else if (!filter_check_discard(trace_file, entry, buffer, event))
 * trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
 *
 * if (__tt)
- * event_triggers_post_call(ftrace_file, __tt);
+ * event_triggers_post_call(trace_file, __tt);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
@@ -153,18 +153,18 @@
 static notrace void \
 ftrace_raw_event_##call(void *__data, proto) \
 { \
- struct ftrace_event_file *ftrace_file = __data; \
+ struct trace_event_file *trace_file = __data; \
 struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
 struct ftrace_event_buffer fbuffer; \
 struct ftrace_raw_##call *entry; \
 int __data_size; \
 \
- if (ftrace_trigger_soft_disabled(ftrace_file)) \
+ if (ftrace_trigger_soft_disabled(trace_file)) \
 return; \
 \
 __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
 \
- entry = ftrace_event_buffer_reserve(&fbuffer, ftrace_file, \
+ entry = ftrace_event_buffer_reserve(&fbuffer, trace_file, \
 sizeof(*entry) + __data_size); \
 \
 if (!entry) \

@@ -297,7 +297,7 @@ void trace_array_put(struct trace_array *this_tr)
 mutex_unlock(&trace_types_lock);
 }

- int filter_check_discard(struct ftrace_event_file *file, void *rec,
+ int filter_check_discard(struct trace_event_file *file, void *rec,
 struct ring_buffer *buffer,
 struct ring_buffer_event *event)
 {
@@ -1694,13 +1694,13 @@ static struct ring_buffer *temp_buffer;

 struct ring_buffer_event *
 trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
- struct ftrace_event_file *ftrace_file,
+ struct trace_event_file *trace_file,
 int type, unsigned long len,
 unsigned long flags, int pc)
 {
 struct ring_buffer_event *entry;

- *current_rb = ftrace_file->tr->trace_buffer.buffer;
+ *current_rb = trace_file->tr->trace_buffer.buffer;
 entry = trace_buffer_lock_reserve(*current_rb,
 type, len, flags, pc);
 /*
@@ -1709,7 +1709,7 @@ trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
 * to store the trace event for the tigger to use. It's recusive
 * safe and will not be recorded anywhere.
 */
- if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
+ if (!entry && trace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
 *current_rb = temp_buffer;
 entry = trace_buffer_lock_reserve(*current_rb,
 type, len, flags, pc);

@@ -211,8 +211,8 @@ struct trace_array {
 #ifdef CONFIG_FTRACE_SYSCALLS
 int sys_refcount_enter;
 int sys_refcount_exit;
- struct ftrace_event_file __rcu *enter_syscall_files[NR_syscalls];
- struct ftrace_event_file __rcu *exit_syscall_files[NR_syscalls];
+ struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
+ struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
 #endif
 int stop_count;
 int clock_id;
@@ -1052,9 +1052,9 @@ struct filter_pred {

 extern enum regex_type
 filter_parse_regex(char *buff, int len, char **search, int *not);
- extern void print_event_filter(struct ftrace_event_file *file,
+ extern void print_event_filter(struct trace_event_file *file,
 struct trace_seq *s);
- extern int apply_event_filter(struct ftrace_event_file *file,
+ extern int apply_event_filter(struct trace_event_file *file,
 char *filter_string);
 extern int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
 char *filter_string);
@@ -1073,9 +1073,9 @@ extern void trace_event_enable_cmd_record(bool enable);
 extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
 extern int event_trace_del_tracer(struct trace_array *tr);

- extern struct ftrace_event_file *find_event_file(struct trace_array *tr,
- const char *system,
- const char *event);
+ extern struct trace_event_file *find_event_file(struct trace_array *tr,
+ const char *system,
+ const char *event);

 static inline void *event_file_data(struct file *filp)
 {
@@ -1242,23 +1242,23 @@ struct event_command {
 enum event_trigger_type trigger_type;
 bool post_trigger;
 int (*func)(struct event_command *cmd_ops,
- struct ftrace_event_file *file,
+ struct trace_event_file *file,
 char *glob, char *cmd, char *params);
 int (*reg)(char *glob,
 struct event_trigger_ops *ops,
 struct event_trigger_data *data,
- struct ftrace_event_file *file);
+ struct trace_event_file *file);
 void (*unreg)(char *glob,
 struct event_trigger_ops *ops,
 struct event_trigger_data *data,
- struct ftrace_event_file *file);
+ struct trace_event_file *file);
 int (*set_filter)(char *filter_str,
 struct event_trigger_data *data,
- struct ftrace_event_file *file);
+ struct trace_event_file *file);
 struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
 };

- extern int trace_event_enable_disable(struct ftrace_event_file *file,
+ extern int trace_event_enable_disable(struct trace_event_file *file,
 int enable, int soft_disable);
 extern int tracing_alloc_snapshot(void);

@@ -61,7 +61,7 @@ static int system_refcount_dec(struct event_subsystem *system)

 #define do_for_each_event_file_safe(tr, file) \
 list_for_each_entry(tr, &ftrace_trace_arrays, list) { \
- struct ftrace_event_file *___n; \
+ struct trace_event_file *___n; \
 list_for_each_entry_safe(file, ___n, &tr->events, list)

 #define while_for_each_event_file() \
@@ -191,17 +191,17 @@ int trace_event_raw_init(struct ftrace_event_call *call)
 EXPORT_SYMBOL_GPL(trace_event_raw_init);

 void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer,
- struct ftrace_event_file *ftrace_file,
+ struct trace_event_file *trace_file,
 unsigned long len)
 {
- struct ftrace_event_call *event_call = ftrace_file->event_call;
+ struct ftrace_event_call *event_call = trace_file->event_call;

 local_save_flags(fbuffer->flags);
 fbuffer->pc = preempt_count();
- fbuffer->ftrace_file = ftrace_file;
+ fbuffer->trace_file = trace_file;

 fbuffer->event =
- trace_event_buffer_lock_reserve(&fbuffer->buffer, ftrace_file,
+ trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file,
 event_call->event.type, len,
 fbuffer->flags, fbuffer->pc);
 if (!fbuffer->event)
@@ -224,12 +224,12 @@ static void output_printk(struct ftrace_event_buffer *fbuffer)
 if (!iter)
 return;

- event_call = fbuffer->ftrace_file->event_call;
+ event_call = fbuffer->trace_file->event_call;
 if (!event_call || !event_call->event.funcs ||
 !event_call->event.funcs->trace)
 return;

- event = &fbuffer->ftrace_file->event_call->event;
+ event = &fbuffer->trace_file->event_call->event;

 spin_lock_irqsave(&tracepoint_iter_lock, flags);
 trace_seq_init(&iter->seq);
@@ -246,7 +246,7 @@ void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer)
 if (tracepoint_printk)
 output_printk(fbuffer);

- event_trigger_unlock_commit(fbuffer->ftrace_file, fbuffer->buffer,
+ event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
 fbuffer->event, fbuffer->entry,
 fbuffer->flags, fbuffer->pc);
 }
@@ -255,7 +255,7 @@ EXPORT_SYMBOL_GPL(ftrace_event_buffer_commit);
 int trace_event_reg(struct ftrace_event_call *call,
 enum trace_reg type, void *data)
 {
- struct ftrace_event_file *file = data;
+ struct trace_event_file *file = data;

 WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
 switch (type) {
@@ -292,7 +292,7 @@ EXPORT_SYMBOL_GPL(trace_event_reg);

 void trace_event_enable_cmd_record(bool enable)
 {
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
 struct trace_array *tr;

 mutex_lock(&event_mutex);
@@ -312,7 +312,7 @@ void trace_event_enable_cmd_record(bool enable)
 mutex_unlock(&event_mutex);
 }

- static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
+ static int __ftrace_event_enable_disable(struct trace_event_file *file,
 int enable, int soft_disable)
 {
 struct ftrace_event_call *call = file->event_call;
@@ -401,13 +401,13 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
 return ret;
 }

- int trace_event_enable_disable(struct ftrace_event_file *file,
+ int trace_event_enable_disable(struct trace_event_file *file,
 int enable, int soft_disable)
 {
 return __ftrace_event_enable_disable(file, enable, soft_disable);
 }

- static int ftrace_event_enable_disable(struct ftrace_event_file *file,
+ static int ftrace_event_enable_disable(struct trace_event_file *file,
 int enable)
 {
 return __ftrace_event_enable_disable(file, enable, 0);
@@ -415,7 +415,7 @@ static int ftrace_event_enable_disable(struct ftrace_event_file *file,

 static void ftrace_clear_events(struct trace_array *tr)
 {
- struct ftrace_event_file *file;
+ struct trace_event_file *file;

 mutex_lock(&event_mutex);
 list_for_each_entry(file, &tr->events, list) {
@@ -486,7 +486,7 @@ static void remove_subsystem(struct ftrace_subsystem_dir *dir)
 }
 }

- static void remove_event_file_dir(struct ftrace_event_file *file)
+ static void remove_event_file_dir(struct trace_event_file *file)
 {
 struct dentry *dir = file->dir;
 struct dentry *child;
@@ -515,7 +515,7 @@ static int
 __ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
 const char *sub, const char *event, int set)
 {
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
 struct ftrace_event_call *call;
 const char *name;
 int ret = -EINVAL;
@@ -671,7 +671,7 @@ ftrace_event_write(struct file *file, const char __user *ubuf,
 static void *
 t_next(struct seq_file *m, void *v, loff_t *pos)
 {
- struct ftrace_event_file *file = v;
+ struct trace_event_file *file = v;
 struct ftrace_event_call *call;
 struct trace_array *tr = m->private;

@@ -692,13 +692,13 @@ t_next(struct seq_file *m, void *v, loff_t *pos)

 static void *t_start(struct seq_file *m, loff_t *pos)
 {
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
 struct trace_array *tr = m->private;
 loff_t l;

 mutex_lock(&event_mutex);

- file = list_entry(&tr->events, struct ftrace_event_file, list);
+ file = list_entry(&tr->events, struct trace_event_file, list);
 for (l = 0; l <= *pos; ) {
 file = t_next(m, file, &l);
 if (!file)
@@ -710,7 +710,7 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 static void *
 s_next(struct seq_file *m, void *v, loff_t *pos)
 {
- struct ftrace_event_file *file = v;
+ struct trace_event_file *file = v;
 struct trace_array *tr = m->private;

 (*pos)++;
@@ -725,13 +725,13 @@ s_next(struct seq_file *m, void *v, loff_t *pos)

 static void *s_start(struct seq_file *m, loff_t *pos)
 {
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
 struct trace_array *tr = m->private;
 loff_t l;

 mutex_lock(&event_mutex);

- file = list_entry(&tr->events, struct ftrace_event_file, list);
+ file = list_entry(&tr->events, struct trace_event_file, list);
 for (l = 0; l <= *pos; ) {
 file = s_next(m, file, &l);
 if (!file)
@@ -742,7 +742,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)

 static int t_show(struct seq_file *m, void *v)
 {
- struct ftrace_event_file *file = v;
+ struct trace_event_file *file = v;
 struct ftrace_event_call *call = file->event_call;

 if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
@@ -761,7 +761,7 @@ static ssize_t
 event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
 loff_t *ppos)
 {
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
 unsigned long flags;
 char buf[4] = "0";

@@ -791,7 +791,7 @@ static ssize_t
 event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
 loff_t *ppos)
 {
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
 unsigned long val;
 int ret;

@@ -831,7 +831,7 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
 struct ftrace_subsystem_dir *dir = filp->private_data;
 struct event_subsystem *system = dir->subsystem;
 struct ftrace_event_call *call;
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
 struct trace_array *tr = dir->tr;
 char buf[2];
 int set = 0;
@@ -1062,7 +1062,7 @@ static ssize_t
 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
 loff_t *ppos)
 {
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
 struct trace_seq *s;
 int r = -ENODEV;

@@ -1095,7 +1095,7 @@ static ssize_t
 event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
 loff_t *ppos)
 {
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
 char *buf;
 int err = -ENODEV;

@@ -1497,7 +1497,7 @@ create_new_subsystem(const char *name)

 static struct dentry *
 event_subsystem_dir(struct trace_array *tr, const char *name,
- struct ftrace_event_file *file, struct dentry *parent)
+ struct trace_event_file *file, struct dentry *parent)
 {
 struct ftrace_subsystem_dir *dir;
 struct event_subsystem *system;
@@ -1571,7 +1571,7 @@ event_subsystem_dir(struct trace_array *tr, const char *name,
 }

 static int
- event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
+ event_create_dir(struct dentry *parent, struct trace_event_file *file)
 {
 struct ftrace_event_call *call = file->event_call;
 struct trace_array *tr = file->tr;
@@ -1636,7 +1636,7 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file)

 static void remove_event_from_tracers(struct ftrace_event_call *call)
 {
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
 struct trace_array *tr;

 do_for_each_event_file_safe(tr, file) {
@@ -1657,7 +1657,7 @@ static void remove_event_from_tracers(struct ftrace_event_call *call)
 static void event_remove(struct ftrace_event_call *call)
 {
 struct trace_array *tr;
- struct ftrace_event_file *file;
+ struct trace_event_file *file;

 do_for_each_event_file(tr, file) {
 if (file->event_call != call)
@@ -1836,11 +1836,11 @@ void trace_event_enum_update(struct trace_enum_map **map, int len)
 up_write(&trace_event_sem);
 }

- static struct ftrace_event_file *
+ static struct trace_event_file *
 trace_create_new_event(struct ftrace_event_call *call,
 struct trace_array *tr)
 {
- struct ftrace_event_file *file;
+ struct trace_event_file *file;

 file = kmem_cache_alloc(file_cachep, GFP_TRACE);
 if (!file)
@@ -1860,7 +1860,7 @@ trace_create_new_event(struct ftrace_event_call *call,
 static int
 __trace_add_new_event(struct ftrace_event_call *call, struct trace_array *tr)
 {
- struct ftrace_event_file *file;
+ struct trace_event_file *file;

 file = trace_create_new_event(call, tr);
 if (!file)
@@ -1878,7 +1878,7 @@ static __init int
 __trace_early_add_new_event(struct ftrace_event_call *call,
 struct trace_array *tr)
 {
- struct ftrace_event_file *file;
+ struct trace_event_file *file;

 file = trace_create_new_event(call, tr);
 if (!file)
@@ -1921,7 +1921,7 @@ static void __trace_remove_event_call(struct ftrace_event_call *call)
 static int probe_remove_event_call(struct ftrace_event_call *call)
 {
 struct trace_array *tr;
- struct ftrace_event_file *file;
+ struct trace_event_file *file;

 #ifdef CONFIG_PERF_EVENTS
 if (call->perf_refcount)
@@ -2066,10 +2066,10 @@ __trace_add_event_dirs(struct trace_array *tr)
 }
 }

- struct ftrace_event_file *
+ struct trace_event_file *
 find_event_file(struct trace_array *tr, const char *system, const char *event)
 {
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
 struct ftrace_event_call *call;
 const char *name;

@@ -2098,7 +2098,7 @@ find_event_file(struct trace_array *tr, const char *system, const char *event)
 #define DISABLE_EVENT_STR "disable_event"

 struct event_probe_data {
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
 unsigned long count;
 int ref;
 bool enable;
@@ -2226,7 +2226,7 @@ event_enable_func(struct ftrace_hash *hash,
 char *glob, char *cmd, char *param, int enabled)
 {
 struct trace_array *tr = top_trace_array();
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
 struct ftrace_probe_ops *ops;
 struct event_probe_data *data;
 const char *system;
@@ -2358,7 +2358,7 @@ static inline int register_event_cmds(void) { return 0; }
 #endif /* CONFIG_DYNAMIC_FTRACE */

 /*
- * The top level array has already had its ftrace_event_file
+ * The top level array has already had its trace_event_file
 * descriptors created in order to allow for early events to
 * be recorded. This function is called after the tracefs has been
 * initialized, and we now have to create the files associated
@@ -2367,7 +2367,7 @@ static inline int register_event_cmds(void) { return 0; }
 static __init void
 __trace_early_add_event_dirs(struct trace_array *tr)
 {
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
 int ret;

@@ -2407,7 +2407,7 @@ __trace_early_add_events(struct trace_array *tr)
 static void
 __trace_remove_event_dirs(struct trace_array *tr)
 {
- struct ftrace_event_file *file, *next;
+ struct trace_event_file *file, *next;

 list_for_each_entry_safe(file, next, &tr->events, list)
 remove_event_file_dir(file);
@@ -2557,7 +2557,7 @@ int event_trace_del_tracer(struct trace_array *tr)
 static __init int event_trace_memsetup(void)
 {
 field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
- file_cachep = KMEM_CACHE(ftrace_event_file, SLAB_PANIC);
+ file_cachep = KMEM_CACHE(trace_event_file, SLAB_PANIC);
 return 0;
 }

@@ -2755,7 +2755,7 @@ static __init void event_test_stuff(void)
 static __init void event_trace_self_tests(void)
 {
 struct ftrace_subsystem_dir *dir;
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
 struct ftrace_event_call *call;
 struct event_subsystem *system;
 struct trace_array *tr;

@@ -643,7 +643,7 @@ static void append_filter_err(struct filter_parse_state *ps,
 free_page((unsigned long) buf);
 }

- static inline struct event_filter *event_filter(struct ftrace_event_file *file)
+ static inline struct event_filter *event_filter(struct trace_event_file *file)
 {
 if (file->event_call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
 return file->event_call->filter;
@@ -652,7 +652,7 @@ static inline struct event_filter *event_filter(struct ftrace_event_file *file)
 }

 /* caller must hold event_mutex */
- void print_event_filter(struct ftrace_event_file *file, struct trace_seq *s)
+ void print_event_filter(struct trace_event_file *file, struct trace_seq *s)
 {
 struct event_filter *filter = event_filter(file);

@@ -780,7 +780,7 @@ static void __free_preds(struct event_filter *filter)
 filter->n_preds = 0;
 }

- static void filter_disable(struct ftrace_event_file *file)
+ static void filter_disable(struct trace_event_file *file)
 {
 struct ftrace_event_call *call = file->event_call;

@@ -837,7 +837,7 @@ static int __alloc_preds(struct event_filter *filter, int n_preds)
 return 0;
 }

- static inline void __remove_filter(struct ftrace_event_file *file)
+ static inline void __remove_filter(struct trace_event_file *file)
 {
 struct ftrace_event_call *call = file->event_call;

@@ -851,7 +851,7 @@ static inline void __remove_filter(struct ftrace_event_file *file)
 static void filter_free_subsystem_preds(struct ftrace_subsystem_dir *dir,
 struct trace_array *tr)
 {
- struct ftrace_event_file *file;
+ struct trace_event_file *file;

 list_for_each_entry(file, &tr->events, list) {
 if (file->system != dir)
@@ -860,7 +860,7 @@ static void filter_free_subsystem_preds(struct ftrace_subsystem_dir *dir,
 }
 }

- static inline void __free_subsystem_filter(struct ftrace_event_file *file)
+ static inline void __free_subsystem_filter(struct trace_event_file *file)
 {
 struct ftrace_event_call *call = file->event_call;

@@ -876,7 +876,7 @@ static inline void __free_subsystem_filter(struct ftrace_event_file *file)
 static void filter_free_subsystem_filters(struct ftrace_subsystem_dir *dir,
 struct trace_array *tr)
 {
- struct ftrace_event_file *file;
+ struct trace_event_file *file;

 list_for_each_entry(file, &tr->events, list) {
 if (file->system != dir)
@@ -1662,7 +1662,7 @@ fail:
 return err;
 }

- static inline void event_set_filtered_flag(struct ftrace_event_file *file)
+ static inline void event_set_filtered_flag(struct trace_event_file *file)
 {
 struct ftrace_event_call *call = file->event_call;

@@ -1672,7 +1672,7 @@ static inline void event_set_filtered_flag(struct ftrace_event_file *file)
 file->flags |= FTRACE_EVENT_FL_FILTERED;
 }

- static inline void event_set_filter(struct ftrace_event_file *file,
+ static inline void event_set_filter(struct trace_event_file *file,
 struct event_filter *filter)
 {
 struct ftrace_event_call *call = file->event_call;
@@ -1683,7 +1683,7 @@ static inline void event_set_filter(struct ftrace_event_file *file,
 rcu_assign_pointer(file->filter, filter);
 }

- static inline void event_clear_filter(struct ftrace_event_file *file)
+ static inline void event_clear_filter(struct trace_event_file *file)
 {
 struct ftrace_event_call *call = file->event_call;

@@ -1694,7 +1694,7 @@ static inline void event_clear_filter(struct ftrace_event_file *file)
 }

 static inline void
- event_set_no_set_filter_flag(struct ftrace_event_file *file)
+ event_set_no_set_filter_flag(struct trace_event_file *file)
 {
 struct ftrace_event_call *call = file->event_call;

@@ -1705,7 +1705,7 @@ event_set_no_set_filter_flag(struct ftrace_event_file *file)
 }

 static inline void
- event_clear_no_set_filter_flag(struct ftrace_event_file *file)
+ event_clear_no_set_filter_flag(struct trace_event_file *file)
 {
 struct ftrace_event_call *call = file->event_call;

@@ -1716,7 +1716,7 @@ event_clear_no_set_filter_flag(struct ftrace_event_file *file)
 }

 static inline bool
- event_no_set_filter_flag(struct ftrace_event_file *file)
+ event_no_set_filter_flag(struct trace_event_file *file)
 {
 struct ftrace_event_call *call = file->event_call;

@@ -1740,7 +1740,7 @@ static int replace_system_preds(struct ftrace_subsystem_dir *dir,
 struct filter_parse_state *ps,
 char *filter_string)
 {
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
 struct filter_list *filter_item;
 struct filter_list *tmp;
 LIST_HEAD(filter_list);
@@ -1961,7 +1961,7 @@ static int create_system_filter(struct ftrace_subsystem_dir *dir,
 }

 /* caller must hold event_mutex */
- int apply_event_filter(struct ftrace_event_file *file, char *filter_string)
+ int apply_event_filter(struct trace_event_file *file, char *filter_string)
 {
 struct ftrace_event_call *call = file->event_call;
 struct event_filter *filter;

@@ -40,7 +40,7 @@ trigger_data_free(struct event_trigger_data *data)

 /**
 * event_triggers_call - Call triggers associated with a trace event
- * @file: The ftrace_event_file associated with the event
+ * @file: The trace_event_file associated with the event
 * @rec: The trace entry for the event, NULL for unconditional invocation
 *
 * For each trigger associated with an event, invoke the trigger
@@ -63,7 +63,7 @@ trigger_data_free(struct event_trigger_data *data)
 * any trigger that should be deferred, ETT_NONE if nothing to defer.
 */
 enum event_trigger_type
- event_triggers_call(struct ftrace_event_file *file, void *rec)
+ event_triggers_call(struct trace_event_file *file, void *rec)
 {
 struct event_trigger_data *data;
 enum event_trigger_type tt = ETT_NONE;
@@ -92,7 +92,7 @@ EXPORT_SYMBOL_GPL(event_triggers_call);

 /**
 * event_triggers_post_call - Call 'post_triggers' for a trace event
- * @file: The ftrace_event_file associated with the event
+ * @file: The trace_event_file associated with the event
 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
 *
 * For each trigger associated with an event, invoke the trigger
@@ -103,7 +103,7 @@ EXPORT_SYMBOL_GPL(event_triggers_call);
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 */
 void
- event_triggers_post_call(struct ftrace_event_file *file,
+ event_triggers_post_call(struct trace_event_file *file,
 enum event_trigger_type tt)
 {
 struct event_trigger_data *data;
@@ -119,7 +119,7 @@ EXPORT_SYMBOL_GPL(event_triggers_post_call);

 static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
 {
- struct ftrace_event_file *event_file = event_file_data(m->private);
+ struct trace_event_file *event_file = event_file_data(m->private);

 if (t == SHOW_AVAILABLE_TRIGGERS)
 return NULL;
@@ -129,7 +129,7 @@ static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)

 static void *trigger_start(struct seq_file *m, loff_t *pos)
 {
- struct ftrace_event_file *event_file;
+ struct trace_event_file *event_file;

 /* ->stop() is called even if ->start() fails */
 mutex_lock(&event_mutex);
@@ -201,7 +201,7 @@ static int event_trigger_regex_open(struct inode *inode, struct file *file)
 return ret;
 }

- static int trigger_process_regex(struct ftrace_event_file *file, char *buff)
+ static int trigger_process_regex(struct trace_event_file *file, char *buff)
 {
 char *command, *next = buff;
 struct event_command *p;
@@ -227,7 +227,7 @@ static ssize_t event_trigger_regex_write(struct file *file,
 const char __user *ubuf,
 size_t cnt, loff_t *ppos)
 {
- struct ftrace_event_file *event_file;
+ struct trace_event_file *event_file;
 ssize_t ret;
 char *buf;

@@ -430,7 +430,7 @@ event_trigger_free(struct event_trigger_ops *ops,
 trigger_data_free(data);
 }

- static int trace_event_trigger_enable_disable(struct ftrace_event_file *file,
+ static int trace_event_trigger_enable_disable(struct trace_event_file *file,
 int trigger_enable)
 {
 int ret = 0;
@@ -466,7 +466,7 @@ static int trace_event_trigger_enable_disable(struct ftrace_event_file *file,
 void
 clear_event_triggers(struct trace_array *tr)
 {
- struct ftrace_event_file *file;
+ struct trace_event_file *file;

 list_for_each_entry(file, &tr->events, list) {
 struct event_trigger_data *data;
@@ -480,7 +480,7 @@ clear_event_triggers(struct trace_array *tr)

 /**
 * update_cond_flag - Set or reset the TRIGGER_COND bit
- * @file: The ftrace_event_file associated with the event
+ * @file: The trace_event_file associated with the event
 *
 * If an event has triggers and any of those triggers has a filter or
 * a post_trigger, trigger invocation needs to be deferred until after
@@ -488,7 +488,7 @@ clear_event_triggers(struct trace_array *tr)
 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
 * cleared.
 */
- static void update_cond_flag(struct ftrace_event_file *file)
+ static void update_cond_flag(struct trace_event_file *file)
 {
 struct event_trigger_data *data;
 bool set_cond = false;
@@ -511,7 +511,7 @@ static void update_cond_flag(struct ftrace_event_file *file)
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data to associate with the trigger
- * @file: The ftrace_event_file associated with the event
+ * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger registration.
 *
@@ -522,7 +522,7 @@ static void update_cond_flag(struct ftrace_event_file *file)
 */
 static int register_trigger(char *glob, struct event_trigger_ops *ops,
 struct event_trigger_data *data,
- struct ftrace_event_file *file)
+ struct trace_event_file *file)
 {
 struct event_trigger_data *test;
 int ret = 0;
@@ -557,7 +557,7 @@ out:
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @test: Trigger-specific data used to find the trigger to remove
- * @file: The ftrace_event_file associated with the event
+ * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger unregistration.
 *
@@ -566,7 +566,7 @@ out:
 */
 static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
 struct event_trigger_data *test,
- struct ftrace_event_file *file)
+ struct trace_event_file *file)
 {
 struct event_trigger_data *data;
 bool unregistered = false;
@@ -588,7 +588,7 @@ static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
 /**
 * event_trigger_callback - Generic event_command @func implementation
 * @cmd_ops: The command ops, used for trigger registration
- * @file: The ftrace_event_file associated with the event
+ * @file: The trace_event_file associated with the event
 * @glob: The raw string used to register the trigger
 * @cmd: The cmd portion of the string used to register the trigger
 * @param: The params portion of the string used to register the trigger
@@ -603,7 +603,7 @@ static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
 */
 static int
 event_trigger_callback(struct event_command *cmd_ops,
- struct ftrace_event_file *file,
+ struct trace_event_file *file,
 char *glob, char *cmd, char *param)
 {
 struct event_trigger_data *trigger_data;
@@ -688,7 +688,7 @@ event_trigger_callback(struct event_command *cmd_ops,
 * set_trigger_filter - Generic event_command @set_filter implementation
 * @filter_str: The filter string for the trigger, NULL to remove filter
 * @trigger_data: Trigger-specific data
- * @file: The ftrace_event_file associated with the event
+ * @file: The trace_event_file associated with the event
 *
 * Common implementation for event command filter parsing and filter
 * instantiation.
@@ -702,7 +702,7 @@ event_trigger_callback(struct event_command *cmd_ops,
 */
 static int set_trigger_filter(char *filter_str,
 struct event_trigger_data *trigger_data,
- struct ftrace_event_file *file)
+ struct trace_event_file *file)
 {
 struct event_trigger_data *data = trigger_data;
 struct event_filter *filter = NULL, *tmp;
@@ -900,7 +900,7 @@ snapshot_count_trigger(struct event_trigger_data *data)
 static int
 register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
 struct event_trigger_data *data,
- struct ftrace_event_file *file)
+ struct trace_event_file *file)
 {
 int ret = register_trigger(glob, ops, data, file);

@@ -1053,7 +1053,7 @@ static __init void unregister_trigger_traceon_traceoff_cmds(void)
 #define DISABLE_EVENT_STR "disable_event"

 struct enable_trigger_data {
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
 bool enable;
 };

@@ -1159,10 +1159,10 @@ static struct event_trigger_ops event_disable_count_trigger_ops = {

 static int
 event_enable_trigger_func(struct event_command *cmd_ops,
- struct ftrace_event_file *file,
+ struct trace_event_file *file,
 char *glob, char *cmd, char *param)
 {
- struct ftrace_event_file *event_enable_file;
+ struct trace_event_file *event_enable_file;
 struct enable_trigger_data *enable_data;
 struct event_trigger_data *trigger_data;
 struct event_trigger_ops *trigger_ops;
@@ -1294,7 +1294,7 @@ event_enable_trigger_func(struct event_command *cmd_ops,
 static int event_enable_register_trigger(char *glob,
 struct event_trigger_ops *ops,
 struct event_trigger_data *data,
- struct ftrace_event_file *file)
+ struct trace_event_file *file)
 {
 struct enable_trigger_data *enable_data = data->private_data;
 struct enable_trigger_data *test_enable_data;
@@ -1331,7 +1331,7 @@ out:
 static void event_enable_unregister_trigger(char *glob,
 struct event_trigger_ops *ops,
 struct event_trigger_data *test,
- struct ftrace_event_file *file)
+ struct trace_event_file *file)
 {
 struct enable_trigger_data *test_enable_data = test->private_data;
 struct enable_trigger_data *enable_data;

@@ -359,7 +359,7 @@ static struct trace_kprobe *find_trace_kprobe(const char *event,
 * if the file is NULL, enable "perf" handler, or enable "trace" handler.
 */
 static int
- enable_trace_kprobe(struct trace_kprobe *tk, struct ftrace_event_file *file)
+ enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
 {
 int ret = 0;

@@ -394,7 +394,7 @@ enable_trace_kprobe(struct trace_kprobe *tk, struct ftrace_event_file *file)
 * if the file is NULL, disable "perf" handler, or disable "trace" handler.
 */
 static int
- disable_trace_kprobe(struct trace_kprobe *tk, struct ftrace_event_file *file)
+ disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
 {
 struct event_file_link *link = NULL;
 int wait = 0;
@@ -917,7 +917,7 @@ static const struct file_operations kprobe_profile_ops = {
 /* Kprobe handler */
 static nokprobe_inline void
 __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
- struct ftrace_event_file *ftrace_file)
+ struct trace_event_file *trace_file)
 {
 struct kprobe_trace_entry_head *entry;
 struct ring_buffer_event *event;
@@ -926,9 +926,9 @@ __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
 unsigned long irq_flags;
 struct ftrace_event_call *call = &tk->tp.call;

- WARN_ON(call != ftrace_file->event_call);
+ WARN_ON(call != trace_file->event_call);

- if (ftrace_trigger_soft_disabled(ftrace_file))
+ if (ftrace_trigger_soft_disabled(trace_file))
 return;

 local_save_flags(irq_flags);
@@ -937,7 +937,7 @@ __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
 dsize = __get_data_size(&tk->tp, regs);
 size = sizeof(*entry) + tk->tp.size + dsize;

- event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
+ event = trace_event_buffer_lock_reserve(&buffer, trace_file,
 call->event.type,
 size, irq_flags, pc);
 if (!event)
@@ -947,7 +947,7 @@ __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
 entry->ip = (unsigned long)tk->rp.kp.addr;
 store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);

- event_trigger_unlock_commit_regs(ftrace_file, buffer, event,
+ event_trigger_unlock_commit_regs(trace_file, buffer, event,
 entry, irq_flags, pc, regs);
 }

@@ -965,7 +965,7 @@ NOKPROBE_SYMBOL(kprobe_trace_func);
 static nokprobe_inline void
 __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
 struct pt_regs *regs,
- struct ftrace_event_file *ftrace_file)
+ struct trace_event_file *trace_file)
 {
 struct kretprobe_trace_entry_head *entry;
 struct ring_buffer_event *event;
@@ -974,9 +974,9 @@ __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
 unsigned long irq_flags;
 struct ftrace_event_call *call = &tk->tp.call;

- WARN_ON(call != ftrace_file->event_call);
+ WARN_ON(call != trace_file->event_call);

- if (ftrace_trigger_soft_disabled(ftrace_file))
+ if (ftrace_trigger_soft_disabled(trace_file))
 return;

 local_save_flags(irq_flags);
@@ -985,7 +985,7 @@ __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
 dsize = __get_data_size(&tk->tp, regs);
 size = sizeof(*entry) + tk->tp.size + dsize;

- event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
+ event = trace_event_buffer_lock_reserve(&buffer, trace_file,
 call->event.type,
 size, irq_flags, pc);
 if (!event)
@@ -996,7 +996,7 @@ __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
 entry->ret_ip = (unsigned long)ri->ret_addr;
 store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);

- event_trigger_unlock_commit_regs(ftrace_file, buffer, event,
+ event_trigger_unlock_commit_regs(trace_file, buffer, event,
 entry, irq_flags, pc, regs);
 }

@@ -1210,7 +1210,7 @@ static int kprobe_register(struct ftrace_event_call *event,
 enum trace_reg type, void *data)
 {
 struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
- struct ftrace_event_file *file = data;
+ struct trace_event_file *file = data;

 switch (type) {
 case TRACE_REG_REGISTER:
@@ -1364,10 +1364,10 @@ static __used int kprobe_trace_selftest_target(int a1, int a2, int a3,
 return a1 + a2 + a3 + a4 + a5 + a6;
 }

- static struct ftrace_event_file *
+ static struct trace_event_file *
 find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
 {
- struct ftrace_event_file *file;
+ struct trace_event_file *file;

 list_for_each_entry(file, &tr->events, list)
 if (file->event_call == &tk->tp.call)
@@ -1385,7 +1385,7 @@ static __init int kprobe_trace_self_tests_init(void)
 int ret, warn = 0;
 int (*target)(int, int, int, int, int, int);
 struct trace_kprobe *tk;
- struct ftrace_event_file *file;
+ struct trace_event_file *file;

 if (tracing_is_disabled())
 return -ENODEV;

@@ -281,7 +281,7 @@ struct trace_probe {
 };

 struct event_file_link {
- struct ftrace_event_file *file;
+ struct trace_event_file *file;
 struct list_head list;
 };

@@ -314,7 +314,7 @@ static inline int is_good_name(const char *name)
 }

 static inline struct event_file_link *
- find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file)
+ find_event_file_link(struct trace_probe *tp, struct trace_event_file *file)
 {
 struct event_file_link *link;

@@ -293,7 +293,7 @@ static int __init syscall_exit_define_fields(struct ftrace_event_call *call)
 static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
 {
 struct trace_array *tr = data;
- struct ftrace_event_file *ftrace_file;
+ struct trace_event_file *trace_file;
 struct syscall_trace_enter *entry;
 struct syscall_metadata *sys_data;
 struct ring_buffer_event *event;
@@ -308,11 +308,11 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
 return;

 /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
- ftrace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]);
- if (!ftrace_file)
+ trace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]);
+ if (!trace_file)
 return;

- if (ftrace_trigger_soft_disabled(ftrace_file))
+ if (ftrace_trigger_soft_disabled(trace_file))
 return;

 sys_data = syscall_nr_to_meta(syscall_nr);
@@ -334,14 +334,14 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
 entry->nr = syscall_nr;
 syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

- event_trigger_unlock_commit(ftrace_file, buffer, event, entry,
+ event_trigger_unlock_commit(trace_file, buffer, event, entry,
 irq_flags, pc);
 }

 static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
 {
 struct trace_array *tr = data;
- struct ftrace_event_file *ftrace_file;
+ struct trace_event_file *trace_file;
 struct syscall_trace_exit *entry;
 struct syscall_metadata *sys_data;
 struct ring_buffer_event *event;
@@ -355,11 +355,11 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
 return;

 /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
- ftrace_file = rcu_dereference_sched(tr->exit_syscall_files[syscall_nr]);
- if (!ftrace_file)
+ trace_file = rcu_dereference_sched(tr->exit_syscall_files[syscall_nr]);
+ if (!trace_file)
 return;

- if (ftrace_trigger_soft_disabled(ftrace_file))
+ if (ftrace_trigger_soft_disabled(trace_file))
 return;

 sys_data = syscall_nr_to_meta(syscall_nr);
@@ -380,11 +380,11 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
 entry->nr = syscall_nr;
 entry->ret = syscall_get_return_value(current, regs);

- event_trigger_unlock_commit(ftrace_file, buffer, event, entry,
+ event_trigger_unlock_commit(trace_file, buffer, event, entry,
 irq_flags, pc);
 }

- static int reg_event_syscall_enter(struct ftrace_event_file *file,
+ static int reg_event_syscall_enter(struct trace_event_file *file,
 struct ftrace_event_call *call)
 {
 struct trace_array *tr = file->tr;
@@ -405,7 +405,7 @@ static int reg_event_syscall_enter(struct ftrace_event_file *file,
 return ret;
 }

- static void unreg_event_syscall_enter(struct ftrace_event_file *file,
+ static void unreg_event_syscall_enter(struct trace_event_file *file,
 struct ftrace_event_call *call)
 {
 struct trace_array *tr = file->tr;
@@ -422,7 +422,7 @@ static void unreg_event_syscall_enter(struct ftrace_event_file *file,
 mutex_unlock(&syscall_trace_lock);
 }

- static int reg_event_syscall_exit(struct ftrace_event_file *file,
+ static int reg_event_syscall_exit(struct trace_event_file *file,
 struct ftrace_event_call *call)
 {
 struct trace_array *tr = file->tr;
@@ -443,7 +443,7 @@ static int reg_event_syscall_exit(struct ftrace_event_file *file,
 return ret;
 }

- static void unreg_event_syscall_exit(struct ftrace_event_file *file,
+ static void unreg_event_syscall_exit(struct trace_event_file *file,
 struct ftrace_event_call *call)
 {
 struct trace_array *tr = file->tr;
@@ -696,7 +696,7 @@ static void perf_sysexit_disable(struct ftrace_event_call *call)
 static int syscall_enter_register(struct ftrace_event_call *event,
 enum trace_reg type, void *data)
 {
- struct ftrace_event_file *file = data;
+ struct trace_event_file *file = data;

 switch (type) {
 case TRACE_REG_REGISTER:
@@ -724,7 +724,7 @@ static int syscall_enter_register(struct ftrace_event_call *event,
 static int syscall_exit_register(struct ftrace_event_call *event,
 enum trace_reg type, void *data)
 {
- struct ftrace_event_file *file = data;
+ struct trace_event_file *file = data;

 switch (type) {
 case TRACE_REG_REGISTER:

@@ -770,7 +770,7 @@ static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
 static void __uprobe_trace_func(struct trace_uprobe *tu,
 unsigned long func, struct pt_regs *regs,
 struct uprobe_cpu_buffer *ucb, int dsize,
- struct ftrace_event_file *ftrace_file)
+ struct trace_event_file *trace_file)
 {
 struct uprobe_trace_entry_head *entry;
 struct ring_buffer_event *event;
@@ -779,17 +779,17 @@ static void __uprobe_trace_func(struct trace_uprobe *tu,
 int size, esize;
 struct ftrace_event_call *call = &tu->tp.call;

- WARN_ON(call != ftrace_file->event_call);
+ WARN_ON(call != trace_file->event_call);

 if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
 return;

- if (ftrace_trigger_soft_disabled(ftrace_file))
+ if (ftrace_trigger_soft_disabled(trace_file))
 return;

 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
 size = esize + tu->tp.size + dsize;
- event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
+ event = trace_event_buffer_lock_reserve(&buffer, trace_file,
 call->event.type, size, 0, 0);
 if (!event)
 return;
@@ -806,7 +806,7 @@ static void __uprobe_trace_func(struct trace_uprobe *tu,

 memcpy(data, ucb->buf, tu->tp.size + dsize);

- event_trigger_unlock_commit(ftrace_file, buffer, event, entry, 0, 0);
+ event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
 }

 /* uprobe handler */
@@ -881,7 +881,7 @@ typedef bool (*filter_func_t)(struct uprobe_consumer *self,
 struct mm_struct *mm);

 static int
- probe_event_enable(struct trace_uprobe *tu, struct ftrace_event_file *file,
+ probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
 filter_func_t filter)
 {
 bool enabled = trace_probe_is_enabled(&tu->tp);
@@ -938,7 +938,7 @@ probe_event_enable(struct trace_uprobe *tu, struct ftrace_event_file *file,
 }

 static void
- probe_event_disable(struct trace_uprobe *tu, struct ftrace_event_file *file)
+ probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
 {
 if (!trace_probe_is_enabled(&tu->tp))
 return;
@@ -1163,7 +1163,7 @@ trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type,
 void *data)
 {
 struct trace_uprobe *tu = event->data;
- struct ftrace_event_file *file = data;
+ struct trace_event_file *file = data;

 switch (type) {
 case TRACE_REG_REGISTER: