ftrace: Implement separate user function filtering

ftrace_ops that are registered to trace functions can now be
agnostic to each other with respect to which functions they trace.
Each ops has their own hash of the functions they want to trace
and a hash of what they do not want to trace. An empty hash for
the functions they want to trace denotes all functions should
be traced that are not in the notrace hash.

Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
This commit is contained in:
Steven Rostedt 2011-05-04 09:27:52 -04:00 committed by Steven Rostedt
parent 07fd5515f3
commit b848914ce3
6 changed files with 166 additions and 39 deletions

View File

@ -31,13 +31,18 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);
struct ftrace_hash; struct ftrace_hash;
enum {
FTRACE_OPS_FL_ENABLED = 1 << 0,
FTRACE_OPS_FL_GLOBAL = 1 << 1,
};
struct ftrace_ops { struct ftrace_ops {
ftrace_func_t func; ftrace_func_t func;
struct ftrace_ops *next; struct ftrace_ops *next;
unsigned long flags;
#ifdef CONFIG_DYNAMIC_FTRACE #ifdef CONFIG_DYNAMIC_FTRACE
struct ftrace_hash *notrace_hash; struct ftrace_hash *notrace_hash;
struct ftrace_hash *filter_hash; struct ftrace_hash *filter_hash;
unsigned long flags;
#endif #endif
}; };

View File

@ -87,24 +87,29 @@ static struct ftrace_ops ftrace_list_end __read_mostly =
.func = ftrace_stub, .func = ftrace_stub,
}; };
static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end; static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub; ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub; ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops; static struct ftrace_ops global_ops;
static void
ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
/* /*
* Traverse the ftrace_list, invoking all entries. The reason that we * Traverse the ftrace_global_list, invoking all entries. The reason that we
* can use rcu_dereference_raw() is that elements removed from this list * can use rcu_dereference_raw() is that elements removed from this list
* are simply leaked, so there is no need to interact with a grace-period * are simply leaked, so there is no need to interact with a grace-period
* mechanism. The rcu_dereference_raw() calls are needed to handle * mechanism. The rcu_dereference_raw() calls are needed to handle
* concurrent insertions into the ftrace_list. * concurrent insertions into the ftrace_global_list.
* *
* Silly Alpha and silly pointer-speculation compiler optimizations! * Silly Alpha and silly pointer-speculation compiler optimizations!
*/ */
static void ftrace_list_func(unsigned long ip, unsigned long parent_ip) static void ftrace_global_list_func(unsigned long ip,
unsigned long parent_ip)
{ {
struct ftrace_ops *op = rcu_dereference_raw(ftrace_list); /*see above*/ struct ftrace_ops *op = rcu_dereference_raw(ftrace_global_list); /*see above*/
while (op != &ftrace_list_end) { while (op != &ftrace_list_end) {
op->func(ip, parent_ip); op->func(ip, parent_ip);
@ -163,11 +168,11 @@ static void update_global_ops(void)
* function directly. Otherwise, we need to iterate over the * function directly. Otherwise, we need to iterate over the
* registered callers. * registered callers.
*/ */
if (ftrace_list == &ftrace_list_end || if (ftrace_global_list == &ftrace_list_end ||
ftrace_list->next == &ftrace_list_end) ftrace_global_list->next == &ftrace_list_end)
func = ftrace_list->func; func = ftrace_global_list->func;
else else
func = ftrace_list_func; func = ftrace_global_list_func;
/* If we filter on pids, update to use the pid function */ /* If we filter on pids, update to use the pid function */
if (!list_empty(&ftrace_pids)) { if (!list_empty(&ftrace_pids)) {
@ -184,7 +189,11 @@ static void update_ftrace_function(void)
update_global_ops(); update_global_ops();
func = global_ops.func; if (ftrace_ops_list == &ftrace_list_end ||
ftrace_ops_list->next == &ftrace_list_end)
func = ftrace_ops_list->func;
else
func = ftrace_ops_list_func;
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
ftrace_trace_function = func; ftrace_trace_function = func;
@ -198,10 +207,10 @@ static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{ {
ops->next = *list; ops->next = *list;
/* /*
* We are entering ops into the ftrace_list but another * We are entering ops into the list but another
* CPU might be walking that list. We need to make sure * CPU might be walking that list. We need to make sure
* the ops->next pointer is valid before another CPU sees * the ops->next pointer is valid before another CPU sees
* the ops pointer included into the ftrace_list. * the ops pointer included into the list.
*/ */
rcu_assign_pointer(*list, ops); rcu_assign_pointer(*list, ops);
} }
@ -238,7 +247,18 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
if (FTRACE_WARN_ON(ops == &global_ops)) if (FTRACE_WARN_ON(ops == &global_ops))
return -EINVAL; return -EINVAL;
add_ftrace_ops(&ftrace_list, ops); if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
return -EBUSY;
if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
int first = ftrace_global_list == &ftrace_list_end;
add_ftrace_ops(&ftrace_global_list, ops);
ops->flags |= FTRACE_OPS_FL_ENABLED;
if (first)
add_ftrace_ops(&ftrace_ops_list, &global_ops);
} else
add_ftrace_ops(&ftrace_ops_list, ops);
if (ftrace_enabled) if (ftrace_enabled)
update_ftrace_function(); update_ftrace_function();
@ -252,12 +272,24 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
if (ftrace_disabled) if (ftrace_disabled)
return -ENODEV; return -ENODEV;
if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
return -EBUSY;
if (FTRACE_WARN_ON(ops == &global_ops)) if (FTRACE_WARN_ON(ops == &global_ops))
return -EINVAL; return -EINVAL;
ret = remove_ftrace_ops(&ftrace_list, ops); if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
ret = remove_ftrace_ops(&ftrace_global_list, ops);
if (!ret && ftrace_global_list == &ftrace_list_end)
ret = remove_ftrace_ops(&ftrace_ops_list, &global_ops);
if (!ret)
ops->flags &= ~FTRACE_OPS_FL_ENABLED;
} else
ret = remove_ftrace_ops(&ftrace_ops_list, ops);
if (ret < 0) if (ret < 0)
return ret; return ret;
if (ftrace_enabled) if (ftrace_enabled)
update_ftrace_function(); update_ftrace_function();
@ -928,10 +960,6 @@ static const struct ftrace_hash empty_hash = {
}; };
#define EMPTY_HASH ((struct ftrace_hash *)&empty_hash) #define EMPTY_HASH ((struct ftrace_hash *)&empty_hash)
enum {
FTRACE_OPS_FL_ENABLED = 1,
};
static struct ftrace_ops global_ops = { static struct ftrace_ops global_ops = {
.func = ftrace_stub, .func = ftrace_stub,
.notrace_hash = EMPTY_HASH, .notrace_hash = EMPTY_HASH,
@ -1189,6 +1217,40 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
return 0; return 0;
} }
/*
* Test the hashes for this ops to see if we want to call
* the ops->func or not.
*
* It's a match if the ip is in the ops->filter_hash or
* the filter_hash does not exist or is empty,
* AND
* the ip is not in the ops->notrace_hash.
*/
static int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
{
struct ftrace_hash *filter_hash;
struct ftrace_hash *notrace_hash;
int ret;
/* The hashes are freed with call_rcu_sched() */
preempt_disable_notrace();
filter_hash = rcu_dereference_raw(ops->filter_hash);
notrace_hash = rcu_dereference_raw(ops->notrace_hash);
if ((!filter_hash || !filter_hash->count ||
ftrace_lookup_ip(filter_hash, ip)) &&
(!notrace_hash || !notrace_hash->count ||
!ftrace_lookup_ip(notrace_hash, ip)))
ret = 1;
else
ret = 0;
preempt_enable_notrace();
return ret;
}
/* /*
* This is a double for. Do not use 'break' to break out of the loop, * This is a double for. Do not use 'break' to break out of the loop,
* you must use a goto. * you must use a goto.
@ -1232,7 +1294,7 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
if (filter_hash) { if (filter_hash) {
hash = ops->filter_hash; hash = ops->filter_hash;
other_hash = ops->notrace_hash; other_hash = ops->notrace_hash;
if (!hash->count) if (!hash || !hash->count)
all = 1; all = 1;
} else { } else {
inc = !inc; inc = !inc;
@ -1242,7 +1304,7 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
* If the notrace hash has no items, * If the notrace hash has no items,
* then there's nothing to do. * then there's nothing to do.
*/ */
if (!hash->count) if (hash && !hash->count)
return; return;
} }
@ -1256,11 +1318,11 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
* Only the filter_hash affects all records. * Only the filter_hash affects all records.
* Update if the record is not in the notrace hash. * Update if the record is not in the notrace hash.
*/ */
if (!ftrace_lookup_ip(other_hash, rec->ip)) if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
match = 1; match = 1;
} else { } else {
in_hash = !!ftrace_lookup_ip(hash, rec->ip); in_hash = hash && !!ftrace_lookup_ip(hash, rec->ip);
in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip); in_other_hash = other_hash && !!ftrace_lookup_ip(other_hash, rec->ip);
/* /*
* *
@ -1546,6 +1608,7 @@ static void ftrace_run_update_code(int command)
static ftrace_func_t saved_ftrace_func; static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up; static int ftrace_start_up;
static int global_start_up;
static void ftrace_startup_enable(int command) static void ftrace_startup_enable(int command)
{ {
@ -1562,14 +1625,25 @@ static void ftrace_startup_enable(int command)
static void ftrace_startup(struct ftrace_ops *ops, int command) static void ftrace_startup(struct ftrace_ops *ops, int command)
{ {
bool hash_enable = true;
if (unlikely(ftrace_disabled)) if (unlikely(ftrace_disabled))
return; return;
ftrace_start_up++; ftrace_start_up++;
command |= FTRACE_ENABLE_CALLS; command |= FTRACE_ENABLE_CALLS;
/* ops marked global share the filter hashes */
if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
ops = &global_ops;
/* Don't update hash if global is already set */
if (global_start_up)
hash_enable = false;
global_start_up++;
}
ops->flags |= FTRACE_OPS_FL_ENABLED; ops->flags |= FTRACE_OPS_FL_ENABLED;
if (ftrace_start_up == 1) if (hash_enable)
ftrace_hash_rec_enable(ops, 1); ftrace_hash_rec_enable(ops, 1);
ftrace_startup_enable(command); ftrace_startup_enable(command);
@ -1577,6 +1651,8 @@ static void ftrace_startup(struct ftrace_ops *ops, int command)
static void ftrace_shutdown(struct ftrace_ops *ops, int command) static void ftrace_shutdown(struct ftrace_ops *ops, int command)
{ {
bool hash_disable = true;
if (unlikely(ftrace_disabled)) if (unlikely(ftrace_disabled))
return; return;
@ -1588,13 +1664,25 @@ static void ftrace_shutdown(struct ftrace_ops *ops, int command)
*/ */
WARN_ON_ONCE(ftrace_start_up < 0); WARN_ON_ONCE(ftrace_start_up < 0);
if (!ftrace_start_up) if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
ops = &global_ops;
global_start_up--;
WARN_ON_ONCE(global_start_up < 0);
/* Don't update hash if global still has users */
if (global_start_up) {
WARN_ON_ONCE(!ftrace_start_up);
hash_disable = false;
}
}
if (hash_disable)
ftrace_hash_rec_disable(ops, 1); ftrace_hash_rec_disable(ops, 1);
if (!ftrace_start_up) { if (ops != &global_ops || !global_start_up)
command |= FTRACE_DISABLE_CALLS;
ops->flags &= ~FTRACE_OPS_FL_ENABLED; ops->flags &= ~FTRACE_OPS_FL_ENABLED;
}
if (!ftrace_start_up)
command |= FTRACE_DISABLE_CALLS;
if (saved_ftrace_func != ftrace_trace_function) { if (saved_ftrace_func != ftrace_trace_function) {
saved_ftrace_func = ftrace_trace_function; saved_ftrace_func = ftrace_trace_function;
@ -2381,6 +2469,7 @@ static int ftrace_probe_registered;
static void __enable_ftrace_function_probe(void) static void __enable_ftrace_function_probe(void)
{ {
int ret;
int i; int i;
if (ftrace_probe_registered) if (ftrace_probe_registered)
@ -2395,13 +2484,16 @@ static void __enable_ftrace_function_probe(void)
if (i == FTRACE_FUNC_HASHSIZE) if (i == FTRACE_FUNC_HASHSIZE)
return; return;
__register_ftrace_function(&trace_probe_ops); ret = __register_ftrace_function(&trace_probe_ops);
ftrace_startup(&global_ops, 0); if (!ret)
ftrace_startup(&trace_probe_ops, 0);
ftrace_probe_registered = 1; ftrace_probe_registered = 1;
} }
static void __disable_ftrace_function_probe(void) static void __disable_ftrace_function_probe(void)
{ {
int ret;
int i; int i;
if (!ftrace_probe_registered) if (!ftrace_probe_registered)
@ -2414,8 +2506,10 @@ static void __disable_ftrace_function_probe(void)
} }
/* no more funcs left */ /* no more funcs left */
__unregister_ftrace_function(&trace_probe_ops); ret = __unregister_ftrace_function(&trace_probe_ops);
ftrace_shutdown(&global_ops, 0); if (!ret)
ftrace_shutdown(&trace_probe_ops, 0);
ftrace_probe_registered = 0; ftrace_probe_registered = 0;
} }
@ -3319,8 +3413,28 @@ static inline void ftrace_startup_enable(int command) { }
# define ftrace_shutdown(ops, command) do { } while (0) # define ftrace_shutdown(ops, command) do { } while (0)
# define ftrace_startup_sysctl() do { } while (0) # define ftrace_startup_sysctl() do { } while (0)
# define ftrace_shutdown_sysctl() do { } while (0) # define ftrace_shutdown_sysctl() do { } while (0)
static inline int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
{
return 1;
}
#endif /* CONFIG_DYNAMIC_FTRACE */ #endif /* CONFIG_DYNAMIC_FTRACE */
static void
ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
{
/* see comment above ftrace_global_list_func */
struct ftrace_ops *op = rcu_dereference_raw(ftrace_ops_list);
while (op != &ftrace_list_end) {
if (ftrace_ops_test(op, ip))
op->func(ip, parent_ip);
op = rcu_dereference_raw(op->next);
};
}
static void clear_ftrace_swapper(void) static void clear_ftrace_swapper(void)
{ {
struct task_struct *p; struct task_struct *p;
@ -3621,7 +3735,9 @@ int register_ftrace_function(struct ftrace_ops *ops)
goto out_unlock; goto out_unlock;
ret = __register_ftrace_function(ops); ret = __register_ftrace_function(ops);
ftrace_startup(&global_ops, 0); if (!ret)
ftrace_startup(ops, 0);
out_unlock: out_unlock:
mutex_unlock(&ftrace_lock); mutex_unlock(&ftrace_lock);
@ -3640,7 +3756,8 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
mutex_lock(&ftrace_lock); mutex_lock(&ftrace_lock);
ret = __unregister_ftrace_function(ops); ret = __unregister_ftrace_function(ops);
ftrace_shutdown(&global_ops, 0); if (!ret)
ftrace_shutdown(ops, 0);
mutex_unlock(&ftrace_lock); mutex_unlock(&ftrace_lock);
return ret; return ret;
@ -3670,11 +3787,11 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
ftrace_startup_sysctl(); ftrace_startup_sysctl();
/* we are starting ftrace again */ /* we are starting ftrace again */
if (ftrace_list != &ftrace_list_end) { if (ftrace_ops_list != &ftrace_list_end) {
if (ftrace_list->next == &ftrace_list_end) if (ftrace_ops_list->next == &ftrace_list_end)
ftrace_trace_function = ftrace_list->func; ftrace_trace_function = ftrace_ops_list->func;
else else
ftrace_trace_function = ftrace_list_func; ftrace_trace_function = ftrace_ops_list_func;
} }
} else { } else {

View File

@ -149,11 +149,13 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
static struct ftrace_ops trace_ops __read_mostly = static struct ftrace_ops trace_ops __read_mostly =
{ {
.func = function_trace_call, .func = function_trace_call,
.flags = FTRACE_OPS_FL_GLOBAL,
}; };
static struct ftrace_ops trace_stack_ops __read_mostly = static struct ftrace_ops trace_stack_ops __read_mostly =
{ {
.func = function_stack_trace_call, .func = function_stack_trace_call,
.flags = FTRACE_OPS_FL_GLOBAL,
}; };
/* Our two options */ /* Our two options */

View File

@ -153,6 +153,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
static struct ftrace_ops trace_ops __read_mostly = static struct ftrace_ops trace_ops __read_mostly =
{ {
.func = irqsoff_tracer_call, .func = irqsoff_tracer_call,
.flags = FTRACE_OPS_FL_GLOBAL,
}; };
#endif /* CONFIG_FUNCTION_TRACER */ #endif /* CONFIG_FUNCTION_TRACER */

View File

@ -129,6 +129,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
static struct ftrace_ops trace_ops __read_mostly = static struct ftrace_ops trace_ops __read_mostly =
{ {
.func = wakeup_tracer_call, .func = wakeup_tracer_call,
.flags = FTRACE_OPS_FL_GLOBAL,
}; };
#endif /* CONFIG_FUNCTION_TRACER */ #endif /* CONFIG_FUNCTION_TRACER */

View File

@ -133,6 +133,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
static struct ftrace_ops trace_ops __read_mostly = static struct ftrace_ops trace_ops __read_mostly =
{ {
.func = stack_trace_call, .func = stack_trace_call,
.flags = FTRACE_OPS_FL_GLOBAL,
}; };
static ssize_t static ssize_t