Merge tag 'trace-v4.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing fixes from Steven Rostedt:

 - Fix a bug caused by not cleaning up an instance's unique function
   triggers when the instance is deleted, and add a selftest that
   triggers that bug.

 - Fix the delayed kprobe optimization running after the kprobes
   boot-up self tests have been removed by the freeing of init memory.

 - Add a comment to kprobes explaining why the delayed optimization is
   not a problem for module removal, to keep other developers from
   puzzling over that riddle.

 - Fix another case of RCU not watching in stack trace tracing.

Naveen N. Rao (4):
      ftrace: Simplify glob handling in unregister_ftrace_function_probe_func()
      ftrace/instances: Clear function triggers when removing instances
      selftests/ftrace: Fix bashisms
      selftests/ftrace: Add test to remove instance with active event triggers

Steven Rostedt (1):
      tracing: Move postpone selftests to core from early_initcall

Steven Rostedt (VMware) (3):
      ftrace: Remove #ifdef from code and add clear_ftrace_function_probes() stub
      kprobes: Document how optimized kprobes are removed from module unload
      tracing: Make sure RCU is watching before calling a stack trace

Thomas Gleixner (1):
      tracing/kprobes: Enforce kprobes teardown after testing

* tag 'trace-v4.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  tracing: Make sure RCU is watching before calling a stack trace
  kprobes: Document how optimized kprobes are removed from module unload
  selftests/ftrace: Add test to remove instance with active event triggers
  selftests/ftrace: Fix bashisms
  ftrace: Remove #ifdef from code and add clear_ftrace_function_probes() stub
  ftrace/instances: Clear function triggers when removing instances
  ftrace: Simplify glob handling in unregister_ftrace_function_probe_func()
  tracing/kprobes: Enforce kprobes teardown after testing
  tracing: Move postpone selftests to core from early_initcall
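As context for the first fix: the bug is easy to hit from userspace, and the new selftest below does essentially the following (a minimal sketch, not the selftest itself; it assumes tracefs is mounted at /sys/kernel/tracing, and the instance name "foo" is illustrative):

    # create a tracing instance, install a function trigger in it,
    # then delete the instance while the trigger is still active
    cd /sys/kernel/tracing/instances
    mkdir foo
    echo "schedule:enable_event:sched:sched_switch" > foo/set_ftrace_filter
    rmdir foo   # before the fix, the trigger was never cleared here

Without clear_ftrace_function_probes() being called from instance_rmdir(), the trigger outlives the instance it references.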
commit 56f410cf45
@@ -349,6 +349,9 @@ extern int proc_kprobes_optimization_handler(struct ctl_table *table,
 					     int write, void __user *buffer,
 					     size_t *length, loff_t *ppos);
 #endif
+extern void wait_for_kprobe_optimizer(void);
+#else
+static inline void wait_for_kprobe_optimizer(void) { }
 #endif /* CONFIG_OPTPROBES */
 #ifdef CONFIG_KPROBES_ON_FTRACE
 extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
@@ -595,7 +595,7 @@ static void kprobe_optimizer(struct work_struct *work)
 }
 
 /* Wait for completing optimization and unoptimization */
-static void wait_for_kprobe_optimizer(void)
+void wait_for_kprobe_optimizer(void)
 {
 	mutex_lock(&kprobe_mutex);
 
@@ -2183,6 +2183,12 @@ static int kprobes_module_callback(struct notifier_block *nb,
 				 * The vaddr this probe is installed will soon
 				 * be vfreed buy not synced to disk. Hence,
 				 * disarming the breakpoint isn't needed.
+				 *
+				 * Note, this will also move any optimized probes
+				 * that are pending to be removed from their
+				 * corresponding lists to the freeing_list and
+				 * will not be touched by the delayed
+				 * kprobe_optimizer work handler.
 				 */
 				kill_kprobe(p);
 			}
@@ -4144,9 +4144,9 @@ unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
 	int i, ret = -ENODEV;
 	int size;
 
-	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
+	if (!glob || !strlen(glob) || !strcmp(glob, "*"))
 		func_g.search = NULL;
-	else if (glob) {
+	else {
 		int not;
 
 		func_g.type = filter_parse_regex(glob, strlen(glob),
@@ -4256,6 +4256,14 @@ unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
 	return ret;
 }
 
+void clear_ftrace_function_probes(struct trace_array *tr)
+{
+	struct ftrace_func_probe *probe, *n;
+
+	list_for_each_entry_safe(probe, n, &tr->func_probes, list)
+		unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops);
+}
+
 static LIST_HEAD(ftrace_commands);
 static DEFINE_MUTEX(ftrace_cmd_mutex);
 
@@ -1558,7 +1558,7 @@ static __init int init_trace_selftests(void)
 
 	return 0;
 }
-early_initcall(init_trace_selftests);
+core_initcall(init_trace_selftests);
 #else
 static inline int run_tracer_selftest(struct tracer *type)
 {
@@ -2568,7 +2568,36 @@ static inline void ftrace_trace_stack(struct trace_array *tr,
 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
 		   int pc)
 {
-	__ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
+	struct ring_buffer *buffer = tr->trace_buffer.buffer;
+
+	if (rcu_is_watching()) {
+		__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
+		return;
+	}
+
+	/*
+	 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
+	 * but if the above rcu_is_watching() failed, then the NMI
+	 * triggered someplace critical, and rcu_irq_enter() should
+	 * not be called from NMI.
+	 */
+	if (unlikely(in_nmi()))
+		return;
+
+	/*
+	 * It is possible that a function is being traced in a
+	 * location that RCU is not watching. A call to
+	 * rcu_irq_enter() will make sure that it is, but there's
+	 * a few internal rcu functions that could be traced
+	 * where that wont work either. In those cases, we just
+	 * do nothing.
+	 */
+	if (unlikely(rcu_irq_enter_disabled()))
+		return;
+
+	rcu_irq_enter_irqson();
+	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
+	rcu_irq_exit_irqson();
 }
 
 /**
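The scenario this change guards against can be set up from userspace with a stacktrace function trigger (a hedged sketch; assumes tracefs is mounted at /sys/kernel/tracing, and the traced function name is illustrative):

    cd /sys/kernel/tracing
    # record a kernel stack trace whenever schedule() is hit; if the
    # traced function runs where RCU is not watching, the
    # rcu_is_watching() check above is what keeps this safe
    echo 'schedule:stacktrace' > set_ftrace_filter
    head trace
    echo '!schedule:stacktrace' > set_ftrace_filter   # remove the trigger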
@@ -7550,6 +7579,7 @@ static int instance_rmdir(const char *name)
 	}
 
 	tracing_set_nop(tr);
+	clear_ftrace_function_probes(tr);
 	event_trace_del_tracer(tr);
 	ftrace_clear_pids(tr);
 	ftrace_destroy_function_files(tr);
@@ -980,6 +980,7 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr,
 extern int
 unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
 				      struct ftrace_probe_ops *ops);
+extern void clear_ftrace_function_probes(struct trace_array *tr);
 
 int register_ftrace_command(struct ftrace_func_command *cmd);
 int unregister_ftrace_command(struct ftrace_func_command *cmd);
@@ -998,6 +999,10 @@ static inline __init int unregister_ftrace_command(char *cmd_name)
 {
 	return -EINVAL;
 }
+static inline void clear_ftrace_function_probes(struct trace_array *tr)
+{
+}
+
 /*
  * The ops parameter passed in is usually undefined.
  * This must be a macro.
@@ -1535,6 +1535,11 @@ static __init int kprobe_trace_self_tests_init(void)
 
 end:
 	release_all_trace_kprobes();
+	/*
+	 * Wait for the optimizer work to finish. Otherwise it might fiddle
+	 * with probes in already freed __init text.
+	 */
+	wait_for_kprobe_optimizer();
 	if (warn)
 		pr_cont("NG: Some tests are failed. Please check them.\n");
 	else
@@ -58,7 +58,7 @@ parse_opts() { # opts
   ;;
   --verbose|-v|-vv)
     VERBOSE=$((VERBOSE + 1))
-    [ $1 == '-vv' ] && VERBOSE=$((VERBOSE + 1))
+    [ $1 = '-vv' ] && VERBOSE=$((VERBOSE + 1))
     shift 1
   ;;
   --debug|-d)
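The '==' operator inside single brackets is the bashism being fixed here: POSIX test(1) only defines '='. A minimal illustration of why this matters when ftracetest runs under a strict POSIX shell such as dash (hypothetical snippet, not part of the test suite):

    #!/bin/sh
    v=-vv
    [ "$v" == '-vv' ] && echo bash-only  # dash: fails with 'unexpected operator'
    [ "$v" = '-vv' ] && echo portable    # defined by POSIX, works everywhere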
@@ -48,7 +48,7 @@ test_event_enabled() {
     e=`cat $EVENT_ENABLE`
     if [ "$e" != $val ]; then
 	echo "Expected $val but found $e"
-	exit -1
+	exit 1
     fi
 }
 
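'exit -1' is likewise non-portable: POSIX only specifies exit with an unsigned decimal argument, and the status is truncated to 8 bits anyway, so -1 and 255 would be indistinguishable even where it happens to work. A hypothetical illustration:

    #!/bin/sh
    # bash masks 'exit -1' to status 255; dash rejects the argument outright.
    # 'exit 1' is the portable way to report failure.
    false || exit 1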
@@ -34,10 +34,10 @@ reset_ftrace_filter() { # reset all triggers in set_ftrace_filter
     echo > set_ftrace_filter
     grep -v '^#' set_ftrace_filter | while read t; do
 	tr=`echo $t | cut -d: -f2`
-	if [ "$tr" == "" ]; then
+	if [ "$tr" = "" ]; then
 	    continue
 	fi
-	if [ $tr == "enable_event" -o $tr == "disable_event" ]; then
+	if [ $tr = "enable_event" -o $tr = "disable_event" ]; then
 	    tr=`echo $t | cut -d: -f1-4`
 	    limit=`echo $t | cut -d: -f5`
 	else
@@ -75,9 +75,13 @@ rmdir foo
 if [ -d foo ]; then
 	fail "foo still exists"
 fi
-exit 0
-
 
+mkdir foo
+echo "schedule:enable_event:sched:sched_switch" > foo/set_ftrace_filter
+rmdir foo
+if [ -d foo ]; then
+	fail "foo still exists"
+fi
 
 
 instance_slam() {
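For completeness, the new instance test can be run on its own through the ftracetest runner touched earlier in this series (a sketch; the exact location of the testcase under test.d/instances is an assumption):

    cd tools/testing/selftests/ftrace
    sudo ./ftracetest test.d/instances/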