pstore/ftrace: Convert to its own enable/disable debugfs knob

With this patch we no longer reuse the function tracer infrastructure;
instead, we register our own tracer back-end via a debugfs knob.

It's a bit more code, but that is the only downside. On the bright side we
have:

- Ability to make the persistent_ram module removable (when needed, we
  can move the ftrace_ops struct into a module). Note that persistent_ram
  is still not removable for other reasons, but with this patch it is one
  less thing to worry about;

- The pstore part is more isolated from the generic function tracer. We
  already tried registering our own tracer in available_tracers, but that
  way we lose the ability to see the traces while we record them to
  pstore. This solution is somewhere in the middle: we register only the
  "internal ftracer" back-end, but not the "front-end";

- When only pstore tracing is enabled, the kernel writes solely to the
  pstore buffer, omitting the function tracer buffer (which, of course,
  can still be enabled via 'echo function > current_tracer').

Suggested-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Anton Vorontsov <anton.vorontsov@linaro.org>
Anton Vorontsov 2012-07-17 14:26:15 -07:00
parent b4a871bce6
commit 65f8c95e46
7 changed files with 105 additions and 26 deletions
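
For orientation before the per-file diffs: "registering our own tracer
back-end" means hooking a callback directly into ftrace with
register_ftrace_function(), bypassing the function tracer front-end in
kernel/trace/trace_functions.c. The following is a minimal sketch of that
pattern, using the two-argument callback signature of this kernel
generation (as seen in the patch); the identifiers my_trace_callback and
my_trace_ops are illustrative and do not come from the patch itself.

#include <linux/cache.h>
#include <linux/ftrace.h>
#include <linux/module.h>

/* Called on every traced function entry; must be notrace so the
 * callback does not recurse into the tracer. */
static void notrace my_trace_callback(unsigned long ip,
                                      unsigned long parent_ip)
{
        /* record ip/parent_ip somewhere; pstore uses psinfo->write_buf() */
}

static struct ftrace_ops my_trace_ops __read_mostly = {
        .func = my_trace_callback,
};

static int __init my_tracer_init(void)
{
        /* From this point the callback fires for every traced function. */
        return register_ftrace_function(&my_trace_ops);
}

static void __exit my_tracer_exit(void)
{
        unregister_ftrace_function(&my_trace_ops);
}

module_init(my_tracer_init);
module_exit(my_tracer_exit);
MODULE_LICENSE("GPL");

The patch below performs the same registration, but gates it behind a
debugfs file so it can be toggled at run time instead of at module
init/exit.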

Documentation/ramoops.txt

@@ -102,9 +102,7 @@ related hangs. The functions call chain log is stored in a "ftrace-ramoops"
 file. Here is an example of usage:
 
  # mount -t debugfs debugfs /sys/kernel/debug/
- # cd /sys/kernel/debug/tracing
- # echo function > current_tracer
- # echo 1 > options/func_pstore
+ # echo 1 > /sys/kernel/debug/pstore/record_ftrace
  # reboot -f
  [...]
  # mount -t pstore pstore /mnt/
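
The same knob can also be driven from a test program instead of the shell.
A small user-space sketch, assuming debugfs is mounted at
/sys/kernel/debug as in the example above and that the record_ftrace file
added by this patch is present:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* Path taken from the documentation hunk above; requires debugfs
         * mounted and CONFIG_PSTORE_FTRACE enabled. */
        int fd = open("/sys/kernel/debug/pstore/record_ftrace", O_WRONLY);

        if (fd < 0) {
                perror("open record_ftrace");
                return 1;
        }
        /* Write "1" to start recording; write "0" to stop. */
        if (write(fd, "1", 1) != 1) {
                perror("write record_ftrace");
                close(fd);
                return 1;
        }
        close(fd);
        return 0;
}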

fs/pstore/Kconfig

@@ -23,6 +23,7 @@ config PSTORE_FTRACE
         bool "Persistent function tracer"
         depends on PSTORE
         depends on FUNCTION_TRACER
+        depends on DEBUG_FS
         help
           With this option kernel traces function calls into a persistent
           ram buffer that can be decoded and dumped after reboot through

fs/pstore/ftrace.c

@@ -17,19 +17,113 @@
 #include <linux/percpu.h>
 #include <linux/smp.h>
 #include <linux/atomic.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/ftrace.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/cache.h>
 #include <asm/barrier.h>
 #include "internal.h"
 
-void notrace pstore_ftrace_call(unsigned long ip, unsigned long parent_ip)
+static void notrace pstore_ftrace_call(unsigned long ip,
+                                       unsigned long parent_ip)
 {
+        unsigned long flags;
         struct pstore_ftrace_record rec = {};
 
         if (unlikely(oops_in_progress))
                 return;
 
+        local_irq_save(flags);
+
         rec.ip = ip;
         rec.parent_ip = parent_ip;
         pstore_ftrace_encode_cpu(&rec, raw_smp_processor_id());
         psinfo->write_buf(PSTORE_TYPE_FTRACE, 0, NULL, 0, (void *)&rec,
                           sizeof(rec), psinfo);
+
+        local_irq_restore(flags);
+}
+
+static struct ftrace_ops pstore_ftrace_ops __read_mostly = {
+        .func = pstore_ftrace_call,
+};
+
+static DEFINE_MUTEX(pstore_ftrace_lock);
+static bool pstore_ftrace_enabled;
+
+static ssize_t pstore_ftrace_knob_write(struct file *f, const char __user *buf,
+                                        size_t count, loff_t *ppos)
+{
+        u8 on;
+        ssize_t ret;
+
+        ret = kstrtou8_from_user(buf, count, 2, &on);
+        if (ret)
+                return ret;
+
+        mutex_lock(&pstore_ftrace_lock);
+
+        if (!on ^ pstore_ftrace_enabled)
+                goto out;
+
+        if (on)
+                ret = register_ftrace_function(&pstore_ftrace_ops);
+        else
+                ret = unregister_ftrace_function(&pstore_ftrace_ops);
+        if (ret) {
+                pr_err("%s: unable to %sregister ftrace ops: %zd\n",
+                       __func__, on ? "" : "un", ret);
+                goto err;
+        }
+
+        pstore_ftrace_enabled = on;
+out:
+        ret = count;
+err:
+        mutex_unlock(&pstore_ftrace_lock);
+
+        return ret;
+}
+
+static ssize_t pstore_ftrace_knob_read(struct file *f, char __user *buf,
+                                       size_t count, loff_t *ppos)
+{
+        char val[] = { '0' + pstore_ftrace_enabled, '\n' };
+
+        return simple_read_from_buffer(buf, count, ppos, val, sizeof(val));
+}
+
+static const struct file_operations pstore_knob_fops = {
+        .open   = simple_open,
+        .read   = pstore_ftrace_knob_read,
+        .write  = pstore_ftrace_knob_write,
+};
+
+void pstore_register_ftrace(void)
+{
+        struct dentry *dir;
+        struct dentry *file;
+
+        if (!psinfo->write_buf)
+                return;
+
+        dir = debugfs_create_dir("pstore", NULL);
+        if (!dir) {
+                pr_err("%s: unable to create pstore directory\n", __func__);
+                return;
+        }
+
+        file = debugfs_create_file("record_ftrace", 0600, dir, NULL,
+                                   &pstore_knob_fops);
+        if (!file) {
+                pr_err("%s: unable to create record_ftrace file\n", __func__);
+                goto err_file;
+        }
+
+        return;
+
+err_file:
+        debugfs_remove(dir);
 }
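
One detail in the write handler above that is easy to misread:
kstrtou8_from_user(buf, count, 2, &on) parses the user input as a base-2
number, and the guard "if (!on ^ pstore_ftrace_enabled) goto out;" skips
the register/unregister call whenever the truth value of the request
already matches the current state. A tiny, purely illustrative user-space
check of that equivalence (not part of the patch):

#include <stdio.h>

int main(void)
{
        /* "guard" mirrors the kernel expression !on ^ pstore_ftrace_enabled;
         * it is true exactly when !!on == enabled, i.e. a no-op request. */
        int on_values[] = { 0, 1, 2 };  /* base-2 parsing can yield values > 1 */

        for (int i = 0; i < 3; i++) {
                for (int enabled = 0; enabled <= 1; enabled++) {
                        int on = on_values[i];

                        printf("on=%d enabled=%d guard=%d (!!on==enabled)=%d\n",
                               on, enabled, !on ^ enabled, !!on == enabled);
                }
        }
        return 0;
}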

fs/pstore/internal.h

@@ -39,6 +39,12 @@ pstore_ftrace_decode_cpu(struct pstore_ftrace_record *rec)
 #endif
 }
 
+#ifdef CONFIG_PSTORE_FTRACE
+extern void pstore_register_ftrace(void);
+#else
+static inline void pstore_register_ftrace(void) {}
+#endif
+
 extern struct pstore_info *psinfo;
 
 extern void pstore_set_kmsg_bytes(int);

fs/pstore/platform.c

@@ -236,6 +236,7 @@ int pstore_register(struct pstore_info *psi)
 
         kmsg_dump_register(&pstore_dumper);
         pstore_register_console();
+        pstore_register_ftrace();
 
         if (pstore_update_ms >= 0) {
                 pstore_timer.expires = jiffies +

include/linux/pstore.h

@@ -64,14 +64,6 @@ struct pstore_info {
         void    *data;
 };
 
-#ifdef CONFIG_PSTORE_FTRACE
-extern void pstore_ftrace_call(unsigned long ip, unsigned long parent_ip);
-#else
-static inline void pstore_ftrace_call(unsigned long ip, unsigned long parent_ip)
-{ }
-#endif
-
 #ifdef CONFIG_PSTORE
 extern int pstore_register(struct pstore_info *);
 #else

kernel/trace/trace_functions.c

@@ -13,7 +13,6 @@
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
-#include <linux/pstore.h>
 #include <linux/fs.h>
 
 #include "trace.h"
@@ -75,10 +74,9 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
         preempt_enable_notrace();
 }
 
-/* Our two options */
+/* Our option */
 enum {
         TRACE_FUNC_OPT_STACK    = 0x1,
-        TRACE_FUNC_OPT_PSTORE   = 0x2,
 };
 
 static struct tracer_flags func_flags;
@@ -106,12 +104,6 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
         disabled = atomic_inc_return(&data->disabled);
 
         if (likely(disabled == 1)) {
-                /*
-                 * So far tracing doesn't support multiple buffers, so
-                 * we make an explicit call for now.
-                 */
-                if (unlikely(func_flags.val & TRACE_FUNC_OPT_PSTORE))
-                        pstore_ftrace_call(ip, parent_ip);
                 pc = preempt_count();
                 trace_function(tr, ip, parent_ip, flags, pc);
         }
@@ -176,9 +168,6 @@ static struct ftrace_ops trace_stack_ops __read_mostly =
 static struct tracer_opt func_opts[] = {
 #ifdef CONFIG_STACKTRACE
         { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
-#endif
-#ifdef CONFIG_PSTORE_FTRACE
-        { TRACER_OPT(func_pstore, TRACE_FUNC_OPT_PSTORE) },
 #endif
         { } /* Always set a last empty entry */
 };
@@ -231,8 +220,6 @@ static int func_set_flag(u32 old_flags, u32 bit, int set)
                         register_ftrace_function(&trace_ops);
                 }
 
-                break;
-        case TRACE_FUNC_OPT_PSTORE:
                 break;
         default:
                 return -EINVAL;