Merge tag 'trace-v4.6' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing updates from Steven Rostedt:
 "Nothing major this round.  Mostly small clean ups and fixes.

  Some visible changes:

   - A new flag was added to distinguish traces done in NMI context.

   - Preempt tracer now shows functions where preemption is disabled
     but interrupts are still enabled.

  Other notes:

   - Updates were done to function tracing to allow better performance
     with perf.

   - Infrastructure code has been added to allow for a new histogram
     feature for recording live trace event histograms that can be
     configured by simple user commands.  The feature itself was just
     finished, but needs a round in linux-next before being pulled.
     This only includes some infrastructure changes that will be
     needed"

* tag 'trace-v4.6' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (22 commits)
  tracing: Record and show NMI state
  tracing: Fix trace_printk() to print when not using bprintk()
  tracing: Remove redundant reset per-CPU buff in irqsoff tracer
  x86: ftrace: Fix the misleading comment for arch/x86/kernel/ftrace.c
  tracing: Fix crash from reading trace_pipe with sendfile
  tracing: Have preempt(irqs)off trace preempt disabled functions
  tracing: Fix return while holding a lock in register_tracer()
  ftrace: Use kasprintf() in ftrace_profile_tracefs()
  ftrace: Update dynamic ftrace calls only if necessary
  ftrace: Make ftrace_hash_rec_enable return update bool
  tracing: Fix typoes in code comment and printk in trace_nop.c
  tracing, writeback: Replace cgroup path to cgroup ino
  tracing: Use flags instead of bool in trigger structure
  tracing: Add an unreg_all() callback to trigger commands
  tracing: Add needs_rec flag to event triggers
  tracing: Add a per-event-trigger 'paused' field
  tracing: Add get_syscall_name()
  tracing: Add event record param to trigger_ops.func()
  tracing: Make event trigger functions available
  tracing: Make ftrace_event_field checking functions available
  ...
commit e46b4e2b46
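For orientation before the diff: the headline "NMI flag" change records NMI state in each trace entry and surfaces it as a new 'Z'/'z' character in the latency-format context column (see the trace_output.c hunks near the end). The following stand-alone C sketch mirrors that ternary chain; the constant values are copied from the kernel/trace/trace.h hunk below, but the program itself is illustrative, not kernel code.

#include <stdio.h>

/* Mirrors enum trace_flag_type after this merge; TRACE_FLAG_NMI (0x40)
 * is the newly added bit. Illustrative user-space code only. */
enum {
        TRACE_FLAG_IRQS_OFF = 0x01,
        TRACE_FLAG_HARDIRQ  = 0x08,
        TRACE_FLAG_SOFTIRQ  = 0x10,
        TRACE_FLAG_NMI      = 0x40,     /* new in this merge */
};

/* Same chain the trace_output.c hunk adds:
 * 'Z' = NMI that interrupted a hardirq, 'z' = plain NMI context. */
static char context_char(unsigned int flags)
{
        int nmi     = flags & TRACE_FLAG_NMI;
        int hardirq = flags & TRACE_FLAG_HARDIRQ;
        int softirq = flags & TRACE_FLAG_SOFTIRQ;

        return (nmi && hardirq)     ? 'Z' :
               nmi                  ? 'z' :
               (hardirq && softirq) ? 'H' :
               hardirq              ? 'h' :
               softirq              ? 's' : '.';
}

int main(void)
{
        printf("%c %c\n", context_char(TRACE_FLAG_NMI),
               context_char(TRACE_FLAG_NMI | TRACE_FLAG_HARDIRQ)); /* z Z */
        return 0;
}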
arch/x86/kernel/ftrace.c
@@ -1,5 +1,5 @@
 /*
- * Code for replacing ftrace calls with jumps.
+ * Dynamic function tracing support.
  *
  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
  *
include/linux/kernel.h
@@ -618,7 +618,7 @@ do {                                                    \
 
 #define do_trace_printk(fmt, args...)                          \
 do {                                                           \
-       static const char *trace_printk_fmt                     \
+       static const char *trace_printk_fmt __used              \
                __attribute__((section("__trace_printk_fmt"))) = \
                __builtin_constant_p(fmt) ? fmt : NULL;         \
                                                                \
@@ -662,7 +662,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...);
  */
 
 #define trace_puts(str) ({                                     \
-       static const char *trace_printk_fmt                     \
+       static const char *trace_printk_fmt __used              \
                __attribute__((section("__trace_printk_fmt"))) = \
                __builtin_constant_p(str) ? str : NULL;         \
                                                                \
@@ -684,7 +684,7 @@ extern void trace_dump_stack(int skip);
 #define ftrace_vprintk(fmt, vargs)                             \
 do {                                                           \
        if (__builtin_constant_p(fmt)) {                        \
-               static const char *trace_printk_fmt             \
+               static const char *trace_printk_fmt __used      \
                        __attribute__((section("__trace_printk_fmt"))) = \
                        __builtin_constant_p(fmt) ? fmt : NULL; \
                                                                \
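These kernel.h hunks fix trace_printk() by marking the section-placed format strings __used: without it a compiler may discard a static that nothing references, so the format never lands in the __trace_printk_fmt section the tracer walks. A minimal stand-alone sketch of the same pattern follows; the section name, macro, and symbols are my own illustration (GCC/Clang with GNU ld assumed), not the kernel's.

/* Sketch of the "collect statics into a named section" idiom. */
#include <stdio.h>

#define SECTION_FMT(str)                                        \
        ({                                                      \
                static const char *_fmt __attribute__((used))   \
                        __attribute__((section("my_fmt_section"))) = \
                        (str);                                  \
                _fmt;                                           \
        })

/* GNU ld provides these bounds for sections named like C identifiers. */
extern const char *__start_my_fmt_section[];
extern const char *__stop_my_fmt_section[];

int main(void)
{
        SECTION_FMT("hello %d\n");      /* entry lands in my_fmt_section */

        /* Walk the section, as the tracer walks __trace_printk_fmt.
         * Without __attribute__((used)) the entry may be dropped. */
        for (const char **p = __start_my_fmt_section;
             p < __stop_my_fmt_section; p++)
                printf("fmt: %s", *p);
        return 0;
}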
include/linux/trace_events.h
@@ -420,7 +420,8 @@ extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
 extern enum event_trigger_type event_triggers_call(struct trace_event_file *file,
                                                   void *rec);
 extern void event_triggers_post_call(struct trace_event_file *file,
-                                    enum event_trigger_type tt);
+                                    enum event_trigger_type tt,
+                                    void *rec);
 
 bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);
 
@@ -507,7 +508,7 @@ event_trigger_unlock_commit(struct trace_event_file *file,
                trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);
 
        if (tt)
-               event_triggers_post_call(file, tt);
+               event_triggers_post_call(file, tt, entry);
 }
 
 /**
@@ -540,7 +541,7 @@ event_trigger_unlock_commit_regs(struct trace_event_file *file,
                                                irq_flags, pc, regs);
 
        if (tt)
-               event_triggers_post_call(file, tt);
+               event_triggers_post_call(file, tt, entry);
 }
 
 #ifdef CONFIG_BPF_EVENTS
include/trace/events/kmem.h
@@ -140,42 +140,19 @@ DEFINE_EVENT(kmem_free, kfree,
 
        TP_ARGS(call_site, ptr)
 );
 
-DEFINE_EVENT_CONDITION(kmem_free, kmem_cache_free,
+DEFINE_EVENT(kmem_free, kmem_cache_free,
 
        TP_PROTO(unsigned long call_site, const void *ptr),
 
-       TP_ARGS(call_site, ptr),
-
-       /*
-        * This trace can be potentially called from an offlined cpu.
-        * Since trace points use RCU and RCU should not be used from
-        * offline cpus, filter such calls out.
-        * While this trace can be called from a preemptable section,
-        * it has no impact on the condition since tasks can migrate
-        * only from online cpus to other online cpus. Thus its safe
-        * to use raw_smp_processor_id.
-        */
-       TP_CONDITION(cpu_online(raw_smp_processor_id()))
+       TP_ARGS(call_site, ptr)
 );
 
-TRACE_EVENT_CONDITION(mm_page_free,
+TRACE_EVENT(mm_page_free,
 
        TP_PROTO(struct page *page, unsigned int order),
 
        TP_ARGS(page, order),
 
-       /*
-        * This trace can be potentially called from an offlined cpu.
-        * Since trace points use RCU and RCU should not be used from
-        * offline cpus, filter such calls out.
-        * While this trace can be called from a preemptable section,
-        * it has no impact on the condition since tasks can migrate
-        * only from online cpus to other online cpus. Thus its safe
-        * to use raw_smp_processor_id.
-        */
-       TP_CONDITION(cpu_online(raw_smp_processor_id())),
-
        TP_STRUCT__entry(
                __field(        unsigned long,  pfn             )
                __field(        unsigned int,   order           )
@@ -276,23 +253,12 @@ DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,
        TP_ARGS(page, order, migratetype)
 );
 
-TRACE_EVENT_CONDITION(mm_page_pcpu_drain,
+TRACE_EVENT(mm_page_pcpu_drain,
 
        TP_PROTO(struct page *page, unsigned int order, int migratetype),
 
        TP_ARGS(page, order, migratetype),
 
-       /*
-        * This trace can be potentially called from an offlined cpu.
-        * Since trace points use RCU and RCU should not be used from
-        * offline cpus, filter such calls out.
-        * While this trace can be called from a preemptable section,
-        * it has no impact on the condition since tasks can migrate
-        * only from online cpus to other online cpus. Thus its safe
-        * to use raw_smp_processor_id.
-        */
-       TP_CONDITION(cpu_online(raw_smp_processor_id())),
-
        TP_STRUCT__entry(
                __field(        unsigned long,  pfn             )
                __field(        unsigned int,   order           )
include/trace/events/tlb.h
@@ -34,13 +34,11 @@ TLB_FLUSH_REASON
 #define EM(a,b)                { a, b },
 #define EMe(a,b)       { a, b }
 
-TRACE_EVENT_CONDITION(tlb_flush,
+TRACE_EVENT(tlb_flush,
 
        TP_PROTO(int reason, unsigned long pages),
        TP_ARGS(reason, pages),
 
-       TP_CONDITION(cpu_online(smp_processor_id())),
-
        TP_STRUCT__entry(
                __field(          int, reason)
                __field(unsigned long,  pages)
include/trace/events/writeback.h
@@ -134,58 +134,28 @@ DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,
 #ifdef CREATE_TRACE_POINTS
 #ifdef CONFIG_CGROUP_WRITEBACK
 
-static inline size_t __trace_wb_cgroup_size(struct bdi_writeback *wb)
+static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb)
 {
-       return kernfs_path_len(wb->memcg_css->cgroup->kn) + 1;
+       return wb->memcg_css->cgroup->kn->ino;
 }
 
-static inline void __trace_wb_assign_cgroup(char *buf, struct bdi_writeback *wb)
-{
-       struct cgroup *cgrp = wb->memcg_css->cgroup;
-       char *path;
-
-       path = cgroup_path(cgrp, buf, kernfs_path_len(cgrp->kn) + 1);
-       WARN_ON_ONCE(path != buf);
-}
-
-static inline size_t __trace_wbc_cgroup_size(struct writeback_control *wbc)
+static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc)
 {
        if (wbc->wb)
-               return __trace_wb_cgroup_size(wbc->wb);
+               return __trace_wb_assign_cgroup(wbc->wb);
        else
-               return 2;
-}
-
-static inline void __trace_wbc_assign_cgroup(char *buf,
-                                            struct writeback_control *wbc)
-{
-       if (wbc->wb)
-               __trace_wb_assign_cgroup(buf, wbc->wb);
-       else
-               strcpy(buf, "/");
+               return -1U;
 }
 
 #else  /* CONFIG_CGROUP_WRITEBACK */
 
-static inline size_t __trace_wb_cgroup_size(struct bdi_writeback *wb)
+static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb)
 {
-       return 2;
-}
-
-static inline void __trace_wb_assign_cgroup(char *buf, struct bdi_writeback *wb)
-{
-       strcpy(buf, "/");
-}
-
-static inline size_t __trace_wbc_cgroup_size(struct writeback_control *wbc)
-{
-       return 2;
+       return -1U;
 }
 
-static inline void __trace_wbc_assign_cgroup(char *buf,
-                                            struct writeback_control *wbc)
+static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc)
 {
-       strcpy(buf, "/");
+       return -1U;
 }
 
 #endif /* CONFIG_CGROUP_WRITEBACK */
 
@@ -201,7 +171,7 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template,
                __array(char, name, 32)
                __field(unsigned long, ino)
                __field(int, sync_mode)
-               __dynamic_array(char, cgroup, __trace_wbc_cgroup_size(wbc))
+               __field(unsigned int, cgroup_ino)
        ),
 
        TP_fast_assign(
@@ -209,14 +179,14 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template,
                        dev_name(inode_to_bdi(inode)->dev), 32);
                __entry->ino            = inode->i_ino;
                __entry->sync_mode      = wbc->sync_mode;
-               __trace_wbc_assign_cgroup(__get_str(cgroup), wbc);
+               __entry->cgroup_ino     = __trace_wbc_assign_cgroup(wbc);
        ),
 
-       TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup=%s",
+       TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup_ino=%u",
                __entry->name,
                __entry->ino,
                __entry->sync_mode,
-               __get_str(cgroup)
+               __entry->cgroup_ino
        )
 );
 
@@ -246,7 +216,7 @@ DECLARE_EVENT_CLASS(writeback_work_class,
                __field(int, range_cyclic)
                __field(int, for_background)
                __field(int, reason)
-               __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
+               __field(unsigned int, cgroup_ino)
        ),
        TP_fast_assign(
                strncpy(__entry->name,
@@ -258,10 +228,10 @@ DECLARE_EVENT_CLASS(writeback_work_class,
                __entry->range_cyclic = work->range_cyclic;
                __entry->for_background = work->for_background;
                __entry->reason = work->reason;
-               __trace_wb_assign_cgroup(__get_str(cgroup), wb);
+               __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
        ),
        TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
-                 "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup=%s",
+                 "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%u",
                  __entry->name,
                  MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
                  __entry->nr_pages,
@@ -270,7 +240,7 @@ DECLARE_EVENT_CLASS(writeback_work_class,
                  __entry->range_cyclic,
                  __entry->for_background,
                  __print_symbolic(__entry->reason, WB_WORK_REASON),
-                 __get_str(cgroup)
+                 __entry->cgroup_ino
        )
 );
 #define DEFINE_WRITEBACK_WORK_EVENT(name) \
@@ -300,15 +270,15 @@ DECLARE_EVENT_CLASS(writeback_class,
        TP_ARGS(wb),
        TP_STRUCT__entry(
                __array(char, name, 32)
-               __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
+               __field(unsigned int, cgroup_ino)
        ),
        TP_fast_assign(
                strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
-               __trace_wb_assign_cgroup(__get_str(cgroup), wb);
+               __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
        ),
-       TP_printk("bdi %s: cgroup=%s",
+       TP_printk("bdi %s: cgroup_ino=%u",
                  __entry->name,
-                 __get_str(cgroup)
+                 __entry->cgroup_ino
        )
 );
 #define DEFINE_WRITEBACK_EVENT(name) \
@@ -347,7 +317,7 @@ DECLARE_EVENT_CLASS(wbc_class,
                __field(int, range_cyclic)
                __field(long, range_start)
                __field(long, range_end)
-               __dynamic_array(char, cgroup, __trace_wbc_cgroup_size(wbc))
+               __field(unsigned int, cgroup_ino)
        ),
 
        TP_fast_assign(
@@ -361,12 +331,12 @@ DECLARE_EVENT_CLASS(wbc_class,
                __entry->range_cyclic   = wbc->range_cyclic;
                __entry->range_start    = (long)wbc->range_start;
                __entry->range_end      = (long)wbc->range_end;
-               __trace_wbc_assign_cgroup(__get_str(cgroup), wbc);
+               __entry->cgroup_ino     = __trace_wbc_assign_cgroup(wbc);
        ),
 
        TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
                "bgrd=%d reclm=%d cyclic=%d "
-               "start=0x%lx end=0x%lx cgroup=%s",
+               "start=0x%lx end=0x%lx cgroup_ino=%u",
                __entry->name,
                __entry->nr_to_write,
                __entry->pages_skipped,
@@ -377,7 +347,7 @@ DECLARE_EVENT_CLASS(wbc_class,
                __entry->range_cyclic,
                __entry->range_start,
                __entry->range_end,
-               __get_str(cgroup)
+               __entry->cgroup_ino
        )
 )
 
@@ -398,7 +368,7 @@ TRACE_EVENT(writeback_queue_io,
                __field(long,           age)
                __field(int,            moved)
                __field(int,            reason)
-               __dynamic_array(char,   cgroup, __trace_wb_cgroup_size(wb))
+               __field(unsigned int,   cgroup_ino)
        ),
        TP_fast_assign(
                unsigned long *older_than_this = work->older_than_this;
@@ -408,15 +378,15 @@ TRACE_EVENT(writeback_queue_io,
                                  (jiffies - *older_than_this) * 1000 / HZ : -1;
                __entry->moved  = moved;
                __entry->reason = work->reason;
-               __trace_wb_assign_cgroup(__get_str(cgroup), wb);
+               __entry->cgroup_ino     = __trace_wb_assign_cgroup(wb);
        ),
-       TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup=%s",
+       TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%u",
                __entry->name,
                __entry->older, /* older_than_this in jiffies */
                __entry->age,   /* older_than_this in relative milliseconds */
                __entry->moved,
                __print_symbolic(__entry->reason, WB_WORK_REASON),
-               __get_str(cgroup)
+               __entry->cgroup_ino
        )
 );
 
@@ -484,7 +454,7 @@ TRACE_EVENT(bdi_dirty_ratelimit,
                __field(unsigned long,  dirty_ratelimit)
                __field(unsigned long,  task_ratelimit)
                __field(unsigned long,  balanced_dirty_ratelimit)
-               __dynamic_array(char,   cgroup, __trace_wb_cgroup_size(wb))
+               __field(unsigned int,   cgroup_ino)
        ),
 
        TP_fast_assign(
@@ -496,13 +466,13 @@ TRACE_EVENT(bdi_dirty_ratelimit,
                __entry->task_ratelimit = KBps(task_ratelimit);
                __entry->balanced_dirty_ratelimit =
                                        KBps(wb->balanced_dirty_ratelimit);
-               __trace_wb_assign_cgroup(__get_str(cgroup), wb);
+               __entry->cgroup_ino     = __trace_wb_assign_cgroup(wb);
        ),
 
        TP_printk("bdi %s: "
                  "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
                  "dirty_ratelimit=%lu task_ratelimit=%lu "
-                 "balanced_dirty_ratelimit=%lu cgroup=%s",
+                 "balanced_dirty_ratelimit=%lu cgroup_ino=%u",
                  __entry->bdi,
                  __entry->write_bw,            /* write bandwidth */
                  __entry->avg_write_bw,        /* avg write bandwidth */
@@ -510,7 +480,7 @@ TRACE_EVENT(bdi_dirty_ratelimit,
                  __entry->dirty_ratelimit,     /* base ratelimit */
                  __entry->task_ratelimit, /* ratelimit with position control */
                  __entry->balanced_dirty_ratelimit, /* the balanced ratelimit */
-                 __get_str(cgroup)
+                 __entry->cgroup_ino
        )
 );
 
@@ -548,7 +518,7 @@ TRACE_EVENT(balance_dirty_pages,
                __field(         long,  pause)
                __field(unsigned long,  period)
                __field(         long,  think)
-               __dynamic_array(char,   cgroup, __trace_wb_cgroup_size(wb))
+               __field(unsigned int,   cgroup_ino)
        ),
 
        TP_fast_assign(
@@ -571,7 +541,7 @@ TRACE_EVENT(balance_dirty_pages,
                __entry->period         = period * 1000 / HZ;
                __entry->pause          = pause * 1000 / HZ;
                __entry->paused         = (jiffies - start_time) * 1000 / HZ;
-               __trace_wb_assign_cgroup(__get_str(cgroup), wb);
+               __entry->cgroup_ino     = __trace_wb_assign_cgroup(wb);
        ),
 
 
@@ -580,7 +550,7 @@ TRACE_EVENT(balance_dirty_pages,
                  "bdi_setpoint=%lu bdi_dirty=%lu "
                  "dirty_ratelimit=%lu task_ratelimit=%lu "
                  "dirtied=%u dirtied_pause=%u "
-                 "paused=%lu pause=%ld period=%lu think=%ld cgroup=%s",
+                 "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%u",
                  __entry->bdi,
                  __entry->limit,
                  __entry->setpoint,
@@ -595,7 +565,7 @@ TRACE_EVENT(balance_dirty_pages,
                  __entry->pause,       /* ms */
                  __entry->period,      /* ms */
                  __entry->think,       /* ms */
-                 __get_str(cgroup)
+                 __entry->cgroup_ino
        )
 );
 
@@ -609,8 +579,7 @@ TRACE_EVENT(writeback_sb_inodes_requeue,
                __field(unsigned long, ino)
                __field(unsigned long, state)
                __field(unsigned long, dirtied_when)
-               __dynamic_array(char, cgroup,
-                               __trace_wb_cgroup_size(inode_to_wb(inode)))
+               __field(unsigned int, cgroup_ino)
        ),
 
        TP_fast_assign(
@@ -619,16 +588,16 @@ TRACE_EVENT(writeback_sb_inodes_requeue,
                __entry->ino            = inode->i_ino;
                __entry->state          = inode->i_state;
                __entry->dirtied_when   = inode->dirtied_when;
-               __trace_wb_assign_cgroup(__get_str(cgroup), inode_to_wb(inode));
+               __entry->cgroup_ino     = __trace_wb_assign_cgroup(inode_to_wb(inode));
        ),
 
-       TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup=%s",
+       TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup_ino=%u",
                  __entry->name,
                  __entry->ino,
                  show_inode_state(__entry->state),
                  __entry->dirtied_when,
                  (jiffies - __entry->dirtied_when) / HZ,
-                 __get_str(cgroup)
+                 __entry->cgroup_ino
        )
 );
 
@@ -684,7 +653,7 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
                __field(unsigned long, writeback_index)
                __field(long, nr_to_write)
                __field(unsigned long, wrote)
-               __dynamic_array(char, cgroup, __trace_wbc_cgroup_size(wbc))
+               __field(unsigned int, cgroup_ino)
        ),
 
        TP_fast_assign(
@@ -696,11 +665,11 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
                __entry->writeback_index = inode->i_mapping->writeback_index;
                __entry->nr_to_write    = nr_to_write;
                __entry->wrote          = nr_to_write - wbc->nr_to_write;
-               __trace_wbc_assign_cgroup(__get_str(cgroup), wbc);
+               __entry->cgroup_ino     = __trace_wbc_assign_cgroup(wbc);
        ),
 
        TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
-                 "index=%lu to_write=%ld wrote=%lu cgroup=%s",
+                 "index=%lu to_write=%ld wrote=%lu cgroup_ino=%u",
                  __entry->name,
                  __entry->ino,
                  show_inode_state(__entry->state),
@@ -709,7 +678,7 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
                  __entry->writeback_index,
                  __entry->nr_to_write,
                  __entry->wrote,
-                 __get_str(cgroup)
+                 __entry->cgroup_ino
        )
 );
 
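The writeback.h change ("tracing, writeback: Replace cgroup path to cgroup ino") swaps the variable-length cgroup path in every writeback event for the cgroup's kernfs inode number, shrinking each record to a fixed-size field; resolving the number back to a path becomes userspace's job. One possible way to do that resolution is sketched below — the helper is hypothetical, not part of this patch, and it assumes a cgroup2 hierarchy mounted at /sys/fs/cgroup.

/* Hypothetical userspace helper: find the cgroup directory whose inode
 * matches a cgroup_ino value reported by the writeback tracepoints. */
#define _XOPEN_SOURCE 500
#include <ftw.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>

static unsigned long target_ino;

static int visit(const char *path, const struct stat *sb,
                 int type, struct FTW *ftw)
{
        if (type == FTW_D && sb->st_ino == target_ino) {
                printf("cgroup_ino=%lu -> %s\n", target_ino, path);
                return 1;       /* nonzero stops the walk */
        }
        return 0;
}

int main(int argc, char **argv)
{
        if (argc != 2)
                return 1;
        target_ino = strtoul(argv[1], NULL, 0);
        /* Assumes a cgroup2 mount at /sys/fs/cgroup. */
        return nftw("/sys/fs/cgroup", visit, 16, FTW_PHYS) == 1 ? 0 : 1;
}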
kernel/trace/ftrace.c
@@ -1030,8 +1030,7 @@ static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
        for_each_possible_cpu(cpu) {
                stat = &per_cpu(ftrace_profile_stats, cpu);
 
-               /* allocate enough for function name + cpu number */
-               name = kmalloc(32, GFP_KERNEL);
+               name = kasprintf(GFP_KERNEL, "function%d", cpu);
                if (!name) {
                        /*
                         * The files created are permanent, if something happens
@@ -1043,7 +1042,6 @@ static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
                        return;
                }
                stat->stat = function_stats;
-               snprintf(name, 32, "function%d", cpu);
                stat->stat.name = name;
                ret = register_stat_tracer(&stat->stat);
                if (ret) {
@@ -1609,7 +1607,7 @@ static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
        return  keep_regs;
 }
 
-static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
+static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
                                     int filter_hash,
                                     bool inc)
 {
@@ -1617,12 +1615,13 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
        struct ftrace_hash *other_hash;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
+       bool update = false;
        int count = 0;
        int all = 0;
 
        /* Only update if the ops has been registered */
        if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
-               return;
+               return false;
 
        /*
         * In the filter_hash case:
@@ -1649,7 +1648,7 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
                 * then there's nothing to do.
                 */
                if (ftrace_hash_empty(hash))
-                       return;
+                       return false;
        }
 
        do_for_each_ftrace_rec(pg, rec) {
@@ -1693,7 +1692,7 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
                if (inc) {
                        rec->flags++;
                        if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
-                               return;
+                               return false;
 
                        /*
                         * If there's only a single callback registered to a
@@ -1719,7 +1718,7 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
                                rec->flags |= FTRACE_FL_REGS;
                } else {
                        if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
-                               return;
+                               return false;
                        rec->flags--;
 
                        /*
@@ -1752,22 +1751,28 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
                         */
                }
                count++;
+
+               /* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
+               update |= ftrace_test_record(rec, 1) != FTRACE_UPDATE_IGNORE;
+
                /* Shortcut, if we handled all records, we are done. */
                if (!all && count == hash->count)
-                       return;
+                       return update;
        } while_for_each_ftrace_rec();
+
+       return update;
 }
 
-static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
+static bool ftrace_hash_rec_disable(struct ftrace_ops *ops,
                                    int filter_hash)
 {
-       __ftrace_hash_rec_update(ops, filter_hash, 0);
+       return __ftrace_hash_rec_update(ops, filter_hash, 0);
 }
 
-static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
+static bool ftrace_hash_rec_enable(struct ftrace_ops *ops,
                                   int filter_hash)
 {
-       __ftrace_hash_rec_update(ops, filter_hash, 1);
+       return __ftrace_hash_rec_update(ops, filter_hash, 1);
 }
 
 static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
@@ -2643,7 +2648,6 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
                return ret;
 
        ftrace_start_up++;
-       command |= FTRACE_UPDATE_CALLS;
 
        /*
         * Note that ftrace probes uses this to start up
@@ -2664,7 +2668,8 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
                return ret;
        }
 
-       ftrace_hash_rec_enable(ops, 1);
+       if (ftrace_hash_rec_enable(ops, 1))
+               command |= FTRACE_UPDATE_CALLS;
 
        ftrace_startup_enable(command);
 
@@ -2694,12 +2699,12 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 
        /* Disabling ipmodify never fails */
        ftrace_hash_ipmodify_disable(ops);
-       ftrace_hash_rec_disable(ops, 1);
+
+       if (ftrace_hash_rec_disable(ops, 1))
+               command |= FTRACE_UPDATE_CALLS;
 
        ops->flags &= ~FTRACE_OPS_FL_ENABLED;
 
-       command |= FTRACE_UPDATE_CALLS;
-
        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
kernel/trace/trace.c
@@ -74,11 +74,6 @@ static struct tracer_opt dummy_tracer_opt[] = {
        { }
 };
 
-static struct tracer_flags dummy_tracer_flags = {
-       .val = 0,
-       .opts = dummy_tracer_opt
-};
-
 static int
 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 {
@@ -1258,12 +1253,22 @@ int __init register_tracer(struct tracer *type)
 
        if (!type->set_flag)
                type->set_flag = &dummy_set_flag;
-       if (!type->flags)
-               type->flags = &dummy_tracer_flags;
-       else
+       if (!type->flags) {
+               /*allocate a dummy tracer_flags*/
+               type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
+               if (!type->flags) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+               type->flags->val = 0;
+               type->flags->opts = dummy_tracer_opt;
+       } else
                if (!type->flags->opts)
                        type->flags->opts = dummy_tracer_opt;
 
+       /* store the tracer for __set_tracer_option */
+       type->flags->trace = type;
+
        ret = run_tracer_selftest(type);
        if (ret < 0)
                goto out;
@@ -1659,6 +1664,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 #else
                TRACE_FLAG_IRQS_NOSUPPORT |
 #endif
+               ((pc & NMI_MASK    ) ? TRACE_FLAG_NMI     : 0) |
                ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
                ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
                (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
@@ -3505,7 +3511,7 @@ static int __set_tracer_option(struct trace_array *tr,
                               struct tracer_flags *tracer_flags,
                               struct tracer_opt *opts, int neg)
 {
-       struct tracer *trace = tr->current_trace;
+       struct tracer *trace = tracer_flags->trace;
        int ret;
 
        ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
@@ -4949,7 +4955,10 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 
        spd.nr_pages = i;
 
-       ret = splice_to_pipe(pipe, &spd);
+       if (i)
+               ret = splice_to_pipe(pipe, &spd);
+       else
+               ret = 0;
 out:
        splice_shrink_spd(&spd);
        return ret;
@@ -6391,11 +6400,8 @@ create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
                return;
 
        for (i = 0; i < tr->nr_topts; i++) {
-               /*
-                * Check if these flags have already been added.
-                * Some tracers share flags.
-                */
-               if (tr->topts[i].tracer->flags == tracer->flags)
+               /* Make sure there's no duplicate flags. */
+               if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
                        return;
        }
 
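Why the register_tracer() hunk above allocates a dummy tracer_flags per tracer instead of sharing one static: __set_tracer_option() now resolves the owning tracer through tracer_flags->trace, and a single shared struct can only point back at one owner. The toy C fragment below illustrates the aliasing hazard being removed; all names and types here are invented for illustration, not kernel code.

/* One shared flags struct, many tracers: the back-pointer is clobbered. */
struct flags { struct tracer *owner; };
struct tracer { const char *name; struct flags *flags; };

static struct flags shared_dummy;       /* the old shared dummy */

static void register_one(struct tracer *t)
{
        if (!t->flags)
                t->flags = &shared_dummy;
        t->flags->owner = t;    /* overwrites the previous tracer's owner */
}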
kernel/trace/trace.h
@@ -125,6 +125,7 @@ enum trace_flag_type {
        TRACE_FLAG_HARDIRQ              = 0x08,
        TRACE_FLAG_SOFTIRQ              = 0x10,
        TRACE_FLAG_PREEMPT_RESCHED      = 0x20,
+       TRACE_FLAG_NMI                  = 0x40,
 };
 
 #define TRACE_BUF_SIZE         1024
@@ -345,6 +346,7 @@ struct tracer_opt {
 struct tracer_flags {
        u32                     val;
        struct tracer_opt       *opts;
+       struct tracer           *trace;
 };
 
 /* Makes more easy to define a tracer opt */
@@ -1111,6 +1113,18 @@ struct filter_pred {
        unsigned short          right;
 };
 
+static inline bool is_string_field(struct ftrace_event_field *field)
+{
+       return field->filter_type == FILTER_DYN_STRING ||
+              field->filter_type == FILTER_STATIC_STRING ||
+              field->filter_type == FILTER_PTR_STRING;
+}
+
+static inline bool is_function_field(struct ftrace_event_field *field)
+{
+       return field->filter_type == FILTER_TRACE_FN;
+}
+
 extern enum regex_type
 filter_parse_regex(char *buff, int len, char **search, int *not);
 extern void print_event_filter(struct trace_event_file *file,
@@ -1159,9 +1173,24 @@ struct event_trigger_data {
        struct event_filter __rcu       *filter;
        char                            *filter_str;
        void                            *private_data;
+       bool                            paused;
        struct list_head                list;
 };
 
+extern void trigger_data_free(struct event_trigger_data *data);
+extern int event_trigger_init(struct event_trigger_ops *ops,
+                             struct event_trigger_data *data);
+extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
+                                             int trigger_enable);
+extern void update_cond_flag(struct trace_event_file *file);
+extern void unregister_trigger(char *glob, struct event_trigger_ops *ops,
+                              struct event_trigger_data *test,
+                              struct trace_event_file *file);
+extern int set_trigger_filter(char *filter_str,
+                             struct event_trigger_data *trigger_data,
+                             struct trace_event_file *file);
+extern int register_event_command(struct event_command *cmd);
+
 /**
  * struct event_trigger_ops - callbacks for trace event triggers
  *
@@ -1174,7 +1203,8 @@ struct event_trigger_data {
  * @func: The trigger 'probe' function called when the triggering
  *     event occurs.  The data passed into this callback is the data
  *     that was supplied to the event_command @reg() function that
- *     registered the trigger (see struct event_command).
+ *     registered the trigger (see struct event_command) along with
+ *     the trace record, rec.
  *
  * @init: An optional initialization function called for the trigger
  *     when the trigger is registered (via the event_command reg()
@@ -1199,7 +1229,8 @@ struct event_trigger_data {
  *     (see trace_event_triggers.c).
  */
 struct event_trigger_ops {
-       void                    (*func)(struct event_trigger_data *data);
+       void                    (*func)(struct event_trigger_data *data,
+                                       void *rec);
        int                     (*init)(struct event_trigger_ops *ops,
                                        struct event_trigger_data *data);
        void                    (*free)(struct event_trigger_ops *ops,
@@ -1243,27 +1274,10 @@ struct event_trigger_ops {
  *     values are defined by adding new values to the trigger_type
  *     enum in include/linux/trace_events.h.
  *
- * @post_trigger: A flag that says whether or not this command needs
- *     to have its action delayed until after the current event has
- *     been closed.  Some triggers need to avoid being invoked while
- *     an event is currently in the process of being logged, since
- *     the trigger may itself log data into the trace buffer.  Thus
- *     we make sure the current event is committed before invoking
- *     those triggers.  To do that, the trigger invocation is split
- *     in two - the first part checks the filter using the current
- *     trace record; if a command has the @post_trigger flag set, it
- *     sets a bit for itself in the return value, otherwise it
- *     directly invokes the trigger.  Once all commands have been
- *     either invoked or set their return flag, the current record is
- *     either committed or discarded.  At that point, if any commands
- *     have deferred their triggers, those commands are finally
- *     invoked following the close of the current event.  In other
- *     words, if the event_trigger_ops @func() probe implementation
- *     itself logs to the trace buffer, this flag should be set,
- *     otherwise it can be left unspecified.
+ * @flags: See the enum event_command_flags below.
  *
- * All the methods below, except for @set_filter(), must be
- * implemented.
+ * All the methods below, except for @set_filter() and @unreg_all(),
+ * must be implemented.
  *
  * @func: The callback function responsible for parsing and
  *     registering the trigger written to the 'trigger' file by the
@@ -1288,6 +1302,10 @@ struct event_trigger_ops {
  *     This is usually implemented by the generic utility function
  *     @unregister_trigger() (see trace_event_triggers.c).
  *
+ * @unreg_all: An optional function called to remove all the triggers
+ *     from the list of triggers associated with the event.  Called
+ *     when a trigger file is opened in truncate mode.
+ *
  * @set_filter: An optional function called to parse and set a filter
  *     for the trigger.  If no @set_filter() method is set for the
  *     event command, filters set by the user for the command will be
@@ -1301,7 +1319,7 @@ struct event_command {
        struct list_head        list;
        char                    *name;
        enum event_trigger_type trigger_type;
-       bool                    post_trigger;
+       int                     flags;
        int                     (*func)(struct event_command *cmd_ops,
                                        struct trace_event_file *file,
                                        char *glob, char *cmd, char *params);
@@ -1313,12 +1331,56 @@ struct event_command {
                                struct event_trigger_ops *ops,
                                struct event_trigger_data *data,
                                struct trace_event_file *file);
+       void                    (*unreg_all)(struct trace_event_file *file);
        int                     (*set_filter)(char *filter_str,
                                              struct event_trigger_data *data,
                                              struct trace_event_file *file);
        struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
 };
 
+/**
+ * enum event_command_flags - flags for struct event_command
+ *
+ * @POST_TRIGGER: A flag that says whether or not this command needs
+ *     to have its action delayed until after the current event has
+ *     been closed.  Some triggers need to avoid being invoked while
+ *     an event is currently in the process of being logged, since
+ *     the trigger may itself log data into the trace buffer.  Thus
+ *     we make sure the current event is committed before invoking
+ *     those triggers.  To do that, the trigger invocation is split
+ *     in two - the first part checks the filter using the current
+ *     trace record; if a command has the @post_trigger flag set, it
+ *     sets a bit for itself in the return value, otherwise it
+ *     directly invokes the trigger.  Once all commands have been
+ *     either invoked or set their return flag, the current record is
+ *     either committed or discarded.  At that point, if any commands
+ *     have deferred their triggers, those commands are finally
+ *     invoked following the close of the current event.  In other
+ *     words, if the event_trigger_ops @func() probe implementation
+ *     itself logs to the trace buffer, this flag should be set,
+ *     otherwise it can be left unspecified.
+ *
+ * @NEEDS_REC: A flag that says whether or not this command needs
+ *     access to the trace record in order to perform its function,
+ *     regardless of whether or not it has a filter associated with
+ *     it (filters make a trigger require access to the trace record
+ *     but are not always present).
+ */
+enum event_command_flags {
+       EVENT_CMD_FL_POST_TRIGGER       = 1,
+       EVENT_CMD_FL_NEEDS_REC          = 2,
+};
+
+static inline bool event_command_post_trigger(struct event_command *cmd_ops)
+{
+       return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER;
+}
+
+static inline bool event_command_needs_rec(struct event_command *cmd_ops)
+{
+       return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC;
+}
+
 extern int trace_event_enable_disable(struct trace_event_file *file,
                                      int enable, int soft_disable);
 extern int tracing_alloc_snapshot(void);
@@ -1365,8 +1427,13 @@ int perf_ftrace_event_register(struct trace_event_call *call,
 
 #ifdef CONFIG_FTRACE_SYSCALLS
 void init_ftrace_syscalls(void);
+const char *get_syscall_name(int syscall);
 #else
 static inline void init_ftrace_syscalls(void) { }
+static inline const char *get_syscall_name(int syscall)
+{
+       return NULL;
+}
 #endif
 
 #ifdef CONFIG_EVENT_TRACING
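The POST_TRIGGER comment above describes a two-phase dispatch: immediate triggers run while the record is being logged, while triggers that themselves write to the trace buffer set a bit and run only after the record is committed (the event_trigger_unlock_commit() hunk earlier shows the real call sequence). The self-contained toy below models that control flow; every type and function in it is a stand-in, not the kernel implementation.

#include <stdio.h>

#define ETT_STACKTRACE 0x4      /* example trigger-type bit */

struct trigger { int post; int type; void (*func)(void *rec); };

static void dump_stack_stub(void *rec) { printf("stacktrace for %p\n", rec); }

static struct trigger triggers[] = {
        { .post = 1, .type = ETT_STACKTRACE, .func = dump_stack_stub },
};

int main(void)
{
        char record[16] = "event";      /* stand-in trace record */
        unsigned int tt = 0;

        /* Phase 1: run immediate triggers now; POST_TRIGGER commands
         * only mark their bit so they run after the record is closed. */
        for (unsigned i = 0; i < sizeof(triggers)/sizeof(*triggers); i++) {
                if (triggers[i].post)
                        tt |= triggers[i].type;
                else
                        triggers[i].func(record);
        }

        /* ... the record would be committed to the buffer here ... */

        /* Phase 2: deferred triggers may now safely log themselves. */
        for (unsigned i = 0; i < sizeof(triggers)/sizeof(*triggers); i++)
                if (triggers[i].type & tt)
                        triggers[i].func(record);
        return 0;
}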
kernel/trace/trace_events_filter.c
@@ -961,18 +961,6 @@ int filter_assign_type(const char *type)
        return FILTER_OTHER;
 }
 
-static bool is_function_field(struct ftrace_event_field *field)
-{
-       return field->filter_type == FILTER_TRACE_FN;
-}
-
-static bool is_string_field(struct ftrace_event_field *field)
-{
-       return field->filter_type == FILTER_DYN_STRING ||
-              field->filter_type == FILTER_STATIC_STRING ||
-              field->filter_type == FILTER_PTR_STRING;
-}
-
 static bool is_legal_op(struct ftrace_event_field *field, int op)
 {
        if (is_string_field(field) &&
kernel/trace/trace_events_trigger.c
@@ -28,8 +28,7 @@
 static LIST_HEAD(trigger_commands);
 static DEFINE_MUTEX(trigger_cmd_mutex);
 
-static void
-trigger_data_free(struct event_trigger_data *data)
+void trigger_data_free(struct event_trigger_data *data)
 {
        if (data->cmd_ops->set_filter)
                data->cmd_ops->set_filter(NULL, data, NULL);
@@ -73,18 +72,20 @@ event_triggers_call(struct trace_event_file *file, void *rec)
                return tt;
 
        list_for_each_entry_rcu(data, &file->triggers, list) {
+               if (data->paused)
+                       continue;
                if (!rec) {
-                       data->ops->func(data);
+                       data->ops->func(data, rec);
                        continue;
                }
                filter = rcu_dereference_sched(data->filter);
                if (filter && !filter_match_preds(filter, rec))
                        continue;
-               if (data->cmd_ops->post_trigger) {
+               if (event_command_post_trigger(data->cmd_ops)) {
                        tt |= data->cmd_ops->trigger_type;
                        continue;
                }
-               data->ops->func(data);
+               data->ops->func(data, rec);
        }
        return tt;
 }
@@ -94,6 +95,7 @@ EXPORT_SYMBOL_GPL(event_triggers_call);
 * event_triggers_post_call - Call 'post_triggers' for a trace event
 * @file: The trace_event_file associated with the event
 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
+ * @rec: The trace entry for the event
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command, if the
@@ -104,13 +106,16 @@ EXPORT_SYMBOL_GPL(event_triggers_call);
 */
 void
 event_triggers_post_call(struct trace_event_file *file,
-                        enum event_trigger_type tt)
+                        enum event_trigger_type tt,
+                        void *rec)
 {
        struct event_trigger_data *data;
 
        list_for_each_entry_rcu(data, &file->triggers, list) {
+               if (data->paused)
+                       continue;
                if (data->cmd_ops->trigger_type & tt)
-                       data->ops->func(data);
+                       data->ops->func(data, rec);
        }
 }
 EXPORT_SYMBOL_GPL(event_triggers_post_call);
@@ -188,6 +193,19 @@ static int event_trigger_regex_open(struct inode *inode, struct file *file)
                return -ENODEV;
        }
 
+       if ((file->f_mode & FMODE_WRITE) &&
+           (file->f_flags & O_TRUNC)) {
+               struct trace_event_file *event_file;
+               struct event_command *p;
+
+               event_file = event_file_data(file);
+
+               list_for_each_entry(p, &trigger_commands, list) {
+                       if (p->unreg_all)
+                               p->unreg_all(event_file);
+               }
+       }
+
        if (file->f_mode & FMODE_READ) {
                ret = seq_open(file, &event_triggers_seq_ops);
                if (!ret) {
@@ -306,7 +324,7 @@ const struct file_operations event_trigger_fops = {
 * Currently we only register event commands from __init, so mark this
 * __init too.
 */
-static __init int register_event_command(struct event_command *cmd)
+__init int register_event_command(struct event_command *cmd)
 {
        struct event_command *p;
        int ret = 0;
@@ -395,9 +413,8 @@ event_trigger_print(const char *name, struct seq_file *m,
 *
 * Return: 0 on success, errno otherwise
 */
-static int
-event_trigger_init(struct event_trigger_ops *ops,
-                  struct event_trigger_data *data)
+int event_trigger_init(struct event_trigger_ops *ops,
+                      struct event_trigger_data *data)
 {
        data->ref++;
        return 0;
@@ -425,8 +442,8 @@ event_trigger_free(struct event_trigger_ops *ops,
                trigger_data_free(data);
 }
 
-static int trace_event_trigger_enable_disable(struct trace_event_file *file,
-                                             int trigger_enable)
+int trace_event_trigger_enable_disable(struct trace_event_file *file,
+                                      int trigger_enable)
 {
        int ret = 0;
 
@@ -483,13 +500,14 @@ clear_event_triggers(struct trace_array *tr)
 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
 * cleared.
 */
-static void update_cond_flag(struct trace_event_file *file)
+void update_cond_flag(struct trace_event_file *file)
 {
        struct event_trigger_data *data;
        bool set_cond = false;
 
        list_for_each_entry_rcu(data, &file->triggers, list) {
-               if (data->filter || data->cmd_ops->post_trigger) {
+               if (data->filter || event_command_post_trigger(data->cmd_ops) ||
+                   event_command_needs_rec(data->cmd_ops)) {
                        set_cond = true;
                        break;
                }
@@ -560,9 +578,9 @@ out:
 * Usually used directly as the @unreg method in event command
 * implementations.
 */
-static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
-                              struct event_trigger_data *test,
-                              struct trace_event_file *file)
+void unregister_trigger(char *glob, struct event_trigger_ops *ops,
+                       struct event_trigger_data *test,
+                       struct trace_event_file *file)
 {
        struct event_trigger_data *data;
        bool unregistered = false;
@@ -696,9 +714,9 @@ event_trigger_callback(struct event_command *cmd_ops,
 *
 * Return: 0 on success, errno otherwise
 */
-static int set_trigger_filter(char *filter_str,
-                             struct event_trigger_data *trigger_data,
-                             struct trace_event_file *file)
+int set_trigger_filter(char *filter_str,
+                      struct event_trigger_data *trigger_data,
+                      struct trace_event_file *file)
 {
        struct event_trigger_data *data = trigger_data;
        struct event_filter *filter = NULL, *tmp;
@@ -747,7 +765,7 @@ static int set_trigger_filter(char *filter_str,
 }
 
 static void
-traceon_trigger(struct event_trigger_data *data)
+traceon_trigger(struct event_trigger_data *data, void *rec)
 {
        if (tracing_is_on())
                return;
@@ -756,7 +774,7 @@ traceon_trigger(struct event_trigger_data *data)
 }
 
 static void
-traceon_count_trigger(struct event_trigger_data *data)
+traceon_count_trigger(struct event_trigger_data *data, void *rec)
 {
        if (tracing_is_on())
                return;
@@ -771,7 +789,7 @@ traceon_count_trigger(struct event_trigger_data *data)
 }
 
 static void
-traceoff_trigger(struct event_trigger_data *data)
+traceoff_trigger(struct event_trigger_data *data, void *rec)
 {
        if (!tracing_is_on())
                return;
@@ -780,7 +798,7 @@ traceoff_trigger(struct event_trigger_data *data)
 }
 
 static void
-traceoff_count_trigger(struct event_trigger_data *data)
+traceoff_count_trigger(struct event_trigger_data *data, void *rec)
 {
        if (!tracing_is_on())
                return;
@@ -876,13 +894,13 @@ static struct event_command trigger_traceoff_cmd = {
 
 #ifdef CONFIG_TRACER_SNAPSHOT
 static void
-snapshot_trigger(struct event_trigger_data *data)
+snapshot_trigger(struct event_trigger_data *data, void *rec)
 {
        tracing_snapshot();
 }
 
 static void
-snapshot_count_trigger(struct event_trigger_data *data)
+snapshot_count_trigger(struct event_trigger_data *data, void *rec)
 {
        if (!data->count)
                return;
@@ -890,7 +908,7 @@ snapshot_count_trigger(struct event_trigger_data *data)
        if (data->count != -1)
                (data->count)--;
 
-       snapshot_trigger(data);
+       snapshot_trigger(data, rec);
 }
 
 static int
@@ -969,13 +987,13 @@ static __init int register_trigger_snapshot_cmd(void) { return 0; }
 #define STACK_SKIP 3
 
 static void
-stacktrace_trigger(struct event_trigger_data *data)
+stacktrace_trigger(struct event_trigger_data *data, void *rec)
 {
        trace_dump_stack(STACK_SKIP);
 }
 
 static void
-stacktrace_count_trigger(struct event_trigger_data *data)
+stacktrace_count_trigger(struct event_trigger_data *data, void *rec)
 {
        if (!data->count)
                return;
@@ -983,7 +1001,7 @@ stacktrace_count_trigger(struct event_trigger_data *data)
        if (data->count != -1)
                (data->count)--;
 
-       stacktrace_trigger(data);
+       stacktrace_trigger(data, rec);
 }
 
 static int
@@ -1017,7 +1035,7 @@ stacktrace_get_trigger_ops(char *cmd, char *param)
 static struct event_command trigger_stacktrace_cmd = {
        .name                   = "stacktrace",
        .trigger_type           = ETT_STACKTRACE,
-       .post_trigger           = true,
+       .flags                  = EVENT_CMD_FL_POST_TRIGGER,
        .func                   = event_trigger_callback,
        .reg                    = register_trigger,
        .unreg                  = unregister_trigger,
@@ -1054,7 +1072,7 @@ struct enable_trigger_data {
 };
 
 static void
-event_enable_trigger(struct event_trigger_data *data)
+event_enable_trigger(struct event_trigger_data *data, void *rec)
 {
        struct enable_trigger_data *enable_data = data->private_data;
 
@@ -1065,7 +1083,7 @@ event_enable_trigger(struct event_trigger_data *data)
 }
 
 static void
-event_enable_count_trigger(struct event_trigger_data *data)
+event_enable_count_trigger(struct event_trigger_data *data, void *rec)
 {
        struct enable_trigger_data *enable_data = data->private_data;
 
@@ -1079,7 +1097,7 @@ event_enable_count_trigger(struct event_trigger_data *data)
        if (data->count != -1)
                (data->count)--;
 
-       event_enable_trigger(data);
+       event_enable_trigger(data, rec);
 }
 
 static int
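The event_trigger_regex_open() hunk above adds the unreg_all() path: opening an event's trigger file for write with O_TRUNC now clears every trigger attached to that event. The C sketch below drives both behaviors from userspace; the event path is only an example, and the tracefs mount point may be /sys/kernel/debug/tracing on older setups.

/* Sketch: arm a trigger, then clear all triggers via O_TRUNC. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char *path =
                "/sys/kernel/tracing/events/sched/sched_switch/trigger";

        /* Arm a deferred (POST_TRIGGER) stacktrace trigger. */
        int fd = open(path, O_WRONLY);
        if (fd < 0)
                return 1;
        (void)write(fd, "stacktrace", strlen("stacktrace"));
        close(fd);

        /* Truncating the file removes every trigger on the event. */
        fd = open(path, O_WRONLY | O_TRUNC);
        if (fd >= 0)
                close(fd);
        return 0;
}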
kernel/trace/trace_functions.c
@@ -219,6 +219,8 @@ static void tracing_stop_function_trace(struct trace_array *tr)
        unregister_ftrace_function(tr->ops);
 }
 
+static struct tracer function_trace;
+
 static int
 func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 {
@@ -228,6 +230,10 @@ func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
                if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
                        break;
 
+               /* We can change this flag when not running. */
+               if (tr->current_trace != &function_trace)
+                       break;
+
                unregister_ftrace_function(tr->ops);
 
                if (set) {
kernel/trace/trace_irqsoff.c
@@ -109,8 +109,12 @@ static int func_prolog_dec(struct trace_array *tr,
                return 0;
 
        local_save_flags(*flags);
-       /* slight chance to get a false positive on tracing_cpu */
-       if (!irqs_disabled_flags(*flags))
+       /*
+        * Slight chance to get a false positive on tracing_cpu,
+        * although I'm starting to think there isn't a chance.
+        * Leave this for now just to be paranoid.
+        */
+       if (!irqs_disabled_flags(*flags) && !preempt_count())
                return 0;
 
        *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
@@ -622,7 +626,6 @@ static int __irqsoff_tracer_init(struct trace_array *tr)
        irqsoff_trace = tr;
        /* make sure that the tracer is visible */
        smp_wmb();
-       tracing_reset_online_cpus(&tr->trace_buffer);
 
        ftrace_init_array_ops(tr, irqsoff_tracer_call);
 
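The func_prolog_dec() hunk above is the "preempt(irqs)off trace preempt disabled functions" change: function entries are now traced when preemption is off even though interrupts remain enabled. A minimal way to exercise it from userspace is sketched below; it requires a kernel built with CONFIG_PREEMPT_TRACER, and the tracefs path is the usual default rather than anything mandated by this patch.

/* Sketch: select the preempt-off tracer. */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/sys/kernel/tracing/current_tracer", O_WRONLY);

        if (fd < 0)
                return 1;
        (void)write(fd, "preemptoff", 10);
        close(fd);
        return 0;
}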
kernel/trace/trace_nop.c
@@ -56,7 +56,7 @@ static void nop_trace_reset(struct trace_array *tr)
 }
 
 /* It only serves as a signal handler and a callback to
- * accept or refuse tthe setting of a flag.
+ * accept or refuse the setting of a flag.
 * If you don't implement it, then the flag setting will be
 * automatically accepted.
 */
@@ -75,7 +75,7 @@ static int nop_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 
        if (bit == TRACE_NOP_OPT_REFUSE) {
                printk(KERN_DEBUG "nop_test_refuse flag set to %d: we refuse."
-                       "Now cat trace_options to see the result\n",
+                       " Now cat trace_options to see the result\n",
                        set);
                return -EINVAL;
        }
kernel/trace/trace_output.c
@@ -389,7 +389,9 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
        char irqs_off;
        int hardirq;
        int softirq;
+       int nmi;
 
+       nmi = entry->flags & TRACE_FLAG_NMI;
        hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
        softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
 
@@ -415,10 +417,12 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
        }
 
        hardsoft_irq =
+               (nmi && hardirq)     ? 'Z' :
+               nmi                  ? 'z' :
                (hardirq && softirq) ? 'H' :
-               hardirq ? 'h' :
-               softirq ? 's' :
-               '.';
+               hardirq              ? 'h' :
+               softirq              ? 's' :
+                                      '.' ;
 
        trace_seq_printf(s, "%c%c%c",
                         irqs_off, need_resched, hardsoft_irq);
kernel/trace/trace_printk.c
@@ -296,6 +296,9 @@ static int t_show(struct seq_file *m, void *v)
        const char *str = *fmt;
        int i;
 
+       if (!*fmt)
+               return 0;
+
        seq_printf(m, "0x%lx : \"", *(unsigned long *)fmt);
 
        /*
kernel/trace/trace_syscalls.c
@@ -106,6 +106,17 @@ static struct syscall_metadata *syscall_nr_to_meta(int nr)
        return syscalls_metadata[nr];
 }
 
+const char *get_syscall_name(int syscall)
+{
+       struct syscall_metadata *entry;
+
+       entry = syscall_nr_to_meta(syscall);
+       if (!entry)
+               return NULL;
+
+       return entry->name;
+}
+
 static enum print_line_t
 print_syscall_enter(struct trace_iterator *iter, int flags,
                    struct trace_event *event)