Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (121 commits)
  perf symbols: Increase symbol KSYM_NAME_LEN size
  perf hists browser: Refuse 'a' hotkey on non symbolic views
  perf ui browser: Use libslang to read keys
  perf tools: Fix tracing info recording
  perf hists browser: Elide DSO column when it is set to just one DSO, ditto for threads
  perf hists: Don't consider filtered entries when calculating column widths
  perf hists: Don't decay total_period for filtered entries
  perf hists browser: Honour symbol_conf.show_{nr_samples,total_period}
  perf hists browser: Do not exit on tab key with single event
  perf annotate browser: Don't change selection line when returning from callq
  perf tools: handle endianness of feature bitmap
  perf tools: Add prelink suggestion to dso update message
  perf script: Fix unknown feature comment
  perf hists browser: Apply the dso and thread filters when merging new batches
  perf hists: Move the dso and thread filters from hist_browser
  perf ui browser: Honour the xterm colors
  perf top tui: Give color hints just on the percentage, like on --stdio
  perf ui browser: Make the colors configurable and change the defaults
  perf tui: Remove unneeded call to newtCls on startup
  perf hists: Don't format the percentage on hist_entry__snprintf
  ...

Fix up conflicts in arch/x86/kernel/kprobes.c manually.

Ingo's tree did the insane "add volatile to const array", which just
doesn't make sense ("volatile const"?).  But we could remove the const
*and* make the array volatile to make doubly sure that gcc doesn't
optimize it away..

Also fix up kernel/trace/ring_buffer.c non-data-conflicts manually: the
reader_lock has been turned into a raw lock by the core locking merge,
and there was a new user of it introduced in this perf core merge.  Make
sure that new use also uses the raw accessor functions.
commit 7115e3fcf4
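Editor's note on the ring_buffer.c fixup mentioned in the message: the rule being applied is that once a lock is declared raw_spinlock_t, every acquisition must go through the raw_* accessor family. A minimal sketch of that pattern, with a hypothetical critical section (this is not the ring_buffer.c code itself):

	#include <linux/spinlock.h>

	/* Hypothetical example of the rule described above: a raw lock
	 * must be taken with the raw_* accessors, never plain spin_lock(). */
	static DEFINE_RAW_SPINLOCK(reader_lock);

	static void touch_reader_state(void)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&reader_lock, flags);
		/* ... reader state is only touched under the raw lock ... */
		raw_spin_unlock_irqrestore(&reader_lock, flags);
	}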
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
@@ -10,6 +10,7 @@ config M32R
 	select HAVE_GENERIC_HARDIRQS
 	select GENERIC_IRQ_PROBE
 	select GENERIC_IRQ_SHOW
+	select GENERIC_ATOMIC64
 
 config SBUS
 	bool
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
@@ -22,27 +22,26 @@ void arch_trigger_all_cpu_backtrace(void);
 #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
 #endif
 
-/*
- * Define some priorities for the nmi notifier call chain.
- *
- * Create a local nmi bit that has a higher priority than
- * external nmis, because the local ones are more frequent.
- *
- * Also setup some default high/normal/low settings for
- * subsystems to registers with. Using 4 bits to separate
- * the priorities. This can go a lot higher if needed be.
- */
-
-#define NMI_LOCAL_SHIFT		16	/* randomly picked */
-#define NMI_LOCAL_BIT		(1ULL << NMI_LOCAL_SHIFT)
-#define NMI_HIGH_PRIOR		(1ULL << 8)
-#define NMI_NORMAL_PRIOR	(1ULL << 4)
-#define NMI_LOW_PRIOR		(1ULL << 0)
-#define NMI_LOCAL_HIGH_PRIOR	(NMI_LOCAL_BIT | NMI_HIGH_PRIOR)
-#define NMI_LOCAL_NORMAL_PRIOR	(NMI_LOCAL_BIT | NMI_NORMAL_PRIOR)
-#define NMI_LOCAL_LOW_PRIOR	(NMI_LOCAL_BIT | NMI_LOW_PRIOR)
+#define NMI_FLAG_FIRST	1
+
+enum {
+	NMI_LOCAL=0,
+	NMI_UNKNOWN,
+	NMI_MAX
+};
+
+#define NMI_DONE	0
+#define NMI_HANDLED	1
+
+typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *);
+
+int register_nmi_handler(unsigned int, nmi_handler_t, unsigned long,
+			 const char *);
+
+void unregister_nmi_handler(unsigned int, const char *);
 
 void stop_nmi(void);
 void restart_nmi(void);
+void local_touch_nmi(void);
 
 #endif /* _ASM_X86_NMI_H */
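For readers unfamiliar with the new interface declared above: handlers take a (type, regs) pair and answer NMI_HANDLED or NMI_DONE instead of notifier-chain verdicts, and are registered by type and name. A minimal usage sketch; the handler body and the "mydrv" helpers are made up for illustration:

	static int mydrv_nmi_handler(unsigned int type, struct pt_regs *regs)
	{
		/* Claim the NMI only if our (hypothetical) device raised it. */
		if (!mydrv_device_raised_nmi())
			return NMI_DONE;	/* not ours: let other handlers run */

		mydrv_device_ack_nmi();
		return NMI_HANDLED;
	}

	/* registration and teardown, keyed by type and name: */
	register_nmi_handler(NMI_LOCAL, mydrv_nmi_handler, 0, "mydrv");
	...
	unregister_nmi_handler(NMI_LOCAL, "mydrv");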
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
@@ -29,6 +29,9 @@
 #define ARCH_PERFMON_EVENTSEL_INV			(1ULL << 23)
 #define ARCH_PERFMON_EVENTSEL_CMASK			0xFF000000ULL
 
+#define AMD_PERFMON_EVENTSEL_GUESTONLY			(1ULL << 40)
+#define AMD_PERFMON_EVENTSEL_HOSTONLY			(1ULL << 41)
+
 #define AMD64_EVENTSEL_EVENT	\
 	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
 #define INTEL_ARCH_EVENT_MASK	\
@@ -43,14 +46,17 @@
 #define AMD64_RAW_EVENT_MASK		\
 	(X86_RAW_EVENT_MASK          |  \
 	 AMD64_EVENTSEL_EVENT)
+#define AMD64_NUM_COUNTERS				4
+#define AMD64_NUM_COUNTERS_F15H				6
+#define AMD64_NUM_COUNTERS_MAX				AMD64_NUM_COUNTERS_F15H
 
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		0x3c
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX		0
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
 		(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))
 
 #define ARCH_PERFMON_BRANCH_MISSES_RETIRED		6
 
 /*
  * Intel "Architectural Performance Monitoring" CPUID
@@ -110,6 +116,35 @@ union cpuid10_edx {
  */
 #define X86_PMC_IDX_FIXED_BTS				(X86_PMC_IDX_FIXED + 16)
 
+/*
+ * IBS cpuid feature detection
+ */
+
+#define IBS_CPUID_FEATURES		0x8000001b
+
+/*
+ * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
+ * bit 0 is used to indicate the existence of IBS.
+ */
+#define IBS_CAPS_AVAIL			(1U<<0)
+#define IBS_CAPS_FETCHSAM		(1U<<1)
+#define IBS_CAPS_OPSAM			(1U<<2)
+#define IBS_CAPS_RDWROPCNT		(1U<<3)
+#define IBS_CAPS_OPCNT			(1U<<4)
+#define IBS_CAPS_BRNTRGT		(1U<<5)
+#define IBS_CAPS_OPCNTEXT		(1U<<6)
+
+#define IBS_CAPS_DEFAULT		(IBS_CAPS_AVAIL		\
+					 | IBS_CAPS_FETCHSAM	\
+					 | IBS_CAPS_OPSAM)
+
+/*
+ * IBS APIC setup
+ */
+#define IBSCTL				0x1cc
+#define IBSCTL_LVT_OFFSET_VALID		(1ULL<<8)
+#define IBSCTL_LVT_OFFSET_MASK		0x0F
+
 /* IbsFetchCtl bits/masks */
 #define IBS_FETCH_RAND_EN	(1ULL<<57)
 #define IBS_FETCH_VAL		(1ULL<<49)
@@ -124,6 +159,8 @@ union cpuid10_edx {
 #define IBS_OP_MAX_CNT		0x0000FFFFULL
 #define IBS_OP_MAX_CNT_EXT	0x007FFFFFULL	/* not a register bit mask */
 
+extern u32 get_ibs_caps(void);
+
 #ifdef CONFIG_PERF_EVENTS
 extern void perf_events_lapic_init(void);
 
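Usage note for the IBS bits added above: per the comment in the diff, get_ibs_caps() hands back the Fn8000_001B EAX feature mask with bit 0 standing in for "IBS exists at all", so callers test individual IBS_CAPS_* bits. A hedged sketch of a caller:

	u32 caps = get_ibs_caps();

	if (!(caps & IBS_CAPS_AVAIL))
		return -ENODEV;		/* no IBS on this CPU */

	if (caps & IBS_CAPS_OPSAM) {
		/* IBS execution (op) sampling can be used */
	}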
@@ -159,7 +196,19 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
 	);							\
 }
 
+struct perf_guest_switch_msr {
+	unsigned msr;
+	u64 host, guest;
+};
+
+extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
 #else
+static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
+{
+	*nr = 0;
+	return NULL;
+}
+
 static inline void perf_events_lapic_init(void)	{ }
 #endif
 
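The perf_guest_get_msrs() interface added above exists so a hypervisor can switch the listed MSRs between their host and guest values around VM-entry/VM-exit. A sketch of a hypothetical consumer (the loop is illustrative, not the KVM code):

	int i, nr;
	struct perf_guest_switch_msr *msrs = perf_guest_get_msrs(&nr);

	/* On VM-entry, load the guest values; on VM-exit, the host ones. */
	for (i = 0; i < nr; i++)
		wrmsrl(msrs[i].msr, msrs[i].guest);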
diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
@@ -23,7 +23,7 @@ void machine_real_restart(unsigned int type);
 #define MRR_BIOS	0
 #define MRR_APM		1
 
-typedef void (*nmi_shootdown_cb)(int, struct die_args*);
+typedef void (*nmi_shootdown_cb)(int, struct pt_regs*);
 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
 
 #endif /* _ASM_X86_REBOOT_H */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
@@ -19,7 +19,7 @@ endif
 
 obj-y			:= process_$(BITS).o signal.o entry_$(BITS).o
 obj-y			+= traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
-obj-y			+= time.o ioport.o ldt.o dumpstack.o
+obj-y			+= time.o ioport.o ldt.o dumpstack.o nmi.o
 obj-y			+= setup.o x86_init.o i8259.o irqinit.o jump_label.o
 obj-$(CONFIG_IRQ_WORK)	+= irq_work.o
 obj-y			+= probe_roms.o
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
@@ -60,22 +60,10 @@ void arch_trigger_all_cpu_backtrace(void)
 }
 
 static int __kprobes
-arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self,
-			 unsigned long cmd, void *__args)
+arch_trigger_all_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs)
 {
-	struct die_args *args = __args;
-	struct pt_regs *regs;
 	int cpu;
 
-	switch (cmd) {
-	case DIE_NMI:
-		break;
-
-	default:
-		return NOTIFY_DONE;
-	}
-
-	regs = args->regs;
 	cpu = smp_processor_id();
 
 	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
@@ -86,21 +74,16 @@ arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self,
 		show_regs(regs);
 		arch_spin_unlock(&lock);
 		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
-		return NOTIFY_STOP;
+		return NMI_HANDLED;
 	}
 
-	return NOTIFY_DONE;
+	return NMI_DONE;
 }
 
-static __read_mostly struct notifier_block backtrace_notifier = {
-	.notifier_call          = arch_trigger_all_cpu_backtrace_handler,
-	.next                   = NULL,
-	.priority               = NMI_LOCAL_LOW_PRIOR,
-};
-
 static int __init register_trigger_all_cpu_backtrace(void)
 {
-	register_die_notifier(&backtrace_notifier);
+	register_nmi_handler(NMI_LOCAL, arch_trigger_all_cpu_backtrace_handler,
+				0, "arch_bt");
 	return 0;
 }
 early_initcall(register_trigger_all_cpu_backtrace);
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -672,18 +672,11 @@ void __cpuinit uv_cpu_init(void)
 /*
  * When NMI is received, print a stack trace.
  */
-int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
+int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
 {
 	unsigned long real_uv_nmi;
 	int bid;
 
-	if (reason != DIE_NMIUNKNOWN)
-		return NOTIFY_OK;
-
-	if (in_crash_kexec)
-		/* do nothing if entering the crash kernel */
-		return NOTIFY_OK;
-
 	/*
 	 * Each blade has an MMR that indicates when an NMI has been sent
 	 * to cpus on the blade. If an NMI is detected, atomically
@@ -704,7 +697,7 @@ int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
 	}
 
 	if (likely(__get_cpu_var(cpu_last_nmi_count) == uv_blade_info[bid].nmi_count))
-		return NOTIFY_DONE;
+		return NMI_DONE;
 
 	__get_cpu_var(cpu_last_nmi_count) = uv_blade_info[bid].nmi_count;
 
@@ -717,17 +710,12 @@ int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
 	dump_stack();
 	spin_unlock(&uv_nmi_lock);
 
-	return NOTIFY_STOP;
+	return NMI_HANDLED;
 }
 
-static struct notifier_block uv_dump_stack_nmi_nb = {
-	.notifier_call	= uv_handle_nmi,
-	.priority	= NMI_LOCAL_LOW_PRIOR - 1,
-};
-
 void uv_register_nmi_notifier(void)
 {
-	if (register_die_notifier(&uv_dump_stack_nmi_nb))
+	if (register_nmi_handler(NMI_UNKNOWN, uv_handle_nmi, 0, "uv"))
 		printk(KERN_WARNING "UV NMI handler failed to register\n");
 }
 
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
@@ -28,10 +28,15 @@ obj-$(CONFIG_CPU_SUP_UMC_32)		+= umc.o
 
 obj-$(CONFIG_PERF_EVENTS)		+= perf_event.o
 
+ifdef CONFIG_PERF_EVENTS
+obj-$(CONFIG_CPU_SUP_AMD)		+= perf_event_amd.o
+obj-$(CONFIG_CPU_SUP_INTEL)		+= perf_event_p6.o perf_event_p4.o perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
+endif
+
 obj-$(CONFIG_X86_MCE)			+= mcheck/
 obj-$(CONFIG_MTRR)			+= mtrr/
 
-obj-$(CONFIG_X86_LOCAL_APIC)		+= perfctr-watchdog.o
+obj-$(CONFIG_X86_LOCAL_APIC)		+= perfctr-watchdog.o perf_event_amd_ibs.o
 
 quiet_cmd_mkcapflags = MKCAP   $@
       cmd_mkcapflags = $(PERL) $(srctree)/$(src)/mkcapflags.pl $< $@
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -78,27 +78,20 @@ static void raise_exception(struct mce *m, struct pt_regs *pregs)
 
 static cpumask_var_t mce_inject_cpumask;
 
-static int mce_raise_notify(struct notifier_block *self,
-			    unsigned long val, void *data)
+static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs)
 {
-	struct die_args *args = (struct die_args *)data;
 	int cpu = smp_processor_id();
 	struct mce *m = &__get_cpu_var(injectm);
-	if (val != DIE_NMI || !cpumask_test_cpu(cpu, mce_inject_cpumask))
-		return NOTIFY_DONE;
+	if (!cpumask_test_cpu(cpu, mce_inject_cpumask))
+		return NMI_DONE;
 	cpumask_clear_cpu(cpu, mce_inject_cpumask);
 	if (m->inject_flags & MCJ_EXCEPTION)
-		raise_exception(m, args->regs);
+		raise_exception(m, regs);
 	else if (m->status)
 		raise_poll(m);
-	return NOTIFY_STOP;
+	return NMI_HANDLED;
 }
 
-static struct notifier_block mce_raise_nb = {
-	.notifier_call = mce_raise_notify,
-	.priority = NMI_LOCAL_NORMAL_PRIOR,
-};
-
 /* Inject mce on current CPU */
 static int raise_local(void)
 {
@@ -216,7 +209,8 @@ static int inject_init(void)
 		return -ENOMEM;
 	printk(KERN_INFO "Machine check injector initialized\n");
 	mce_chrdev_ops.write = mce_write;
-	register_die_notifier(&mce_raise_nb);
+	register_nmi_handler(NMI_LOCAL, mce_raise_notify, 0,
+				"mce_notify");
 	return 0;
 }
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -908,9 +908,6 @@ void do_machine_check(struct pt_regs *regs, long error_code)
 
 	percpu_inc(mce_exception_count);
 
-	if (notify_die(DIE_NMI, "machine check", regs, error_code,
-			   18, SIGKILL) == NOTIFY_STOP)
-		goto out;
 	if (!banks)
 		goto out;
 
@@ -1140,6 +1137,15 @@ static void mce_start_timer(unsigned long data)
 	add_timer_on(t, smp_processor_id());
 }
 
+/* Must not be called in IRQ context where del_timer_sync() can deadlock */
+static void mce_timer_delete_all(void)
+{
+	int cpu;
+
+	for_each_online_cpu(cpu)
+		del_timer_sync(&per_cpu(mce_timer, cpu));
+}
+
 static void mce_do_trigger(struct work_struct *work)
 {
 	call_usermodehelper(mce_helper, mce_helper_argv, NULL, UMH_NO_WAIT);
@@ -1750,7 +1756,6 @@ static struct syscore_ops mce_syscore_ops = {
 
 static void mce_cpu_restart(void *data)
 {
-	del_timer_sync(&__get_cpu_var(mce_timer));
 	if (!mce_available(__this_cpu_ptr(&cpu_info)))
 		return;
 	__mcheck_cpu_init_generic();
@@ -1760,16 +1765,15 @@ static void mce_cpu_restart(void *data)
 /* Reinit MCEs after user configuration changes */
 static void mce_restart(void)
 {
+	mce_timer_delete_all();
 	on_each_cpu(mce_cpu_restart, NULL, 1);
 }
 
 /* Toggle features for corrected errors */
-static void mce_disable_ce(void *all)
+static void mce_disable_cmci(void *data)
 {
 	if (!mce_available(__this_cpu_ptr(&cpu_info)))
 		return;
-	if (all)
-		del_timer_sync(&__get_cpu_var(mce_timer));
 	cmci_clear();
 }
 
@@ -1852,7 +1856,8 @@ static ssize_t set_ignore_ce(struct sys_device *s,
 	if (mce_ignore_ce ^ !!new) {
 		if (new) {
 			/* disable ce features */
-			on_each_cpu(mce_disable_ce, (void *)1, 1);
+			mce_timer_delete_all();
+			on_each_cpu(mce_disable_cmci, NULL, 1);
 			mce_ignore_ce = 1;
 		} else {
 			/* enable ce features */
@@ -1875,7 +1880,7 @@ static ssize_t set_cmci_disabled(struct sys_device *s,
 	if (mce_cmci_disabled ^ !!new) {
 		if (new) {
 			/* disable cmci */
-			on_each_cpu(mce_disable_ce, NULL, 1);
+			on_each_cpu(mce_disable_cmci, NULL, 1);
 			mce_cmci_disabled = 1;
 		} else {
 			/* enable cmci */
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
@@ -32,6 +32,8 @@
 #include <asm/smp.h>
 #include <asm/alternative.h>
 
+#include "perf_event.h"
+
 #if 0
 #undef wrmsrl
 #define wrmsrl(msr, val) 					\
@@ -43,283 +45,17 @@ do { \
 } while (0)
 #endif
 
-/*
- *          | NHM/WSM    |      SNB     |
- * register -------------------------------
- *          |  HT  | no HT |  HT  | no HT |
- *-----------------------------------------
- * offcore  | core | core  | cpu  | core  |
- * lbr_sel  | core | core  | cpu  | core  |
- * ld_lat   | cpu  | core  | cpu  | core  |
- *-----------------------------------------
- *
- * Given that there is a small number of shared regs,
- * we can pre-allocate their slot in the per-cpu
- * per-core reg tables.
- */
-enum extra_reg_type {
-	EXTRA_REG_NONE  = -1,	/* not used */
-
-	EXTRA_REG_RSP_0 = 0,	/* offcore_response_0 */
-	EXTRA_REG_RSP_1 = 1,	/* offcore_response_1 */
-
-	EXTRA_REG_MAX		/* number of entries needed */
-};
-
-struct event_constraint {
-	union {
-		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
-		u64		idxmsk64;
-	};
-	u64	code;
-	u64	cmask;
-	int	weight;
-};
-
-struct amd_nb {
-	int nb_id;  /* NorthBridge id */
-	int refcnt; /* reference count */
-	struct perf_event *owners[X86_PMC_IDX_MAX];
-	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
-};
-
-struct intel_percore;
-
-#define MAX_LBR_ENTRIES		16
-
-struct cpu_hw_events {
-	/*
-	 * Generic x86 PMC bits
-	 */
-	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
-	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
-	unsigned long		running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
-	int			enabled;
-
-	int			n_events;
-	int			n_added;
-	int			n_txn;
-	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
-	u64			tags[X86_PMC_IDX_MAX];
-	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
-
-	unsigned int		group_flag;
-
-	/*
-	 * Intel DebugStore bits
-	 */
-	struct debug_store	*ds;
-	u64			pebs_enabled;
-
-	/*
-	 * Intel LBR bits
-	 */
-	int				lbr_users;
-	void				*lbr_context;
-	struct perf_branch_stack	lbr_stack;
-	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];
-
-	/*
-	 * manage shared (per-core, per-cpu) registers
-	 * used on Intel NHM/WSM/SNB
-	 */
-	struct intel_shared_regs	*shared_regs;
-
-	/*
-	 * AMD specific bits
-	 */
-	struct amd_nb		*amd_nb;
-};
-
-#define __EVENT_CONSTRAINT(c, n, m, w) {\
-	{ .idxmsk64 = (n) },		\
-	.code = (c),			\
-	.cmask = (m),			\
-	.weight = (w),			\
-}
-
-#define EVENT_CONSTRAINT(c, n, m)	\
-	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))
-
-/*
- * Constraint on the Event code.
- */
-#define INTEL_EVENT_CONSTRAINT(c, n)	\
-	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
-
-/*
- * Constraint on the Event code + UMask + fixed-mask
- *
- *  filter mask to validate fixed counter events.
- *  the following filters disqualify for fixed counters:
- *  - inv
- *  - edge
- *  - cnt-mask
- *  The other filters are supported by fixed counters.
- *  The any-thread option is supported starting with v3.
- */
-#define FIXED_EVENT_CONSTRAINT(c, n)	\
-	EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK)
-
-/*
- * Constraint on the Event code + UMask
- */
-#define INTEL_UEVENT_CONSTRAINT(c, n)	\
-	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
-
-#define EVENT_CONSTRAINT_END		\
-	EVENT_CONSTRAINT(0, 0, 0)
-
-#define for_each_event_constraint(e, c)	\
-	for ((e) = (c); (e)->weight; (e)++)
-
-/*
- * Per register state.
- */
-struct er_account {
-	raw_spinlock_t		lock;	/* per-core: protect structure */
-	u64			config;	/* extra MSR config */
-	u64			reg;	/* extra MSR number */
-	atomic_t		ref;	/* reference count */
-};
-
-/*
- * Extra registers for specific events.
- *
- * Some events need large masks and require external MSRs.
- * Those extra MSRs end up being shared for all events on
- * a PMU and sometimes between PMU of sibling HT threads.
- * In either case, the kernel needs to handle conflicting
- * accesses to those extra, shared, regs. The data structure
- * to manage those registers is stored in cpu_hw_event.
- */
-struct extra_reg {
-	unsigned int		event;
-	unsigned int		msr;
-	u64			config_mask;
-	u64			valid_mask;
-	int			idx;  /* per_xxx->regs[] reg index */
-};
-
-#define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
-	.event = (e),		\
-	.msr = (ms),		\
-	.config_mask = (m),	\
-	.valid_mask = (vm),	\
-	.idx = EXTRA_REG_##i	\
-	}
-
-#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)	\
-	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)
-
-#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
-
-union perf_capabilities {
-	struct {
-		u64	lbr_format    : 6;
-		u64	pebs_trap     : 1;
-		u64	pebs_arch_reg : 1;
-		u64	pebs_format   : 4;
-		u64	smm_freeze    : 1;
-	};
-	u64	capabilities;
-};
-
-/*
- * struct x86_pmu - generic x86 pmu
- */
-struct x86_pmu {
-	/*
-	 * Generic x86 PMC bits
-	 */
-	const char	*name;
-	int		version;
-	int		(*handle_irq)(struct pt_regs *);
-	void		(*disable_all)(void);
-	void		(*enable_all)(int added);
-	void		(*enable)(struct perf_event *);
-	void		(*disable)(struct perf_event *);
-	int		(*hw_config)(struct perf_event *event);
-	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
-	unsigned	eventsel;
-	unsigned	perfctr;
-	u64		(*event_map)(int);
-	int		max_events;
-	int		num_counters;
-	int		num_counters_fixed;
-	int		cntval_bits;
-	u64		cntval_mask;
-	int		apic;
-	u64		max_period;
-	struct event_constraint *
-			(*get_event_constraints)(struct cpu_hw_events *cpuc,
-						 struct perf_event *event);
-
-	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
-						 struct perf_event *event);
-	struct event_constraint *event_constraints;
-	void		(*quirks)(void);
-	int		perfctr_second_write;
-
-	int		(*cpu_prepare)(int cpu);
-	void		(*cpu_starting)(int cpu);
-	void		(*cpu_dying)(int cpu);
-	void		(*cpu_dead)(int cpu);
-
-	/*
-	 * Intel Arch Perfmon v2+
-	 */
-	u64			intel_ctrl;
-	union perf_capabilities intel_cap;
-
-	/*
-	 * Intel DebugStore bits
-	 */
-	int		bts, pebs;
-	int		bts_active, pebs_active;
-	int		pebs_record_size;
-	void		(*drain_pebs)(struct pt_regs *regs);
-	struct event_constraint *pebs_constraints;
-
-	/*
-	 * Intel LBR
-	 */
-	unsigned long	lbr_tos, lbr_from, lbr_to; /* MSR base regs */
-	int		lbr_nr;			   /* hardware stack size */
-
-	/*
-	 * Extra registers for events
-	 */
-	struct extra_reg *extra_regs;
-	unsigned int er_flags;
-};
-
-#define ERF_NO_HT_SHARING	1
-#define ERF_HAS_RSP_1		2
-
-static struct x86_pmu x86_pmu __read_mostly;
-
-static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
+struct x86_pmu x86_pmu __read_mostly;
+
+DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
 	.enabled = 1,
 };
 
-static int x86_perf_event_set_period(struct perf_event *event);
-
-/*
- * Generalized hw caching related hw_event table, filled
- * in on a per model basis. A value of 0 means
- * 'not supported', -1 means 'hw_event makes no sense on
- * this CPU', any other value means the raw hw_event
- * ID.
- */
-
-#define C(x) PERF_COUNT_HW_CACHE_##x
-
-static u64 __read_mostly hw_cache_event_ids
+u64 __read_mostly hw_cache_event_ids
 				[PERF_COUNT_HW_CACHE_MAX]
 				[PERF_COUNT_HW_CACHE_OP_MAX]
 				[PERF_COUNT_HW_CACHE_RESULT_MAX];
-static u64 __read_mostly hw_cache_extra_regs
+u64 __read_mostly hw_cache_extra_regs
 				[PERF_COUNT_HW_CACHE_MAX]
 				[PERF_COUNT_HW_CACHE_OP_MAX]
 				[PERF_COUNT_HW_CACHE_RESULT_MAX];
@@ -329,8 +65,7 @@ static u64 __read_mostly hw_cache_extra_regs
  * Can only be executed on the CPU where the event is active.
  * Returns the delta events processed.
  */
-static u64
-x86_perf_event_update(struct perf_event *event)
+u64 x86_perf_event_update(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	int shift = 64 - x86_pmu.cntval_bits;
@@ -373,30 +108,6 @@ again:
 	return new_raw_count;
 }
 
-static inline int x86_pmu_addr_offset(int index)
-{
-	int offset;
-
-	/* offset = X86_FEATURE_PERFCTR_CORE ? index << 1 : index */
-	alternative_io(ASM_NOP2,
-		       "shll $1, %%eax",
-		       X86_FEATURE_PERFCTR_CORE,
-		       "=a" (offset),
-		       "a" (index));
-
-	return offset;
-}
-
-static inline unsigned int x86_pmu_config_addr(int index)
-{
-	return x86_pmu.eventsel + x86_pmu_addr_offset(index);
-}
-
-static inline unsigned int x86_pmu_event_addr(int index)
-{
-	return x86_pmu.perfctr + x86_pmu_addr_offset(index);
-}
-
 /*
  * Find and validate any extra registers to set up.
  */
@@ -532,9 +243,6 @@ msr_fail:
 	return false;
 }
 
-static void reserve_ds_buffers(void);
-static void release_ds_buffers(void);
-
 static void hw_perf_event_destroy(struct perf_event *event)
 {
 	if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
@@ -583,7 +291,7 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
 	return x86_pmu_extra_regs(val, event);
 }
 
-static int x86_setup_perfctr(struct perf_event *event)
+int x86_setup_perfctr(struct perf_event *event)
 {
 	struct perf_event_attr *attr = &event->attr;
 	struct hw_perf_event *hwc = &event->hw;
@@ -647,7 +355,7 @@ static int x86_setup_perfctr(struct perf_event *event)
 	return 0;
 }
 
-static int x86_pmu_hw_config(struct perf_event *event)
+int x86_pmu_hw_config(struct perf_event *event)
 {
 	if (event->attr.precise_ip) {
 		int precise = 0;
@@ -723,7 +431,7 @@ static int __x86_pmu_event_init(struct perf_event *event)
 	return x86_pmu.hw_config(event);
 }
 
-static void x86_pmu_disable_all(void)
+void x86_pmu_disable_all(void)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int idx;
@@ -758,15 +466,7 @@ static void x86_pmu_disable(struct pmu *pmu)
 	x86_pmu.disable_all();
 }
 
-static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
-					  u64 enable_mask)
-{
-	if (hwc->extra_reg.reg)
-		wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
-	wrmsrl(hwc->config_base, hwc->config | enable_mask);
-}
-
-static void x86_pmu_enable_all(int added)
+void x86_pmu_enable_all(int added)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int idx;
@@ -788,7 +488,7 @@ static inline int is_x86_event(struct perf_event *event)
 	return event->pmu == &pmu;
 }
 
-static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
+int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 {
 	struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
 	unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
@@ -959,7 +659,6 @@ static inline int match_prev_assignment(struct hw_perf_event *hwc,
 }
 
 static void x86_pmu_start(struct perf_event *event, int flags);
-static void x86_pmu_stop(struct perf_event *event, int flags);
 
 static void x86_pmu_enable(struct pmu *pmu)
 {
@@ -1031,21 +730,13 @@ static void x86_pmu_enable(struct pmu *pmu)
 	x86_pmu.enable_all(added);
 }
 
-static inline void x86_pmu_disable_event(struct perf_event *event)
-{
-	struct hw_perf_event *hwc = &event->hw;
-
-	wrmsrl(hwc->config_base, hwc->config);
-}
-
 static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
 
 /*
  * Set the next IRQ period, based on the hwc->period_left value.
  * To be called with the event disabled in hw:
  */
-static int
-x86_perf_event_set_period(struct perf_event *event)
+int x86_perf_event_set_period(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	s64 left = local64_read(&hwc->period_left);
@@ -1105,7 +796,7 @@ x86_perf_event_set_period(struct perf_event *event)
 	return ret;
 }
 
-static void x86_pmu_enable_event(struct perf_event *event)
+void x86_pmu_enable_event(struct perf_event *event)
 {
 	if (__this_cpu_read(cpu_hw_events.enabled))
 		__x86_pmu_enable_event(&event->hw,
@@ -1244,7 +935,7 @@ void perf_event_print_debug(void)
 	local_irq_restore(flags);
 }
 
-static void x86_pmu_stop(struct perf_event *event, int flags)
+void x86_pmu_stop(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
@@ -1297,7 +988,7 @@ static void x86_pmu_del(struct perf_event *event, int flags)
 	perf_event_update_userpage(event);
 }
 
-static int x86_pmu_handle_irq(struct pt_regs *regs)
+int x86_pmu_handle_irq(struct pt_regs *regs)
 {
 	struct perf_sample_data data;
 	struct cpu_hw_events *cpuc;
@@ -1367,109 +1058,28 @@ void perf_events_lapic_init(void)
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
 }
 
-struct pmu_nmi_state {
-	unsigned int	marked;
-	int		handled;
-};
-
-static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);
-
 static int __kprobes
-perf_event_nmi_handler(struct notifier_block *self,
-			 unsigned long cmd, void *__args)
+perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
 {
-	struct die_args *args = __args;
-	unsigned int this_nmi;
-	int handled;
-
 	if (!atomic_read(&active_events))
-		return NOTIFY_DONE;
+		return NMI_DONE;
 
-	switch (cmd) {
-	case DIE_NMI:
-		break;
-
-	case DIE_NMIUNKNOWN:
-		this_nmi = percpu_read(irq_stat.__nmi_count);
-		if (this_nmi != __this_cpu_read(pmu_nmi.marked))
-			/* let the kernel handle the unknown nmi */
-			return NOTIFY_DONE;
-		/*
-		 * This one is a PMU back-to-back nmi. Two events
-		 * trigger 'simultaneously' raising two back-to-back
-		 * NMIs. If the first NMI handles both, the latter
-		 * will be empty and daze the CPU. So, we drop it to
-		 * avoid false-positive 'unknown nmi' messages.
-		 */
-		return NOTIFY_STOP;
-
-	default:
-		return NOTIFY_DONE;
-	}
-
-	handled = x86_pmu.handle_irq(args->regs);
-	if (!handled)
-		return NOTIFY_DONE;
-
-	this_nmi = percpu_read(irq_stat.__nmi_count);
-	if ((handled > 1) ||
-		/* the next nmi could be a back-to-back nmi */
-	    ((__this_cpu_read(pmu_nmi.marked) == this_nmi) &&
-	     (__this_cpu_read(pmu_nmi.handled) > 1))) {
-		/*
-		 * We could have two subsequent back-to-back nmis: The
-		 * first handles more than one counter, the 2nd
-		 * handles only one counter and the 3rd handles no
-		 * counter.
-		 *
-		 * This is the 2nd nmi because the previous was
-		 * handling more than one counter. We will mark the
-		 * next (3rd) and then drop it if unhandled.
-		 */
-		__this_cpu_write(pmu_nmi.marked, this_nmi + 1);
-		__this_cpu_write(pmu_nmi.handled, handled);
-	}
-
-	return NOTIFY_STOP;
+	return x86_pmu.handle_irq(regs);
 }
 
-static __read_mostly struct notifier_block perf_event_nmi_notifier = {
-	.notifier_call		= perf_event_nmi_handler,
-	.next			= NULL,
-	.priority		= NMI_LOCAL_LOW_PRIOR,
-};
-
-static struct event_constraint unconstrained;
-static struct event_constraint emptyconstraint;
-
-static struct event_constraint *
-x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
-{
-	struct event_constraint *c;
-
-	if (x86_pmu.event_constraints) {
-		for_each_event_constraint(c, x86_pmu.event_constraints) {
-			if ((event->hw.config & c->cmask) == c->code)
-				return c;
-		}
-	}
-
-	return &unconstrained;
-}
-
-#include "perf_event_amd.c"
-#include "perf_event_p6.c"
-#include "perf_event_p4.c"
-#include "perf_event_intel_lbr.c"
-#include "perf_event_intel_ds.c"
-#include "perf_event_intel.c"
+struct event_constraint emptyconstraint;
+struct event_constraint unconstrained;
 
 static int __cpuinit
 x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (long)hcpu;
+	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
 	int ret = NOTIFY_OK;
 
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_UP_PREPARE:
+		cpuc->kfree_on_online = NULL;
 		if (x86_pmu.cpu_prepare)
 			ret = x86_pmu.cpu_prepare(cpu);
 		break;
@@ -1479,6 +1089,10 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 		x86_pmu.cpu_starting(cpu);
 		break;
 
+	case CPU_ONLINE:
+		kfree(cpuc->kfree_on_online);
+		break;
+
 	case CPU_DYING:
 		if (x86_pmu.cpu_dying)
 			x86_pmu.cpu_dying(cpu);
@@ -1557,7 +1171,7 @@ static int __init init_hw_perf_events(void)
 		((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
 
 	perf_events_lapic_init();
-	register_die_notifier(&perf_event_nmi_notifier);
+	register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
 
 	unconstrained = (struct event_constraint)
 		__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
@ -0,0 +1,505 @@
|
||||||
|
/*
|
||||||
|
* Performance events x86 architecture header
|
||||||
|
*
|
||||||
|
* Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
|
||||||
|
* Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
|
||||||
|
* Copyright (C) 2009 Jaswinder Singh Rajput
|
||||||
|
* Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
|
||||||
|
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
|
||||||
|
* Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
|
||||||
|
* Copyright (C) 2009 Google, Inc., Stephane Eranian
|
||||||
|
*
|
||||||
|
* For licencing details see kernel-base/COPYING
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <linux/perf_event.h>
|
||||||
|
|
||||||
|
/*
|
||||||
|
* | NHM/WSM | SNB |
|
||||||
|
* register -------------------------------
|
||||||
|
* | HT | no HT | HT | no HT |
|
||||||
|
*-----------------------------------------
|
||||||
|
* offcore | core | core | cpu | core |
|
||||||
|
* lbr_sel | core | core | cpu | core |
|
||||||
|
* ld_lat | cpu | core | cpu | core |
|
||||||
|
*-----------------------------------------
|
||||||
|
*
|
||||||
|
* Given that there is a small number of shared regs,
|
||||||
|
* we can pre-allocate their slot in the per-cpu
|
||||||
|
* per-core reg tables.
|
||||||
|
*/
|
||||||
|
enum extra_reg_type {
|
||||||
|
EXTRA_REG_NONE = -1, /* not used */
|
||||||
|
|
||||||
|
EXTRA_REG_RSP_0 = 0, /* offcore_response_0 */
|
||||||
|
EXTRA_REG_RSP_1 = 1, /* offcore_response_1 */
|
||||||
|
|
||||||
|
EXTRA_REG_MAX /* number of entries needed */
|
||||||
|
};
|
||||||
|
|
||||||
|
struct event_constraint {
|
||||||
|
union {
|
||||||
|
unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
|
||||||
|
u64 idxmsk64;
|
||||||
|
};
|
||||||
|
u64 code;
|
||||||
|
u64 cmask;
|
||||||
|
int weight;
|
||||||
|
};
|
||||||
|
|
||||||
|
struct amd_nb {
|
||||||
|
int nb_id; /* NorthBridge id */
|
||||||
|
int refcnt; /* reference count */
|
||||||
|
struct perf_event *owners[X86_PMC_IDX_MAX];
|
||||||
|
struct event_constraint event_constraints[X86_PMC_IDX_MAX];
|
||||||
|
};
|
||||||
|
|
||||||
|
/* The maximal number of PEBS events: */
|
||||||
|
#define MAX_PEBS_EVENTS 4
|
||||||
|
|
||||||
|
/*
|
||||||
|
* A debug store configuration.
|
||||||
|
*
|
||||||
|
* We only support architectures that use 64bit fields.
|
||||||
|
*/
|
||||||
|
struct debug_store {
|
||||||
|
u64 bts_buffer_base;
|
||||||
|
u64 bts_index;
|
||||||
|
u64 bts_absolute_maximum;
|
||||||
|
u64 bts_interrupt_threshold;
|
||||||
|
u64 pebs_buffer_base;
|
||||||
|
u64 pebs_index;
|
||||||
|
u64 pebs_absolute_maximum;
|
||||||
|
u64 pebs_interrupt_threshold;
|
||||||
|
u64 pebs_event_reset[MAX_PEBS_EVENTS];
|
||||||
|
};
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Per register state.
|
||||||
|
*/
|
||||||
|
struct er_account {
|
||||||
|
raw_spinlock_t lock; /* per-core: protect structure */
|
||||||
|
u64 config; /* extra MSR config */
|
||||||
|
u64 reg; /* extra MSR number */
|
||||||
|
atomic_t ref; /* reference count */
|
||||||
|
};
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Per core/cpu state
|
||||||
|
*
|
||||||
|
* Used to coordinate shared registers between HT threads or
|
||||||
|
* among events on a single PMU.
|
||||||
|
*/
|
||||||
|
struct intel_shared_regs {
|
||||||
|
struct er_account regs[EXTRA_REG_MAX];
|
||||||
|
int refcnt; /* per-core: #HT threads */
|
||||||
|
unsigned core_id; /* per-core: core id */
|
||||||
|
};
|
||||||
|
|
||||||
|
#define MAX_LBR_ENTRIES 16
|
||||||
|
|
||||||
|
struct cpu_hw_events {
|
||||||
|
/*
|
||||||
|
* Generic x86 PMC bits
|
||||||
|
*/
|
||||||
|
struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */
|
||||||
|
unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
|
||||||
|
unsigned long running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
|
||||||
|
int enabled;
|
||||||
|
|
||||||
|
int n_events;
|
||||||
|
int n_added;
|
||||||
|
int n_txn;
|
||||||
|
int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
|
||||||
|
u64 tags[X86_PMC_IDX_MAX];
|
||||||
|
struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
|
||||||
|
|
||||||
|
unsigned int group_flag;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Intel DebugStore bits
|
||||||
|
*/
|
||||||
|
struct debug_store *ds;
|
||||||
|
u64 pebs_enabled;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Intel LBR bits
|
||||||
|
*/
|
||||||
|
int lbr_users;
|
||||||
|
void *lbr_context;
|
||||||
|
struct perf_branch_stack lbr_stack;
|
||||||
|
struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES];
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Intel host/guest exclude bits
|
||||||
|
*/
|
||||||
|
u64 intel_ctrl_guest_mask;
|
||||||
|
u64 intel_ctrl_host_mask;
|
||||||
|
struct perf_guest_switch_msr guest_switch_msrs[X86_PMC_IDX_MAX];
|
||||||
|
|
||||||
|
/*
|
||||||
|
* manage shared (per-core, per-cpu) registers
|
||||||
|
* used on Intel NHM/WSM/SNB
|
||||||
|
*/
|
||||||
|
struct intel_shared_regs *shared_regs;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* AMD specific bits
|
||||||
|
*/
|
||||||
|
struct amd_nb *amd_nb;
|
||||||
|
|
||||||
|
void *kfree_on_online;
|
||||||
|
};
|
||||||
|
|
||||||
|
#define __EVENT_CONSTRAINT(c, n, m, w) {\
|
||||||
|
{ .idxmsk64 = (n) }, \
|
||||||
|
.code = (c), \
|
||||||
|
.cmask = (m), \
|
||||||
|
.weight = (w), \
|
||||||
|
}
|
||||||
|
|
||||||
|
#define EVENT_CONSTRAINT(c, n, m) \
|
||||||
|
__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Constraint on the Event code.
|
||||||
|
*/
|
||||||
|
#define INTEL_EVENT_CONSTRAINT(c, n) \
|
||||||
|
EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Constraint on the Event code + UMask + fixed-mask
|
||||||
|
*
|
||||||
|
* filter mask to validate fixed counter events.
|
||||||
|
* the following filters disqualify for fixed counters:
|
||||||
|
* - inv
|
||||||
|
* - edge
|
||||||
|
* - cnt-mask
|
||||||
|
* The other filters are supported by fixed counters.
|
||||||
|
* The any-thread option is supported starting with v3.
|
||||||
|
*/
|
||||||
|
#define FIXED_EVENT_CONSTRAINT(c, n) \
|
||||||
|
EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Constraint on the Event code + UMask
|
||||||
|
*/
|
||||||
|
#define INTEL_UEVENT_CONSTRAINT(c, n) \
|
||||||
|
EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
|
||||||
|
|
||||||
|
#define EVENT_CONSTRAINT_END \
|
||||||
|
EVENT_CONSTRAINT(0, 0, 0)
|
||||||
|
|
||||||
|
#define for_each_event_constraint(e, c) \
|
||||||
|
for ((e) = (c); (e)->weight; (e)++)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Extra registers for specific events.
|
||||||
|
*
|
||||||
|
* Some events need large masks and require external MSRs.
|
||||||
|
* Those extra MSRs end up being shared for all events on
|
||||||
|
* a PMU and sometimes between PMU of sibling HT threads.
|
||||||
|
* In either case, the kernel needs to handle conflicting
|
||||||
|
* accesses to those extra, shared, regs. The data structure
|
||||||
|
* to manage those registers is stored in cpu_hw_event.
|
||||||
|
*/
|
||||||
|
struct extra_reg {
|
||||||
|
unsigned int event;
|
||||||
|
unsigned int msr;
|
||||||
|
u64 config_mask;
|
||||||
|
u64 valid_mask;
|
||||||
|
int idx; /* per_xxx->regs[] reg index */
|
||||||
|
};
|
||||||
|
|
||||||
|
#define EVENT_EXTRA_REG(e, ms, m, vm, i) { \
|
||||||
|
.event = (e), \
|
||||||
|
.msr = (ms), \
|
||||||
|
.config_mask = (m), \
|
||||||
|
.valid_mask = (vm), \
|
||||||
|
.idx = EXTRA_REG_##i \
|
||||||
|
}
|
||||||
|
|
||||||
|
#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \
|
||||||
|
EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)
|
||||||
|
|
||||||
|
#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
|
||||||
|
|
||||||
|
union perf_capabilities {
|
||||||
|
struct {
|
||||||
|
u64 lbr_format:6;
|
||||||
|
u64 pebs_trap:1;
|
||||||
|
u64 pebs_arch_reg:1;
|
||||||
|
u64 pebs_format:4;
|
||||||
|
u64 smm_freeze:1;
|
||||||
|
};
|
||||||
|
u64 capabilities;
|
||||||
|
};
|
||||||
|
|
||||||
|
/*
|
||||||
|
* struct x86_pmu - generic x86 pmu
|
||||||
|
*/
|
||||||
|
struct x86_pmu {
|
||||||
|
/*
|
||||||
|
* Generic x86 PMC bits
|
||||||
|
*/
|
||||||
|
const char *name;
|
||||||
|
int version;
|
||||||
|
int (*handle_irq)(struct pt_regs *);
|
||||||
|
void (*disable_all)(void);
|
||||||
|
void (*enable_all)(int added);
|
||||||
|
void (*enable)(struct perf_event *);
|
||||||
|
void (*disable)(struct perf_event *);
|
||||||
|
int (*hw_config)(struct perf_event *event);
|
||||||
|
int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
|
||||||
|
unsigned eventsel;
|
||||||
|
unsigned perfctr;
|
||||||
|
u64 (*event_map)(int);
|
||||||
|
int max_events;
|
||||||
|
int num_counters;
|
||||||
|
int num_counters_fixed;
|
||||||
|
int cntval_bits;
|
||||||
|
u64 cntval_mask;
|
||||||
|
int apic;
|
||||||
|
u64 max_period;
|
||||||
|
struct event_constraint *
|
||||||
|
(*get_event_constraints)(struct cpu_hw_events *cpuc,
|
||||||
|
struct perf_event *event);
|
||||||
|
|
||||||
|
void (*put_event_constraints)(struct cpu_hw_events *cpuc,
|
||||||
|
struct perf_event *event);
|
||||||
|
struct event_constraint *event_constraints;
|
||||||
|
void (*quirks)(void);
|
||||||
|
int perfctr_second_write;
|
||||||
|
|
||||||
|
int (*cpu_prepare)(int cpu);
|
||||||
|
void (*cpu_starting)(int cpu);
|
||||||
|
void (*cpu_dying)(int cpu);
|
||||||
|
void (*cpu_dead)(int cpu);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Intel Arch Perfmon v2+
|
||||||
|
*/
|
||||||
|
u64 intel_ctrl;
|
||||||
|
union perf_capabilities intel_cap;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Intel DebugStore bits
|
||||||
|
*/
|
||||||
|
int bts, pebs;
|
||||||
|
int bts_active, pebs_active;
|
||||||
|
int pebs_record_size;
|
||||||
|
void (*drain_pebs)(struct pt_regs *regs);
|
||||||
|
struct event_constraint *pebs_constraints;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Intel LBR
|
||||||
|
*/
|
||||||
|
unsigned long lbr_tos, lbr_from, lbr_to; /* MSR base regs */
|
||||||
|
int lbr_nr; /* hardware stack size */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Extra registers for events
|
||||||
|
*/
|
||||||
|
struct extra_reg *extra_regs;
|
||||||
|
unsigned int er_flags;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Intel host/guest support (KVM)
|
||||||
|
*/
|
||||||
|
struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
|
||||||
|
};
|
||||||
|
|
||||||
|
#define ERF_NO_HT_SHARING 1
|
||||||
|
#define ERF_HAS_RSP_1 2
|
||||||
|
|
||||||
|
extern struct x86_pmu x86_pmu __read_mostly;
|
||||||
|
|
||||||
|
DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
|
||||||
|
|
||||||
|
int x86_perf_event_set_period(struct perf_event *event);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Generalized hw caching related hw_event table, filled
|
||||||
|
* in on a per model basis. A value of 0 means
|
||||||
|
* 'not supported', -1 means 'hw_event makes no sense on
|
||||||
|
* this CPU', any other value means the raw hw_event
|
||||||
|
* ID.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#define C(x) PERF_COUNT_HW_CACHE_##x
|
||||||
|
|
||||||
|
extern u64 __read_mostly hw_cache_event_ids
|
||||||
|
[PERF_COUNT_HW_CACHE_MAX]
|
||||||
|
[PERF_COUNT_HW_CACHE_OP_MAX]
|
||||||
|
[PERF_COUNT_HW_CACHE_RESULT_MAX];
|
||||||
|
extern u64 __read_mostly hw_cache_extra_regs
|
||||||
|
[PERF_COUNT_HW_CACHE_MAX]
|
||||||
|
[PERF_COUNT_HW_CACHE_OP_MAX]
|
||||||
|
[PERF_COUNT_HW_CACHE_RESULT_MAX];
|
||||||
|
|
||||||
|
u64 x86_perf_event_update(struct perf_event *event);
|
||||||
|
|
||||||
|
static inline int x86_pmu_addr_offset(int index)
|
||||||
|
{
|
||||||
|
int offset;
|
||||||
|
|
||||||
|
/* offset = X86_FEATURE_PERFCTR_CORE ? index << 1 : index */
|
||||||
|
alternative_io(ASM_NOP2,
|
||||||
|
"shll $1, %%eax",
|
||||||
|
X86_FEATURE_PERFCTR_CORE,
|
||||||
|
"=a" (offset),
|
||||||
|
"a" (index));
|
||||||
|
|
||||||
|
return offset;
|
||||||
|
}
static inline unsigned int x86_pmu_config_addr(int index)
{
	return x86_pmu.eventsel + x86_pmu_addr_offset(index);
}

static inline unsigned int x86_pmu_event_addr(int index)
{
	return x86_pmu.perfctr + x86_pmu_addr_offset(index);
}

int x86_setup_perfctr(struct perf_event *event);

int x86_pmu_hw_config(struct perf_event *event);

void x86_pmu_disable_all(void);

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
					  u64 enable_mask)
{
	if (hwc->extra_reg.reg)
		wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
	wrmsrl(hwc->config_base, hwc->config | enable_mask);
}

void x86_pmu_enable_all(int added);

int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);

void x86_pmu_stop(struct perf_event *event, int flags);

static inline void x86_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

void x86_pmu_enable_event(struct perf_event *event);

int x86_pmu_handle_irq(struct pt_regs *regs);

extern struct event_constraint emptyconstraint;

extern struct event_constraint unconstrained;

#ifdef CONFIG_CPU_SUP_AMD

int amd_pmu_init(void);

#else /* CONFIG_CPU_SUP_AMD */

static inline int amd_pmu_init(void)
{
	return 0;
}

#endif /* CONFIG_CPU_SUP_AMD */

#ifdef CONFIG_CPU_SUP_INTEL

int intel_pmu_save_and_restart(struct perf_event *event);

struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event);

struct intel_shared_regs *allocate_shared_regs(int cpu);

int intel_pmu_init(void);

void init_debug_store_on_cpu(int cpu);

void fini_debug_store_on_cpu(int cpu);

void release_ds_buffers(void);

void reserve_ds_buffers(void);

extern struct event_constraint bts_constraint;

void intel_pmu_enable_bts(u64 config);

void intel_pmu_disable_bts(void);

int intel_pmu_drain_bts_buffer(void);

extern struct event_constraint intel_core2_pebs_event_constraints[];

extern struct event_constraint intel_atom_pebs_event_constraints[];

extern struct event_constraint intel_nehalem_pebs_event_constraints[];

extern struct event_constraint intel_westmere_pebs_event_constraints[];

extern struct event_constraint intel_snb_pebs_event_constraints[];

struct event_constraint *intel_pebs_constraints(struct perf_event *event);

void intel_pmu_pebs_enable(struct perf_event *event);

void intel_pmu_pebs_disable(struct perf_event *event);

void intel_pmu_pebs_enable_all(void);

void intel_pmu_pebs_disable_all(void);

void intel_ds_init(void);

void intel_pmu_lbr_reset(void);

void intel_pmu_lbr_enable(struct perf_event *event);

void intel_pmu_lbr_disable(struct perf_event *event);

void intel_pmu_lbr_enable_all(void);

void intel_pmu_lbr_disable_all(void);

void intel_pmu_lbr_read(void);

void intel_pmu_lbr_init_core(void);

void intel_pmu_lbr_init_nhm(void);

void intel_pmu_lbr_init_atom(void);

int p4_pmu_init(void);

int p6_pmu_init(void);

#else /* CONFIG_CPU_SUP_INTEL */

static inline void reserve_ds_buffers(void)
{
}

static inline void release_ds_buffers(void)
{
}

static inline int intel_pmu_init(void)
{
	return 0;
}

static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
{
	return NULL;
}

#endif /* CONFIG_CPU_SUP_INTEL */
@ -1,4 +1,10 @@
-#ifdef CONFIG_CPU_SUP_AMD
+#include <linux/perf_event.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <asm/apicdef.h>
+
+#include "perf_event.h"

 static __initconst const u64 amd_hw_cache_event_ids
 				[PERF_COUNT_HW_CACHE_MAX]

@ -132,6 +138,19 @@ static int amd_pmu_hw_config(struct perf_event *event)
 	if (ret)
 		return ret;

+	if (event->attr.exclude_host && event->attr.exclude_guest)
+		/*
+		 * When HO == GO == 1 the hardware treats that as GO == HO == 0
+		 * and will count in both modes. We don't want to count in that
+		 * case so we emulate no-counting by setting US = OS = 0.
+		 */
+		event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
+				      ARCH_PERFMON_EVENTSEL_OS);
+	else if (event->attr.exclude_host)
+		event->hw.config |= AMD_PERFMON_EVENTSEL_GUESTONLY;
+	else if (event->attr.exclude_guest)
+		event->hw.config |= AMD_PERFMON_EVENTSEL_HOSTONLY;
+
 	if (event->attr.type != PERF_TYPE_RAW)
 		return 0;
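The exclusion logic above is driven by two perf_event_attr bits introduced in this merge. A userspace sketch of requesting a guest-only cycle count (error handling trimmed; on pre-merge kernels these attr bits do not exist):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.exclude_host = 1;	/* maps to AMD_PERFMON_EVENTSEL_GUESTONLY above */

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	/* ... read(fd, ...) to sample the count ... */
	close(fd);
	return 0;
}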
@ -350,7 +369,7 @@ static void amd_pmu_cpu_starting(int cpu)
 			continue;

 		if (nb->nb_id == nb_id) {
-			kfree(cpuc->amd_nb);
+			cpuc->kfree_on_online = cpuc->amd_nb;
 			cpuc->amd_nb = nb;
 			break;
 		}
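The kfree() is replaced here because amd_pmu_cpu_starting() runs on the half-up CPU in atomic context, where freeing is not safe; the duplicate north-bridge structure is parked in cpuc->kfree_on_online instead. The freeing site is presumably the CPU_ONLINE notifier added elsewhere in this merge; that assumption is baked into this toy userspace sketch of the pattern:

#include <stdlib.h>

struct cpu_ctx {
	void *kfree_on_online;
};

/* atomic bring-up context: may not free, only park the pointer */
static void on_cpu_starting(struct cpu_ctx *ctx, void *duplicate)
{
	ctx->kfree_on_online = duplicate;
}

/* later, in schedulable context: safe to free (kfree() in the kernel) */
static void on_cpu_online(struct cpu_ctx *ctx)
{
	free(ctx->kfree_on_online);
	ctx->kfree_on_online = NULL;
}

int main(void)
{
	struct cpu_ctx ctx = { 0 };

	on_cpu_starting(&ctx, malloc(64));
	on_cpu_online(&ctx);
	return 0;
}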
@ -392,7 +411,7 @@ static __initconst const struct x86_pmu amd_pmu = {
 	.perfctr		= MSR_K7_PERFCTR0,
 	.event_map		= amd_pmu_event_map,
 	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
-	.num_counters		= 4,
+	.num_counters		= AMD64_NUM_COUNTERS,
 	.cntval_bits		= 48,
 	.cntval_mask		= (1ULL << 48) - 1,
 	.apic			= 1,

@ -556,7 +575,7 @@ static __initconst const struct x86_pmu amd_pmu_f15h = {
 	.perfctr		= MSR_F15H_PERF_CTR,
 	.event_map		= amd_pmu_event_map,
 	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
-	.num_counters		= 6,
+	.num_counters		= AMD64_NUM_COUNTERS_F15H,
 	.cntval_bits		= 48,
 	.cntval_mask		= (1ULL << 48) - 1,
 	.apic			= 1,

@ -573,7 +592,7 @@ static __initconst const struct x86_pmu amd_pmu_f15h = {
 #endif
 };

-static __init int amd_pmu_init(void)
+__init int amd_pmu_init(void)
 {
 	/* Performance-monitoring supported from K7 and later: */
 	if (boot_cpu_data.x86 < 6)

@ -602,12 +621,3 @@ static __init int amd_pmu_init(void)
 	return 0;
 }
-
-#else /* CONFIG_CPU_SUP_AMD */
-
-static int amd_pmu_init(void)
-{
-	return 0;
-}
-
-#endif
@ -0,0 +1,294 @@
/*
 * Performance events - AMD IBS
 *
 * Copyright (C) 2011 Advanced Micro Devices, Inc., Robert Richter
 *
 * For licencing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/apic.h>

static u32 ibs_caps;

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)

static struct pmu perf_ibs;

static int perf_ibs_init(struct perf_event *event)
{
	if (perf_ibs.type != event->attr.type)
		return -ENOENT;
	return 0;
}

static int perf_ibs_add(struct perf_event *event, int flags)
{
	return 0;
}

static void perf_ibs_del(struct perf_event *event, int flags)
{
}

static struct pmu perf_ibs = {
	.event_init	= perf_ibs_init,
	.add		= perf_ibs_add,
	.del		= perf_ibs_del,
};

static __init int perf_event_ibs_init(void)
{
	if (!ibs_caps)
		return -ENODEV;	/* ibs not supported by the cpu */

	perf_pmu_register(&perf_ibs, "ibs", -1);
	printk(KERN_INFO "perf: AMD IBS detected (0x%08x)\n", ibs_caps);

	return 0;
}

#else /* defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) */

static __init int perf_event_ibs_init(void) { return 0; }

#endif

/* IBS - apic initialization, for perf and oprofile */

static __init u32 __get_ibs_caps(void)
{
	u32 caps;
	unsigned int max_level;

	if (!boot_cpu_has(X86_FEATURE_IBS))
		return 0;

	/* check IBS cpuid feature flags */
	max_level = cpuid_eax(0x80000000);
	if (max_level < IBS_CPUID_FEATURES)
		return IBS_CAPS_DEFAULT;

	caps = cpuid_eax(IBS_CPUID_FEATURES);
	if (!(caps & IBS_CAPS_AVAIL))
		/* cpuid flags not valid */
		return IBS_CAPS_DEFAULT;

	return caps;
}

u32 get_ibs_caps(void)
{
	return ibs_caps;
}

EXPORT_SYMBOL(get_ibs_caps);

static inline int get_eilvt(int offset)
{
	return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1);
}

static inline int put_eilvt(int offset)
{
	return !setup_APIC_eilvt(offset, 0, 0, 1);
}

/*
 * Check and reserve APIC extended interrupt LVT offset for IBS if available.
 */
static inline int ibs_eilvt_valid(void)
{
	int offset;
	u64 val;
	int valid = 0;

	preempt_disable();

	rdmsrl(MSR_AMD64_IBSCTL, val);
	offset = val & IBSCTL_LVT_OFFSET_MASK;

	if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
		pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
		goto out;
	}

	if (!get_eilvt(offset)) {
		pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
		goto out;
	}

	valid = 1;
out:
	preempt_enable();

	return valid;
}

static int setup_ibs_ctl(int ibs_eilvt_off)
{
	struct pci_dev *cpu_cfg;
	int nodes;
	u32 value = 0;

	nodes = 0;
	cpu_cfg = NULL;
	do {
		cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
					 PCI_DEVICE_ID_AMD_10H_NB_MISC,
					 cpu_cfg);
		if (!cpu_cfg)
			break;
		++nodes;
		pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
				       | IBSCTL_LVT_OFFSET_VALID);
		pci_read_config_dword(cpu_cfg, IBSCTL, &value);
		if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) {
			pci_dev_put(cpu_cfg);
			printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
			       "IBSCTL = 0x%08x\n", value);
			return -EINVAL;
		}
	} while (1);

	if (!nodes) {
		printk(KERN_DEBUG "No CPU node configured for IBS\n");
		return -ENODEV;
	}

	return 0;
}

/*
 * This runs only on the current cpu. We try to find an LVT offset and
 * setup the local APIC. For this we must disable preemption. On
 * success we initialize all nodes with this offset. This updates then
 * the offset in the IBS_CTL per-node msr. The per-core APIC setup of
 * the IBS interrupt vector is handled by perf_ibs_cpu_notifier that
 * is using the new offset.
 */
static int force_ibs_eilvt_setup(void)
{
	int offset;
	int ret;

	preempt_disable();
	/* find the next free available EILVT entry, skip offset 0 */
	for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
		if (get_eilvt(offset))
			break;
	}
	preempt_enable();

	if (offset == APIC_EILVT_NR_MAX) {
		printk(KERN_DEBUG "No EILVT entry available\n");
		return -EBUSY;
	}

	ret = setup_ibs_ctl(offset);
	if (ret)
		goto out;

	if (!ibs_eilvt_valid()) {
		ret = -EFAULT;
		goto out;
	}

	pr_err(FW_BUG "using offset %d for IBS interrupts\n", offset);
	pr_err(FW_BUG "workaround enabled for IBS LVT offset\n");

	return 0;
out:
	preempt_disable();
	put_eilvt(offset);
	preempt_enable();
	return ret;
}

static inline int get_ibs_lvt_offset(void)
{
	u64 val;

	rdmsrl(MSR_AMD64_IBSCTL, val);
	if (!(val & IBSCTL_LVT_OFFSET_VALID))
		return -EINVAL;

	return val & IBSCTL_LVT_OFFSET_MASK;
}

static void setup_APIC_ibs(void *dummy)
{
	int offset;

	offset = get_ibs_lvt_offset();
	if (offset < 0)
		goto failed;

	if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0))
		return;
failed:
	pr_warn("perf: IBS APIC setup failed on cpu #%d\n",
		smp_processor_id());
}

static void clear_APIC_ibs(void *dummy)
{
	int offset;

	offset = get_ibs_lvt_offset();
	if (offset >= 0)
		setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
}

static int __cpuinit
perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		setup_APIC_ibs(NULL);
		break;
	case CPU_DYING:
		clear_APIC_ibs(NULL);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static __init int amd_ibs_init(void)
{
	u32 caps;
	int ret;

	caps = __get_ibs_caps();
	if (!caps)
		return -ENODEV;	/* ibs not supported by the cpu */

	if (!ibs_eilvt_valid()) {
		ret = force_ibs_eilvt_setup();
		if (ret) {
			pr_err("Failed to setup IBS, %d\n", ret);
			return ret;
		}
	}

	get_online_cpus();
	ibs_caps = caps;
	/* make ibs_caps visible to other cpus: */
	smp_mb();
	perf_cpu_notifier(perf_ibs_cpu_notifier);
	smp_call_function(setup_APIC_ibs, NULL, 1);
	put_online_cpus();

	return perf_event_ibs_init();
}

/* Since we need the pci subsystem to init ibs we can't do this earlier: */
device_initcall(amd_ibs_init);
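perf_pmu_register(&perf_ibs, "ibs", -1) asks the core for a dynamically allocated event type, which perf_ibs_init() then matches against attr.type. A sketch of how userspace would discover and open that PMU; the sysfs path is the generic dynamic-PMU location, and whether this kernel already exposes it for ibs is an assumption of this sketch:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	FILE *f;
	int type, fd;

	f = fopen("/sys/bus/event_source/devices/ibs/type", "r");
	if (!f) {
		fprintf(stderr, "ibs PMU not present\n");
		return 1;
	}
	if (fscanf(f, "%d", &type) != 1)
		type = -1;
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;		/* matched by perf_ibs_init() above */

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	close(fd);
	return 0;
}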
@ -1,16 +1,19 @@
-#ifdef CONFIG_CPU_SUP_INTEL
-
 /*
  * Per core/cpu state
  *
  * Used to coordinate shared registers between HT threads or
  * among events on a single PMU.
  */
-struct intel_shared_regs {
-	struct er_account	regs[EXTRA_REG_MAX];
-	int			refcnt;		/* per-core: #HT threads */
-	unsigned		core_id;	/* per-core: core id */
-};
+
+#include <linux/stddef.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+
+#include <asm/hardirq.h>
+#include <asm/apic.h>
+
+#include "perf_event.h"

 /*
  * Intel PerfMon, used on Core and later.
@ -746,7 +749,8 @@ static void intel_pmu_enable_all(int added)
 	intel_pmu_pebs_enable_all();
 	intel_pmu_lbr_enable_all();
-	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
+	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
+	       x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);

 	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
 		struct perf_event *event =

@ -869,6 +873,7 @@ static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
 static void intel_pmu_disable_event(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

 	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
 		intel_pmu_disable_bts();

@ -876,6 +881,9 @@ static void intel_pmu_disable_event(struct perf_event *event)
 		return;
 	}

+	cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
+	cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
+
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
 		intel_pmu_disable_fixed(hwc);
 		return;

@ -921,6 +929,7 @@ static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
 static void intel_pmu_enable_event(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

 	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
 		if (!__this_cpu_read(cpu_hw_events.enabled))

@ -930,6 +939,11 @@ static void intel_pmu_enable_event(struct perf_event *event)
 		return;
 	}

+	if (event->attr.exclude_host)
+		cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
+	if (event->attr.exclude_guest)
+		cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);
+
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
 		intel_pmu_enable_fixed(hwc);
 		return;

@ -945,7 +959,7 @@ static void intel_pmu_enable_event(struct perf_event *event)
  * Save and restart an expired event. Called by NMI contexts,
  * so it has to be careful about preempting normal event ops:
  */
-static int intel_pmu_save_and_restart(struct perf_event *event)
+int intel_pmu_save_and_restart(struct perf_event *event)
 {
 	x86_perf_event_update(event);
 	return x86_perf_event_set_period(event);
@ -1197,6 +1211,21 @@ intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
 	return c;
 }

+struct event_constraint *
+x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
+{
+	struct event_constraint *c;
+
+	if (x86_pmu.event_constraints) {
+		for_each_event_constraint(c, x86_pmu.event_constraints) {
+			if ((event->hw.config & c->cmask) == c->code)
+				return c;
+		}
+	}
+
+	return &unconstrained;
+}
+
 static struct event_constraint *
 intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
 {
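x86_get_event_constraints() resolves an event's allowed counters by testing (config & cmask) == code against the model's table and falling back to "unconstrained". A standalone worked example of that match; the table entry is hypothetical (event selector 0xcb pinned to counter 0), not a real model constraint:

#include <stdint.h>
#include <stdio.h>

struct event_constraint {
	uint64_t code;		/* value to match */
	uint64_t cmask;		/* which config bits participate */
	uint64_t idxmsk;	/* allowed counter bitmask */
};

static const struct event_constraint constraints[] = {
	{ .code = 0xcb, .cmask = 0xff, .idxmsk = 0x1 },
	{ 0 },			/* end marker, like EVENT_CONSTRAINT_END */
};

static uint64_t get_idxmsk(uint64_t config)
{
	const struct event_constraint *c;

	for (c = constraints; c->cmask; c++)
		if ((config & c->cmask) == c->code)
			return c->idxmsk;
	return ~0ULL;		/* "unconstrained": any counter */
}

int main(void)
{
	/* 0x4fcb & 0xff == 0xcb, so it is pinned to counter 0 */
	printf("0x4fcb -> counters 0x%llx\n",
	       (unsigned long long)get_idxmsk(0x4fcb));
	printf("0x412e -> counters 0x%llx\n",
	       (unsigned long long)get_idxmsk(0x412e));
	return 0;
}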
@ -1284,12 +1313,84 @@ static int intel_pmu_hw_config(struct perf_event *event)
 	return 0;
 }

+struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
+{
+	if (x86_pmu.guest_get_msrs)
+		return x86_pmu.guest_get_msrs(nr);
+	*nr = 0;
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
+
+static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
+
+	arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
+	arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
+	arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
+
+	*nr = 1;
+	return arr;
+}
+
+static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
+	int idx;
+
+	for (idx = 0; idx < x86_pmu.num_counters; idx++)  {
+		struct perf_event *event = cpuc->events[idx];
+
+		arr[idx].msr = x86_pmu_config_addr(idx);
+		arr[idx].host = arr[idx].guest = 0;
+
+		if (!test_bit(idx, cpuc->active_mask))
+			continue;
+
+		arr[idx].host = arr[idx].guest =
+			event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
+
+		if (event->attr.exclude_host)
+			arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
+		else if (event->attr.exclude_guest)
+			arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
+	}
+
+	*nr = x86_pmu.num_counters;
+	return arr;
+}
+
+static void core_pmu_enable_event(struct perf_event *event)
+{
+	if (!event->attr.exclude_host)
+		x86_pmu_enable_event(event);
+}
+
+static void core_pmu_enable_all(int added)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	int idx;
+
+	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+		struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
+
+		if (!test_bit(idx, cpuc->active_mask) ||
+				cpuc->events[idx]->attr.exclude_host)
+			continue;
+
+		__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
+	}
+}
+
 static __initconst const struct x86_pmu core_pmu = {
 	.name			= "core",
 	.handle_irq		= x86_pmu_handle_irq,
 	.disable_all		= x86_pmu_disable_all,
-	.enable_all		= x86_pmu_enable_all,
-	.enable			= x86_pmu_enable_event,
+	.enable_all		= core_pmu_enable_all,
+	.enable			= core_pmu_enable_event,
 	.disable		= x86_pmu_disable_event,
 	.hw_config		= x86_pmu_hw_config,
 	.schedule_events	= x86_schedule_events,
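perf_guest_get_msrs() hands the hypervisor an array of (msr, host value, guest value) triples so counter state can be swapped at VM entry and exit; intel_guest_get_msrs() folds the guest/host masks built up in enable/disable above into one GLOBAL_CTRL triple. A runnable userspace simulation of the consumer loop; wrmsrl() is stubbed with a printf and the mask values are made up:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

struct perf_guest_switch_msr {
	unsigned msr;
	u64 host, guest;
};

/* stub: a real consumer (KVM) would execute a wrmsr here */
static void wrmsrl(unsigned msr, u64 val)
{
	printf("wrmsr 0x%x <- 0x%llx\n", msr, (unsigned long long)val);
}

/* stand-in for the kernel's perf_guest_get_msrs() from the hunk above */
static struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
	static struct perf_guest_switch_msr arr[1] = {
		/* MSR_CORE_PERF_GLOBAL_CTRL = 0x38f; masks are invented */
		{ .msr = 0x38f, .host = 0x7, .guest = 0x3 },
	};
	*nr = 1;
	return arr;
}

int main(void)
{
	struct perf_guest_switch_msr *msrs;
	int i, nr;

	msrs = perf_guest_get_msrs(&nr);
	for (i = 0; i < nr; i++)
		wrmsrl(msrs[i].msr, msrs[i].guest);	/* VM entry */
	for (i = 0; i < nr; i++)
		wrmsrl(msrs[i].msr, msrs[i].host);	/* VM exit */
	return 0;
}

The point of the array form is that both values are precomputed outside the entry/exit path, so the switch itself is just a short run of wrmsr instructions.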
@ -1307,9 +1408,10 @@ static __initconst const struct x86_pmu core_pmu = {
 	.get_event_constraints	= intel_get_event_constraints,
 	.put_event_constraints	= intel_put_event_constraints,
 	.event_constraints	= intel_core_event_constraints,
+	.guest_get_msrs		= core_guest_get_msrs,
 };

-static struct intel_shared_regs *allocate_shared_regs(int cpu)
+struct intel_shared_regs *allocate_shared_regs(int cpu)
 {
 	struct intel_shared_regs *regs;
 	int i;

@ -1362,7 +1464,7 @@ static void intel_pmu_cpu_starting(int cpu)
 		pc = per_cpu(cpu_hw_events, i).shared_regs;
 		if (pc && pc->core_id == core_id) {
-			kfree(cpuc->shared_regs);
+			cpuc->kfree_on_online = cpuc->shared_regs;
 			cpuc->shared_regs = pc;
 			break;
 		}

@ -1413,6 +1515,7 @@ static __initconst const struct x86_pmu intel_pmu = {
 	.cpu_prepare		= intel_pmu_cpu_prepare,
 	.cpu_starting		= intel_pmu_cpu_starting,
 	.cpu_dying		= intel_pmu_cpu_dying,
+	.guest_get_msrs		= intel_guest_get_msrs,
 };

 static void intel_clovertown_quirks(void)

@ -1441,7 +1544,7 @@ static void intel_clovertown_quirks(void)
 	x86_pmu.pebs_constraints = NULL;
 }

-static __init int intel_pmu_init(void)
+__init int intel_pmu_init(void)
 {
 	union cpuid10_edx edx;
 	union cpuid10_eax eax;

@ -1597,7 +1700,7 @@ static __init int intel_pmu_init(void)
 		intel_pmu_lbr_init_nhm();

 		x86_pmu.event_constraints = intel_snb_event_constraints;
-		x86_pmu.pebs_constraints = intel_snb_pebs_events;
+		x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
 		x86_pmu.extra_regs = intel_snb_extra_regs;
 		/* all extra regs are per-cpu when HT is on */
 		x86_pmu.er_flags |= ERF_HAS_RSP_1;

@ -1628,16 +1731,3 @@ static __init int intel_pmu_init(void)
 	}
 	return 0;
 }
-
-#else /* CONFIG_CPU_SUP_INTEL */
-
-static int intel_pmu_init(void)
-{
-	return 0;
-}
-
-static struct intel_shared_regs *allocate_shared_regs(int cpu)
-{
-	return NULL;
-}
-
-#endif /* CONFIG_CPU_SUP_INTEL */
@ -1,7 +1,10 @@
|
||||||
#ifdef CONFIG_CPU_SUP_INTEL
|
#include <linux/bitops.h>
|
||||||
|
#include <linux/types.h>
|
||||||
|
#include <linux/slab.h>
|
||||||
|
|
||||||
/* The maximal number of PEBS events: */
|
#include <asm/perf_event.h>
|
||||||
#define MAX_PEBS_EVENTS 4
|
|
||||||
|
#include "perf_event.h"
|
||||||
|
|
||||||
/* The size of a BTS record in bytes: */
|
/* The size of a BTS record in bytes: */
|
||||||
#define BTS_RECORD_SIZE 24
|
#define BTS_RECORD_SIZE 24
|
||||||
|
@ -37,24 +40,7 @@ struct pebs_record_nhm {
|
||||||
u64 status, dla, dse, lat;
|
u64 status, dla, dse, lat;
|
||||||
};
|
};
|
||||||
|
|
||||||
/*
|
void init_debug_store_on_cpu(int cpu)
|
||||||
* A debug store configuration.
|
|
||||||
*
|
|
||||||
* We only support architectures that use 64bit fields.
|
|
||||||
*/
|
|
||||||
struct debug_store {
|
|
||||||
u64 bts_buffer_base;
|
|
||||||
u64 bts_index;
|
|
||||||
u64 bts_absolute_maximum;
|
|
||||||
u64 bts_interrupt_threshold;
|
|
||||||
u64 pebs_buffer_base;
|
|
||||||
u64 pebs_index;
|
|
||||||
u64 pebs_absolute_maximum;
|
|
||||||
u64 pebs_interrupt_threshold;
|
|
||||||
u64 pebs_event_reset[MAX_PEBS_EVENTS];
|
|
||||||
};
|
|
||||||
|
|
||||||
static void init_debug_store_on_cpu(int cpu)
|
|
||||||
{
|
{
|
||||||
struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
|
struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
|
||||||
|
|
||||||
|
@ -66,7 +52,7 @@ static void init_debug_store_on_cpu(int cpu)
|
||||||
(u32)((u64)(unsigned long)ds >> 32));
|
(u32)((u64)(unsigned long)ds >> 32));
|
||||||
}
|
}
|
||||||
|
|
||||||
static void fini_debug_store_on_cpu(int cpu)
|
void fini_debug_store_on_cpu(int cpu)
|
||||||
{
|
{
|
||||||
if (!per_cpu(cpu_hw_events, cpu).ds)
|
if (!per_cpu(cpu_hw_events, cpu).ds)
|
||||||
return;
|
return;
|
||||||
|
@ -175,7 +161,7 @@ static void release_ds_buffer(int cpu)
|
||||||
kfree(ds);
|
kfree(ds);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void release_ds_buffers(void)
|
void release_ds_buffers(void)
|
||||||
{
|
{
|
||||||
int cpu;
|
int cpu;
|
||||||
|
|
||||||
|
@ -194,7 +180,7 @@ static void release_ds_buffers(void)
|
||||||
put_online_cpus();
|
put_online_cpus();
|
||||||
}
|
}
|
||||||
|
|
||||||
static void reserve_ds_buffers(void)
|
void reserve_ds_buffers(void)
|
||||||
{
|
{
|
||||||
int bts_err = 0, pebs_err = 0;
|
int bts_err = 0, pebs_err = 0;
|
||||||
int cpu;
|
int cpu;
|
||||||
|
@ -260,10 +246,10 @@ static void reserve_ds_buffers(void)
|
||||||
* BTS
|
* BTS
|
||||||
*/
|
*/
|
||||||
|
|
||||||
static struct event_constraint bts_constraint =
|
struct event_constraint bts_constraint =
|
||||||
EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
|
EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
|
||||||
|
|
||||||
static void intel_pmu_enable_bts(u64 config)
|
void intel_pmu_enable_bts(u64 config)
|
||||||
{
|
{
|
||||||
unsigned long debugctlmsr;
|
unsigned long debugctlmsr;
|
||||||
|
|
||||||
|
@ -282,7 +268,7 @@ static void intel_pmu_enable_bts(u64 config)
|
||||||
update_debugctlmsr(debugctlmsr);
|
update_debugctlmsr(debugctlmsr);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void intel_pmu_disable_bts(void)
|
void intel_pmu_disable_bts(void)
|
||||||
{
|
{
|
||||||
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
|
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
|
||||||
unsigned long debugctlmsr;
|
unsigned long debugctlmsr;
|
||||||
|
@ -299,7 +285,7 @@ static void intel_pmu_disable_bts(void)
|
||||||
update_debugctlmsr(debugctlmsr);
|
update_debugctlmsr(debugctlmsr);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int intel_pmu_drain_bts_buffer(void)
|
int intel_pmu_drain_bts_buffer(void)
|
||||||
{
|
{
|
||||||
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
|
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
|
||||||
struct debug_store *ds = cpuc->ds;
|
struct debug_store *ds = cpuc->ds;
|
||||||
|
@ -361,7 +347,7 @@ static int intel_pmu_drain_bts_buffer(void)
|
||||||
/*
|
/*
|
||||||
* PEBS
|
* PEBS
|
||||||
*/
|
*/
|
||||||
static struct event_constraint intel_core2_pebs_event_constraints[] = {
|
struct event_constraint intel_core2_pebs_event_constraints[] = {
|
||||||
INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
|
INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
|
||||||
INTEL_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
|
INTEL_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
|
||||||
INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
|
INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
|
||||||
|
@ -370,14 +356,14 @@ static struct event_constraint intel_core2_pebs_event_constraints[] = {
|
||||||
EVENT_CONSTRAINT_END
|
EVENT_CONSTRAINT_END
|
||||||
};
|
};
|
||||||
|
|
||||||
static struct event_constraint intel_atom_pebs_event_constraints[] = {
|
struct event_constraint intel_atom_pebs_event_constraints[] = {
|
||||||
INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
|
INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
|
||||||
INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
|
INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
|
||||||
INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
|
INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
|
||||||
EVENT_CONSTRAINT_END
|
EVENT_CONSTRAINT_END
|
||||||
};
|
};
|
||||||
|
|
||||||
static struct event_constraint intel_nehalem_pebs_event_constraints[] = {
|
struct event_constraint intel_nehalem_pebs_event_constraints[] = {
|
||||||
INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */
|
INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */
|
||||||
INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
|
INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
|
||||||
INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
|
INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
|
||||||
|
@ -392,7 +378,7 @@ static struct event_constraint intel_nehalem_pebs_event_constraints[] = {
|
||||||
EVENT_CONSTRAINT_END
|
EVENT_CONSTRAINT_END
|
||||||
};
|
};
|
||||||
|
|
||||||
static struct event_constraint intel_westmere_pebs_event_constraints[] = {
|
struct event_constraint intel_westmere_pebs_event_constraints[] = {
|
||||||
INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */
|
INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */
|
||||||
INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
|
INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
|
||||||
INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
|
INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
|
||||||
|
@ -407,7 +393,7 @@ static struct event_constraint intel_westmere_pebs_event_constraints[] = {
|
||||||
EVENT_CONSTRAINT_END
|
EVENT_CONSTRAINT_END
|
||||||
};
|
};
|
||||||
|
|
||||||
static struct event_constraint intel_snb_pebs_events[] = {
|
struct event_constraint intel_snb_pebs_event_constraints[] = {
|
||||||
INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
|
INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
|
||||||
INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
|
INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
|
||||||
INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
|
INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
|
||||||
|
@ -428,8 +414,7 @@ static struct event_constraint intel_snb_pebs_events[] = {
|
||||||
EVENT_CONSTRAINT_END
|
EVENT_CONSTRAINT_END
|
||||||
};
|
};
|
||||||
|
|
||||||
static struct event_constraint *
|
struct event_constraint *intel_pebs_constraints(struct perf_event *event)
|
||||||
intel_pebs_constraints(struct perf_event *event)
|
|
||||||
{
|
{
|
||||||
struct event_constraint *c;
|
struct event_constraint *c;
|
||||||
|
|
||||||
|
@ -446,7 +431,7 @@ intel_pebs_constraints(struct perf_event *event)
|
||||||
return &emptyconstraint;
|
return &emptyconstraint;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void intel_pmu_pebs_enable(struct perf_event *event)
|
void intel_pmu_pebs_enable(struct perf_event *event)
|
||||||
{
|
{
|
||||||
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
|
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
|
||||||
struct hw_perf_event *hwc = &event->hw;
|
struct hw_perf_event *hwc = &event->hw;
|
||||||
|
@ -460,7 +445,7 @@ static void intel_pmu_pebs_enable(struct perf_event *event)
|
||||||
intel_pmu_lbr_enable(event);
|
intel_pmu_lbr_enable(event);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void intel_pmu_pebs_disable(struct perf_event *event)
|
void intel_pmu_pebs_disable(struct perf_event *event)
|
||||||
{
|
{
|
||||||
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
|
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
|
||||||
struct hw_perf_event *hwc = &event->hw;
|
struct hw_perf_event *hwc = &event->hw;
|
||||||
|
@ -475,7 +460,7 @@ static void intel_pmu_pebs_disable(struct perf_event *event)
|
||||||
intel_pmu_lbr_disable(event);
|
intel_pmu_lbr_disable(event);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void intel_pmu_pebs_enable_all(void)
|
void intel_pmu_pebs_enable_all(void)
|
||||||
{
|
{
|
||||||
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
|
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
|
||||||
|
|
||||||
|
@ -483,7 +468,7 @@ static void intel_pmu_pebs_enable_all(void)
|
||||||
wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
|
wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void intel_pmu_pebs_disable_all(void)
|
void intel_pmu_pebs_disable_all(void)
|
||||||
{
|
{
|
||||||
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
|
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
|
||||||
|
|
||||||
|
@ -576,8 +561,6 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int intel_pmu_save_and_restart(struct perf_event *event);
|
|
||||||
|
|
||||||
static void __intel_pmu_pebs_event(struct perf_event *event,
|
static void __intel_pmu_pebs_event(struct perf_event *event,
|
||||||
struct pt_regs *iregs, void *__pebs)
|
struct pt_regs *iregs, void *__pebs)
|
||||||
{
|
{
|
||||||
|
@ -716,7 +699,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
|
||||||
* BTS, PEBS probe and setup
|
* BTS, PEBS probe and setup
|
||||||
*/
|
*/
|
||||||
|
|
||||||
static void intel_ds_init(void)
|
void intel_ds_init(void)
|
||||||
{
|
{
|
||||||
/*
|
/*
|
||||||
* No support for 32bit formats
|
* No support for 32bit formats
|
||||||
|
@ -749,15 +732,3 @@ static void intel_ds_init(void)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#else /* CONFIG_CPU_SUP_INTEL */
|
|
||||||
|
|
||||||
static void reserve_ds_buffers(void)
|
|
||||||
{
|
|
||||||
}
|
|
||||||
|
|
||||||
static void release_ds_buffers(void)
|
|
||||||
{
|
|
||||||
}
|
|
||||||
|
|
||||||
#endif /* CONFIG_CPU_SUP_INTEL */
|
|
||||||
|
|
|
@ -1,4 +1,10 @@
-#ifdef CONFIG_CPU_SUP_INTEL
+#include <linux/perf_event.h>
+#include <linux/types.h>
+
+#include <asm/perf_event.h>
+#include <asm/msr.h>
+
+#include "perf_event.h"

 enum {
 	LBR_FORMAT_32		= 0x00,

@ -48,7 +54,7 @@ static void intel_pmu_lbr_reset_64(void)
 	}
 }

-static void intel_pmu_lbr_reset(void)
+void intel_pmu_lbr_reset(void)
 {
 	if (!x86_pmu.lbr_nr)
 		return;

@ -59,7 +65,7 @@ static void intel_pmu_lbr_reset(void)
 		intel_pmu_lbr_reset_64();
 }

-static void intel_pmu_lbr_enable(struct perf_event *event)
+void intel_pmu_lbr_enable(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

@ -81,7 +87,7 @@ static void intel_pmu_lbr_enable(struct perf_event *event)
 	cpuc->lbr_users++;
 }

-static void intel_pmu_lbr_disable(struct perf_event *event)
+void intel_pmu_lbr_disable(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

@ -95,7 +101,7 @@ static void intel_pmu_lbr_disable(struct perf_event *event)
 		__intel_pmu_lbr_disable();
 }

-static void intel_pmu_lbr_enable_all(void)
+void intel_pmu_lbr_enable_all(void)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

@ -103,7 +109,7 @@ static void intel_pmu_lbr_enable_all(void)
 		__intel_pmu_lbr_enable();
 }

-static void intel_pmu_lbr_disable_all(void)
+void intel_pmu_lbr_disable_all(void)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

@ -178,7 +184,7 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
 	cpuc->lbr_stack.nr = i;
 }

-static void intel_pmu_lbr_read(void)
+void intel_pmu_lbr_read(void)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

@ -191,7 +197,7 @@ static void intel_pmu_lbr_read(void)
 		intel_pmu_lbr_read_64(cpuc);
 }

-static void intel_pmu_lbr_init_core(void)
+void intel_pmu_lbr_init_core(void)
 {
 	x86_pmu.lbr_nr	   = 4;
 	x86_pmu.lbr_tos    = 0x01c9;

@ -199,7 +205,7 @@ static void intel_pmu_lbr_init_core(void)
 	x86_pmu.lbr_to     = 0x60;
 }

-static void intel_pmu_lbr_init_nhm(void)
+void intel_pmu_lbr_init_nhm(void)
 {
 	x86_pmu.lbr_nr     = 16;
 	x86_pmu.lbr_tos    = 0x01c9;

@ -207,12 +213,10 @@ static void intel_pmu_lbr_init_nhm(void)
 	x86_pmu.lbr_to     = 0x6c0;
 }

-static void intel_pmu_lbr_init_atom(void)
+void intel_pmu_lbr_init_atom(void)
 {
 	x86_pmu.lbr_nr	   = 8;
 	x86_pmu.lbr_tos    = 0x01c9;
 	x86_pmu.lbr_from   = 0x40;
 	x86_pmu.lbr_to     = 0x60;
 }
-
-#endif /* CONFIG_CPU_SUP_INTEL */
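The lbr_from/lbr_to values set in these init functions are MSR base addresses; entry i of the LBR stack lives at base + i, with lbr_tos naming the top-of-stack index register. A tiny sketch of the address arithmetic using the Core values above (lbr_nr = 4, from = 0x40, to = 0x60); printing stands in for the rdmsr:

#include <stdio.h>

int main(void)
{
	unsigned lbr_nr = 4, lbr_from = 0x40, lbr_to = 0x60;
	unsigned i;

	for (i = 0; i < lbr_nr; i++)
		printf("LBR[%u]: FROM msr 0x%x, TO msr 0x%x\n",
		       i, lbr_from + i, lbr_to + i);
	return 0;
}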
@ -7,9 +7,13 @@
  * For licencing details see kernel-base/COPYING
  */

-#ifdef CONFIG_CPU_SUP_INTEL
+#include <linux/perf_event.h>

 #include <asm/perf_event_p4.h>
+#include <asm/hardirq.h>
+#include <asm/apic.h>
+
+#include "perf_event.h"

 #define P4_CNTR_LIMIT 3
 /*

@ -1303,7 +1307,7 @@ static __initconst const struct x86_pmu p4_pmu = {
 	.perfctr_second_write	= 1,
 };

-static __init int p4_pmu_init(void)
+__init int p4_pmu_init(void)
 {
 	unsigned int low, high;

@ -1326,5 +1330,3 @@ static __init int p4_pmu_init(void)
 	return 0;
 }
-
-#endif /* CONFIG_CPU_SUP_INTEL */
@ -1,4 +1,7 @@
-#ifdef CONFIG_CPU_SUP_INTEL
+#include <linux/perf_event.h>
+#include <linux/types.h>
+
+#include "perf_event.h"

 /*
  * Not sure about some of these

@ -114,7 +117,7 @@ static __initconst const struct x86_pmu p6_pmu = {
 	.event_constraints	= p6_event_constraints,
 };

-static __init int p6_pmu_init(void)
+__init int p6_pmu_init(void)
 {
 	switch (boot_cpu_data.x86_model) {
 	case 1:

@ -138,5 +141,3 @@ static __init int p6_pmu_init(void)
 	return 0;
 }
-
-#endif /* CONFIG_CPU_SUP_INTEL */
@ -32,15 +32,12 @@ int in_crash_kexec;

 #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)

-static void kdump_nmi_callback(int cpu, struct die_args *args)
+static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
 {
-	struct pt_regs *regs;
 #ifdef CONFIG_X86_32
 	struct pt_regs fixed_regs;
 #endif

-	regs = args->regs;
-
 #ifdef CONFIG_X86_32
 	if (!user_mode_vm(regs)) {
 		crash_fixup_ss_esp(&fixed_regs, regs);
@ -42,7 +42,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
 	put_online_cpus();
 }

-void arch_jump_label_text_poke_early(jump_label_t addr)
+void __init_or_module arch_jump_label_text_poke_early(jump_label_t addr)
 {
 	text_poke_early((void *)addr, ideal_nops[NOP_ATOMIC5],
 			JUMP_LABEL_NOP_SIZE);
@ -511,28 +511,37 @@ single_step_cont(struct pt_regs *regs, struct die_args *args)

 static int was_in_debug_nmi[NR_CPUS];

-static int __kgdb_notify(struct die_args *args, unsigned long cmd)
+static int kgdb_nmi_handler(unsigned int cmd, struct pt_regs *regs)
 {
-	struct pt_regs *regs = args->regs;
-
 	switch (cmd) {
-	case DIE_NMI:
+	case NMI_LOCAL:
 		if (atomic_read(&kgdb_active) != -1) {
 			/* KGDB CPU roundup */
 			kgdb_nmicallback(raw_smp_processor_id(), regs);
 			was_in_debug_nmi[raw_smp_processor_id()] = 1;
 			touch_nmi_watchdog();
-			return NOTIFY_STOP;
+			return NMI_HANDLED;
 		}
-		return NOTIFY_DONE;
+		break;

-	case DIE_NMIUNKNOWN:
+	case NMI_UNKNOWN:
 		if (was_in_debug_nmi[raw_smp_processor_id()]) {
 			was_in_debug_nmi[raw_smp_processor_id()] = 0;
-			return NOTIFY_STOP;
+			return NMI_HANDLED;
 		}
-		return NOTIFY_DONE;
+		break;
+	default:
+		/* do nothing */
+		break;
+	}
+	return NMI_DONE;
+}
+
+static int __kgdb_notify(struct die_args *args, unsigned long cmd)
+{
+	struct pt_regs *regs = args->regs;
+
+	switch (cmd) {
 	case DIE_DEBUG:
 		if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
 			if (user_mode(regs))

@ -590,11 +599,6 @@ kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)

 static struct notifier_block kgdb_notifier = {
 	.notifier_call	= kgdb_notify,
-
-	/*
-	 * Lowest-prio notifier priority, we want to be notified last:
-	 */
-	.priority	= NMI_LOCAL_LOW_PRIOR,
 };

 /**

@ -605,7 +609,31 @@ static struct notifier_block kgdb_notifier = {
  */
 int kgdb_arch_init(void)
 {
-	return register_die_notifier(&kgdb_notifier);
+	int retval;
+
+	retval = register_die_notifier(&kgdb_notifier);
+	if (retval)
+		goto out;
+
+	retval = register_nmi_handler(NMI_LOCAL, kgdb_nmi_handler,
+					0, "kgdb");
+	if (retval)
+		goto out1;
+
+	retval = register_nmi_handler(NMI_UNKNOWN, kgdb_nmi_handler,
+					0, "kgdb");
+
+	if (retval)
+		goto out2;
+
+	return retval;
+
+out2:
+	unregister_nmi_handler(NMI_LOCAL, "kgdb");
+out1:
+	unregister_die_notifier(&kgdb_notifier);
+out:
+	return retval;
 }

 static void kgdb_hw_overflow_handler(struct perf_event *event,

@ -673,6 +701,8 @@ void kgdb_arch_exit(void)
 			breakinfo[i].pev = NULL;
 		}
 	}
+	unregister_nmi_handler(NMI_UNKNOWN, "kgdb");
+	unregister_nmi_handler(NMI_LOCAL, "kgdb");
 	unregister_die_notifier(&kgdb_notifier);
 }
@ -75,10 +75,11 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 /*
  * Undefined/reserved opcodes, conditional jump, Opcode Extension
  * Groups, and some special opcodes can not boost.
- * This is non-const to keep gcc from statically optimizing it out, as
- * variable_test_bit makes gcc think only *(unsigned long*) is used.
+ * This is non-const and volatile to keep gcc from statically
+ * optimizing it out, as variable_test_bit makes gcc think only
+ * *(unsigned long*) is used.
  */
-static u32 twobyte_is_boostable[256 / 32] = {
+static volatile u32 twobyte_is_boostable[256 / 32] = {
 	/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
 	/* ---------------------------------------------- */
 	W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
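The rationale in the comment above is subtle: variable_test_bit()'s inline asm takes the table through a (unsigned long *) memory operand, so gcc believes the named array itself is never read and may fold a const copy away entirely. A hedged userspace illustration of the shape of the problem; this mimics the bit test, it is not the kernel's implementation:

#include <stdio.h>

/* volatile forces a real load instead of compile-time folding */
static volatile unsigned int twobyte_is_boostable[256 / 32] = {
	[0] = 0x0000064c,	/* made-up bit pattern */
};

static int test_bit(int nr, const volatile unsigned int *addr)
{
	return (addr[nr / 32] >> (nr % 32)) & 1;
}

int main(void)
{
	printf("opcode 0x02 boostable: %d\n",
	       test_bit(0x02, twobyte_is_boostable));
	return 0;
}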
@ -0,0 +1,433 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 1991, 1992 Linus Torvalds
|
||||||
|
* Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
|
||||||
|
* Copyright (C) 2011 Don Zickus Red Hat, Inc.
|
||||||
|
*
|
||||||
|
* Pentium III FXSR, SSE support
|
||||||
|
* Gareth Hughes <gareth@valinux.com>, May 2000
|
||||||
|
*/
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Handle hardware traps and faults.
|
||||||
|
*/
|
||||||
|
#include <linux/spinlock.h>
|
||||||
|
#include <linux/kprobes.h>
|
||||||
|
#include <linux/kdebug.h>
|
||||||
|
#include <linux/nmi.h>
|
||||||
|
#include <linux/delay.h>
|
||||||
|
#include <linux/hardirq.h>
|
||||||
|
#include <linux/slab.h>
|
||||||
|
|
||||||
|
#include <linux/mca.h>
|
||||||
|
|
||||||
|
#if defined(CONFIG_EDAC)
|
||||||
|
#include <linux/edac.h>
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#include <linux/atomic.h>
|
||||||
|
#include <asm/traps.h>
|
||||||
|
#include <asm/mach_traps.h>
|
||||||
|
#include <asm/nmi.h>
|
||||||
|
|
||||||
|
#define NMI_MAX_NAMELEN 16
|
||||||
|
struct nmiaction {
|
||||||
|
struct list_head list;
|
||||||
|
nmi_handler_t handler;
|
||||||
|
unsigned int flags;
|
||||||
|
char *name;
|
||||||
|
};
|
||||||
|
|
||||||
|
struct nmi_desc {
|
||||||
|
spinlock_t lock;
|
||||||
|
struct list_head head;
|
||||||
|
};
|
||||||
|
|
||||||
|
static struct nmi_desc nmi_desc[NMI_MAX] =
|
||||||
|
{
|
||||||
|
{
|
||||||
|
.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[0].lock),
|
||||||
|
.head = LIST_HEAD_INIT(nmi_desc[0].head),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[1].lock),
|
||||||
|
.head = LIST_HEAD_INIT(nmi_desc[1].head),
|
||||||
|
},
|
||||||
|
|
||||||
|
};
|
||||||
|
|
||||||
|
struct nmi_stats {
|
||||||
|
unsigned int normal;
|
||||||
|
unsigned int unknown;
|
||||||
|
unsigned int external;
|
||||||
|
unsigned int swallow;
|
||||||
|
};
|
||||||
|
|
||||||
|
static DEFINE_PER_CPU(struct nmi_stats, nmi_stats);
|
||||||
|
|
||||||
|
static int ignore_nmis;
|
||||||
|
|
||||||
|
int unknown_nmi_panic;
|
||||||
|
/*
|
||||||
|
* Prevent NMI reason port (0x61) being accessed simultaneously, can
|
||||||
|
* only be used in NMI handler.
|
||||||
|
*/
|
||||||
|
static DEFINE_RAW_SPINLOCK(nmi_reason_lock);
|
||||||
|
|
||||||
|
static int __init setup_unknown_nmi_panic(char *str)
|
||||||
|
{
|
||||||
|
unknown_nmi_panic = 1;
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
__setup("unknown_nmi_panic", setup_unknown_nmi_panic);
|
||||||
|
|
||||||
|
#define nmi_to_desc(type) (&nmi_desc[type])
|
||||||
|
|
||||||
|
static int notrace __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
|
||||||
|
{
|
||||||
|
struct nmi_desc *desc = nmi_to_desc(type);
|
||||||
|
struct nmiaction *a;
|
||||||
|
int handled=0;
|
||||||
|
|
||||||
|
rcu_read_lock();
|
||||||
|
|
||||||
|
/*
|
||||||
|
* NMIs are edge-triggered, which means if you have enough
|
||||||
|
* of them concurrently, you can lose some because only one
|
||||||
|
* can be latched at any given time. Walk the whole list
|
||||||
|
* to handle those situations.
|
||||||
|
*/
|
||||||
|
list_for_each_entry_rcu(a, &desc->head, list)
|
||||||
|
handled += a->handler(type, regs);
|
||||||
|
|
||||||
|
rcu_read_unlock();
|
||||||
|
|
||||||
|
/* return total number of NMI events handled */
|
||||||
|
return handled;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int __setup_nmi(unsigned int type, struct nmiaction *action)
{
        struct nmi_desc *desc = nmi_to_desc(type);
        unsigned long flags;

        spin_lock_irqsave(&desc->lock, flags);

        /*
         * most handlers of type NMI_UNKNOWN never return because
         * they just assume the NMI is theirs.  Just a sanity check
         * to manage expectations
         */
        WARN_ON_ONCE(type == NMI_UNKNOWN && !list_empty(&desc->head));

        /*
         * some handlers need to be executed first otherwise a fake
         * event confuses some handlers (kdump uses this flag)
         */
        if (action->flags & NMI_FLAG_FIRST)
                list_add_rcu(&action->list, &desc->head);
        else
                list_add_tail_rcu(&action->list, &desc->head);

        spin_unlock_irqrestore(&desc->lock, flags);
        return 0;
}

static struct nmiaction *__free_nmi(unsigned int type, const char *name)
{
        struct nmi_desc *desc = nmi_to_desc(type);
        struct nmiaction *n;
        unsigned long flags;

        spin_lock_irqsave(&desc->lock, flags);

        list_for_each_entry_rcu(n, &desc->head, list) {
                /*
                 * the name passed in to describe the nmi handler
                 * is used as the lookup key
                 */
                if (!strcmp(n->name, name)) {
                        WARN(in_nmi(),
                                "Trying to free NMI (%s) from NMI context!\n", n->name);
                        list_del_rcu(&n->list);
                        break;
                }
        }

        spin_unlock_irqrestore(&desc->lock, flags);
        synchronize_rcu();
        return (n);
}
int register_nmi_handler(unsigned int type, nmi_handler_t handler,
                        unsigned long nmiflags, const char *devname)
{
        struct nmiaction *action;
        int retval = -ENOMEM;

        if (!handler)
                return -EINVAL;

        action = kzalloc(sizeof(struct nmiaction), GFP_KERNEL);
        if (!action)
                goto fail_action;

        action->handler = handler;
        action->flags = nmiflags;
        action->name = kstrndup(devname, NMI_MAX_NAMELEN, GFP_KERNEL);
        if (!action->name)
                goto fail_action_name;

        retval = __setup_nmi(type, action);

        if (retval)
                goto fail_setup_nmi;

        return retval;

fail_setup_nmi:
        kfree(action->name);
fail_action_name:
        kfree(action);
fail_action:

        return retval;
}
EXPORT_SYMBOL_GPL(register_nmi_handler);

void unregister_nmi_handler(unsigned int type, const char *name)
{
        struct nmiaction *a;

        a = __free_nmi(type, name);
        if (a) {
                kfree(a->name);
                kfree(a);
        }
}

EXPORT_SYMBOL_GPL(unregister_nmi_handler);
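Registration is thus a single call keyed by an NMI type plus a device-name string, and the same string is the lookup key at teardown. A usage sketch, assuming a hypothetical "mydev" driver and the handler sketched earlier:

/* Registration sketch; "mydev" and my_nmi_handler() are hypothetical. */
static int __init mydev_init(void)
{
        /* Handlers run in list order; NMI_FLAG_FIRST jumps the queue. */
        return register_nmi_handler(NMI_LOCAL, my_nmi_handler, 0, "mydev");
}

static void __exit mydev_exit(void)
{
        /* The devname string passed at registration keys the removal. */
        unregister_nmi_handler(NMI_LOCAL, "mydev");
}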
static notrace __kprobes void
pci_serr_error(unsigned char reason, struct pt_regs *regs)
{
        pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
                 reason, smp_processor_id());

        /*
         * On some machines, PCI SERR line is used to report memory
         * errors. EDAC makes use of it.
         */
#if defined(CONFIG_EDAC)
        if (edac_handler_set()) {
                edac_atomic_assert_error();
                return;
        }
#endif

        if (panic_on_unrecovered_nmi)
                panic("NMI: Not continuing");

        pr_emerg("Dazed and confused, but trying to continue\n");

        /* Clear and disable the PCI SERR error line. */
        reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
        outb(reason, NMI_REASON_PORT);
}

static notrace __kprobes void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
        unsigned long i;

        pr_emerg(
        "NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
                 reason, smp_processor_id());
        show_registers(regs);

        if (panic_on_io_nmi)
                panic("NMI IOCK error: Not continuing");

        /* Re-enable the IOCK line, wait for a few seconds */
        reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
        outb(reason, NMI_REASON_PORT);

        i = 20000;
        while (--i) {
                touch_nmi_watchdog();
                udelay(100);
        }

        reason &= ~NMI_REASON_CLEAR_IOCHK;
        outb(reason, NMI_REASON_PORT);
}

static notrace __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
        int handled;

        /*
         * Use 'false' as back-to-back NMIs are dealt with one level up.
         * Of course this makes having multiple 'unknown' handlers useless
         * as only the first one is ever run (unless it can actually determine
         * if it caused the NMI)
         */
        handled = nmi_handle(NMI_UNKNOWN, regs, false);
        if (handled) {
                __this_cpu_add(nmi_stats.unknown, handled);
                return;
        }

        __this_cpu_add(nmi_stats.unknown, 1);

#ifdef CONFIG_MCA
        /*
         * Might actually be able to figure out what the guilty party
         * is:
         */
        if (MCA_bus) {
                mca_handle_nmi();
                return;
        }
#endif
        pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
                 reason, smp_processor_id());

        pr_emerg("Do you have a strange power saving mode enabled?\n");
        if (unknown_nmi_panic || panic_on_unrecovered_nmi)
                panic("NMI: Not continuing");

        pr_emerg("Dazed and confused, but trying to continue\n");
}
static DEFINE_PER_CPU(bool, swallow_nmi);
static DEFINE_PER_CPU(unsigned long, last_nmi_rip);

static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
{
        unsigned char reason = 0;
        int handled;
        bool b2b = false;

        /*
         * CPU-specific NMI must be processed before non-CPU-specific
         * NMI, otherwise we may lose it, because the CPU-specific
         * NMI can not be detected/processed on other CPUs.
         */

        /*
         * Back-to-back NMIs are interesting because they can either
         * be two NMIs or more than two NMIs (anything over two is dropped
         * due to NMI being edge-triggered).  If this is the second half
         * of the back-to-back NMI, assume we dropped things and process
         * more handlers.  Otherwise reset the 'swallow' NMI behaviour.
         */
        if (regs->ip == __this_cpu_read(last_nmi_rip))
                b2b = true;
        else
                __this_cpu_write(swallow_nmi, false);

        __this_cpu_write(last_nmi_rip, regs->ip);

        handled = nmi_handle(NMI_LOCAL, regs, b2b);
        __this_cpu_add(nmi_stats.normal, handled);
        if (handled) {
                /*
                 * There are cases when a NMI handler handles multiple
                 * events in the current NMI.  One of these events may
                 * be queued for the next NMI.  Because the event is
                 * already handled, the next NMI will result in an unknown
                 * NMI.  Instead lets flag this for a potential NMI to
                 * swallow.
                 */
                if (handled > 1)
                        __this_cpu_write(swallow_nmi, true);
                return;
        }

        /* Non-CPU-specific NMI: NMI sources can be processed on any CPU */
        raw_spin_lock(&nmi_reason_lock);
        reason = get_nmi_reason();

        if (reason & NMI_REASON_MASK) {
                if (reason & NMI_REASON_SERR)
                        pci_serr_error(reason, regs);
                else if (reason & NMI_REASON_IOCHK)
                        io_check_error(reason, regs);
#ifdef CONFIG_X86_32
                /*
                 * Reassert NMI in case it became active
                 * meanwhile as it's edge-triggered:
                 */
                reassert_nmi();
#endif
                __this_cpu_add(nmi_stats.external, 1);
                raw_spin_unlock(&nmi_reason_lock);
                return;
        }
        raw_spin_unlock(&nmi_reason_lock);

        /*
         * Only one NMI can be latched at a time.  To handle
         * this we may process multiple nmi handlers at once to
         * cover the case where an NMI is dropped.  The downside
         * to this approach is we may process an NMI prematurely,
         * while its real NMI is sitting latched.  This will cause
         * an unknown NMI on the next run of the NMI processing.
         *
         * We tried to flag that condition above, by setting the
         * swallow_nmi flag when we process more than one event.
         * This condition is also only present on the second half
         * of a back-to-back NMI, so we flag that condition too.
         *
         * If both are true, we assume we already processed this
         * NMI previously and we swallow it.  Otherwise we reset
         * the logic.
         *
         * There are scenarios where we may accidentally swallow
         * a 'real' unknown NMI.  For example, while processing
         * a perf NMI another perf NMI comes in along with a
         * 'real' unknown NMI.  These two NMIs get combined into
         * one (as described above).  When the next NMI gets
         * processed, it will be flagged by perf as handled, but
         * no one will know that there was a 'real' unknown NMI sent
         * also.  As a result it gets swallowed.  Or if the first
         * perf NMI returns two events handled then the second
         * NMI will get eaten by the logic below, again losing a
         * 'real' unknown NMI.  But this is the best we can do
         * for now.
         */
        if (b2b && __this_cpu_read(swallow_nmi))
                __this_cpu_add(nmi_stats.swallow, 1);
        else
                unknown_nmi_error(reason, regs);
}

dotraplinkage notrace __kprobes void
do_nmi(struct pt_regs *regs, long error_code)
{
        nmi_enter();

        inc_irq_stat(__nmi_count);

        if (!ignore_nmis)
                default_do_nmi(regs);

        nmi_exit();
}

void stop_nmi(void)
{
        ignore_nmis++;
}

void restart_nmi(void)
{
        ignore_nmis--;
}

/* reset the back-to-back NMI logic */
void local_touch_nmi(void)
{
        __this_cpu_write(last_nmi_rip, 0);
}
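The swallow decision is easiest to see on a concrete timeline. An illustrative walk-through, with hypothetical event counts:

/*
 * Illustrative timeline of the swallow logic (counts are hypothetical):
 *
 *   NMI #1: the perf handler drains 2 events  -> handled == 2
 *           -> swallow_nmi = true (one event belonged to a latched NMI)
 *   NMI #2: arrives at the same regs->ip      -> b2b == true
 *           no handler claims it              -> handled == 0
 *           b2b && swallow_nmi                -> counted in nmi_stats.swallow
 *                                               instead of "unknown NMI"
 */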
@@ -57,6 +57,7 @@
 #include <asm/idle.h>
 #include <asm/syscalls.h>
 #include <asm/debugreg.h>
+#include <asm/nmi.h>
 
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
@@ -107,6 +108,7 @@ void cpu_idle(void)
                        if (cpu_is_offline(cpu))
                                play_dead();
 
+                       local_touch_nmi();
                        local_irq_disable();
                        /* Don't trace irqs off for idle */
                        stop_critical_timings();
@@ -51,6 +51,7 @@
 #include <asm/idle.h>
 #include <asm/syscalls.h>
 #include <asm/debugreg.h>
+#include <asm/nmi.h>
 
 asmlinkage extern void ret_from_fork(void);
 
@@ -133,6 +134,7 @@ void cpu_idle(void)
                         * from here on, until they go to idle.
                         * Otherwise, idle callbacks can misfire.
                         */
+                       local_touch_nmi();
                        local_irq_disable();
                        enter_idle();
                        /* Don't trace irqs off for idle */
@@ -464,7 +464,7 @@ static inline void kb_wait(void)
        }
 }
 
-static void vmxoff_nmi(int cpu, struct die_args *args)
+static void vmxoff_nmi(int cpu, struct pt_regs *regs)
 {
        cpu_emergency_vmxoff();
 }
@@ -736,14 +736,10 @@ static nmi_shootdown_cb shootdown_callback;
 
 static atomic_t waiting_for_crash_ipi;
 
-static int crash_nmi_callback(struct notifier_block *self,
-                       unsigned long val, void *data)
+static int crash_nmi_callback(unsigned int val, struct pt_regs *regs)
 {
        int cpu;
 
-       if (val != DIE_NMI)
-               return NOTIFY_OK;
-
        cpu = raw_smp_processor_id();
 
        /* Don't do anything if this handler is invoked on crashing cpu.
@@ -751,10 +747,10 @@ static int crash_nmi_callback(struct notifier_block *self,
         * an NMI if system was initially booted with nmi_watchdog parameter.
         */
        if (cpu == crashing_cpu)
-               return NOTIFY_STOP;
+               return NMI_HANDLED;
        local_irq_disable();
 
-       shootdown_callback(cpu, (struct die_args *)data);
+       shootdown_callback(cpu, regs);
 
        atomic_dec(&waiting_for_crash_ipi);
        /* Assume hlt works */
@@ -762,7 +758,7 @@ static int crash_nmi_callback(struct notifier_block *self,
        for (;;)
                cpu_relax();
 
-       return 1;
+       return NMI_HANDLED;
 }
 
 static void smp_send_nmi_allbutself(void)
@@ -770,12 +766,6 @@ static void smp_send_nmi_allbutself(void)
        apic->send_IPI_allbutself(NMI_VECTOR);
 }
 
-static struct notifier_block crash_nmi_nb = {
-       .notifier_call = crash_nmi_callback,
-       /* we want to be the first one called */
-       .priority = NMI_LOCAL_HIGH_PRIOR+1,
-};
-
 /* Halt all other CPUs, calling the specified function on each of them
  *
  * This function can be used to halt all other CPUs on crash
@@ -794,7 +784,8 @@ void nmi_shootdown_cpus(nmi_shootdown_cb callback)
 
        atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
        /* Would it be better to replace the trap vector here? */
-       if (register_die_notifier(&crash_nmi_nb))
+       if (register_nmi_handler(NMI_LOCAL, crash_nmi_callback,
+                                NMI_FLAG_FIRST, "crash"))
                return;         /* return what? */
        /* Ensure the new callback function is set before sending
         * out the NMI
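The kdump hunks above set the template every other caller in this merge follows: delete the struct notifier_block, change the callback to take (unsigned int type, struct pt_regs *regs), and swap register_die_notifier()/unregister_die_notifier() for the name-keyed NMI calls. Schematically — my_old_cb, my_new_cb and "mydrv" are placeholders, not taken from any one file:

/* Before: die-notifier plumbing, with manual event filtering. */
static int my_old_cb(struct notifier_block *self, unsigned long val, void *data)
{
        if (val != DIE_NMI)
                return NOTIFY_OK;       /* not an NMI event */
        /* ...handle... */
        return NOTIFY_STOP;
}
/* registered via register_die_notifier(&my_nb) */

/* After: a direct handler; the core dispatches by type. */
static int my_new_cb(unsigned int type, struct pt_regs *regs)
{
        /* ...handle... */
        return NMI_HANDLED;     /* or NMI_DONE when the event is not ours */
}
/* registered via register_nmi_handler(NMI_LOCAL, my_new_cb, 0, "mydrv") */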
@@ -81,15 +81,6 @@ gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
 DECLARE_BITMAP(used_vectors, NR_VECTORS);
 EXPORT_SYMBOL_GPL(used_vectors);
 
-static int ignore_nmis;
-
-int unknown_nmi_panic;
-/*
- * Prevent NMI reason port (0x61) being accessed simultaneously, can
- * only be used in NMI handler.
- */
-static DEFINE_RAW_SPINLOCK(nmi_reason_lock);
-
 static inline void conditional_sti(struct pt_regs *regs)
 {
        if (regs->flags & X86_EFLAGS_IF)
@@ -307,152 +298,6 @@ gp_in_kernel:
        die("general protection fault", regs, error_code);
 }
 
-static int __init setup_unknown_nmi_panic(char *str)
-{
-       unknown_nmi_panic = 1;
-       return 1;
-}
-__setup("unknown_nmi_panic", setup_unknown_nmi_panic);
-
-static notrace __kprobes void
-pci_serr_error(unsigned char reason, struct pt_regs *regs)
-{
-       pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
-                reason, smp_processor_id());
-
-       /*
-        * On some machines, PCI SERR line is used to report memory
-        * errors. EDAC makes use of it.
-        */
-#if defined(CONFIG_EDAC)
-       if (edac_handler_set()) {
-               edac_atomic_assert_error();
-               return;
-       }
-#endif
-
-       if (panic_on_unrecovered_nmi)
-               panic("NMI: Not continuing");
-
-       pr_emerg("Dazed and confused, but trying to continue\n");
-
-       /* Clear and disable the PCI SERR error line. */
-       reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
-       outb(reason, NMI_REASON_PORT);
-}
-
-static notrace __kprobes void
-io_check_error(unsigned char reason, struct pt_regs *regs)
-{
-       unsigned long i;
-
-       pr_emerg(
-       "NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
-                reason, smp_processor_id());
-       show_registers(regs);
-
-       if (panic_on_io_nmi)
-               panic("NMI IOCK error: Not continuing");
-
-       /* Re-enable the IOCK line, wait for a few seconds */
-       reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
-       outb(reason, NMI_REASON_PORT);
-
-       i = 20000;
-       while (--i) {
-               touch_nmi_watchdog();
-               udelay(100);
-       }
-
-       reason &= ~NMI_REASON_CLEAR_IOCHK;
-       outb(reason, NMI_REASON_PORT);
-}
-
-static notrace __kprobes void
-unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
-{
-       if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) ==
-                       NOTIFY_STOP)
-               return;
-#ifdef CONFIG_MCA
-       /*
-        * Might actually be able to figure out what the guilty party
-        * is:
-        */
-       if (MCA_bus) {
-               mca_handle_nmi();
-               return;
-       }
-#endif
-       pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
-                reason, smp_processor_id());
-
-       pr_emerg("Do you have a strange power saving mode enabled?\n");
-       if (unknown_nmi_panic || panic_on_unrecovered_nmi)
-               panic("NMI: Not continuing");
-
-       pr_emerg("Dazed and confused, but trying to continue\n");
-}
-
-static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
-{
-       unsigned char reason = 0;
-
-       /*
-        * CPU-specific NMI must be processed before non-CPU-specific
-        * NMI, otherwise we may lose it, because the CPU-specific
-        * NMI can not be detected/processed on other CPUs.
-        */
-       if (notify_die(DIE_NMI, "nmi", regs, 0, 2, SIGINT) == NOTIFY_STOP)
-               return;
-
-       /* Non-CPU-specific NMI: NMI sources can be processed on any CPU */
-       raw_spin_lock(&nmi_reason_lock);
-       reason = get_nmi_reason();
-
-       if (reason & NMI_REASON_MASK) {
-               if (reason & NMI_REASON_SERR)
-                       pci_serr_error(reason, regs);
-               else if (reason & NMI_REASON_IOCHK)
-                       io_check_error(reason, regs);
-#ifdef CONFIG_X86_32
-               /*
-                * Reassert NMI in case it became active
-                * meanwhile as it's edge-triggered:
-                */
-               reassert_nmi();
-#endif
-               raw_spin_unlock(&nmi_reason_lock);
-               return;
-       }
-       raw_spin_unlock(&nmi_reason_lock);
-
-       unknown_nmi_error(reason, regs);
-}
-
-dotraplinkage notrace __kprobes void
-do_nmi(struct pt_regs *regs, long error_code)
-{
-       nmi_enter();
-
-       inc_irq_stat(__nmi_count);
-
-       if (!ignore_nmis)
-               default_do_nmi(regs);
-
-       nmi_exit();
-}
-
-void stop_nmi(void)
-{
-       ignore_nmis++;
-}
-
-void restart_nmi(void)
-{
-       ignore_nmis--;
-}
-
 /* May run on IST stack. */
 dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
 {
@@ -61,26 +61,15 @@ u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
 }
 
 
-static int profile_exceptions_notify(struct notifier_block *self,
-                                    unsigned long val, void *data)
+static int profile_exceptions_notify(unsigned int val, struct pt_regs *regs)
 {
-       struct die_args *args = (struct die_args *)data;
-       int ret = NOTIFY_DONE;
-
-       switch (val) {
-       case DIE_NMI:
-               if (ctr_running)
-                       model->check_ctrs(args->regs, &__get_cpu_var(cpu_msrs));
-               else if (!nmi_enabled)
-                       break;
-               else
-                       model->stop(&__get_cpu_var(cpu_msrs));
-               ret = NOTIFY_STOP;
-               break;
-       default:
-               break;
-       }
-       return ret;
+       if (ctr_running)
+               model->check_ctrs(regs, &__get_cpu_var(cpu_msrs));
+       else if (!nmi_enabled)
+               return NMI_DONE;
+       else
+               model->stop(&__get_cpu_var(cpu_msrs));
+       return NMI_HANDLED;
 }
 
 static void nmi_cpu_save_registers(struct op_msrs *msrs)
@@ -363,12 +352,6 @@ static void nmi_cpu_setup(void *dummy)
        apic_write(APIC_LVTPC, APIC_DM_NMI);
 }
 
-static struct notifier_block profile_exceptions_nb = {
-       .notifier_call = profile_exceptions_notify,
-       .next = NULL,
-       .priority = NMI_LOCAL_LOW_PRIOR,
-};
-
 static void nmi_cpu_restore_registers(struct op_msrs *msrs)
 {
        struct op_msr *counters = msrs->counters;
@@ -402,8 +385,6 @@ static void nmi_cpu_shutdown(void *dummy)
        apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
        apic_write(APIC_LVTERR, v);
        nmi_cpu_restore_registers(msrs);
-       if (model->cpu_down)
-               model->cpu_down();
 }
 
 static void nmi_cpu_up(void *dummy)
@@ -508,7 +489,8 @@ static int nmi_setup(void)
        ctr_running = 0;
        /* make variables visible to the nmi handler: */
        smp_mb();
-       err = register_die_notifier(&profile_exceptions_nb);
+       err = register_nmi_handler(NMI_LOCAL, profile_exceptions_notify,
+                                       0, "oprofile");
        if (err)
                goto fail;
 
@@ -538,7 +520,7 @@ static void nmi_shutdown(void)
        put_online_cpus();
        /* make variables visible to the nmi handler: */
        smp_mb();
-       unregister_die_notifier(&profile_exceptions_nb);
+       unregister_nmi_handler(NMI_LOCAL, "oprofile");
        msrs = &get_cpu_var(cpu_msrs);
        model->shutdown(msrs);
        free_msrs();
@@ -18,32 +18,16 @@
 #include <asm/apic.h>
 #include <asm/ptrace.h>
 
-static int profile_timer_exceptions_notify(struct notifier_block *self,
-                                          unsigned long val, void *data)
+static int profile_timer_exceptions_notify(unsigned int val, struct pt_regs *regs)
 {
-       struct die_args *args = (struct die_args *)data;
-       int ret = NOTIFY_DONE;
-
-       switch (val) {
-       case DIE_NMI:
-               oprofile_add_sample(args->regs, 0);
-               ret = NOTIFY_STOP;
-               break;
-       default:
-               break;
-       }
-       return ret;
+       oprofile_add_sample(regs, 0);
+       return NMI_HANDLED;
 }
 
-static struct notifier_block profile_timer_exceptions_nb = {
-       .notifier_call = profile_timer_exceptions_notify,
-       .next = NULL,
-       .priority = NMI_LOW_PRIOR,
-};
-
 static int timer_start(void)
 {
-       if (register_die_notifier(&profile_timer_exceptions_nb))
+       if (register_nmi_handler(NMI_LOCAL, profile_timer_exceptions_notify,
+                                       0, "oprofile-timer"))
                return 1;
        return 0;
 }
@@ -51,7 +35,7 @@ static int timer_start(void)
 
 static void timer_stop(void)
 {
-       unregister_die_notifier(&profile_timer_exceptions_nb);
+       unregister_nmi_handler(NMI_LOCAL, "oprofile-timer");
        synchronize_sched();  /* Allow already-started NMIs to complete. */
 }
 
@@ -29,8 +29,6 @@
 #include "op_x86_model.h"
 #include "op_counter.h"
 
-#define NUM_COUNTERS           4
-#define NUM_COUNTERS_F15H      6
 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
 #define NUM_VIRT_COUNTERS      32
 #else
@@ -69,35 +67,6 @@ struct ibs_state {
 static struct ibs_config ibs_config;
 static struct ibs_state ibs_state;
 
-/*
- * IBS cpuid feature detection
- */
-
-#define IBS_CPUID_FEATURES     0x8000001b
-
-/*
- * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
- * bit 0 is used to indicate the existence of IBS.
- */
-#define IBS_CAPS_AVAIL         (1U<<0)
-#define IBS_CAPS_FETCHSAM      (1U<<1)
-#define IBS_CAPS_OPSAM         (1U<<2)
-#define IBS_CAPS_RDWROPCNT     (1U<<3)
-#define IBS_CAPS_OPCNT         (1U<<4)
-#define IBS_CAPS_BRNTRGT       (1U<<5)
-#define IBS_CAPS_OPCNTEXT      (1U<<6)
-
-#define IBS_CAPS_DEFAULT       (IBS_CAPS_AVAIL         \
-                                | IBS_CAPS_FETCHSAM    \
-                                | IBS_CAPS_OPSAM)
-
-/*
- * IBS APIC setup
- */
-#define IBSCTL                         0x1cc
-#define IBSCTL_LVT_OFFSET_VALID                (1ULL<<8)
-#define IBSCTL_LVT_OFFSET_MASK         0x0F
-
 /*
  * IBS randomization macros
  */
@@ -105,27 +74,6 @@ static struct ibs_state ibs_state;
 #define IBS_RANDOM_MASK                        ((1ULL << IBS_RANDOM_BITS) - 1)
 #define IBS_RANDOM_MAXCNT_OFFSET       (1ULL << (IBS_RANDOM_BITS - 5))
 
-static u32 get_ibs_caps(void)
-{
-       u32 ibs_caps;
-       unsigned int max_level;
-
-       if (!boot_cpu_has(X86_FEATURE_IBS))
-               return 0;
-
-       /* check IBS cpuid feature flags */
-       max_level = cpuid_eax(0x80000000);
-       if (max_level < IBS_CPUID_FEATURES)
-               return IBS_CAPS_DEFAULT;
-
-       ibs_caps = cpuid_eax(IBS_CPUID_FEATURES);
-       if (!(ibs_caps & IBS_CAPS_AVAIL))
-               /* cpuid flags not valid */
-               return IBS_CAPS_DEFAULT;
-
-       return ibs_caps;
-}
-
 /*
  * 16-bit Linear Feedback Shift Register (LFSR)
  *
@@ -316,81 +264,6 @@ static void op_amd_stop_ibs(void)
        wrmsrl(MSR_AMD64_IBSOPCTL, 0);
 }
 
-static inline int get_eilvt(int offset)
-{
-       return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1);
-}
-
-static inline int put_eilvt(int offset)
-{
-       return !setup_APIC_eilvt(offset, 0, 0, 1);
-}
-
-static inline int ibs_eilvt_valid(void)
-{
-       int offset;
-       u64 val;
-       int valid = 0;
-
-       preempt_disable();
-
-       rdmsrl(MSR_AMD64_IBSCTL, val);
-       offset = val & IBSCTL_LVT_OFFSET_MASK;
-
-       if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
-               pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
-                      smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
-               goto out;
-       }
-
-       if (!get_eilvt(offset)) {
-               pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
-                      smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
-               goto out;
-       }
-
-       valid = 1;
-out:
-       preempt_enable();
-
-       return valid;
-}
-
-static inline int get_ibs_offset(void)
-{
-       u64 val;
-
-       rdmsrl(MSR_AMD64_IBSCTL, val);
-       if (!(val & IBSCTL_LVT_OFFSET_VALID))
-               return -EINVAL;
-
-       return val & IBSCTL_LVT_OFFSET_MASK;
-}
-
-static void setup_APIC_ibs(void)
-{
-       int offset;
-
-       offset = get_ibs_offset();
-       if (offset < 0)
-               goto failed;
-
-       if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0))
-               return;
-failed:
-       pr_warn("oprofile: IBS APIC setup failed on cpu #%d\n",
-               smp_processor_id());
-}
-
-static void clear_APIC_ibs(void)
-{
-       int offset;
-
-       offset = get_ibs_offset();
-       if (offset >= 0)
-               setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
-}
-
 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
 
 static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
@@ -439,7 +312,7 @@ static int op_amd_fill_in_addresses(struct op_msrs * const msrs)
                        goto fail;
                }
                /* both registers must be reserved */
-               if (num_counters == NUM_COUNTERS_F15H) {
+               if (num_counters == AMD64_NUM_COUNTERS_F15H) {
                        msrs->counters[i].addr = MSR_F15H_PERF_CTR + (i << 1);
                        msrs->controls[i].addr = MSR_F15H_PERF_CTL + (i << 1);
                } else {
@@ -504,15 +377,6 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
                val |= op_x86_get_ctrl(model, &counter_config[virt]);
                wrmsrl(msrs->controls[i].addr, val);
        }
-
-       if (ibs_caps)
-               setup_APIC_ibs();
-}
-
-static void op_amd_cpu_shutdown(void)
-{
-       if (ibs_caps)
-               clear_APIC_ibs();
 }
 
 static int op_amd_check_ctrs(struct pt_regs * const regs,
@@ -575,86 +439,6 @@ static void op_amd_stop(struct op_msrs const * const msrs)
        op_amd_stop_ibs();
 }
 
-static int setup_ibs_ctl(int ibs_eilvt_off)
-{
-       struct pci_dev *cpu_cfg;
-       int nodes;
-       u32 value = 0;
-
-       nodes = 0;
-       cpu_cfg = NULL;
-       do {
-               cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
-                                        PCI_DEVICE_ID_AMD_10H_NB_MISC,
-                                        cpu_cfg);
-               if (!cpu_cfg)
-                       break;
-               ++nodes;
-               pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
-                                      | IBSCTL_LVT_OFFSET_VALID);
-               pci_read_config_dword(cpu_cfg, IBSCTL, &value);
-               if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) {
-                       pci_dev_put(cpu_cfg);
-                       printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
-                              "IBSCTL = 0x%08x\n", value);
-                       return -EINVAL;
-               }
-       } while (1);
-
-       if (!nodes) {
-               printk(KERN_DEBUG "No CPU node configured for IBS\n");
-               return -ENODEV;
-       }
-
-       return 0;
-}
-
-/*
- * This runs only on the current cpu. We try to find an LVT offset and
- * setup the local APIC. For this we must disable preemption. On
- * success we initialize all nodes with this offset. This updates then
- * the offset in the IBS_CTL per-node msr. The per-core APIC setup of
- * the IBS interrupt vector is called from op_amd_setup_ctrs()/op_-
- * amd_cpu_shutdown() using the new offset.
- */
-static int force_ibs_eilvt_setup(void)
-{
-       int offset;
-       int ret;
-
-       preempt_disable();
-       /* find the next free available EILVT entry, skip offset 0 */
-       for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
-               if (get_eilvt(offset))
-                       break;
-       }
-       preempt_enable();
-
-       if (offset == APIC_EILVT_NR_MAX) {
-               printk(KERN_DEBUG "No EILVT entry available\n");
-               return -EBUSY;
-       }
-
-       ret = setup_ibs_ctl(offset);
-       if (ret)
-               goto out;
-
-       if (!ibs_eilvt_valid()) {
-               ret = -EFAULT;
-               goto out;
-       }
-
-       pr_err(FW_BUG "using offset %d for IBS interrupts\n", offset);
-       pr_err(FW_BUG "workaround enabled for IBS LVT offset\n");
-
-       return 0;
-out:
-       preempt_disable();
-       put_eilvt(offset);
-       preempt_enable();
-       return ret;
-}
-
 /*
  * check and reserve APIC extended interrupt LVT offset for IBS if
  * available
@@ -667,17 +451,6 @@ static void init_ibs(void)
        if (!ibs_caps)
                return;
 
-       if (ibs_eilvt_valid())
-               goto out;
-
-       if (!force_ibs_eilvt_setup())
-               goto out;
-
-       /* Failed to setup ibs */
-       ibs_caps = 0;
-       return;
-
-out:
        printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps);
 }
 
@@ -741,9 +514,9 @@ static int op_amd_init(struct oprofile_operations *ops)
                ops->create_files = setup_ibs_files;
 
        if (boot_cpu_data.x86 == 0x15) {
-               num_counters = NUM_COUNTERS_F15H;
+               num_counters = AMD64_NUM_COUNTERS_F15H;
        } else {
-               num_counters = NUM_COUNTERS;
+               num_counters = AMD64_NUM_COUNTERS;
        }
 
        op_amd_spec.num_counters = num_counters;
@@ -760,7 +533,6 @@ struct op_x86_model_spec op_amd_spec = {
        .init                   = op_amd_init,
        .fill_in_addresses      = &op_amd_fill_in_addresses,
        .setup_ctrs             = &op_amd_setup_ctrs,
-       .cpu_down               = &op_amd_cpu_shutdown,
        .check_ctrs             = &op_amd_check_ctrs,
        .start                  = &op_amd_start,
        .stop                   = &op_amd_stop,
@@ -28,7 +28,7 @@ static int counter_width = 32;
 
 #define MSR_PPRO_EVENTSEL_RESERVED     ((0xFFFFFFFFULL<<32)|(1ULL<<21))
 
-static u64 *reset_value;
+static u64 reset_value[OP_MAX_COUNTER];
 
 static void ppro_shutdown(struct op_msrs const * const msrs)
 {
@@ -40,10 +40,6 @@ static void ppro_shutdown(struct op_msrs const * const msrs)
                release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
                release_evntsel_nmi(MSR_P6_EVNTSEL0 + i);
        }
-       if (reset_value) {
-               kfree(reset_value);
-               reset_value = NULL;
-       }
 }
 
 static int ppro_fill_in_addresses(struct op_msrs * const msrs)
@@ -79,13 +75,6 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
        u64 val;
        int i;
 
-       if (!reset_value) {
-               reset_value = kzalloc(sizeof(reset_value[0]) * num_counters,
-                                       GFP_ATOMIC);
-               if (!reset_value)
-                       return;
-       }
-
        if (cpu_has_arch_perfmon) {
                union cpuid10_eax eax;
                eax.full = cpuid_eax(0xa);
@@ -141,13 +130,6 @@ static int ppro_check_ctrs(struct pt_regs * const regs,
        u64 val;
        int i;
 
-       /*
-        * This can happen if perf counters are in use when
-        * we steal the die notifier NMI.
-        */
-       if (unlikely(!reset_value))
-               goto out;
-
        for (i = 0; i < num_counters; ++i) {
                if (!reset_value[i])
                        continue;
@@ -158,7 +140,6 @@ static int ppro_check_ctrs(struct pt_regs * const regs,
                wrmsrl(msrs->counters[i].addr, -reset_value[i]);
        }
 
-out:
        /* Only P6 based Pentium M need to re-unmask the apic vector but it
         * doesn't hurt other P6 variant */
        apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);
@@ -179,8 +160,6 @@ static void ppro_start(struct op_msrs const * const msrs)
        u64 val;
        int i;
 
-       if (!reset_value)
-               return;
        for (i = 0; i < num_counters; ++i) {
                if (reset_value[i]) {
                        rdmsrl(msrs->controls[i].addr, val);
@@ -196,8 +175,6 @@ static void ppro_stop(struct op_msrs const * const msrs)
        u64 val;
        int i;
 
-       if (!reset_value)
-               return;
        for (i = 0; i < num_counters; ++i) {
                if (!reset_value[i])
                        continue;
@@ -242,7 +219,7 @@ static void arch_perfmon_setup_counters(void)
                eax.split.bit_width = 40;
        }
 
-       num_counters = eax.split.num_counters;
+       num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
 
        op_arch_perfmon_spec.num_counters = num_counters;
        op_arch_perfmon_spec.num_controls = num_counters;
@@ -43,7 +43,6 @@ struct op_x86_model_spec {
        int             (*fill_in_addresses)(struct op_msrs * const msrs);
        void            (*setup_ctrs)(struct op_x86_model_spec const *model,
                                      struct op_msrs const * const msrs);
-       void            (*cpu_down)(void);
        int             (*check_ctrs)(struct pt_regs * const regs,
                                      struct op_msrs const * const msrs);
        void            (*start)(struct op_msrs const * const msrs);
@@ -50,6 +50,7 @@
 #include <acpi/hed.h>
 #include <asm/mce.h>
 #include <asm/tlbflush.h>
+#include <asm/nmi.h>
 
 #include "apei-internal.h"
 
@@ -749,15 +750,11 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
        }
 }
 
-static int ghes_notify_nmi(struct notifier_block *this,
-                          unsigned long cmd, void *data)
+static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
 {
        struct ghes *ghes, *ghes_global = NULL;
        int sev, sev_global = -1;
-       int ret = NOTIFY_DONE;
-
-       if (cmd != DIE_NMI)
-               return ret;
+       int ret = NMI_DONE;
 
        raw_spin_lock(&ghes_nmi_lock);
        list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
@@ -770,10 +767,10 @@ static int ghes_notify_nmi(struct notifier_block *this,
                        sev_global = sev;
                        ghes_global = ghes;
                }
-               ret = NOTIFY_STOP;
+               ret = NMI_HANDLED;
        }
 
-       if (ret == NOTIFY_DONE)
+       if (ret == NMI_DONE)
                goto out;
 
        if (sev_global >= GHES_SEV_PANIC) {
@@ -825,10 +822,6 @@ static struct notifier_block ghes_notifier_sci = {
        .notifier_call = ghes_notify_sci,
 };
 
-static struct notifier_block ghes_notifier_nmi = {
-       .notifier_call = ghes_notify_nmi,
-};
-
 static unsigned long ghes_esource_prealloc_size(
        const struct acpi_hest_generic *generic)
 {
@@ -918,7 +911,8 @@ static int __devinit ghes_probe(struct platform_device *ghes_dev)
                ghes_estatus_pool_expand(len);
                mutex_lock(&ghes_list_mutex);
                if (list_empty(&ghes_nmi))
-                       register_die_notifier(&ghes_notifier_nmi);
+                       register_nmi_handler(NMI_LOCAL, ghes_notify_nmi, 0,
+                                               "ghes");
                list_add_rcu(&ghes->list, &ghes_nmi);
                mutex_unlock(&ghes_list_mutex);
                break;
@@ -964,7 +958,7 @@ static int __devexit ghes_remove(struct platform_device *ghes_dev)
                mutex_lock(&ghes_list_mutex);
                list_del_rcu(&ghes->list);
                if (list_empty(&ghes_nmi))
-                       unregister_die_notifier(&ghes_notifier_nmi);
+                       unregister_nmi_handler(NMI_LOCAL, "ghes");
                mutex_unlock(&ghes_list_mutex);
                /*
                 * To synchronize with NMI handler, ghes can only be
@@ -65,6 +65,7 @@
  * mechanism for it at that time.
  */
 #include <asm/kdebug.h>
+#include <asm/nmi.h>
 #define HAVE_DIE_NMI
 #endif
 
@@ -1077,17 +1078,8 @@ static void ipmi_unregister_watchdog(int ipmi_intf)
 
 #ifdef HAVE_DIE_NMI
 static int
-ipmi_nmi(struct notifier_block *self, unsigned long val, void *data)
+ipmi_nmi(unsigned int val, struct pt_regs *regs)
 {
-       struct die_args *args = data;
-
-       if (val != DIE_NMIUNKNOWN)
-               return NOTIFY_OK;
-
-       /* Hack, if it's a memory or I/O error, ignore it. */
-       if (args->err & 0xc0)
-               return NOTIFY_OK;
-
        /*
         * If we get here, it's an NMI that's not a memory or I/O
         * error.  We can't truly tell if it's from IPMI or not
@@ -1097,15 +1089,15 @@ ipmi_nmi(struct notifier_block *self, unsigned long val, void *data)
 
        if (testing_nmi) {
                testing_nmi = 2;
-               return NOTIFY_STOP;
+               return NMI_HANDLED;
        }
 
        /* If we are not expecting a timeout, ignore it. */
        if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE)
-               return NOTIFY_OK;
+               return NMI_DONE;
 
        if (preaction_val != WDOG_PRETIMEOUT_NMI)
-               return NOTIFY_OK;
+               return NMI_DONE;
 
        /*
        * If no one else handled the NMI, we assume it was the IPMI
@@ -1120,12 +1112,8 @@ ipmi_nmi(struct notifier_block *self, unsigned long val, void *data)
                panic(PFX "pre-timeout");
        }
 
-       return NOTIFY_STOP;
+       return NMI_HANDLED;
 }
 
-static struct notifier_block ipmi_nmi_handler = {
-       .notifier_call = ipmi_nmi
-};
-
 #endif
 
 static int wdog_reboot_handler(struct notifier_block *this,
@@ -1290,7 +1278,8 @@ static void check_parms(void)
                }
        }
        if (do_nmi && !nmi_handler_registered) {
-               rv = register_die_notifier(&ipmi_nmi_handler);
+               rv = register_nmi_handler(NMI_UNKNOWN, ipmi_nmi, 0,
+                                               "ipmi");
                if (rv) {
                        printk(KERN_WARNING PFX
                               "Can't register nmi handler\n");
@@ -1298,7 +1287,7 @@ static void check_parms(void)
                } else
                        nmi_handler_registered = 1;
        } else if (!do_nmi && nmi_handler_registered) {
-               unregister_die_notifier(&ipmi_nmi_handler);
+               unregister_nmi_handler(NMI_UNKNOWN, "ipmi");
                nmi_handler_registered = 0;
        }
 #endif
@@ -1336,7 +1325,7 @@ static int __init ipmi_wdog_init(void)
        if (rv) {
 #ifdef HAVE_DIE_NMI
                if (nmi_handler_registered)
-                       unregister_die_notifier(&ipmi_nmi_handler);
+                       unregister_nmi_handler(NMI_UNKNOWN, "ipmi");
 #endif
                atomic_notifier_chain_unregister(&panic_notifier_list,
                                                 &wdog_panic_notifier);
@@ -1357,7 +1346,7 @@ static void __exit ipmi_wdog_exit(void)
 
 #ifdef HAVE_DIE_NMI
        if (nmi_handler_registered)
-               unregister_die_notifier(&ipmi_nmi_handler);
+               unregister_nmi_handler(NMI_UNKNOWN, "ipmi");
 #endif
 
        atomic_notifier_chain_unregister(&panic_notifier_list,
@@ -35,6 +35,7 @@
 #include <linux/notifier.h>
 #include <asm/cacheflush.h>
 #endif /* CONFIG_HPWDT_NMI_DECODING */
+#include <asm/nmi.h>
 
 #define HPWDT_VERSION                  "1.3.0"
 #define SECS_TO_TICKS(secs)            ((secs) * 1000 / 128)
@@ -477,15 +478,11 @@ static int hpwdt_time_left(void)
 /*
  *     NMI Handler
  */
-static int hpwdt_pretimeout(struct notifier_block *nb, unsigned long ulReason,
-                               void *data)
+static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs)
 {
        unsigned long rom_pl;
        static int die_nmi_called;
 
-       if (ulReason != DIE_NMIUNKNOWN)
-               goto out;
-
        if (!hpwdt_nmi_decoding)
                goto out;
 
@@ -508,7 +505,7 @@ static int hpwdt_pretimeout(struct notifier_block *nb, unsigned long ulReason,
                        "Management Log for details.\n");
 
 out:
-       return NOTIFY_OK;
+       return NMI_DONE;
 }
 #endif /* CONFIG_HPWDT_NMI_DECODING */
 
@@ -648,13 +645,6 @@ static struct miscdevice hpwdt_miscdev = {
        .fops = &hpwdt_fops,
 };
 
-#ifdef CONFIG_HPWDT_NMI_DECODING
-static struct notifier_block die_notifier = {
-       .notifier_call = hpwdt_pretimeout,
-       .priority = 0,
-};
-#endif /* CONFIG_HPWDT_NMI_DECODING */
-
 /*
  *     Init & Exit
  */
@@ -740,10 +730,9 @@ static int __devinit hpwdt_init_nmi_decoding(struct pci_dev *dev)
         * die notify list to handle a critical NMI. The default is to
         * be last so other users of the NMI signal can function.
         */
-       if (priority)
-               die_notifier.priority = 0x7FFFFFFF;
-
-       retval = register_die_notifier(&die_notifier);
+       retval = register_nmi_handler(NMI_UNKNOWN, hpwdt_pretimeout,
+                                       (priority) ? NMI_FLAG_FIRST : 0,
+                                       "hpwdt");
        if (retval != 0) {
                dev_warn(&dev->dev,
                        "Unable to register a die notifier (err=%d).\n",
@@ -763,7 +752,7 @@ static int __devinit hpwdt_init_nmi_decoding(struct pci_dev *dev)
 
 static void hpwdt_exit_nmi_decoding(void)
 {
-       unregister_die_notifier(&die_notifier);
+       unregister_nmi_handler(NMI_UNKNOWN, "hpwdt");
        if (cru_rom_addr)
                iounmap(cru_rom_addr);
 }
@@ -580,9 +580,6 @@ int unregister_module_notifier(struct notifier_block * nb);
 
 extern void print_modules(void);
 
-extern void module_update_tracepoints(void);
-extern int module_get_iter_tracepoints(struct tracepoint_iter *iter);
-
 #else /* !CONFIG_MODULES... */
 #define EXPORT_SYMBOL(sym)
 #define EXPORT_SYMBOL_GPL(sym)
@@ -698,15 +695,6 @@ static inline int unregister_module_notifier(struct notifier_block * nb)
 static inline void print_modules(void)
 {
 }
-
-static inline void module_update_tracepoints(void)
-{
-}
-
-static inline int module_get_iter_tracepoints(struct tracepoint_iter *iter)
-{
-       return 0;
-}
 #endif /* CONFIG_MODULES */
 
 #ifdef CONFIG_SYSFS
@@ -220,7 +220,10 @@ struct perf_event_attr {
                                mmap_data      :  1, /* non-exec mmap data    */
                                sample_id_all  :  1, /* sample_type all events */
 
-                               __reserved_1   : 45;
+                               exclude_host   :  1, /* don't count in host   */
+                               exclude_guest  :  1, /* don't count in guest  */
+
+                               __reserved_1   : 43;
 
        union {
                __u32           wakeup_events;    /* wakeup every n events */
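The two new bits carve guest/host filtering out of the reserved field. A hedged userspace sketch of an attr that counts cycles only while a KVM guest is running (the event choice is arbitrary, not taken from this merge):

/* Sketch: fill a perf_event_attr that counts only in guest mode. */
static void setup_guest_only_cycles(struct perf_event_attr *attr)
{
        memset(attr, 0, sizeof(*attr));
        attr->type          = PERF_TYPE_HARDWARE;
        attr->config        = PERF_COUNT_HW_CPU_CYCLES;
        attr->size          = sizeof(*attr);
        attr->exclude_host  = 1;        /* don't count in host */
        attr->exclude_guest = 0;        /* do count in guest */
}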
@@ -154,6 +154,8 @@ void ring_buffer_record_enable(struct ring_buffer *buffer);
 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
 
+unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu);
 unsigned long ring_buffer_entries(struct ring_buffer *buffer);
 unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
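Both new declarations are per-cpu introspection helpers alongside the existing entries/overruns accessors. A sketch of a consumer that logs them; it assumes an already-allocated buffer and is not taken from this merge:

/* Sketch only: dump per-cpu occupancy of a trace ring buffer. */
static void dump_cpu_buffer_stats(struct ring_buffer *buffer, int cpu)
{
        pr_info("cpu%d: oldest ts=%lu bytes=%lu entries=%lu\n", cpu,
                ring_buffer_oldest_event_ts(buffer, cpu),
                ring_buffer_bytes_cpu(buffer, cpu),
                ring_buffer_entries_cpu(buffer, cpu));
}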
@@ -15,5 +15,6 @@
 extern u64 notrace trace_clock_local(void);
 extern u64 notrace trace_clock(void);
 extern u64 notrace trace_clock_global(void);
+extern u64 notrace trace_clock_counter(void);
 
 #endif /* _LINUX_TRACE_CLOCK_H */
@@ -54,8 +54,18 @@ extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe,
                                                void *data);
 extern void tracepoint_probe_update_all(void);
 
+#ifdef CONFIG_MODULES
+struct tp_module {
+       struct list_head list;
+       unsigned int num_tracepoints;
+       struct tracepoint * const *tracepoints_ptrs;
+};
+#endif /* CONFIG_MODULES */
+
 struct tracepoint_iter {
-       struct module *module;
+#ifdef CONFIG_MODULES
+       struct tp_module *module;
+#endif /* CONFIG_MODULES */
        struct tracepoint * const *tracepoint;
 };
 
@@ -63,8 +73,6 @@ extern void tracepoint_iter_start(struct tracepoint_iter *iter);
 extern void tracepoint_iter_next(struct tracepoint_iter *iter);
 extern void tracepoint_iter_stop(struct tracepoint_iter *iter);
 extern void tracepoint_iter_reset(struct tracepoint_iter *iter);
-extern int tracepoint_get_iter_range(struct tracepoint * const **tracepoint,
-       struct tracepoint * const *begin, struct tracepoint * const *end);
 
 /*
  * tracepoint_synchronize_unregister must be called between the last tracepoint
@@ -78,17 +86,6 @@ static inline void tracepoint_synchronize_unregister(void)
 
 #define PARAMS(args...) args
 
-#ifdef CONFIG_TRACEPOINTS
-extern
-void tracepoint_update_probe_range(struct tracepoint * const *begin,
-       struct tracepoint * const *end);
-#else
-static inline
-void tracepoint_update_probe_range(struct tracepoint * const *begin,
-       struct tracepoint * const *end)
-{ }
-#endif /* CONFIG_TRACEPOINTS */
-
 #endif /* _LINUX_TRACEPOINT_H */
 
 /*
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -711,6 +711,9 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
 #undef __perf_count
 #define __perf_count(c) __count = (c)

+#undef TP_perf_assign
+#define TP_perf_assign(args...) args
+
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
 static notrace void							\
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -29,6 +29,7 @@
 #include <linux/hardirq.h>
 #include <linux/rculist.h>
 #include <linux/uaccess.h>
+#include <linux/suspend.h>
 #include <linux/syscalls.h>
 #include <linux/anon_inodes.h>
 #include <linux/kernel_stat.h>
@@ -5758,6 +5759,7 @@ struct pmu *perf_init_event(struct perf_event *event)
 	pmu = idr_find(&pmu_idr, event->attr.type);
 	rcu_read_unlock();
 	if (pmu) {
+		event->pmu = pmu;
 		ret = pmu->event_init(event);
 		if (ret)
 			pmu = ERR_PTR(ret);
@@ -5765,6 +5767,7 @@ struct pmu *perf_init_event(struct perf_event *event)
 	}

 	list_for_each_entry_rcu(pmu, &pmus, entry) {
+		event->pmu = pmu;
 		ret = pmu->event_init(event);
 		if (!ret)
 			goto unlock;
@@ -5891,8 +5894,6 @@ done:
 		return ERR_PTR(err);
 	}

-	event->pmu = pmu;
-
 	if (!event->parent) {
 		if (event->attach_state & PERF_ATTACH_TASK)
 			jump_label_inc(&perf_sched_events);
@@ -6852,7 +6853,7 @@ static void __cpuinit perf_event_init_cpu(int cpu)
 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

 	mutex_lock(&swhash->hlist_mutex);
-	if (swhash->hlist_refcount > 0) {
+	if (swhash->hlist_refcount > 0 && !swhash->swevent_hlist) {
 		struct swevent_hlist *hlist;

 		hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
@@ -6941,7 +6942,14 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (long)hcpu;

-	switch (action & ~CPU_TASKS_FROZEN) {
+	/*
+	 * Ignore suspend/resume action, the perf_pm_notifier will
+	 * take care of that.
+	 */
+	if (action & CPU_TASKS_FROZEN)
+		return NOTIFY_OK;
+
+	switch (action) {

 	case CPU_UP_PREPARE:
 	case CPU_DOWN_FAILED:
@@ -6960,6 +6968,90 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 	return NOTIFY_OK;
 }

+static void perf_pm_resume_cpu(void *unused)
+{
+	struct perf_cpu_context *cpuctx;
+	struct perf_event_context *ctx;
+	struct pmu *pmu;
+	int idx;
+
+	idx = srcu_read_lock(&pmus_srcu);
+	list_for_each_entry_rcu(pmu, &pmus, entry) {
+		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
+		ctx = cpuctx->task_ctx;
+
+		perf_ctx_lock(cpuctx, ctx);
+		perf_pmu_disable(cpuctx->ctx.pmu);
+
+		cpu_ctx_sched_out(cpuctx, EVENT_ALL);
+		if (ctx)
+			ctx_sched_out(ctx, cpuctx, EVENT_ALL);
+
+		perf_pmu_enable(cpuctx->ctx.pmu);
+		perf_ctx_unlock(cpuctx, ctx);
+	}
+	srcu_read_unlock(&pmus_srcu, idx);
+}
+
+static void perf_pm_suspend_cpu(void *unused)
+{
+	struct perf_cpu_context *cpuctx;
+	struct perf_event_context *ctx;
+	struct pmu *pmu;
+	int idx;
+
+	idx = srcu_read_lock(&pmus_srcu);
+	list_for_each_entry_rcu(pmu, &pmus, entry) {
+		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
+		ctx = cpuctx->task_ctx;
+
+		perf_ctx_lock(cpuctx, ctx);
+		perf_pmu_disable(cpuctx->ctx.pmu);
+
+		perf_event_sched_in(cpuctx, ctx, current);
+
+		perf_pmu_enable(cpuctx->ctx.pmu);
+		perf_ctx_unlock(cpuctx, ctx);
+	}
+	srcu_read_unlock(&pmus_srcu, idx);
+}
+
+static int perf_resume(void)
+{
+	get_online_cpus();
+	smp_call_function(perf_pm_resume_cpu, NULL, 1);
+	put_online_cpus();
+
+	return NOTIFY_OK;
+}
+
+static int perf_suspend(void)
+{
+	get_online_cpus();
+	smp_call_function(perf_pm_suspend_cpu, NULL, 1);
+	put_online_cpus();
+
+	return NOTIFY_OK;
+}
+
+static int perf_pm(struct notifier_block *self, unsigned long action, void *ptr)
+{
+	switch (action) {
+	case PM_POST_HIBERNATION:
+	case PM_POST_SUSPEND:
+		return perf_resume();
+	case PM_HIBERNATION_PREPARE:
+	case PM_SUSPEND_PREPARE:
+		return perf_suspend();
+	default:
+		return NOTIFY_DONE;
+	}
+}
+
+static struct notifier_block perf_pm_notifier = {
+	.notifier_call = perf_pm,
+};
+
 void __init perf_event_init(void)
 {
 	int ret;
@@ -6974,6 +7066,7 @@ void __init perf_event_init(void)
 	perf_tp_register();
 	perf_cpu_notifier(perf_cpu_notify);
 	register_reboot_notifier(&perf_reboot_notifier);
+	register_pm_notifier(&perf_pm_notifier);

 	ret = init_hw_breakpoint();
 	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
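perf now quiesces its per-CPU contexts around suspend/resume through a PM notifier instead of relying on the CPU-hotplug FROZEN transitions. For readers unfamiliar with that API, here is a stripped-down sketch of the same register_pm_notifier() pattern in a hypothetical module (illustration only, not code from this merge):

	#include <linux/module.h>
	#include <linux/suspend.h>

	/* Hypothetical example module: log suspend/resume transitions. */
	static int example_pm(struct notifier_block *self, unsigned long action,
			      void *ptr)
	{
		switch (action) {
		case PM_SUSPEND_PREPARE:
		case PM_HIBERNATION_PREPARE:
			pr_info("example: quiescing before sleep\n");
			return NOTIFY_OK;
		case PM_POST_SUSPEND:
		case PM_POST_HIBERNATION:
			pr_info("example: resuming after wakeup\n");
			return NOTIFY_OK;
		default:
			return NOTIFY_DONE;
		}
	}

	static struct notifier_block example_pm_notifier = {
		.notifier_call = example_pm,
	};

	static int __init example_init(void)
	{
		return register_pm_notifier(&example_pm_notifier);
	}

	static void __exit example_exit(void)
	{
		unregister_pm_notifier(&example_pm_notifier);
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");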
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -3487,50 +3487,3 @@ void module_layout(struct module *mod,
 }
 EXPORT_SYMBOL(module_layout);
 #endif
-
-#ifdef CONFIG_TRACEPOINTS
-void module_update_tracepoints(void)
-{
-	struct module *mod;
-
-	mutex_lock(&module_mutex);
-	list_for_each_entry(mod, &modules, list)
-		if (!mod->taints)
-			tracepoint_update_probe_range(mod->tracepoints_ptrs,
-				mod->tracepoints_ptrs + mod->num_tracepoints);
-	mutex_unlock(&module_mutex);
-}
-
-/*
- * Returns 0 if current not found.
- * Returns 1 if current found.
- */
-int module_get_iter_tracepoints(struct tracepoint_iter *iter)
-{
-	struct module *iter_mod;
-	int found = 0;
-
-	mutex_lock(&module_mutex);
-	list_for_each_entry(iter_mod, &modules, list) {
-		if (!iter_mod->taints) {
-			/*
-			 * Sorted module list
-			 */
-			if (iter_mod < iter->module)
-				continue;
-			else if (iter_mod > iter->module)
-				iter->tracepoint = NULL;
-			found = tracepoint_get_iter_range(&iter->tracepoint,
-				iter_mod->tracepoints_ptrs,
-				iter_mod->tracepoints_ptrs
-					+ iter_mod->num_tracepoints);
-			if (found) {
-				iter->module = iter_mod;
-				break;
-			}
-		}
-	}
-	mutex_unlock(&module_mutex);
-	return found;
-}
-#endif
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -15,6 +15,8 @@ ifdef CONFIG_TRACING_BRANCHES
 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
 endif

+CFLAGS_trace_events_filter.o := -I$(src)
+
 #
 # Make the trace clocks available generally: it's infrastructure
 # relied on by ptrace for example:
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3862,6 +3862,14 @@ void ftrace_kill(void)
 	clear_ftrace_function();
 }

+/**
+ * Test if ftrace is dead or not.
+ */
+int ftrace_is_dead(void)
+{
+	return ftrace_disabled;
+}
+
 /**
  * register_ftrace_function - register a function for profiling
  * @ops - ops structure that holds the function for profiling.
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -488,12 +488,14 @@ struct ring_buffer_per_cpu {
 	struct buffer_page		*reader_page;
 	unsigned long			lost_events;
 	unsigned long			last_overrun;
+	local_t				entries_bytes;
 	local_t				commit_overrun;
 	local_t				overrun;
 	local_t				entries;
 	local_t				committing;
 	local_t				commits;
 	unsigned long			read;
+	unsigned long			read_bytes;
 	u64				write_stamp;
 	u64				read_stamp;
 };
@@ -1708,6 +1710,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
 	 * the counters.
 	 */
 	local_add(entries, &cpu_buffer->overrun);
+	local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);

 	/*
 	 * The entries will be zeroed out when we move the
@@ -1863,6 +1866,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
 	event = __rb_page_index(tail_page, tail);
 	kmemcheck_annotate_bitfield(event, bitfield);

+	/* account for padding bytes */
+	local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
+
 	/*
 	 * Save the original length to the meta data.
 	 * This will be used by the reader to add lost event
@@ -2054,6 +2060,9 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	if (!tail)
 		tail_page->page->time_stamp = ts;

+	/* account for these added bytes */
+	local_add(length, &cpu_buffer->entries_bytes);
+
 	return event;
 }

@@ -2076,6 +2085,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
 	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
 		unsigned long write_mask =
 			local_read(&bpage->write) & ~RB_WRITE_MASK;
+		unsigned long event_length = rb_event_length(event);
 		/*
 		 * This is on the tail page. It is possible that
 		 * a write could come in and move the tail page
@@ -2085,8 +2095,11 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
 		old_index += write_mask;
 		new_index += write_mask;
 		index = local_cmpxchg(&bpage->write, old_index, new_index);
-		if (index == old_index)
+		if (index == old_index) {
+			/* update counters */
+			local_sub(event_length, &cpu_buffer->entries_bytes);
 			return 1;
+		}
 	}

 	/* could not discard */
@@ -2660,6 +2673,58 @@ rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
 		(local_read(&cpu_buffer->overrun) + cpu_buffer->read);
 }

+/**
+ * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
+ * @buffer: The ring buffer
+ * @cpu: The per CPU buffer to read from.
+ */
+unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
+{
+	unsigned long flags;
+	struct ring_buffer_per_cpu *cpu_buffer;
+	struct buffer_page *bpage;
+	unsigned long ret;
+
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
+		return 0;
+
+	cpu_buffer = buffer->buffers[cpu];
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	/*
+	 * if the tail is on reader_page, oldest time stamp is on the reader
+	 * page
+	 */
+	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
+		bpage = cpu_buffer->reader_page;
+	else
+		bpage = rb_set_head_page(cpu_buffer);
+	ret = bpage->page->time_stamp;
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
+
+/**
+ * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
+ * @buffer: The ring buffer
+ * @cpu: The per CPU buffer to read from.
+ */
+unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu)
+{
+	struct ring_buffer_per_cpu *cpu_buffer;
+	unsigned long ret;
+
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
+		return 0;
+
+	cpu_buffer = buffer->buffers[cpu];
+	ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
+
 /**
  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
  * @buffer: The ring buffer
@@ -3527,11 +3592,13 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
 	cpu_buffer->reader_page->read = 0;

 	local_set(&cpu_buffer->commit_overrun, 0);
+	local_set(&cpu_buffer->entries_bytes, 0);
 	local_set(&cpu_buffer->overrun, 0);
 	local_set(&cpu_buffer->entries, 0);
 	local_set(&cpu_buffer->committing, 0);
 	local_set(&cpu_buffer->commits, 0);
 	cpu_buffer->read = 0;
+	cpu_buffer->read_bytes = 0;

 	cpu_buffer->write_stamp = 0;
 	cpu_buffer->read_stamp = 0;
@@ -3918,6 +3985,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 	} else {
 		/* update the entry counter */
 		cpu_buffer->read += rb_page_entries(reader);
+		cpu_buffer->read_bytes += BUF_PAGE_SIZE;

 		/* swap the pages */
 		rb_init_page(bpage);
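The new entries_bytes/read_bytes accounting feeds the per-CPU stats file that the trace.c changes below extend with "bytes:" and "oldest event ts:" lines. A small userspace sketch that dumps those stats (the debugfs mount point and file path are assumptions, matching the conventional location):

	#include <stdio.h>

	/* Dump per-CPU ring buffer stats, which now include "bytes:" and
	 * "oldest event ts:" lines. Assumes debugfs is mounted at the
	 * usual place. */
	int main(void)
	{
		const char *path = "/sys/kernel/debug/tracing/per_cpu/cpu0/stats";
		char line[256];
		FILE *f = fopen(path, "r");

		if (!f) {
			perror(path);
			return 1;
		}
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
		return 0;
	}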
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -435,6 +435,7 @@ static struct {
 } trace_clocks[] = {
 	{ trace_clock_local,	"local" },
 	{ trace_clock_global,	"global" },
+	{ trace_clock_counter,	"counter" },
 };

 int trace_clock_id;
@@ -2159,6 +2160,14 @@ void trace_default_header(struct seq_file *m)
 	}
 }

+static void test_ftrace_alive(struct seq_file *m)
+{
+	if (!ftrace_is_dead())
+		return;
+	seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
+	seq_printf(m, "# MAY BE MISSING FUNCTION EVENTS\n");
+}
+
 static int s_show(struct seq_file *m, void *v)
 {
 	struct trace_iterator *iter = v;
@@ -2168,6 +2177,7 @@ static int s_show(struct seq_file *m, void *v)
 	if (iter->tr) {
 		seq_printf(m, "# tracer: %s\n", iter->trace->name);
 		seq_puts(m, "#\n");
+		test_ftrace_alive(m);
 	}
 	if (iter->trace && iter->trace->print_header)
 		iter->trace->print_header(m);
@@ -2710,9 +2720,9 @@ static const char readme_msg[] =
 	"# cat /sys/kernel/debug/tracing/trace_options\n"
 	"noprint-parent nosym-offset nosym-addr noverbose\n"
 	"# echo print-parent > /sys/kernel/debug/tracing/trace_options\n"
-	"# echo 1 > /sys/kernel/debug/tracing/tracing_enabled\n"
+	"# echo 1 > /sys/kernel/debug/tracing/tracing_on\n"
 	"# cat /sys/kernel/debug/tracing/trace > /tmp/trace.txt\n"
-	"# echo 0 > /sys/kernel/debug/tracing/tracing_enabled\n"
+	"# echo 0 > /sys/kernel/debug/tracing/tracing_on\n"
 ;

 static ssize_t
@@ -3568,6 +3578,30 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 	return cnt;
 }

+static ssize_t
+tracing_total_entries_read(struct file *filp, char __user *ubuf,
+				size_t cnt, loff_t *ppos)
+{
+	struct trace_array *tr = filp->private_data;
+	char buf[64];
+	int r, cpu;
+	unsigned long size = 0, expanded_size = 0;
+
+	mutex_lock(&trace_types_lock);
+	for_each_tracing_cpu(cpu) {
+		size += tr->entries >> 10;
+		if (!ring_buffer_expanded)
+			expanded_size += trace_buf_size >> 10;
+	}
+	if (ring_buffer_expanded)
+		r = sprintf(buf, "%lu\n", size);
+	else
+		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
+	mutex_unlock(&trace_types_lock);
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
 static ssize_t
 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
 			  size_t cnt, loff_t *ppos)
@@ -3594,22 +3628,24 @@ tracing_free_buffer_release(struct inode *inode, struct file *filp)
 	return 0;
 }

-static int mark_printk(const char *fmt, ...)
-{
-	int ret;
-	va_list args;
-	va_start(args, fmt);
-	ret = trace_vprintk(0, fmt, args);
-	va_end(args);
-	return ret;
-}
-
 static ssize_t
 tracing_mark_write(struct file *filp, const char __user *ubuf,
 					size_t cnt, loff_t *fpos)
 {
-	char *buf;
-	size_t written;
+	unsigned long addr = (unsigned long)ubuf;
+	struct ring_buffer_event *event;
+	struct ring_buffer *buffer;
+	struct print_entry *entry;
+	unsigned long irq_flags;
+	struct page *pages[2];
+	int nr_pages = 1;
+	ssize_t written;
+	void *page1;
+	void *page2;
+	int offset;
+	int size;
+	int len;
+	int ret;

 	if (tracing_disabled)
 		return -EINVAL;
@@ -3617,28 +3653,81 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 	if (cnt > TRACE_BUF_SIZE)
 		cnt = TRACE_BUF_SIZE;

-	buf = kmalloc(cnt + 2, GFP_KERNEL);
-	if (buf == NULL)
-		return -ENOMEM;
+	/*
+	 * Userspace is injecting traces into the kernel trace buffer.
+	 * We want to be as non intrusive as possible.
+	 * To do so, we do not want to allocate any special buffers
+	 * or take any locks, but instead write the userspace data
+	 * straight into the ring buffer.
+	 *
+	 * First we need to pin the userspace buffer into memory,
+	 * which, most likely it is, because it just referenced it.
+	 * But there's no guarantee that it is. By using get_user_pages_fast()
+	 * and kmap_atomic/kunmap_atomic() we can get access to the
+	 * pages directly. We then write the data directly into the
+	 * ring buffer.
+	 */
+	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

-	if (copy_from_user(buf, ubuf, cnt)) {
-		kfree(buf);
-		return -EFAULT;
+	/* check if we cross pages */
+	if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
+		nr_pages = 2;
+
+	offset = addr & (PAGE_SIZE - 1);
+	addr &= PAGE_MASK;
+
+	ret = get_user_pages_fast(addr, nr_pages, 0, pages);
+	if (ret < nr_pages) {
+		while (--ret >= 0)
+			put_page(pages[ret]);
+		written = -EFAULT;
+		goto out;
 	}
-	if (buf[cnt-1] != '\n') {
-		buf[cnt] = '\n';
-		buf[cnt+1] = '\0';
-	} else
-		buf[cnt] = '\0';

-	written = mark_printk("%s", buf);
-	kfree(buf);
+	page1 = kmap_atomic(pages[0]);
+	if (nr_pages == 2)
+		page2 = kmap_atomic(pages[1]);
+
+	local_save_flags(irq_flags);
+	size = sizeof(*entry) + cnt + 2; /* possible \n added */
+	buffer = global_trace.buffer;
+	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
+					  irq_flags, preempt_count());
+	if (!event) {
+		/* Ring buffer disabled, return as if not open for write */
+		written = -EBADF;
+		goto out_unlock;
+	}
+
+	entry = ring_buffer_event_data(event);
+	entry->ip = _THIS_IP_;
+
+	if (nr_pages == 2) {
+		len = PAGE_SIZE - offset;
+		memcpy(&entry->buf, page1 + offset, len);
+		memcpy(&entry->buf[len], page2, cnt - len);
+	} else
+		memcpy(&entry->buf, page1 + offset, cnt);
+
+	if (entry->buf[cnt - 1] != '\n') {
+		entry->buf[cnt] = '\n';
+		entry->buf[cnt + 1] = '\0';
+	} else
+		entry->buf[cnt] = '\0';
+
+	ring_buffer_unlock_commit(buffer, event);
+
+	written = cnt;
+
 	*fpos += written;

-	/* don't tell userspace we wrote more - it might confuse them */
-	if (written > cnt)
-		written = cnt;
-
+ out_unlock:
+	if (nr_pages == 2)
+		kunmap_atomic(page2);
+	kunmap_atomic(page1);
+	while (nr_pages > 0)
+		put_page(pages[--nr_pages]);
+ out:
 	return written;
 }
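With the rewrite above, a write to trace_marker lands in the ring buffer without any temporary kernel allocation. From userspace nothing changes; a minimal writer looks like this (sketch; the debugfs path is an assumption, matching the conventional mount point):

	#include <fcntl.h>
	#include <string.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		const char *msg = "hello from userspace\n";
		int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);

		if (fd < 0) {
			perror("trace_marker");
			return 1;
		}
		/* The kernel now copies this straight from our pinned pages
		 * into the ring buffer. */
		if (write(fd, msg, strlen(msg)) < 0)
			perror("write");
		close(fd);
		return 0;
	}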
@@ -3739,6 +3828,12 @@ static const struct file_operations tracing_entries_fops = {
 	.llseek		= generic_file_llseek,
 };

+static const struct file_operations tracing_total_entries_fops = {
+	.open		= tracing_open_generic,
+	.read		= tracing_total_entries_read,
+	.llseek		= generic_file_llseek,
+};
+
 static const struct file_operations tracing_free_buffer_fops = {
 	.write		= tracing_free_buffer_write,
 	.release	= tracing_free_buffer_release,
@@ -3808,8 +3903,6 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 	if (info->read < PAGE_SIZE)
 		goto read;

-	info->read = 0;
-
 	trace_access_lock(info->cpu);
 	ret = ring_buffer_read_page(info->tr->buffer,
 				    &info->spare,
@@ -3819,6 +3912,8 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 	if (ret < 0)
 		return 0;

+	info->read = 0;
+
 read:
 	size = PAGE_SIZE - info->read;
 	if (size > count)
@@ -4026,6 +4121,8 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
 	struct trace_array *tr = &global_trace;
 	struct trace_seq *s;
 	unsigned long cnt;
+	unsigned long long t;
+	unsigned long usec_rem;

 	s = kmalloc(sizeof(*s), GFP_KERNEL);
 	if (!s)
@@ -4042,6 +4139,17 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
 	cnt = ring_buffer_commit_overrun_cpu(tr->buffer, cpu);
 	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

+	cnt = ring_buffer_bytes_cpu(tr->buffer, cpu);
+	trace_seq_printf(s, "bytes: %ld\n", cnt);
+
+	t = ns2usecs(ring_buffer_oldest_event_ts(tr->buffer, cpu));
+	usec_rem = do_div(t, USEC_PER_SEC);
+	trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n", t, usec_rem);
+
+	t = ns2usecs(ring_buffer_time_stamp(tr->buffer, cpu));
+	usec_rem = do_div(t, USEC_PER_SEC);
+	trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
+
 	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);

 	kfree(s);
@@ -4450,6 +4558,9 @@ static __init int tracer_init_debugfs(void)
 	trace_create_file("buffer_size_kb", 0644, d_tracer,
 			&global_trace, &tracing_entries_fops);

+	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
+			&global_trace, &tracing_total_entries_fops);
+
 	trace_create_file("free_buffer", 0644, d_tracer,
 			&global_trace, &tracing_free_buffer_fops);
@@ -4566,6 +4677,12 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)

 	tracing_off();

+	/* Did function tracer already get disabled? */
+	if (ftrace_is_dead()) {
+		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
+		printk("# MAY BE MISSING FUNCTION EVENTS\n");
+	}
+
 	if (disable_tracing)
 		ftrace_kill();
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -579,11 +579,13 @@ static inline int ftrace_trace_task(struct task_struct *task)

 	return test_tsk_trace_trace(task);
 }
+extern int ftrace_is_dead(void);
 #else
 static inline int ftrace_trace_task(struct task_struct *task)
 {
 	return 1;
 }
+static inline int ftrace_is_dead(void) { return 0; }
 #endif

 /*
@@ -761,16 +763,10 @@ struct filter_pred {
 	filter_pred_fn_t 	fn;
 	u64 			val;
 	struct regex		regex;
-	/*
-	 * Leaf nodes use field_name, ops is used by AND and OR
-	 * nodes. The field_name is always freed when freeing a pred.
-	 * We can overload field_name for ops and have it freed
-	 * as well.
-	 */
-	union {
-		char		*field_name;
-		unsigned short	*ops;
-	};
+	unsigned short		*ops;
+#ifdef CONFIG_FTRACE_STARTUP_TEST
+	struct ftrace_event_field *field;
+#endif
 	int 			offset;
 	int 			not;
 	int 			op;
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -113,3 +113,15 @@ u64 notrace trace_clock_global(void)

 	return now;
 }
+
+static atomic64_t trace_counter;
+
+/*
+ * trace_clock_counter(): simply an atomic counter.
+ * Use the trace_counter "counter" for cases where you do not care
+ * about timings, but are interested in strict ordering.
+ */
+u64 notrace trace_clock_counter(void)
+{
+	return atomic64_add_return(1, &trace_counter);
+}
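The counter clock becomes selectable through the trace_clock file thanks to the trace_clocks[] entry added in trace.c above. A small sketch that switches to it (the debugfs path is an assumption, matching the conventional mount point):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/sys/kernel/debug/tracing/trace_clock", O_WRONLY);

		if (fd < 0) {
			perror("trace_clock");
			return 1;
		}
		/* Equivalent to: echo counter > trace_clock */
		if (write(fd, "counter", 7) < 0)
			perror("write");
		close(fd);
		return 0;
	}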
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -381,6 +381,63 @@ get_pred_parent(struct filter_pred *pred, struct filter_pred *preds,
 	return pred;
 }

+enum walk_return {
+	WALK_PRED_ABORT,
+	WALK_PRED_PARENT,
+	WALK_PRED_DEFAULT,
+};
+
+typedef int (*filter_pred_walkcb_t) (enum move_type move,
+				     struct filter_pred *pred,
+				     int *err, void *data);
+
+static int walk_pred_tree(struct filter_pred *preds,
+			  struct filter_pred *root,
+			  filter_pred_walkcb_t cb, void *data)
+{
+	struct filter_pred *pred = root;
+	enum move_type move = MOVE_DOWN;
+	int done = 0;
+
+	if (!preds)
+		return -EINVAL;
+
+	do {
+		int err = 0, ret;
+
+		ret = cb(move, pred, &err, data);
+		if (ret == WALK_PRED_ABORT)
+			return err;
+		if (ret == WALK_PRED_PARENT)
+			goto get_parent;
+
+		switch (move) {
+		case MOVE_DOWN:
+			if (pred->left != FILTER_PRED_INVALID) {
+				pred = &preds[pred->left];
+				continue;
+			}
+			goto get_parent;
+		case MOVE_UP_FROM_LEFT:
+			pred = &preds[pred->right];
+			move = MOVE_DOWN;
+			continue;
+		case MOVE_UP_FROM_RIGHT:
+ get_parent:
+			if (pred == root)
+				break;
+			pred = get_pred_parent(pred, preds,
+					       pred->parent,
+					       &move);
+			continue;
+		}
+		done = 1;
+	} while (!done);
+
+	/* We are fine. */
+	return 0;
+}
+
 /*
  * A series of AND or ORs where found together. Instead of
  * climbing up and down the tree branches, an array of the
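walk_pred_tree() factors the repeated iterative traversal into one place; each of the former open-coded loops becomes a small callback. A toy callback in the same style, counting every node entered on the way down (illustrative only; it mirrors the count_leafs_cb added later in this patch):

	/* Illustrative callback: count each predicate the walk enters. */
	static int count_nodes_cb(enum move_type move, struct filter_pred *pred,
				  int *err, void *data)
	{
		int *count = data;

		if (move == MOVE_DOWN)
			(*count)++;
		return WALK_PRED_DEFAULT;
	}

	/* Usage: int n = 0; walk_pred_tree(preds, root, count_nodes_cb, &n); */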
@@ -410,99 +467,91 @@ static int process_ops(struct filter_pred *preds,

 	for (i = 0; i < op->val; i++) {
 		pred = &preds[op->ops[i]];
-		match = pred->fn(pred, rec);
+		if (!WARN_ON_ONCE(!pred->fn))
+			match = pred->fn(pred, rec);
 		if (!!match == type)
 			return match;
 	}
 	return match;
 }

+struct filter_match_preds_data {
+	struct filter_pred *preds;
+	int match;
+	void *rec;
+};
+
+static int filter_match_preds_cb(enum move_type move, struct filter_pred *pred,
+				 int *err, void *data)
+{
+	struct filter_match_preds_data *d = data;
+
+	*err = 0;
+	switch (move) {
+	case MOVE_DOWN:
+		/* only AND and OR have children */
+		if (pred->left != FILTER_PRED_INVALID) {
+			/* If ops is set, then it was folded. */
+			if (!pred->ops)
+				return WALK_PRED_DEFAULT;
+			/* We can treat folded ops as a leaf node */
+			d->match = process_ops(d->preds, pred, d->rec);
+		} else {
+			if (!WARN_ON_ONCE(!pred->fn))
+				d->match = pred->fn(pred, d->rec);
+		}
+
+		return WALK_PRED_PARENT;
+	case MOVE_UP_FROM_LEFT:
+		/*
+		 * Check for short circuits.
+		 *
+		 * Optimization: !!match == (pred->op == OP_OR)
+		 *   is the same as:
+		 * if ((match && pred->op == OP_OR) ||
+		 *     (!match && pred->op == OP_AND))
+		 */
+		if (!!d->match == (pred->op == OP_OR))
+			return WALK_PRED_PARENT;
+		break;
+	case MOVE_UP_FROM_RIGHT:
+		break;
+	}
+
+	return WALK_PRED_DEFAULT;
+}
+
 /* return 1 if event matches, 0 otherwise (discard) */
 int filter_match_preds(struct event_filter *filter, void *rec)
 {
-	int match = -1;
-	enum move_type move = MOVE_DOWN;
 	struct filter_pred *preds;
-	struct filter_pred *pred;
 	struct filter_pred *root;
-	int n_preds;
-	int done = 0;
+	struct filter_match_preds_data data = {
+		/* match is currently meaningless */
+		.match = -1,
+		.rec   = rec,
+	};
+	int n_preds, ret;

 	/* no filter is considered a match */
 	if (!filter)
 		return 1;

 	n_preds = filter->n_preds;
-
 	if (!n_preds)
 		return 1;

 	/*
 	 * n_preds, root and filter->preds are protect with preemption disabled.
 	 */
-	preds = rcu_dereference_sched(filter->preds);
 	root = rcu_dereference_sched(filter->root);
 	if (!root)
 		return 1;

-	pred = root;
-
-	/* match is currently meaningless */
-	match = -1;
-
-	do {
-		switch (move) {
-		case MOVE_DOWN:
-			/* only AND and OR have children */
-			if (pred->left != FILTER_PRED_INVALID) {
-				/* If ops is set, then it was folded. */
-				if (!pred->ops) {
-					/* keep going to down the left side */
-					pred = &preds[pred->left];
-					continue;
-				}
-				/* We can treat folded ops as a leaf node */
-				match = process_ops(preds, pred, rec);
-			} else
-				match = pred->fn(pred, rec);
-			/* If this pred is the only pred */
-			if (pred == root)
-				break;
-			pred = get_pred_parent(pred, preds,
-					       pred->parent, &move);
-			continue;
-		case MOVE_UP_FROM_LEFT:
-			/*
-			 * Check for short circuits.
-			 *
-			 * Optimization: !!match == (pred->op == OP_OR)
-			 *   is the same as:
-			 * if ((match && pred->op == OP_OR) ||
-			 *     (!match && pred->op == OP_AND))
-			 */
-			if (!!match == (pred->op == OP_OR)) {
-				if (pred == root)
-					break;
-				pred = get_pred_parent(pred, preds,
-						       pred->parent, &move);
-				continue;
-			}
-			/* now go down the right side of the tree. */
-			pred = &preds[pred->right];
-			move = MOVE_DOWN;
-			continue;
-		case MOVE_UP_FROM_RIGHT:
-			/* We finished this equation. */
-			if (pred == root)
-				break;
-			pred = get_pred_parent(pred, preds,
-					       pred->parent, &move);
-			continue;
-		}
-		done = 1;
-	} while (!done);
-
-	return match;
+	data.preds = preds = rcu_dereference_sched(filter->preds);
+	ret = walk_pred_tree(preds, root, filter_match_preds_cb, &data);
+	WARN_ON(ret);
+	return data.match;
 }
 EXPORT_SYMBOL_GPL(filter_match_preds);

@@ -628,22 +677,6 @@ find_event_field(struct ftrace_event_call *call, char *name)
 	return __find_event_field(head, name);
 }

-static void filter_free_pred(struct filter_pred *pred)
-{
-	if (!pred)
-		return;
-
-	kfree(pred->field_name);
-	kfree(pred);
-}
-
-static void filter_clear_pred(struct filter_pred *pred)
-{
-	kfree(pred->field_name);
-	pred->field_name = NULL;
-	pred->regex.len = 0;
-}
-
 static int __alloc_pred_stack(struct pred_stack *stack, int n_preds)
 {
 	stack->preds = kzalloc(sizeof(*stack->preds)*(n_preds + 1), GFP_KERNEL);
@@ -689,20 +722,13 @@ __pop_pred_stack(struct pred_stack *stack)
 static int filter_set_pred(struct event_filter *filter,
 			   int idx,
 			   struct pred_stack *stack,
-			   struct filter_pred *src,
-			   filter_pred_fn_t fn)
+			   struct filter_pred *src)
 {
 	struct filter_pred *dest = &filter->preds[idx];
 	struct filter_pred *left;
 	struct filter_pred *right;

 	*dest = *src;
-	if (src->field_name) {
-		dest->field_name = kstrdup(src->field_name, GFP_KERNEL);
-		if (!dest->field_name)
-			return -ENOMEM;
-	}
-	dest->fn = fn;
 	dest->index = idx;

 	if (dest->op == OP_OR || dest->op == OP_AND) {
@@ -743,11 +769,7 @@ static int filter_set_pred(struct event_filter *filter,

 static void __free_preds(struct event_filter *filter)
 {
-	int i;
-
 	if (filter->preds) {
-		for (i = 0; i < filter->a_preds; i++)
-			kfree(filter->preds[i].field_name);
 		kfree(filter->preds);
 		filter->preds = NULL;
 	}
@@ -840,23 +862,19 @@ static void filter_free_subsystem_filters(struct event_subsystem *system)
 	}
 }

-static int filter_add_pred_fn(struct filter_parse_state *ps,
-			      struct ftrace_event_call *call,
-			      struct event_filter *filter,
-			      struct filter_pred *pred,
-			      struct pred_stack *stack,
-			      filter_pred_fn_t fn)
+static int filter_add_pred(struct filter_parse_state *ps,
+			   struct event_filter *filter,
+			   struct filter_pred *pred,
+			   struct pred_stack *stack)
 {
-	int idx, err;
+	int err;

 	if (WARN_ON(filter->n_preds == filter->a_preds)) {
 		parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
 		return -ENOSPC;
 	}

-	idx = filter->n_preds;
-	filter_clear_pred(&filter->preds[idx]);
-	err = filter_set_pred(filter, idx, stack, pred, fn);
+	err = filter_set_pred(filter, filter->n_preds, stack, pred);
 	if (err)
 		return err;

@@ -937,31 +955,15 @@ static filter_pred_fn_t select_comparison_fn(int op, int field_size,
 	return fn;
 }

-static int filter_add_pred(struct filter_parse_state *ps,
-			   struct ftrace_event_call *call,
-			   struct event_filter *filter,
-			   struct filter_pred *pred,
-			   struct pred_stack *stack,
-			   bool dry_run)
+static int init_pred(struct filter_parse_state *ps,
+		     struct ftrace_event_field *field,
+		     struct filter_pred *pred)
 {
-	struct ftrace_event_field *field;
-	filter_pred_fn_t fn;
+	filter_pred_fn_t fn = filter_pred_none;
 	unsigned long long val;
 	int ret;

-	fn = pred->fn = filter_pred_none;
-
-	if (pred->op == OP_AND)
-		goto add_pred_fn;
-	else if (pred->op == OP_OR)
-		goto add_pred_fn;
-
-	field = find_event_field(call, pred->field_name);
-	if (!field) {
-		parse_error(ps, FILT_ERR_FIELD_NOT_FOUND, 0);
-		return -EINVAL;
-	}
-
 	pred->offset = field->offset;

 	if (!is_legal_op(field, pred->op)) {
@@ -1001,9 +1003,7 @@ static int filter_add_pred(struct filter_parse_state *ps,
 	if (pred->op == OP_NE)
 		pred->not = 1;

-add_pred_fn:
-	if (!dry_run)
-		return filter_add_pred_fn(ps, call, filter, pred, stack, fn);
+	pred->fn = fn;
 	return 0;
 }

@@ -1302,39 +1302,37 @@ parse_operand:
 	return 0;
 }

-static struct filter_pred *create_pred(int op, char *operand1, char *operand2)
+static struct filter_pred *create_pred(struct filter_parse_state *ps,
+				       struct ftrace_event_call *call,
+				       int op, char *operand1, char *operand2)
 {
-	struct filter_pred *pred;
+	struct ftrace_event_field *field;
+	static struct filter_pred pred;

-	pred = kzalloc(sizeof(*pred), GFP_KERNEL);
-	if (!pred)
-		return NULL;
+	memset(&pred, 0, sizeof(pred));
+	pred.op = op;

-	pred->field_name = kstrdup(operand1, GFP_KERNEL);
-	if (!pred->field_name) {
-		kfree(pred);
+	if (op == OP_AND || op == OP_OR)
+		return &pred;
+
+	if (!operand1 || !operand2) {
+		parse_error(ps, FILT_ERR_MISSING_FIELD, 0);
 		return NULL;
 	}

-	strcpy(pred->regex.pattern, operand2);
-	pred->regex.len = strlen(pred->regex.pattern);
-
-	pred->op = op;
-
-	return pred;
-}
-
-static struct filter_pred *create_logical_pred(int op)
-{
-	struct filter_pred *pred;
-
-	pred = kzalloc(sizeof(*pred), GFP_KERNEL);
-	if (!pred)
+	field = find_event_field(call, operand1);
+	if (!field) {
+		parse_error(ps, FILT_ERR_FIELD_NOT_FOUND, 0);
 		return NULL;
+	}

-	pred->op = op;
+	strcpy(pred.regex.pattern, operand2);
+	pred.regex.len = strlen(pred.regex.pattern);

-	return pred;
+#ifdef CONFIG_FTRACE_STARTUP_TEST
+	pred.field = field;
+#endif
+	return init_pred(ps, field, &pred) ? NULL : &pred;
 }

 static int check_preds(struct filter_parse_state *ps)
@@ -1375,6 +1373,23 @@ static int count_preds(struct filter_parse_state *ps)
 	return n_preds;
 }

+struct check_pred_data {
+	int count;
+	int max;
+};
+
+static int check_pred_tree_cb(enum move_type move, struct filter_pred *pred,
+			      int *err, void *data)
+{
+	struct check_pred_data *d = data;
+
+	if (WARN_ON(d->count++ > d->max)) {
+		*err = -EINVAL;
+		return WALK_PRED_ABORT;
+	}
+	return WALK_PRED_DEFAULT;
+}
+
 /*
  * The tree is walked at filtering of an event. If the tree is not correctly
  * built, it may cause an infinite loop. Check here that the tree does
@@ -1383,107 +1398,76 @@ static int count_preds(struct filter_parse_state *ps)
 static int check_pred_tree(struct event_filter *filter,
 			   struct filter_pred *root)
 {
-	struct filter_pred *preds;
-	struct filter_pred *pred;
-	enum move_type move = MOVE_DOWN;
-	int count = 0;
-	int done = 0;
-	int max;
-
-	/*
-	 * The max that we can hit a node is three times.
-	 * Once going down, once coming up from left, and
-	 * once coming up from right. This is more than enough
-	 * since leafs are only hit a single time.
-	 */
-	max = 3 * filter->n_preds;
+	struct check_pred_data data = {
+		/*
+		 * The max that we can hit a node is three times.
+		 * Once going down, once coming up from left, and
+		 * once coming up from right. This is more than enough
+		 * since leafs are only hit a single time.
+		 */
+		.max   = 3 * filter->n_preds,
+		.count = 0,
+	};

-	preds = filter->preds;
-	if (!preds)
-		return -EINVAL;
-	pred = root;
-
-	do {
-		if (WARN_ON(count++ > max))
-			return -EINVAL;
-
-		switch (move) {
-		case MOVE_DOWN:
-			if (pred->left != FILTER_PRED_INVALID) {
-				pred = &preds[pred->left];
-				continue;
-			}
-			/* A leaf at the root is just a leaf in the tree */
-			if (pred == root)
-				break;
-			pred = get_pred_parent(pred, preds,
-					       pred->parent, &move);
-			continue;
-		case MOVE_UP_FROM_LEFT:
-			pred = &preds[pred->right];
-			move = MOVE_DOWN;
-			continue;
-		case MOVE_UP_FROM_RIGHT:
-			if (pred == root)
-				break;
-			pred = get_pred_parent(pred, preds,
-					       pred->parent, &move);
-			continue;
-		}
-		done = 1;
-	} while (!done);
-
-	/* We are fine. */
-	return 0;
+	return walk_pred_tree(filter->preds, root,
+			      check_pred_tree_cb, &data);
+}
+
+static int count_leafs_cb(enum move_type move, struct filter_pred *pred,
+			  int *err, void *data)
+{
+	int *count = data;
+
+	if ((move == MOVE_DOWN) &&
+	    (pred->left == FILTER_PRED_INVALID))
+		(*count)++;
+
+	return WALK_PRED_DEFAULT;
 }

 static int count_leafs(struct filter_pred *preds, struct filter_pred *root)
 {
-	struct filter_pred *pred;
-	enum move_type move = MOVE_DOWN;
-	int count = 0;
-	int done = 0;
-
-	pred = root;
-
-	do {
-		switch (move) {
-		case MOVE_DOWN:
-			if (pred->left != FILTER_PRED_INVALID) {
-				pred = &preds[pred->left];
-				continue;
-			}
-			/* A leaf at the root is just a leaf in the tree */
-			if (pred == root)
-				return 1;
-			count++;
-			pred = get_pred_parent(pred, preds,
-					       pred->parent, &move);
-			continue;
-		case MOVE_UP_FROM_LEFT:
-			pred = &preds[pred->right];
-			move = MOVE_DOWN;
-			continue;
-		case MOVE_UP_FROM_RIGHT:
-			if (pred == root)
-				break;
-			pred = get_pred_parent(pred, preds,
-					       pred->parent, &move);
-			continue;
-		}
-		done = 1;
-	} while (!done);
-
+	int count = 0, ret;
+
+	ret = walk_pred_tree(preds, root, count_leafs_cb, &count);
+	WARN_ON(ret);
 	return count;
 }

+struct fold_pred_data {
+	struct filter_pred *root;
+	int count;
+	int children;
+};
+
+static int fold_pred_cb(enum move_type move, struct filter_pred *pred,
+			int *err, void *data)
+{
+	struct fold_pred_data *d = data;
+	struct filter_pred *root = d->root;
+
+	if (move != MOVE_DOWN)
+		return WALK_PRED_DEFAULT;
+	if (pred->left != FILTER_PRED_INVALID)
+		return WALK_PRED_DEFAULT;
+
+	if (WARN_ON(d->count == d->children)) {
+		*err = -EINVAL;
+		return WALK_PRED_ABORT;
+	}
+
+	pred->index &= ~FILTER_PRED_FOLD;
+	root->ops[d->count++] = pred->index;
+	return WALK_PRED_DEFAULT;
+}
+
 static int fold_pred(struct filter_pred *preds, struct filter_pred *root)
 {
-	struct filter_pred *pred;
-	enum move_type move = MOVE_DOWN;
-	int count = 0;
+	struct fold_pred_data data = {
+		.root  = root,
+		.count = 0,
+	};
 	int children;
-	int done = 0;

 	/* No need to keep the fold flag */
 	root->index &= ~FILTER_PRED_FOLD;
@@ -1501,37 +1485,26 @@ static int fold_pred(struct filter_pred *preds, struct filter_pred *root)
 		return -ENOMEM;

 	root->val = children;
+	data.children = children;
+	return walk_pred_tree(preds, root, fold_pred_cb, &data);
+}

-	pred = root;
-	do {
-		switch (move) {
-		case MOVE_DOWN:
-			if (pred->left != FILTER_PRED_INVALID) {
-				pred = &preds[pred->left];
-				continue;
-			}
-			if (WARN_ON(count == children))
-				return -EINVAL;
-			pred->index &= ~FILTER_PRED_FOLD;
-			root->ops[count++] = pred->index;
-			pred = get_pred_parent(pred, preds,
-					       pred->parent, &move);
-			continue;
-		case MOVE_UP_FROM_LEFT:
-			pred = &preds[pred->right];
-			move = MOVE_DOWN;
-			continue;
-		case MOVE_UP_FROM_RIGHT:
-			if (pred == root)
-				break;
-			pred = get_pred_parent(pred, preds,
-					       pred->parent, &move);
-			continue;
-		}
-		done = 1;
-	} while (!done);
-
-	return 0;
+static int fold_pred_tree_cb(enum move_type move, struct filter_pred *pred,
+			     int *err, void *data)
+{
+	struct filter_pred *preds = data;
+
+	if (move != MOVE_DOWN)
+		return WALK_PRED_DEFAULT;
+	if (!(pred->index & FILTER_PRED_FOLD))
+		return WALK_PRED_DEFAULT;
+
+	*err = fold_pred(preds, pred);
+	if (*err)
+		return WALK_PRED_ABORT;
+
+	/* eveyrhing below is folded, continue with parent */
+	return WALK_PRED_PARENT;
 }

 /*
@@ -1542,51 +1515,8 @@ static int fold_pred(struct filter_pred *preds, struct filter_pred *root)
 static int fold_pred_tree(struct event_filter *filter,
 			  struct filter_pred *root)
 {
-	struct filter_pred *preds;
-	struct filter_pred *pred;
-	enum move_type move = MOVE_DOWN;
-	int done = 0;
-	int err;
-
-	preds = filter->preds;
-	if (!preds)
-		return -EINVAL;
-	pred = root;
-
-	do {
-		switch (move) {
-		case MOVE_DOWN:
-			if (pred->index & FILTER_PRED_FOLD) {
-				err = fold_pred(preds, pred);
-				if (err)
-					return err;
-				/* Folded nodes are like leafs */
-			} else if (pred->left != FILTER_PRED_INVALID) {
-				pred = &preds[pred->left];
-				continue;
-			}
-
-			/* A leaf at the root is just a leaf in the tree */
-			if (pred == root)
-				break;
-			pred = get_pred_parent(pred, preds,
-					       pred->parent, &move);
-			continue;
-		case MOVE_UP_FROM_LEFT:
-			pred = &preds[pred->right];
-			move = MOVE_DOWN;
-			continue;
-		case MOVE_UP_FROM_RIGHT:
-			if (pred == root)
-				break;
-			pred = get_pred_parent(pred, preds,
-					       pred->parent, &move);
-			continue;
-		}
-		done = 1;
-	} while (!done);
-
-	return 0;
+	return walk_pred_tree(filter->preds, root, fold_pred_tree_cb,
+			      filter->preds);
 }

 static int replace_preds(struct ftrace_event_call *call,
@@ -1643,27 +1573,17 @@ static int replace_preds(struct ftrace_event_call *call,
 			goto fail;
 		}

-		if (elt->op == OP_AND || elt->op == OP_OR) {
-			pred = create_logical_pred(elt->op);
-			goto add_pred;
-		}
-
-		if (!operand1 || !operand2) {
-			parse_error(ps, FILT_ERR_MISSING_FIELD, 0);
+		pred = create_pred(ps, call, elt->op, operand1, operand2);
+		if (!pred) {
 			err = -EINVAL;
 			goto fail;
 		}

-		pred = create_pred(elt->op, operand1, operand2);
-add_pred:
-		if (!pred) {
-			err = -ENOMEM;
-			goto fail;
+		if (!dry_run) {
+			err = filter_add_pred(ps, filter, pred, &stack);
+			if (err)
+				goto fail;
 		}
-		err = filter_add_pred(ps, call, filter, pred, &stack, dry_run);
-		filter_free_pred(pred);
-		if (err)
-			goto fail;

 		operand1 = operand2 = NULL;
 	}
@@ -1958,17 +1878,14 @@ int ftrace_profile_set_filter(struct perf_event *event, int event_id,
 	int err;
 	struct event_filter *filter;
 	struct filter_parse_state *ps;
-	struct ftrace_event_call *call = NULL;
+	struct ftrace_event_call *call;

 	mutex_lock(&event_mutex);

-	list_for_each_entry(call, &ftrace_events, list) {
-		if (call->event.type == event_id)
-			break;
-	}
+	call = event->tp_event;

 	err = -EINVAL;
-	if (&call->list == &ftrace_events)
+	if (!call)
 		goto out_unlock;
|
|
||||||
err = -EEXIST;
|
err = -EEXIST;
|
||||||
|
@ -2012,3 +1929,215 @@ out_unlock:
|
||||||
|
|
||||||
#endif /* CONFIG_PERF_EVENTS */
|
#endif /* CONFIG_PERF_EVENTS */
|
||||||
|
|
||||||
|
#ifdef CONFIG_FTRACE_STARTUP_TEST
|
||||||
|
|
||||||
|
#include <linux/types.h>
|
||||||
|
#include <linux/tracepoint.h>
|
||||||
|
|
||||||
|
#define CREATE_TRACE_POINTS
|
||||||
|
#include "trace_events_filter_test.h"
|
||||||
|
|
||||||
|
static int test_get_filter(char *filter_str, struct ftrace_event_call *call,
|
||||||
|
struct event_filter **pfilter)
|
||||||
|
{
|
||||||
|
struct event_filter *filter;
|
||||||
|
struct filter_parse_state *ps;
|
||||||
|
int err = -ENOMEM;
|
||||||
|
|
||||||
|
filter = __alloc_filter();
|
||||||
|
if (!filter)
|
||||||
|
goto out;
|
||||||
|
|
||||||
|
ps = kzalloc(sizeof(*ps), GFP_KERNEL);
|
||||||
|
if (!ps)
|
||||||
|
goto free_filter;
|
||||||
|
|
||||||
|
parse_init(ps, filter_ops, filter_str);
|
||||||
|
err = filter_parse(ps);
|
||||||
|
if (err)
|
||||||
|
goto free_ps;
|
||||||
|
|
||||||
|
err = replace_preds(call, filter, ps, filter_str, false);
|
||||||
|
if (!err)
|
||||||
|
*pfilter = filter;
|
||||||
|
|
||||||
|
free_ps:
|
||||||
|
filter_opstack_clear(ps);
|
||||||
|
postfix_clear(ps);
|
||||||
|
kfree(ps);
|
||||||
|
|
||||||
|
free_filter:
|
||||||
|
if (err)
|
||||||
|
__free_filter(filter);
|
||||||
|
|
||||||
|
out:
|
||||||
|
return err;
|
||||||
|
}
|
||||||
|
|
||||||
|
#define DATA_REC(m, va, vb, vc, vd, ve, vf, vg, vh, nvisit) \
|
||||||
|
{ \
|
||||||
|
.filter = FILTER, \
|
||||||
|
.rec = { .a = va, .b = vb, .c = vc, .d = vd, \
|
||||||
|
.e = ve, .f = vf, .g = vg, .h = vh }, \
|
||||||
|
.match = m, \
|
||||||
|
.not_visited = nvisit, \
|
||||||
|
}
|
||||||
|
#define YES 1
|
||||||
|
#define NO 0
|
||||||
|
|
||||||
|
static struct test_filter_data_t {
|
||||||
|
char *filter;
|
||||||
|
struct ftrace_raw_ftrace_test_filter rec;
|
||||||
|
int match;
|
||||||
|
char *not_visited;
|
||||||
|
} test_filter_data[] = {
|
||||||
|
#define FILTER "a == 1 && b == 1 && c == 1 && d == 1 && " \
|
||||||
|
"e == 1 && f == 1 && g == 1 && h == 1"
|
||||||
|
DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, ""),
|
||||||
|
DATA_REC(NO, 0, 1, 1, 1, 1, 1, 1, 1, "bcdefgh"),
|
||||||
|
DATA_REC(NO, 1, 1, 1, 1, 1, 1, 1, 0, ""),
|
||||||
|
#undef FILTER
|
||||||
|
#define FILTER "a == 1 || b == 1 || c == 1 || d == 1 || " \
|
||||||
|
"e == 1 || f == 1 || g == 1 || h == 1"
|
||||||
|
DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 0, ""),
|
||||||
|
DATA_REC(YES, 0, 0, 0, 0, 0, 0, 0, 1, ""),
|
||||||
|
DATA_REC(YES, 1, 0, 0, 0, 0, 0, 0, 0, "bcdefgh"),
|
||||||
|
#undef FILTER
|
||||||
|
#define FILTER "(a == 1 || b == 1) && (c == 1 || d == 1) && " \
|
||||||
|
"(e == 1 || f == 1) && (g == 1 || h == 1)"
|
||||||
|
DATA_REC(NO, 0, 0, 1, 1, 1, 1, 1, 1, "dfh"),
|
||||||
|
DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
|
||||||
|
DATA_REC(YES, 1, 0, 1, 0, 0, 1, 0, 1, "bd"),
|
||||||
|
DATA_REC(NO, 1, 0, 1, 0, 0, 1, 0, 0, "bd"),
|
||||||
|
#undef FILTER
|
||||||
|
#define FILTER "(a == 1 && b == 1) || (c == 1 && d == 1) || " \
|
||||||
|
"(e == 1 && f == 1) || (g == 1 && h == 1)"
|
||||||
|
DATA_REC(YES, 1, 0, 1, 1, 1, 1, 1, 1, "efgh"),
|
||||||
|
DATA_REC(YES, 0, 0, 0, 0, 0, 0, 1, 1, ""),
|
||||||
|
DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 1, ""),
|
||||||
|
#undef FILTER
|
||||||
|
#define FILTER "(a == 1 && b == 1) && (c == 1 && d == 1) && " \
|
||||||
|
"(e == 1 && f == 1) || (g == 1 && h == 1)"
|
||||||
|
DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 0, "gh"),
|
||||||
|
DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 1, ""),
|
||||||
|
DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, ""),
|
||||||
|
#undef FILTER
|
||||||
|
#define FILTER "((a == 1 || b == 1) || (c == 1 || d == 1) || " \
|
||||||
|
"(e == 1 || f == 1)) && (g == 1 || h == 1)"
|
||||||
|
DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 1, "bcdef"),
|
||||||
|
DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 0, ""),
|
||||||
|
DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, "h"),
|
||||||
|
#undef FILTER
|
||||||
|
#define FILTER "((((((((a == 1) && (b == 1)) || (c == 1)) && (d == 1)) || " \
|
||||||
|
"(e == 1)) && (f == 1)) || (g == 1)) && (h == 1))"
|
||||||
|
DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "ceg"),
|
||||||
|
DATA_REC(NO, 0, 1, 0, 1, 0, 1, 0, 1, ""),
|
||||||
|
DATA_REC(NO, 1, 0, 1, 0, 1, 0, 1, 0, ""),
|
||||||
|
#undef FILTER
|
||||||
|
#define FILTER "((((((((a == 1) || (b == 1)) && (c == 1)) || (d == 1)) && " \
|
||||||
|
"(e == 1)) || (f == 1)) && (g == 1)) || (h == 1))"
|
||||||
|
DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "bdfh"),
|
||||||
|
DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
|
||||||
|
DATA_REC(YES, 1, 0, 1, 0, 1, 0, 1, 0, "bdfh"),
|
||||||
|
};
|
||||||
|
|
||||||
|
#undef DATA_REC
|
||||||
|
#undef FILTER
|
||||||
|
#undef YES
|
||||||
|
#undef NO
|
||||||
|
|
||||||
|
#define DATA_CNT (sizeof(test_filter_data)/sizeof(struct test_filter_data_t))
|
||||||
|
|
||||||
|
static int test_pred_visited;
|
||||||
|
|
||||||
|
static int test_pred_visited_fn(struct filter_pred *pred, void *event)
|
||||||
|
{
|
||||||
|
struct ftrace_event_field *field = pred->field;
|
||||||
|
|
||||||
|
test_pred_visited = 1;
|
||||||
|
printk(KERN_INFO "\npred visited %s\n", field->name);
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int test_walk_pred_cb(enum move_type move, struct filter_pred *pred,
|
||||||
|
int *err, void *data)
|
||||||
|
{
|
||||||
|
char *fields = data;
|
||||||
|
|
||||||
|
if ((move == MOVE_DOWN) &&
|
||||||
|
(pred->left == FILTER_PRED_INVALID)) {
|
||||||
|
struct ftrace_event_field *field = pred->field;
|
||||||
|
|
||||||
|
if (!field) {
|
||||||
|
WARN(1, "all leafs should have field defined");
|
||||||
|
return WALK_PRED_DEFAULT;
|
||||||
|
}
|
||||||
|
if (!strchr(fields, *field->name))
|
||||||
|
return WALK_PRED_DEFAULT;
|
||||||
|
|
||||||
|
WARN_ON(!pred->fn);
|
||||||
|
pred->fn = test_pred_visited_fn;
|
||||||
|
}
|
||||||
|
return WALK_PRED_DEFAULT;
|
||||||
|
}
|
||||||
|
|
||||||
|
static __init int ftrace_test_event_filter(void)
|
||||||
|
{
|
||||||
|
int i;
|
||||||
|
|
||||||
|
printk(KERN_INFO "Testing ftrace filter: ");
|
||||||
|
|
||||||
|
for (i = 0; i < DATA_CNT; i++) {
|
||||||
|
struct event_filter *filter = NULL;
|
||||||
|
struct test_filter_data_t *d = &test_filter_data[i];
|
||||||
|
int err;
|
||||||
|
|
||||||
|
err = test_get_filter(d->filter, &event_ftrace_test_filter,
|
||||||
|
&filter);
|
||||||
|
if (err) {
|
||||||
|
printk(KERN_INFO
|
||||||
|
"Failed to get filter for '%s', err %d\n",
|
||||||
|
d->filter, err);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* The preemption disabling is not really needed for self
|
||||||
|
* tests, but the rcu dereference will complain without it.
|
||||||
|
*/
|
||||||
|
preempt_disable();
|
||||||
|
if (*d->not_visited)
|
||||||
|
walk_pred_tree(filter->preds, filter->root,
|
||||||
|
test_walk_pred_cb,
|
||||||
|
d->not_visited);
|
||||||
|
|
||||||
|
test_pred_visited = 0;
|
||||||
|
err = filter_match_preds(filter, &d->rec);
|
||||||
|
preempt_enable();
|
||||||
|
|
||||||
|
__free_filter(filter);
|
||||||
|
|
||||||
|
if (test_pred_visited) {
|
||||||
|
printk(KERN_INFO
|
||||||
|
"Failed, unwanted pred visited for filter %s\n",
|
||||||
|
d->filter);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (err != d->match) {
|
||||||
|
printk(KERN_INFO
|
||||||
|
"Failed to match filter '%s', expected %d\n",
|
||||||
|
d->filter, d->match);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (i == DATA_CNT)
|
||||||
|
printk(KERN_CONT "OK\n");
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
late_initcall(ftrace_test_event_filter);
|
||||||
|
|
||||||
|
#endif /* CONFIG_FTRACE_STARTUP_TEST */
|
||||||
|
|
|
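
An aside on the refactor above (this sketch is not part of the commit): the hand-rolled MOVE_DOWN/MOVE_UP_FROM_LEFT/MOVE_UP_FROM_RIGHT loops that the commit deletes are all instances of one iterative depth-first walk, which the new walk_pred_tree() centralizes behind a callback. A minimal userspace C sketch of the same idea, with hypothetical names, assuming an index-based tree like the filter predicates use:

    #include <stdio.h>

    /* Hypothetical re-creation of the walk_pred_tree() idea in plain C. */
    enum move_type { MOVE_DOWN, MOVE_UP_FROM_LEFT, MOVE_UP_FROM_RIGHT };

    #define NODE_INVALID (-1)

    struct node {
        int left, right, parent;        /* indices into the node array */
    };

    typedef int (*walk_cb)(enum move_type move, struct node *n, void *data);

    /* Iterative depth-first walk; the callback sees every node on the way down. */
    static void walk_tree(struct node *nodes, int root, walk_cb cb, void *data)
    {
        enum move_type move = MOVE_DOWN;
        int cur = root;

        for (;;) {
            if (move == MOVE_DOWN) {
                cb(MOVE_DOWN, &nodes[cur], data);
                if (nodes[cur].left != NODE_INVALID) {
                    cur = nodes[cur].left;
                    continue;
                }
            } else if (move == MOVE_UP_FROM_LEFT) {
                cur = nodes[cur].right;
                move = MOVE_DOWN;
                continue;
            }
            /* MOVE_UP_FROM_RIGHT, or a leaf reached going down: go up. */
            if (cur == root)
                break;
            move = (nodes[nodes[cur].parent].left == cur) ?
                    MOVE_UP_FROM_LEFT : MOVE_UP_FROM_RIGHT;
            cur = nodes[cur].parent;
        }
    }

    static int count_leafs_cb(enum move_type move, struct node *n, void *data)
    {
        if (move == MOVE_DOWN && n->left == NODE_INVALID)
            (*(int *)data)++;
        return 0;
    }

    int main(void)
    {
        /* Node 0 is the root with children 1 and 2; 1 and 2 are leaves. */
        struct node nodes[] = {
            { 1, 2, NODE_INVALID },
            { NODE_INVALID, NODE_INVALID, 0 },
            { NODE_INVALID, NODE_INVALID, 0 },
        };
        int count = 0;

        walk_tree(nodes, 0, count_leafs_cb, &count);
        printf("leafs: %d\n", count);   /* prints 2 */
        return 0;
    }

Here count_leafs_cb plays the role the commit gives to count_leafs_cb/fold_pred_cb: the driver owns the traversal, and each caller only supplies what to do on a MOVE_DOWN visit.
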
@@ -0,0 +1,50 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM test
+
+#if !defined(_TRACE_TEST_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_TEST_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(ftrace_test_filter,
+
+	TP_PROTO(int a, int b, int c, int d, int e, int f, int g, int h),
+
+	TP_ARGS(a, b, c, d, e, f, g, h),
+
+	TP_STRUCT__entry(
+		__field(int, a)
+		__field(int, b)
+		__field(int, c)
+		__field(int, d)
+		__field(int, e)
+		__field(int, f)
+		__field(int, g)
+		__field(int, h)
+	),
+
+	TP_fast_assign(
+		__entry->a = a;
+		__entry->b = b;
+		__entry->c = c;
+		__entry->d = d;
+		__entry->e = e;
+		__entry->f = f;
+		__entry->g = g;
+		__entry->h = h;
+	),
+
+	TP_printk("a %d, b %d, c %d, d %d, e %d, f %d, g %d, h %d",
+		  __entry->a, __entry->b, __entry->c, __entry->d,
+		  __entry->e, __entry->f, __entry->g, __entry->h)
+);
+
+#endif /* _TRACE_TEST_H || TRACE_HEADER_MULTI_READ */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_events_filter_test
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
@@ -505,13 +505,13 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller);
 #ifdef CONFIG_PREEMPT_TRACER
 void trace_preempt_on(unsigned long a0, unsigned long a1)
 {
-	if (preempt_trace())
+	if (preempt_trace() && !irq_trace())
 		stop_critical_timing(a0, a1);
 }
 
 void trace_preempt_off(unsigned long a0, unsigned long a1)
 {
-	if (preempt_trace())
+	if (preempt_trace() && !irq_trace())
 		start_critical_timing(a0, a1);
 }
 #endif /* CONFIG_PREEMPT_TRACER */
@@ -836,11 +836,17 @@ static void __unregister_trace_probe(struct trace_probe *tp)
 }
 
 /* Unregister a trace_probe and probe_event: call with locking probe_lock */
-static void unregister_trace_probe(struct trace_probe *tp)
+static int unregister_trace_probe(struct trace_probe *tp)
 {
+	/* Enabled event can not be unregistered */
+	if (trace_probe_is_enabled(tp))
+		return -EBUSY;
+
 	__unregister_trace_probe(tp);
 	list_del(&tp->list);
 	unregister_probe_event(tp);
+
+	return 0;
 }
 
 /* Register a trace_probe and probe_event */
@@ -854,7 +860,9 @@ static int register_trace_probe(struct trace_probe *tp)
 	/* Delete old (same name) event if exist */
 	old_tp = find_trace_probe(tp->call.name, tp->call.class->system);
 	if (old_tp) {
-		unregister_trace_probe(old_tp);
+		ret = unregister_trace_probe(old_tp);
+		if (ret < 0)
+			goto end;
 		free_trace_probe(old_tp);
 	}
 
@@ -892,6 +900,7 @@ static int trace_probe_module_callback(struct notifier_block *nb,
 	mutex_lock(&probe_lock);
 	list_for_each_entry(tp, &probe_list, list) {
 		if (trace_probe_within_module(tp, mod)) {
+			/* Don't need to check busy - this should have gone. */
 			__unregister_trace_probe(tp);
 			ret = __register_trace_probe(tp);
 			if (ret)
@@ -1205,10 +1214,11 @@ static int create_trace_probe(int argc, char **argv)
 			return -ENOENT;
 		}
 		/* delete an event */
-		unregister_trace_probe(tp);
-		free_trace_probe(tp);
+		ret = unregister_trace_probe(tp);
+		if (ret == 0)
+			free_trace_probe(tp);
 		mutex_unlock(&probe_lock);
-		return 0;
+		return ret;
 	}
 
 	if (argc < 2) {
@@ -1317,18 +1327,29 @@ error:
 	return ret;
 }
 
-static void release_all_trace_probes(void)
+static int release_all_trace_probes(void)
 {
 	struct trace_probe *tp;
+	int ret = 0;
 
 	mutex_lock(&probe_lock);
+	/* Ensure no probe is in use. */
+	list_for_each_entry(tp, &probe_list, list)
+		if (trace_probe_is_enabled(tp)) {
+			ret = -EBUSY;
+			goto end;
+		}
 	/* TODO: Use batch unregistration */
 	while (!list_empty(&probe_list)) {
 		tp = list_entry(probe_list.next, struct trace_probe, list);
 		unregister_trace_probe(tp);
 		free_trace_probe(tp);
 	}
+
+end:
 	mutex_unlock(&probe_lock);
+
+	return ret;
 }
 
 /* Probes listing interfaces */
@@ -1380,9 +1401,13 @@ static const struct seq_operations probes_seq_op = {
 
 static int probes_open(struct inode *inode, struct file *file)
 {
-	if ((file->f_mode & FMODE_WRITE) &&
-	    (file->f_flags & O_TRUNC))
-		release_all_trace_probes();
+	int ret;
+
+	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
+		ret = release_all_trace_probes();
+		if (ret < 0)
+			return ret;
+	}
 
 	return seq_open(file, &probes_seq_op);
 }
@@ -2055,6 +2080,21 @@ static __init int kprobe_trace_self_tests_init(void)
 
 	ret = target(1, 2, 3, 4, 5, 6);
 
+	/* Disable trace points before removing it */
+	tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
+	if (WARN_ON_ONCE(tp == NULL)) {
+		pr_warning("error on getting test probe.\n");
+		warn++;
+	} else
+		disable_trace_probe(tp, TP_FLAG_TRACE);
+
+	tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
+	if (WARN_ON_ONCE(tp == NULL)) {
+		pr_warning("error on getting 2nd test probe.\n");
+		warn++;
+	} else
+		disable_trace_probe(tp, TP_FLAG_TRACE);
+
 	ret = command_trace_probe("-:testprobe");
 	if (WARN_ON_ONCE(ret)) {
 		pr_warning("error on deleting a probe.\n");
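
A side note on the -EBUSY change above (this sketch is not part of the commit): unregister_trace_probe() now refuses to tear down an enabled probe, and every caller must propagate the failure instead of freeing unconditionally. A minimal sketch of that guard-then-propagate shape in plain C, with hypothetical names:

    #include <errno.h>
    #include <stdio.h>

    struct probe {
        const char *name;
        int enabled;            /* set while the event is in use */
        int registered;
    };

    /* Refuse to tear down a probe that is still in use. */
    static int unregister_probe(struct probe *p)
    {
        if (p->enabled)
            return -EBUSY;
        p->registered = 0;
        return 0;
    }

    /* Callers propagate the error instead of freeing blindly. */
    static int delete_probe(struct probe *p)
    {
        int ret = unregister_probe(p);

        if (ret == 0)
            printf("%s deleted\n", p->name);    /* free_trace_probe() in the real code */
        return ret;
    }

    int main(void)
    {
        struct probe p = { "testprobe", 1, 1 };

        if (delete_probe(&p) == -EBUSY)
            printf("%s is busy, not deleted\n", p.name);
        p.enabled = 0;
        return delete_probe(&p);
    }
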
@@ -59,18 +59,19 @@ void hold_module_trace_bprintk_format(const char **start, const char **end)
 			continue;
 		}
 
+		fmt = NULL;
 		tb_fmt = kmalloc(sizeof(*tb_fmt), GFP_KERNEL);
-		if (tb_fmt)
+		if (tb_fmt) {
 			fmt = kmalloc(strlen(*iter) + 1, GFP_KERNEL);
-		if (tb_fmt && fmt) {
-			list_add_tail(&tb_fmt->list, &trace_bprintk_fmt_list);
-			strcpy(fmt, *iter);
-			tb_fmt->fmt = fmt;
-			*iter = tb_fmt->fmt;
-		} else {
-			kfree(tb_fmt);
-			*iter = NULL;
+			if (fmt) {
+				list_add_tail(&tb_fmt->list, &trace_bprintk_fmt_list);
+				strcpy(fmt, *iter);
+				tb_fmt->fmt = fmt;
+			} else
+				kfree(tb_fmt);
 		}
+		*iter = fmt;
 
 	}
 	mutex_unlock(&btrace_mutex);
 }
@@ -34,11 +34,16 @@ extern struct tracepoint * const __stop___tracepoints_ptrs[];
 static const int tracepoint_debug;
 
 /*
- * tracepoints_mutex nests inside module_mutex. Tracepoints mutex protects the
- * builtin and module tracepoints and the hash table.
+ * Tracepoints mutex protects the builtin and module tracepoints and the hash
+ * table, as well as the local module list.
 */
 static DEFINE_MUTEX(tracepoints_mutex);
 
+#ifdef CONFIG_MODULES
+/* Local list of struct module */
+static LIST_HEAD(tracepoint_module_list);
+#endif /* CONFIG_MODULES */
+
 /*
 * Tracepoint hash table, containing the active tracepoints.
 * Protected by tracepoints_mutex.
@@ -292,9 +297,10 @@ static void disable_tracepoint(struct tracepoint *elem)
 * @end: end of the range
 *
 * Updates the probe callback corresponding to a range of tracepoints.
+ * Called with tracepoints_mutex held.
 */
-void tracepoint_update_probe_range(struct tracepoint * const *begin,
+static void tracepoint_update_probe_range(struct tracepoint * const *begin,
 				   struct tracepoint * const *end)
 {
 	struct tracepoint * const *iter;
 	struct tracepoint_entry *mark_entry;
@@ -302,7 +308,6 @@ void tracepoint_update_probe_range(struct tracepoint * const *begin,
 	if (!begin)
 		return;
 
-	mutex_lock(&tracepoints_mutex);
 	for (iter = begin; iter < end; iter++) {
 		mark_entry = get_tracepoint((*iter)->name);
 		if (mark_entry) {
@@ -312,11 +317,27 @@ void tracepoint_update_probe_range(struct tracepoint * const *begin,
 			disable_tracepoint(*iter);
 		}
 	}
-	mutex_unlock(&tracepoints_mutex);
 }
 
+#ifdef CONFIG_MODULES
+void module_update_tracepoints(void)
+{
+	struct tp_module *tp_mod;
+
+	list_for_each_entry(tp_mod, &tracepoint_module_list, list)
+		tracepoint_update_probe_range(tp_mod->tracepoints_ptrs,
+			tp_mod->tracepoints_ptrs + tp_mod->num_tracepoints);
+}
+#else /* CONFIG_MODULES */
+void module_update_tracepoints(void)
+{
+}
+#endif /* CONFIG_MODULES */
+
+
 /*
 * Update probes, removing the faulty probes.
+ * Called with tracepoints_mutex held.
 */
 static void tracepoint_update_probes(void)
 {
@@ -359,11 +380,12 @@ int tracepoint_probe_register(const char *name, void *probe, void *data)
 
 	mutex_lock(&tracepoints_mutex);
 	old = tracepoint_add_probe(name, probe, data);
-	mutex_unlock(&tracepoints_mutex);
-	if (IS_ERR(old))
+	if (IS_ERR(old)) {
+		mutex_unlock(&tracepoints_mutex);
 		return PTR_ERR(old);
+	}
 	tracepoint_update_probes();		/* may update entry */
+	mutex_unlock(&tracepoints_mutex);
 	release_probes(old);
 	return 0;
 }
@@ -402,11 +424,12 @@ int tracepoint_probe_unregister(const char *name, void *probe, void *data)
 
 	mutex_lock(&tracepoints_mutex);
 	old = tracepoint_remove_probe(name, probe, data);
-	mutex_unlock(&tracepoints_mutex);
-	if (IS_ERR(old))
+	if (IS_ERR(old)) {
+		mutex_unlock(&tracepoints_mutex);
 		return PTR_ERR(old);
+	}
 	tracepoint_update_probes();		/* may update entry */
+	mutex_unlock(&tracepoints_mutex);
 	release_probes(old);
 	return 0;
 }
@@ -489,9 +512,8 @@ void tracepoint_probe_update_all(void)
 	if (!list_empty(&old_probes))
 		list_replace_init(&old_probes, &release_probes);
 	need_update = 0;
-	mutex_unlock(&tracepoints_mutex);
 
 	tracepoint_update_probes();
+	mutex_unlock(&tracepoints_mutex);
 	list_for_each_entry_safe(pos, next, &release_probes, u.list) {
 		list_del(&pos->u.list);
 		call_rcu_sched(&pos->u.rcu, rcu_free_old_probes);
@@ -509,7 +531,7 @@ EXPORT_SYMBOL_GPL(tracepoint_probe_update_all);
 * Will return the first tracepoint in the range if the input tracepoint is
 * NULL.
 */
-int tracepoint_get_iter_range(struct tracepoint * const **tracepoint,
+static int tracepoint_get_iter_range(struct tracepoint * const **tracepoint,
 	struct tracepoint * const *begin, struct tracepoint * const *end)
 {
 	if (!*tracepoint && begin != end) {
@@ -520,11 +542,12 @@ int tracepoint_get_iter_range(struct tracepoint * const **tracepoint,
 		return 1;
 	return 0;
 }
-EXPORT_SYMBOL_GPL(tracepoint_get_iter_range);
 
+#ifdef CONFIG_MODULES
 static void tracepoint_get_iter(struct tracepoint_iter *iter)
 {
 	int found = 0;
+	struct tp_module *iter_mod;
 
 	/* Core kernel tracepoints */
 	if (!iter->module) {
@@ -534,12 +557,43 @@ static void tracepoint_get_iter(struct tracepoint_iter *iter)
 		if (found)
 			goto end;
 	}
-	/* tracepoints in modules. */
-	found = module_get_iter_tracepoints(iter);
+	/* Tracepoints in modules */
+	mutex_lock(&tracepoints_mutex);
+	list_for_each_entry(iter_mod, &tracepoint_module_list, list) {
+		/*
+		 * Sorted module list
+		 */
+		if (iter_mod < iter->module)
+			continue;
+		else if (iter_mod > iter->module)
+			iter->tracepoint = NULL;
+		found = tracepoint_get_iter_range(&iter->tracepoint,
+			iter_mod->tracepoints_ptrs,
+			iter_mod->tracepoints_ptrs
+				+ iter_mod->num_tracepoints);
+		if (found) {
+			iter->module = iter_mod;
+			break;
+		}
+	}
+	mutex_unlock(&tracepoints_mutex);
 end:
 	if (!found)
 		tracepoint_iter_reset(iter);
 }
+#else /* CONFIG_MODULES */
+static void tracepoint_get_iter(struct tracepoint_iter *iter)
+{
+	int found = 0;
+
+	/* Core kernel tracepoints */
+	found = tracepoint_get_iter_range(&iter->tracepoint,
+			__start___tracepoints_ptrs,
+			__stop___tracepoints_ptrs);
+	if (!found)
+		tracepoint_iter_reset(iter);
+}
+#endif /* CONFIG_MODULES */
 
 void tracepoint_iter_start(struct tracepoint_iter *iter)
 {
@@ -566,26 +620,98 @@ EXPORT_SYMBOL_GPL(tracepoint_iter_stop);
 
 void tracepoint_iter_reset(struct tracepoint_iter *iter)
 {
+#ifdef CONFIG_MODULES
 	iter->module = NULL;
+#endif /* CONFIG_MODULES */
 	iter->tracepoint = NULL;
 }
 EXPORT_SYMBOL_GPL(tracepoint_iter_reset);
 
 #ifdef CONFIG_MODULES
+static int tracepoint_module_coming(struct module *mod)
+{
+	struct tp_module *tp_mod, *iter;
+	int ret = 0;
+
+	/*
+	 * We skip modules that taint the kernel, especially those with different
+	 * module header (for forced load), to make sure we don't cause a crash.
+	 */
+	if (mod->taints)
+		return 0;
+	mutex_lock(&tracepoints_mutex);
+	tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
+	if (!tp_mod) {
+		ret = -ENOMEM;
+		goto end;
+	}
+	tp_mod->num_tracepoints = mod->num_tracepoints;
+	tp_mod->tracepoints_ptrs = mod->tracepoints_ptrs;
+
+	/*
+	 * tracepoint_module_list is kept sorted by struct module pointer
+	 * address for iteration on tracepoints from a seq_file that can release
+	 * the mutex between calls.
+	 */
+	list_for_each_entry_reverse(iter, &tracepoint_module_list, list) {
+		BUG_ON(iter == tp_mod);	/* Should never be in the list twice */
+		if (iter < tp_mod) {
+			/* We belong to the location right after iter. */
+			list_add(&tp_mod->list, &iter->list);
+			goto module_added;
+		}
+	}
+	/* We belong to the beginning of the list */
+	list_add(&tp_mod->list, &tracepoint_module_list);
+module_added:
+	tracepoint_update_probe_range(mod->tracepoints_ptrs,
+		mod->tracepoints_ptrs + mod->num_tracepoints);
+end:
+	mutex_unlock(&tracepoints_mutex);
+	return ret;
+}
+
+static int tracepoint_module_going(struct module *mod)
+{
+	struct tp_module *pos;
+
+	mutex_lock(&tracepoints_mutex);
+	tracepoint_update_probe_range(mod->tracepoints_ptrs,
+		mod->tracepoints_ptrs + mod->num_tracepoints);
+	list_for_each_entry(pos, &tracepoint_module_list, list) {
+		if (pos->tracepoints_ptrs == mod->tracepoints_ptrs) {
+			list_del(&pos->list);
+			kfree(pos);
+			break;
+		}
+	}
+	/*
+	 * In the case of modules that were tainted at "coming", we'll simply
+	 * walk through the list without finding it. We cannot use the "tainted"
+	 * flag on "going", in case a module taints the kernel only after being
+	 * loaded.
+	 */
+	mutex_unlock(&tracepoints_mutex);
+	return 0;
+}
+
 int tracepoint_module_notify(struct notifier_block *self,
 			     unsigned long val, void *data)
 {
 	struct module *mod = data;
+	int ret = 0;
 
 	switch (val) {
 	case MODULE_STATE_COMING:
+		ret = tracepoint_module_coming(mod);
+		break;
+	case MODULE_STATE_LIVE:
+		break;
 	case MODULE_STATE_GOING:
-		tracepoint_update_probe_range(mod->tracepoints_ptrs,
-			mod->tracepoints_ptrs + mod->num_tracepoints);
+		ret = tracepoint_module_going(mod);
 		break;
 	}
-	return 0;
+	return ret;
 }
 
 struct notifier_block tracepoint_module_nb = {
@@ -598,7 +724,6 @@ static int init_tracepoints(void)
 	return register_module_notifier(&tracepoint_module_nb);
 }
 __initcall(init_tracepoints);
-
 #endif /* CONFIG_MODULES */
 
 #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
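
Aside (this sketch is not part of the commit): tracepoint_module_coming() above keeps tracepoint_module_list sorted by the address of the entries themselves, so an iterator that drops tracepoints_mutex between seq_file calls can find its place again. A self-contained userspace sketch of that reverse-scan sorted insertion, with hypothetical names; note that ordering unrelated allocations by pointer comparison is technically unspecified in ISO C, though it is exactly the trick the kernel code relies on:

    #include <stdio.h>
    #include <stdlib.h>

    struct entry {
        struct entry *prev, *next;      /* tiny doubly-linked list */
    };

    static struct entry head = { &head, &head };

    static void list_add_after(struct entry *e, struct entry *pos)
    {
        e->prev = pos;
        e->next = pos->next;
        pos->next->prev = e;
        pos->next = e;
    }

    /* Insert keeping the list sorted by ascending node address. */
    static void insert_sorted(struct entry *e)
    {
        struct entry *iter;

        /* Scan backwards; the first node below us is our predecessor. */
        for (iter = head.prev; iter != &head; iter = iter->prev) {
            if (iter < e) {
                list_add_after(e, iter);
                return;
            }
        }
        list_add_after(e, &head);       /* smallest address: goes first */
    }

    int main(void)
    {
        struct entry *iter;
        int i;

        for (i = 0; i < 4; i++)
            insert_sorted(malloc(sizeof(struct entry)));

        for (iter = head.next; iter != &head; iter = iter->next)
            printf("%p\n", (void *)iter);       /* ascending addresses */
        return 0;
    }
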
@@ -321,7 +321,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 */
 static int watchdog(void *unused)
 {
-	static struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
 	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
 
 	sched_setscheduler(current, SCHED_FIFO, &param);
@@ -350,7 +350,8 @@ static int watchdog(void *unused)
 		set_current_state(TASK_INTERRUPTIBLE);
 	}
 	__set_current_state(TASK_RUNNING);
+	param.sched_priority = 0;
+	sched_setscheduler(current, SCHED_NORMAL, &param);
 	return 0;
 }
 
@@ -438,7 +439,7 @@ static int watchdog_enable(int cpu)
 
 	/* create the watchdog thread */
 	if (!p) {
-		p = kthread_create(watchdog, (void *)(unsigned long)cpu, "watchdog/%d", cpu);
+		p = kthread_create_on_node(watchdog, NULL, cpu_to_node(cpu), "watchdog/%d", cpu);
 		if (IS_ERR(p)) {
 			printk(KERN_ERR "softlockup watchdog for %i failed\n", cpu);
 			if (!err) {
@@ -72,6 +72,19 @@ OPTIONS
	CPUs are specified with -: 0-2. Default is to report samples on all
	CPUs.
 
+--asm-raw::
+	Show raw instruction encoding of assembly instructions.
+
+--source::
+	Interleave source code with assembly code. Enabled by default,
+	disable with --no-source.
+
+--symfs=<directory>::
+	Look for files with symbols relative to this directory.
+
+-M::
+--disassembler-style=:: Set disassembler style for objdump.
+
 SEE ALSO
 --------
 linkperf:perf-record[1], linkperf:perf-report[1]
@@ -16,6 +16,9 @@ This command displays the buildids found in a perf.data file, so that other
 tools can be used to fetch packages with matching symbol tables for use by
 perf report.
 
+It can also be used to show the build id of the running kernel or in an ELF
+file using -i/--input.
+
 OPTIONS
 -------
 -H::
@@ -27,6 +30,9 @@ OPTIONS
 -f::
 --force::
	Don't do ownership validation.
+-k::
+--kernel::
+	Show running kernel build id.
 -v::
 --verbose::
	Be more verbose.
@@ -134,6 +134,24 @@ OPTIONS
	CPUs are specified with -: 0-2. Default is to report samples on all
	CPUs.
 
+-M::
+--disassembler-style=:: Set disassembler style for objdump.
+
+--source::
+	Interleave source code with assembly code. Enabled by default,
+	disable with --no-source.
+
+--asm-raw::
+	Show raw instruction encoding of assembly instructions.
+
+--show-total-period:: Show a column with the sum of periods.
+
+-I::
+--show-info::
+	Display extended information about the perf.data file. This adds
+	information which may be very large and thus may clutter the display.
+	It currently includes: cpu and numa topology of the host system.
+
 SEE ALSO
 --------
-linkperf:perf-stat[1]
+linkperf:perf-stat[1], linkperf:perf-annotate[1]
@@ -8,7 +8,7 @@ perf-sched - Tool to trace/measure scheduler properties (latencies)
 SYNOPSIS
 --------
 [verse]
-'perf sched' {record|latency|map|replay|trace}
+'perf sched' {record|latency|map|replay|script}
 
 DESCRIPTION
 -----------
@@ -20,8 +20,8 @@ There are five variants of perf sched:
 'perf sched latency' to report the per task scheduling latencies
 and other scheduling properties of the workload.
 
-'perf sched trace' to see a detailed trace of the workload that
-was recorded.
+'perf sched script' to see a detailed trace of the workload that
+was recorded (aliased to 'perf script' for now).
 
 'perf sched replay' to simulate the workload that was recorded
 via perf sched record. (this is done by starting up mockup threads
|
||||||
CPUs are specified with -: 0-2. Default is to report samples on all
|
CPUs are specified with -: 0-2. Default is to report samples on all
|
||||||
CPUs.
|
CPUs.
|
||||||
|
|
||||||
|
-I::
|
||||||
|
--show-info::
|
||||||
|
Display extended information about the perf.data file. This adds
|
||||||
|
information which may be very large and thus may clutter the display.
|
||||||
|
It currently includes: cpu and numa topology of the host system.
|
||||||
|
It can only be used with the perf script report mode.
|
||||||
|
|
||||||
SEE ALSO
|
SEE ALSO
|
||||||
--------
|
--------
|
||||||
linkperf:perf-record[1], linkperf:perf-script-perl[1],
|
linkperf:perf-record[1], linkperf:perf-script-perl[1],
|
||||||
|
|
|
@@ -94,6 +94,22 @@ an empty cgroup (monitor all the time) using, e.g., -G foo,,bar. Cgroups must ha
 corresponding events, i.e., they always refer to events defined earlier on the command
 line.
 
+-o file::
+--output file::
+	Print the output into the designated file.
+
+--append::
+	Append to the output file designated with the -o option. Ignored if -o is not specified.
+
+--log-fd::
+
+	Log output to fd, instead of stderr.  Complementary to --output, and mutually exclusive
+	with it.  --append may be used here.  Examples:
+	3>results  perf stat --log-fd 3          -- $cmd
+	3>>results perf stat --log-fd 3 --append -- $cmd
+
+
 EXAMPLES
 --------
@@ -106,6 +106,51 @@ Default is to monitor all CPUS.
 --zero::
	Zero history across display updates.
 
+-s::
+--sort::
+	Sort by key(s): pid, comm, dso, symbol, parent
+
+-n::
+--show-nr-samples::
+	Show a column with the number of samples.
+
+--show-total-period::
+	Show a column with the sum of periods.
+
+--dsos::
+	Only consider symbols in these dsos.
+
+--comms::
+	Only consider symbols in these comms.
+
+--symbols::
+	Only consider these symbols.
+
+-M::
+--disassembler-style=:: Set disassembler style for objdump.
+
+--source::
+	Interleave source code with assembly code. Enabled by default,
+	disable with --no-source.
+
+--asm-raw::
+	Show raw instruction encoding of assembly instructions.
+
+-G [type,min,order]::
+--call-graph::
+	Display call chains using type, min percent threshold and order.
+	type can be either:
+	- flat: single column, linear exposure of call chains.
+	- graph: use a graph tree, displaying absolute overhead rates.
+	- fractal: like graph, but displays relative rates. Each branch of
+		 the tree is considered as a new profiled object.
+
+	order can be either:
+	- callee: callee based call graph.
+	- caller: inverted caller based call graph.
+
+	Default: fractal,0.5,callee.
+
 INTERACTIVE PROMPTING KEYS
 --------------------------
@@ -130,9 +175,6 @@ INTERACTIVE PROMPTING KEYS
 [S]::
	Stop annotation, return to full profile display.
 
-[w]::
-	Toggle between weighted sum and individual count[E]r profile.
-
 [z]::
	Toggle event count zeroing across display updates.
|
||||||
|
[colors]
|
||||||
|
|
||||||
|
# These were the old defaults
|
||||||
|
top = red, lightgray
|
||||||
|
medium = green, lightgray
|
||||||
|
normal = black, lightgray
|
||||||
|
selected = lightgray, magenta
|
||||||
|
code = blue, lightgray
|
||||||
|
|
||||||
|
[tui]
|
||||||
|
|
||||||
|
# Defaults if linked with libslang
|
||||||
|
report = on
|
||||||
|
annotate = on
|
||||||
|
top = on
|
||||||
|
|
||||||
|
[buildid]
|
||||||
|
|
||||||
|
# Default, disable using /dev/null
|
||||||
|
dir = /root/.debug
|
|
@@ -466,13 +466,13 @@ else
		LIB_OBJS += $(OUTPUT)util/ui/browsers/annotate.o
		LIB_OBJS += $(OUTPUT)util/ui/browsers/hists.o
		LIB_OBJS += $(OUTPUT)util/ui/browsers/map.o
-		LIB_OBJS += $(OUTPUT)util/ui/browsers/top.o
		LIB_OBJS += $(OUTPUT)util/ui/helpline.o
		LIB_OBJS += $(OUTPUT)util/ui/progress.o
		LIB_OBJS += $(OUTPUT)util/ui/util.o
		LIB_H += util/ui/browser.h
		LIB_H += util/ui/browsers/map.h
		LIB_H += util/ui/helpline.h
+		LIB_H += util/ui/keysyms.h
		LIB_H += util/ui/libslang.h
		LIB_H += util/ui/progress.h
		LIB_H += util/ui/util.h
@@ -729,9 +729,6 @@ $(OUTPUT)util/ui/browser.o: util/ui/browser.c $(OUTPUT)PERF-CFLAGS
 $(OUTPUT)util/ui/browsers/annotate.o: util/ui/browsers/annotate.c $(OUTPUT)PERF-CFLAGS
	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $<
 
-$(OUTPUT)util/ui/browsers/top.o: util/ui/browsers/top.c $(OUTPUT)PERF-CFLAGS
-	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $<
-
 $(OUTPUT)util/ui/browsers/hists.o: util/ui/browsers/hists.c $(OUTPUT)PERF-CFLAGS
	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $<
@@ -2,3 +2,4 @@ ifndef NO_DWARF
 PERF_HAVE_DWARF_REGS := 1
 LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o
 endif
+LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/header.o
@@ -0,0 +1,36 @@
+#include <sys/types.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "../../util/header.h"
+
+#define __stringify_1(x)        #x
+#define __stringify(x)          __stringify_1(x)
+
+#define mfspr(rn)       ({unsigned long rval; \
+			 asm volatile("mfspr %0," __stringify(rn) \
+				      : "=r" (rval)); rval; })
+
+#define SPRN_PVR        0x11F   /* Processor Version Register */
+#define PVR_VER(pvr)    (((pvr) >>  16) & 0xFFFF) /* Version field */
+#define PVR_REV(pvr)    (((pvr) >>   0) & 0xFFFF) /* Revision field */
+
+int
+get_cpuid(char *buffer, size_t sz)
+{
+	unsigned long pvr;
+	int nb;
+
+	pvr = mfspr(SPRN_PVR);
+
+	nb = snprintf(buffer, sz, "%lu,%lu$", PVR_VER(pvr), PVR_REV(pvr));
+
+	/* look for end marker to ensure the entire data fit */
+	if (strchr(buffer, '$')) {
+		buffer[nb-1] = '\0';
+		return 0;
+	}
+	return -1;
+}
@@ -2,3 +2,4 @@ ifndef NO_DWARF
 PERF_HAVE_DWARF_REGS := 1
 LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o
 endif
+LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/header.o
@@ -0,0 +1,59 @@
+#include <sys/types.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "../../util/header.h"
+
+static inline void
+cpuid(unsigned int op, unsigned int *a, unsigned int *b, unsigned int *c,
+      unsigned int *d)
+{
+	__asm__ __volatile__ (".byte 0x53\n\tcpuid\n\t"
+			      "movl %%ebx, %%esi\n\t.byte 0x5b"
+			: "=a" (*a),
+			"=S" (*b),
+			"=c" (*c),
+			"=d" (*d)
+			: "a" (op));
+}
+
+int
+get_cpuid(char *buffer, size_t sz)
+{
+	unsigned int a, b, c, d, lvl;
+	int family = -1, model = -1, step = -1;
+	int nb;
+	char vendor[16];
+
+	cpuid(0, &lvl, &b, &c, &d);
+	strncpy(&vendor[0], (char *)(&b), 4);
+	strncpy(&vendor[4], (char *)(&d), 4);
+	strncpy(&vendor[8], (char *)(&c), 4);
+	vendor[12] = '\0';
+
+	if (lvl >= 1) {
+		cpuid(1, &a, &b, &c, &d);
+
+		family = (a >> 8) & 0xf;  /* bits 11 - 8 */
+		model  = (a >> 4) & 0xf;  /* Bits  7 - 4 */
+		step   = a & 0xf;
+
+		/* extended family */
+		if (family == 0xf)
+			family += (a >> 20) & 0xff;
+
+		/* extended model */
+		if (family >= 0x6)
+			model += ((a >> 16) & 0xf) << 4;
+	}
+	nb = snprintf(buffer, sz, "%s,%u,%u,%u$", vendor, family, model, step);
+
+	/* look for end marker to ensure the entire data fit */
+	if (strchr(buffer, '$')) {
+		buffer[nb-1] = '\0';
+		return 0;
+	}
+	return -1;
+}
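
Aside (this sketch is not part of the commit): both get_cpuid() implementations above detect snprintf() truncation the same way: emit a trailing '$' sentinel and only trust the buffer if the sentinel survived the size limit. A hypothetical standalone illustration of the trick:

    #include <stdio.h>
    #include <string.h>

    /* Returns 0 only when the whole string (plus sentinel) fit in the buffer. */
    static int format_checked(char *buf, size_t sz, const char *vendor, int fam)
    {
        int nb = snprintf(buf, sz, "%s,%d$", vendor, fam);

        if (strchr(buf, '$')) {         /* sentinel survived: nothing was cut off */
            buf[nb - 1] = '\0';         /* drop the sentinel again */
            return 0;
        }
        return -1;
    }

    int main(void)
    {
        char big[64], small[8];

        printf("big:   %d\n", format_checked(big, sizeof(big), "GenuineIntel", 6));
        printf("small: %d\n", format_checked(small, sizeof(small), "GenuineIntel", 6));
        printf("value: %s\n", big);
        return 0;
    }

The small buffer loses the '$' to truncation, so the call reports -1 instead of silently returning a partial CPU identifier.
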
@@ -114,10 +114,11 @@ static int hist_entry__tty_annotate(struct hist_entry *he, int evidx)
			    print_line, full_paths, 0, 0);
 }
 
-static void hists__find_annotations(struct hists *self, int evidx)
+static void hists__find_annotations(struct hists *self, int evidx,
+				    int nr_events)
 {
	struct rb_node *nd = rb_first(&self->entries), *next;
-	int key = KEY_RIGHT;
+	int key = K_RIGHT;
 
	while (nd) {
		struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
@@ -129,7 +130,7 @@ static void hists__find_annotations(struct hists *self, int evidx)
		notes = symbol__annotation(he->ms.sym);
		if (notes->src == NULL) {
 find_next:
-			if (key == KEY_LEFT)
+			if (key == K_LEFT)
				nd = rb_prev(nd);
			else
				nd = rb_next(nd);
@@ -137,12 +138,13 @@ find_next:
		}
 
		if (use_browser > 0) {
-			key = hist_entry__tui_annotate(he, evidx);
+			key = hist_entry__tui_annotate(he, evidx, nr_events,
+						       NULL, NULL, 0);
			switch (key) {
-			case KEY_RIGHT:
+			case K_RIGHT:
				next = rb_next(nd);
				break;
-			case KEY_LEFT:
+			case K_LEFT:
				next = rb_prev(nd);
				break;
			default:
@@ -215,7 +217,8 @@ static int __cmd_annotate(void)
		total_nr_samples += nr_samples;
		hists__collapse_resort(hists);
		hists__output_resort(hists);
-		hists__find_annotations(hists, pos->idx);
+		hists__find_annotations(hists, pos->idx,
+					session->evlist->nr_entries);
	}
 }
 
@@ -267,6 +270,14 @@ static const struct option options[] = {
	OPT_BOOLEAN('P', "full-paths", &full_paths,
		    "Don't shorten the displayed pathnames"),
	OPT_STRING('c', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
+	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
+		   "Look for files with symbols relative to this directory"),
+	OPT_BOOLEAN(0, "source", &symbol_conf.annotate_src,
+		    "Interleave source code with assembly code (default)"),
+	OPT_BOOLEAN(0, "asm-raw", &symbol_conf.annotate_asm_raw,
+		    "Display raw encoding of assembly instructions (default)"),
+	OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
+		   "Specify disassembler style (e.g. -M intel for intel syntax)"),
	OPT_END()
 };
@@ -1,7 +1,8 @@
 /*
 * builtin-buildid-list.c
 *
- * Builtin buildid-list command: list buildids in perf.data
+ * Builtin buildid-list command: list buildids in perf.data, in the running
+ * kernel and in ELF files.
 *
 * Copyright (C) 2009, Red Hat Inc.
 * Copyright (C) 2009, Arnaldo Carvalho de Melo <acme@redhat.com>
@@ -15,8 +16,11 @@
 #include "util/session.h"
 #include "util/symbol.h"
 
+#include <libelf.h>
+
 static char const *input_name = "perf.data";
 static bool force;
+static bool show_kernel;
 static bool with_hits;
 
 static const char * const buildid_list_usage[] = {
@@ -29,12 +33,13 @@ static const struct option options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		   "input file name"),
	OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
+	OPT_BOOLEAN('k', "kernel", &show_kernel, "Show current kernel build id"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose"),
	OPT_END()
 };
 
-static int __cmd_buildid_list(void)
+static int perf_session__list_build_ids(void)
 {
	struct perf_session *session;
 
@@ -52,6 +57,49 @@ static int __cmd_buildid_list(void)
	return 0;
 }
 
+static int sysfs__fprintf_build_id(FILE *fp)
+{
+	u8 kallsyms_build_id[BUILD_ID_SIZE];
+	char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+
+	if (sysfs__read_build_id("/sys/kernel/notes", kallsyms_build_id,
+				 sizeof(kallsyms_build_id)) != 0)
+		return -1;
+
+	build_id__sprintf(kallsyms_build_id, sizeof(kallsyms_build_id),
+			  sbuild_id);
+	fprintf(fp, "%s\n", sbuild_id);
+	return 0;
+}
+
+static int filename__fprintf_build_id(const char *name, FILE *fp)
+{
+	u8 build_id[BUILD_ID_SIZE];
+	char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+
+	if (filename__read_build_id(name, build_id,
+				    sizeof(build_id)) != sizeof(build_id))
+		return 0;
+
+	build_id__sprintf(build_id, sizeof(build_id), sbuild_id);
+	return fprintf(fp, "%s\n", sbuild_id);
+}
+
+static int __cmd_buildid_list(void)
+{
+	if (show_kernel)
+		return sysfs__fprintf_build_id(stdout);
+
+	elf_version(EV_CURRENT);
+	/*
+	 * See if this is an ELF file first:
+	 */
+	if (filename__fprintf_build_id(input_name, stdout))
+		return 0;
+
+	return perf_session__list_build_ids();
+}
+
 int cmd_buildid_list(int argc, const char **argv, const char *prefix __used)
 {
	argc = parse_options(argc, argv, options, buildid_list_usage, 0);
tools/perf/builtin-diff.c

@@ -162,7 +162,7 @@ static int __cmd_diff(void)
 
 	hists__match(&session[0]->hists, &session[1]->hists);
 	hists__fprintf(&session[1]->hists, &session[0]->hists,
-		       show_displacement, stdout);
+		       show_displacement, true, 0, 0, stdout);
 out_delete:
 	for (i = 0; i < 2; ++i)
 		perf_session__delete(session[i]);
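One interface change worth calling out in this hunk: hists__fprintf() now takes three extra arguments, the true, 0, 0 above. Read together with the perf top call later in this merge (which passes false plus a row and column budget), they look like a show-header flag and max-rows/max-cols limits, with 0 meaning unlimited. The prototype implied by the call sites, as an inference rather than a quote from the header, would be:

size_t hists__fprintf(struct hists *hists, struct hists *pair,
		      bool show_displacement, bool show_header,
		      int max_rows, int max_cols, FILE *fp);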
tools/perf/builtin-record.c

@@ -73,6 +73,7 @@ static off_t post_processing_offset;
 
 static struct perf_session *session;
 static const char *cpu_list;
+static const char *progname;
 
 static void advance_output(size_t size)
 {

@@ -137,17 +138,29 @@ static void mmap_read(struct perf_mmap *md)
 
 static volatile int done = 0;
 static volatile int signr = -1;
+static volatile int child_finished = 0;
 
 static void sig_handler(int sig)
 {
+	if (sig == SIGCHLD)
+		child_finished = 1;
+
 	done = 1;
 	signr = sig;
 }
 
 static void sig_atexit(void)
 {
-	if (child_pid > 0)
-		kill(child_pid, SIGTERM);
+	int status;
+
+	if (child_pid > 0) {
+		if (!child_finished)
+			kill(child_pid, SIGTERM);
+
+		wait(&status);
+		if (WIFSIGNALED(status))
+			psignal(WTERMSIG(status), progname);
+	}
 
 	if (signr == -1 || signr == SIGUSR1)
 		return;
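The sig_atexit() change above is a standard child-reaping pattern: only SIGTERM the child if it has not already exited, always wait() so no zombie is left behind, and report a signal-caused death via psignal(). A stripped-down standalone version of the same pattern (names mirror the diff, the surrounding program is assumed):

#include <signal.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>

static pid_t child_pid = -1;
static volatile sig_atomic_t child_finished;

static void reap_child(const char *progname)
{
	int status;

	if (child_pid <= 0)
		return;

	if (!child_finished)		/* still running: ask it to stop */
		kill(child_pid, SIGTERM);

	wait(&status);			/* always reap, avoid a zombie */
	if (WIFSIGNALED(status))	/* died from a signal? say which */
		psignal(WTERMSIG(status), progname);
}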
@@ -446,6 +459,8 @@ static int __cmd_record(int argc, const char **argv)
 	char buf;
 	struct machine *machine;
 
+	progname = argv[0];
+
 	page_size = sysconf(_SC_PAGE_SIZE);
 
 	atexit(sig_atexit);

@@ -514,6 +529,19 @@ static int __cmd_record(int argc, const char **argv)
 	if (have_tracepoints(&evsel_list->entries))
 		perf_header__set_feat(&session->header, HEADER_TRACE_INFO);
 
+	perf_header__set_feat(&session->header, HEADER_HOSTNAME);
+	perf_header__set_feat(&session->header, HEADER_OSRELEASE);
+	perf_header__set_feat(&session->header, HEADER_ARCH);
+	perf_header__set_feat(&session->header, HEADER_CPUDESC);
+	perf_header__set_feat(&session->header, HEADER_NRCPUS);
+	perf_header__set_feat(&session->header, HEADER_EVENT_DESC);
+	perf_header__set_feat(&session->header, HEADER_CMDLINE);
+	perf_header__set_feat(&session->header, HEADER_VERSION);
+	perf_header__set_feat(&session->header, HEADER_CPU_TOPOLOGY);
+	perf_header__set_feat(&session->header, HEADER_TOTAL_MEM);
+	perf_header__set_feat(&session->header, HEADER_NUMA_TOPOLOGY);
+	perf_header__set_feat(&session->header, HEADER_CPUID);
+
 	/* 512 kiB: default amount of unprivileged mlocked memory */
 	if (mmap_pages == UINT_MAX)
 		mmap_pages = (512 * 1024) / page_size;

@@ -785,6 +813,8 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
 	int err = -ENOMEM;
 	struct perf_evsel *pos;
 
+	perf_header__set_cmdline(argc, argv);
+
 	evsel_list = perf_evlist__new(NULL, NULL);
 	if (evsel_list == NULL)
 		return -ENOMEM;
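perf_header__set_feat() above is just setting bits in a feature bitmap written into the perf.data header, so a reader can tell which optional sections (hostname, cpu topology, cmdline, ...) are present before parsing them. A minimal sketch of that kind of bitmap API, under the assumption that features are small integer ids; names here are illustrative, not perf's:

#include <limits.h>
#include <stdbool.h>

enum feat { FEAT_HOSTNAME, FEAT_OSRELEASE, FEAT_ARCH, FEAT_NRCPUS, FEAT_MAX };

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

struct header {
	unsigned long adds_features[(FEAT_MAX + BITS_PER_LONG - 1) / BITS_PER_LONG];
};

static void header__set_feat(struct header *h, int feat)
{
	h->adds_features[feat / BITS_PER_LONG] |= 1UL << (feat % BITS_PER_LONG);
}

static bool header__has_feat(const struct header *h, int feat)
{
	return h->adds_features[feat / BITS_PER_LONG] >> (feat % BITS_PER_LONG) & 1;
}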
tools/perf/builtin-report.c

@@ -40,6 +40,7 @@ static char const *input_name = "perf.data";
 static bool force, use_tui, use_stdio;
 static bool hide_unresolved;
 static bool dont_use_callchains;
+static bool show_full_info;
 
 static bool show_threads;
 static struct perf_read_values show_threads_values;

@@ -229,13 +230,10 @@ static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist,
 
 	list_for_each_entry(pos, &evlist->entries, node) {
 		struct hists *hists = &pos->hists;
-		const char *evname = NULL;
-
-		if (rb_first(&hists->entries) != rb_last(&hists->entries))
-			evname = event_name(pos);
+		const char *evname = event_name(pos);
 
 		hists__fprintf_nr_sample_events(hists, evname, stdout);
-		hists__fprintf(hists, NULL, false, stdout);
+		hists__fprintf(hists, NULL, false, true, 0, 0, stdout);
 		fprintf(stdout, "\n\n");
 	}
 

@@ -276,6 +274,9 @@ static int __cmd_report(void)
 		goto out_delete;
 	}
 
+	if (use_browser <= 0)
+		perf_session__fprintf_info(session, stdout, show_full_info);
+
 	if (show_threads)
 		perf_read_values_init(&show_threads_values);
 

@@ -330,9 +331,10 @@ static int __cmd_report(void)
 		goto out_delete;
 	}
 
-	if (use_browser > 0)
-		perf_evlist__tui_browse_hists(session->evlist, help);
-	else
+	if (use_browser > 0) {
+		perf_evlist__tui_browse_hists(session->evlist, help,
+					      NULL, NULL, 0);
+	} else
 		perf_evlist__tty_browse_hists(session->evlist, help);
 
 out_delete:

@@ -487,6 +489,16 @@ static const struct option options[] = {
 	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
 		    "Look for files with symbols relative to this directory"),
 	OPT_STRING('c', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
+	OPT_BOOLEAN('I', "show-info", &show_full_info,
+		    "Display extended information about perf.data file"),
+	OPT_BOOLEAN(0, "source", &symbol_conf.annotate_src,
+		    "Interleave source code with assembly code (default)"),
+	OPT_BOOLEAN(0, "asm-raw", &symbol_conf.annotate_asm_raw,
+		    "Display raw encoding of assembly instructions (default)"),
+	OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
+		   "Specify disassembler style (e.g. -M intel for intel syntax)"),
+	OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
+		    "Show a column with the sum of periods"),
 	OPT_END()
 };
 
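The two NULLs and the 0 appended to perf_evlist__tui_browse_hists() line up with what perf top passes later in this merge: perf_top__sort_new_samples, &top and top.delay_secs. The browser evidently gained a periodic-refresh hook, i.e. a timer callback, its argument, and an interval in seconds, which perf report simply leaves unused. The shape inferred from the call sites (the typedef name is mine, not perf's):

typedef void (*hbt_timer_fn)(void *arg);

int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
				  hbt_timer_fn timer, void *arg, int refresh);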
tools/perf/builtin-script.c

@@ -22,6 +22,7 @@ static u64 last_timestamp;
 static u64 nr_unordered;
 extern const struct option record_options[];
 static bool no_callchain;
+static bool show_full_info;
 static const char *cpu_list;
 static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
 

@@ -1083,7 +1084,8 @@ static const struct option options[] = {
 		     "comma separated output fields prepend with 'type:'. Valid types: hw,sw,trace,raw. Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,addr",
 		     parse_output_fields),
 	OPT_STRING('c', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
+	OPT_BOOLEAN('I', "show-info", &show_full_info,
+		    "display extended information from perf.data file"),
 	OPT_END()
 };
 

@@ -1268,6 +1270,8 @@ int cmd_script(int argc, const char **argv, const char *prefix __used)
 		return -1;
 	}
 
+	perf_session__fprintf_info(session, stdout, show_full_info);
+
 	if (!no_callchain)
 		symbol_conf.use_callchain = true;
 	else
tools/perf/builtin-stat.c

@@ -194,6 +194,9 @@ static const char *cpu_list;
 static const char *csv_sep = NULL;
 static bool csv_output = false;
 static bool group = false;
+static const char *output_name = NULL;
+static FILE *output = NULL;
+static int output_fd;
 
 static volatile int done = 0;
 

@@ -251,8 +254,13 @@ static double avg_stats(struct stats *stats)
  */
 static double stddev_stats(struct stats *stats)
 {
-	double variance = stats->M2 / (stats->n - 1);
-	double variance_mean = variance / stats->n;
+	double variance, variance_mean;
+
+	if (!stats->n)
+		return 0.0;
+
+	variance = stats->M2 / (stats->n - 1);
+	variance_mean = variance / stats->n;
 
 	return sqrt(variance_mean);
 }
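The stddev_stats() fix guards the n == 0 case that previously divided by -1 and then 0. For context, stats->M2 is the running sum of squared deviations maintained by Welford's online algorithm, and what the function returns is the standard error of the mean, sqrt(M2 / (n-1) / n). A self-contained sketch of the pair of routines; field names mirror the diff, and the update routine is the textbook algorithm rather than a quote from perf:

#include <math.h>

struct stats { double mean, M2; unsigned long n; };

/* Welford's online update: single pass, numerically stable */
static void update_stats(struct stats *s, double val)
{
	double delta = val - s->mean;

	s->n++;
	s->mean += delta / s->n;
	s->M2 += delta * (val - s->mean);
}

/* Standard error of the mean; 0.0 when there are no samples yet */
static double stddev_stats(struct stats *s)
{
	double variance, variance_mean;

	if (!s->n)
		return 0.0;

	variance = s->M2 / (s->n - 1);	/* sample variance */
	variance_mean = variance / s->n;
	return sqrt(variance_mean);
}

Note the n == 1 case still divides by zero in the variance; the callers visible here only use this with run_count > 1, which the new guard does not change.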
@@ -352,7 +360,7 @@ static int read_counter_aggr(struct perf_evsel *counter)
 		update_stats(&ps->res_stats[i], count[i]);
 
 	if (verbose) {
-		fprintf(stderr, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
+		fprintf(output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
 			event_name(counter), count[0], count[1], count[2]);
 	}
 

@@ -487,6 +495,8 @@ static int run_perf_stat(int argc __used, const char **argv)
 	if (forks) {
 		close(go_pipe[1]);
 		wait(&status);
+		if (WIFSIGNALED(status))
+			psignal(WTERMSIG(status), argv[0]);
 	} else {
 		while(!done) sleep(1);
 	}

@@ -519,9 +529,9 @@ static void print_noise_pct(double total, double avg)
 		pct = 100.0*total/avg;
 
 	if (csv_output)
-		fprintf(stderr, "%s%.2f%%", csv_sep, pct);
-	else
-		fprintf(stderr, " ( +-%6.2f%% )", pct);
+		fprintf(output, "%s%.2f%%", csv_sep, pct);
+	else if (pct)
+		fprintf(output, " ( +-%6.2f%% )", pct);
 }
 
 static void print_noise(struct perf_evsel *evsel, double avg)

@@ -546,16 +556,17 @@ static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg)
 			csv_output ? 0 : -4,
 			evsel_list->cpus->map[cpu], csv_sep);
 
-	fprintf(stderr, fmt, cpustr, msecs, csv_sep, event_name(evsel));
+	fprintf(output, fmt, cpustr, msecs, csv_sep, event_name(evsel));
 
 	if (evsel->cgrp)
-		fprintf(stderr, "%s%s", csv_sep, evsel->cgrp->name);
+		fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
 
 	if (csv_output)
 		return;
 
 	if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK))
-		fprintf(stderr, " # %8.3f CPUs utilized ", avg / avg_stats(&walltime_nsecs_stats));
+		fprintf(output, " # %8.3f CPUs utilized ",
+			avg / avg_stats(&walltime_nsecs_stats));
 }
 
 static void print_stalled_cycles_frontend(int cpu, struct perf_evsel *evsel __used, double avg)

@@ -576,9 +587,9 @@ static void print_stalled_cycles_frontend(int cpu, struct perf_evsel *evsel __used, double avg)
 	else if (ratio > 10.0)
 		color = PERF_COLOR_YELLOW;
 
-	fprintf(stderr, " # ");
-	color_fprintf(stderr, color, "%6.2f%%", ratio);
-	fprintf(stderr, " frontend cycles idle ");
+	fprintf(output, " # ");
+	color_fprintf(output, color, "%6.2f%%", ratio);
+	fprintf(output, " frontend cycles idle ");
 }
 
 static void print_stalled_cycles_backend(int cpu, struct perf_evsel *evsel __used, double avg)

@@ -599,9 +610,9 @@ static void print_stalled_cycles_backend(int cpu, struct perf_evsel *evsel __used, double avg)
 	else if (ratio > 20.0)
 		color = PERF_COLOR_YELLOW;
 
-	fprintf(stderr, " # ");
-	color_fprintf(stderr, color, "%6.2f%%", ratio);
-	fprintf(stderr, " backend cycles idle ");
+	fprintf(output, " # ");
+	color_fprintf(output, color, "%6.2f%%", ratio);
+	fprintf(output, " backend cycles idle ");
 }
 
 static void print_branch_misses(int cpu, struct perf_evsel *evsel __used, double avg)

@@ -622,9 +633,9 @@ static void print_branch_misses(int cpu, struct perf_evsel *evsel __used, double avg)
 	else if (ratio > 5.0)
 		color = PERF_COLOR_YELLOW;
 
-	fprintf(stderr, " # ");
-	color_fprintf(stderr, color, "%6.2f%%", ratio);
-	fprintf(stderr, " of all branches ");
+	fprintf(output, " # ");
+	color_fprintf(output, color, "%6.2f%%", ratio);
+	fprintf(output, " of all branches ");
 }
 
 static void print_l1_dcache_misses(int cpu, struct perf_evsel *evsel __used, double avg)

@@ -645,9 +656,9 @@ static void print_l1_dcache_misses(int cpu, struct perf_evsel *evsel __used, double avg)
 	else if (ratio > 5.0)
 		color = PERF_COLOR_YELLOW;
 
-	fprintf(stderr, " # ");
-	color_fprintf(stderr, color, "%6.2f%%", ratio);
-	fprintf(stderr, " of all L1-dcache hits ");
+	fprintf(output, " # ");
+	color_fprintf(output, color, "%6.2f%%", ratio);
+	fprintf(output, " of all L1-dcache hits ");
 }
 
 static void print_l1_icache_misses(int cpu, struct perf_evsel *evsel __used, double avg)

@@ -668,9 +679,9 @@ static void print_l1_icache_misses(int cpu, struct perf_evsel *evsel __used, double avg)
 	else if (ratio > 5.0)
 		color = PERF_COLOR_YELLOW;
 
-	fprintf(stderr, " # ");
-	color_fprintf(stderr, color, "%6.2f%%", ratio);
-	fprintf(stderr, " of all L1-icache hits ");
+	fprintf(output, " # ");
+	color_fprintf(output, color, "%6.2f%%", ratio);
+	fprintf(output, " of all L1-icache hits ");
 }
 
 static void print_dtlb_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg)

@@ -691,9 +702,9 @@ static void print_dtlb_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg)
 	else if (ratio > 5.0)
 		color = PERF_COLOR_YELLOW;
 
-	fprintf(stderr, " # ");
-	color_fprintf(stderr, color, "%6.2f%%", ratio);
-	fprintf(stderr, " of all dTLB cache hits ");
+	fprintf(output, " # ");
+	color_fprintf(output, color, "%6.2f%%", ratio);
+	fprintf(output, " of all dTLB cache hits ");
 }
 
 static void print_itlb_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg)

@@ -714,9 +725,9 @@ static void print_itlb_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg)
 	else if (ratio > 5.0)
 		color = PERF_COLOR_YELLOW;
 
-	fprintf(stderr, " # ");
-	color_fprintf(stderr, color, "%6.2f%%", ratio);
-	fprintf(stderr, " of all iTLB cache hits ");
+	fprintf(output, " # ");
+	color_fprintf(output, color, "%6.2f%%", ratio);
+	fprintf(output, " of all iTLB cache hits ");
 }
 
 static void print_ll_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg)

@@ -737,9 +748,9 @@ static void print_ll_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg)
 	else if (ratio > 5.0)
 		color = PERF_COLOR_YELLOW;
 
-	fprintf(stderr, " # ");
-	color_fprintf(stderr, color, "%6.2f%%", ratio);
-	fprintf(stderr, " of all LL-cache hits ");
+	fprintf(output, " # ");
+	color_fprintf(output, color, "%6.2f%%", ratio);
+	fprintf(output, " of all LL-cache hits ");
 }
 
 static void abs_printout(int cpu, struct perf_evsel *evsel, double avg)

@@ -762,10 +773,10 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg)
 	else
 		cpu = 0;
 
-	fprintf(stderr, fmt, cpustr, avg, csv_sep, event_name(evsel));
+	fprintf(output, fmt, cpustr, avg, csv_sep, event_name(evsel));
 
 	if (evsel->cgrp)
-		fprintf(stderr, "%s%s", csv_sep, evsel->cgrp->name);
+		fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
 
 	if (csv_output)
 		return;

@@ -776,14 +787,14 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg)
 		if (total)
 			ratio = avg / total;
 
-		fprintf(stderr, " # %5.2f insns per cycle ", ratio);
+		fprintf(output, " # %5.2f insns per cycle ", ratio);
 
 		total = avg_stats(&runtime_stalled_cycles_front_stats[cpu]);
 		total = max(total, avg_stats(&runtime_stalled_cycles_back_stats[cpu]));
 
 		if (total && avg) {
 			ratio = total / avg;
-			fprintf(stderr, "\n # %5.2f stalled cycles per insn", ratio);
+			fprintf(output, "\n # %5.2f stalled cycles per insn", ratio);
 		}
 
 	} else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES) &&

@@ -831,7 +842,7 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg)
 		if (total)
 			ratio = avg * 100 / total;
 
-		fprintf(stderr, " # %8.3f %% of all cache refs ", ratio);
+		fprintf(output, " # %8.3f %% of all cache refs ", ratio);
 
 	} else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) {
 		print_stalled_cycles_frontend(cpu, evsel, avg);

@@ -843,16 +854,16 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg)
 		if (total)
 			ratio = 1.0 * avg / total;
 
-		fprintf(stderr, " # %8.3f GHz ", ratio);
+		fprintf(output, " # %8.3f GHz ", ratio);
 	} else if (runtime_nsecs_stats[cpu].n != 0) {
 		total = avg_stats(&runtime_nsecs_stats[cpu]);
 
 		if (total)
 			ratio = 1000.0 * avg / total;
 
-		fprintf(stderr, " # %8.3f M/sec ", ratio);
+		fprintf(output, " # %8.3f M/sec ", ratio);
 	} else {
-		fprintf(stderr, " ");
+		fprintf(output, " ");
 	}
 }
 

@@ -867,7 +878,7 @@ static void print_counter_aggr(struct perf_evsel *counter)
 	int scaled = counter->counts->scaled;
 
 	if (scaled == -1) {
-		fprintf(stderr, "%*s%s%*s",
+		fprintf(output, "%*s%s%*s",
 			csv_output ? 0 : 18,
 			counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
 			csv_sep,

@@ -875,9 +886,9 @@ static void print_counter_aggr(struct perf_evsel *counter)
 			event_name(counter));
 
 		if (counter->cgrp)
-			fprintf(stderr, "%s%s", csv_sep, counter->cgrp->name);
+			fprintf(output, "%s%s", csv_sep, counter->cgrp->name);
 
-		fputc('\n', stderr);
+		fputc('\n', output);
 		return;
 	}
 

@@ -889,7 +900,7 @@ static void print_counter_aggr(struct perf_evsel *counter)
 	print_noise(counter, avg);
 
 	if (csv_output) {
-		fputc('\n', stderr);
+		fputc('\n', output);
 		return;
 	}
 

@@ -899,9 +910,9 @@ static void print_counter_aggr(struct perf_evsel *counter)
 		avg_enabled = avg_stats(&ps->res_stats[1]);
 		avg_running = avg_stats(&ps->res_stats[2]);
 
-		fprintf(stderr, " [%5.2f%%]", 100 * avg_running / avg_enabled);
+		fprintf(output, " [%5.2f%%]", 100 * avg_running / avg_enabled);
 	}
-	fprintf(stderr, "\n");
+	fprintf(output, "\n");
 }
 
 /*

@@ -918,7 +929,7 @@ static void print_counter(struct perf_evsel *counter)
 		ena = counter->counts->cpu[cpu].ena;
 		run = counter->counts->cpu[cpu].run;
 		if (run == 0 || ena == 0) {
-			fprintf(stderr, "CPU%*d%s%*s%s%*s",
+			fprintf(output, "CPU%*d%s%*s%s%*s",
 				csv_output ? 0 : -4,
 				evsel_list->cpus->map[cpu], csv_sep,
 				csv_output ? 0 : 18,

@@ -928,9 +939,10 @@ static void print_counter(struct perf_evsel *counter)
 				event_name(counter));
 
 			if (counter->cgrp)
-				fprintf(stderr, "%s%s", csv_sep, counter->cgrp->name);
+				fprintf(output, "%s%s",
+					csv_sep, counter->cgrp->name);
 
-			fputc('\n', stderr);
+			fputc('\n', output);
 			continue;
 		}
 

@@ -943,9 +955,10 @@ static void print_counter(struct perf_evsel *counter)
 			print_noise(counter, 1.0);
 
 			if (run != ena)
-				fprintf(stderr, " (%.2f%%)", 100.0 * run / ena);
+				fprintf(output, " (%.2f%%)",
+					100.0 * run / ena);
 		}
-		fputc('\n', stderr);
+		fputc('\n', output);
 	}
 }
 

@@ -957,21 +970,21 @@ static void print_stat(int argc, const char **argv)
 	fflush(stdout);
 
 	if (!csv_output) {
-		fprintf(stderr, "\n");
-		fprintf(stderr, " Performance counter stats for ");
+		fprintf(output, "\n");
+		fprintf(output, " Performance counter stats for ");
 		if(target_pid == -1 && target_tid == -1) {
-			fprintf(stderr, "\'%s", argv[0]);
+			fprintf(output, "\'%s", argv[0]);
 			for (i = 1; i < argc; i++)
-				fprintf(stderr, " %s", argv[i]);
+				fprintf(output, " %s", argv[i]);
 		} else if (target_pid != -1)
-			fprintf(stderr, "process id \'%d", target_pid);
+			fprintf(output, "process id \'%d", target_pid);
 		else
-			fprintf(stderr, "thread id \'%d", target_tid);
+			fprintf(output, "thread id \'%d", target_tid);
 
-		fprintf(stderr, "\'");
+		fprintf(output, "\'");
 		if (run_count > 1)
-			fprintf(stderr, " (%d runs)", run_count);
-		fprintf(stderr, ":\n\n");
+			fprintf(output, " (%d runs)", run_count);
+		fprintf(output, ":\n\n");
 	}
 
 	if (no_aggr) {

@@ -984,15 +997,15 @@ static void print_stat(int argc, const char **argv)
 
 	if (!csv_output) {
 		if (!null_run)
-			fprintf(stderr, "\n");
-		fprintf(stderr, " %17.9f seconds time elapsed",
+			fprintf(output, "\n");
+		fprintf(output, " %17.9f seconds time elapsed",
 			avg_stats(&walltime_nsecs_stats)/1e9);
 		if (run_count > 1) {
-			fprintf(stderr, " ");
+			fprintf(output, " ");
 			print_noise_pct(stddev_stats(&walltime_nsecs_stats),
 					avg_stats(&walltime_nsecs_stats));
 		}
-		fprintf(stderr, "\n\n");
+		fprintf(output, "\n\n");
 	}
 }
 
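Every print_*_misses() helper above repeats the same three-way threshold: red above one ratio, yellow above a lower one, default otherwise. A hedged consolidation of that pattern; the PERF_COLOR_* values below are stand-ins for perf's color strings, and the helper itself is illustrative, since perf keeps the checks inline:

/* Stand-ins for perf's PERF_COLOR_* escape strings (assumed values). */
static const char *PERF_COLOR_NORMAL = "";
static const char *PERF_COLOR_RED    = "\033[31m";
static const char *PERF_COLOR_YELLOW = "\033[33m";

static const char *ratio_color(double ratio, double red_at, double yellow_at)
{
	const char *color = PERF_COLOR_NORMAL;

	if (ratio > red_at)
		color = PERF_COLOR_RED;
	else if (ratio > yellow_at)
		color = PERF_COLOR_YELLOW;
	return color;
}

With it, print_branch_misses() could read color_fprintf(output, ratio_color(ratio, 20.0, 5.0), "%6.2f%%", ratio); only the yellow thresholds are visible in these hunks, so the red values are assumptions.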
@@ -1030,6 +1043,8 @@ static int stat__set_big_num(const struct option *opt __used,
 	return 0;
 }
 
+static bool append_file;
+
 static const struct option options[] = {
 	OPT_CALLBACK('e', "event", &evsel_list, "event",
 		     "event selector. use 'perf list' to list available events",

@@ -1070,6 +1085,11 @@ static const struct option options[] = {
 	OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
 		     "monitor event in cgroup name only",
 		     parse_cgroups),
+	OPT_STRING('o', "output", &output_name, "file",
+		    "output file name"),
+	OPT_BOOLEAN(0, "append", &append_file, "append to the output file"),
+	OPT_INTEGER(0, "log-fd", &output_fd,
+		    "log output to fd, instead of stderr"),
 	OPT_END()
 };
 

@@ -1141,6 +1161,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
 {
 	struct perf_evsel *pos;
 	int status = -ENOMEM;
+	const char *mode;
 
 	setlocale(LC_ALL, "");
 

@@ -1151,16 +1172,46 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
 	argc = parse_options(argc, argv, options, stat_usage,
 		PARSE_OPT_STOP_AT_NON_OPTION);
 
-	if (csv_sep)
+	output = stderr;
+	if (output_name && strcmp(output_name, "-"))
+		output = NULL;
+
+	if (output_name && output_fd) {
+		fprintf(stderr, "cannot use both --output and --log-fd\n");
+		usage_with_options(stat_usage, options);
+	}
+	if (!output) {
+		struct timespec tm;
+		mode = append_file ? "a" : "w";
+
+		output = fopen(output_name, mode);
+		if (!output) {
+			perror("failed to create output file");
+			exit(-1);
+		}
+		clock_gettime(CLOCK_REALTIME, &tm);
+		fprintf(output, "# started on %s\n", ctime(&tm.tv_sec));
+	} else if (output_fd != 2) {
+		mode = append_file ? "a" : "w";
+		output = fdopen(output_fd, mode);
+		if (!output) {
+			perror("Failed opening logfd");
+			return -errno;
+		}
+	}
+
+	if (csv_sep) {
 		csv_output = true;
-	else
+		if (!strcmp(csv_sep, "\\t"))
+			csv_sep = "\t";
+	} else
 		csv_sep = DEFAULT_SEPARATOR;
 
 	/*
 	 * let the spreadsheet do the pretty-printing
 	 */
 	if (csv_output) {
-		/* User explicitely passed -B? */
+		/* User explicitly passed -B? */
 		if (big_num_opt == 1) {
 			fprintf(stderr, "-B option not supported with -x\n");
 			usage_with_options(stat_usage, options);

@@ -1226,7 +1277,8 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
 	status = 0;
 	for (run_idx = 0; run_idx < run_count; run_idx++) {
 		if (run_count != 1 && verbose)
-			fprintf(stderr, "[ perf stat: executing run #%d ... ]\n", run_idx + 1);
+			fprintf(output, "[ perf stat: executing run #%d ... ]\n",
+				run_idx + 1);
 
 		if (sync_run)
 			sync();
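The new --output/--append/--log-fd handling in cmd_stat() boils down to: default to stderr, switch to a file when --output names one (with "-" meaning keep stderr), or wrap an inherited descriptor with fdopen() for --log-fd, rejecting the ambiguous combination of both flags. A standalone sketch of that decision; variable names follow the diff, the surrounding program and helper name are assumptions:

#include <stdio.h>
#include <string.h>
#include <time.h>

static FILE *open_stat_output(const char *output_name, int output_fd, int append_file)
{
	const char *mode = append_file ? "a" : "w";
	FILE *output = stderr;			/* the historical default */

	if (output_name && output_fd) {
		fprintf(stderr, "cannot use both --output and --log-fd\n");
		return NULL;
	}

	if (output_name && strcmp(output_name, "-"))
		output = NULL;			/* "-" keeps stderr */

	if (!output) {				/* --output FILE */
		struct timespec tm;

		output = fopen(output_name, mode);
		if (!output)
			return NULL;
		clock_gettime(CLOCK_REALTIME, &tm);
		fprintf(output, "# started on %s\n", ctime(&tm.tv_sec));
	} else if (output_fd != 2) {		/* --log-fd N; fd 2 is stderr already */
		output = fdopen(output_fd, mode);
	}

	return output;
}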
tools/perf/builtin-top.c

@@ -5,6 +5,7 @@
  * any workload, CPU or specific PID.
  *
  * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
+ *		  2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
  *
  * Improvements and fixes by:
  *

@@ -36,6 +37,7 @@
 #include "util/parse-events.h"
 #include "util/cpumap.h"
 #include "util/xyarray.h"
+#include "util/sort.h"
 
 #include "util/debug.h"
 

@@ -65,12 +67,8 @@
 static struct perf_top top = {
 	.count_filter = 5,
 	.delay_secs = 2,
-	.display_weighted = -1,
 	.target_pid = -1,
 	.target_tid = -1,
-	.active_symbols = LIST_HEAD_INIT(top.active_symbols),
-	.active_symbols_lock = PTHREAD_MUTEX_INITIALIZER,
-	.active_symbols_cond = PTHREAD_COND_INITIALIZER,
 	.freq = 1000, /* 1 KHz */
 };
 

@@ -78,6 +76,12 @@ static bool system_wide = false;
 
 static bool use_tui, use_stdio;
 
+static bool sort_has_symbols;
+
+static bool dont_use_callchains;
+static char callchain_default_opt[] = "fractal,0.5,callee";
+
+
 static int default_interval = 0;
 
 static bool kptr_restrict_warned;

@@ -85,7 +89,6 @@ static bool vmlinux_warned;
 static bool inherit = false;
 static int realtime_prio = 0;
 static bool group = false;
-static unsigned int page_size;
 static unsigned int mmap_pages = 128;
 
 static bool dump_symtab = false;

@@ -93,7 +96,6 @@ static bool dump_symtab = false;
 static struct winsize winsize;
 
 static const char *sym_filter = NULL;
-struct sym_entry *sym_filter_entry_sched = NULL;
 static int sym_pcnt_filter = 5;
 
 /*

@@ -136,18 +138,18 @@ static void sig_winch_handler(int sig __used)
 	update_print_entries(&winsize);
 }
 
-static int parse_source(struct sym_entry *syme)
+static int parse_source(struct hist_entry *he)
 {
 	struct symbol *sym;
 	struct annotation *notes;
 	struct map *map;
 	int err = -1;
 
-	if (!syme)
+	if (!he || !he->ms.sym)
 		return -1;
 
-	sym = sym_entry__symbol(syme);
-	map = syme->map;
+	sym = he->ms.sym;
+	map = he->ms.map;
 
 	/*
 	 * We can't annotate with just /proc/kallsyms

@@ -175,53 +177,62 @@ static int parse_source(struct sym_entry *syme)
 		return err;
 	}
 
-	err = symbol__annotate(sym, syme->map, 0);
+	err = symbol__annotate(sym, map, 0);
 	if (err == 0) {
 out_assign:
-		top.sym_filter_entry = syme;
+		top.sym_filter_entry = he;
 	}
 
 	pthread_mutex_unlock(&notes->lock);
 	return err;
 }
 
-static void __zero_source_counters(struct sym_entry *syme)
+static void __zero_source_counters(struct hist_entry *he)
 {
-	struct symbol *sym = sym_entry__symbol(syme);
+	struct symbol *sym = he->ms.sym;
 	symbol__annotate_zero_histograms(sym);
 }
 
-static void record_precise_ip(struct sym_entry *syme, struct map *map,
-			      int counter, u64 ip)
+static void record_precise_ip(struct hist_entry *he, int counter, u64 ip)
 {
 	struct annotation *notes;
 	struct symbol *sym;
 
-	if (syme != top.sym_filter_entry)
+	if (he == NULL || he->ms.sym == NULL ||
+	    (he != top.sym_filter_entry && use_browser != 1))
 		return;
 
-	sym = sym_entry__symbol(syme);
+	sym = he->ms.sym;
 	notes = symbol__annotation(sym);
 
 	if (pthread_mutex_trylock(&notes->lock))
 		return;
 
-	ip = map->map_ip(map, ip);
-	symbol__inc_addr_samples(sym, map, counter, ip);
+	if (notes->src == NULL &&
+	    symbol__alloc_hist(sym, top.evlist->nr_entries) < 0) {
+		pthread_mutex_unlock(&notes->lock);
+		pr_err("Not enough memory for annotating '%s' symbol!\n",
+		       sym->name);
+		sleep(1);
+		return;
+	}
+
+	ip = he->ms.map->map_ip(he->ms.map, ip);
+	symbol__inc_addr_samples(sym, he->ms.map, counter, ip);
 
 	pthread_mutex_unlock(&notes->lock);
 }
 
-static void show_details(struct sym_entry *syme)
+static void show_details(struct hist_entry *he)
 {
 	struct annotation *notes;
 	struct symbol *symbol;
 	int more;
 
-	if (!syme)
+	if (!he)
 		return;
 
-	symbol = sym_entry__symbol(syme);
+	symbol = he->ms.sym;
 	notes = symbol__annotation(symbol);
 
 	pthread_mutex_lock(&notes->lock);

@@ -232,7 +243,7 @@ static void show_details(struct sym_entry *syme)
 	printf("Showing %s for %s\n", event_name(top.sym_evsel), symbol->name);
 	printf(" Events Pcnt (>=%d%%)\n", sym_pcnt_filter);
 
-	more = symbol__annotate_printf(symbol, syme->map, top.sym_evsel->idx,
+	more = symbol__annotate_printf(symbol, he->ms.map, top.sym_evsel->idx,
 				       0, sym_pcnt_filter, top.print_entries, 4);
 	if (top.zero)
 		symbol__annotate_zero_histogram(symbol, top.sym_evsel->idx);

@@ -246,21 +257,28 @@ out_unlock:
 
 static const char CONSOLE_CLEAR[] = "[H[2J";
 
-static void __list_insert_active_sym(struct sym_entry *syme)
+static struct hist_entry *
+perf_session__add_hist_entry(struct perf_session *session,
+			     struct addr_location *al,
+			     struct perf_sample *sample,
+			     struct perf_evsel *evsel)
 {
-	list_add(&syme->node, &top.active_symbols);
+	struct hist_entry *he;
+
+	he = __hists__add_entry(&evsel->hists, al, NULL, sample->period);
+	if (he == NULL)
+		return NULL;
+
+	session->hists.stats.total_period += sample->period;
+	hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
+	return he;
 }
 
-static void print_sym_table(struct perf_session *session)
+static void print_sym_table(void)
 {
 	char bf[160];
 	int printed = 0;
-	struct rb_node *nd;
-	struct sym_entry *syme;
-	struct rb_root tmp = RB_ROOT;
 	const int win_width = winsize.ws_col - 1;
-	int sym_width, dso_width, dso_short_width;
-	float sum_ksamples = perf_top__decay_samples(&top, &tmp);
 
 	puts(CONSOLE_CLEAR);
 

@@ -271,10 +289,12 @@ static void print_sym_table(struct perf_session *session)
 
 	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
 
-	if (session->hists.stats.total_lost != 0) {
+	if (top.total_lost_warned != top.session->hists.stats.total_lost) {
+		top.total_lost_warned = top.session->hists.stats.total_lost;
 		color_fprintf(stdout, PERF_COLOR_RED, "WARNING:");
 		printf(" LOST %" PRIu64 " events, Check IO/CPU overload\n",
-		       session->hists.stats.total_lost);
+		       top.total_lost_warned);
+		++printed;
 	}
 
 	if (top.sym_filter_entry) {

@@ -282,58 +302,15 @@ static void print_sym_table(struct perf_session *session)
 		return;
 	}
 
-	perf_top__find_widths(&top, &tmp, &dso_width, &dso_short_width,
-			      &sym_width);
-
-	if (sym_width + dso_width > winsize.ws_col - 29) {
-		dso_width = dso_short_width;
-		if (sym_width + dso_width > winsize.ws_col - 29)
-			sym_width = winsize.ws_col - dso_width - 29;
-	}
+	hists__collapse_resort_threaded(&top.sym_evsel->hists);
+	hists__output_resort_threaded(&top.sym_evsel->hists);
+	hists__decay_entries_threaded(&top.sym_evsel->hists,
+				      top.hide_user_symbols,
+				      top.hide_kernel_symbols);
+	hists__output_recalc_col_len(&top.sym_evsel->hists, winsize.ws_row - 3);
 	putchar('\n');
-	if (top.evlist->nr_entries == 1)
-		printf(" samples pcnt");
-	else
-		printf(" weight samples pcnt");
-
-	if (verbose)
-		printf(" RIP ");
-	printf(" %-*.*s DSO\n", sym_width, sym_width, "function");
-	printf(" %s _______ _____",
-	       top.evlist->nr_entries == 1 ? " " : "______");
-	if (verbose)
-		printf(" ________________");
-	printf(" %-*.*s", sym_width, sym_width, graph_line);
-	printf(" %-*.*s", dso_width, dso_width, graph_line);
-	puts("\n");
-
-	for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) {
-		struct symbol *sym;
-		double pcnt;
-
-		syme = rb_entry(nd, struct sym_entry, rb_node);
-		sym = sym_entry__symbol(syme);
-		if (++printed > top.print_entries ||
-		    (int)syme->snap_count < top.count_filter)
-			continue;
-
-		pcnt = 100.0 - (100.0 * ((sum_ksamples - syme->snap_count) /
-					 sum_ksamples));
-
-		if (top.evlist->nr_entries == 1 || !top.display_weighted)
-			printf("%20.2f ", syme->weight);
-		else
-			printf("%9.1f %10ld ", syme->weight, syme->snap_count);
-
-		percent_color_fprintf(stdout, "%4.1f%%", pcnt);
-		if (verbose)
-			printf(" %016" PRIx64, sym->start);
-		printf(" %-*.*s", sym_width, sym_width, sym->name);
-		printf(" %-*.*s\n", dso_width, dso_width,
-		       dso_width >= syme->map->dso->long_name_len ?
-					syme->map->dso->long_name :
-					syme->map->dso->short_name);
-	}
+	hists__fprintf(&top.sym_evsel->hists, NULL, false, false,
+		       winsize.ws_row - 4 - printed, win_width, stdout);
 }
 
 static void prompt_integer(int *target, const char *msg)

@@ -371,10 +348,11 @@ static void prompt_percent(int *target, const char *msg)
 	*target = tmp;
 }
 
-static void prompt_symbol(struct sym_entry **target, const char *msg)
+static void prompt_symbol(struct hist_entry **target, const char *msg)
 {
 	char *buf = malloc(0), *p;
-	struct sym_entry *syme = *target, *n, *found = NULL;
+	struct hist_entry *syme = *target, *n, *found = NULL;
+	struct rb_node *next;
 	size_t dummy = 0;
 
 	/* zero counters of active symbol */

@@ -391,17 +369,14 @@ static void prompt_symbol(struct sym_entry **target, const char *msg)
 	if (p)
 		*p = 0;
 
-	pthread_mutex_lock(&top.active_symbols_lock);
-	syme = list_entry(top.active_symbols.next, struct sym_entry, node);
-	pthread_mutex_unlock(&top.active_symbols_lock);
-
-	list_for_each_entry_safe_from(syme, n, &top.active_symbols, node) {
-		struct symbol *sym = sym_entry__symbol(syme);
-
-		if (!strcmp(buf, sym->name)) {
-			found = syme;
+	next = rb_first(&top.sym_evsel->hists.entries);
+	while (next) {
+		n = rb_entry(next, struct hist_entry, rb_node);
+		if (n->ms.sym && !strcmp(buf, n->ms.sym->name)) {
+			found = n;
 			break;
 		}
+		next = rb_next(&n->rb_node);
 	}
 
 	if (!found) {

@@ -420,7 +395,7 @@ static void print_mapped_keys(void)
 	char *name = NULL;
 
 	if (top.sym_filter_entry) {
-		struct symbol *sym = sym_entry__symbol(top.sym_filter_entry);
+		struct symbol *sym = top.sym_filter_entry->ms.sym;
 		name = sym->name;
 	}
 

@@ -437,9 +412,6 @@ static void print_mapped_keys(void)
 	fprintf(stdout, "\t[s] annotate symbol. \t(%s)\n", name?: "NULL");
 	fprintf(stdout, "\t[S] stop annotation.\n");
 
-	if (top.evlist->nr_entries > 1)
-		fprintf(stdout, "\t[w] toggle display weighted/count[E]r. \t(%d)\n", top.display_weighted ? 1 : 0);
-
 	fprintf(stdout,
 		"\t[K] hide kernel_symbols symbols. \t(%s)\n",
 		top.hide_kernel_symbols ? "yes" : "no");

@@ -466,7 +438,6 @@ static int key_mapped(int c)
 	case 'S':
 		return 1;
 	case 'E':
-	case 'w':
 		return top.evlist->nr_entries > 1 ? 1 : 0;
 	default:
 		break;

@@ -475,7 +446,7 @@ static int key_mapped(int c)
 	return 0;
 }
 
-static void handle_keypress(struct perf_session *session, int c)
+static void handle_keypress(int c)
 {
 	if (!key_mapped(c)) {
 		struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };

@@ -551,7 +522,7 @@ static void handle_keypress(struct perf_session *session, int c)
 	case 'Q':
 		printf("exiting.\n");
 		if (dump_symtab)
-			perf_session__fprintf_dsos(session, stderr);
+			perf_session__fprintf_dsos(top.session, stderr);
 		exit(0);
 	case 's':
 		prompt_symbol(&top.sym_filter_entry, "Enter details symbol");

@@ -560,7 +531,7 @@ static void handle_keypress(struct perf_session *session, int c)
 		if (!top.sym_filter_entry)
 			break;
 		else {
-			struct sym_entry *syme = top.sym_filter_entry;
+			struct hist_entry *syme = top.sym_filter_entry;
 
 			top.sym_filter_entry = NULL;
 			__zero_source_counters(syme);

@@ -569,9 +540,6 @@ static void handle_keypress(struct perf_session *session, int c)
 	case 'U':
 		top.hide_user_symbols = !top.hide_user_symbols;
 		break;
-	case 'w':
-		top.display_weighted = ~top.display_weighted;
-		break;
 	case 'z':
 		top.zero = !top.zero;
 		break;
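prompt_symbol() above switched from the old active_symbols list to walking the hist-entry rb-tree with rb_first()/rb_next(), the standard in-order traversal idiom for the kernel's rbtree. A generic sketch of that walk, assuming the kernel-style rbtree API and perf's struct hist_entry with rb_node and ms.sym members, as the diff itself does:

/* Assumes <linux/rbtree.h> (rb_first/rb_next/rb_entry) and perf's
 * struct hist_entry { struct rb_node rb_node; struct map_symbol ms; ... }. */
static struct hist_entry *hists__find_by_name(struct rb_root *root, const char *name)
{
	struct rb_node *next = rb_first(root);	/* leftmost = first in sort order */

	while (next) {
		struct hist_entry *n = rb_entry(next, struct hist_entry, rb_node);

		if (n->ms.sym && !strcmp(name, n->ms.sym->name))
			return n;

		next = rb_next(&n->rb_node);	/* in-order successor */
	}
	return NULL;
}

rb_entry() is just container_of(), so the walk allocates nothing and visits entries in sort order.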
|
@ -580,19 +548,31 @@ static void handle_keypress(struct perf_session *session, int c)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void perf_top__sort_new_samples(void *arg)
|
||||||
|
{
|
||||||
|
struct perf_top *t = arg;
|
||||||
|
perf_top__reset_sample_counters(t);
|
||||||
|
|
||||||
|
if (t->evlist->selected != NULL)
|
||||||
|
t->sym_evsel = t->evlist->selected;
|
||||||
|
|
||||||
|
hists__collapse_resort_threaded(&t->sym_evsel->hists);
|
||||||
|
hists__output_resort_threaded(&t->sym_evsel->hists);
|
||||||
|
hists__decay_entries_threaded(&t->sym_evsel->hists,
|
||||||
|
top.hide_user_symbols,
|
||||||
|
top.hide_kernel_symbols);
|
||||||
|
hists__output_recalc_col_len(&t->sym_evsel->hists, winsize.ws_row - 3);
|
||||||
|
}
|
||||||
|
|
||||||
static void *display_thread_tui(void *arg __used)
|
static void *display_thread_tui(void *arg __used)
|
||||||
{
|
{
|
||||||
int err = 0;
|
const char *help = "For a higher level overview, try: perf top --sort comm,dso";
|
||||||
pthread_mutex_lock(&top.active_symbols_lock);
|
|
||||||
while (list_empty(&top.active_symbols)) {
|
perf_top__sort_new_samples(&top);
|
||||||
err = pthread_cond_wait(&top.active_symbols_cond,
|
perf_evlist__tui_browse_hists(top.evlist, help,
|
||||||
&top.active_symbols_lock);
|
perf_top__sort_new_samples,
|
||||||
if (err)
|
&top, top.delay_secs);
|
||||||
break;
|
|
||||||
}
|
|
||||||
pthread_mutex_unlock(&top.active_symbols_lock);
|
|
||||||
if (!err)
|
|
||||||
perf_top__tui_browser(&top);
|
|
||||||
exit_browser(0);
|
exit_browser(0);
|
||||||
exit(0);
|
exit(0);
|
||||||
return NULL;
|
return NULL;
|
||||||
|
@ -603,7 +583,6 @@ static void *display_thread(void *arg __used)
|
||||||
struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
|
struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
|
||||||
struct termios tc, save;
|
struct termios tc, save;
|
||||||
int delay_msecs, c;
|
int delay_msecs, c;
|
||||||
struct perf_session *session = (struct perf_session *) arg;
|
|
||||||
|
|
||||||
tcgetattr(0, &save);
|
tcgetattr(0, &save);
|
||||||
tc = save;
|
tc = save;
|
||||||
|
@ -611,20 +590,35 @@ static void *display_thread(void *arg __used)
|
||||||
tc.c_cc[VMIN] = 0;
|
tc.c_cc[VMIN] = 0;
|
||||||
tc.c_cc[VTIME] = 0;
|
tc.c_cc[VTIME] = 0;
|
||||||
|
|
||||||
|
pthread__unblock_sigwinch();
|
||||||
repeat:
|
repeat:
|
||||||
delay_msecs = top.delay_secs * 1000;
|
delay_msecs = top.delay_secs * 1000;
|
||||||
tcsetattr(0, TCSANOW, &tc);
|
tcsetattr(0, TCSANOW, &tc);
|
||||||
/* trash return*/
|
/* trash return*/
|
||||||
getc(stdin);
|
getc(stdin);
|
||||||
|
|
||||||
do {
|
while (1) {
|
||||||
print_sym_table(session);
|
print_sym_table();
|
||||||
} while (!poll(&stdin_poll, 1, delay_msecs) == 1);
|
/*
|
||||||
|
* Either timeout expired or we got an EINTR due to SIGWINCH,
|
||||||
|
* refresh screen in both cases.
|
||||||
|
*/
|
||||||
|
switch (poll(&stdin_poll, 1, delay_msecs)) {
|
||||||
|
case 0:
|
||||||
|
continue;
|
||||||
|
case -1:
|
||||||
|
if (errno == EINTR)
|
||||||
|
continue;
|
||||||
|
/* Fall trhu */
|
||||||
|
default:
|
||||||
|
goto process_hotkey;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
process_hotkey:
|
||||||
c = getc(stdin);
|
c = getc(stdin);
|
||||||
tcsetattr(0, TCSAFLUSH, &save);
|
tcsetattr(0, TCSAFLUSH, &save);
|
||||||
|
|
||||||
handle_keypress(session, c);
|
handle_keypress(c);
|
||||||
goto repeat;
|
goto repeat;
|
||||||
|
|
||||||
return NULL;
|
return NULL;
|
||||||
|
@ -645,9 +639,8 @@ static const char *skip_symbols[] = {
|
||||||
NULL
|
NULL
|
||||||
};
|
};
|
||||||
|
|
||||||
static int symbol_filter(struct map *map, struct symbol *sym)
|
static int symbol_filter(struct map *map __used, struct symbol *sym)
|
||||||
{
|
{
|
||||||
struct sym_entry *syme;
|
|
||||||
const char *name = sym->name;
|
const char *name = sym->name;
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
|
@ -667,16 +660,6 @@ static int symbol_filter(struct map *map, struct symbol *sym)
|
||||||
strstr(name, "_text_end"))
|
strstr(name, "_text_end"))
|
||||||
return 1;
|
return 1;
|
||||||
|
|
||||||
syme = symbol__priv(sym);
|
|
||||||
syme->map = map;
|
|
||||||
symbol__annotate_init(map, sym);
|
|
||||||
|
|
||||||
if (!top.sym_filter_entry && sym_filter && !strcmp(name, sym_filter)) {
|
|
||||||
/* schedule initial sym_filter_entry setup */
|
|
||||||
sym_filter_entry_sched = syme;
|
|
||||||
sym_filter = NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
for (i = 0; skip_symbols[i]; i++) {
|
for (i = 0; skip_symbols[i]; i++) {
|
||||||
if (!strcmp(skip_symbols[i], name)) {
|
if (!strcmp(skip_symbols[i], name)) {
|
||||||
sym->ignore = true;
|
sym->ignore = true;
|
||||||
|
@ -691,10 +674,11 @@ static void perf_event__process_sample(const union perf_event *event,
|
||||||
struct perf_sample *sample,
|
struct perf_sample *sample,
|
||||||
struct perf_session *session)
|
struct perf_session *session)
|
||||||
{
|
{
|
||||||
|
struct symbol *parent = NULL;
|
||||||
u64 ip = event->ip.ip;
|
u64 ip = event->ip.ip;
|
||||||
struct sym_entry *syme;
|
|
||||||
struct addr_location al;
|
struct addr_location al;
|
||||||
struct machine *machine;
|
struct machine *machine;
|
||||||
|
int err;
|
||||||
u8 origin = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
|
u8 origin = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
|
||||||
|
|
||||||
++top.samples;
|
++top.samples;
|
||||||
|
@ -783,46 +767,41 @@ static void perf_event__process_sample(const union perf_event *event,
|
||||||
sleep(5);
|
sleep(5);
|
||||||
vmlinux_warned = true;
|
vmlinux_warned = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
return;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* let's see, whether we need to install initial sym_filter_entry */
|
if (al.sym == NULL || !al.sym->ignore) {
|
||||||
if (sym_filter_entry_sched) {
|
|
||||||
top.sym_filter_entry = sym_filter_entry_sched;
|
|
||||||
sym_filter_entry_sched = NULL;
|
|
||||||
if (parse_source(top.sym_filter_entry) < 0) {
|
|
||||||
struct symbol *sym = sym_entry__symbol(top.sym_filter_entry);
|
|
||||||
|
|
||||||
pr_err("Can't annotate %s", sym->name);
|
|
||||||
if (top.sym_filter_entry->map->dso->symtab_type == SYMTAB__KALLSYMS) {
|
|
||||||
pr_err(": No vmlinux file was found in the path:\n");
|
|
||||||
machine__fprintf_vmlinux_path(machine, stderr);
|
|
||||||
} else
|
|
||||||
pr_err(".\n");
|
|
||||||
exit(1);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
syme = symbol__priv(al.sym);
|
|
||||||
if (!al.sym->ignore) {
|
|
||||||
struct perf_evsel *evsel;
|
struct perf_evsel *evsel;
|
||||||
|
struct hist_entry *he;
|
||||||
|
|
||||||
evsel = perf_evlist__id2evsel(top.evlist, sample->id);
|
evsel = perf_evlist__id2evsel(top.evlist, sample->id);
|
||||||
assert(evsel != NULL);
|
assert(evsel != NULL);
|
||||||
syme->count[evsel->idx]++;
|
|
||||||
record_precise_ip(syme, al.map, evsel->idx, ip);
|
if ((sort__has_parent || symbol_conf.use_callchain) &&
|
||||||
pthread_mutex_lock(&top.active_symbols_lock);
|
sample->callchain) {
|
||||||
if (list_empty(&syme->node) || !syme->node.next) {
|
err = perf_session__resolve_callchain(session, al.thread,
|
||||||
static bool first = true;
|
sample->callchain, &parent);
|
||||||
__list_insert_active_sym(syme);
|
if (err)
|
||||||
if (first) {
|
return;
|
||||||
pthread_cond_broadcast(&top.active_symbols_cond);
|
|
||||||
first = false;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
pthread_mutex_unlock(&top.active_symbols_lock);
|
|
||||||
|
he = perf_session__add_hist_entry(session, &al, sample, evsel);
|
||||||
|
if (he == NULL) {
|
||||||
|
pr_err("Problem incrementing symbol period, skipping event\n");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (symbol_conf.use_callchain) {
|
||||||
|
err = callchain_append(he->callchain, &session->callchain_cursor,
|
||||||
|
sample->period);
|
||||||
|
if (err)
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (sort_has_symbols)
|
||||||
|
record_precise_ip(he, evsel->idx, ip);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void perf_session__mmap_read_idx(struct perf_session *self, int idx)
|
static void perf_session__mmap_read_idx(struct perf_session *self, int idx)
|
||||||
|
@@ -873,7 +852,11 @@ static void start_counters(struct perf_evlist *evlist)
 			attr->read_format |= PERF_FORMAT_ID;
 		}
 
+		if (symbol_conf.use_callchain)
+			attr->sample_type |= PERF_SAMPLE_CALLCHAIN;
+
 		attr->mmap = 1;
+		attr->comm = 1;
 		attr->inherit = inherit;
 try_again:
 		if (perf_evsel__open(counter, top.evlist->cpus,
@@ -928,35 +911,56 @@ out_err:
 	exit(0);
 }
 
+static int setup_sample_type(void)
+{
+	if (!sort_has_symbols) {
+		if (symbol_conf.use_callchain) {
+			ui__warning("Selected -g but \"sym\" not present in --sort/-s.");
+			return -EINVAL;
+		}
+	} else if (!dont_use_callchains && callchain_param.mode != CHAIN_NONE) {
+		if (callchain_register_param(&callchain_param) < 0) {
+			ui__warning("Can't register callchain params.\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
 static int __cmd_top(void)
 {
 	pthread_t thread;
-	int ret __used;
+	int ret;
 	/*
 	 * FIXME: perf_session__new should allow passing a O_MMAP, so that all this
 	 * mmap reading, etc is encapsulated in it. Use O_WRONLY for now.
 	 */
-	struct perf_session *session = perf_session__new(NULL, O_WRONLY, false, false, NULL);
-	if (session == NULL)
+	top.session = perf_session__new(NULL, O_WRONLY, false, false, NULL);
+	if (top.session == NULL)
 		return -ENOMEM;
 
+	ret = setup_sample_type();
+	if (ret)
+		goto out_delete;
+
 	if (top.target_tid != -1)
 		perf_event__synthesize_thread_map(top.evlist->threads,
-						  perf_event__process, session);
+						  perf_event__process, top.session);
 	else
-		perf_event__synthesize_threads(perf_event__process, session);
+		perf_event__synthesize_threads(perf_event__process, top.session);
 
 	start_counters(top.evlist);
-	session->evlist = top.evlist;
-	perf_session__update_sample_type(session);
+	top.session->evlist = top.evlist;
+	perf_session__update_sample_type(top.session);
 
 	/* Wait for a minimal set of events before starting the snapshot */
 	poll(top.evlist->pollfd, top.evlist->nr_fds, 100);
 
-	perf_session__mmap_read(session);
+	perf_session__mmap_read(top.session);
 
 	if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
-							     display_thread), session)) {
+							     display_thread), NULL)) {
 		printf("Could not create display thread.\n");
 		exit(-1);
 	}
@@ -974,12 +978,96 @@ static int __cmd_top(void)
 	while (1) {
 		u64 hits = top.samples;
 
-		perf_session__mmap_read(session);
+		perf_session__mmap_read(top.session);
 
 		if (hits == top.samples)
 			ret = poll(top.evlist->pollfd, top.evlist->nr_fds, 100);
 	}
 
+out_delete:
+	perf_session__delete(top.session);
+	top.session = NULL;
+
+	return 0;
+}
+
+static int
+parse_callchain_opt(const struct option *opt __used, const char *arg,
+		    int unset)
+{
+	char *tok, *tok2;
+	char *endptr;
+
+	/*
+	 * --no-call-graph
+	 */
+	if (unset) {
+		dont_use_callchains = true;
+		return 0;
+	}
+
+	symbol_conf.use_callchain = true;
+
+	if (!arg)
+		return 0;
+
+	tok = strtok((char *)arg, ",");
+	if (!tok)
+		return -1;
+
+	/* get the output mode */
+	if (!strncmp(tok, "graph", strlen(arg)))
+		callchain_param.mode = CHAIN_GRAPH_ABS;
+
+	else if (!strncmp(tok, "flat", strlen(arg)))
+		callchain_param.mode = CHAIN_FLAT;
+
+	else if (!strncmp(tok, "fractal", strlen(arg)))
+		callchain_param.mode = CHAIN_GRAPH_REL;
+
+	else if (!strncmp(tok, "none", strlen(arg))) {
+		callchain_param.mode = CHAIN_NONE;
+		symbol_conf.use_callchain = false;
+
+		return 0;
+	}
+
+	else
+		return -1;
+
+	/* get the min percentage */
+	tok = strtok(NULL, ",");
+	if (!tok)
+		goto setup;
+
+	callchain_param.min_percent = strtod(tok, &endptr);
+	if (tok == endptr)
+		return -1;
+
+	/* get the print limit */
+	tok2 = strtok(NULL, ",");
+	if (!tok2)
+		goto setup;
+
+	if (tok2[0] != 'c') {
+		callchain_param.print_limit = strtod(tok2, &endptr);
+		tok2 = strtok(NULL, ",");
+		if (!tok2)
+			goto setup;
+	}
+
+	/* get the call chain order */
+	if (!strcmp(tok2, "caller"))
+		callchain_param.order = ORDER_CALLER;
+	else if (!strcmp(tok2, "callee"))
+		callchain_param.order = ORDER_CALLEE;
+	else
+		return -1;
+setup:
+	if (callchain_register_param(&callchain_param) < 0) {
+		fprintf(stderr, "Can't register callchain params\n");
+		return -1;
+	}
 	return 0;
 }
 
@@ -1019,7 +1107,7 @@ static const struct option options[] = {
 		    "put the counters into a counter group"),
 	OPT_BOOLEAN('i', "inherit", &inherit,
 		    "child tasks inherit counters"),
-	OPT_STRING('s', "sym-annotate", &sym_filter, "symbol name",
+	OPT_STRING(0, "sym-annotate", &sym_filter, "symbol name",
 		    "symbol to annotate"),
 	OPT_BOOLEAN('z', "zero", &top.zero,
 		    "zero history across updates"),
@@ -1033,6 +1121,28 @@ static const struct option options[] = {
 	OPT_BOOLEAN(0, "stdio", &use_stdio, "Use the stdio interface"),
 	OPT_INCR('v', "verbose", &verbose,
 		    "be more verbose (show counter open errors, etc)"),
+	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
+		   "sort by key(s): pid, comm, dso, symbol, parent"),
+	OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
+		    "Show a column with the number of samples"),
+	OPT_CALLBACK_DEFAULT('G', "call-graph", NULL, "output_type,min_percent, call_order",
+		     "Display callchains using output_type (graph, flat, fractal, or none), min percent threshold and callchain order. "
+		     "Default: fractal,0.5,callee", &parse_callchain_opt,
+		     callchain_default_opt),
+	OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
+		    "Show a column with the sum of periods"),
+	OPT_STRING(0, "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
+		   "only consider symbols in these dsos"),
+	OPT_STRING(0, "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
+		   "only consider symbols in these comms"),
+	OPT_STRING(0, "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
+		   "only consider these symbols"),
+	OPT_BOOLEAN(0, "source", &symbol_conf.annotate_src,
+		    "Interleave source code with assembly code (default)"),
+	OPT_BOOLEAN(0, "asm-raw", &symbol_conf.annotate_asm_raw,
+		    "Display raw encoding of assembly instructions (default)"),
+	OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
+		   "Specify disassembler style (e.g. -M intel for intel syntax)"),
 	OPT_END()
 };
 
@@ -1045,18 +1155,16 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
 	if (top.evlist == NULL)
 		return -ENOMEM;
 
-	page_size = sysconf(_SC_PAGE_SIZE);
+	symbol_conf.exclude_other = false;
 
 	argc = parse_options(argc, argv, options, top_usage, 0);
 	if (argc)
 		usage_with_options(top_usage, options);
 
-	/*
-	 * XXX For now start disabled, only using TUI if explicitely asked for.
-	 * Change that when handle_keys equivalent gets written, live annotation
-	 * done, etc.
-	 */
-	use_browser = 0;
+	if (sort_order == default_sort_order)
+		sort_order = "dso,symbol";
+
+	setup_sorting(top_usage, options);
 
 	if (use_stdio)
 		use_browser = 0;
@@ -1119,13 +1227,22 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
 
 	top.sym_evsel = list_entry(top.evlist->entries.next, struct perf_evsel, node);
 
-	symbol_conf.priv_size = (sizeof(struct sym_entry) + sizeof(struct annotation) +
-				 (top.evlist->nr_entries + 1) * sizeof(unsigned long));
+	symbol_conf.priv_size = sizeof(struct annotation);
 
 	symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
 	if (symbol__init() < 0)
 		return -1;
 
+	sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list, "dso", stdout);
+	sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list, "comm", stdout);
+	sort_entry__setup_elide(&sort_sym, symbol_conf.sym_list, "symbol", stdout);
+
+	/*
+	 * Avoid annotation data structures overhead when symbols aren't on the
+	 * sort list.
+	 */
+	sort_has_symbols = sort_sym.list.next != NULL;
+
 	get_term_dimensions(&winsize);
 	if (top.print_entries == 0) {
 		update_print_entries(&winsize);
@@ -4,7 +4,6 @@
 #include "util/util.h"
 #include "util/strbuf.h"
 
-extern const char perf_version_string[];
 extern const char perf_usage_string[];
 extern const char perf_more_info_string[];
 
@@ -427,6 +427,24 @@ static void get_debugfs_mntpt(void)
 		debugfs_mntpt[0] = '\0';
 }
 
+static void pthread__block_sigwinch(void)
+{
+	sigset_t set;
+
+	sigemptyset(&set);
+	sigaddset(&set, SIGWINCH);
+	pthread_sigmask(SIG_BLOCK, &set, NULL);
+}
+
+void pthread__unblock_sigwinch(void)
+{
+	sigset_t set;
+
+	sigemptyset(&set);
+	sigaddset(&set, SIGWINCH);
+	pthread_sigmask(SIG_UNBLOCK, &set, NULL);
+}
+
 int main(int argc, const char **argv)
 {
 	const char *cmd;
@@ -480,6 +498,12 @@ int main(int argc, const char **argv)
 	 * time.
 	 */
 	setup_path();
+	/*
+	 * Block SIGWINCH notifications so that the thread that wants it can
+	 * unblock and get syscalls like select interrupted instead of waiting
+	 * forever while the signal goes to some other non interested thread.
+	 */
+	pthread__block_sigwinch();
 
 	while (1) {
 		static int done_help;
@@ -9,18 +9,21 @@ void get_term_dimensions(struct winsize *ws);
 #include "../../arch/x86/include/asm/unistd.h"
 #define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
 #define cpu_relax() asm volatile("rep; nop" ::: "memory");
+#define CPUINFO_PROC "model name"
 #endif
 
 #if defined(__x86_64__)
 #include "../../arch/x86/include/asm/unistd.h"
 #define rmb() asm volatile("lfence" ::: "memory")
 #define cpu_relax() asm volatile("rep; nop" ::: "memory");
+#define CPUINFO_PROC "model name"
 #endif
 
 #ifdef __powerpc__
 #include "../../arch/powerpc/include/asm/unistd.h"
 #define rmb() asm volatile ("sync" ::: "memory")
 #define cpu_relax() asm volatile ("" ::: "memory");
+#define CPUINFO_PROC "cpu"
 #endif
 
 #ifdef __s390__
@@ -37,30 +40,35 @@ void get_term_dimensions(struct winsize *ws);
 # define rmb() asm volatile("" ::: "memory")
 #endif
 #define cpu_relax() asm volatile("" ::: "memory")
+#define CPUINFO_PROC "cpu type"
 #endif
 
 #ifdef __hppa__
 #include "../../arch/parisc/include/asm/unistd.h"
 #define rmb() asm volatile("" ::: "memory")
 #define cpu_relax() asm volatile("" ::: "memory");
+#define CPUINFO_PROC "cpu"
 #endif
 
 #ifdef __sparc__
 #include "../../arch/sparc/include/asm/unistd.h"
 #define rmb() asm volatile("":::"memory")
 #define cpu_relax() asm volatile("":::"memory")
+#define CPUINFO_PROC "cpu"
 #endif
 
 #ifdef __alpha__
 #include "../../arch/alpha/include/asm/unistd.h"
 #define rmb() asm volatile("mb" ::: "memory")
 #define cpu_relax() asm volatile("" ::: "memory")
+#define CPUINFO_PROC "cpu model"
 #endif
 
 #ifdef __ia64__
 #include "../../arch/ia64/include/asm/unistd.h"
 #define rmb() asm volatile ("mf" ::: "memory")
 #define cpu_relax() asm volatile ("hint @pause" ::: "memory")
+#define CPUINFO_PROC "model name"
 #endif
 
 #ifdef __arm__
@@ -71,6 +79,7 @@ void get_term_dimensions(struct winsize *ws);
  */
 #define rmb() ((void(*)(void))0xffff0fa0)()
 #define cpu_relax() asm volatile("":::"memory")
+#define CPUINFO_PROC "Processor"
 #endif
 
 #ifdef __mips__
@@ -83,6 +92,7 @@ void get_term_dimensions(struct winsize *ws);
 		: /* no input */ \
 		: "memory")
 #define cpu_relax() asm volatile("" ::: "memory")
+#define CPUINFO_PROC "cpu model"
 #endif
 
 #include <time.h>
@@ -171,5 +181,8 @@ struct ip_callchain {
 };
 
 extern bool perf_host, perf_guest;
+extern const char perf_version_string[];
+
+void pthread__unblock_sigwinch(void);
 
 #endif
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -e skb:kfree_skb $@
@@ -0,0 +1,4 @@
+#!/bin/bash
+# description: display a table of dropped frames
+
+perf script -s "$PERF_EXEC_PATH"/scripts/python/net_dropmonitor.py $@
@@ -0,0 +1,72 @@
+# Monitor the system for dropped packets and produce a report of drop locations and counts
+
+import os
+import sys
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+		'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+from Util import *
+
+drop_log = {}
+kallsyms = []
+
+def get_kallsyms_table():
+	global kallsyms
+	try:
+		f = open("/proc/kallsyms", "r")
+		linecount = 0
+		for line in f:
+			linecount = linecount+1
+		f.seek(0)
+	except:
+		return
+
+
+	j = 0
+	for line in f:
+		loc = int(line.split()[0], 16)
+		name = line.split()[2]
+		j = j +1
+		if ((j % 100) == 0):
+			print "\r" + str(j) + "/" + str(linecount),
+		kallsyms.append({ 'loc': loc, 'name' : name})
+
+	print "\r" + str(j) + "/" + str(linecount)
+	kallsyms.sort()
+	return
+
+def get_sym(sloc):
+	loc = int(sloc)
+	for i in kallsyms:
+		if (i['loc'] >= loc):
+			return (i['name'], i['loc']-loc)
+	return (None, 0)
+
+def print_drop_table():
+	print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
+	for i in drop_log.keys():
+		(sym, off) = get_sym(i)
+		if sym == None:
+			sym = i
+		print "%25s %25s %25s" % (sym, off, drop_log[i])
+
+
+def trace_begin():
+	print "Starting trace (Ctrl-C to dump results)"
+
+def trace_end():
+	print "Gathering kallsyms data"
+	get_kallsyms_table()
+	print_drop_table()
+
+# called from perf, when it finds a corresponding event
+def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
+		   skbaddr, protocol, location):
+	slocation = str(location)
+	try:
+		drop_log[slocation] = drop_log[slocation] + 1
+	except:
+		drop_log[slocation] = 1
@@ -16,6 +16,8 @@
 #include "annotate.h"
 #include <pthread.h>
 
+const char *disassembler_style;
+
 int symbol__annotate_init(struct map *map __used, struct symbol *sym)
 {
 	struct annotation *notes = symbol__annotation(sym);
@@ -323,10 +325,15 @@ fallback:
 		 dso, dso->long_name, sym, sym->name);
 
 	snprintf(command, sizeof(command),
-		 "objdump --start-address=0x%016" PRIx64
-		 " --stop-address=0x%016" PRIx64 " -dS -C %s|grep -v %s|expand",
+		 "objdump %s%s --start-address=0x%016" PRIx64
+		 " --stop-address=0x%016" PRIx64
+		 " -d %s %s -C %s|grep -v %s|expand",
+		 disassembler_style ? "-M " : "",
+		 disassembler_style ? disassembler_style : "",
 		 map__rip_2objdump(map, sym->start),
 		 map__rip_2objdump(map, sym->end),
+		 symbol_conf.annotate_asm_raw ? "" : "--no-show-raw",
+		 symbol_conf.annotate_src ? "-S" : "",
 		 symfs_filename, filename);
 
 	pr_debug("Executing: %s\n", command);
@@ -91,13 +91,18 @@ int symbol__tty_annotate(struct symbol *sym, struct map *map, int evidx,
 #ifdef NO_NEWT_SUPPORT
 static inline int symbol__tui_annotate(struct symbol *sym __used,
 				       struct map *map __used,
-				       int evidx __used, int refresh __used)
+				       int evidx __used,
+				       void(*timer)(void *arg) __used,
+				       void *arg __used, int delay_secs __used)
 {
 	return 0;
 }
 #else
 int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
-			 int refresh);
+			 int nr_events, void(*timer)(void *arg), void *arg,
+			 int delay_secs);
 #endif
 
+extern const char *disassembler_style;
+
 #endif /* __PERF_ANNOTATE_H */
@@ -200,7 +200,7 @@ static int __color_vfprintf(FILE *fp, const char *color, const char *fmt,
 	 * Auto-detect:
 	 */
 	if (perf_use_color_default < 0) {
-		if (isatty(1) || pager_in_use())
+		if (isatty(fileno(fp)) || pager_in_use())
 			perf_use_color_default = 1;
 		else
 			perf_use_color_default = 0;
@@ -533,3 +533,9 @@ bool perf_evlist__sample_id_all(const struct perf_evlist *evlist)
 	first = list_entry(evlist->entries.next, struct perf_evsel, node);
 	return first->attr.sample_id_all;
 }
+
+void perf_evlist__set_selected(struct perf_evlist *evlist,
+			       struct perf_evsel *evsel)
+{
+	evlist->selected = evsel;
+}
@@ -25,6 +25,7 @@ struct perf_evlist {
 	struct pollfd *pollfd;
 	struct thread_map *threads;
 	struct cpu_map *cpus;
+	struct perf_evsel *selected;
 };
 
 struct perf_evsel;
@@ -56,6 +57,9 @@ void perf_evlist__munmap(struct perf_evlist *evlist);
 void perf_evlist__disable(struct perf_evlist *evlist);
 void perf_evlist__enable(struct perf_evlist *evlist);
 
+void perf_evlist__set_selected(struct perf_evlist *evlist,
+			       struct perf_evsel *evsel);
+
 static inline void perf_evlist__set_maps(struct perf_evlist *evlist,
 					 struct cpu_map *cpus,
 					 struct thread_map *threads)
@@ -39,6 +39,7 @@ void perf_evsel__init(struct perf_evsel *evsel,
 	evsel->idx = idx;
 	evsel->attr = *attr;
 	INIT_LIST_HEAD(&evsel->node);
+	hists__init(&evsel->hists);
 }
 
 struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
File diff suppressed because it is too large
@@ -12,6 +12,20 @@
 enum {
 	HEADER_TRACE_INFO = 1,
 	HEADER_BUILD_ID,
+
+	HEADER_HOSTNAME,
+	HEADER_OSRELEASE,
+	HEADER_VERSION,
+	HEADER_ARCH,
+	HEADER_NRCPUS,
+	HEADER_CPUDESC,
+	HEADER_CPUID,
+	HEADER_TOTAL_MEM,
+	HEADER_CMDLINE,
+	HEADER_EVENT_DESC,
+	HEADER_CPU_TOPOLOGY,
+	HEADER_NUMA_TOPOLOGY,
+
 	HEADER_LAST_FEATURE,
 };
 
@@ -68,10 +82,15 @@ void perf_header__set_feat(struct perf_header *header, int feat);
 void perf_header__clear_feat(struct perf_header *header, int feat);
 bool perf_header__has_feat(const struct perf_header *header, int feat);
 
+int perf_header__set_cmdline(int argc, const char **argv);
+
 int perf_header__process_sections(struct perf_header *header, int fd,
+				  void *data,
 				  int (*process)(struct perf_file_section *section,
 						 struct perf_header *ph,
-						 int feat, int fd));
+						 int feat, int fd, void *data));
 
+int perf_header__fprintf_info(struct perf_session *s, FILE *fp, bool full);
+
 int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
 			  const char *name, bool is_kallsyms);
@@ -104,4 +123,10 @@ int perf_event__synthesize_build_id(struct dso *pos, u16 misc,
 				    struct perf_session *session);
 int perf_event__process_build_id(union perf_event *event,
 				 struct perf_session *session);
+
+/*
+ * arch specific callback
+ */
+int get_cpuid(char *buffer, size_t sz);
+
 #endif /* __PERF_HEADER_H */
@@ -6,6 +6,11 @@
 #include "sort.h"
 #include <math.h>
 
+static bool hists__filter_entry_by_dso(struct hists *hists,
+				       struct hist_entry *he);
+static bool hists__filter_entry_by_thread(struct hists *hists,
+					  struct hist_entry *he);
+
 enum hist_filter {
 	HIST_FILTER__DSO,
 	HIST_FILTER__THREAD,
@@ -18,56 +23,56 @@ struct callchain_param callchain_param = {
 	.order  = ORDER_CALLEE
 };
 
-u16 hists__col_len(struct hists *self, enum hist_column col)
+u16 hists__col_len(struct hists *hists, enum hist_column col)
 {
-	return self->col_len[col];
+	return hists->col_len[col];
 }
 
-void hists__set_col_len(struct hists *self, enum hist_column col, u16 len)
+void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
 {
-	self->col_len[col] = len;
+	hists->col_len[col] = len;
 }
 
-bool hists__new_col_len(struct hists *self, enum hist_column col, u16 len)
+bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
 {
-	if (len > hists__col_len(self, col)) {
-		hists__set_col_len(self, col, len);
+	if (len > hists__col_len(hists, col)) {
+		hists__set_col_len(hists, col, len);
 		return true;
 	}
 	return false;
 }
 
-static void hists__reset_col_len(struct hists *self)
+static void hists__reset_col_len(struct hists *hists)
 {
 	enum hist_column col;
 
 	for (col = 0; col < HISTC_NR_COLS; ++col)
-		hists__set_col_len(self, col, 0);
+		hists__set_col_len(hists, col, 0);
 }
 
-static void hists__calc_col_len(struct hists *self, struct hist_entry *h)
+static void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
 {
 	u16 len;
 
 	if (h->ms.sym)
-		hists__new_col_len(self, HISTC_SYMBOL, h->ms.sym->namelen);
+		hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen);
 	else {
 		const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
 
-		if (hists__col_len(self, HISTC_DSO) < unresolved_col_width &&
+		if (hists__col_len(hists, HISTC_DSO) < unresolved_col_width &&
 		    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
 		    !symbol_conf.dso_list)
-			hists__set_col_len(self, HISTC_DSO,
+			hists__set_col_len(hists, HISTC_DSO,
 					   unresolved_col_width);
 	}
 
 	len = thread__comm_len(h->thread);
-	if (hists__new_col_len(self, HISTC_COMM, len))
-		hists__set_col_len(self, HISTC_THREAD, len + 6);
+	if (hists__new_col_len(hists, HISTC_COMM, len))
+		hists__set_col_len(hists, HISTC_THREAD, len + 6);
 
 	if (h->ms.map) {
 		len = dso__name_len(h->ms.map->dso);
-		hists__new_col_len(self, HISTC_DSO, len);
+		hists__new_col_len(hists, HISTC_DSO, len);
 	}
 }
 
@@ -92,6 +97,67 @@ static void hist_entry__add_cpumode_period(struct hist_entry *self,
 	}
 }
 
+static void hist_entry__decay(struct hist_entry *he)
+{
+	he->period = (he->period * 7) / 8;
+	he->nr_events = (he->nr_events * 7) / 8;
+}
+
+static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
+{
+	u64 prev_period = he->period;
+
+	if (prev_period == 0)
+		return true;
+
+	hist_entry__decay(he);
+
+	if (!he->filtered)
+		hists->stats.total_period -= prev_period - he->period;
+
+	return he->period == 0;
+}
+
+static void __hists__decay_entries(struct hists *hists, bool zap_user,
+				   bool zap_kernel, bool threaded)
+{
+	struct rb_node *next = rb_first(&hists->entries);
+	struct hist_entry *n;
+
+	while (next) {
+		n = rb_entry(next, struct hist_entry, rb_node);
+		next = rb_next(&n->rb_node);
+		/*
+		 * We may be annotating this, for instance, so keep it here in
+		 * case it gets new samples; we'll eventually free it when the
+		 * user stops browsing and it again gets fully decayed.
+		 */
+		if (((zap_user && n->level == '.') ||
+		     (zap_kernel && n->level != '.') ||
+		     hists__decay_entry(hists, n)) &&
+		    !n->used) {
+			rb_erase(&n->rb_node, &hists->entries);
+
+			if (sort__need_collapse || threaded)
+				rb_erase(&n->rb_node_in, &hists->entries_collapsed);
+
+			hist_entry__free(n);
+			--hists->nr_entries;
+		}
+	}
+}
+
+void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
+{
+	return __hists__decay_entries(hists, zap_user, zap_kernel, false);
+}
+
+void hists__decay_entries_threaded(struct hists *hists,
+				   bool zap_user, bool zap_kernel)
+{
+	return __hists__decay_entries(hists, zap_user, zap_kernel, true);
+}
+
 /*
  * histogram, sorted on item, collects periods
  */
|
||||||
return self;
|
return self;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void hists__inc_nr_entries(struct hists *self, struct hist_entry *h)
|
static void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
|
||||||
{
|
{
|
||||||
if (!h->filtered) {
|
if (!h->filtered) {
|
||||||
hists__calc_col_len(self, h);
|
hists__calc_col_len(hists, h);
|
||||||
++self->nr_entries;
|
++hists->nr_entries;
|
||||||
|
hists->stats.total_period += h->period;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -128,11 +195,11 @@ static u8 symbol__parent_filter(const struct symbol *parent)
 	return 0;
 }
 
-struct hist_entry *__hists__add_entry(struct hists *self,
+struct hist_entry *__hists__add_entry(struct hists *hists,
 				      struct addr_location *al,
 				      struct symbol *sym_parent, u64 period)
 {
-	struct rb_node **p = &self->entries.rb_node;
+	struct rb_node **p;
 	struct rb_node *parent = NULL;
 	struct hist_entry *he;
 	struct hist_entry entry = {
@@ -150,9 +217,13 @@ struct hist_entry *__hists__add_entry(struct hists *self,
 	};
 	int cmp;
 
+	pthread_mutex_lock(&hists->lock);
+
+	p = &hists->entries_in->rb_node;
+
 	while (*p != NULL) {
 		parent = *p;
-		he = rb_entry(parent, struct hist_entry, rb_node);
+		he = rb_entry(parent, struct hist_entry, rb_node_in);
 
 		cmp = hist_entry__cmp(&entry, he);
 
@@ -170,12 +241,14 @@ struct hist_entry *__hists__add_entry(struct hists *self,
 
 	he = hist_entry__new(&entry);
 	if (!he)
-		return NULL;
-	rb_link_node(&he->rb_node, parent, p);
-	rb_insert_color(&he->rb_node, &self->entries);
-	hists__inc_nr_entries(self, he);
+		goto out_unlock;
+	rb_link_node(&he->rb_node_in, parent, p);
+	rb_insert_color(&he->rb_node_in, hists->entries_in);
 out:
 	hist_entry__add_cpumode_period(he, al->cpumode, period);
+out_unlock:
+	pthread_mutex_unlock(&hists->lock);
 	return he;
 }
 
@@ -222,7 +295,7 @@ void hist_entry__free(struct hist_entry *he)
  * collapse the histogram
  */
 
-static bool hists__collapse_insert_entry(struct hists *self,
+static bool hists__collapse_insert_entry(struct hists *hists,
 					 struct rb_root *root,
 					 struct hist_entry *he)
 {
@@ -233,15 +306,16 @@ static bool hists__collapse_insert_entry(struct hists *self,
 
 	while (*p != NULL) {
 		parent = *p;
-		iter = rb_entry(parent, struct hist_entry, rb_node);
+		iter = rb_entry(parent, struct hist_entry, rb_node_in);
 
 		cmp = hist_entry__collapse(iter, he);
 
 		if (!cmp) {
 			iter->period += he->period;
+			iter->nr_events += he->nr_events;
 			if (symbol_conf.use_callchain) {
-				callchain_cursor_reset(&self->callchain_cursor);
-				callchain_merge(&self->callchain_cursor, iter->callchain,
+				callchain_cursor_reset(&hists->callchain_cursor);
+				callchain_merge(&hists->callchain_cursor, iter->callchain,
 						he->callchain);
 			}
 			hist_entry__free(he);
@@ -254,35 +328,70 @@ static bool hists__collapse_insert_entry(struct hists *self,
 		p = &(*p)->rb_right;
 	}
 
-	rb_link_node(&he->rb_node, parent, p);
-	rb_insert_color(&he->rb_node, root);
+	rb_link_node(&he->rb_node_in, parent, p);
+	rb_insert_color(&he->rb_node_in, root);
 	return true;
 }
 
-void hists__collapse_resort(struct hists *self)
+static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
 {
-	struct rb_root tmp;
+	struct rb_root *root;
+
+	pthread_mutex_lock(&hists->lock);
+
+	root = hists->entries_in;
+	if (++hists->entries_in > &hists->entries_in_array[1])
+		hists->entries_in = &hists->entries_in_array[0];
+
+	pthread_mutex_unlock(&hists->lock);
+
+	return root;
+}
+
+static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
+{
+	hists__filter_entry_by_dso(hists, he);
+	hists__filter_entry_by_thread(hists, he);
+}
+
+static void __hists__collapse_resort(struct hists *hists, bool threaded)
+{
+	struct rb_root *root;
 	struct rb_node *next;
 	struct hist_entry *n;
 
-	if (!sort__need_collapse)
+	if (!sort__need_collapse && !threaded)
 		return;
 
-	tmp = RB_ROOT;
-	next = rb_first(&self->entries);
-	self->nr_entries = 0;
-	hists__reset_col_len(self);
+	root = hists__get_rotate_entries_in(hists);
+	next = rb_first(root);
+	hists->stats.total_period = 0;
 
 	while (next) {
-		n = rb_entry(next, struct hist_entry, rb_node);
-		next = rb_next(&n->rb_node);
+		n = rb_entry(next, struct hist_entry, rb_node_in);
+		next = rb_next(&n->rb_node_in);
 
-		rb_erase(&n->rb_node, &self->entries);
-		if (hists__collapse_insert_entry(self, &tmp, n))
-			hists__inc_nr_entries(self, n);
+		rb_erase(&n->rb_node_in, root);
+		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
+			/*
+			 * If it wasn't combined with one of the entries already
+			 * collapsed, we need to apply the filters that may have
+			 * been set by, say, the hist_browser.
+			 */
+			hists__apply_filters(hists, n);
+			hists__inc_nr_entries(hists, n);
+		}
 	}
+}
 
-	self->entries = tmp;
+void hists__collapse_resort(struct hists *hists)
+{
+	return __hists__collapse_resort(hists, false);
+}
+
+void hists__collapse_resort_threaded(struct hists *hists)
+{
+	return __hists__collapse_resort(hists, true);
 }
 
@@ -315,31 +424,43 @@ static void __hists__insert_output_entry(struct rb_root *entries,
 	rb_insert_color(&he->rb_node, entries);
 }
 
-void hists__output_resort(struct hists *self)
+static void __hists__output_resort(struct hists *hists, bool threaded)
 {
-	struct rb_root tmp;
+	struct rb_root *root;
 	struct rb_node *next;
 	struct hist_entry *n;
 	u64 min_callchain_hits;
 
-	min_callchain_hits = self->stats.total_period * (callchain_param.min_percent / 100);
+	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);
 
-	tmp = RB_ROOT;
-	next = rb_first(&self->entries);
+	if (sort__need_collapse || threaded)
+		root = &hists->entries_collapsed;
+	else
+		root = hists->entries_in;
+
+	next = rb_first(root);
+	hists->entries = RB_ROOT;
 
-	self->nr_entries = 0;
-	hists__reset_col_len(self);
+	hists->nr_entries = 0;
+	hists__reset_col_len(hists);
 
 	while (next) {
-		n = rb_entry(next, struct hist_entry, rb_node);
-		next = rb_next(&n->rb_node);
+		n = rb_entry(next, struct hist_entry, rb_node_in);
+		next = rb_next(&n->rb_node_in);
 
-		rb_erase(&n->rb_node, &self->entries);
-		__hists__insert_output_entry(&tmp, n, min_callchain_hits);
-		hists__inc_nr_entries(self, n);
+		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
+		hists__inc_nr_entries(hists, n);
 	}
+}
 
-	self->entries = tmp;
+void hists__output_resort(struct hists *hists)
+{
+	return __hists__output_resort(hists, false);
+}
+
+void hists__output_resort_threaded(struct hists *hists)
+{
+	return __hists__output_resort(hists, true);
 }
 
 static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
@@ -594,12 +715,27 @@ static size_t hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
 	return ret;
 }
 
-int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
-			 struct hists *hists, struct hists *pair_hists,
-			 bool show_displacement, long displacement,
-			 bool color, u64 session_total)
+void hists__output_recalc_col_len(struct hists *hists, int max_rows)
+{
+	struct rb_node *next = rb_first(&hists->entries);
+	struct hist_entry *n;
+	int row = 0;
+
+	hists__reset_col_len(hists);
+
+	while (next && row++ < max_rows) {
+		n = rb_entry(next, struct hist_entry, rb_node);
+		if (!n->filtered)
+			hists__calc_col_len(hists, n);
+		next = rb_next(&n->rb_node);
+	}
+}
+
+static int hist_entry__pcnt_snprintf(struct hist_entry *self, char *s,
+				     size_t size, struct hists *pair_hists,
+				     bool show_displacement, long displacement,
+				     bool color, u64 session_total)
 {
-	struct sort_entry *se;
 	u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us;
 	u64 nr_events;
 	const char *sep = symbol_conf.field_sep;
@@ -664,6 +800,13 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
 			ret += snprintf(s + ret, size - ret, "%11" PRIu64, nr_events);
 	}
 
+	if (symbol_conf.show_total_period) {
+		if (sep)
+			ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, period);
+		else
+			ret += snprintf(s + ret, size - ret, " %12" PRIu64, period);
+	}
+
 	if (pair_hists) {
 		char bf[32];
 		double old_percent = 0, new_percent = 0, diff;
@@ -698,26 +841,42 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
 		}
 	}
 
+	return ret;
+}
+
+int hist_entry__snprintf(struct hist_entry *he, char *s, size_t size,
+			 struct hists *hists)
+{
+	const char *sep = symbol_conf.field_sep;
+	struct sort_entry *se;
+	int ret = 0;
+
 	list_for_each_entry(se, &hist_entry__sort_list, list) {
 		if (se->elide)
 			continue;
 
 		ret += snprintf(s + ret, size - ret, "%s", sep ?: " ");
-		ret += se->se_snprintf(self, s + ret, size - ret,
+		ret += se->se_snprintf(he, s + ret, size - ret,
 				       hists__col_len(hists, se->se_width_idx));
 	}
 
 	return ret;
 }
 
-int hist_entry__fprintf(struct hist_entry *self, struct hists *hists,
+int hist_entry__fprintf(struct hist_entry *he, size_t size, struct hists *hists,
 			struct hists *pair_hists, bool show_displacement,
 			long displacement, FILE *fp, u64 session_total)
 {
 	char bf[512];
-	hist_entry__snprintf(self, bf, sizeof(bf), hists, pair_hists,
-			     show_displacement, displacement,
-			     true, session_total);
+	int ret;
+
+	if (size == 0 || size > sizeof(bf))
+		size = sizeof(bf);
+
+	ret = hist_entry__pcnt_snprintf(he, bf, size, pair_hists,
+					show_displacement, displacement,
+					true, session_total);
+	hist_entry__snprintf(he, bf + ret, size - ret, hists);
 	return fprintf(fp, "%s\n", bf);
 }
 
@@ -738,8 +897,9 @@ static size_t hist_entry__fprintf_callchain(struct hist_entry *self,
 					   left_margin);
 }
 
-size_t hists__fprintf(struct hists *self, struct hists *pair,
-		      bool show_displacement, FILE *fp)
+size_t hists__fprintf(struct hists *hists, struct hists *pair,
+		      bool show_displacement, bool show_header, int max_rows,
+		      int max_cols, FILE *fp)
 {
 	struct sort_entry *se;
 	struct rb_node *nd;
@@ -749,9 +909,13 @@ size_t hists__fprintf(struct hists *self, struct hists *pair,
 	unsigned int width;
 	const char *sep = symbol_conf.field_sep;
 	const char *col_width = symbol_conf.col_width_list_str;
+	int nr_rows = 0;
 
 	init_rem_hits();
 
+	if (!show_header)
+		goto print_entries;
+
 	fprintf(fp, "# %s", pair ? "Baseline" : "Overhead");
 
 	if (symbol_conf.show_nr_samples) {
@@ -761,6 +925,13 @@ size_t hists__fprintf(struct hists *self, struct hists *pair,
 			fputs(" Samples ", fp);
 	}
 
+	if (symbol_conf.show_total_period) {
+		if (sep)
+			ret += fprintf(fp, "%cPeriod", *sep);
+		else
+			ret += fprintf(fp, " Period ");
+	}
+
 	if (symbol_conf.show_cpu_utilization) {
 		if (sep) {
 			ret += fprintf(fp, "%csys", *sep);
@@ -803,18 +974,21 @@ size_t hists__fprintf(struct hists *self, struct hists *pair,
 		width = strlen(se->se_header);
 		if (symbol_conf.col_width_list_str) {
 			if (col_width) {
-				hists__set_col_len(self, se->se_width_idx,
+				hists__set_col_len(hists, se->se_width_idx,
 						   atoi(col_width));
 				col_width = strchr(col_width, ',');
 				if (col_width)
 					++col_width;
 			}
 		}
-		if (!hists__new_col_len(self, se->se_width_idx, width))
-			width = hists__col_len(self, se->se_width_idx);
+		if (!hists__new_col_len(hists, se->se_width_idx, width))
+			width = hists__col_len(hists, se->se_width_idx);
 		fprintf(fp, " %*s", width, se->se_header);
 	}
 
 	fprintf(fp, "\n");
+	if (max_rows && ++nr_rows >= max_rows)
+		goto out;
+
 	if (sep)
 		goto print_entries;
@@ -822,6 +996,8 @@ size_t hists__fprintf(struct hists *self, struct hists *pair,
 	fprintf(fp, "# ........");
 	if (symbol_conf.show_nr_samples)
 		fprintf(fp, " ..........");
+	if (symbol_conf.show_total_period)
+		fprintf(fp, " ............");
 	if (pair) {
 		fprintf(fp, " ..........");
 		if (show_displacement)
@@ -834,17 +1010,23 @@ size_t hists__fprintf(struct hists *self, struct hists *pair,
 			continue;
 
 		fprintf(fp, " ");
-		width = hists__col_len(self, se->se_width_idx);
+		width = hists__col_len(hists, se->se_width_idx);
 		if (width == 0)
 			width = strlen(se->se_header);
 		for (i = 0; i < width; i++)
 			fprintf(fp, ".");
 	}
 
-	fprintf(fp, "\n#\n");
+	fprintf(fp, "\n");
+	if (max_rows && ++nr_rows >= max_rows)
+		goto out;
+
+	fprintf(fp, "#\n");
+	if (max_rows && ++nr_rows >= max_rows)
+		goto out;
+
 print_entries:
-	for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
+	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
 
 		if (h->filtered)
@@ -858,19 +1040,22 @@ print_entries:
 			displacement = 0;
 			++position;
 		}
-		ret += hist_entry__fprintf(h, self, pair, show_displacement,
-					   displacement, fp, self->stats.total_period);
+		ret += hist_entry__fprintf(h, max_cols, hists, pair, show_displacement,
+					   displacement, fp, hists->stats.total_period);
 
 		if (symbol_conf.use_callchain)
-			ret += hist_entry__fprintf_callchain(h, self, fp,
-							     self->stats.total_period);
+			ret += hist_entry__fprintf_callchain(h, hists, fp,
+							     hists->stats.total_period);
+		if (max_rows && ++nr_rows >= max_rows)
+			goto out;
+
 		if (h->ms.map == NULL && verbose > 1) {
 			__map_groups__fprintf_maps(&h->thread->mg,
 						   MAP__FUNCTION, verbose, fp);
 			fprintf(fp, "%.10s end\n", graph_dotted_line);
 		}
 	}
+out:
 	free(rem_sq_bracket);
 
 	return ret;
@@ -879,7 +1064,7 @@ print_entries:
 /*
  * See hists__fprintf to match the column widths
  */
-unsigned int hists__sort_list_width(struct hists *self)
+unsigned int hists__sort_list_width(struct hists *hists)
 {
 	struct sort_entry *se;
 	int ret = 9; /* total % */
@@ -896,9 +1081,12 @@ unsigned int hists__sort_list_width(struct hists *self)
 	if (symbol_conf.show_nr_samples)
 		ret += 11;
 
+	if (symbol_conf.show_total_period)
+		ret += 13;
+
 	list_for_each_entry(se, &hist_entry__sort_list, list)
 		if (!se->elide)
-			ret += 2 + hists__col_len(self, se->se_width_idx);
+			ret += 2 + hists__col_len(hists, se->se_width_idx);
 
 	if (verbose) /* Addr + origin */
 		ret += 3 + BITS_PER_LONG / 4;
@ -906,63 +1094,84 @@ unsigned int hists__sort_list_width(struct hists *self)
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void hists__remove_entry_filter(struct hists *self, struct hist_entry *h,
|
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
|
||||||
enum hist_filter filter)
|
enum hist_filter filter)
|
||||||
{
|
{
|
||||||
h->filtered &= ~(1 << filter);
|
h->filtered &= ~(1 << filter);
|
||||||
if (h->filtered)
|
if (h->filtered)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
++self->nr_entries;
|
++hists->nr_entries;
|
||||||
if (h->ms.unfolded)
|
if (h->ms.unfolded)
|
||||||
self->nr_entries += h->nr_rows;
|
hists->nr_entries += h->nr_rows;
|
||||||
h->row_offset = 0;
|
h->row_offset = 0;
|
||||||
self->stats.total_period += h->period;
|
hists->stats.total_period += h->period;
|
||||||
self->stats.nr_events[PERF_RECORD_SAMPLE] += h->nr_events;
|
hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->nr_events;
|
||||||
|
|
||||||
hists__calc_col_len(self, h);
|
hists__calc_col_len(hists, h);
|
||||||
}
|
}
|
||||||
|
|
||||||
void hists__filter_by_dso(struct hists *self, const struct dso *dso)
|
|
||||||
|
static bool hists__filter_entry_by_dso(struct hists *hists,
|
||||||
|
struct hist_entry *he)
|
||||||
|
{
|
||||||
|
if (hists->dso_filter != NULL &&
|
||||||
|
(he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
|
||||||
|
he->filtered |= (1 << HIST_FILTER__DSO);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
void hists__filter_by_dso(struct hists *hists)
|
||||||
{
|
{
|
||||||
struct rb_node *nd;
|
struct rb_node *nd;
|
||||||
|
|
||||||
self->nr_entries = self->stats.total_period = 0;
|
hists->nr_entries = hists->stats.total_period = 0;
|
||||||
self->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
|
hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
|
||||||
hists__reset_col_len(self);
|
hists__reset_col_len(hists);
|
||||||
|
|
||||||
for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
|
for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
|
||||||
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
|
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
|
||||||
|
|
||||||
if (symbol_conf.exclude_other && !h->parent)
|
if (symbol_conf.exclude_other && !h->parent)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
if (dso != NULL && (h->ms.map == NULL || h->ms.map->dso != dso)) {
|
if (hists__filter_entry_by_dso(hists, h))
|
||||||
h->filtered |= (1 << HIST_FILTER__DSO);
|
|
||||||
continue;
|
continue;
|
||||||
}
|
|
||||||
|
|
||||||
hists__remove_entry_filter(self, h, HIST_FILTER__DSO);
|
hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void hists__filter_by_thread(struct hists *self, const struct thread *thread)
|
static bool hists__filter_entry_by_thread(struct hists *hists,
|
||||||
|
struct hist_entry *he)
|
||||||
|
{
|
||||||
|
if (hists->thread_filter != NULL &&
|
||||||
|
he->thread != hists->thread_filter) {
|
||||||
|
he->filtered |= (1 << HIST_FILTER__THREAD);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
void hists__filter_by_thread(struct hists *hists)
|
||||||
{
|
{
|
||||||
struct rb_node *nd;
|
struct rb_node *nd;
|
||||||
|
|
||||||
self->nr_entries = self->stats.total_period = 0;
|
hists->nr_entries = hists->stats.total_period = 0;
|
||||||
self->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
|
hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
|
||||||
hists__reset_col_len(self);
|
hists__reset_col_len(hists);
|
||||||
|
|
||||||
for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
|
for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
|
||||||
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
|
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
|
||||||
|
|
||||||
if (thread != NULL && h->thread != thread) {
|
if (hists__filter_entry_by_thread(hists, h))
|
||||||
h->filtered |= (1 << HIST_FILTER__THREAD);
|
|
||||||
continue;
|
continue;
|
||||||
}
|
|
||||||
|
|
||||||
hists__remove_entry_filter(self, h, HIST_FILTER__THREAD);
|
hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
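The dso and thread filters now live in struct hists itself (the dso_filter and thread_filter fields appear in the hist.h hunks below), so the filter passes take only the hists pointer and consult the stored state. A minimal sketch of the new calling convention; the caller below is illustrative, not code from this diff:

	/* zoom into one dso; a NULL filter means "show everything" */
	hists->dso_filter = dso;
	hists__filter_by_dso(hists);

	/* the thread filter follows the same pattern */
	hists->thread_filter = thread;
	hists__filter_by_thread(hists);

Keeping the filter state with the data means any code path that rebuilds or merges entries can re-apply the current filters without callers having to pass them around.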
@@ -976,13 +1185,13 @@ int hist_entry__annotate(struct hist_entry *he, size_t privsize)
 	return symbol__annotate(he->ms.sym, he->ms.map, privsize);
 }
 
-void hists__inc_nr_events(struct hists *self, u32 type)
+void hists__inc_nr_events(struct hists *hists, u32 type)
 {
-	++self->stats.nr_events[0];
-	++self->stats.nr_events[type];
+	++hists->stats.nr_events[0];
+	++hists->stats.nr_events[type];
 }
 
-size_t hists__fprintf_nr_events(struct hists *self, FILE *fp)
+size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp)
 {
 	int i;
 	size_t ret = 0;
@@ -990,7 +1199,7 @@ size_t hists__fprintf_nr_events(struct hists *self, FILE *fp)
 	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
 		const char *name;
 
-		if (self->stats.nr_events[i] == 0)
+		if (hists->stats.nr_events[i] == 0)
 			continue;
 
 		name = perf_event__name(i);
@@ -998,8 +1207,18 @@ size_t hists__fprintf_nr_events(struct hists *self, FILE *fp)
 			continue;
 
 		ret += fprintf(fp, "%16s events: %10d\n", name,
-			       self->stats.nr_events[i]);
+			       hists->stats.nr_events[i]);
 	}
 
 	return ret;
 }
+
+void hists__init(struct hists *hists)
+{
+	memset(hists, 0, sizeof(*hists));
+	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
+	hists->entries_in = &hists->entries_in_array[0];
+	hists->entries_collapsed = RB_ROOT;
+	hists->entries = RB_ROOT;
+	pthread_mutex_init(&hists->lock, NULL);
+}
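hists__init() gives every struct hists user a single constructor: zero the whole struct, set the four rb-tree roots, point entries_in at the first staging tree, and initialize the mutex that guards it. A hedged usage sketch (the surrounding setup is illustrative):

	struct hists hists;

	hists__init(&hists);
	/*
	 * ...then feed it samples with __hists__add_entry(&hists, al,
	 * parent, period) and collapse/resort before printing.
	 */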
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -2,6 +2,7 @@
 #define __PERF_HIST_H
 
 #include <linux/types.h>
+#include <pthread.h>
 #include "callchain.h"
 
 extern struct callchain_param callchain_param;
@@ -42,9 +43,18 @@ enum hist_column {
 	HISTC_NR_COLS, /* Last entry */
 };
 
+struct thread;
+struct dso;
+
 struct hists {
+	struct rb_root		entries_in_array[2];
+	struct rb_root		*entries_in;
 	struct rb_root		entries;
+	struct rb_root		entries_collapsed;
 	u64			nr_entries;
+	const struct thread	*thread_filter;
+	const struct dso	*dso_filter;
+	pthread_mutex_t		lock;
 	struct events_stats	stats;
 	u64			event_stream;
 	u16			col_len[HISTC_NR_COLS];
@@ -52,34 +62,42 @@ struct hists {
 	struct callchain_cursor	callchain_cursor;
 };
 
+void hists__init(struct hists *hists);
+
 struct hist_entry *__hists__add_entry(struct hists *self,
 				      struct addr_location *al,
 				      struct symbol *parent, u64 period);
 extern int64_t hist_entry__cmp(struct hist_entry *, struct hist_entry *);
 extern int64_t hist_entry__collapse(struct hist_entry *, struct hist_entry *);
-int hist_entry__fprintf(struct hist_entry *self, struct hists *hists,
+int hist_entry__fprintf(struct hist_entry *he, size_t size, struct hists *hists,
 			struct hists *pair_hists, bool show_displacement,
-			long displacement, FILE *fp, u64 total);
+			long displacement, FILE *fp, u64 session_total);
 int hist_entry__snprintf(struct hist_entry *self, char *bf, size_t size,
-			 struct hists *hists, struct hists *pair_hists,
-			 bool show_displacement, long displacement,
-			 bool color, u64 total);
+			 struct hists *hists);
 void hist_entry__free(struct hist_entry *);
 
 void hists__output_resort(struct hists *self);
+void hists__output_resort_threaded(struct hists *hists);
 void hists__collapse_resort(struct hists *self);
+void hists__collapse_resort_threaded(struct hists *hists);
+
+void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel);
+void hists__decay_entries_threaded(struct hists *hists, bool zap_user,
+				   bool zap_kernel);
+void hists__output_recalc_col_len(struct hists *hists, int max_rows);
 
 void hists__inc_nr_events(struct hists *self, u32 type);
 size_t hists__fprintf_nr_events(struct hists *self, FILE *fp);
 
 size_t hists__fprintf(struct hists *self, struct hists *pair,
-		      bool show_displacement, FILE *fp);
+		      bool show_displacement, bool show_header,
+		      int max_rows, int max_cols, FILE *fp);
 
 int hist_entry__inc_addr_samples(struct hist_entry *self, int evidx, u64 addr);
 int hist_entry__annotate(struct hist_entry *self, size_t privsize);
 
-void hists__filter_by_dso(struct hists *self, const struct dso *dso);
-void hists__filter_by_thread(struct hists *self, const struct thread *thread);
+void hists__filter_by_dso(struct hists *hists);
+void hists__filter_by_thread(struct hists *hists);
 
 u16 hists__col_len(struct hists *self, enum hist_column col);
 void hists__set_col_len(struct hists *self, enum hist_column col, u16 len);
@@ -90,26 +108,33 @@ struct perf_evlist;
 #ifdef NO_NEWT_SUPPORT
 static inline
 int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __used,
-				  const char *help __used)
+				  const char *help __used,
+				  void(*timer)(void *arg) __used,
+				  void *arg __used,
+				  int refresh __used)
 {
 	return 0;
 }
 
 static inline int hist_entry__tui_annotate(struct hist_entry *self __used,
-					   int evidx __used)
+					   int evidx __used,
+					   int nr_events __used,
+					   void(*timer)(void *arg) __used,
+					   void *arg __used,
+					   int delay_secs __used)
 {
 	return 0;
 }
-#define KEY_LEFT -1
-#define KEY_RIGHT -2
+#define K_LEFT -1
+#define K_RIGHT -2
 #else
-#include <newt.h>
-int hist_entry__tui_annotate(struct hist_entry *self, int evidx);
+#include "ui/keysyms.h"
+int hist_entry__tui_annotate(struct hist_entry *he, int evidx, int nr_events,
+			     void(*timer)(void *arg), void *arg, int delay_secs);
 
-#define KEY_LEFT NEWT_KEY_LEFT
-#define KEY_RIGHT NEWT_KEY_RIGHT
-
-int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help);
+int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
+				  void(*timer)(void *arg), void *arg,
+				  int refresh);
 #endif
 
 unsigned int hists__sort_list_width(struct hists *self);
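With <newt.h> no longer included here, the key constants come from ui/keysyms.h in TUI builds and from the -1/-2 stub defines otherwise, so callers can test the same K_* names in both configurations. An illustrative fragment, not taken from this diff (the zoom actions are assumptions about a typical browser loop):

	switch (key) {
	case K_LEFT:		/* real keysym with the TUI, -1 in stub builds */
		/* e.g. zoom out */
		break;
	case K_RIGHT:
		/* e.g. zoom in */
		break;
	default:
		break;
	}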
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -18,6 +18,13 @@ static inline int is_anon_memory(const char *filename)
 	return strcmp(filename, "//anon") == 0;
 }
 
+static inline int is_no_dso_memory(const char *filename)
+{
+	return !strcmp(filename, "[stack]") ||
+	       !strcmp(filename, "[vdso]")  ||
+	       !strcmp(filename, "[heap]");
+}
+
 void map__init(struct map *self, enum map_type type,
 	       u64 start, u64 end, u64 pgoff, struct dso *dso)
 {
@@ -42,9 +49,10 @@ struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
 	if (self != NULL) {
 		char newfilename[PATH_MAX];
 		struct dso *dso;
-		int anon;
+		int anon, no_dso;
 
 		anon = is_anon_memory(filename);
+		no_dso = is_no_dso_memory(filename);
 
 		if (anon) {
 			snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", pid);
@@ -57,12 +65,16 @@ struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
 
 		map__init(self, type, start, start + len, pgoff, dso);
 
-		if (anon) {
-set_identity:
+		if (anon || no_dso) {
 			self->map_ip = self->unmap_ip = identity__map_ip;
-		} else if (strcmp(filename, "[vdso]") == 0) {
-			dso__set_loaded(dso, self->type);
-			goto set_identity;
+
+			/*
+			 * Set memory without DSO as loaded. All map__find_*
+			 * functions still return NULL, and we avoid the
+			 * unnecessary map__load warning.
+			 */
+			if (no_dso)
+				dso__set_loaded(dso, self->type);
 		}
 	}
 	return self;
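is_no_dso_memory() picks out the pseudo-maps that have no file behind them, and map__new() now gives those the same identity address translation as anon maps while marking their dso loaded, so the later map__load() warning never fires for them. Roughly, under the definitions shown above (the library path is illustrative):

	is_anon_memory("//anon");		/* 1: backed by /tmp/perf-$pid.map */
	is_no_dso_memory("[stack]");		/* 1: identity map_ip, dso marked loaded */
	is_no_dso_memory("[vdso]");		/* 1: same, replacing the old goto path */
	is_no_dso_memory("/lib64/libc.so.6");	/* 0: normal dso handling */

A sample landing in [heap] therefore still resolves to its map, but symbol lookups on it quietly return NULL.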
@@ -127,8 +139,8 @@ int map__load(struct map *self, symbol_filter_t filter)
 
 		if (len > sizeof(DSO__DELETED) &&
 		    strcmp(name + real_len + 1, DSO__DELETED) == 0) {
-			pr_warning("%.*s was updated, restart the long "
-				   "running apps that use it!\n",
+			pr_warning("%.*s was updated (is prelink enabled?). "
+				   "Restart the long running apps that use it!\n",
 				   (int)real_len, name);
 		} else {
 			pr_warning("no symbols found in %s, maybe install "
@@ -220,55 +232,55 @@ u64 map__objdump_2ip(struct map *map, u64 addr)
 	return ip;
 }
 
-void map_groups__init(struct map_groups *self)
+void map_groups__init(struct map_groups *mg)
 {
 	int i;
 	for (i = 0; i < MAP__NR_TYPES; ++i) {
-		self->maps[i] = RB_ROOT;
-		INIT_LIST_HEAD(&self->removed_maps[i]);
+		mg->maps[i] = RB_ROOT;
+		INIT_LIST_HEAD(&mg->removed_maps[i]);
 	}
-	self->machine = NULL;
+	mg->machine = NULL;
 }
 
-static void maps__delete(struct rb_root *self)
+static void maps__delete(struct rb_root *maps)
 {
-	struct rb_node *next = rb_first(self);
+	struct rb_node *next = rb_first(maps);
 
 	while (next) {
 		struct map *pos = rb_entry(next, struct map, rb_node);
 
 		next = rb_next(&pos->rb_node);
-		rb_erase(&pos->rb_node, self);
+		rb_erase(&pos->rb_node, maps);
 		map__delete(pos);
 	}
 }
 
-static void maps__delete_removed(struct list_head *self)
+static void maps__delete_removed(struct list_head *maps)
 {
 	struct map *pos, *n;
 
-	list_for_each_entry_safe(pos, n, self, node) {
+	list_for_each_entry_safe(pos, n, maps, node) {
 		list_del(&pos->node);
 		map__delete(pos);
 	}
 }
 
-void map_groups__exit(struct map_groups *self)
+void map_groups__exit(struct map_groups *mg)
 {
 	int i;
 
 	for (i = 0; i < MAP__NR_TYPES; ++i) {
-		maps__delete(&self->maps[i]);
-		maps__delete_removed(&self->removed_maps[i]);
+		maps__delete(&mg->maps[i]);
+		maps__delete_removed(&mg->removed_maps[i]);
 	}
 }
 
-void map_groups__flush(struct map_groups *self)
+void map_groups__flush(struct map_groups *mg)
 {
 	int type;
 
 	for (type = 0; type < MAP__NR_TYPES; type++) {
-		struct rb_root *root = &self->maps[type];
+		struct rb_root *root = &mg->maps[type];
 		struct rb_node *next = rb_first(root);
 
 		while (next) {
@@ -280,17 +292,17 @@ void map_groups__flush(struct map_groups *self)
 			 * instance in some hist_entry instances, so
 			 * just move them to a separate list.
 			 */
-			list_add_tail(&pos->node, &self->removed_maps[pos->type]);
+			list_add_tail(&pos->node, &mg->removed_maps[pos->type]);
 		}
 	}
 }
 
-struct symbol *map_groups__find_symbol(struct map_groups *self,
+struct symbol *map_groups__find_symbol(struct map_groups *mg,
 				       enum map_type type, u64 addr,
 				       struct map **mapp,
 				       symbol_filter_t filter)
 {
-	struct map *map = map_groups__find(self, type, addr);
+	struct map *map = map_groups__find(mg, type, addr);
 
 	if (map != NULL) {
 		if (mapp != NULL)
@@ -301,7 +313,7 @@ struct symbol *map_groups__find_symbol(struct map_groups *self,
 	return NULL;
 }
 
-struct symbol *map_groups__find_symbol_by_name(struct map_groups *self,
+struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
 					       enum map_type type,
 					       const char *name,
 					       struct map **mapp,
@@ -309,7 +321,7 @@ struct symbol *map_groups__find_symbol_by_name(struct map_groups *self,
 {
 	struct rb_node *nd;
 
-	for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) {
+	for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
 		struct map *pos = rb_entry(nd, struct map, rb_node);
 		struct symbol *sym = map__find_symbol_by_name(pos, name, filter);
 
@@ -323,13 +335,13 @@ struct symbol *map_groups__find_symbol_by_name(struct map_groups *self,
 	return NULL;
 }
 
-size_t __map_groups__fprintf_maps(struct map_groups *self,
+size_t __map_groups__fprintf_maps(struct map_groups *mg,
 				  enum map_type type, int verbose, FILE *fp)
 {
 	size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
 	struct rb_node *nd;
 
-	for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) {
+	for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
 		struct map *pos = rb_entry(nd, struct map, rb_node);
 		printed += fprintf(fp, "Map:");
 		printed += map__fprintf(pos, fp);
@@ -342,22 +354,22 @@ size_t __map_groups__fprintf_maps(struct map_groups *self,
 	return printed;
 }
 
-size_t map_groups__fprintf_maps(struct map_groups *self, int verbose, FILE *fp)
+size_t map_groups__fprintf_maps(struct map_groups *mg, int verbose, FILE *fp)
 {
 	size_t printed = 0, i;
 	for (i = 0; i < MAP__NR_TYPES; ++i)
-		printed += __map_groups__fprintf_maps(self, i, verbose, fp);
+		printed += __map_groups__fprintf_maps(mg, i, verbose, fp);
 	return printed;
 }
 
-static size_t __map_groups__fprintf_removed_maps(struct map_groups *self,
+static size_t __map_groups__fprintf_removed_maps(struct map_groups *mg,
 						 enum map_type type,
 						 int verbose, FILE *fp)
 {
 	struct map *pos;
 	size_t printed = 0;
 
-	list_for_each_entry(pos, &self->removed_maps[type], node) {
+	list_for_each_entry(pos, &mg->removed_maps[type], node) {
 		printed += fprintf(fp, "Map:");
 		printed += map__fprintf(pos, fp);
 		if (verbose > 1) {
@@ -368,26 +380,26 @@ static size_t __map_groups__fprintf_removed_maps(struct map_groups *self,
 	return printed;
 }
 
-static size_t map_groups__fprintf_removed_maps(struct map_groups *self,
+static size_t map_groups__fprintf_removed_maps(struct map_groups *mg,
 					       int verbose, FILE *fp)
 {
 	size_t printed = 0, i;
 	for (i = 0; i < MAP__NR_TYPES; ++i)
-		printed += __map_groups__fprintf_removed_maps(self, i, verbose, fp);
+		printed += __map_groups__fprintf_removed_maps(mg, i, verbose, fp);
 	return printed;
 }
 
-size_t map_groups__fprintf(struct map_groups *self, int verbose, FILE *fp)
+size_t map_groups__fprintf(struct map_groups *mg, int verbose, FILE *fp)
 {
-	size_t printed = map_groups__fprintf_maps(self, verbose, fp);
+	size_t printed = map_groups__fprintf_maps(mg, verbose, fp);
 	printed += fprintf(fp, "Removed maps:\n");
-	return printed + map_groups__fprintf_removed_maps(self, verbose, fp);
+	return printed + map_groups__fprintf_removed_maps(mg, verbose, fp);
 }
 
-int map_groups__fixup_overlappings(struct map_groups *self, struct map *map,
+int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
 				   int verbose, FILE *fp)
 {
-	struct rb_root *root = &self->maps[map->type];
+	struct rb_root *root = &mg->maps[map->type];
 	struct rb_node *next = rb_first(root);
 	int err = 0;
 
@@ -418,7 +430,7 @@ int map_groups__fixup_overlappings(struct map_groups *self, struct map *map,
 			}
 
 			before->end = map->start - 1;
-			map_groups__insert(self, before);
+			map_groups__insert(mg, before);
 			if (verbose >= 2)
 				map__fprintf(before, fp);
 		}
@@ -432,7 +444,7 @@ int map_groups__fixup_overlappings(struct map_groups *self, struct map *map,
 			}
 
 			after->start = map->end + 1;
-			map_groups__insert(self, after);
+			map_groups__insert(mg, after);
 			if (verbose >= 2)
 				map__fprintf(after, fp);
 		}
@@ -441,7 +453,7 @@ move_map:
 		 * If we have references, just move them to a separate list.
 		 */
 		if (pos->referenced)
-			list_add_tail(&pos->node, &self->removed_maps[map->type]);
+			list_add_tail(&pos->node, &mg->removed_maps[map->type]);
 		else
 			map__delete(pos);
 
@@ -455,7 +467,7 @@ move_map:
 /*
  * XXX This should not really _copy_ te maps, but refcount them.
  */
-int map_groups__clone(struct map_groups *self,
+int map_groups__clone(struct map_groups *mg,
 		      struct map_groups *parent, enum map_type type)
 {
 	struct rb_node *nd;
@@ -464,7 +476,7 @@ int map_groups__clone(struct map_groups *self,
 		struct map *new = map__clone(map);
 		if (new == NULL)
 			return -ENOMEM;
-		map_groups__insert(self, new);
+		map_groups__insert(mg, new);
 	}
 	return 0;
 }
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -123,17 +123,17 @@ void map__fixup_end(struct map *self);
 
 void map__reloc_vmlinux(struct map *self);
 
-size_t __map_groups__fprintf_maps(struct map_groups *self,
+size_t __map_groups__fprintf_maps(struct map_groups *mg,
 				  enum map_type type, int verbose, FILE *fp);
 void maps__insert(struct rb_root *maps, struct map *map);
-void maps__remove(struct rb_root *self, struct map *map);
+void maps__remove(struct rb_root *maps, struct map *map);
 struct map *maps__find(struct rb_root *maps, u64 addr);
-void map_groups__init(struct map_groups *self);
-void map_groups__exit(struct map_groups *self);
-int map_groups__clone(struct map_groups *self,
+void map_groups__init(struct map_groups *mg);
+void map_groups__exit(struct map_groups *mg);
+int map_groups__clone(struct map_groups *mg,
 		      struct map_groups *parent, enum map_type type);
-size_t map_groups__fprintf(struct map_groups *self, int verbose, FILE *fp);
-size_t map_groups__fprintf_maps(struct map_groups *self, int verbose, FILE *fp);
+size_t map_groups__fprintf(struct map_groups *mg, int verbose, FILE *fp);
+size_t map_groups__fprintf_maps(struct map_groups *mg, int verbose, FILE *fp);
 
 typedef void (*machine__process_t)(struct machine *self, void *data);
 
@@ -162,29 +162,29 @@ static inline bool machine__is_host(struct machine *self)
 	return self ? self->pid == HOST_KERNEL_ID : false;
 }
 
-static inline void map_groups__insert(struct map_groups *self, struct map *map)
+static inline void map_groups__insert(struct map_groups *mg, struct map *map)
 {
-	maps__insert(&self->maps[map->type], map);
-	map->groups = self;
+	maps__insert(&mg->maps[map->type], map);
+	map->groups = mg;
 }
 
-static inline void map_groups__remove(struct map_groups *self, struct map *map)
+static inline void map_groups__remove(struct map_groups *mg, struct map *map)
 {
-	maps__remove(&self->maps[map->type], map);
+	maps__remove(&mg->maps[map->type], map);
}
 
-static inline struct map *map_groups__find(struct map_groups *self,
+static inline struct map *map_groups__find(struct map_groups *mg,
 					   enum map_type type, u64 addr)
 {
-	return maps__find(&self->maps[type], addr);
+	return maps__find(&mg->maps[type], addr);
 }
 
-struct symbol *map_groups__find_symbol(struct map_groups *self,
+struct symbol *map_groups__find_symbol(struct map_groups *mg,
 				       enum map_type type, u64 addr,
 				       struct map **mapp,
 				       symbol_filter_t filter);
 
-struct symbol *map_groups__find_symbol_by_name(struct map_groups *self,
+struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
 					       enum map_type type,
 					       const char *name,
 					       struct map **mapp,
@@ -208,11 +208,11 @@ struct symbol *machine__find_kernel_function(struct machine *self, u64 addr,
 }
 
 static inline
-struct symbol *map_groups__find_function_by_name(struct map_groups *self,
+struct symbol *map_groups__find_function_by_name(struct map_groups *mg,
 						 const char *name, struct map **mapp,
 						 symbol_filter_t filter)
 {
-	return map_groups__find_symbol_by_name(self, MAP__FUNCTION, name, mapp, filter);
+	return map_groups__find_symbol_by_name(mg, MAP__FUNCTION, name, mapp, filter);
 }
 
 static inline
@@ -225,13 +225,13 @@ struct symbol *machine__find_kernel_function_by_name(struct machine *self,
 					     filter);
 }
 
-int map_groups__fixup_overlappings(struct map_groups *self, struct map *map,
+int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
 				   int verbose, FILE *fp);
 
-struct map *map_groups__find_by_name(struct map_groups *self,
+struct map *map_groups__find_by_name(struct map_groups *mg,
 				     enum map_type type, const char *name);
 struct map *machine__new_module(struct machine *self, u64 start, const char *filename);
 
-void map_groups__flush(struct map_groups *self);
+void map_groups__flush(struct map_groups *mg);
 
 #endif /* __PERF_MAP_H */
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -1956,8 +1956,10 @@ static int __del_trace_probe_event(int fd, struct str_node *ent)
 
 	pr_debug("Writing event: %s\n", buf);
 	ret = write(fd, buf, strlen(buf));
-	if (ret < 0)
+	if (ret < 0) {
+		ret = -errno;
 		goto error;
+	}
 
 	printf("Remove event: %s\n", ent->s);
 	return 0;
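The probe-event fix snapshots errno right after the failed write(), before any cleanup can clobber it, and returns it negated in the usual kernel style. The pattern in isolation (illustrative, not code from this diff):

	ssize_t n = write(fd, buf, strlen(buf));
	if (n < 0) {
		int err = -errno;	/* capture immediately after the syscall */
		/* ...cleanup here may itself reset errno... */
		return err;		/* negative errno convention */
	}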
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -1326,3 +1326,22 @@ int perf_session__cpu_bitmap(struct perf_session *session,
 
 	return 0;
 }
+
+void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
+				bool full)
+{
+	struct stat st;
+	int ret;
+
+	if (session == NULL || fp == NULL)
+		return;
+
+	ret = fstat(session->fd, &st);
+	if (ret == -1)
+		return;
+
+	fprintf(fp, "# ========\n");
+	fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
+	perf_header__fprintf_info(session, fp, full);
+	fprintf(fp, "# ========\n#\n");
+}
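perf_session__fprintf_info() brackets the header dump between "# ========" markers and stamps it with the perf.data file's ctime, bailing out quietly on NULL arguments or an fstat() failure. A one-line caller sketch (obtaining the session is elided, hence illustrative):

	perf_session__fprintf_info(session, stdout, true);	/* true = full header dump */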
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -177,4 +177,5 @@ void perf_session__print_ip(union perf_event *event,
 int perf_session__cpu_bitmap(struct perf_session *session,
 			     const char *cpu_list, unsigned long *cpu_bitmap);
 
+void perf_session__fprintf_info(struct perf_session *s, FILE *fp, bool full);
 #endif /* __PERF_SESSION_H */
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -177,7 +177,9 @@ static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf,
 				       BITS_PER_LONG / 4, self->ip, o);
 	}
 
-	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", self->level);
+	if (!sort_dso.elide)
+		ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", self->level);
+
 	if (self->ms.sym)
 		ret += repsep_snprintf(bf + ret, size - ret, "%s",
 				       self->ms.sym->name);
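Guarding the "[%c] " level marker with !sort_dso.elide makes the privilege character disappear together with the elided DSO column instead of dangling in front of the symbol name. For a kernel symbol the two cases would render roughly as (symbol name illustrative):

	sort_dso.elide == false:  "[k] intel_idle"
	sort_dso.elide == true:   "intel_idle"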
(Some files were not shown because too many files changed in this diff.)