commit 9beeaa2d68
Merge branch 'perf'

Signed-off-by: Avi Kivity <avi@redhat.com>
@@ -165,8 +165,8 @@ the user entry_handler invocation is also skipped.

 1.4 How Does Jump Optimization Work?

-If you configured your kernel with CONFIG_OPTPROBES=y (currently
-this option is supported on x86/x86-64, non-preemptive kernel) and
+If your kernel is built with CONFIG_OPTPROBES=y (currently this flag
+is automatically set 'y' on x86/x86-64, non-preemptive kernel) and
 the "debug.kprobes_optimization" kernel parameter is set to 1 (see
 sysctl(8)), Kprobes tries to reduce probe-hit overhead by using a jump
 instruction instead of a breakpoint instruction at each probepoint.
@@ -271,8 +271,6 @@ tweak the kernel's execution path, you need to suppress optimization,
 using one of the following techniques:
 - Specify an empty function for the kprobe's post_handler or break_handler.
  or
-- Config CONFIG_OPTPROBES=n.
- or
 - Execute 'sysctl -w debug.kprobes_optimization=n'
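
For illustration, a minimal sketch of the first technique (a module-style probe whose empty post_handler keeps it unoptimized; the probed symbol do_fork is only an example, not part of this patch):

	#include <linux/module.h>
	#include <linux/kprobes.h>

	/* An empty post_handler is enough to keep this probe unoptimized. */
	static void post_handler_empty(struct kprobe *p, struct pt_regs *regs,
				       unsigned long flags)
	{
	}

	static struct kprobe kp = {
		.symbol_name	= "do_fork",	/* example probepoint */
		.post_handler	= post_handler_empty,
	};

	static int __init probe_init(void)
	{
		return register_kprobe(&kp);
	}

	static void __exit probe_exit(void)
	{
		unregister_kprobe(&kp);
	}

	module_init(probe_init);
	module_exit(probe_exit);
	MODULE_LICENSE("GPL");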
@@ -307,10 +305,6 @@ it useful to "Compile the kernel with debug info" (CONFIG_DEBUG_INFO),
 so you can use "objdump -d -l vmlinux" to see the source-to-object
 code mapping.

-If you want to reduce probing overhead, set "Kprobes jump optimization
-support" (CONFIG_OPTPROBES) to "y". You can find this option under the
-"Kprobes" line.
-
 4. API Reference

 The Kprobes API includes a "register" function and an "unregister"
@@ -40,7 +40,9 @@ Synopsis of kprobe_events
   $stack	: Fetch stack address.
   $retval	: Fetch return value.(*)
   +|-offs(FETCHARG) : Fetch memory at FETCHARG +|- offs address.(**)
-  NAME=FETCHARG: Set NAME as the argument name of FETCHARG.
+  NAME=FETCHARG : Set NAME as the argument name of FETCHARG.
+  FETCHARG:TYPE : Set TYPE as the type of FETCHARG. Currently, basic types
+		  (u8/u16/u32/u64/s8/s16/s32/s64) are supported.

 (*) only for return probe.
 (**) this is useful for fetching a field of data structures.
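
A small userspace sketch of the new NAME=FETCHARG:TYPE syntax (the probed symbol, the %ax fetch argument, and the debugfs mount point are illustrative assumptions):

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/kernel/debug/tracing/kprobe_events", "w");

		if (!f)
			return 1;
		/* name the fetched register and declare it a signed 32-bit value */
		fprintf(f, "p:myprobe do_sys_open dfd=%%ax:s32\n");
		fclose(f);
		return 0;
	}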
MAINTAINERS
@@ -4353,13 +4353,13 @@ M:	Paul Mackerras <paulus@samba.org>
 M:	Ingo Molnar <mingo@elte.hu>
 M:	Arnaldo Carvalho de Melo <acme@redhat.com>
 S:	Supported
-F:	kernel/perf_event.c
+F:	kernel/perf_event*.c
 F:	include/linux/perf_event.h
-F:	arch/*/kernel/perf_event.c
-F:	arch/*/kernel/*/perf_event.c
-F:	arch/*/kernel/*/*/perf_event.c
+F:	arch/*/kernel/perf_event*.c
+F:	arch/*/kernel/*/perf_event*.c
+F:	arch/*/kernel/*/*/perf_event*.c
 F:	arch/*/include/asm/perf_event.h
-F:	arch/*/lib/perf_event.c
+F:	arch/*/lib/perf_event*.c
 F:	arch/*/kernel/perf_callchain.c
 F:	tools/perf/
@@ -42,15 +42,10 @@ config KPROBES
	  If in doubt, say "N".

 config OPTPROBES
-	bool "Kprobes jump optimization support (EXPERIMENTAL)"
-	default y
-	depends on KPROBES
+	def_bool y
+	depends on KPROBES && HAVE_OPTPROBES
 	depends on !PREEMPT
-	depends on HAVE_OPTPROBES
 	select KALLSYMS_ALL
-	help
-	  This option will allow kprobes to optimize breakpoint to
-	  a jump for reducing its overhead.

 config HAVE_EFFICIENT_UNALIGNED_ACCESS
	bool
@@ -58,6 +58,9 @@ config X86
	select HAVE_ARCH_KMEMCHECK
	select HAVE_USER_RETURN_NOTIFIER

+config INSTRUCTION_DECODER
+	def_bool (KPROBES || PERF_EVENTS)
+
 config OUTPUT_FORMAT
	string
	default "elf32-i386" if X86_32
@@ -502,23 +502,3 @@ config CPU_SUP_UMC_32
	  CPU might render the kernel unbootable.

	  If unsure, say N.
-
-config X86_DS
-	def_bool X86_PTRACE_BTS
-	depends on X86_DEBUGCTLMSR
-	select HAVE_HW_BRANCH_TRACER
-
-config X86_PTRACE_BTS
-	bool "Branch Trace Store"
-	default y
-	depends on X86_DEBUGCTLMSR
-	depends on BROKEN
-	---help---
-	  This adds a ptrace interface to the hardware's branch trace store.
-
-	  Debuggers may use it to collect an execution trace of the debugged
-	  application in order to answer the question 'how did I get here?'.
-	  Debuggers may trace user mode as well as kernel mode.
-
-	  Say Y unless there is no application development on this machine
-	  and you want to save a small amount of code size.
@@ -174,15 +174,6 @@ config IOMMU_LEAK
	  Add a simple leak tracer to the IOMMU code. This is useful when you
	  are debugging a buggy device driver that leaks IOMMU mappings.

-config X86_DS_SELFTEST
-	bool "DS selftest"
-	default y
-	depends on DEBUG_KERNEL
-	depends on X86_DS
-	---help---
-	  Perform Debug Store selftests at boot time.
-	  If in doubt, say "N".
-
 config HAVE_MMIOTRACE_SUPPORT
	def_bool y
@@ -373,6 +373,7 @@ extern atomic_t init_deasserted;
 extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip);
 #endif

+#ifdef CONFIG_X86_LOCAL_APIC
 static inline u32 apic_read(u32 reg)
 {
	return apic->read(reg);
@@ -403,10 +404,19 @@ static inline u32 safe_apic_wait_icr_idle(void)
	return apic->safe_wait_icr_idle();
 }

+#else /* CONFIG_X86_LOCAL_APIC */
+
+static inline u32 apic_read(u32 reg) { return 0; }
+static inline void apic_write(u32 reg, u32 val) { }
+static inline u64 apic_icr_read(void) { return 0; }
+static inline void apic_icr_write(u32 low, u32 high) { }
+static inline void apic_wait_icr_idle(void) { }
+static inline u32 safe_apic_wait_icr_idle(void) { return 0; }
+
+#endif /* CONFIG_X86_LOCAL_APIC */

 static inline void ack_APIC_irq(void)
 {
-#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * ack_APIC_irq() actually gets compiled as a single instruction
	 * ... yummie.
@@ -414,7 +424,6 @@ static inline void ack_APIC_irq(void)

	/* Docs say use 0 for future compatibility */
	apic_write(APIC_EOI, 0);
-#endif
 }

 static inline unsigned default_get_apic_id(unsigned long x)
@@ -1,302 +0,0 @@
-/*
- * Debug Store (DS) support
- *
- * This provides a low-level interface to the hardware's Debug Store
- * feature that is used for branch trace store (BTS) and
- * precise-event based sampling (PEBS).
- *
- * It manages:
- * - DS and BTS hardware configuration
- * - buffer overflow handling (to be done)
- * - buffer access
- *
- * It does not do:
- * - security checking (is the caller allowed to trace the task)
- * - buffer allocation (memory accounting)
- *
- *
- * Copyright (C) 2007-2009 Intel Corporation.
- * Markus Metzger <markus.t.metzger@intel.com>, 2007-2009
- */
-
-#ifndef _ASM_X86_DS_H
-#define _ASM_X86_DS_H
-
-
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/err.h>
-
-
-#ifdef CONFIG_X86_DS
-
-struct task_struct;
-struct ds_context;
-struct ds_tracer;
-struct bts_tracer;
-struct pebs_tracer;
-
-typedef void (*bts_ovfl_callback_t)(struct bts_tracer *);
-typedef void (*pebs_ovfl_callback_t)(struct pebs_tracer *);
-
-
-/*
- * A list of features plus corresponding macros to talk about them in
- * the ds_request function's flags parameter.
- *
- * We use the enum to index an array of corresponding control bits;
- * we use the macro to index a flags bit-vector.
- */
-enum ds_feature {
-	dsf_bts = 0,
-
-	dsf_bts_kernel,
-#define BTS_KERNEL (1 << dsf_bts_kernel)
-	/* trace kernel-mode branches */
-
-	dsf_bts_user,
-#define BTS_USER (1 << dsf_bts_user)
-	/* trace user-mode branches */
-
-	dsf_bts_overflow,
-	dsf_bts_max,
-	dsf_pebs = dsf_bts_max,
-
-	dsf_pebs_max,
-	dsf_ctl_max = dsf_pebs_max,
-	dsf_bts_timestamps = dsf_ctl_max,
-#define BTS_TIMESTAMPS (1 << dsf_bts_timestamps)
-	/* add timestamps into BTS trace */
-
-#define BTS_USER_FLAGS (BTS_KERNEL | BTS_USER | BTS_TIMESTAMPS)
-};
-
-
-/*
- * Request BTS or PEBS
- *
- * Due to alignement constraints, the actual buffer may be slightly
- * smaller than the requested or provided buffer.
- *
- * Returns a pointer to a tracer structure on success, or
- * ERR_PTR(errcode) on failure.
- *
- * The interrupt threshold is independent from the overflow callback
- * to allow users to use their own overflow interrupt handling mechanism.
- *
- * The function might sleep.
- *
- * task: the task to request recording for
- * cpu: the cpu to request recording for
- * base: the base pointer for the (non-pageable) buffer;
- * size: the size of the provided buffer in bytes
- * ovfl: pointer to a function to be called on buffer overflow;
- *       NULL if cyclic buffer requested
- * th: the interrupt threshold in records from the end of the buffer;
- *     -1 if no interrupt threshold is requested.
- * flags: a bit-mask of the above flags
- */
-extern struct bts_tracer *ds_request_bts_task(struct task_struct *task,
-					      void *base, size_t size,
-					      bts_ovfl_callback_t ovfl,
-					      size_t th, unsigned int flags);
-extern struct bts_tracer *ds_request_bts_cpu(int cpu, void *base, size_t size,
-					     bts_ovfl_callback_t ovfl,
-					     size_t th, unsigned int flags);
-extern struct pebs_tracer *ds_request_pebs_task(struct task_struct *task,
-						void *base, size_t size,
-						pebs_ovfl_callback_t ovfl,
-						size_t th, unsigned int flags);
-extern struct pebs_tracer *ds_request_pebs_cpu(int cpu,
-					       void *base, size_t size,
-					       pebs_ovfl_callback_t ovfl,
-					       size_t th, unsigned int flags);
-
-/*
- * Release BTS or PEBS resources
- * Suspend and resume BTS or PEBS tracing
- *
- * Must be called with irq's enabled.
- *
- * tracer: the tracer handle returned from ds_request_~()
- */
-extern void ds_release_bts(struct bts_tracer *tracer);
-extern void ds_suspend_bts(struct bts_tracer *tracer);
-extern void ds_resume_bts(struct bts_tracer *tracer);
-extern void ds_release_pebs(struct pebs_tracer *tracer);
-extern void ds_suspend_pebs(struct pebs_tracer *tracer);
-extern void ds_resume_pebs(struct pebs_tracer *tracer);
-
-/*
- * Release BTS or PEBS resources
- * Suspend and resume BTS or PEBS tracing
- *
- * Cpu tracers must call this on the traced cpu.
- * Task tracers must call ds_release_~_noirq() for themselves.
- *
- * May be called with irq's disabled.
- *
- * Returns 0 if successful;
- * -EPERM if the cpu tracer does not trace the current cpu.
- * -EPERM if the task tracer does not trace itself.
- *
- * tracer: the tracer handle returned from ds_request_~()
- */
-extern int ds_release_bts_noirq(struct bts_tracer *tracer);
-extern int ds_suspend_bts_noirq(struct bts_tracer *tracer);
-extern int ds_resume_bts_noirq(struct bts_tracer *tracer);
-extern int ds_release_pebs_noirq(struct pebs_tracer *tracer);
-extern int ds_suspend_pebs_noirq(struct pebs_tracer *tracer);
-extern int ds_resume_pebs_noirq(struct pebs_tracer *tracer);
-
-
-/*
- * The raw DS buffer state as it is used for BTS and PEBS recording.
- *
- * This is the low-level, arch-dependent interface for working
- * directly on the raw trace data.
- */
-struct ds_trace {
-	/* the number of bts/pebs records */
-	size_t n;
-	/* the size of a bts/pebs record in bytes */
-	size_t size;
-	/* pointers into the raw buffer:
-	   - to the first entry */
-	void *begin;
-	/* - one beyond the last entry */
-	void *end;
-	/* - one beyond the newest entry */
-	void *top;
-	/* - the interrupt threshold */
-	void *ith;
-	/* flags given on ds_request() */
-	unsigned int flags;
-};
-
-/*
- * An arch-independent view on branch trace data.
- */
-enum bts_qualifier {
-	bts_invalid,
-#define BTS_INVALID bts_invalid
-
-	bts_branch,
-#define BTS_BRANCH bts_branch
-
-	bts_task_arrives,
-#define BTS_TASK_ARRIVES bts_task_arrives
-
-	bts_task_departs,
-#define BTS_TASK_DEPARTS bts_task_departs
-
-	bts_qual_bit_size = 4,
-	bts_qual_max = (1 << bts_qual_bit_size),
-};
-
-struct bts_struct {
-	__u64 qualifier;
-	union {
-		/* BTS_BRANCH */
-		struct {
-			__u64 from;
-			__u64 to;
-		} lbr;
-		/* BTS_TASK_ARRIVES or BTS_TASK_DEPARTS */
-		struct {
-			__u64 clock;
-			pid_t pid;
-		} event;
-	} variant;
-};
-
-
-/*
- * The BTS state.
- *
- * This gives access to the raw DS state and adds functions to provide
- * an arch-independent view of the BTS data.
- */
-struct bts_trace {
-	struct ds_trace ds;
-
-	int (*read)(struct bts_tracer *tracer, const void *at,
-		    struct bts_struct *out);
-	int (*write)(struct bts_tracer *tracer, const struct bts_struct *in);
-};
-
-
-/*
- * The PEBS state.
- *
- * This gives access to the raw DS state and the PEBS-specific counter
- * reset value.
- */
-struct pebs_trace {
-	struct ds_trace ds;
-
-	/* the number of valid counters in the below array */
-	unsigned int counters;
-
-#define MAX_PEBS_COUNTERS 4
-	/* the counter reset value */
-	unsigned long long counter_reset[MAX_PEBS_COUNTERS];
-};
-
-
-/*
- * Read the BTS or PEBS trace.
- *
- * Returns a view on the trace collected for the parameter tracer.
- *
- * The view remains valid as long as the traced task is not running or
- * the tracer is suspended.
- * Writes into the trace buffer are not reflected.
- *
- * tracer: the tracer handle returned from ds_request_~()
- */
-extern const struct bts_trace *ds_read_bts(struct bts_tracer *tracer);
-extern const struct pebs_trace *ds_read_pebs(struct pebs_tracer *tracer);
-
-
-/*
- * Reset the write pointer of the BTS/PEBS buffer.
- *
- * Returns 0 on success; -Eerrno on error
- *
- * tracer: the tracer handle returned from ds_request_~()
- */
-extern int ds_reset_bts(struct bts_tracer *tracer);
-extern int ds_reset_pebs(struct pebs_tracer *tracer);
-
-/*
- * Set the PEBS counter reset value.
- *
- * Returns 0 on success; -Eerrno on error
- *
- * tracer: the tracer handle returned from ds_request_pebs()
- * counter: the index of the counter
- * value: the new counter reset value
- */
-extern int ds_set_pebs_reset(struct pebs_tracer *tracer,
-			     unsigned int counter, u64 value);
-
-/*
- * Initialization
- */
-struct cpuinfo_x86;
-extern void __cpuinit ds_init_intel(struct cpuinfo_x86 *);
-
-/*
- * Context switch work
- */
-extern void ds_switch_to(struct task_struct *prev, struct task_struct *next);
-
-#else /* CONFIG_X86_DS */
-
-struct cpuinfo_x86;
-static inline void __cpuinit ds_init_intel(struct cpuinfo_x86 *ignored) {}
-static inline void ds_switch_to(struct task_struct *prev,
-				struct task_struct *next) {}
-
-#endif /* CONFIG_X86_DS */
-#endif /* _ASM_X86_DS_H */
@@ -68,6 +68,8 @@ struct insn {
	const insn_byte_t *next_byte;
 };

+#define MAX_INSN_SIZE	16
+
 #define X86_MODRM_MOD(modrm) (((modrm) & 0xc0) >> 6)
 #define X86_MODRM_REG(modrm) (((modrm) & 0x38) >> 3)
 #define X86_MODRM_RM(modrm) ((modrm) & 0x07)
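
With MAX_INSN_SIZE exported next to struct insn, decoder callers can size their copy buffers. A sketch of the usual decode pattern, assuming the in-kernel decoder's insn_init()/insn_get_length() interface of this era (the helper name is illustrative):

	#include <asm/insn.h>
	#include <linux/string.h>

	/* Return the decoded length of the instruction at kaddr. */
	static int decoded_insn_length(const void *kaddr)
	{
		struct insn insn;
		u8 buf[MAX_INSN_SIZE];

		memcpy(buf, kaddr, MAX_INSN_SIZE);
	#ifdef CONFIG_X86_64
		insn_init(&insn, buf, 1);	/* decode in 64-bit mode */
	#else
		insn_init(&insn, buf, 0);
	#endif
		insn_get_length(&insn);		/* fills insn.length */
		return insn.length;
	}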
@@ -24,6 +24,7 @@
 #include <linux/types.h>
 #include <linux/ptrace.h>
 #include <linux/percpu.h>
+#include <asm/insn.h>

 #define __ARCH_WANT_KPROBES_INSN_SLOT

@@ -36,7 +37,6 @@ typedef u8 kprobe_opcode_t;
 #define RELATIVEJUMP_SIZE 5
 #define RELATIVECALL_OPCODE 0xe8
 #define RELATIVE_ADDR_SIZE 4
-#define MAX_INSN_SIZE 16
 #define MAX_STACK_SIZE 64
 #define MIN_STACK_SIZE(ADDR) \
	(((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
@@ -71,11 +71,14 @@
 #define MSR_IA32_LASTINTTOIP		0x000001de

 /* DEBUGCTLMSR bits (others vary by model): */
-#define _DEBUGCTLMSR_LBR	0 /* last branch recording */
-#define _DEBUGCTLMSR_BTF	1 /* single-step on branches */
-
-#define DEBUGCTLMSR_LBR		(1UL << _DEBUGCTLMSR_LBR)
-#define DEBUGCTLMSR_BTF		(1UL << _DEBUGCTLMSR_BTF)
+#define DEBUGCTLMSR_LBR			(1UL <<  0) /* last branch recording */
+#define DEBUGCTLMSR_BTF			(1UL <<  1) /* single-step on branches */
+#define DEBUGCTLMSR_TR			(1UL <<  6)
+#define DEBUGCTLMSR_BTS			(1UL <<  7)
+#define DEBUGCTLMSR_BTINT		(1UL <<  8)
+#define DEBUGCTLMSR_BTS_OFF_OS		(1UL <<  9)
+#define DEBUGCTLMSR_BTS_OFF_USR		(1UL << 10)
+#define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI	(1UL << 11)

 #define MSR_IA32_MC0_CTL		0x00000400
 #define MSR_IA32_MC0_STATUS		0x00000401
@@ -359,6 +362,8 @@
 #define MSR_P4_U2L_ESCR0		0x000003b0
 #define MSR_P4_U2L_ESCR1		0x000003b1

+#define MSR_P4_PEBS_MATRIX_VERT		0x000003f2
+
 /* Intel Core-based CPU performance counters */
 #define MSR_CORE_PERF_FIXED_CTR0	0x00000309
 #define MSR_CORE_PERF_FIXED_CTR1	0x0000030a
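
As a sketch of how these bits compose (the bit semantics assumed here follow Intel's usual BTS usage: TR emits branch trace messages, BTS stores them in the DS buffer, BTINT raises an interrupt at the buffer threshold; the helper name is illustrative):

	#include <linux/types.h>

	/* Build a DEBUGCTL value for BTS-style branch tracing. */
	static inline u64 bts_debugctl_value(bool kernel, bool user)
	{
		u64 debugctl = DEBUGCTLMSR_TR | DEBUGCTLMSR_BTS | DEBUGCTLMSR_BTINT;

		if (!kernel)
			debugctl |= DEBUGCTLMSR_BTS_OFF_OS;	/* skip ring-0 branches */
		if (!user)
			debugctl |= DEBUGCTLMSR_BTS_OFF_USR;	/* skip ring-3 branches */
		return debugctl;
	}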
@@ -5,7 +5,7 @@
  * Performance event hw details:
  */

-#define X86_PMC_MAX_GENERIC	8
+#define X86_PMC_MAX_GENERIC	32
 #define X86_PMC_MAX_FIXED	3

 #define X86_PMC_IDX_GENERIC	0
@@ -18,39 +18,31 @@
 #define MSR_ARCH_PERFMON_EVENTSEL0	0x186
 #define MSR_ARCH_PERFMON_EVENTSEL1	0x187

-#define ARCH_PERFMON_EVENTSEL_ENABLE	(1 << 22)
-#define ARCH_PERFMON_EVENTSEL_ANY	(1 << 21)
-#define ARCH_PERFMON_EVENTSEL_INT	(1 << 20)
-#define ARCH_PERFMON_EVENTSEL_OS	(1 << 17)
-#define ARCH_PERFMON_EVENTSEL_USR	(1 << 16)
-
-/*
- * Includes eventsel and unit mask as well:
- */
-
-#define INTEL_ARCH_EVTSEL_MASK		0x000000FFULL
-#define INTEL_ARCH_UNIT_MASK		0x0000FF00ULL
-#define INTEL_ARCH_EDGE_MASK		0x00040000ULL
-#define INTEL_ARCH_INV_MASK		0x00800000ULL
-#define INTEL_ARCH_CNT_MASK		0xFF000000ULL
-#define INTEL_ARCH_EVENT_MASK	(INTEL_ARCH_UNIT_MASK|INTEL_ARCH_EVTSEL_MASK)
-
-/*
- * filter mask to validate fixed counter events.
- * the following filters disqualify for fixed counters:
- *  - inv
- *  - edge
- *  - cnt-mask
- *  The other filters are supported by fixed counters.
- *  The any-thread option is supported starting with v3.
- */
-#define INTEL_ARCH_FIXED_MASK \
-	(INTEL_ARCH_CNT_MASK| \
-	 INTEL_ARCH_INV_MASK| \
-	 INTEL_ARCH_EDGE_MASK|\
-	 INTEL_ARCH_UNIT_MASK|\
-	 INTEL_ARCH_EVENT_MASK)
+#define ARCH_PERFMON_EVENTSEL_EVENT	0x000000FFULL
+#define ARCH_PERFMON_EVENTSEL_UMASK	0x0000FF00ULL
+#define ARCH_PERFMON_EVENTSEL_USR	(1ULL << 16)
+#define ARCH_PERFMON_EVENTSEL_OS	(1ULL << 17)
+#define ARCH_PERFMON_EVENTSEL_EDGE	(1ULL << 18)
+#define ARCH_PERFMON_EVENTSEL_INT	(1ULL << 20)
+#define ARCH_PERFMON_EVENTSEL_ANY	(1ULL << 21)
+#define ARCH_PERFMON_EVENTSEL_ENABLE	(1ULL << 22)
+#define ARCH_PERFMON_EVENTSEL_INV	(1ULL << 23)
+#define ARCH_PERFMON_EVENTSEL_CMASK	0xFF000000ULL
+
+#define AMD64_EVENTSEL_EVENT	\
+	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
+#define INTEL_ARCH_EVENT_MASK	\
+	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)
+
+#define X86_RAW_EVENT_MASK		\
+	(ARCH_PERFMON_EVENTSEL_EVENT |	\
+	 ARCH_PERFMON_EVENTSEL_UMASK |	\
+	 ARCH_PERFMON_EVENTSEL_EDGE  |	\
+	 ARCH_PERFMON_EVENTSEL_INV   |	\
+	 ARCH_PERFMON_EVENTSEL_CMASK)
+#define AMD64_RAW_EVENT_MASK		\
+	(X86_RAW_EVENT_MASK          |	\
+	 AMD64_EVENTSEL_EVENT)

 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL	0x3c
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK	(0x00 << 8)
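
A sketch of how the unified masks compose an EVENTSEL value (only the macros defined above are used; the helper name is illustrative, and the example arguments are the architectural unhalted-core-cycles encoding):

	#include <linux/types.h>

	/* Build an EVENTSEL value counting in both user and kernel mode. */
	static inline u64 x86_eventsel(u8 event, u8 umask)
	{
		u64 config = 0;

		config |= (u64)event & ARCH_PERFMON_EVENTSEL_EVENT;
		config |= ((u64)umask << 8) & ARCH_PERFMON_EVENTSEL_UMASK;
		config |= ARCH_PERFMON_EVENTSEL_USR | ARCH_PERFMON_EVENTSEL_OS;
		config |= ARCH_PERFMON_EVENTSEL_ENABLE;

		return config;
	}

	/* e.g. x86_eventsel(ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL, 0x00) */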
@@ -67,7 +59,7 @@
 union cpuid10_eax {
	struct {
		unsigned int version_id:8;
-		unsigned int num_events:8;
+		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
@@ -76,7 +68,7 @@ union cpuid10_eax {

 union cpuid10_edx {
	struct {
-		unsigned int num_events_fixed:4;
+		unsigned int num_counters_fixed:4;
		unsigned int reserved:28;
	} split;
	unsigned int full;
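
A userspace sketch of reading the renamed fields from CPUID leaf 0xA (the union definitions are repeated from the hunk above so the example is self-contained; __get_cpuid() is GCC's cpuid helper):

	#include <cpuid.h>
	#include <stdio.h>

	union cpuid10_eax {
		struct {
			unsigned int version_id:8;
			unsigned int num_counters:8;
			unsigned int bit_width:8;
			unsigned int mask_length:8;
		} split;
		unsigned int full;
	};

	union cpuid10_edx {
		struct {
			unsigned int num_counters_fixed:4;
			unsigned int reserved:28;
		} split;
		unsigned int full;
	};

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;
		union cpuid10_eax ea;
		union cpuid10_edx ed;

		if (!__get_cpuid(0x0a, &eax, &ebx, &ecx, &edx))
			return 1;
		ea.full = eax;
		ed.full = edx;
		printf("perfmon v%u: %u generic, %u fixed counters\n",
		       ea.split.version_id, ea.split.num_counters,
		       ed.split.num_counters_fixed);
		return 0;
	}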
@@ -136,6 +128,18 @@ extern void perf_events_lapic_init(void);

 #define PERF_EVENT_INDEX_OFFSET			0

+/*
+ * Abuse bit 3 of the cpu eflags register to indicate proper PEBS IP fixups.
+ * This flag is otherwise unused and ABI specified to be 0, so nobody should
+ * care what we do with it.
+ */
+#define PERF_EFLAGS_EXACT	(1UL << 3)
+
+struct pt_regs;
+extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
+extern unsigned long perf_misc_flags(struct pt_regs *regs);
+#define perf_misc_flags(regs)	perf_misc_flags(regs)
+
 #else
 static inline void init_hw_perf_events(void)		{ }
 static inline void perf_events_lapic_init(void)	{ }
@ -0,0 +1,794 @@
|
||||||
|
/*
|
||||||
|
* Netburst Perfomance Events (P4, old Xeon)
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef PERF_EVENT_P4_H
|
||||||
|
#define PERF_EVENT_P4_H
|
||||||
|
|
||||||
|
#include <linux/cpu.h>
|
||||||
|
#include <linux/bitops.h>
|
||||||
|
|
||||||
|
/*
|
||||||
|
* NetBurst has perfomance MSRs shared between
|
||||||
|
* threads if HT is turned on, ie for both logical
|
||||||
|
* processors (mem: in turn in Atom with HT support
|
||||||
|
* perf-MSRs are not shared and every thread has its
|
||||||
|
* own perf-MSRs set)
|
||||||
|
*/
|
||||||
|
#define ARCH_P4_TOTAL_ESCR (46)
|
||||||
|
#define ARCH_P4_RESERVED_ESCR (2) /* IQ_ESCR(0,1) not always present */
|
||||||
|
#define ARCH_P4_MAX_ESCR (ARCH_P4_TOTAL_ESCR - ARCH_P4_RESERVED_ESCR)
|
||||||
|
#define ARCH_P4_MAX_CCCR (18)
|
||||||
|
#define ARCH_P4_MAX_COUNTER (ARCH_P4_MAX_CCCR / 2)
|
||||||
|
|
||||||
|
#define P4_ESCR_EVENT_MASK 0x7e000000U
|
||||||
|
#define P4_ESCR_EVENT_SHIFT 25
|
||||||
|
#define P4_ESCR_EVENTMASK_MASK 0x01fffe00U
|
||||||
|
#define P4_ESCR_EVENTMASK_SHIFT 9
|
||||||
|
#define P4_ESCR_TAG_MASK 0x000001e0U
|
||||||
|
#define P4_ESCR_TAG_SHIFT 5
|
||||||
|
#define P4_ESCR_TAG_ENABLE 0x00000010U
|
||||||
|
#define P4_ESCR_T0_OS 0x00000008U
|
||||||
|
#define P4_ESCR_T0_USR 0x00000004U
|
||||||
|
#define P4_ESCR_T1_OS 0x00000002U
|
||||||
|
#define P4_ESCR_T1_USR 0x00000001U
|
||||||
|
|
||||||
|
#define P4_ESCR_EVENT(v) ((v) << P4_ESCR_EVENT_SHIFT)
|
||||||
|
#define P4_ESCR_EMASK(v) ((v) << P4_ESCR_EVENTMASK_SHIFT)
|
||||||
|
#define P4_ESCR_TAG(v) ((v) << P4_ESCR_TAG_SHIFT)
|
||||||
|
|
||||||
|
/* Non HT mask */
|
||||||
|
#define P4_ESCR_MASK \
|
||||||
|
(P4_ESCR_EVENT_MASK | \
|
||||||
|
P4_ESCR_EVENTMASK_MASK | \
|
||||||
|
P4_ESCR_TAG_MASK | \
|
||||||
|
P4_ESCR_TAG_ENABLE | \
|
||||||
|
P4_ESCR_T0_OS | \
|
||||||
|
P4_ESCR_T0_USR)
|
||||||
|
|
||||||
|
/* HT mask */
|
||||||
|
#define P4_ESCR_MASK_HT \
|
||||||
|
(P4_ESCR_MASK | P4_ESCR_T1_OS | P4_ESCR_T1_USR)
|
||||||
|
|
||||||
|
#define P4_CCCR_OVF 0x80000000U
|
||||||
|
#define P4_CCCR_CASCADE 0x40000000U
|
||||||
|
#define P4_CCCR_OVF_PMI_T0 0x04000000U
|
||||||
|
#define P4_CCCR_OVF_PMI_T1 0x08000000U
|
||||||
|
#define P4_CCCR_FORCE_OVF 0x02000000U
|
||||||
|
#define P4_CCCR_EDGE 0x01000000U
|
||||||
|
#define P4_CCCR_THRESHOLD_MASK 0x00f00000U
|
||||||
|
#define P4_CCCR_THRESHOLD_SHIFT 20
|
||||||
|
#define P4_CCCR_COMPLEMENT 0x00080000U
|
||||||
|
#define P4_CCCR_COMPARE 0x00040000U
|
||||||
|
#define P4_CCCR_ESCR_SELECT_MASK 0x0000e000U
|
||||||
|
#define P4_CCCR_ESCR_SELECT_SHIFT 13
|
||||||
|
#define P4_CCCR_ENABLE 0x00001000U
|
||||||
|
#define P4_CCCR_THREAD_SINGLE 0x00010000U
|
||||||
|
#define P4_CCCR_THREAD_BOTH 0x00020000U
|
||||||
|
#define P4_CCCR_THREAD_ANY 0x00030000U
|
||||||
|
#define P4_CCCR_RESERVED 0x00000fffU
|
||||||
|
|
||||||
|
#define P4_CCCR_THRESHOLD(v) ((v) << P4_CCCR_THRESHOLD_SHIFT)
|
||||||
|
#define P4_CCCR_ESEL(v) ((v) << P4_CCCR_ESCR_SELECT_SHIFT)
|
||||||
|
|
||||||
|
/* Custom bits in reerved CCCR area */
|
||||||
|
#define P4_CCCR_CACHE_OPS_MASK 0x0000003fU
|
||||||
|
|
||||||
|
|
||||||
|
/* Non HT mask */
|
||||||
|
#define P4_CCCR_MASK \
|
||||||
|
(P4_CCCR_OVF | \
|
||||||
|
P4_CCCR_CASCADE | \
|
||||||
|
P4_CCCR_OVF_PMI_T0 | \
|
||||||
|
P4_CCCR_FORCE_OVF | \
|
||||||
|
P4_CCCR_EDGE | \
|
||||||
|
P4_CCCR_THRESHOLD_MASK | \
|
||||||
|
P4_CCCR_COMPLEMENT | \
|
||||||
|
P4_CCCR_COMPARE | \
|
||||||
|
P4_CCCR_ESCR_SELECT_MASK | \
|
||||||
|
P4_CCCR_ENABLE)
|
||||||
|
|
||||||
|
/* HT mask */
|
||||||
|
#define P4_CCCR_MASK_HT (P4_CCCR_MASK | P4_CCCR_THREAD_ANY)
|
||||||
|
|
||||||
|
#define P4_GEN_ESCR_EMASK(class, name, bit) \
|
||||||
|
class##__##name = ((1 << bit) << P4_ESCR_EVENTMASK_SHIFT)
|
||||||
|
#define P4_ESCR_EMASK_BIT(class, name) class##__##name
|
||||||
|
|
||||||
|
/*
|
||||||
|
* config field is 64bit width and consists of
|
||||||
|
* HT << 63 | ESCR << 32 | CCCR
|
||||||
|
* where HT is HyperThreading bit (since ESCR
|
||||||
|
* has it reserved we may use it for own purpose)
|
||||||
|
*
|
||||||
|
* note that this is NOT the addresses of respective
|
||||||
|
* ESCR and CCCR but rather an only packed value should
|
||||||
|
* be unpacked and written to a proper addresses
|
||||||
|
*
|
||||||
|
* the base idea is to pack as much info as
|
||||||
|
* possible
|
||||||
|
*/
|
||||||
|
#define p4_config_pack_escr(v) (((u64)(v)) << 32)
|
||||||
|
#define p4_config_pack_cccr(v) (((u64)(v)) & 0xffffffffULL)
|
||||||
|
#define p4_config_unpack_escr(v) (((u64)(v)) >> 32)
|
||||||
|
#define p4_config_unpack_cccr(v) (((u64)(v)) & 0xffffffffULL)
|
||||||
|
|
||||||
|
#define p4_config_unpack_emask(v) \
|
||||||
|
({ \
|
||||||
|
u32 t = p4_config_unpack_escr((v)); \
|
||||||
|
t = t & P4_ESCR_EVENTMASK_MASK; \
|
||||||
|
t = t >> P4_ESCR_EVENTMASK_SHIFT; \
|
||||||
|
t; \
|
||||||
|
})
|
||||||
|
|
||||||
|
#define p4_config_unpack_event(v) \
|
||||||
|
({ \
|
||||||
|
u32 t = p4_config_unpack_escr((v)); \
|
||||||
|
t = t & P4_ESCR_EVENT_MASK; \
|
||||||
|
t = t >> P4_ESCR_EVENT_SHIFT; \
|
||||||
|
t; \
|
||||||
|
})
|
||||||
|
|
||||||
|
#define p4_config_unpack_cache_event(v) (((u64)(v)) & P4_CCCR_CACHE_OPS_MASK)
|
||||||
|
|
||||||
|
#define P4_CONFIG_HT_SHIFT 63
|
||||||
|
#define P4_CONFIG_HT (1ULL << P4_CONFIG_HT_SHIFT)
|
||||||
|
|
||||||
|
static inline bool p4_is_event_cascaded(u64 config)
|
||||||
|
{
|
||||||
|
u32 cccr = p4_config_unpack_cccr(config);
|
||||||
|
return !!(cccr & P4_CCCR_CASCADE);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline int p4_ht_config_thread(u64 config)
|
||||||
|
{
|
||||||
|
return !!(config & P4_CONFIG_HT);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline u64 p4_set_ht_bit(u64 config)
|
||||||
|
{
|
||||||
|
return config | P4_CONFIG_HT;
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline u64 p4_clear_ht_bit(u64 config)
|
||||||
|
{
|
||||||
|
return config & ~P4_CONFIG_HT;
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline int p4_ht_active(void)
|
||||||
|
{
|
||||||
|
#ifdef CONFIG_SMP
|
||||||
|
return smp_num_siblings > 1;
|
||||||
|
#endif
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline int p4_ht_thread(int cpu)
|
||||||
|
{
|
||||||
|
#ifdef CONFIG_SMP
|
||||||
|
if (smp_num_siblings == 2)
|
||||||
|
return cpu != cpumask_first(__get_cpu_var(cpu_sibling_map));
|
||||||
|
#endif
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline int p4_should_swap_ts(u64 config, int cpu)
|
||||||
|
{
|
||||||
|
return p4_ht_config_thread(config) ^ p4_ht_thread(cpu);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline u32 p4_default_cccr_conf(int cpu)
|
||||||
|
{
|
||||||
|
/*
|
||||||
|
* Note that P4_CCCR_THREAD_ANY is "required" on
|
||||||
|
* non-HT machines (on HT machines we count TS events
|
||||||
|
* regardless the state of second logical processor
|
||||||
|
*/
|
||||||
|
u32 cccr = P4_CCCR_THREAD_ANY;
|
||||||
|
|
||||||
|
if (!p4_ht_thread(cpu))
|
||||||
|
cccr |= P4_CCCR_OVF_PMI_T0;
|
||||||
|
else
|
||||||
|
cccr |= P4_CCCR_OVF_PMI_T1;
|
||||||
|
|
||||||
|
return cccr;
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline u32 p4_default_escr_conf(int cpu, int exclude_os, int exclude_usr)
|
||||||
|
{
|
||||||
|
u32 escr = 0;
|
||||||
|
|
||||||
|
if (!p4_ht_thread(cpu)) {
|
||||||
|
if (!exclude_os)
|
||||||
|
escr |= P4_ESCR_T0_OS;
|
||||||
|
if (!exclude_usr)
|
||||||
|
escr |= P4_ESCR_T0_USR;
|
||||||
|
} else {
|
||||||
|
if (!exclude_os)
|
||||||
|
escr |= P4_ESCR_T1_OS;
|
||||||
|
if (!exclude_usr)
|
||||||
|
escr |= P4_ESCR_T1_USR;
|
||||||
|
}
|
||||||
|
|
||||||
|
return escr;
|
||||||
|
}
|
||||||
|
|
||||||
|
enum P4_EVENTS {
|
||||||
|
P4_EVENT_TC_DELIVER_MODE,
|
||||||
|
P4_EVENT_BPU_FETCH_REQUEST,
|
||||||
|
P4_EVENT_ITLB_REFERENCE,
|
||||||
|
P4_EVENT_MEMORY_CANCEL,
|
||||||
|
P4_EVENT_MEMORY_COMPLETE,
|
||||||
|
P4_EVENT_LOAD_PORT_REPLAY,
|
||||||
|
P4_EVENT_STORE_PORT_REPLAY,
|
||||||
|
P4_EVENT_MOB_LOAD_REPLAY,
|
||||||
|
P4_EVENT_PAGE_WALK_TYPE,
|
||||||
|
P4_EVENT_BSQ_CACHE_REFERENCE,
|
||||||
|
P4_EVENT_IOQ_ALLOCATION,
|
||||||
|
P4_EVENT_IOQ_ACTIVE_ENTRIES,
|
||||||
|
P4_EVENT_FSB_DATA_ACTIVITY,
|
||||||
|
P4_EVENT_BSQ_ALLOCATION,
|
||||||
|
P4_EVENT_BSQ_ACTIVE_ENTRIES,
|
||||||
|
P4_EVENT_SSE_INPUT_ASSIST,
|
||||||
|
P4_EVENT_PACKED_SP_UOP,
|
||||||
|
P4_EVENT_PACKED_DP_UOP,
|
||||||
|
P4_EVENT_SCALAR_SP_UOP,
|
||||||
|
P4_EVENT_SCALAR_DP_UOP,
|
||||||
|
P4_EVENT_64BIT_MMX_UOP,
|
||||||
|
P4_EVENT_128BIT_MMX_UOP,
|
||||||
|
P4_EVENT_X87_FP_UOP,
|
||||||
|
P4_EVENT_TC_MISC,
|
||||||
|
P4_EVENT_GLOBAL_POWER_EVENTS,
|
||||||
|
P4_EVENT_TC_MS_XFER,
|
||||||
|
P4_EVENT_UOP_QUEUE_WRITES,
|
||||||
|
P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE,
|
||||||
|
P4_EVENT_RETIRED_BRANCH_TYPE,
|
||||||
|
P4_EVENT_RESOURCE_STALL,
|
||||||
|
P4_EVENT_WC_BUFFER,
|
||||||
|
P4_EVENT_B2B_CYCLES,
|
||||||
|
P4_EVENT_BNR,
|
||||||
|
P4_EVENT_SNOOP,
|
||||||
|
P4_EVENT_RESPONSE,
|
||||||
|
P4_EVENT_FRONT_END_EVENT,
|
||||||
|
P4_EVENT_EXECUTION_EVENT,
|
||||||
|
P4_EVENT_REPLAY_EVENT,
|
||||||
|
P4_EVENT_INSTR_RETIRED,
|
||||||
|
P4_EVENT_UOPS_RETIRED,
|
||||||
|
P4_EVENT_UOP_TYPE,
|
||||||
|
P4_EVENT_BRANCH_RETIRED,
|
||||||
|
P4_EVENT_MISPRED_BRANCH_RETIRED,
|
||||||
|
P4_EVENT_X87_ASSIST,
|
||||||
|
P4_EVENT_MACHINE_CLEAR,
|
||||||
|
P4_EVENT_INSTR_COMPLETED,
|
||||||
|
};
|
||||||
|
|
||||||
|
#define P4_OPCODE(event) event##_OPCODE
|
||||||
|
#define P4_OPCODE_ESEL(opcode) ((opcode & 0x00ff) >> 0)
|
||||||
|
#define P4_OPCODE_EVNT(opcode) ((opcode & 0xff00) >> 8)
|
||||||
|
#define P4_OPCODE_PACK(event, sel) (((event) << 8) | sel)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Comments below the event represent ESCR restriction
|
||||||
|
* for this event and counter index per ESCR
|
||||||
|
*
|
||||||
|
* MSR_P4_IQ_ESCR0 and MSR_P4_IQ_ESCR1 are available only on early
|
||||||
|
* processor builds (family 0FH, models 01H-02H). These MSRs
|
||||||
|
* are not available on later versions, so that we don't use
|
||||||
|
* them completely
|
||||||
|
*
|
||||||
|
* Also note that CCCR1 do not have P4_CCCR_ENABLE bit properly
|
||||||
|
* working so that we should not use this CCCR and respective
|
||||||
|
* counter as result
|
||||||
|
*/
|
||||||
|
enum P4_EVENT_OPCODES {
|
||||||
|
P4_OPCODE(P4_EVENT_TC_DELIVER_MODE) = P4_OPCODE_PACK(0x01, 0x01),
|
||||||
|
/*
|
||||||
|
* MSR_P4_TC_ESCR0: 4, 5
|
||||||
|
* MSR_P4_TC_ESCR1: 6, 7
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_BPU_FETCH_REQUEST) = P4_OPCODE_PACK(0x03, 0x00),
|
||||||
|
/*
|
||||||
|
* MSR_P4_BPU_ESCR0: 0, 1
|
||||||
|
* MSR_P4_BPU_ESCR1: 2, 3
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_ITLB_REFERENCE) = P4_OPCODE_PACK(0x18, 0x03),
|
||||||
|
/*
|
||||||
|
* MSR_P4_ITLB_ESCR0: 0, 1
|
||||||
|
* MSR_P4_ITLB_ESCR1: 2, 3
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_MEMORY_CANCEL) = P4_OPCODE_PACK(0x02, 0x05),
|
||||||
|
/*
|
||||||
|
* MSR_P4_DAC_ESCR0: 8, 9
|
||||||
|
* MSR_P4_DAC_ESCR1: 10, 11
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_MEMORY_COMPLETE) = P4_OPCODE_PACK(0x08, 0x02),
|
||||||
|
/*
|
||||||
|
* MSR_P4_SAAT_ESCR0: 8, 9
|
||||||
|
* MSR_P4_SAAT_ESCR1: 10, 11
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_LOAD_PORT_REPLAY) = P4_OPCODE_PACK(0x04, 0x02),
|
||||||
|
/*
|
||||||
|
* MSR_P4_SAAT_ESCR0: 8, 9
|
||||||
|
* MSR_P4_SAAT_ESCR1: 10, 11
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_STORE_PORT_REPLAY) = P4_OPCODE_PACK(0x05, 0x02),
|
||||||
|
/*
|
||||||
|
* MSR_P4_SAAT_ESCR0: 8, 9
|
||||||
|
* MSR_P4_SAAT_ESCR1: 10, 11
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_MOB_LOAD_REPLAY) = P4_OPCODE_PACK(0x03, 0x02),
|
||||||
|
/*
|
||||||
|
* MSR_P4_MOB_ESCR0: 0, 1
|
||||||
|
* MSR_P4_MOB_ESCR1: 2, 3
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_PAGE_WALK_TYPE) = P4_OPCODE_PACK(0x01, 0x04),
|
||||||
|
/*
|
||||||
|
* MSR_P4_PMH_ESCR0: 0, 1
|
||||||
|
* MSR_P4_PMH_ESCR1: 2, 3
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_BSQ_CACHE_REFERENCE) = P4_OPCODE_PACK(0x0c, 0x07),
|
||||||
|
/*
|
||||||
|
* MSR_P4_BSU_ESCR0: 0, 1
|
||||||
|
* MSR_P4_BSU_ESCR1: 2, 3
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_IOQ_ALLOCATION) = P4_OPCODE_PACK(0x03, 0x06),
|
||||||
|
/*
|
||||||
|
* MSR_P4_FSB_ESCR0: 0, 1
|
||||||
|
* MSR_P4_FSB_ESCR1: 2, 3
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_IOQ_ACTIVE_ENTRIES) = P4_OPCODE_PACK(0x1a, 0x06),
|
||||||
|
/*
|
||||||
|
* MSR_P4_FSB_ESCR1: 2, 3
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_FSB_DATA_ACTIVITY) = P4_OPCODE_PACK(0x17, 0x06),
|
||||||
|
/*
|
||||||
|
* MSR_P4_FSB_ESCR0: 0, 1
|
||||||
|
* MSR_P4_FSB_ESCR1: 2, 3
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_BSQ_ALLOCATION) = P4_OPCODE_PACK(0x05, 0x07),
|
||||||
|
/*
|
||||||
|
* MSR_P4_BSU_ESCR0: 0, 1
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_BSQ_ACTIVE_ENTRIES) = P4_OPCODE_PACK(0x06, 0x07),
|
||||||
|
/*
|
||||||
|
* NOTE: no ESCR name in docs, it's guessed
|
||||||
|
* MSR_P4_BSU_ESCR1: 2, 3
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_SSE_INPUT_ASSIST) = P4_OPCODE_PACK(0x34, 0x01),
|
||||||
|
/*
|
||||||
|
* MSR_P4_FIRM_ESCR0: 8, 9
|
||||||
|
* MSR_P4_FIRM_ESCR1: 10, 11
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_PACKED_SP_UOP) = P4_OPCODE_PACK(0x08, 0x01),
|
||||||
|
/*
|
||||||
|
* MSR_P4_FIRM_ESCR0: 8, 9
|
||||||
|
* MSR_P4_FIRM_ESCR1: 10, 11
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_PACKED_DP_UOP) = P4_OPCODE_PACK(0x0c, 0x01),
|
||||||
|
/*
|
||||||
|
* MSR_P4_FIRM_ESCR0: 8, 9
|
||||||
|
* MSR_P4_FIRM_ESCR1: 10, 11
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_SCALAR_SP_UOP) = P4_OPCODE_PACK(0x0a, 0x01),
|
||||||
|
/*
|
||||||
|
* MSR_P4_FIRM_ESCR0: 8, 9
|
||||||
|
* MSR_P4_FIRM_ESCR1: 10, 11
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_SCALAR_DP_UOP) = P4_OPCODE_PACK(0x0e, 0x01),
|
||||||
|
/*
|
||||||
|
* MSR_P4_FIRM_ESCR0: 8, 9
|
||||||
|
* MSR_P4_FIRM_ESCR1: 10, 11
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_64BIT_MMX_UOP) = P4_OPCODE_PACK(0x02, 0x01),
|
||||||
|
/*
|
||||||
|
* MSR_P4_FIRM_ESCR0: 8, 9
|
||||||
|
* MSR_P4_FIRM_ESCR1: 10, 11
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_128BIT_MMX_UOP) = P4_OPCODE_PACK(0x1a, 0x01),
|
||||||
|
/*
|
||||||
|
* MSR_P4_FIRM_ESCR0: 8, 9
|
||||||
|
* MSR_P4_FIRM_ESCR1: 10, 11
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_X87_FP_UOP) = P4_OPCODE_PACK(0x04, 0x01),
|
||||||
|
/*
|
||||||
|
* MSR_P4_FIRM_ESCR0: 8, 9
|
||||||
|
* MSR_P4_FIRM_ESCR1: 10, 11
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_TC_MISC) = P4_OPCODE_PACK(0x06, 0x01),
|
||||||
|
/*
|
||||||
|
* MSR_P4_TC_ESCR0: 4, 5
|
||||||
|
* MSR_P4_TC_ESCR1: 6, 7
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_GLOBAL_POWER_EVENTS) = P4_OPCODE_PACK(0x13, 0x06),
|
||||||
|
/*
|
||||||
|
* MSR_P4_FSB_ESCR0: 0, 1
|
||||||
|
* MSR_P4_FSB_ESCR1: 2, 3
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_TC_MS_XFER) = P4_OPCODE_PACK(0x05, 0x00),
|
||||||
|
/*
|
||||||
|
* MSR_P4_MS_ESCR0: 4, 5
|
||||||
|
* MSR_P4_MS_ESCR1: 6, 7
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_UOP_QUEUE_WRITES) = P4_OPCODE_PACK(0x09, 0x00),
|
||||||
|
/*
|
||||||
|
* MSR_P4_MS_ESCR0: 4, 5
|
||||||
|
* MSR_P4_MS_ESCR1: 6, 7
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE) = P4_OPCODE_PACK(0x05, 0x02),
|
||||||
|
/*
|
||||||
|
* MSR_P4_TBPU_ESCR0: 4, 5
|
||||||
|
* MSR_P4_TBPU_ESCR1: 6, 7
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_RETIRED_BRANCH_TYPE) = P4_OPCODE_PACK(0x04, 0x02),
|
||||||
|
/*
|
||||||
|
* MSR_P4_TBPU_ESCR0: 4, 5
|
||||||
|
* MSR_P4_TBPU_ESCR1: 6, 7
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_RESOURCE_STALL) = P4_OPCODE_PACK(0x01, 0x01),
|
||||||
|
/*
|
||||||
|
* MSR_P4_ALF_ESCR0: 12, 13, 16
|
||||||
|
* MSR_P4_ALF_ESCR1: 14, 15, 17
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_WC_BUFFER) = P4_OPCODE_PACK(0x05, 0x05),
|
||||||
|
/*
|
||||||
|
* MSR_P4_DAC_ESCR0: 8, 9
|
||||||
|
* MSR_P4_DAC_ESCR1: 10, 11
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_B2B_CYCLES) = P4_OPCODE_PACK(0x16, 0x03),
|
||||||
|
/*
|
||||||
|
* MSR_P4_FSB_ESCR0: 0, 1
|
||||||
|
* MSR_P4_FSB_ESCR1: 2, 3
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_BNR) = P4_OPCODE_PACK(0x08, 0x03),
|
||||||
|
/*
|
||||||
|
* MSR_P4_FSB_ESCR0: 0, 1
|
||||||
|
* MSR_P4_FSB_ESCR1: 2, 3
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_SNOOP) = P4_OPCODE_PACK(0x06, 0x03),
|
||||||
|
/*
|
||||||
|
* MSR_P4_FSB_ESCR0: 0, 1
|
||||||
|
* MSR_P4_FSB_ESCR1: 2, 3
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_RESPONSE) = P4_OPCODE_PACK(0x04, 0x03),
|
||||||
|
/*
|
||||||
|
* MSR_P4_FSB_ESCR0: 0, 1
|
||||||
|
* MSR_P4_FSB_ESCR1: 2, 3
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_FRONT_END_EVENT) = P4_OPCODE_PACK(0x08, 0x05),
|
||||||
|
/*
|
||||||
|
* MSR_P4_CRU_ESCR2: 12, 13, 16
|
||||||
|
* MSR_P4_CRU_ESCR3: 14, 15, 17
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_EXECUTION_EVENT) = P4_OPCODE_PACK(0x0c, 0x05),
|
||||||
|
/*
|
||||||
|
* MSR_P4_CRU_ESCR2: 12, 13, 16
|
||||||
|
* MSR_P4_CRU_ESCR3: 14, 15, 17
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_REPLAY_EVENT) = P4_OPCODE_PACK(0x09, 0x05),
|
||||||
|
/*
|
||||||
|
* MSR_P4_CRU_ESCR2: 12, 13, 16
|
||||||
|
* MSR_P4_CRU_ESCR3: 14, 15, 17
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_INSTR_RETIRED) = P4_OPCODE_PACK(0x02, 0x04),
|
||||||
|
/*
|
||||||
|
* MSR_P4_CRU_ESCR0: 12, 13, 16
|
||||||
|
* MSR_P4_CRU_ESCR1: 14, 15, 17
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_UOPS_RETIRED) = P4_OPCODE_PACK(0x01, 0x04),
|
||||||
|
/*
|
||||||
|
* MSR_P4_CRU_ESCR0: 12, 13, 16
|
||||||
|
* MSR_P4_CRU_ESCR1: 14, 15, 17
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_UOP_TYPE) = P4_OPCODE_PACK(0x02, 0x02),
|
||||||
|
/*
|
||||||
|
* MSR_P4_RAT_ESCR0: 12, 13, 16
|
||||||
|
* MSR_P4_RAT_ESCR1: 14, 15, 17
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_BRANCH_RETIRED) = P4_OPCODE_PACK(0x06, 0x05),
|
||||||
|
/*
|
||||||
|
* MSR_P4_CRU_ESCR2: 12, 13, 16
|
||||||
|
* MSR_P4_CRU_ESCR3: 14, 15, 17
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_MISPRED_BRANCH_RETIRED) = P4_OPCODE_PACK(0x03, 0x04),
|
||||||
|
/*
|
||||||
|
* MSR_P4_CRU_ESCR0: 12, 13, 16
|
||||||
|
* MSR_P4_CRU_ESCR1: 14, 15, 17
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_X87_ASSIST) = P4_OPCODE_PACK(0x03, 0x05),
|
||||||
|
/*
|
||||||
|
* MSR_P4_CRU_ESCR2: 12, 13, 16
|
||||||
|
* MSR_P4_CRU_ESCR3: 14, 15, 17
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_MACHINE_CLEAR) = P4_OPCODE_PACK(0x02, 0x05),
|
||||||
|
/*
|
||||||
|
* MSR_P4_CRU_ESCR2: 12, 13, 16
|
||||||
|
* MSR_P4_CRU_ESCR3: 14, 15, 17
|
||||||
|
*/
|
||||||
|
|
||||||
|
P4_OPCODE(P4_EVENT_INSTR_COMPLETED) = P4_OPCODE_PACK(0x07, 0x04),
|
||||||
|
/*
|
||||||
|
* MSR_P4_CRU_ESCR0: 12, 13, 16
|
||||||
|
* MSR_P4_CRU_ESCR1: 14, 15, 17
|
||||||
|
*/
|
||||||
|
};
|
||||||
|
|
||||||
|
/*
|
||||||
|
* a caller should use P4_ESCR_EMASK_NAME helper to
|
||||||
|
* pick the EventMask needed, for example
|
||||||
|
*
|
||||||
|
* P4_ESCR_EMASK_NAME(P4_EVENT_TC_DELIVER_MODE, DD)
|
||||||
|
*/
|
||||||
|
enum P4_ESCR_EMASKS {
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, DD, 0),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, DB, 1),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, DI, 2),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, BD, 3),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, BB, 4),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, BI, 5),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, ID, 6),
|
||||||
|
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_BPU_FETCH_REQUEST, TCMISS, 0),
|
||||||
|
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_ITLB_REFERENCE, HIT, 0),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_ITLB_REFERENCE, MISS, 1),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_ITLB_REFERENCE, HIT_UK, 2),
|
||||||
|
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_MEMORY_CANCEL, ST_RB_FULL, 2),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_MEMORY_CANCEL, 64K_CONF, 3),
|
||||||
|
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_MEMORY_COMPLETE, LSC, 0),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_MEMORY_COMPLETE, SSC, 1),
|
||||||
|
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_LOAD_PORT_REPLAY, SPLIT_LD, 1),
|
||||||
|
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_STORE_PORT_REPLAY, SPLIT_ST, 1),
|
||||||
|
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_MOB_LOAD_REPLAY, NO_STA, 1),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_MOB_LOAD_REPLAY, NO_STD, 3),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_MOB_LOAD_REPLAY, PARTIAL_DATA, 4),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_MOB_LOAD_REPLAY, UNALGN_ADDR, 5),
|
||||||
|
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_PAGE_WALK_TYPE, DTMISS, 0),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_PAGE_WALK_TYPE, ITMISS, 1),
|
||||||
|
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITS, 0),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITE, 1),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITM, 2),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITS, 3),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITE, 4),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITM, 5),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_MISS, 8),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_MISS, 9),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, WR_2ndL_MISS, 10),
|
||||||
|
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, DEFAULT, 0),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, ALL_READ, 5),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, ALL_WRITE, 6),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, MEM_UC, 7),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, MEM_WC, 8),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, MEM_WT, 9),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, MEM_WP, 10),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, MEM_WB, 11),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, OWN, 13),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, OTHER, 14),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, PREFETCH, 15),
|
||||||
|
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, DEFAULT, 0),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, ALL_READ, 5),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, ALL_WRITE, 6),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_UC, 7),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WC, 8),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WT, 9),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WP, 10),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WB, 11),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, OWN, 13),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, OTHER, 14),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, PREFETCH, 15),
|
||||||
|
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_DRV, 0),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OWN, 1),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OTHER, 2),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_DRV, 3),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_OWN, 4),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_OTHER, 5),
|
||||||
|
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_TYPE0, 0),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_TYPE1, 1),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_LEN0, 2),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_LEN1, 3),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_IO_TYPE, 5),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_LOCK_TYPE, 6),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_CACHE_TYPE, 7),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_SPLIT_TYPE, 8),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_DEM_TYPE, 9),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_ORD_TYPE, 10),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE0, 11),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE1, 12),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE2, 13),
|
||||||
|
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_TYPE0, 0),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_TYPE1, 1),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LEN0, 2),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LEN1, 3),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_IO_TYPE, 5),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LOCK_TYPE, 6),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_CACHE_TYPE, 7),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_SPLIT_TYPE, 8),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_DEM_TYPE, 9),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_ORD_TYPE, 10),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE0, 11),
|
||||||
|
P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE1, 12),
|
||||||
|
+	P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE2, 13),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_SSE_INPUT_ASSIST, ALL, 15),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_PACKED_SP_UOP, ALL, 15),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_PACKED_DP_UOP, ALL, 15),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_SCALAR_SP_UOP, ALL, 15),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_SCALAR_DP_UOP, ALL, 15),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_64BIT_MMX_UOP, ALL, 15),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_128BIT_MMX_UOP, ALL, 15),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_X87_FP_UOP, ALL, 15),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_TC_MISC, FLUSH, 4),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING, 0),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_TC_MS_XFER, CISC, 0),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_UOP_QUEUE_WRITES, FROM_TC_BUILD, 0),
+	P4_GEN_ESCR_EMASK(P4_EVENT_UOP_QUEUE_WRITES, FROM_TC_DELIVER, 1),
+	P4_GEN_ESCR_EMASK(P4_EVENT_UOP_QUEUE_WRITES, FROM_ROM, 2),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, CONDITIONAL, 1),
+	P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, CALL, 2),
+	P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, RETURN, 3),
+	P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, INDIRECT, 4),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_BRANCH_TYPE, CONDITIONAL, 1),
+	P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_BRANCH_TYPE, CALL, 2),
+	P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_BRANCH_TYPE, RETURN, 3),
+	P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_BRANCH_TYPE, INDIRECT, 4),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_RESOURCE_STALL, SBFULL, 5),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_WC_BUFFER, WCB_EVICTS, 0),
+	P4_GEN_ESCR_EMASK(P4_EVENT_WC_BUFFER, WCB_FULL_EVICTS, 1),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_FRONT_END_EVENT, NBOGUS, 0),
+	P4_GEN_ESCR_EMASK(P4_EVENT_FRONT_END_EVENT, BOGUS, 1),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, NBOGUS0, 0),
+	P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, NBOGUS1, 1),
+	P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, NBOGUS2, 2),
+	P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, NBOGUS3, 3),
+	P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, BOGUS0, 4),
+	P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, BOGUS1, 5),
+	P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, BOGUS2, 6),
+	P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, BOGUS3, 7),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_REPLAY_EVENT, NBOGUS, 0),
+	P4_GEN_ESCR_EMASK(P4_EVENT_REPLAY_EVENT, BOGUS, 1),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_INSTR_RETIRED, NBOGUSNTAG, 0),
+	P4_GEN_ESCR_EMASK(P4_EVENT_INSTR_RETIRED, NBOGUSTAG, 1),
+	P4_GEN_ESCR_EMASK(P4_EVENT_INSTR_RETIRED, BOGUSNTAG, 2),
+	P4_GEN_ESCR_EMASK(P4_EVENT_INSTR_RETIRED, BOGUSTAG, 3),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_UOPS_RETIRED, NBOGUS, 0),
+	P4_GEN_ESCR_EMASK(P4_EVENT_UOPS_RETIRED, BOGUS, 1),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_UOP_TYPE, TAGLOADS, 1),
+	P4_GEN_ESCR_EMASK(P4_EVENT_UOP_TYPE, TAGSTORES, 2),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_BRANCH_RETIRED, MMNP, 0),
+	P4_GEN_ESCR_EMASK(P4_EVENT_BRANCH_RETIRED, MMNM, 1),
+	P4_GEN_ESCR_EMASK(P4_EVENT_BRANCH_RETIRED, MMTP, 2),
+	P4_GEN_ESCR_EMASK(P4_EVENT_BRANCH_RETIRED, MMTM, 3),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS, 0),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_X87_ASSIST, FPSU, 0),
+	P4_GEN_ESCR_EMASK(P4_EVENT_X87_ASSIST, FPSO, 1),
+	P4_GEN_ESCR_EMASK(P4_EVENT_X87_ASSIST, POAO, 2),
+	P4_GEN_ESCR_EMASK(P4_EVENT_X87_ASSIST, POAU, 3),
+	P4_GEN_ESCR_EMASK(P4_EVENT_X87_ASSIST, PREA, 4),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_MACHINE_CLEAR, CLEAR, 0),
+	P4_GEN_ESCR_EMASK(P4_EVENT_MACHINE_CLEAR, MOCLEAR, 1),
+	P4_GEN_ESCR_EMASK(P4_EVENT_MACHINE_CLEAR, SMCLEAR, 2),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_INSTR_COMPLETED, NBOGUS, 0),
+	P4_GEN_ESCR_EMASK(P4_EVENT_INSTR_COMPLETED, BOGUS, 1),
+};
+
+/* P4 PEBS: stale for a while */
+#define P4_PEBS_METRIC_MASK	0x00001fffU
+#define P4_PEBS_UOB_TAG		0x01000000U
+#define P4_PEBS_ENABLE		0x02000000U
+
+/* Replay metrics for MSR_IA32_PEBS_ENABLE and MSR_P4_PEBS_MATRIX_VERT */
+#define P4_PEBS__1stl_cache_load_miss_retired	0x3000001
+#define P4_PEBS__2ndl_cache_load_miss_retired	0x3000002
+#define P4_PEBS__dtlb_load_miss_retired		0x3000004
+#define P4_PEBS__dtlb_store_miss_retired	0x3000004
+#define P4_PEBS__dtlb_all_miss_retired		0x3000004
+#define P4_PEBS__tagged_mispred_branch		0x3018000
+#define P4_PEBS__mob_load_replay_retired	0x3000200
+#define P4_PEBS__split_load_retired		0x3000400
+#define P4_PEBS__split_store_retired		0x3000400
+
+#define P4_VERT__1stl_cache_load_miss_retired	0x0000001
+#define P4_VERT__2ndl_cache_load_miss_retired	0x0000001
+#define P4_VERT__dtlb_load_miss_retired		0x0000001
+#define P4_VERT__dtlb_store_miss_retired	0x0000002
+#define P4_VERT__dtlb_all_miss_retired		0x0000003
+#define P4_VERT__tagged_mispred_branch		0x0000010
+#define P4_VERT__mob_load_replay_retired	0x0000001
+#define P4_VERT__split_load_retired		0x0000001
+#define P4_VERT__split_store_retired		0x0000002
+
+enum P4_CACHE_EVENTS {
+	P4_CACHE__NONE,
+
+	P4_CACHE__1stl_cache_load_miss_retired,
+	P4_CACHE__2ndl_cache_load_miss_retired,
+	P4_CACHE__dtlb_load_miss_retired,
+	P4_CACHE__dtlb_store_miss_retired,
+	P4_CACHE__itlb_reference_hit,
+	P4_CACHE__itlb_reference_miss,
+
+	P4_CACHE__MAX
+};
+
+#endif /* PERF_EVENT_P4_H */
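
The P4_PEBS__* and P4_VERT__* pairs above are meant to be programmed together: the PEBS value (which already carries P4_PEBS_ENABLE and P4_PEBS_UOB_TAG in its upper bits) goes into MSR_IA32_PEBS_ENABLE, and the matching vertical value into MSR_P4_PEBS_MATRIX_VERT. A minimal illustrative sketch, not part of this commit — the helper name is hypothetical, while the MSR constants and wrmsrl() are real kernel symbols:

static void p4_pebs_select_replay_metric(u64 pebs, u64 vert)
{
	/* e.g. pebs = P4_PEBS__dtlb_load_miss_retired,
	 *      vert = P4_VERT__dtlb_load_miss_retired */
	wrmsrl(MSR_IA32_PEBS_ENABLE, pebs);
	wrmsrl(MSR_P4_PEBS_MATRIX_VERT, vert);
}
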
@@ -21,7 +21,6 @@ struct mm_struct;
 #include <asm/msr.h>
 #include <asm/desc_defs.h>
 #include <asm/nops.h>
-#include <asm/ds.h>

 #include <linux/personality.h>
 #include <linux/cpumask.h>
@@ -29,6 +28,7 @@ struct mm_struct;
 #include <linux/threads.h>
 #include <linux/math64.h>
 #include <linux/init.h>
+#include <linux/err.h>

 #define HBP_NUM 4
 /*
@@ -473,10 +473,6 @@ struct thread_struct {
	unsigned long	iopl;
	/* Max allowed port in the bitmap, in bytes: */
	unsigned	io_bitmap_max;
-	/* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */
-	unsigned long	debugctlmsr;
-	/* Debug Store context; see asm/ds.h */
-	struct ds_context	*ds_ctx;
 };

 static inline unsigned long native_get_debugreg(int regno)
@@ -803,7 +799,7 @@ extern void cpu_init(void);

 static inline unsigned long get_debugctlmsr(void)
 {
	unsigned long debugctlmsr = 0;

 #ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
@@ -811,21 +807,6 @@ static inline unsigned long get_debugctlmsr(void)
 #endif
	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

-	return debugctlmsr;
-}
-
-static inline unsigned long get_debugctlmsr_on_cpu(int cpu)
-{
-	u64 debugctlmsr = 0;
-	u32 val1, val2;
-
-#ifndef CONFIG_X86_DEBUGCTLMSR
-	if (boot_cpu_data.x86 < 6)
-		return 0;
-#endif
-	rdmsr_on_cpu(cpu, MSR_IA32_DEBUGCTLMSR, &val1, &val2);
-	debugctlmsr = val1 | ((u64)val2 << 32);
-
	return debugctlmsr;
 }
@@ -838,18 +819,6 @@ static inline void update_debugctlmsr(unsigned long debugctlmsr)
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
 }

-static inline void update_debugctlmsr_on_cpu(int cpu,
-					     unsigned long debugctlmsr)
-{
-#ifndef CONFIG_X86_DEBUGCTLMSR
-	if (boot_cpu_data.x86 < 6)
-		return;
-#endif
-	wrmsr_on_cpu(cpu, MSR_IA32_DEBUGCTLMSR,
-		     (u32)((u64)debugctlmsr),
-		     (u32)((u64)debugctlmsr >> 32));
-}
-
 /*
  * from system description table in BIOS. Mostly for MCA use, but
  * others may find it useful:
@@ -82,61 +82,6 @@

 #ifndef __ASSEMBLY__
 #include <linux/types.h>
+#endif
-/* configuration/status structure used in PTRACE_BTS_CONFIG and
-   PTRACE_BTS_STATUS commands.
-*/
-struct ptrace_bts_config {
-	/* requested or actual size of BTS buffer in bytes */
-	__u32 size;
-	/* bitmask of below flags */
-	__u32 flags;
-	/* buffer overflow signal */
-	__u32 signal;
-	/* actual size of bts_struct in bytes */
-	__u32 bts_size;
-};
-#endif /* __ASSEMBLY__ */
-
-#define PTRACE_BTS_O_TRACE	0x1 /* branch trace */
-#define PTRACE_BTS_O_SCHED	0x2 /* scheduling events w/ jiffies */
-#define PTRACE_BTS_O_SIGNAL	0x4 /* send SIG<signal> on buffer overflow
-				       instead of wrapping around */
-#define PTRACE_BTS_O_ALLOC	0x8 /* (re)allocate buffer */
-
-#define PTRACE_BTS_CONFIG	40
-/* Configure branch trace recording.
-   ADDR points to a struct ptrace_bts_config.
-   DATA gives the size of that buffer.
-   A new buffer is allocated, if requested in the flags.
-   An overflow signal may only be requested for new buffers.
-   Returns the number of bytes read.
-*/
-#define PTRACE_BTS_STATUS	41
-/* Return the current configuration in a struct ptrace_bts_config
-   pointed to by ADDR; DATA gives the size of that buffer.
-   Returns the number of bytes written.
-*/
-#define PTRACE_BTS_SIZE		42
-/* Return the number of available BTS records for draining.
-   DATA and ADDR are ignored.
-*/
-#define PTRACE_BTS_GET		43
-/* Get a single BTS record.
-   DATA defines the index into the BTS array, where 0 is the newest
-   entry, and higher indices refer to older entries.
-   ADDR is pointing to struct bts_struct (see asm/ds.h).
-*/
-#define PTRACE_BTS_CLEAR	44
-/* Clear the BTS buffer.
-   DATA and ADDR are ignored.
-*/
-#define PTRACE_BTS_DRAIN	45
-/* Read all available BTS records and clear the buffer.
-   ADDR points to an array of struct bts_struct.
-   DATA gives the size of that buffer.
-   BTS records are read from oldest to newest.
-   Returns number of BTS records drained.
-*/

 #endif /* _ASM_X86_PTRACE_ABI_H */
@@ -289,12 +289,6 @@ extern int do_get_thread_area(struct task_struct *p, int idx,
 extern int do_set_thread_area(struct task_struct *p, int idx,
			      struct user_desc __user *info, int can_allocate);

-#ifdef CONFIG_X86_PTRACE_BTS
-extern void ptrace_bts_untrace(struct task_struct *tsk);
-
-#define arch_ptrace_untrace(tsk)	ptrace_bts_untrace(tsk)
-#endif /* CONFIG_X86_PTRACE_BTS */
-
 #endif /* __KERNEL__ */

 #endif /* !__ASSEMBLY__ */
@@ -92,8 +92,7 @@ struct thread_info {
 #define TIF_IO_BITMAP		22	/* uses I/O bitmap */
 #define TIF_FREEZE		23	/* is freezing for suspend */
 #define TIF_FORCED_TF		24	/* true if TF in eflags artificially */
-#define TIF_DEBUGCTLMSR		25	/* uses thread_struct.debugctlmsr */
-#define TIF_DS_AREA_MSR		26	/* uses thread_struct.ds_area_msr */
+#define TIF_BLOCKSTEP		25	/* set when we want DEBUGCTLMSR_BTF */
 #define TIF_LAZY_MMU_UPDATES	27	/* task is updating the mmu lazily */
 #define TIF_SYSCALL_TRACEPOINT	28	/* syscall tracepoint instrumentation */

@@ -115,8 +114,7 @@ struct thread_info {
 #define _TIF_IO_BITMAP		(1 << TIF_IO_BITMAP)
 #define _TIF_FREEZE		(1 << TIF_FREEZE)
 #define _TIF_FORCED_TF		(1 << TIF_FORCED_TF)
-#define _TIF_DEBUGCTLMSR	(1 << TIF_DEBUGCTLMSR)
-#define _TIF_DS_AREA_MSR	(1 << TIF_DS_AREA_MSR)
+#define _TIF_BLOCKSTEP		(1 << TIF_BLOCKSTEP)
 #define _TIF_LAZY_MMU_UPDATES	(1 << TIF_LAZY_MMU_UPDATES)
 #define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)

@@ -147,7 +145,7 @@ struct thread_info {

 /* flags to check in __switch_to() */
 #define _TIF_WORK_CTXSW \
-	(_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_NOTSC)
+	(_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP)

 #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG)
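
With TIF_DEBUGCTLMSR and TIF_DS_AREA_MSR gone, the only per-task debugctl state left in _TIF_WORK_CTXSW is the block-step bit. A hedged sketch of how a context switch can propagate TIF_BLOCKSTEP into the hardware BTF bit — this mirrors, but is not copied from, the __switch_to_xtra() logic; DEBUGCTLMSR_BTF, get_debugctlmsr() and update_debugctlmsr() are real kernel symbols, the helper name is illustrative:

static void propagate_blockstep(struct task_struct *prev, struct task_struct *next)
{
	if (test_tsk_thread_flag(prev, TIF_BLOCKSTEP) !=
	    test_tsk_thread_flag(next, TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl &= ~DEBUGCTLMSR_BTF;
		if (test_tsk_thread_flag(next, TIF_BLOCKSTEP))
			debugctl |= DEBUGCTLMSR_BTF;
		update_debugctlmsr(debugctl);
	}
}
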
@@ -47,8 +47,6 @@ obj-$(CONFIG_X86_TRAMPOLINE)	+= trampoline.o
 obj-y				+= process.o
 obj-y				+= i387.o xsave.o
 obj-y				+= ptrace.o
-obj-$(CONFIG_X86_DS)		+= ds.o
-obj-$(CONFIG_X86_DS_SELFTEST)	+= ds_selftest.o
 obj-$(CONFIG_X86_32)		+= tls.o
 obj-$(CONFIG_IA32_EMULATION)	+= tls.o
 obj-y				+= step.o
@@ -12,7 +12,6 @@
 #include <asm/processor.h>
 #include <asm/pgtable.h>
 #include <asm/msr.h>
-#include <asm/ds.h>
 #include <asm/bugs.h>
 #include <asm/cpu.h>

@@ -367,7 +366,6 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & (1<<12)))
			set_cpu_cap(c, X86_FEATURE_PEBS);
-		ds_init_intel(c);
	}

	if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush)
@@ -31,46 +31,51 @@
 #include <asm/nmi.h>
 #include <asm/compat.h>

-static u64 perf_event_mask __read_mostly;
-
-/* The maximal number of PEBS events: */
-#define MAX_PEBS_EVENTS	4
-
-/* The size of a BTS record in bytes: */
-#define BTS_RECORD_SIZE	24
-
-/* The size of a per-cpu BTS buffer in bytes: */
-#define BTS_BUFFER_SIZE	(BTS_RECORD_SIZE * 2048)
-
-/* The BTS overflow threshold in bytes from the end of the buffer: */
-#define BTS_OVFL_TH	(BTS_RECORD_SIZE * 128)
-
-/*
- * Bits in the debugctlmsr controlling branch tracing.
- */
-#define X86_DEBUGCTL_TR			(1 << 6)
-#define X86_DEBUGCTL_BTS		(1 << 7)
-#define X86_DEBUGCTL_BTINT		(1 << 8)
-#define X86_DEBUGCTL_BTS_OFF_OS		(1 << 9)
-#define X86_DEBUGCTL_BTS_OFF_USR	(1 << 10)
-
-/*
- * A debug store configuration.
- *
- * We only support architectures that use 64bit fields.
- */
-struct debug_store {
-	u64	bts_buffer_base;
-	u64	bts_index;
-	u64	bts_absolute_maximum;
-	u64	bts_interrupt_threshold;
-	u64	pebs_buffer_base;
-	u64	pebs_index;
-	u64	pebs_absolute_maximum;
-	u64	pebs_interrupt_threshold;
-	u64	pebs_event_reset[MAX_PEBS_EVENTS];
-};
+#if 0
+#undef wrmsrl
+#define wrmsrl(msr, val)					\
+do {								\
+	trace_printk("wrmsrl(%lx, %lx)\n", (unsigned long)(msr),\
+			(unsigned long)(val));			\
+	native_write_msr((msr), (u32)((u64)(val)),		\
+			(u32)((u64)(val) >> 32));		\
+} while (0)
+#endif
+
+/*
+ * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
+ */
+static unsigned long
+copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
+{
+	unsigned long offset, addr = (unsigned long)from;
+	int type = in_nmi() ? KM_NMI : KM_IRQ0;
+	unsigned long size, len = 0;
+	struct page *page;
+	void *map;
+	int ret;
+
+	do {
+		ret = __get_user_pages_fast(addr, 1, 0, &page);
+		if (!ret)
+			break;
+
+		offset = addr & (PAGE_SIZE - 1);
+		size = min(PAGE_SIZE - offset, n - len);
+
+		map = kmap_atomic(page, type);
+		memcpy(to, map+offset, size);
+		kunmap_atomic(map, type);
+		put_page(page);
+
+		len += size;
+		to += size;
+		addr += size;
+
+	} while (len < n);
+
+	return len;
+}

 struct event_constraint {
	union {
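
copy_from_user_nmi() above is what lets the NMI handler walk a user-space stack without sleeping: it pins one page at a time with __get_user_pages_fast() and copies through a kmap_atomic() mapping. A hedged usage sketch — the frame layout is the conventional x86 frame-pointer chain, and everything here except copy_from_user_nmi() itself is illustrative:

struct stack_frame {
	const void __user	*next_fp;	/* saved frame pointer */
	unsigned long		return_address;
};

/* illustrative: fetch one user stack frame from NMI context */
static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
{
	return copy_from_user_nmi(frame, fp, sizeof(*frame)) == sizeof(*frame);
}
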
@@ -89,18 +94,39 @@ struct amd_nb {
	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
 };

+#define MAX_LBR_ENTRIES		16
+
 struct cpu_hw_events {
+	/*
+	 * Generic x86 PMC bits
+	 */
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
-	unsigned long		interrupts;
	int			enabled;
-	struct debug_store	*ds;

	int			n_events;
	int			n_added;
	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	u64			tags[X86_PMC_IDX_MAX];
	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */

+	/*
+	 * Intel DebugStore bits
+	 */
+	struct debug_store	*ds;
+	u64			pebs_enabled;
+
+	/*
+	 * Intel LBR bits
+	 */
+	int				lbr_users;
+	void				*lbr_context;
+	struct perf_branch_stack	lbr_stack;
+	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];
+
+	/*
+	 * AMD specific bits
+	 */
	struct amd_nb		*amd_nb;
 };
@@ -114,11 +140,31 @@ struct cpu_hw_events {
 #define EVENT_CONSTRAINT(c, n, m) \
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))

+/*
+ * Constraint on the Event code.
+ */
 #define INTEL_EVENT_CONSTRAINT(c, n) \
-	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK)
+	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)

+/*
+ * Constraint on the Event code + UMask + fixed-mask
+ *
+ * filter mask to validate fixed counter events.
+ * the following filters disqualify for fixed counters:
+ *  - inv
+ *  - edge
+ *  - cnt-mask
+ *  The other filters are supported by fixed counters.
+ *  The any-thread option is supported starting with v3.
+ */
 #define FIXED_EVENT_CONSTRAINT(c, n) \
-	EVENT_CONSTRAINT(c, (1ULL << (32+n)), INTEL_ARCH_FIXED_MASK)
+	EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK)

+/*
+ * Constraint on the Event code + UMask
+ */
+#define PEBS_EVENT_CONSTRAINT(c, n) \
+	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
+
 #define EVENT_CONSTRAINT_END \
	EVENT_CONSTRAINT(0, 0, 0)
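
These macros build the per-PMU tables that the event scheduler walks when placing events on counters. A hedged illustration of how such a table is typically assembled — the event codes and counter masks below are examples, not taken from this patch:

static struct event_constraint example_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY may use fixed counter 0 */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4),	/* example: event 0x48 restricted to counter 2 */
	EVENT_CONSTRAINT_END
};
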
@@ -126,32 +172,43 @@ struct cpu_hw_events {
 #define for_each_event_constraint(e, c) \
	for ((e) = (c); (e)->cmask; (e)++)

+union perf_capabilities {
+	struct {
+		u64	lbr_format    : 6;
+		u64	pebs_trap     : 1;
+		u64	pebs_arch_reg : 1;
+		u64	pebs_format   : 4;
+		u64	smm_freeze    : 1;
+	};
+	u64	capabilities;
+};
+
 /*
  * struct x86_pmu - generic x86 pmu
  */
 struct x86_pmu {
+	/*
+	 * Generic x86 PMC bits
+	 */
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
-	void		(*enable_all)(void);
+	void		(*enable_all)(int added);
	void		(*enable)(struct perf_event *);
	void		(*disable)(struct perf_event *);
+	int		(*hw_config)(struct perf_event *event);
+	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
	unsigned	eventsel;
	unsigned	perfctr;
	u64		(*event_map)(int);
-	u64		(*raw_event)(u64);
	int		max_events;
-	int		num_events;
-	int		num_events_fixed;
-	int		event_bits;
-	u64		event_mask;
+	int		num_counters;
+	int		num_counters_fixed;
+	int		cntval_bits;
+	u64		cntval_mask;
	int		apic;
	u64		max_period;
-	u64		intel_ctrl;
-	void		(*enable_bts)(u64 config);
-	void		(*disable_bts)(void);

	struct event_constraint *
			(*get_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);
@@ -159,11 +216,32 @@ struct x86_pmu {
	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);
	struct event_constraint *event_constraints;
+	void		(*quirks)(void);

	int		(*cpu_prepare)(int cpu);
	void		(*cpu_starting)(int cpu);
	void		(*cpu_dying)(int cpu);
	void		(*cpu_dead)(int cpu);
+
+	/*
+	 * Intel Arch Perfmon v2+
+	 */
+	u64			intel_ctrl;
+	union perf_capabilities intel_cap;
+
+	/*
+	 * Intel DebugStore bits
+	 */
+	int		bts, pebs;
+	int		pebs_record_size;
+	void		(*drain_pebs)(struct pt_regs *regs);
+	struct event_constraint *pebs_constraints;
+
+	/*
+	 * Intel LBR
+	 */
+	unsigned long	lbr_tos, lbr_from, lbr_to; /* MSR base regs */
+	int		lbr_nr;			   /* hardware stack size */
 };

 static struct x86_pmu x86_pmu __read_mostly;
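
The new intel_cap field is a direct image of the IA32_PERF_CAPABILITIES MSR, so filling it is a single rdmsrl() into the union. A hedged sketch — the helper name is hypothetical, while MSR_IA32_PERF_CAPABILITIES and rdmsrl() are real kernel symbols (the MSR itself is only present on CPUs advertising the PDCM feature):

static void intel_read_perf_capabilities(struct x86_pmu *pmu)
{
	u64 capabilities;

	rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
	pmu->intel_cap.capabilities = capabilities;

	/* e.g. pmu->intel_cap.pebs_format now selects the PEBS record layout */
}
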
@@ -198,7 +276,7 @@ static u64
 x86_perf_event_update(struct perf_event *event)
 {
	struct hw_perf_event *hwc = &event->hw;
-	int shift = 64 - x86_pmu.event_bits;
+	int shift = 64 - x86_pmu.cntval_bits;
	u64 prev_raw_count, new_raw_count;
	int idx = hwc->idx;
	s64 delta;
@@ -241,33 +319,32 @@ again:
 static atomic_t active_events;
 static DEFINE_MUTEX(pmc_reserve_mutex);

+#ifdef CONFIG_X86_LOCAL_APIC
+
 static bool reserve_pmc_hardware(void)
 {
-#ifdef CONFIG_X86_LOCAL_APIC
	int i;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		disable_lapic_nmi_watchdog();

-	for (i = 0; i < x86_pmu.num_events; i++) {
+	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
			goto perfctr_fail;
	}

-	for (i = 0; i < x86_pmu.num_events; i++) {
+	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
			goto eventsel_fail;
	}
-#endif

	return true;

-#ifdef CONFIG_X86_LOCAL_APIC
 eventsel_fail:
	for (i--; i >= 0; i--)
		release_evntsel_nmi(x86_pmu.eventsel + i);

-	i = x86_pmu.num_events;
+	i = x86_pmu.num_counters;

 perfctr_fail:
	for (i--; i >= 0; i--)
@@ -277,128 +354,36 @@ perfctr_fail:
	enable_lapic_nmi_watchdog();

	return false;
-#endif
 }

 static void release_pmc_hardware(void)
 {
-#ifdef CONFIG_X86_LOCAL_APIC
	int i;

-	for (i = 0; i < x86_pmu.num_events; i++) {
+	for (i = 0; i < x86_pmu.num_counters; i++) {
		release_perfctr_nmi(x86_pmu.perfctr + i);
		release_evntsel_nmi(x86_pmu.eventsel + i);
	}

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();
+}
+
+#else
+
+static bool reserve_pmc_hardware(void) { return true; }
+static void release_pmc_hardware(void) {}
+
 #endif
-}

-static inline bool bts_available(void)
-{
-	return x86_pmu.enable_bts != NULL;
-}
+static int reserve_ds_buffers(void);
+static void release_ds_buffers(void);

-static void init_debug_store_on_cpu(int cpu)
-{
-	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
-
-	if (!ds)
-		return;
-
-	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
-		     (u32)((u64)(unsigned long)ds),
-		     (u32)((u64)(unsigned long)ds >> 32));
-}
-
-static void fini_debug_store_on_cpu(int cpu)
-{
-	if (!per_cpu(cpu_hw_events, cpu).ds)
-		return;
-
-	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
-}
-
-static void release_bts_hardware(void)
-{
-	int cpu;
-
-	if (!bts_available())
-		return;
-
-	get_online_cpus();
-
-	for_each_online_cpu(cpu)
-		fini_debug_store_on_cpu(cpu);
-
-	for_each_possible_cpu(cpu) {
-		struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
-
-		if (!ds)
-			continue;
-
-		per_cpu(cpu_hw_events, cpu).ds = NULL;
-
-		kfree((void *)(unsigned long)ds->bts_buffer_base);
-		kfree(ds);
-	}
-
-	put_online_cpus();
-}
-
-static int reserve_bts_hardware(void)
-{
-	int cpu, err = 0;
-
-	if (!bts_available())
-		return 0;
-
-	get_online_cpus();
-
-	for_each_possible_cpu(cpu) {
-		struct debug_store *ds;
-		void *buffer;
-
-		err = -ENOMEM;
-		buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
-		if (unlikely(!buffer))
-			break;
-
-		ds = kzalloc(sizeof(*ds), GFP_KERNEL);
-		if (unlikely(!ds)) {
-			kfree(buffer);
-			break;
-		}
-
-		ds->bts_buffer_base = (u64)(unsigned long)buffer;
-		ds->bts_index = ds->bts_buffer_base;
-		ds->bts_absolute_maximum =
-			ds->bts_buffer_base + BTS_BUFFER_SIZE;
-		ds->bts_interrupt_threshold =
-			ds->bts_absolute_maximum - BTS_OVFL_TH;
-
-		per_cpu(cpu_hw_events, cpu).ds = ds;
-		err = 0;
-	}
-
-	if (err)
-		release_bts_hardware();
-	else {
-		for_each_online_cpu(cpu)
-			init_debug_store_on_cpu(cpu);
-	}
-
-	put_online_cpus();
-
-	return err;
-}

 static void hw_perf_event_destroy(struct perf_event *event)
 {
	if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
		release_pmc_hardware();
-		release_bts_hardware();
+		release_ds_buffers();
		mutex_unlock(&pmc_reserve_mutex);
	}
 }
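
hw_perf_event_destroy() above relies on atomic_dec_and_mutex_lock(), which returns true only for the caller that drops the count to zero, and that caller then holds the mutex, so the teardown pair runs exactly once. A hedged, generic restatement of the idiom with hypothetical names:

static atomic_t users = ATOMIC_INIT(0);
static DEFINE_MUTEX(teardown_mutex);

static void put_user_of_resource(void)
{
	/* only the final put sees 'true', and it owns teardown_mutex */
	if (atomic_dec_and_mutex_lock(&users, &teardown_mutex)) {
		teardown_resource();		/* hypothetical cleanup hook */
		mutex_unlock(&teardown_mutex);
	}
}
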
@@ -441,6 +426,28 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
	return 0;
 }

+static int x86_pmu_hw_config(struct perf_event *event)
+{
+	/*
+	 * Generate PMC IRQs:
+	 * (keep 'enabled' bit clear for now)
+	 */
+	event->hw.config = ARCH_PERFMON_EVENTSEL_INT;
+
+	/*
+	 * Count user and OS events unless requested not to
+	 */
+	if (!event->attr.exclude_user)
+		event->hw.config |= ARCH_PERFMON_EVENTSEL_USR;
+	if (!event->attr.exclude_kernel)
+		event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;
+
+	if (event->attr.type == PERF_TYPE_RAW)
+		event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
+
+	return 0;
+}
+
 /*
  * Setup the hardware configuration for a given attr_type
  */
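
For illustration (not part of the patch), here is what the new hw_config hook computes for a raw, kernel-only event; the ARCH_PERFMON_* names are the real eventsel bit defines, the raw value is a made-up example:

struct perf_event_attr attr = {
	.type		= PERF_TYPE_RAW,
	.config		= 0x01c0,	/* hypothetical raw event|umask encoding */
	.exclude_user	= 1,		/* leaves ARCH_PERFMON_EVENTSEL_USR clear */
};

/*
 * x86_pmu_hw_config() then yields:
 *	hw.config = ARCH_PERFMON_EVENTSEL_INT
 *		  | ARCH_PERFMON_EVENTSEL_OS
 *		  | (0x01c0 & X86_RAW_EVENT_MASK);
 */
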
@@ -460,8 +467,11 @@ static int __hw_perf_event_init(struct perf_event *event)
	if (atomic_read(&active_events) == 0) {
		if (!reserve_pmc_hardware())
			err = -EBUSY;
-		else
-			err = reserve_bts_hardware();
+		else {
+			err = reserve_ds_buffers();
+			if (err)
+				release_pmc_hardware();
+		}
	}
	if (!err)
		atomic_inc(&active_events);
@@ -472,23 +482,14 @@ static int __hw_perf_event_init(struct perf_event *event)

	event->destroy = hw_perf_event_destroy;

-	/*
-	 * Generate PMC IRQs:
-	 * (keep 'enabled' bit clear for now)
-	 */
-	hwc->config = ARCH_PERFMON_EVENTSEL_INT;
-
	hwc->idx = -1;
	hwc->last_cpu = -1;
	hwc->last_tag = ~0ULL;

-	/*
-	 * Count user and OS events unless requested not to.
-	 */
-	if (!attr->exclude_user)
-		hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
-	if (!attr->exclude_kernel)
-		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
+	/* Processor specifics */
+	err = x86_pmu.hw_config(event);
+	if (err)
+		return err;

	if (!hwc->sample_period) {
		hwc->sample_period = x86_pmu.max_period;
@@ -505,16 +506,8 @@ static int __hw_perf_event_init(struct perf_event *event)
			return -EOPNOTSUPP;
	}

-	/*
-	 * Raw hw_event type provide the config in the hw_event structure
-	 */
-	if (attr->type == PERF_TYPE_RAW) {
-		hwc->config |= x86_pmu.raw_event(attr->config);
-		if ((hwc->config & ARCH_PERFMON_EVENTSEL_ANY) &&
-		    perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
-			return -EACCES;
+	if (attr->type == PERF_TYPE_RAW)
		return 0;
-	}

	if (attr->type == PERF_TYPE_HW_CACHE)
		return set_ext_hw_attr(hwc, attr);
@@ -539,11 +532,11 @@ static int __hw_perf_event_init(struct perf_event *event)
	if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
	    (hwc->sample_period == 1)) {
		/* BTS is not supported by this architecture. */
-		if (!bts_available())
+		if (!x86_pmu.bts)
			return -EOPNOTSUPP;

		/* BTS is currently only allowed for user-mode. */
-		if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
+		if (!attr->exclude_kernel)
			return -EOPNOTSUPP;
	}
@@ -557,7 +550,7 @@ static void x86_pmu_disable_all(void)
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

-	for (idx = 0; idx < x86_pmu.num_events; idx++) {
+	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
@@ -587,12 +580,12 @@ void hw_perf_disable(void)
	x86_pmu.disable_all();
 }

-static void x86_pmu_enable_all(void)
+static void x86_pmu_enable_all(int added)
 {
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

-	for (idx = 0; idx < x86_pmu.num_events; idx++) {
+	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct perf_event *event = cpuc->events[idx];
		u64 val;
@@ -667,14 +660,14 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
	 * assign events to counters starting with most
	 * constrained events.
	 */
-	wmax = x86_pmu.num_events;
+	wmax = x86_pmu.num_counters;

	/*
	 * when fixed event counters are present,
	 * wmax is incremented by 1 to account
	 * for one more choice
	 */
-	if (x86_pmu.num_events_fixed)
+	if (x86_pmu.num_counters_fixed)
		wmax++;

	for (w = 1, num = n; num && w <= wmax; w++) {
@@ -724,7 +717,7 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
	struct perf_event *event;
	int n, max_count;

-	max_count = x86_pmu.num_events + x86_pmu.num_events_fixed;
+	max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;

	/* current number of events already accepted */
	n = cpuc->n_events;
@@ -795,7 +788,7 @@ void hw_perf_enable(void)
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_event *event;
	struct hw_perf_event *hwc;
-	int i;
+	int i, added = cpuc->n_added;

	if (!x86_pmu_initialized())
		return;
@@ -847,19 +840,20 @@ void hw_perf_enable(void)
	cpuc->enabled = 1;
	barrier();

-	x86_pmu.enable_all();
+	x86_pmu.enable_all(added);
 }

 static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc)
 {
-	(void)checking_wrmsrl(hwc->config_base + hwc->idx,
+	wrmsrl(hwc->config_base + hwc->idx,
			      hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE);
 }

 static inline void x86_pmu_disable_event(struct perf_event *event)
 {
	struct hw_perf_event *hwc = &event->hw;
-	(void)checking_wrmsrl(hwc->config_base + hwc->idx, hwc->config);
+
+	wrmsrl(hwc->config_base + hwc->idx, hwc->config);
 }

 static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
@@ -874,7 +868,7 @@ x86_perf_event_set_period(struct perf_event *event)
	struct hw_perf_event *hwc = &event->hw;
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
-	int err, ret = 0, idx = hwc->idx;
+	int ret = 0, idx = hwc->idx;

	if (idx == X86_PMC_IDX_FIXED_BTS)
		return 0;
@@ -912,8 +906,8 @@ x86_perf_event_set_period(struct perf_event *event)
	 */
	atomic64_set(&hwc->prev_count, (u64)-left);

-	err = checking_wrmsrl(hwc->event_base + idx,
-			(u64)(-left) & x86_pmu.event_mask);
+	wrmsrl(hwc->event_base + idx,
+			(u64)(-left) & x86_pmu.cntval_mask);

	perf_event_update_userpage(event);
|
@ -950,7 +944,7 @@ static int x86_pmu_enable(struct perf_event *event)
|
||||||
if (n < 0)
|
if (n < 0)
|
||||||
return n;
|
return n;
|
||||||
|
|
||||||
ret = x86_schedule_events(cpuc, n, assign);
|
ret = x86_pmu.schedule_events(cpuc, n, assign);
|
||||||
if (ret)
|
if (ret)
|
||||||
return ret;
|
return ret;
|
||||||
/*
|
/*
|
||||||
|
@ -991,11 +985,12 @@ static void x86_pmu_unthrottle(struct perf_event *event)
|
||||||
void perf_event_print_debug(void)
|
void perf_event_print_debug(void)
|
||||||
{
|
{
|
||||||
u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
|
u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
|
||||||
|
u64 pebs;
|
||||||
struct cpu_hw_events *cpuc;
|
struct cpu_hw_events *cpuc;
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
int cpu, idx;
|
int cpu, idx;
|
||||||
|
|
||||||
if (!x86_pmu.num_events)
|
if (!x86_pmu.num_counters)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
local_irq_save(flags);
|
local_irq_save(flags);
|
||||||
|
@ -1008,16 +1003,18 @@ void perf_event_print_debug(void)
|
||||||
rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
|
rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
|
||||||
rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
|
rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
|
||||||
rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
|
rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
|
||||||
|
rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);
|
||||||
|
|
||||||
pr_info("\n");
|
pr_info("\n");
|
||||||
pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
|
pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
|
||||||
pr_info("CPU#%d: status: %016llx\n", cpu, status);
|
pr_info("CPU#%d: status: %016llx\n", cpu, status);
|
||||||
pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
|
pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
|
||||||
pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
|
pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
|
||||||
|
pr_info("CPU#%d: pebs: %016llx\n", cpu, pebs);
|
||||||
}
|
}
|
||||||
pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
|
pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
|
||||||
|
|
||||||
for (idx = 0; idx < x86_pmu.num_events; idx++) {
|
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
|
||||||
rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
|
rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
|
||||||
rdmsrl(x86_pmu.perfctr + idx, pmc_count);
|
rdmsrl(x86_pmu.perfctr + idx, pmc_count);
|
||||||
|
|
||||||
|
@@ -1030,7 +1027,7 @@ void perf_event_print_debug(void)
		pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
			cpu, idx, prev_left);
	}
-	for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
+	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
@@ -1095,7 +1092,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)

	cpuc = &__get_cpu_var(cpu_hw_events);

-	for (idx = 0; idx < x86_pmu.num_events; idx++) {
+	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		if (!test_bit(idx, cpuc->active_mask))
			continue;
@@ -1103,7 +1100,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
		hwc = &event->hw;

		val = x86_perf_event_update(event);
-		if (val & (1ULL << (x86_pmu.event_bits - 1)))
+		if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
			continue;

		/*
@@ -1146,7 +1143,6 @@ void set_perf_event_pending(void)

 void perf_events_lapic_init(void)
 {
-#ifdef CONFIG_X86_LOCAL_APIC
	if (!x86_pmu.apic || !x86_pmu_initialized())
		return;
@@ -1154,7 +1150,6 @@ void perf_events_lapic_init(void)
	 * Always use NMI for PMU
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);
-#endif
 }

 static int __kprobes
@@ -1178,9 +1173,7 @@ perf_event_nmi_handler(struct notifier_block *self,

	regs = args->regs;

-#ifdef CONFIG_X86_LOCAL_APIC
	apic_write(APIC_LVTPC, APIC_DM_NMI);
-#endif
	/*
	 * Can't rely on the handled return value to say it was our NMI, two
	 * events could trigger 'simultaneously' raising two back-to-back NMIs.
@@ -1274,12 +1267,15 @@ int hw_perf_group_sched_in(struct perf_event *leader,
	int assign[X86_PMC_IDX_MAX];
	int n0, n1, ret;

+	if (!x86_pmu_initialized())
+		return 0;
+
	/* n0 = total number of events */
	n0 = collect_events(cpuc, leader, true);
	if (n0 < 0)
		return n0;

-	ret = x86_schedule_events(cpuc, n0, assign);
+	ret = x86_pmu.schedule_events(cpuc, n0, assign);
	if (ret)
		return ret;
@@ -1329,6 +1325,9 @@ undo:

 #include "perf_event_amd.c"
 #include "perf_event_p6.c"
+#include "perf_event_p4.c"
+#include "perf_event_intel_lbr.c"
+#include "perf_event_intel_ds.c"
 #include "perf_event_intel.c"

 static int __cpuinit
@@ -1402,48 +1401,50 @@ void __init init_hw_perf_events(void)

	pr_cont("%s PMU driver.\n", x86_pmu.name);

-	if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
+	if (x86_pmu.quirks)
+		x86_pmu.quirks();
+
+	if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
-		     x86_pmu.num_events, X86_PMC_MAX_GENERIC);
-		x86_pmu.num_events = X86_PMC_MAX_GENERIC;
+		     x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
+		x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
	}
-	perf_event_mask = (1 << x86_pmu.num_events) - 1;
-	perf_max_events = x86_pmu.num_events;
+	x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
+	perf_max_events = x86_pmu.num_counters;

-	if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
+	if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
-		     x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
-		x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
+		     x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
+		x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
	}

-	perf_event_mask |=
-		((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
-	x86_pmu.intel_ctrl = perf_event_mask;
+	x86_pmu.intel_ctrl |=
+		((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;

	perf_events_lapic_init();
	register_die_notifier(&perf_event_nmi_notifier);

	unconstrained = (struct event_constraint)
-		__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1,
-				   0, x86_pmu.num_events);
+		__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
+				   0, x86_pmu.num_counters);

	if (x86_pmu.event_constraints) {
		for_each_event_constraint(c, x86_pmu.event_constraints) {
-			if (c->cmask != INTEL_ARCH_FIXED_MASK)
+			if (c->cmask != X86_RAW_EVENT_MASK)
				continue;

-			c->idxmsk64 |= (1ULL << x86_pmu.num_events) - 1;
-			c->weight += x86_pmu.num_events;
+			c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
+			c->weight += x86_pmu.num_counters;
		}
	}

	pr_info("... version: %d\n", x86_pmu.version);
-	pr_info("... bit width: %d\n", x86_pmu.event_bits);
-	pr_info("... generic registers: %d\n", x86_pmu.num_events);
-	pr_info("... value mask: %016Lx\n", x86_pmu.event_mask);
+	pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
+	pr_info("... generic registers: %d\n", x86_pmu.num_counters);
+	pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask);
	pr_info("... max period: %016Lx\n", x86_pmu.max_period);
-	pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed);
-	pr_info("... event mask: %016Lx\n", perf_event_mask);
+	pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed);
+	pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);

	perf_cpu_notifier(x86_pmu_notifier);
 }
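
As a worked example of the mask computed above (illustrative counter counts, e.g. a Core-class PMU):

/*
 * With num_counters = 4 and num_counters_fixed = 3,
 * init_hw_perf_events() computes
 *
 *	x86_pmu.intel_ctrl = (1 << 4) - 1		=         0xf
 *	                   | ((1LL << 3) - 1) << 32	= 0x70000000f
 *
 * i.e. the enable bits for PMC0-3 and FIXED_CTR0-2 as laid out in
 * MSR_CORE_PERF_GLOBAL_CTRL (X86_PMC_IDX_FIXED is 32).
 */
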
@@ -1462,6 +1463,32 @@ static const struct pmu pmu = {
	.unthrottle	= x86_pmu_unthrottle,
 };

+/*
+ * validate that we can schedule this event
+ */
+static int validate_event(struct perf_event *event)
+{
+	struct cpu_hw_events *fake_cpuc;
+	struct event_constraint *c;
+	int ret = 0;
+
+	fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
+	if (!fake_cpuc)
+		return -ENOMEM;
+
+	c = x86_pmu.get_event_constraints(fake_cpuc, event);
+
+	if (!c || !c->weight)
+		ret = -ENOSPC;
+
+	if (x86_pmu.put_event_constraints)
+		x86_pmu.put_event_constraints(fake_cpuc, event);
+
+	kfree(fake_cpuc);
+
+	return ret;
+}
+
 /*
  * validate a single event group
  *
@@ -1502,7 +1529,7 @@ static int validate_group(struct perf_event *event)

	fake_cpuc->n_events = n;

-	ret = x86_schedule_events(fake_cpuc, n, NULL);
+	ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);

 out_free:
	kfree(fake_cpuc);
@@ -1527,6 +1554,8 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)

		if (event->group_leader != event)
			err = validate_group(event);
+		else
+			err = validate_event(event);

		event->pmu = tmp;
	}
@@ -1574,8 +1603,7 @@ static void backtrace_address(void *data, unsigned long addr, int reliable)
 {
	struct perf_callchain_entry *entry = data;

-	if (reliable)
-		callchain_store(entry, addr);
+	callchain_store(entry, addr);
 }

 static const struct stacktrace_ops backtrace_ops = {
@@ -1597,41 +1625,6 @@ perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
	dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
 }

-/*
- * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
- */
-static unsigned long
-copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
-{
-	unsigned long offset, addr = (unsigned long)from;
-	int type = in_nmi() ? KM_NMI : KM_IRQ0;
-	unsigned long size, len = 0;
-	struct page *page;
-	void *map;
-	int ret;
-
-	do {
-		ret = __get_user_pages_fast(addr, 1, 0, &page);
-		if (!ret)
-			break;
-
-		offset = addr & (PAGE_SIZE - 1);
-		size = min(PAGE_SIZE - offset, n - len);
-
-		map = kmap_atomic(page, type);
-		memcpy(to, map+offset, size);
-		kunmap_atomic(map, type);
-		put_page(page);
-
-		len += size;
-		to += size;
-		addr += size;
-
-	} while (len < n);
-
-	return len;
-}
-
 #ifdef CONFIG_COMPAT
 static inline int
 perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
@@ -1727,6 +1720,11 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 {
	struct perf_callchain_entry *entry;

+	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+		/* TODO: We don't support guest os callchain now */
+		return NULL;
+	}
+
	if (in_nmi())
		entry = &__get_cpu_var(pmc_nmi_entry);
	else
@@ -1750,3 +1748,29 @@ void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int ski
	regs->cs = __KERNEL_CS;
	local_save_flags(regs->flags);
 }
+
+unsigned long perf_instruction_pointer(struct pt_regs *regs)
+{
+	unsigned long ip;
+	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
+		ip = perf_guest_cbs->get_guest_ip();
+	else
+		ip = instruction_pointer(regs);
+	return ip;
+}
+
+unsigned long perf_misc_flags(struct pt_regs *regs)
+{
+	int misc = 0;
+	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+		misc |= perf_guest_cbs->is_user_mode() ?
+			PERF_RECORD_MISC_GUEST_USER :
+			PERF_RECORD_MISC_GUEST_KERNEL;
+	} else
+		misc |= user_mode(regs) ? PERF_RECORD_MISC_USER :
+			PERF_RECORD_MISC_KERNEL;
+	if (regs->flags & PERF_EFLAGS_EXACT)
+		misc |= PERF_RECORD_MISC_EXACT;
+
+	return misc;
+}
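
perf_guest_cbs is the hook a hypervisor module fills in so that samples taken while a vCPU is running get tagged with guest state, as the two functions above show. A hedged sketch of a registration, modeled on what KVM does in this same merge — struct perf_guest_info_callbacks and perf_register_guest_info_callbacks() are the real API, while the three callbacks here are stubs:

static int my_is_in_guest(void)		{ return 0; /* stub */ }
static int my_is_user_mode(void)	{ return 0; /* stub */ }
static unsigned long my_get_guest_ip(void) { return 0; /* stub */ }

static struct perf_guest_info_callbacks my_guest_cbs = {
	.is_in_guest	= my_is_in_guest,
	.is_user_mode	= my_is_user_mode,
	.get_guest_ip	= my_get_guest_ip,
};

/* somewhere in module init:
 *	perf_register_guest_info_callbacks(&my_guest_cbs);
 */
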
@@ -2,7 +2,7 @@

 static DEFINE_RAW_SPINLOCK(amd_nb_lock);

-static __initconst u64 amd_hw_cache_event_ids
+static __initconst const u64 amd_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
@@ -111,22 +111,19 @@ static u64 amd_pmu_event_map(int hw_event)
	return amd_perfmon_event_map[hw_event];
 }

-static u64 amd_pmu_raw_event(u64 hw_event)
+static int amd_pmu_hw_config(struct perf_event *event)
 {
-#define K7_EVNTSEL_EVENT_MASK	0xF000000FFULL
-#define K7_EVNTSEL_UNIT_MASK	0x00000FF00ULL
-#define K7_EVNTSEL_EDGE_MASK	0x000040000ULL
-#define K7_EVNTSEL_INV_MASK	0x000800000ULL
-#define K7_EVNTSEL_REG_MASK	0x0FF000000ULL
-
-#define K7_EVNTSEL_MASK			\
-	(K7_EVNTSEL_EVENT_MASK |	\
-	 K7_EVNTSEL_UNIT_MASK  |	\
-	 K7_EVNTSEL_EDGE_MASK  |	\
-	 K7_EVNTSEL_INV_MASK   |	\
-	 K7_EVNTSEL_REG_MASK)
-
-	return hw_event & K7_EVNTSEL_MASK;
+	int ret = x86_pmu_hw_config(event);
+
+	if (ret)
+		return ret;
+
+	if (event->attr.type != PERF_TYPE_RAW)
+		return 0;
+
+	event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;
+
+	return 0;
 }

 /*
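
The AMD-specific detail worth noting: AMD64_RAW_EVENT_MASK is wider than the generic raw mask because AMD encodes four extra event-select bits at positions 32-35. A hedged illustration — the raw value is a made-up example:

struct perf_event_attr attr = {
	.type	= PERF_TYPE_RAW,
	.config	= (1ULL << 32) | 0x76,	/* example: extended event select bits */
};

/* after amd_pmu_hw_config():
 *	hw.config = ARCH_PERFMON_EVENTSEL_INT | USR | OS
 *		  | (attr.config & AMD64_RAW_EVENT_MASK);
 * the bit at position 32 survives the AMD mask but would be stripped
 * by the X86_RAW_EVENT_MASK used on the generic/Intel path.
 */
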
@@ -165,7 +162,7 @@ static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
	 * be removed on one CPU at a time AND PMU is disabled
	 * when we come here
	 */
-	for (i = 0; i < x86_pmu.num_events; i++) {
+	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (nb->owners[i] == event) {
			cmpxchg(nb->owners+i, event, NULL);
			break;
@@ -215,7 +212,7 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
	struct hw_perf_event *hwc = &event->hw;
	struct amd_nb *nb = cpuc->amd_nb;
	struct perf_event *old = NULL;
-	int max = x86_pmu.num_events;
+	int max = x86_pmu.num_counters;
	int i, j, k = -1;

	/*
@@ -293,7 +290,7 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
	/*
	 * initialize all possible NB constraints
	 */
-	for (i = 0; i < x86_pmu.num_events; i++) {
+	for (i = 0; i < x86_pmu.num_counters; i++) {
		__set_bit(i, nb->event_constraints[i].idxmsk);
		nb->event_constraints[i].weight = 1;
	}
@ -371,21 +368,22 @@ static void amd_pmu_cpu_dead(int cpu)
|
||||||
raw_spin_unlock(&amd_nb_lock);
|
raw_spin_unlock(&amd_nb_lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
static __initconst struct x86_pmu amd_pmu = {
|
static __initconst const struct x86_pmu amd_pmu = {
|
||||||
.name = "AMD",
|
.name = "AMD",
|
||||||
.handle_irq = x86_pmu_handle_irq,
|
.handle_irq = x86_pmu_handle_irq,
|
||||||
.disable_all = x86_pmu_disable_all,
|
.disable_all = x86_pmu_disable_all,
|
||||||
.enable_all = x86_pmu_enable_all,
|
.enable_all = x86_pmu_enable_all,
|
||||||
.enable = x86_pmu_enable_event,
|
.enable = x86_pmu_enable_event,
|
||||||
.disable = x86_pmu_disable_event,
|
.disable = x86_pmu_disable_event,
|
||||||
|
.hw_config = amd_pmu_hw_config,
|
||||||
|
.schedule_events = x86_schedule_events,
|
||||||
.eventsel = MSR_K7_EVNTSEL0,
|
.eventsel = MSR_K7_EVNTSEL0,
|
||||||
.perfctr = MSR_K7_PERFCTR0,
|
.perfctr = MSR_K7_PERFCTR0,
|
||||||
.event_map = amd_pmu_event_map,
|
.event_map = amd_pmu_event_map,
|
||||||
.raw_event = amd_pmu_raw_event,
|
|
||||||
.max_events = ARRAY_SIZE(amd_perfmon_event_map),
|
.max_events = ARRAY_SIZE(amd_perfmon_event_map),
|
||||||
.num_events = 4,
|
.num_counters = 4,
|
||||||
.event_bits = 48,
|
.cntval_bits = 48,
|
||||||
.event_mask = (1ULL << 48) - 1,
|
.cntval_mask = (1ULL << 48) - 1,
|
||||||
.apic = 1,
|
.apic = 1,
|
||||||
/* use highest bit to detect overflow */
|
/* use highest bit to detect overflow */
|
||||||
.max_period = (1ULL << 47) - 1,
|
.max_period = (1ULL << 47) - 1,
|
||||||
|
|
|
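Editorial aside (not part of the commit): raw PERF_TYPE_RAW configs on AMD are now masked in amd_pmu_hw_config() with AMD64_RAW_EVENT_MASK instead of the old K7_EVNTSEL_* macros. A hedged userspace sketch of exercising that path via perf_event_open(2); 0x076 is assumed here to be AMD's "CPU clocks not halted" event select per the BKDG:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size     = sizeof(attr);
	attr.type     = PERF_TYPE_RAW;	/* validated by amd_pmu_hw_config() */
	attr.config   = 0x076;		/* event select/unit mask, clipped by AMD64_RAW_EVENT_MASK */
	attr.disabled = 1;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("raw event count: %lld\n", count);
	close(fd);
	return 0;
}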
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -88,7 +88,7 @@ static u64 intel_pmu_event_map(int hw_event)
 	return intel_perfmon_event_map[hw_event];
 }
 
-static __initconst u64 westmere_hw_cache_event_ids
+static __initconst const u64 westmere_hw_cache_event_ids
 				[PERF_COUNT_HW_CACHE_MAX]
 				[PERF_COUNT_HW_CACHE_OP_MAX]
 				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
@@ -179,7 +179,7 @@ static __initconst u64 westmere_hw_cache_event_ids
 	},
 };
 
-static __initconst u64 nehalem_hw_cache_event_ids
+static __initconst const u64 nehalem_hw_cache_event_ids
 				[PERF_COUNT_HW_CACHE_MAX]
 				[PERF_COUNT_HW_CACHE_OP_MAX]
 				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
@@ -270,7 +270,7 @@ static __initconst u64 nehalem_hw_cache_event_ids
 	},
 };
 
-static __initconst u64 core2_hw_cache_event_ids
+static __initconst const u64 core2_hw_cache_event_ids
 				[PERF_COUNT_HW_CACHE_MAX]
 				[PERF_COUNT_HW_CACHE_OP_MAX]
 				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
@@ -361,7 +361,7 @@ static __initconst u64 core2_hw_cache_event_ids
 	},
 };
 
-static __initconst u64 atom_hw_cache_event_ids
+static __initconst const u64 atom_hw_cache_event_ids
 				[PERF_COUNT_HW_CACHE_MAX]
 				[PERF_COUNT_HW_CACHE_OP_MAX]
 				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
@@ -452,60 +452,6 @@ static __initconst u64 atom_hw_cache_event_ids
 	},
 };
 
-static u64 intel_pmu_raw_event(u64 hw_event)
-{
-#define CORE_EVNTSEL_EVENT_MASK		0x000000FFULL
-#define CORE_EVNTSEL_UNIT_MASK		0x0000FF00ULL
-#define CORE_EVNTSEL_EDGE_MASK		0x00040000ULL
-#define CORE_EVNTSEL_INV_MASK		0x00800000ULL
-#define CORE_EVNTSEL_REG_MASK		0xFF000000ULL
-
-#define CORE_EVNTSEL_MASK		\
-	(INTEL_ARCH_EVTSEL_MASK |	\
-	 INTEL_ARCH_UNIT_MASK   |	\
-	 INTEL_ARCH_EDGE_MASK   |	\
-	 INTEL_ARCH_INV_MASK    |	\
-	 INTEL_ARCH_CNT_MASK)
-
-	return hw_event & CORE_EVNTSEL_MASK;
-}
-
-static void intel_pmu_enable_bts(u64 config)
-{
-	unsigned long debugctlmsr;
-
-	debugctlmsr = get_debugctlmsr();
-
-	debugctlmsr |= X86_DEBUGCTL_TR;
-	debugctlmsr |= X86_DEBUGCTL_BTS;
-	debugctlmsr |= X86_DEBUGCTL_BTINT;
-
-	if (!(config & ARCH_PERFMON_EVENTSEL_OS))
-		debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;
-
-	if (!(config & ARCH_PERFMON_EVENTSEL_USR))
-		debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;
-
-	update_debugctlmsr(debugctlmsr);
-}
-
-static void intel_pmu_disable_bts(void)
-{
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-	unsigned long debugctlmsr;
-
-	if (!cpuc->ds)
-		return;
-
-	debugctlmsr = get_debugctlmsr();
-
-	debugctlmsr &=
-		~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
-		  X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);
-
-	update_debugctlmsr(debugctlmsr);
-}
-
 static void intel_pmu_disable_all(void)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@@ -514,12 +460,17 @@ static void intel_pmu_disable_all(void)
 
 	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
 		intel_pmu_disable_bts();
+
+	intel_pmu_pebs_disable_all();
+	intel_pmu_lbr_disable_all();
 }
 
-static void intel_pmu_enable_all(void)
+static void intel_pmu_enable_all(int added)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
+	intel_pmu_pebs_enable_all();
+	intel_pmu_lbr_enable_all();
 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
 
 	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
@@ -533,6 +484,41 @@ static void intel_pmu_enable_all(void)
 	}
 }
 
+/*
+ * Workaround for:
+ *   Intel Errata AAK100 (model 26)
+ *   Intel Errata AAP53  (model 30)
+ *   Intel Errata BD53   (model 44)
+ *
+ * These chips need to be 'reset' when adding counters by programming
+ * the magic three (non counting) events 0x4300D2, 0x4300B1 and 0x4300B5
+ * either in sequence on the same PMC or on different PMCs.
+ */
+static void intel_pmu_nhm_enable_all(int added)
+{
+	if (added) {
+		struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+		int i;
+
+		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 0, 0x4300D2);
+		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 1, 0x4300B1);
+		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 2, 0x4300B5);
+
+		wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x3);
+		wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
+
+		for (i = 0; i < 3; i++) {
+			struct perf_event *event = cpuc->events[i];
+
+			if (!event)
+				continue;
+
+			__x86_pmu_enable_event(&event->hw);
+		}
+	}
+	intel_pmu_enable_all(added);
+}
+
 static inline u64 intel_pmu_get_status(void)
 {
 	u64 status;
@@ -547,8 +533,7 @@ static inline void intel_pmu_ack_status(u64 ack)
 	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
 }
 
-static inline void
-intel_pmu_disable_fixed(struct hw_perf_event *hwc)
+static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
 {
 	int idx = hwc->idx - X86_PMC_IDX_FIXED;
 	u64 ctrl_val, mask;
@@ -557,71 +542,10 @@ intel_pmu_disable_fixed(struct hw_perf_event *hwc)
 
 	rdmsrl(hwc->config_base, ctrl_val);
 	ctrl_val &= ~mask;
-	(void)checking_wrmsrl(hwc->config_base, ctrl_val);
+	wrmsrl(hwc->config_base, ctrl_val);
 }
 
-static void intel_pmu_drain_bts_buffer(void)
-{
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-	struct debug_store *ds = cpuc->ds;
-	struct bts_record {
-		u64	from;
-		u64	to;
-		u64	flags;
-	};
-	struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
-	struct bts_record *at, *top;
-	struct perf_output_handle handle;
-	struct perf_event_header header;
-	struct perf_sample_data data;
-	struct pt_regs regs;
-
-	if (!event)
-		return;
-
-	if (!ds)
-		return;
-
-	at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
-	top = (struct bts_record *)(unsigned long)ds->bts_index;
-
-	if (top <= at)
-		return;
-
-	ds->bts_index = ds->bts_buffer_base;
-
-	perf_sample_data_init(&data, 0);
-
-	data.period	= event->hw.last_period;
-	regs.ip		= 0;
-
-	/*
-	 * Prepare a generic sample, i.e. fill in the invariant fields.
-	 * We will overwrite the from and to address before we output
-	 * the sample.
-	 */
-	perf_prepare_sample(&header, &data, event, &regs);
-
-	if (perf_output_begin(&handle, event,
-			      header.size * (top - at), 1, 1))
-		return;
-
-	for (; at < top; at++) {
-		data.ip		= at->from;
-		data.addr	= at->to;
-
-		perf_output_sample(&handle, &header, &data, event);
-	}
-
-	perf_output_end(&handle);
-
-	/* There's new data available. */
-	event->hw.interrupts++;
-	event->pending_kill = POLL_IN;
-}
-
-static inline void
-intel_pmu_disable_event(struct perf_event *event)
+static void intel_pmu_disable_event(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
 
@@ -637,14 +561,15 @@ intel_pmu_disable_event(struct perf_event *event)
 	}
 
 	x86_pmu_disable_event(event);
+
+	if (unlikely(event->attr.precise))
+		intel_pmu_pebs_disable(event);
 }
 
-static inline void
-intel_pmu_enable_fixed(struct hw_perf_event *hwc)
+static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
 {
 	int idx = hwc->idx - X86_PMC_IDX_FIXED;
 	u64 ctrl_val, bits, mask;
-	int err;
 
 	/*
 	 * Enable IRQ generation (0x8),
@@ -669,7 +594,7 @@ intel_pmu_enable_fixed(struct hw_perf_event *hwc)
 	rdmsrl(hwc->config_base, ctrl_val);
 	ctrl_val &= ~mask;
 	ctrl_val |= bits;
-	err = checking_wrmsrl(hwc->config_base, ctrl_val);
+	wrmsrl(hwc->config_base, ctrl_val);
 }
 
 static void intel_pmu_enable_event(struct perf_event *event)
@@ -689,6 +614,9 @@ static void intel_pmu_enable_event(struct perf_event *event)
 		return;
 	}
 
+	if (unlikely(event->attr.precise))
+		intel_pmu_pebs_enable(event);
+
 	__x86_pmu_enable_event(hwc);
 }
 
@@ -708,20 +636,20 @@ static void intel_pmu_reset(void)
 	unsigned long flags;
 	int idx;
 
-	if (!x86_pmu.num_events)
+	if (!x86_pmu.num_counters)
 		return;
 
 	local_irq_save(flags);
 
 	printk("clearing PMU state on CPU#%d\n", smp_processor_id());
 
-	for (idx = 0; idx < x86_pmu.num_events; idx++) {
+	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
 		checking_wrmsrl(x86_pmu.perfctr  + idx, 0ull);
 	}
-	for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
+	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
 		checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
-	}
+
 	if (ds)
 		ds->bts_index = ds->bts_buffer_base;
 
@@ -747,7 +675,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 	intel_pmu_drain_bts_buffer();
 	status = intel_pmu_get_status();
 	if (!status) {
-		intel_pmu_enable_all();
+		intel_pmu_enable_all(0);
 		return 0;
 	}
 
@@ -762,6 +690,15 @@ again:
 
 	inc_irq_stat(apic_perf_irqs);
 	ack = status;
+
+	intel_pmu_lbr_read();
+
+	/*
+	 * PEBS overflow sets bit 62 in the global status register
+	 */
+	if (__test_and_clear_bit(62, (unsigned long *)&status))
+		x86_pmu.drain_pebs(regs);
+
 	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
 		struct perf_event *event = cpuc->events[bit];
 
@@ -787,26 +724,22 @@ again:
 		goto again;
 
 done:
-	intel_pmu_enable_all();
+	intel_pmu_enable_all(0);
 	return 1;
 }
 
-static struct event_constraint bts_constraint =
-	EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
-
 static struct event_constraint *
-intel_special_constraints(struct perf_event *event)
+intel_bts_constraints(struct perf_event *event)
 {
-	unsigned int hw_event;
-
-	hw_event = event->hw.config & INTEL_ARCH_EVENT_MASK;
-
-	if (unlikely((hw_event ==
-		      x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
-		     (event->hw.sample_period == 1))) {
+	struct hw_perf_event *hwc = &event->hw;
+	unsigned int hw_event, bts_event;
 
+	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
+	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
+
+	if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
 		return &bts_constraint;
-	}
+
 	return NULL;
 }
 
@@ -815,24 +748,53 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event
 {
 	struct event_constraint *c;
 
-	c = intel_special_constraints(event);
+	c = intel_bts_constraints(event);
+	if (c)
+		return c;
+
+	c = intel_pebs_constraints(event);
 	if (c)
 		return c;
 
 	return x86_get_event_constraints(cpuc, event);
 }
 
-static __initconst struct x86_pmu core_pmu = {
+static int intel_pmu_hw_config(struct perf_event *event)
+{
+	int ret = x86_pmu_hw_config(event);
+
+	if (ret)
+		return ret;
+
+	if (event->attr.type != PERF_TYPE_RAW)
+		return 0;
+
+	if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
+		return 0;
+
+	if (x86_pmu.version < 3)
+		return -EINVAL;
+
+	if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
+
+	return 0;
+}
+
+static __initconst const struct x86_pmu core_pmu = {
 	.name			= "core",
 	.handle_irq		= x86_pmu_handle_irq,
 	.disable_all		= x86_pmu_disable_all,
 	.enable_all		= x86_pmu_enable_all,
 	.enable			= x86_pmu_enable_event,
 	.disable		= x86_pmu_disable_event,
+	.hw_config		= x86_pmu_hw_config,
+	.schedule_events	= x86_schedule_events,
 	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
 	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
 	.event_map		= intel_pmu_event_map,
-	.raw_event		= intel_pmu_raw_event,
 	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
 	.apic			= 1,
 	/*
@@ -845,17 +807,32 @@ static __initconst struct x86_pmu core_pmu = {
 	.event_constraints	= intel_core_event_constraints,
 };
 
-static __initconst struct x86_pmu intel_pmu = {
+static void intel_pmu_cpu_starting(int cpu)
+{
+	init_debug_store_on_cpu(cpu);
+	/*
+	 * Deal with CPUs that don't clear their LBRs on power-up.
+	 */
+	intel_pmu_lbr_reset();
+}
+
+static void intel_pmu_cpu_dying(int cpu)
+{
+	fini_debug_store_on_cpu(cpu);
+}
+
+static __initconst const struct x86_pmu intel_pmu = {
 	.name			= "Intel",
 	.handle_irq		= intel_pmu_handle_irq,
 	.disable_all		= intel_pmu_disable_all,
 	.enable_all		= intel_pmu_enable_all,
 	.enable			= intel_pmu_enable_event,
 	.disable		= intel_pmu_disable_event,
+	.hw_config		= intel_pmu_hw_config,
+	.schedule_events	= x86_schedule_events,
 	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
 	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
 	.event_map		= intel_pmu_event_map,
-	.raw_event		= intel_pmu_raw_event,
 	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
 	.apic			= 1,
 	/*
@@ -864,14 +841,38 @@ static __initconst struct x86_pmu intel_pmu = {
 	 * the generic event period:
 	 */
 	.max_period		= (1ULL << 31) - 1,
-	.enable_bts		= intel_pmu_enable_bts,
-	.disable_bts		= intel_pmu_disable_bts,
 	.get_event_constraints	= intel_get_event_constraints,
 
-	.cpu_starting		= init_debug_store_on_cpu,
-	.cpu_dying		= fini_debug_store_on_cpu,
+	.cpu_starting		= intel_pmu_cpu_starting,
+	.cpu_dying		= intel_pmu_cpu_dying,
 };
 
+static void intel_clovertown_quirks(void)
+{
+	/*
+	 * PEBS is unreliable due to:
+	 *
+	 *   AJ67  - PEBS may experience CPL leaks
+	 *   AJ68  - PEBS PMI may be delayed by one event
+	 *   AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
+	 *   AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
+	 *
+	 * AJ67  could be worked around by restricting the OS/USR flags.
+	 * AJ69  could be worked around by setting PMU_FREEZE_ON_PMI.
+	 *
+	 * AJ106 could possibly be worked around by not allowing LBR
+	 *       usage from PEBS, including the fixup.
+	 * AJ68  could possibly be worked around by always programming
+	 *       a pebs_event_reset[0] value and coping with the lost events.
+	 *
+	 * But taken together it might just make sense to not enable PEBS on
+	 * these chips.
+	 */
+	printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
+	x86_pmu.pebs = 0;
+	x86_pmu.pebs_constraints = NULL;
+}
+
 static __init int intel_pmu_init(void)
 {
 	union cpuid10_edx edx;
@@ -881,12 +882,13 @@ static __init int intel_pmu_init(void)
 	int version;
 
 	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
-		/* check for P6 processor family */
-		if (boot_cpu_data.x86 == 6) {
+		switch (boot_cpu_data.x86) {
+		case 0x6:
 			return p6_pmu_init();
-		} else {
+		case 0xf:
+			return p4_pmu_init();
+		}
 		return -ENODEV;
-		}
 	}
 
 	/*
@@ -904,16 +906,28 @@ static __init int intel_pmu_init(void)
 	x86_pmu = intel_pmu;
 
 	x86_pmu.version			= version;
-	x86_pmu.num_events		= eax.split.num_events;
-	x86_pmu.event_bits		= eax.split.bit_width;
-	x86_pmu.event_mask		= (1ULL << eax.split.bit_width) - 1;
+	x86_pmu.num_counters		= eax.split.num_counters;
+	x86_pmu.cntval_bits		= eax.split.bit_width;
+	x86_pmu.cntval_mask		= (1ULL << eax.split.bit_width) - 1;
 
 	/*
 	 * Quirk: v2 perfmon does not report fixed-purpose events, so
 	 * assume at least 3 events:
 	 */
 	if (version > 1)
-		x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3);
+		x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
+
+	/*
+	 * v2 and above have a perf capabilities MSR
+	 */
+	if (version > 1) {
+		u64 capabilities;
+
+		rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
+		x86_pmu.intel_cap.capabilities = capabilities;
+	}
+
+	intel_ds_init();
 
 	/*
 	 * Install the hw-cache-events table:
@@ -924,12 +938,15 @@ static __init int intel_pmu_init(void)
 		break;
 
 	case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
+		x86_pmu.quirks = intel_clovertown_quirks;
 	case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
 	case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
 	case 29: /* six-core 45 nm xeon "Dunnington" */
 		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
 
+		intel_pmu_lbr_init_core();
+
 		x86_pmu.event_constraints = intel_core2_event_constraints;
 		pr_cont("Core2 events, ");
 		break;
@@ -940,13 +957,19 @@ static __init int intel_pmu_init(void)
 		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
 
+		intel_pmu_lbr_init_nhm();
+
 		x86_pmu.event_constraints = intel_nehalem_event_constraints;
-		pr_cont("Nehalem/Corei7 events, ");
+		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
+		pr_cont("Nehalem events, ");
 		break;
 
 	case 28: /* Atom */
 		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
 
+		intel_pmu_lbr_init_atom();
+
 		x86_pmu.event_constraints = intel_gen_event_constraints;
 		pr_cont("Atom events, ");
 		break;
@@ -956,7 +979,10 @@ static __init int intel_pmu_init(void)
 		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
 
+		intel_pmu_lbr_init_nhm();
+
 		x86_pmu.event_constraints = intel_westmere_event_constraints;
+		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
 		pr_cont("Westmere events, ");
 		break;
 
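Editorial aside (not part of the commit): intel_pmu_hw_config() above gates the ANY-thread bit (ARCH_PERFMON_EVENTSEL_ANY, bit 21) behind a version-3 PMU and, when perf is in paranoid mode, CAP_SYS_ADMIN. A hedged userspace sketch of a request that trips that check; 0x003c is assumed to be the UNHALTED_CORE_CYCLES event select:

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

#define EVENTSEL_ANY	(1ULL << 21)	/* ARCH_PERFMON_EVENTSEL_ANY */

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size   = sizeof(attr);
	attr.type   = PERF_TYPE_RAW;		/* routed through intel_pmu_hw_config() */
	/* the ANY bit makes the counter count across both HT siblings */
	attr.config = 0x003c | EVENTSEL_ANY;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		fprintf(stderr, "rejected: %s (EINVAL pre-v3 PMU, EACCES without CAP_SYS_ADMIN)\n",
			strerror(errno));
	else
		close(fd);
	return 0;
}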
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -0,0 +1,664 @@
+#ifdef CONFIG_CPU_SUP_INTEL
+
+/* The maximal number of PEBS events: */
+#define MAX_PEBS_EVENTS	4
+
+/* The size of a BTS record in bytes: */
+#define BTS_RECORD_SIZE		24
+
+#define BTS_BUFFER_SIZE		(PAGE_SIZE << 4)
+#define PEBS_BUFFER_SIZE	PAGE_SIZE
+
+/*
+ * pebs_record_32 for p4 and core not supported
+
+struct pebs_record_32 {
+	u32 flags, ip;
+	u32 ax, bc, cx, dx;
+	u32 si, di, bp, sp;
+};
+
+ */
+
+struct pebs_record_core {
+	u64 flags, ip;
+	u64 ax, bx, cx, dx;
+	u64 si, di, bp, sp;
+	u64 r8,  r9,  r10, r11;
+	u64 r12, r13, r14, r15;
+};
+
+struct pebs_record_nhm {
+	u64 flags, ip;
+	u64 ax, bx, cx, dx;
+	u64 si, di, bp, sp;
+	u64 r8,  r9,  r10, r11;
+	u64 r12, r13, r14, r15;
+	u64 status, dla, dse, lat;
+};
+
+/*
+ * A debug store configuration.
+ *
+ * We only support architectures that use 64bit fields.
+ */
+struct debug_store {
+	u64	bts_buffer_base;
+	u64	bts_index;
+	u64	bts_absolute_maximum;
+	u64	bts_interrupt_threshold;
+	u64	pebs_buffer_base;
+	u64	pebs_index;
+	u64	pebs_absolute_maximum;
+	u64	pebs_interrupt_threshold;
+	u64	pebs_event_reset[MAX_PEBS_EVENTS];
+};
+
+static void init_debug_store_on_cpu(int cpu)
+{
+	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+
+	if (!ds)
+		return;
+
+	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
+		     (u32)((u64)(unsigned long)ds),
+		     (u32)((u64)(unsigned long)ds >> 32));
+}
+
+static void fini_debug_store_on_cpu(int cpu)
+{
+	if (!per_cpu(cpu_hw_events, cpu).ds)
+		return;
+
+	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
+}
+
+static void release_ds_buffers(void)
+{
+	int cpu;
+
+	if (!x86_pmu.bts && !x86_pmu.pebs)
+		return;
+
+	get_online_cpus();
+
+	for_each_online_cpu(cpu)
+		fini_debug_store_on_cpu(cpu);
+
+	for_each_possible_cpu(cpu) {
+		struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+
+		if (!ds)
+			continue;
+
+		per_cpu(cpu_hw_events, cpu).ds = NULL;
+
+		kfree((void *)(unsigned long)ds->pebs_buffer_base);
+		kfree((void *)(unsigned long)ds->bts_buffer_base);
+		kfree(ds);
+	}
+
+	put_online_cpus();
+}
+
+static int reserve_ds_buffers(void)
+{
+	int cpu, err = 0;
+
+	if (!x86_pmu.bts && !x86_pmu.pebs)
+		return 0;
+
+	get_online_cpus();
+
+	for_each_possible_cpu(cpu) {
+		struct debug_store *ds;
+		void *buffer;
+		int max, thresh;
+
+		err = -ENOMEM;
+		ds = kzalloc(sizeof(*ds), GFP_KERNEL);
+		if (unlikely(!ds))
+			break;
+		per_cpu(cpu_hw_events, cpu).ds = ds;
+
+		if (x86_pmu.bts) {
+			buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
+			if (unlikely(!buffer))
+				break;
+
+			max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
+			thresh = max / 16;
+
+			ds->bts_buffer_base = (u64)(unsigned long)buffer;
+			ds->bts_index = ds->bts_buffer_base;
+			ds->bts_absolute_maximum = ds->bts_buffer_base +
+				max * BTS_RECORD_SIZE;
+			ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
+				thresh * BTS_RECORD_SIZE;
+		}
+
+		if (x86_pmu.pebs) {
+			buffer = kzalloc(PEBS_BUFFER_SIZE, GFP_KERNEL);
+			if (unlikely(!buffer))
+				break;
+
+			max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;
+
+			ds->pebs_buffer_base = (u64)(unsigned long)buffer;
+			ds->pebs_index = ds->pebs_buffer_base;
+			ds->pebs_absolute_maximum = ds->pebs_buffer_base +
+				max * x86_pmu.pebs_record_size;
+			/*
+			 * Always use single record PEBS
+			 */
+			ds->pebs_interrupt_threshold = ds->pebs_buffer_base +
+				x86_pmu.pebs_record_size;
+		}
+
+		err = 0;
+	}
+
+	if (err)
+		release_ds_buffers();
+	else {
+		for_each_online_cpu(cpu)
+			init_debug_store_on_cpu(cpu);
+	}
+
+	put_online_cpus();
+
+	return err;
+}
+
+/*
+ * BTS
+ */
+
+static struct event_constraint bts_constraint =
+	EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
+
+static void intel_pmu_enable_bts(u64 config)
+{
+	unsigned long debugctlmsr;
+
+	debugctlmsr = get_debugctlmsr();
+
+	debugctlmsr |= DEBUGCTLMSR_TR;
+	debugctlmsr |= DEBUGCTLMSR_BTS;
+	debugctlmsr |= DEBUGCTLMSR_BTINT;
+
+	if (!(config & ARCH_PERFMON_EVENTSEL_OS))
+		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_OS;
+
+	if (!(config & ARCH_PERFMON_EVENTSEL_USR))
+		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_USR;
+
+	update_debugctlmsr(debugctlmsr);
+}
+
+static void intel_pmu_disable_bts(void)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	unsigned long debugctlmsr;
+
+	if (!cpuc->ds)
+		return;
+
+	debugctlmsr = get_debugctlmsr();
+
+	debugctlmsr &=
+		~(DEBUGCTLMSR_TR | DEBUGCTLMSR_BTS | DEBUGCTLMSR_BTINT |
+		  DEBUGCTLMSR_BTS_OFF_OS | DEBUGCTLMSR_BTS_OFF_USR);
+
+	update_debugctlmsr(debugctlmsr);
+}
+
+static void intel_pmu_drain_bts_buffer(void)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct debug_store *ds = cpuc->ds;
+	struct bts_record {
+		u64	from;
+		u64	to;
+		u64	flags;
+	};
+	struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
+	struct bts_record *at, *top;
+	struct perf_output_handle handle;
+	struct perf_event_header header;
+	struct perf_sample_data data;
+	struct pt_regs regs;
+
+	if (!event)
+		return;
+
+	if (!ds)
+		return;
+
+	at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
+	top = (struct bts_record *)(unsigned long)ds->bts_index;
+
+	if (top <= at)
+		return;
+
+	ds->bts_index = ds->bts_buffer_base;
+
+	perf_sample_data_init(&data, 0);
+	data.period = event->hw.last_period;
+	regs.ip     = 0;
+
+	/*
+	 * Prepare a generic sample, i.e. fill in the invariant fields.
+	 * We will overwrite the from and to address before we output
+	 * the sample.
+	 */
+	perf_prepare_sample(&header, &data, event, &regs);
+
+	if (perf_output_begin(&handle, event, header.size * (top - at), 1, 1))
+		return;
+
+	for (; at < top; at++) {
+		data.ip   = at->from;
+		data.addr = at->to;
+
+		perf_output_sample(&handle, &header, &data, event);
+	}
+
+	perf_output_end(&handle);
+
+	/* There's new data available. */
+	event->hw.interrupts++;
+	event->pending_kill = POLL_IN;
+}
+
+/*
+ * PEBS
+ */
+
+static struct event_constraint intel_core_pebs_events[] = {
+	PEBS_EVENT_CONSTRAINT(0x00c0, 0x1), /* INSTR_RETIRED.ANY */
+	PEBS_EVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
+	PEBS_EVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
+	PEBS_EVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETURED.ANY */
+	PEBS_EVENT_CONSTRAINT(0x01cb, 0x1), /* MEM_LOAD_RETIRED.L1D_MISS */
+	PEBS_EVENT_CONSTRAINT(0x02cb, 0x1), /* MEM_LOAD_RETIRED.L1D_LINE_MISS */
+	PEBS_EVENT_CONSTRAINT(0x04cb, 0x1), /* MEM_LOAD_RETIRED.L2_MISS */
+	PEBS_EVENT_CONSTRAINT(0x08cb, 0x1), /* MEM_LOAD_RETIRED.L2_LINE_MISS */
+	PEBS_EVENT_CONSTRAINT(0x10cb, 0x1), /* MEM_LOAD_RETIRED.DTLB_MISS */
+	EVENT_CONSTRAINT_END
+};
+
+static struct event_constraint intel_nehalem_pebs_events[] = {
+	PEBS_EVENT_CONSTRAINT(0x00c0, 0xf), /* INSTR_RETIRED.ANY */
+	PEBS_EVENT_CONSTRAINT(0xfec1, 0xf), /* X87_OPS_RETIRED.ANY */
+	PEBS_EVENT_CONSTRAINT(0x00c5, 0xf), /* BR_INST_RETIRED.MISPRED */
+	PEBS_EVENT_CONSTRAINT(0x1fc7, 0xf), /* SIMD_INST_RETURED.ANY */
+	PEBS_EVENT_CONSTRAINT(0x01cb, 0xf), /* MEM_LOAD_RETIRED.L1D_MISS */
+	PEBS_EVENT_CONSTRAINT(0x02cb, 0xf), /* MEM_LOAD_RETIRED.L1D_LINE_MISS */
+	PEBS_EVENT_CONSTRAINT(0x04cb, 0xf), /* MEM_LOAD_RETIRED.L2_MISS */
+	PEBS_EVENT_CONSTRAINT(0x08cb, 0xf), /* MEM_LOAD_RETIRED.L2_LINE_MISS */
+	PEBS_EVENT_CONSTRAINT(0x10cb, 0xf), /* MEM_LOAD_RETIRED.DTLB_MISS */
+	EVENT_CONSTRAINT_END
+};
+
+static struct event_constraint *
+intel_pebs_constraints(struct perf_event *event)
+{
+	struct event_constraint *c;
+
+	if (!event->attr.precise)
+		return NULL;
+
+	if (x86_pmu.pebs_constraints) {
+		for_each_event_constraint(c, x86_pmu.pebs_constraints) {
+			if ((event->hw.config & c->cmask) == c->code)
+				return c;
+		}
+	}
+
+	return &emptyconstraint;
+}
+
+static void intel_pmu_pebs_enable(struct perf_event *event)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
+
+	hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;
+
+	cpuc->pebs_enabled |= 1ULL << hwc->idx;
+	WARN_ON_ONCE(cpuc->enabled);
+
+	if (x86_pmu.intel_cap.pebs_trap)
+		intel_pmu_lbr_enable(event);
+}
+
+static void intel_pmu_pebs_disable(struct perf_event *event)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
+
+	cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
+	if (cpuc->enabled)
+		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
+
+	hwc->config |= ARCH_PERFMON_EVENTSEL_INT;
+
+	if (x86_pmu.intel_cap.pebs_trap)
+		intel_pmu_lbr_disable(event);
+}
+
+static void intel_pmu_pebs_enable_all(void)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+	if (cpuc->pebs_enabled)
+		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
+}
+
+static void intel_pmu_pebs_disable_all(void)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+	if (cpuc->pebs_enabled)
+		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
+}
+
+#include <asm/insn.h>
+
+static inline bool kernel_ip(unsigned long ip)
+{
+#ifdef CONFIG_X86_32
+	return ip > PAGE_OFFSET;
+#else
+	return (long)ip < 0;
+#endif
+}
+
+static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	unsigned long from = cpuc->lbr_entries[0].from;
+	unsigned long old_to, to = cpuc->lbr_entries[0].to;
+	unsigned long ip = regs->ip;
+
+	/*
+	 * We don't need to fixup if the PEBS assist is fault like
+	 */
+	if (!x86_pmu.intel_cap.pebs_trap)
+		return 1;
+
+	/*
+	 * No LBR entry, no basic block, no rewinding
+	 */
+	if (!cpuc->lbr_stack.nr || !from || !to)
+		return 0;
+
+	/*
+	 * Basic blocks should never cross user/kernel boundaries
+	 */
+	if (kernel_ip(ip) != kernel_ip(to))
+		return 0;
+
+	/*
+	 * unsigned math, either ip is before the start (impossible) or
+	 * the basic block is larger than 1 page (sanity)
+	 */
+	if ((ip - to) > PAGE_SIZE)
+		return 0;
+
+	/*
+	 * We sampled a branch insn, rewind using the LBR stack
+	 */
+	if (ip == to) {
+		regs->ip = from;
+		return 1;
+	}
+
+	do {
+		struct insn insn;
+		u8 buf[MAX_INSN_SIZE];
+		void *kaddr;
+
+		old_to = to;
+		if (!kernel_ip(ip)) {
+			int bytes, size = MAX_INSN_SIZE;
+
+			bytes = copy_from_user_nmi(buf, (void __user *)to, size);
+			if (bytes != size)
+				return 0;
+
+			kaddr = buf;
+		} else
+			kaddr = (void *)to;
+
+		kernel_insn_init(&insn, kaddr);
+		insn_get_length(&insn);
+		to += insn.length;
+	} while (to < ip);
+
+	if (to == ip) {
+		regs->ip = old_to;
+		return 1;
+	}
+
+	/*
+	 * Even though we decoded the basic block, the instruction stream
+	 * never matched the given IP, either the TO or the IP got corrupted.
+	 */
+	return 0;
+}
+
+static int intel_pmu_save_and_restart(struct perf_event *event);
+
+static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct debug_store *ds = cpuc->ds;
+	struct perf_event *event = cpuc->events[0]; /* PMC0 only */
+	struct pebs_record_core *at, *top;
+	struct perf_sample_data data;
+	struct perf_raw_record raw;
+	struct pt_regs regs;
+	int n;
+
+	if (!ds || !x86_pmu.pebs)
+		return;
+
+	at  = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
+	top = (struct pebs_record_core *)(unsigned long)ds->pebs_index;
+
+	/*
+	 * Whatever else happens, drain the thing
+	 */
+	ds->pebs_index = ds->pebs_buffer_base;
+
+	if (!test_bit(0, cpuc->active_mask))
+		return;
+
+	WARN_ON_ONCE(!event);
+
+	if (!event->attr.precise)
+		return;
+
+	n = top - at;
+	if (n <= 0)
+		return;
+
+	if (!intel_pmu_save_and_restart(event))
+		return;
+
+	/*
+	 * Should not happen, we program the threshold at 1 and do not
+	 * set a reset value.
+	 */
+	WARN_ON_ONCE(n > 1);
+	at += n - 1;
+
+	perf_sample_data_init(&data, 0);
+	data.period = event->hw.last_period;
+
+	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
+		raw.size = x86_pmu.pebs_record_size;
+		raw.data = at;
+		data.raw = &raw;
+	}
+
+	/*
+	 * We use the interrupt regs as a base because the PEBS record
+	 * does not contain a full regs set, specifically it seems to
+	 * lack segment descriptors, which get used by things like
+	 * user_mode().
+	 *
+	 * In the simple case fix up only the IP and BP,SP regs, for
+	 * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly.
+	 * A possible PERF_SAMPLE_REGS will have to transfer all regs.
+	 */
+	regs = *iregs;
+	regs.ip = at->ip;
+	regs.bp = at->bp;
+	regs.sp = at->sp;
+
+	if (intel_pmu_pebs_fixup_ip(&regs))
+		regs.flags |= PERF_EFLAGS_EXACT;
+	else
+		regs.flags &= ~PERF_EFLAGS_EXACT;
+
+	if (perf_event_overflow(event, 1, &data, &regs))
+		x86_pmu_stop(event);
+}
+
+static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct debug_store *ds = cpuc->ds;
+	struct pebs_record_nhm *at, *top;
+	struct perf_sample_data data;
+	struct perf_event *event = NULL;
+	struct perf_raw_record raw;
+	struct pt_regs regs;
+	u64 status = 0;
+	int bit, n;
+
+	if (!ds || !x86_pmu.pebs)
+		return;
+
+	at  = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
+	top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;
+
+	ds->pebs_index = ds->pebs_buffer_base;
+
+	n = top - at;
+	if (n <= 0)
+		return;
+
+	/*
+	 * Should not happen, we program the threshold at 1 and do not
+	 * set a reset value.
+	 */
+	WARN_ON_ONCE(n > MAX_PEBS_EVENTS);
+
+	for ( ; at < top; at++) {
+		for_each_set_bit(bit, (unsigned long *)&at->status, MAX_PEBS_EVENTS) {
+			event = cpuc->events[bit];
+			if (!test_bit(bit, cpuc->active_mask))
+				continue;
+
+			WARN_ON_ONCE(!event);
+
+			if (!event->attr.precise)
+				continue;
+
+			if (__test_and_set_bit(bit, (unsigned long *)&status))
+				continue;
+
+			break;
+		}
+
+		if (!event || bit >= MAX_PEBS_EVENTS)
+			continue;
+
+		if (!intel_pmu_save_and_restart(event))
+			continue;
+
+		perf_sample_data_init(&data, 0);
+		data.period = event->hw.last_period;
+
+		if (event->attr.sample_type & PERF_SAMPLE_RAW) {
+			raw.size = x86_pmu.pebs_record_size;
+			raw.data = at;
+			data.raw = &raw;
+		}
+
+		/*
+		 * See the comment in intel_pmu_drain_pebs_core()
+		 */
+		regs = *iregs;
+		regs.ip = at->ip;
+		regs.bp = at->bp;
+		regs.sp = at->sp;
+
+		if (intel_pmu_pebs_fixup_ip(&regs))
+			regs.flags |= PERF_EFLAGS_EXACT;
+		else
+			regs.flags &= ~PERF_EFLAGS_EXACT;
+
+		if (perf_event_overflow(event, 1, &data, &regs))
+			x86_pmu_stop(event);
+	}
+}
+
+/*
+ * BTS, PEBS probe and setup
+ */
+
+static void intel_ds_init(void)
+{
+	/*
+	 * No support for 32bit formats
+	 */
+	if (!boot_cpu_has(X86_FEATURE_DTES64))
+		return;
+
+	x86_pmu.bts  = boot_cpu_has(X86_FEATURE_BTS);
+	x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
+	if (x86_pmu.pebs) {
+		char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-';
+		int format = x86_pmu.intel_cap.pebs_format;
+
+		switch (format) {
+		case 0:
+			printk(KERN_CONT "PEBS fmt0%c, ", pebs_type);
+			x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
+			x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
+			x86_pmu.pebs_constraints = intel_core_pebs_events;
+			break;
+
+		case 1:
+			printk(KERN_CONT "PEBS fmt1%c, ", pebs_type);
+			x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
+			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
+			x86_pmu.pebs_constraints = intel_nehalem_pebs_events;
+			break;
+
+		default:
+			printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type);
+			x86_pmu.pebs = 0;
+			break;
+		}
+	}
+}
+
+#else /* CONFIG_CPU_SUP_INTEL */
+
+static int reserve_ds_buffers(void)
+{
+	return 0;
+}
+
+static void release_ds_buffers(void)
+{
+}
+
+#endif /* CONFIG_CPU_SUP_INTEL */
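Editorial aside (not part of the commit): intel_pmu_pebs_fixup_ip() above corrects a trap-style PEBS IP, which points one instruction past the one that retired, by decoding forward from the last LBR branch target until it reaches the reported IP; the previous decode position is the instruction that actually caused the event. A hedged userspace toy of just that walk, with an invented fixed table standing in for the kernel's instruction-length decoder:

#include <stdio.h>

/*
 * Toy model: a basic block starting at 'to' (the last LBR branch target)
 * made of instructions with known lengths. PEBS reported 'ip' one insn
 * past the event; walk forward remembering the previous position,
 * mirroring intel_pmu_pebs_fixup_ip().
 */
static const unsigned insn_lengths[] = { 3, 2, 5, 1, 4 };	/* invented */

int main(void)
{
	unsigned long to = 0x1000;	/* LBR entry: branch target */
	unsigned long ip = 0x100b;	/* PEBS IP: one insn past the event */
	unsigned long old_to = to;
	unsigned i = 0;

	while (to < ip) {
		old_to = to;
		to += insn_lengths[i++];	/* kernel: insn_get_length() */
	}

	if (to == ip)
		printf("exact IP: %#lx (rewound from %#lx)\n", old_to, ip);
	else
		printf("decode never hit ip, give up\n");
	return 0;
}

Here the walk visits 0x1000, 0x1003, 0x1005, 0x100a and stops at 0x100b == ip, so the sample is attributed to the instruction at 0x100a.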
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -0,0 +1,218 @@
+#ifdef CONFIG_CPU_SUP_INTEL
+
+enum {
+	LBR_FORMAT_32		= 0x00,
+	LBR_FORMAT_LIP		= 0x01,
+	LBR_FORMAT_EIP		= 0x02,
+	LBR_FORMAT_EIP_FLAGS	= 0x03,
+};
+
+/*
+ * We only support LBR implementations that have FREEZE_LBRS_ON_PMI
+ * otherwise it becomes near impossible to get a reliable stack.
+ */
+
+static void __intel_pmu_lbr_enable(void)
+{
+	u64 debugctl;
+
+	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+	debugctl |= (DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
+	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+}
+
+static void __intel_pmu_lbr_disable(void)
+{
+	u64 debugctl;
+
+	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+	debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
+	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+}
+
+static void intel_pmu_lbr_reset_32(void)
+{
+	int i;
+
+	for (i = 0; i < x86_pmu.lbr_nr; i++)
+		wrmsrl(x86_pmu.lbr_from + i, 0);
+}
+
+static void intel_pmu_lbr_reset_64(void)
+{
+	int i;
+
+	for (i = 0; i < x86_pmu.lbr_nr; i++) {
+		wrmsrl(x86_pmu.lbr_from + i, 0);
+		wrmsrl(x86_pmu.lbr_to   + i, 0);
+	}
+}
+
+static void intel_pmu_lbr_reset(void)
+{
+	if (!x86_pmu.lbr_nr)
+		return;
+
+	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
+		intel_pmu_lbr_reset_32();
+	else
+		intel_pmu_lbr_reset_64();
+}
+
+static void intel_pmu_lbr_enable(struct perf_event *event)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+	if (!x86_pmu.lbr_nr)
+		return;
+
+	WARN_ON_ONCE(cpuc->enabled);
+
+	/*
+	 * Reset the LBR stack if we changed task context to
+	 * avoid data leaks.
+	 */
+
+	if (event->ctx->task && cpuc->lbr_context != event->ctx) {
+		intel_pmu_lbr_reset();
+		cpuc->lbr_context = event->ctx;
+	}
+
+	cpuc->lbr_users++;
+}
+
+static void intel_pmu_lbr_disable(struct perf_event *event)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+	if (!x86_pmu.lbr_nr)
+		return;
+
+	cpuc->lbr_users--;
+	WARN_ON_ONCE(cpuc->lbr_users < 0);
+
+	if (cpuc->enabled && !cpuc->lbr_users)
+		__intel_pmu_lbr_disable();
+}
+
+static void intel_pmu_lbr_enable_all(void)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+	if (cpuc->lbr_users)
+		__intel_pmu_lbr_enable();
+}
+
+static void intel_pmu_lbr_disable_all(void)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+	if (cpuc->lbr_users)
+		__intel_pmu_lbr_disable();
+}
+
+static inline u64 intel_pmu_lbr_tos(void)
+{
+	u64 tos;
+
+	rdmsrl(x86_pmu.lbr_tos, tos);
+
+	return tos;
+}
+
+static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
+{
+	unsigned long mask = x86_pmu.lbr_nr - 1;
+	u64 tos = intel_pmu_lbr_tos();
+	int i;
+
+	for (i = 0; i < x86_pmu.lbr_nr; i++) {
+		unsigned long lbr_idx = (tos - i) & mask;
+		union {
+			struct {
+				u32 from;
+				u32 to;
+			};
+			u64     lbr;
+		} msr_lastbranch;
+
+		rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);
+
+		cpuc->lbr_entries[i].from  = msr_lastbranch.from;
+		cpuc->lbr_entries[i].to    = msr_lastbranch.to;
+		cpuc->lbr_entries[i].flags = 0;
+	}
+	cpuc->lbr_stack.nr = i;
+}
+
+#define LBR_FROM_FLAG_MISPRED	(1ULL << 63)
+
+/*
+ * Due to lack of segmentation in Linux the effective address (offset)
+ * is the same as the linear address, allowing us to merge the LIP and EIP
+ * LBR formats.
+ */
+static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
+{
+	unsigned long mask = x86_pmu.lbr_nr - 1;
+	int lbr_format = x86_pmu.intel_cap.lbr_format;
+	u64 tos = intel_pmu_lbr_tos();
+	int i;
+
+	for (i = 0; i < x86_pmu.lbr_nr; i++) {
+		unsigned long lbr_idx = (tos - i) & mask;
+		u64 from, to, flags = 0;
+
+		rdmsrl(x86_pmu.lbr_from + lbr_idx, from);
+		rdmsrl(x86_pmu.lbr_to   + lbr_idx, to);
+
+		if (lbr_format == LBR_FORMAT_EIP_FLAGS) {
+			flags = !!(from & LBR_FROM_FLAG_MISPRED);
+			from = (u64)((((s64)from) << 1) >> 1);
+		}
+
+		cpuc->lbr_entries[i].from  = from;
+		cpuc->lbr_entries[i].to    = to;
+		cpuc->lbr_entries[i].flags = flags;
+	}
+	cpuc->lbr_stack.nr = i;
+}
+
+static void intel_pmu_lbr_read(void)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+	if (!cpuc->lbr_users)
+		return;
+
+	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
+		intel_pmu_lbr_read_32(cpuc);
+	else
+		intel_pmu_lbr_read_64(cpuc);
+}
+
+static void intel_pmu_lbr_init_core(void)
+{
+	x86_pmu.lbr_nr   = 4;
+	x86_pmu.lbr_tos  = 0x01c9;
+	x86_pmu.lbr_from = 0x40;
+	x86_pmu.lbr_to   = 0x60;
+}
+
+static void intel_pmu_lbr_init_nhm(void)
+{
+	x86_pmu.lbr_nr   = 16;
+	x86_pmu.lbr_tos  = 0x01c9;
+	x86_pmu.lbr_from = 0x680;
+	x86_pmu.lbr_to   = 0x6c0;
+}
+
+static void intel_pmu_lbr_init_atom(void)
+{
+	x86_pmu.lbr_nr   = 8;
+	x86_pmu.lbr_tos  = 0x01c9;
+	x86_pmu.lbr_from = 0x40;
+	x86_pmu.lbr_to   = 0x60;
+}
+
+#endif /* CONFIG_CPU_SUP_INTEL */
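Editorial aside (not part of the commit): intel_pmu_lbr_read_64() above walks the LBR stack newest-to-oldest with (tos - i) & mask, and for the EIP+FLAGS format recovers the mispredict flag from bit 63 of the FROM address, restoring the address via a shift-left/arithmetic-shift-right pair. A small hedged demo of both idioms, using an assumed 16-entry (power-of-two) stack:

#include <stdio.h>
#include <stdint.h>

#define LBR_NR			16	/* Nehalem-style: 16 entries */
#define LBR_FROM_FLAG_MISPRED	(1ULL << 63)

int main(void)
{
	unsigned long mask = LBR_NR - 1;
	uint64_t tos = 2;		/* top-of-stack index read from the TOS MSR */
	int i;

	/* newest to oldest; the AND makes the index wrap: 2, 1, 0, 15 */
	for (i = 0; i < 4; i++)
		printf("entry %d -> slot %lu\n", i, (unsigned long)((tos - i) & mask));

	/*
	 * EIP+FLAGS format: hardware stores the mispredict flag in bit 63 of
	 * FROM; shifting left then arithmetic-shifting right sign-extends
	 * bit 62 back over bit 63, restoring a canonical address.
	 */
	uint64_t from = 0x00007f0012345678ULL | LBR_FROM_FLAG_MISPRED;
	int mispred   = !!(from & LBR_FROM_FLAG_MISPRED);

	from = (uint64_t)((int64_t)(from << 1) >> 1);
	printf("mispred=%d from=%#llx\n", mispred, (unsigned long long)from);
	return 0;
}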
@ -0,0 +1,834 @@
/*
 * Netburst Performance Events (P4, old Xeon)
 *
 * Copyright (C) 2010 Parallels, Inc., Cyrill Gorcunov <gorcunov@openvz.org>
 * Copyright (C) 2010 Intel Corporation, Lin Ming <ming.m.lin@intel.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#ifdef CONFIG_CPU_SUP_INTEL

#include <asm/perf_event_p4.h>

#define P4_CNTR_LIMIT 3
/*
 * array indices: 0,1 - HT threads, used with HT enabled cpu
 */
struct p4_event_bind {
	unsigned int opcode;			/* Event code and ESCR selector */
	unsigned int escr_msr[2];		/* ESCR MSR for this event */
	unsigned char cntr[2][P4_CNTR_LIMIT];	/* counter index (offset), -1 on absence */
};

struct p4_cache_event_bind {
	unsigned int metric_pebs;
	unsigned int metric_vert;
};

#define P4_GEN_CACHE_EVENT_BIND(name)		\
	[P4_CACHE__##name] = {			\
		.metric_pebs = P4_PEBS__##name,	\
		.metric_vert = P4_VERT__##name,	\
	}

static struct p4_cache_event_bind p4_cache_event_bind_map[] = {
	P4_GEN_CACHE_EVENT_BIND(1stl_cache_load_miss_retired),
	P4_GEN_CACHE_EVENT_BIND(2ndl_cache_load_miss_retired),
	P4_GEN_CACHE_EVENT_BIND(dtlb_load_miss_retired),
	P4_GEN_CACHE_EVENT_BIND(dtlb_store_miss_retired),
};
/*
 * Note that we don't use CCCR1 here; there is an
 * exception for P4_BSQ_ALLOCATION but we have no
 * workaround for it.
 *
 * Consider this binding as resources which a particular
 * event may borrow; it doesn't contain EventMask,
 * Tags and friends -- they are left to the caller.
 */
static struct p4_event_bind p4_event_bind_map[] = {
	[P4_EVENT_TC_DELIVER_MODE] = {
		.opcode		= P4_OPCODE(P4_EVENT_TC_DELIVER_MODE),
		.escr_msr	= { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 },
		.cntr		= { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_BPU_FETCH_REQUEST] = {
		.opcode		= P4_OPCODE(P4_EVENT_BPU_FETCH_REQUEST),
		.escr_msr	= { MSR_P4_BPU_ESCR0, MSR_P4_BPU_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_ITLB_REFERENCE] = {
		.opcode		= P4_OPCODE(P4_EVENT_ITLB_REFERENCE),
		.escr_msr	= { MSR_P4_ITLB_ESCR0, MSR_P4_ITLB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_MEMORY_CANCEL] = {
		.opcode		= P4_OPCODE(P4_EVENT_MEMORY_CANCEL),
		.escr_msr	= { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_MEMORY_COMPLETE] = {
		.opcode		= P4_OPCODE(P4_EVENT_MEMORY_COMPLETE),
		.escr_msr	= { MSR_P4_SAAT_ESCR0, MSR_P4_SAAT_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_LOAD_PORT_REPLAY] = {
		.opcode		= P4_OPCODE(P4_EVENT_LOAD_PORT_REPLAY),
		.escr_msr	= { MSR_P4_SAAT_ESCR0, MSR_P4_SAAT_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_STORE_PORT_REPLAY] = {
		.opcode		= P4_OPCODE(P4_EVENT_STORE_PORT_REPLAY),
		.escr_msr	= { MSR_P4_SAAT_ESCR0, MSR_P4_SAAT_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_MOB_LOAD_REPLAY] = {
		.opcode		= P4_OPCODE(P4_EVENT_MOB_LOAD_REPLAY),
		.escr_msr	= { MSR_P4_MOB_ESCR0, MSR_P4_MOB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_PAGE_WALK_TYPE] = {
		.opcode		= P4_OPCODE(P4_EVENT_PAGE_WALK_TYPE),
		.escr_msr	= { MSR_P4_PMH_ESCR0, MSR_P4_PMH_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_BSQ_CACHE_REFERENCE] = {
		.opcode		= P4_OPCODE(P4_EVENT_BSQ_CACHE_REFERENCE),
		.escr_msr	= { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_IOQ_ALLOCATION] = {
		.opcode		= P4_OPCODE(P4_EVENT_IOQ_ALLOCATION),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_IOQ_ACTIVE_ENTRIES] = {	/* shared ESCR */
		.opcode		= P4_OPCODE(P4_EVENT_IOQ_ACTIVE_ENTRIES),
		.escr_msr	= { MSR_P4_FSB_ESCR1, MSR_P4_FSB_ESCR1 },
		.cntr		= { {2, -1, -1}, {3, -1, -1} },
	},
	[P4_EVENT_FSB_DATA_ACTIVITY] = {
		.opcode		= P4_OPCODE(P4_EVENT_FSB_DATA_ACTIVITY),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_BSQ_ALLOCATION] = {		/* shared ESCR, broken CCCR1 */
		.opcode		= P4_OPCODE(P4_EVENT_BSQ_ALLOCATION),
		.escr_msr	= { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR0 },
		.cntr		= { {0, -1, -1}, {1, -1, -1} },
	},
	[P4_EVENT_BSQ_ACTIVE_ENTRIES] = {	/* shared ESCR */
		.opcode		= P4_OPCODE(P4_EVENT_BSQ_ACTIVE_ENTRIES),
		.escr_msr	= { MSR_P4_BSU_ESCR1, MSR_P4_BSU_ESCR1 },
		.cntr		= { {2, -1, -1}, {3, -1, -1} },
	},
	[P4_EVENT_SSE_INPUT_ASSIST] = {
		.opcode		= P4_OPCODE(P4_EVENT_SSE_INPUT_ASSIST),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_PACKED_SP_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_PACKED_SP_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_PACKED_DP_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_PACKED_DP_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_SCALAR_SP_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_SCALAR_SP_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_SCALAR_DP_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_SCALAR_DP_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_64BIT_MMX_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_64BIT_MMX_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_128BIT_MMX_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_128BIT_MMX_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_X87_FP_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_X87_FP_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_TC_MISC] = {
		.opcode		= P4_OPCODE(P4_EVENT_TC_MISC),
		.escr_msr	= { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 },
		.cntr		= { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_GLOBAL_POWER_EVENTS] = {
		.opcode		= P4_OPCODE(P4_EVENT_GLOBAL_POWER_EVENTS),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_TC_MS_XFER] = {
		.opcode		= P4_OPCODE(P4_EVENT_TC_MS_XFER),
		.escr_msr	= { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 },
		.cntr		= { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_UOP_QUEUE_WRITES] = {
		.opcode		= P4_OPCODE(P4_EVENT_UOP_QUEUE_WRITES),
		.escr_msr	= { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 },
		.cntr		= { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE] = {
		.opcode		= P4_OPCODE(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE),
		.escr_msr	= { MSR_P4_TBPU_ESCR0, MSR_P4_TBPU_ESCR0 },
		.cntr		= { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_RETIRED_BRANCH_TYPE] = {
		.opcode		= P4_OPCODE(P4_EVENT_RETIRED_BRANCH_TYPE),
		.escr_msr	= { MSR_P4_TBPU_ESCR0, MSR_P4_TBPU_ESCR1 },
		.cntr		= { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_RESOURCE_STALL] = {
		.opcode		= P4_OPCODE(P4_EVENT_RESOURCE_STALL),
		.escr_msr	= { MSR_P4_ALF_ESCR0, MSR_P4_ALF_ESCR1 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_WC_BUFFER] = {
		.opcode		= P4_OPCODE(P4_EVENT_WC_BUFFER),
		.escr_msr	= { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_B2B_CYCLES] = {
		.opcode		= P4_OPCODE(P4_EVENT_B2B_CYCLES),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_BNR] = {
		.opcode		= P4_OPCODE(P4_EVENT_BNR),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_SNOOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_SNOOP),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_RESPONSE] = {
		.opcode		= P4_OPCODE(P4_EVENT_RESPONSE),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_FRONT_END_EVENT] = {
		.opcode		= P4_OPCODE(P4_EVENT_FRONT_END_EVENT),
		.escr_msr	= { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_EXECUTION_EVENT] = {
		.opcode		= P4_OPCODE(P4_EVENT_EXECUTION_EVENT),
		.escr_msr	= { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_REPLAY_EVENT] = {
		.opcode		= P4_OPCODE(P4_EVENT_REPLAY_EVENT),
		.escr_msr	= { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_INSTR_RETIRED] = {
		.opcode		= P4_OPCODE(P4_EVENT_INSTR_RETIRED),
		.escr_msr	= { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_UOPS_RETIRED] = {
		.opcode		= P4_OPCODE(P4_EVENT_UOPS_RETIRED),
		.escr_msr	= { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_UOP_TYPE] = {
		.opcode		= P4_OPCODE(P4_EVENT_UOP_TYPE),
		.escr_msr	= { MSR_P4_RAT_ESCR0, MSR_P4_RAT_ESCR1 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_BRANCH_RETIRED] = {
		.opcode		= P4_OPCODE(P4_EVENT_BRANCH_RETIRED),
		.escr_msr	= { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_MISPRED_BRANCH_RETIRED] = {
		.opcode		= P4_OPCODE(P4_EVENT_MISPRED_BRANCH_RETIRED),
		.escr_msr	= { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_X87_ASSIST] = {
		.opcode		= P4_OPCODE(P4_EVENT_X87_ASSIST),
		.escr_msr	= { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_MACHINE_CLEAR] = {
		.opcode		= P4_OPCODE(P4_EVENT_MACHINE_CLEAR),
		.escr_msr	= { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_INSTR_COMPLETED] = {
		.opcode		= P4_OPCODE(P4_EVENT_INSTR_COMPLETED),
		.escr_msr	= { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
};
#define P4_GEN_CACHE_EVENT(event, bit, cache_event)			\
	p4_config_pack_escr(P4_ESCR_EVENT(event)		|	\
			    P4_ESCR_EMASK_BIT(event, bit))	|	\
	p4_config_pack_cccr(cache_event				|	\
			    P4_CCCR_ESEL(P4_OPCODE_ESEL(P4_OPCODE(event))))
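For orientation, a sketch of the config packing used throughout this file. The exact bit layouts (ESCR in the upper 32 bits, CCCR in the lower, event code and ESCR select packed into the opcode) are assumptions here; the authoritative definitions live in asm/perf_event_p4.h.

#include <stdint.h>

/* assumed layout: ESCR in config[63:32], CCCR in config[31:0] */
static inline uint64_t pack_escr(uint32_t escr)   { return (uint64_t)escr << 32; }
static inline uint64_t pack_cccr(uint32_t cccr)   { return (uint64_t)cccr; }
static inline uint32_t unpack_escr(uint64_t conf) { return (uint32_t)(conf >> 32); }
static inline uint32_t unpack_cccr(uint64_t conf) { return (uint32_t)conf; }

/* assumed opcode layout: event code in bits 15:8, ESCR select in bits 7:0,
 * mirroring what the P4_OPCODE_EVNT()/P4_OPCODE_ESEL() accessors suggest */
static inline unsigned int opcode_evnt(unsigned int opcode) { return (opcode >> 8) & 0xff; }
static inline unsigned int opcode_esel(unsigned int opcode) { return opcode & 0xff; }

Keeping both halves in one u64 lets a single hw.config value carry everything the enable path needs to program the ESCR/CCCR register pair.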
static __initconst const u64 p4_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
						P4_CACHE__1stl_cache_load_miss_retired),
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
						P4_CACHE__2ndl_cache_load_miss_retired),
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
						P4_CACHE__dtlb_load_miss_retired),
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
						P4_CACHE__dtlb_store_miss_retired),
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_ITLB_REFERENCE, HIT,
						P4_CACHE__itlb_reference_hit),
		[ C(RESULT_MISS)   ] = P4_GEN_CACHE_EVENT(P4_EVENT_ITLB_REFERENCE, MISS,
						P4_CACHE__itlb_reference_miss),
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static u64 p4_general_events[PERF_COUNT_HW_MAX] = {
	/* non-halted CPU clocks */
	[PERF_COUNT_HW_CPU_CYCLES] =
		p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_GLOBAL_POWER_EVENTS)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING)),

	/*
	 * retired instructions
	 * for the sake of simplicity we don't use the FSB tagging
	 */
	[PERF_COUNT_HW_INSTRUCTIONS] =
		p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_INSTR_RETIRED)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, NBOGUSNTAG)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, BOGUSNTAG)),

	/* cache hits */
	[PERF_COUNT_HW_CACHE_REFERENCES] =
		p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_BSQ_CACHE_REFERENCE)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITS)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITE)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITM)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITS)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITE)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITM)),

	/* cache misses */
	[PERF_COUNT_HW_CACHE_MISSES] =
		p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_BSQ_CACHE_REFERENCE)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_MISS)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_MISS)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, WR_2ndL_MISS)),

	/* branch instructions retired */
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] =
		p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_RETIRED_BRANCH_TYPE)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CONDITIONAL)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CALL)		|
			P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, RETURN)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, INDIRECT)),

	/* mispredicted branches retired */
	[PERF_COUNT_HW_BRANCH_MISSES] =
		p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_MISPRED_BRANCH_RETIRED)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS)),

	/* bus ready clocks (cpu is driving #DRDY_DRV/#DRDY_OWN): */
	[PERF_COUNT_HW_BUS_CYCLES] =
		p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_FSB_DATA_ACTIVITY)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_DRV)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OWN))	|
		p4_config_pack_cccr(P4_CCCR_EDGE | P4_CCCR_COMPARE),
};
static struct p4_event_bind *p4_config_get_bind(u64 config)
{
	unsigned int evnt = p4_config_unpack_event(config);
	struct p4_event_bind *bind = NULL;

	if (evnt < ARRAY_SIZE(p4_event_bind_map))
		bind = &p4_event_bind_map[evnt];

	return bind;
}

static u64 p4_pmu_event_map(int hw_event)
{
	struct p4_event_bind *bind;
	unsigned int esel;
	u64 config;

	if (hw_event >= ARRAY_SIZE(p4_general_events)) {
		printk_once(KERN_ERR "P4 PMU: Bad index: %i\n", hw_event);
		return 0;
	}

	config = p4_general_events[hw_event];
	bind = p4_config_get_bind(config);
	esel = P4_OPCODE_ESEL(bind->opcode);
	config |= p4_config_pack_cccr(P4_CCCR_ESEL(esel));

	return config;
}
static int p4_hw_config(struct perf_event *event)
{
	int cpu = raw_smp_processor_id();
	u32 escr, cccr;

	/*
	 * the reason we use cpu so early is that if we get scheduled
	 * for the first time on the same cpu, we will not need to swap
	 * the thread specific flags in the config (and will save some
	 * cpu cycles)
	 */

	cccr = p4_default_cccr_conf(cpu);
	escr = p4_default_escr_conf(cpu, event->attr.exclude_kernel,
					event->attr.exclude_user);
	event->hw.config = p4_config_pack_escr(escr) |
			   p4_config_pack_cccr(cccr);

	if (p4_ht_active() && p4_ht_thread(cpu))
		event->hw.config = p4_set_ht_bit(event->hw.config);

	if (event->attr.type != PERF_TYPE_RAW)
		return 0;

	/*
	 * We don't control raw events so it's up to the caller
	 * to pass sane values (and we don't count the thread number
	 * on HT machines but allow HT-compatible specifics to be
	 * passed on)
	 *
	 * XXX: HT wide things should check perf_paranoid_cpu() &&
	 *      CAP_SYS_ADMIN
	 */
	event->hw.config |= event->attr.config &
		(p4_config_pack_escr(P4_ESCR_MASK_HT) |
		 p4_config_pack_cccr(P4_CCCR_MASK_HT));

	return 0;
}
static inline void p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
{
	unsigned long dummy;

	rdmsrl(hwc->config_base + hwc->idx, dummy);
	if (dummy & P4_CCCR_OVF) {
		(void)checking_wrmsrl(hwc->config_base + hwc->idx,
			((u64)dummy) & ~P4_CCCR_OVF);
	}
}

static inline void p4_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * If the event gets disabled while the counter is in an
	 * overflowed state we need to clear P4_CCCR_OVF, otherwise
	 * the interrupt gets asserted again and again
	 */
	(void)checking_wrmsrl(hwc->config_base + hwc->idx,
		(u64)(p4_config_unpack_cccr(hwc->config)) &
			~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED);
}

static void p4_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct perf_event *event = cpuc->events[idx];
		if (!test_bit(idx, cpuc->active_mask))
			continue;
		p4_pmu_disable_event(event);
	}
}
static void p4_pmu_enable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int thread = p4_ht_config_thread(hwc->config);
	u64 escr_conf = p4_config_unpack_escr(p4_clear_ht_bit(hwc->config));
	unsigned int idx = p4_config_unpack_event(hwc->config);
	unsigned int idx_cache = p4_config_unpack_cache_event(hwc->config);
	struct p4_event_bind *bind;
	struct p4_cache_event_bind *bind_cache;
	u64 escr_addr, cccr;

	bind = &p4_event_bind_map[idx];
	escr_addr = (u64)bind->escr_msr[thread];

	/*
	 * - we don't support cascaded counters yet
	 * - and counter 1 is broken (erratum)
	 */
	WARN_ON_ONCE(p4_is_event_cascaded(hwc->config));
	WARN_ON_ONCE(hwc->idx == 1);

	/* we need a real Event value */
	escr_conf &= ~P4_ESCR_EVENT_MASK;
	escr_conf |= P4_ESCR_EVENT(P4_OPCODE_EVNT(bind->opcode));

	cccr = p4_config_unpack_cccr(hwc->config);

	/*
	 * it could be a cache event, in which case we need to
	 * set the metrics into the additional MSRs
	 */
	BUILD_BUG_ON(P4_CACHE__MAX > P4_CCCR_CACHE_OPS_MASK);
	if (idx_cache > P4_CACHE__NONE &&
		idx_cache < ARRAY_SIZE(p4_cache_event_bind_map)) {
		bind_cache = &p4_cache_event_bind_map[idx_cache];
		(void)checking_wrmsrl(MSR_IA32_PEBS_ENABLE, (u64)bind_cache->metric_pebs);
		(void)checking_wrmsrl(MSR_P4_PEBS_MATRIX_VERT, (u64)bind_cache->metric_vert);
	}

	(void)checking_wrmsrl(escr_addr, escr_conf);
	(void)checking_wrmsrl(hwc->config_base + hwc->idx,
				(cccr & ~P4_CCCR_RESERVED) | P4_CCCR_ENABLE);
}

static void p4_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct perf_event *event = cpuc->events[idx];
		if (!test_bit(idx, cpuc->active_mask))
			continue;
		p4_pmu_enable_event(event);
	}
}
static int p4_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int idx, handled = 0;
	u64 val;

	data.addr = 0;
	data.raw = NULL;

	cpuc = &__get_cpu_var(cpu_hw_events);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		event = cpuc->events[idx];
		hwc = &event->hw;

		WARN_ON_ONCE(hwc->idx != idx);

		/*
		 * FIXME: Redundant call, actually not needed
		 * but just to check if we're screwed
		 */
		p4_pmu_clear_cccr_ovf(hwc);

		val = x86_perf_event_update(event);
		if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
			continue;

		/*
		 * event overflow
		 */
		handled = 1;
		data.period = event->hw.last_period;

		if (!x86_perf_event_set_period(event))
			continue;
		if (perf_event_overflow(event, 1, &data, regs))
			p4_pmu_disable_event(event);
	}

	if (handled) {
		/* p4 quirk: unmask it again */
		apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);
		inc_irq_stat(apic_perf_irqs);
	}

	return handled;
}
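The overflow test in the handler above relies on counters being armed with a negative value: while the counter is still counting toward zero its top implemented bit stays set, and the bit clearing is what signals an overflow. A standalone sketch with made-up numbers:

#include <stdio.h>
#include <stdint.h>

#define CNTVAL_BITS 40

int main(void)
{
	uint64_t mask = (1ULL << CNTVAL_BITS) - 1;
	uint64_t period = 100000;
	uint64_t armed = (0 - period) & mask;	/* counter armed with -period */

	uint64_t before = (armed + 42) & mask;		/* still counting */
	uint64_t after  = (armed + period + 7) & mask;	/* wrapped past zero */

	printf("before: top bit %s\n",
	       (before & (1ULL << (CNTVAL_BITS - 1))) ? "set (no overflow)" : "clear (overflow)");
	printf("after:  top bit %s\n",
	       (after & (1ULL << (CNTVAL_BITS - 1))) ? "set (no overflow)" : "clear (overflow)");
	return 0;
}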
/*
 * swap thread specific fields according to the thread
 * we are going to run on
 */
static void p4_pmu_swap_config_ts(struct hw_perf_event *hwc, int cpu)
{
	u32 escr, cccr;

	/*
	 * either we are lucky and continue on the same cpu, or
	 * there is no HT support
	 */
	if (!p4_should_swap_ts(hwc->config, cpu))
		return;

	/*
	 * the event was migrated from another logical
	 * cpu, so we need to swap the thread specific flags
	 */

	escr = p4_config_unpack_escr(hwc->config);
	cccr = p4_config_unpack_cccr(hwc->config);

	if (p4_ht_thread(cpu)) {
		cccr &= ~P4_CCCR_OVF_PMI_T0;
		cccr |= P4_CCCR_OVF_PMI_T1;
		if (escr & P4_ESCR_T0_OS) {
			escr &= ~P4_ESCR_T0_OS;
			escr |= P4_ESCR_T1_OS;
		}
		if (escr & P4_ESCR_T0_USR) {
			escr &= ~P4_ESCR_T0_USR;
			escr |= P4_ESCR_T1_USR;
		}
		hwc->config  = p4_config_pack_escr(escr);
		hwc->config |= p4_config_pack_cccr(cccr);
		hwc->config |= P4_CONFIG_HT;
	} else {
		cccr &= ~P4_CCCR_OVF_PMI_T1;
		cccr |= P4_CCCR_OVF_PMI_T0;
		if (escr & P4_ESCR_T1_OS) {
			escr &= ~P4_ESCR_T1_OS;
			escr |= P4_ESCR_T0_OS;
		}
		if (escr & P4_ESCR_T1_USR) {
			escr &= ~P4_ESCR_T1_USR;
			escr |= P4_ESCR_T0_USR;
		}
		hwc->config  = p4_config_pack_escr(escr);
		hwc->config |= p4_config_pack_cccr(cccr);
		hwc->config &= ~P4_CONFIG_HT;
	}
}
/* ESCRs are not sequential in memory so we need a map */
static const unsigned int p4_escr_map[ARCH_P4_TOTAL_ESCR] = {
	MSR_P4_ALF_ESCR0,	/*  0 */
	MSR_P4_ALF_ESCR1,	/*  1 */
	MSR_P4_BPU_ESCR0,	/*  2 */
	MSR_P4_BPU_ESCR1,	/*  3 */
	MSR_P4_BSU_ESCR0,	/*  4 */
	MSR_P4_BSU_ESCR1,	/*  5 */
	MSR_P4_CRU_ESCR0,	/*  6 */
	MSR_P4_CRU_ESCR1,	/*  7 */
	MSR_P4_CRU_ESCR2,	/*  8 */
	MSR_P4_CRU_ESCR3,	/*  9 */
	MSR_P4_CRU_ESCR4,	/* 10 */
	MSR_P4_CRU_ESCR5,	/* 11 */
	MSR_P4_DAC_ESCR0,	/* 12 */
	MSR_P4_DAC_ESCR1,	/* 13 */
	MSR_P4_FIRM_ESCR0,	/* 14 */
	MSR_P4_FIRM_ESCR1,	/* 15 */
	MSR_P4_FLAME_ESCR0,	/* 16 */
	MSR_P4_FLAME_ESCR1,	/* 17 */
	MSR_P4_FSB_ESCR0,	/* 18 */
	MSR_P4_FSB_ESCR1,	/* 19 */
	MSR_P4_IQ_ESCR0,	/* 20 */
	MSR_P4_IQ_ESCR1,	/* 21 */
	MSR_P4_IS_ESCR0,	/* 22 */
	MSR_P4_IS_ESCR1,	/* 23 */
	MSR_P4_ITLB_ESCR0,	/* 24 */
	MSR_P4_ITLB_ESCR1,	/* 25 */
	MSR_P4_IX_ESCR0,	/* 26 */
	MSR_P4_IX_ESCR1,	/* 27 */
	MSR_P4_MOB_ESCR0,	/* 28 */
	MSR_P4_MOB_ESCR1,	/* 29 */
	MSR_P4_MS_ESCR0,	/* 30 */
	MSR_P4_MS_ESCR1,	/* 31 */
	MSR_P4_PMH_ESCR0,	/* 32 */
	MSR_P4_PMH_ESCR1,	/* 33 */
	MSR_P4_RAT_ESCR0,	/* 34 */
	MSR_P4_RAT_ESCR1,	/* 35 */
	MSR_P4_SAAT_ESCR0,	/* 36 */
	MSR_P4_SAAT_ESCR1,	/* 37 */
	MSR_P4_SSU_ESCR0,	/* 38 */
	MSR_P4_SSU_ESCR1,	/* 39 */
	MSR_P4_TBPU_ESCR0,	/* 40 */
	MSR_P4_TBPU_ESCR1,	/* 41 */
	MSR_P4_TC_ESCR0,	/* 42 */
	MSR_P4_TC_ESCR1,	/* 43 */
	MSR_P4_U2L_ESCR0,	/* 44 */
	MSR_P4_U2L_ESCR1,	/* 45 */
};

static int p4_get_escr_idx(unsigned int addr)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(p4_escr_map); i++) {
		if (addr == p4_escr_map[i])
			return i;
	}

	return -1;
}
static int p4_next_cntr(int thread, unsigned long *used_mask,
			struct p4_event_bind *bind)
{
	int i, j;

	for (i = 0; i < P4_CNTR_LIMIT; i++) {
		j = bind->cntr[thread][i];
		if (j == -1 || !test_bit(j, used_mask))
			return j;
	}

	return -1;
}
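A standalone harness for the counter-selection logic, with test_bit() stubbed and the index byte declared signed in this simplified model so the -1 sentinel compares cleanly; the bindings and mask are made up:

#include <stdio.h>

#define P4_CNTR_LIMIT 3

/* stand-in for the kernel's test_bit(): bit nr of a single-word mask */
static int test_bit(int nr, const unsigned long *mask)
{
	return (*mask >> nr) & 1;
}

struct p4_event_bind {
	signed char cntr[2][P4_CNTR_LIMIT];	/* simplified: signed for the -1 sentinel */
};

static int p4_next_cntr(int thread, unsigned long *used_mask,
			struct p4_event_bind *bind)
{
	int i, j;

	for (i = 0; i < P4_CNTR_LIMIT; i++) {
		j = bind->cntr[thread][i];
		if (j == -1 || !test_bit(j, used_mask))
			return j;
	}

	return -1;
}

int main(void)
{
	struct p4_event_bind bind = { .cntr = { {12, 13, 16}, {14, 15, 17} } };
	unsigned long used = 1UL << 12;	/* counter 12 already taken */

	/* expect 13: the first free candidate counter for thread 0 */
	printf("picked counter %d\n", p4_next_cntr(0, &used, &bind));
	return 0;
}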
static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
	unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long escr_mask[BITS_TO_LONGS(ARCH_P4_TOTAL_ESCR)];
	int cpu = raw_smp_processor_id();
	struct hw_perf_event *hwc;
	struct p4_event_bind *bind;
	unsigned int i, thread, num;
	int cntr_idx, escr_idx;

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);
	bitmap_zero(escr_mask, ARCH_P4_TOTAL_ESCR);

	for (i = 0, num = n; i < n; i++, num--) {

		hwc = &cpuc->event_list[i]->hw;
		thread = p4_ht_thread(cpu);
		bind = p4_config_get_bind(hwc->config);
		escr_idx = p4_get_escr_idx(bind->escr_msr[thread]);

		if (hwc->idx != -1 && !p4_should_swap_ts(hwc->config, cpu)) {
			cntr_idx = hwc->idx;
			if (assign)
				assign[i] = hwc->idx;
			goto reserve;
		}

		cntr_idx = p4_next_cntr(thread, used_mask, bind);
		if (cntr_idx == -1 || test_bit(escr_idx, escr_mask))
			goto done;

		p4_pmu_swap_config_ts(hwc, cpu);
		if (assign)
			assign[i] = cntr_idx;
reserve:
		set_bit(cntr_idx, used_mask);
		set_bit(escr_idx, escr_mask);
	}

done:
	return num ? -ENOSPC : 0;
}
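The reservation scheme above in miniature: an event lands only if both a free counter and its ESCR can be claimed, and both claims are recorded in bitmasks (a toy model with made-up indices, standing in for used_mask/escr_mask):

#include <stdio.h>

int main(void)
{
	unsigned long used_cntr = 0, used_escr = 0;
	int cntr[3] = {12, 13, 16};	/* candidate counters for one event */
	int escr = 6;			/* the ESCR this event must program */
	int i, picked = -1;

	for (i = 0; i < 3; i++) {
		if (!(used_cntr & (1UL << cntr[i]))) {
			picked = cntr[i];
			break;
		}
	}

	if (picked == -1 || (used_escr & (1UL << escr))) {
		printf("-ENOSPC\n");	/* cannot schedule this event set */
		return 1;
	}

	used_cntr |= 1UL << picked;	/* reserve the counter... */
	used_escr |= 1UL << escr;	/* ...and the ESCR it needs */
	printf("assigned counter %d, escr %d\n", picked, escr);
	return 0;
}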
static __initconst const struct x86_pmu p4_pmu = {
	.name			= "Netburst P4/Xeon",
	.handle_irq		= p4_pmu_handle_irq,
	.disable_all		= p4_pmu_disable_all,
	.enable_all		= p4_pmu_enable_all,
	.enable			= p4_pmu_enable_event,
	.disable		= p4_pmu_disable_event,
	.eventsel		= MSR_P4_BPU_CCCR0,
	.perfctr		= MSR_P4_BPU_PERFCTR0,
	.event_map		= p4_pmu_event_map,
	.max_events		= ARRAY_SIZE(p4_general_events),
	.get_event_constraints	= x86_get_event_constraints,
	/*
	 * If HT is disabled we may need to use all the
	 * ARCH_P4_MAX_CCCR counters simultaneously, though
	 * leave it restricted for the moment assuming
	 * HT is on
	 */
	.num_counters		= ARCH_P4_MAX_CCCR,
	.apic			= 1,
	.cntval_bits		= 40,
	.cntval_mask		= (1ULL << 40) - 1,
	.max_period		= (1ULL << 39) - 1,
	.hw_config		= p4_hw_config,
	.schedule_events	= p4_pmu_schedule_events,
};

static __init int p4_pmu_init(void)
{
	unsigned int low, high;

	/* If we get stripped -- indexing fails */
	BUILD_BUG_ON(ARCH_P4_MAX_CCCR > X86_PMC_MAX_GENERIC);

	rdmsr(MSR_IA32_MISC_ENABLE, low, high);
	if (!(low & (1 << 7))) {
		pr_cont("unsupported Netburst CPU model %d ",
			boot_cpu_data.x86_model);
		return -ENODEV;
	}

	memcpy(hw_cache_event_ids, p4_hw_cache_event_ids,
		sizeof(hw_cache_event_ids));

	pr_cont("Netburst events, ");

	x86_pmu = p4_pmu;

	return 0;
}

#endif /* CONFIG_CPU_SUP_INTEL */
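The bit-7 probe in p4_pmu_init() can be mirrored from userspace through the msr driver; a sketch assuming bit 7 of MSR_IA32_MISC_ENABLE (0x1a0) is the architectural "performance monitoring available" flag (needs root and the msr module loaded):

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

#define MSR_IA32_MISC_ENABLE 0x1a0	/* bit 7: perfmon available */

int main(void)
{
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);	/* msr driver interface */

	if (fd < 0 || pread(fd, &val, sizeof(val), MSR_IA32_MISC_ENABLE) != sizeof(val)) {
		perror("msr");
		return 1;
	}
	printf("perfmon available: %s\n", (val & (1 << 7)) ? "yes" : "no");
	close(fd);
	return 0;
}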
@ -27,24 +27,6 @@ static u64 p6_pmu_event_map(int hw_event)
 */
#define P6_NOP_EVENT			0x0000002EULL

-static u64 p6_pmu_raw_event(u64 hw_event)
-{
-#define P6_EVNTSEL_EVENT_MASK		0x000000FFULL
-#define P6_EVNTSEL_UNIT_MASK		0x0000FF00ULL
-#define P6_EVNTSEL_EDGE_MASK		0x00040000ULL
-#define P6_EVNTSEL_INV_MASK		0x00800000ULL
-#define P6_EVNTSEL_REG_MASK		0xFF000000ULL
-
-#define P6_EVNTSEL_MASK			\
-	(P6_EVNTSEL_EVENT_MASK |	\
-	 P6_EVNTSEL_UNIT_MASK  |	\
-	 P6_EVNTSEL_EDGE_MASK  |	\
-	 P6_EVNTSEL_INV_MASK   |	\
-	 P6_EVNTSEL_REG_MASK)
-
-	return hw_event & P6_EVNTSEL_MASK;
-}
-
static struct event_constraint p6_event_constraints[] =
{
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1),	/* FLOPS */
@ -66,7 +48,7 @@ static void p6_pmu_disable_all(void)
	wrmsrl(MSR_P6_EVNTSEL0, val);
}

-static void p6_pmu_enable_all(void)
+static void p6_pmu_enable_all(int added)
{
	unsigned long val;

@ -102,22 +84,23 @@ static void p6_pmu_enable_event(struct perf_event *event)
	(void)checking_wrmsrl(hwc->config_base + hwc->idx, val);
}

-static __initconst struct x86_pmu p6_pmu = {
+static __initconst const struct x86_pmu p6_pmu = {
	.name			= "p6",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= p6_pmu_disable_all,
	.enable_all		= p6_pmu_enable_all,
	.enable			= p6_pmu_enable_event,
	.disable		= p6_pmu_disable_event,
+	.hw_config		= x86_pmu_hw_config,
+	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_P6_EVNTSEL0,
	.perfctr		= MSR_P6_PERFCTR0,
	.event_map		= p6_pmu_event_map,
-	.raw_event		= p6_pmu_raw_event,
	.max_events		= ARRAY_SIZE(p6_perfmon_event_map),
	.apic			= 1,
	.max_period		= (1ULL << 31) - 1,
	.version		= 0,
-	.num_events		= 2,
+	.num_counters		= 2,
	/*
	 * Events have 40 bits implemented. However they are designed such
	 * that bits [32-39] are sign extensions of bit 31. As such the
@ -125,8 +108,8 @@ static __initconst struct x86_pmu p6_pmu = {
	 *
	 * See IA-32 Intel Architecture Software developer manual Vol 3B
	 */
-	.event_bits		= 32,
-	.event_mask		= (1ULL << 32) - 1,
+	.cntval_bits		= 32,
+	.cntval_mask		= (1ULL << 32) - 1,
	.get_event_constraints	= x86_get_event_constraints,
	.event_constraints	= p6_event_constraints,
};
arch/x86/kernel/ds.c (1437 lines)
File diff suppressed because it is too large
@ -1,408 +0,0 @@
/*
 * Debug Store support - selftest
 *
 *
 * Copyright (C) 2009 Intel Corporation.
 * Markus Metzger <markus.t.metzger@intel.com>, 2009
 */

#include "ds_selftest.h"

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include <asm/ds.h>


#define BUFFER_SIZE		521	/* Intentionally chose an odd size. */
#define SMALL_BUFFER_SIZE	24	/* A single bts entry. */

struct ds_selftest_bts_conf {
	struct bts_tracer *tracer;
	int error;
	int (*suspend)(struct bts_tracer *);
	int (*resume)(struct bts_tracer *);
};

static int ds_selftest_bts_consistency(const struct bts_trace *trace)
{
	int error = 0;

	if (!trace) {
		printk(KERN_CONT "failed to access trace...");
		/* Bail out. Other tests are pointless. */
		return -1;
	}

	if (!trace->read) {
		printk(KERN_CONT "bts read not available...");
		error = -1;
	}

	/* Do some sanity checks on the trace configuration. */
	if (!trace->ds.n) {
		printk(KERN_CONT "empty bts buffer...");
		error = -1;
	}
	if (!trace->ds.size) {
		printk(KERN_CONT "bad bts trace setup...");
		error = -1;
	}
	if (trace->ds.end !=
	    (char *)trace->ds.begin + (trace->ds.n * trace->ds.size)) {
		printk(KERN_CONT "bad bts buffer setup...");
		error = -1;
	}
	/*
	 * We allow top in [begin; end], since it's not clear when the
	 * overflow adjustment happens: after the increment or before the
	 * write.
	 */
	if ((trace->ds.top < trace->ds.begin) ||
	    (trace->ds.end < trace->ds.top)) {
		printk(KERN_CONT "bts top out of bounds...");
		error = -1;
	}

	return error;
}
static int ds_selftest_bts_read(struct bts_tracer *tracer,
				const struct bts_trace *trace,
				const void *from, const void *to)
{
	const unsigned char *at;

	/*
	 * Check a few things which do not belong to this test.
	 * They should be covered by other tests.
	 */
	if (!trace)
		return -1;

	if (!trace->read)
		return -1;

	if (to < from)
		return -1;

	if (from < trace->ds.begin)
		return -1;

	if (trace->ds.end < to)
		return -1;

	if (!trace->ds.size)
		return -1;

	/* Now to the test itself. */
	for (at = from; (void *)at < to; at += trace->ds.size) {
		struct bts_struct bts;
		unsigned long index;
		int error;

		if (((void *)at - trace->ds.begin) % trace->ds.size) {
			printk(KERN_CONT
			       "read from non-integer index...");
			return -1;
		}
		index = ((void *)at - trace->ds.begin) / trace->ds.size;

		memset(&bts, 0, sizeof(bts));
		error = trace->read(tracer, at, &bts);
		if (error < 0) {
			printk(KERN_CONT
			       "error reading bts trace at [%lu] (0x%p)...",
			       index, at);
			return error;
		}

		switch (bts.qualifier) {
		case BTS_BRANCH:
			break;
		default:
			printk(KERN_CONT
			       "unexpected bts entry %llu at [%lu] (0x%p)...",
			       bts.qualifier, index, at);
			return -1;
		}
	}

	return 0;
}
static void ds_selftest_bts_cpu(void *arg)
{
	struct ds_selftest_bts_conf *conf = arg;
	const struct bts_trace *trace;
	void *top;

	if (IS_ERR(conf->tracer)) {
		conf->error = PTR_ERR(conf->tracer);
		conf->tracer = NULL;

		printk(KERN_CONT
		       "initialization failed (err: %d)...", conf->error);
		return;
	}

	/* We should meanwhile have enough trace. */
	conf->error = conf->suspend(conf->tracer);
	if (conf->error < 0)
		return;

	/* Let's see if we can access the trace. */
	trace = ds_read_bts(conf->tracer);

	conf->error = ds_selftest_bts_consistency(trace);
	if (conf->error < 0)
		return;

	/* If everything went well, we should have a few trace entries. */
	if (trace->ds.top == trace->ds.begin) {
		/*
		 * It is possible but highly unlikely that we got a
		 * buffer overflow and end up at exactly the same
		 * position we started from.
		 * Let's issue a warning, but continue.
		 */
		printk(KERN_CONT "no trace/overflow...");
	}

	/* Let's try to read the trace we collected. */
	conf->error =
		ds_selftest_bts_read(conf->tracer, trace,
				     trace->ds.begin, trace->ds.top);
	if (conf->error < 0)
		return;

	/*
	 * Let's read the trace again.
	 * Since we suspended tracing, we should get the same result.
	 */
	top = trace->ds.top;

	trace = ds_read_bts(conf->tracer);
	conf->error = ds_selftest_bts_consistency(trace);
	if (conf->error < 0)
		return;

	if (top != trace->ds.top) {
		printk(KERN_CONT "suspend not working...");
		conf->error = -1;
		return;
	}

	/* Let's collect some more trace - see if resume is working. */
	conf->error = conf->resume(conf->tracer);
	if (conf->error < 0)
		return;

	conf->error = conf->suspend(conf->tracer);
	if (conf->error < 0)
		return;

	trace = ds_read_bts(conf->tracer);

	conf->error = ds_selftest_bts_consistency(trace);
	if (conf->error < 0)
		return;

	if (trace->ds.top == top) {
		/*
		 * It is possible but highly unlikely that we got a
		 * buffer overflow and end up at exactly the same
		 * position we started from.
		 * Let's issue a warning and check the full trace.
		 */
		printk(KERN_CONT
		       "no resume progress/overflow...");

		conf->error =
			ds_selftest_bts_read(conf->tracer, trace,
					     trace->ds.begin, trace->ds.end);
	} else if (trace->ds.top < top) {
		/*
		 * We had a buffer overflow - the entire buffer should
		 * contain trace records.
		 */
		conf->error =
			ds_selftest_bts_read(conf->tracer, trace,
					     trace->ds.begin, trace->ds.end);
	} else {
		/*
		 * It is quite likely that the buffer did not overflow.
		 * Let's just check the delta trace.
		 */
		conf->error =
			ds_selftest_bts_read(conf->tracer, trace, top,
					     trace->ds.top);
	}
	if (conf->error < 0)
		return;

	conf->error = 0;
}
static int ds_suspend_bts_wrap(struct bts_tracer *tracer)
{
	ds_suspend_bts(tracer);
	return 0;
}

static int ds_resume_bts_wrap(struct bts_tracer *tracer)
{
	ds_resume_bts(tracer);
	return 0;
}

static void ds_release_bts_noirq_wrap(void *tracer)
{
	(void)ds_release_bts_noirq(tracer);
}

static int ds_selftest_bts_bad_release_noirq(int cpu,
					     struct bts_tracer *tracer)
{
	int error = -EPERM;

	/* Try to release the tracer on the wrong cpu. */
	get_cpu();
	if (cpu != smp_processor_id()) {
		error = ds_release_bts_noirq(tracer);
		if (error != -EPERM)
			printk(KERN_CONT "release on wrong cpu...");
	}
	put_cpu();

	return error ? 0 : -1;
}

static int ds_selftest_bts_bad_request_cpu(int cpu, void *buffer)
{
	struct bts_tracer *tracer;
	int error;

	/* Try to request cpu tracing while task tracing is active. */
	tracer = ds_request_bts_cpu(cpu, buffer, BUFFER_SIZE, NULL,
				    (size_t)-1, BTS_KERNEL);
	error = PTR_ERR(tracer);
	if (!IS_ERR(tracer)) {
		ds_release_bts(tracer);
		error = 0;
	}

	if (error != -EPERM)
		printk(KERN_CONT "cpu/task tracing overlap...");

	return error ? 0 : -1;
}

static int ds_selftest_bts_bad_request_task(void *buffer)
{
	struct bts_tracer *tracer;
	int error;

	/* Try to request task tracing while cpu tracing is active. */
	tracer = ds_request_bts_task(current, buffer, BUFFER_SIZE, NULL,
				     (size_t)-1, BTS_KERNEL);
	error = PTR_ERR(tracer);
	if (!IS_ERR(tracer)) {
		error = 0;
		ds_release_bts(tracer);
	}

	if (error != -EPERM)
		printk(KERN_CONT "task/cpu tracing overlap...");

	return error ? 0 : -1;
}
int ds_selftest_bts(void)
{
	struct ds_selftest_bts_conf conf;
	unsigned char buffer[BUFFER_SIZE], *small_buffer;
	unsigned long irq;
	int cpu;

	printk(KERN_INFO "[ds] bts selftest...");
	conf.error = 0;

	small_buffer = (unsigned char *)ALIGN((unsigned long)buffer, 8) + 8;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		conf.suspend = ds_suspend_bts_wrap;
		conf.resume = ds_resume_bts_wrap;
		conf.tracer =
			ds_request_bts_cpu(cpu, buffer, BUFFER_SIZE,
					   NULL, (size_t)-1, BTS_KERNEL);
		ds_selftest_bts_cpu(&conf);
		if (conf.error >= 0)
			conf.error = ds_selftest_bts_bad_request_task(buffer);
		ds_release_bts(conf.tracer);
		if (conf.error < 0)
			goto out;

		conf.suspend = ds_suspend_bts_noirq;
		conf.resume = ds_resume_bts_noirq;
		conf.tracer =
			ds_request_bts_cpu(cpu, buffer, BUFFER_SIZE,
					   NULL, (size_t)-1, BTS_KERNEL);
		smp_call_function_single(cpu, ds_selftest_bts_cpu, &conf, 1);
		if (conf.error >= 0) {
			conf.error =
				ds_selftest_bts_bad_release_noirq(cpu,
								  conf.tracer);
			/* We must not release the tracer twice. */
			if (conf.error < 0)
				conf.tracer = NULL;
		}
		if (conf.error >= 0)
			conf.error = ds_selftest_bts_bad_request_task(buffer);
		smp_call_function_single(cpu, ds_release_bts_noirq_wrap,
					 conf.tracer, 1);
		if (conf.error < 0)
			goto out;
	}

	conf.suspend = ds_suspend_bts_wrap;
	conf.resume = ds_resume_bts_wrap;
	conf.tracer =
		ds_request_bts_task(current, buffer, BUFFER_SIZE,
				    NULL, (size_t)-1, BTS_KERNEL);
	ds_selftest_bts_cpu(&conf);
	if (conf.error >= 0)
		conf.error = ds_selftest_bts_bad_request_cpu(0, buffer);
	ds_release_bts(conf.tracer);
	if (conf.error < 0)
		goto out;

	conf.suspend = ds_suspend_bts_noirq;
	conf.resume = ds_resume_bts_noirq;
	conf.tracer =
		ds_request_bts_task(current, small_buffer, SMALL_BUFFER_SIZE,
				    NULL, (size_t)-1, BTS_KERNEL);
	local_irq_save(irq);
	ds_selftest_bts_cpu(&conf);
	if (conf.error >= 0)
		conf.error = ds_selftest_bts_bad_request_cpu(0, buffer);
	ds_release_bts_noirq(conf.tracer);
	local_irq_restore(irq);
	if (conf.error < 0)
		goto out;

	conf.error = 0;
out:
	put_online_cpus();
	printk(KERN_CONT "%s.\n", (conf.error ? "failed" : "passed"));

	return conf.error;
}

int ds_selftest_pebs(void)
{
	return 0;
}
@ -1,15 +0,0 @@
/*
 * Debug Store support - selftest
 *
 *
 * Copyright (C) 2009 Intel Corporation.
 * Markus Metzger <markus.t.metzger@intel.com>, 2009
 */

#ifdef CONFIG_X86_DS_SELFTEST
extern int ds_selftest_bts(void);
extern int ds_selftest_pebs(void);
#else
static inline int ds_selftest_bts(void) { return 0; }
static inline int ds_selftest_pebs(void) { return 0; }
#endif
@ -224,11 +224,6 @@ unsigned __kprobes long oops_begin(void)
	int cpu;
	unsigned long flags;

-	/* notify the hw-branch tracer so it may disable tracing and
-	   add the last trace to the trace buffer -
-	   the earlier this happens, the more useful the trace. */
-	trace_hw_branch_oops();
-
	oops_enter();

	/* racy, but better than risking deadlock. */
@ -422,14 +422,22 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,

static void __kprobes clear_btf(void)
{
-	if (test_thread_flag(TIF_DEBUGCTLMSR))
-		update_debugctlmsr(0);
+	if (test_thread_flag(TIF_BLOCKSTEP)) {
+		unsigned long debugctl = get_debugctlmsr();
+
+		debugctl &= ~DEBUGCTLMSR_BTF;
+		update_debugctlmsr(debugctl);
+	}
}

static void __kprobes restore_btf(void)
{
-	if (test_thread_flag(TIF_DEBUGCTLMSR))
-		update_debugctlmsr(current->thread.debugctlmsr);
+	if (test_thread_flag(TIF_BLOCKSTEP)) {
+		unsigned long debugctl = get_debugctlmsr();
+
+		debugctl |= DEBUGCTLMSR_BTF;
+		update_debugctlmsr(debugctl);
+	}
}

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
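The same read-modify-write in a standalone shape, with the MSR accessors stubbed out; DEBUGCTLMSR_BTF is bit 1 of IA32_DEBUGCTL and, together with EFLAGS.TF, makes the CPU trap on taken branches rather than on every instruction:

#include <stdint.h>

#define DEBUGCTLMSR_BTF (1UL << 1)	/* bit 1 of IA32_DEBUGCTL */

static uint64_t fake_debugctl;	/* stands in for the real MSR (ring 0 only) */

static uint64_t get_debugctlmsr(void)          { return fake_debugctl; }
static void     update_debugctlmsr(uint64_t v) { fake_debugctl = v; }

static void set_btf(int on)
{
	uint64_t debugctl = get_debugctlmsr();

	if (on)
		debugctl |= DEBUGCTLMSR_BTF;	/* as restore_btf() does */
	else
		debugctl &= ~DEBUGCTLMSR_BTF;	/* as clear_btf() does */
	update_debugctlmsr(debugctl);
}

int main(void)
{
	set_btf(1);
	set_btf(0);
	return (int)fake_debugctl;
}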
@ -20,7 +20,6 @@
#include <asm/idle.h>
#include <asm/uaccess.h>
#include <asm/i387.h>
-#include <asm/ds.h>
#include <asm/debugreg.h>

unsigned long idle_halt;
@ -50,8 +49,6 @@ void free_thread_xstate(struct task_struct *tsk)
		kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
		tsk->thread.xstate = NULL;
	}
-
-	WARN(tsk->thread.ds_ctx, "leaking DS context\n");
}

void free_thread_info(struct thread_info *ti)
@ -198,11 +195,16 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
	prev = &prev_p->thread;
	next = &next_p->thread;

-	if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) ||
-	    test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR))
-		ds_switch_to(prev_p, next_p);
-	else if (next->debugctlmsr != prev->debugctlmsr)
-		update_debugctlmsr(next->debugctlmsr);
+	if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
+	    test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
+		unsigned long debugctl = get_debugctlmsr();
+
+		debugctl &= ~DEBUGCTLMSR_BTF;
+		if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
+			debugctl |= DEBUGCTLMSR_BTF;
+
+		update_debugctlmsr(debugctl);
+	}

	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
@ -55,7 +55,6 @@
#include <asm/cpu.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
-#include <asm/ds.h>
#include <asm/debugreg.h>

asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
@ -238,13 +237,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
-
-	clear_tsk_thread_flag(p, TIF_DS_AREA_MSR);
-	p->thread.ds_ctx = NULL;
-
-	clear_tsk_thread_flag(p, TIF_DEBUGCTLMSR);
-	p->thread.debugctlmsr = 0;
-
	return err;
}

@ -49,7 +49,6 @@
#include <asm/ia32.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
-#include <asm/ds.h>
#include <asm/debugreg.h>

asmlinkage extern void ret_from_fork(void);
@ -313,13 +312,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
		if (err)
			goto out;
	}
-
-	clear_tsk_thread_flag(p, TIF_DS_AREA_MSR);
-	p->thread.ds_ctx = NULL;
-
-	clear_tsk_thread_flag(p, TIF_DEBUGCTLMSR);
-	p->thread.debugctlmsr = 0;
-
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
@ -2,9 +2,6 @@
/*
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
- *
- * BTS tracing
- *	Markus Metzger <markus.t.metzger@intel.com>, Dec 2007
 */

#include <linux/kernel.h>
@ -22,7 +19,6 @@
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
-#include <linux/workqueue.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>

@ -36,7 +32,6 @@
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/proto.h>
-#include <asm/ds.h>
#include <asm/hw_breakpoint.h>

#include "tls.h"

@@ -789,342 +784,6 @@ static int ioperm_get(struct task_struct *target,
 				   0, IO_BITMAP_BYTES);
 }
 
-#ifdef CONFIG_X86_PTRACE_BTS
-/*
- * A branch trace store context.
- *
- * Contexts may only be installed by ptrace_bts_config() and only for
- * ptraced tasks.
- *
- * Contexts are destroyed when the tracee is detached from the tracer.
- * The actual destruction work requires interrupts enabled, so the
- * work is deferred and will be scheduled during __ptrace_unlink().
- *
- * Contexts hold an additional task_struct reference on the traced
- * task, as well as a reference on the tracer's mm.
- *
- * Ptrace already holds a task_struct for the duration of ptrace operations,
- * but since destruction is deferred, it may be executed after both
- * tracer and tracee exited.
- */
-struct bts_context {
-	/* The branch trace handle. */
-	struct bts_tracer	*tracer;
-
-	/* The buffer used to store the branch trace and its size. */
-	void			*buffer;
-	unsigned int		size;
-
-	/* The mm that paid for the above buffer. */
-	struct mm_struct	*mm;
-
-	/* The task this context belongs to. */
-	struct task_struct	*task;
-
-	/* The signal to send on a bts buffer overflow. */
-	unsigned int		bts_ovfl_signal;
-
-	/* The work struct to destroy a context. */
-	struct work_struct	work;
-};
-
-static int alloc_bts_buffer(struct bts_context *context, unsigned int size)
-{
-	void *buffer = NULL;
-	int err = -ENOMEM;
-
-	err = account_locked_memory(current->mm, current->signal->rlim, size);
-	if (err < 0)
-		return err;
-
-	buffer = kzalloc(size, GFP_KERNEL);
-	if (!buffer)
-		goto out_refund;
-
-	context->buffer = buffer;
-	context->size = size;
-	context->mm = get_task_mm(current);
-
-	return 0;
-
- out_refund:
-	refund_locked_memory(current->mm, size);
-	return err;
-}
-
-static inline void free_bts_buffer(struct bts_context *context)
-{
-	if (!context->buffer)
-		return;
-
-	kfree(context->buffer);
-	context->buffer = NULL;
-
-	refund_locked_memory(context->mm, context->size);
-	context->size = 0;
-
-	mmput(context->mm);
-	context->mm = NULL;
-}
-
-static void free_bts_context_work(struct work_struct *w)
-{
-	struct bts_context *context;
-
-	context = container_of(w, struct bts_context, work);
-
-	ds_release_bts(context->tracer);
-	put_task_struct(context->task);
-	free_bts_buffer(context);
-	kfree(context);
-}
-
-static inline void free_bts_context(struct bts_context *context)
-{
-	INIT_WORK(&context->work, free_bts_context_work);
-	schedule_work(&context->work);
-}
-
-static inline struct bts_context *alloc_bts_context(struct task_struct *task)
-{
-	struct bts_context *context = kzalloc(sizeof(*context), GFP_KERNEL);
-	if (context) {
-		context->task = task;
-		task->bts = context;
-
-		get_task_struct(task);
-	}
-
-	return context;
-}
-
-static int ptrace_bts_read_record(struct task_struct *child, size_t index,
-				  struct bts_struct __user *out)
-{
-	struct bts_context *context;
-	const struct bts_trace *trace;
-	struct bts_struct bts;
-	const unsigned char *at;
-	int error;
-
-	context = child->bts;
-	if (!context)
-		return -ESRCH;
-
-	trace = ds_read_bts(context->tracer);
-	if (!trace)
-		return -ESRCH;
-
-	at = trace->ds.top - ((index + 1) * trace->ds.size);
-	if ((void *)at < trace->ds.begin)
-		at += (trace->ds.n * trace->ds.size);
-
-	if (!trace->read)
-		return -EOPNOTSUPP;
-
-	error = trace->read(context->tracer, at, &bts);
-	if (error < 0)
-		return error;
-
-	if (copy_to_user(out, &bts, sizeof(bts)))
-		return -EFAULT;
-
-	return sizeof(bts);
-}
-
-static int ptrace_bts_drain(struct task_struct *child,
-			    long size,
-			    struct bts_struct __user *out)
-{
-	struct bts_context *context;
-	const struct bts_trace *trace;
-	const unsigned char *at;
-	int error, drained = 0;
-
-	context = child->bts;
-	if (!context)
-		return -ESRCH;
-
-	trace = ds_read_bts(context->tracer);
-	if (!trace)
-		return -ESRCH;
-
-	if (!trace->read)
-		return -EOPNOTSUPP;
-
-	if (size < (trace->ds.top - trace->ds.begin))
-		return -EIO;
-
-	for (at = trace->ds.begin; (void *)at < trace->ds.top;
-	     out++, drained++, at += trace->ds.size) {
-		struct bts_struct bts;
-
-		error = trace->read(context->tracer, at, &bts);
-		if (error < 0)
-			return error;
-
-		if (copy_to_user(out, &bts, sizeof(bts)))
-			return -EFAULT;
-	}
-
-	memset(trace->ds.begin, 0, trace->ds.n * trace->ds.size);
-
-	error = ds_reset_bts(context->tracer);
-	if (error < 0)
-		return error;
-
-	return drained;
-}
-
-static int ptrace_bts_config(struct task_struct *child,
-			     long cfg_size,
-			     const struct ptrace_bts_config __user *ucfg)
-{
-	struct bts_context *context;
-	struct ptrace_bts_config cfg;
-	unsigned int flags = 0;
-
-	if (cfg_size < sizeof(cfg))
-		return -EIO;
-
-	if (copy_from_user(&cfg, ucfg, sizeof(cfg)))
-		return -EFAULT;
-
-	context = child->bts;
-	if (!context)
-		context = alloc_bts_context(child);
-	if (!context)
-		return -ENOMEM;
-
-	if (cfg.flags & PTRACE_BTS_O_SIGNAL) {
-		if (!cfg.signal)
-			return -EINVAL;
-
-		return -EOPNOTSUPP;
-		context->bts_ovfl_signal = cfg.signal;
-	}
-
-	ds_release_bts(context->tracer);
-	context->tracer = NULL;
-
-	if ((cfg.flags & PTRACE_BTS_O_ALLOC) && (cfg.size != context->size)) {
-		int err;
-
-		free_bts_buffer(context);
-		if (!cfg.size)
-			return 0;
-
-		err = alloc_bts_buffer(context, cfg.size);
-		if (err < 0)
-			return err;
-	}
-
-	if (cfg.flags & PTRACE_BTS_O_TRACE)
-		flags |= BTS_USER;
-
-	if (cfg.flags & PTRACE_BTS_O_SCHED)
-		flags |= BTS_TIMESTAMPS;
-
-	context->tracer =
-		ds_request_bts_task(child, context->buffer, context->size,
-				    NULL, (size_t)-1, flags);
-	if (unlikely(IS_ERR(context->tracer))) {
-		int error = PTR_ERR(context->tracer);
-
-		free_bts_buffer(context);
-		context->tracer = NULL;
-		return error;
-	}
-
-	return sizeof(cfg);
-}
-
-static int ptrace_bts_status(struct task_struct *child,
-			     long cfg_size,
-			     struct ptrace_bts_config __user *ucfg)
-{
-	struct bts_context *context;
-	const struct bts_trace *trace;
-	struct ptrace_bts_config cfg;
-
-	context = child->bts;
-	if (!context)
-		return -ESRCH;
-
-	if (cfg_size < sizeof(cfg))
-		return -EIO;
-
-	trace = ds_read_bts(context->tracer);
-	if (!trace)
-		return -ESRCH;
-
-	memset(&cfg, 0, sizeof(cfg));
-	cfg.size	= trace->ds.end - trace->ds.begin;
-	cfg.signal	= context->bts_ovfl_signal;
-	cfg.bts_size	= sizeof(struct bts_struct);
-
-	if (cfg.signal)
-		cfg.flags |= PTRACE_BTS_O_SIGNAL;
-
-	if (trace->ds.flags & BTS_USER)
-		cfg.flags |= PTRACE_BTS_O_TRACE;
-
-	if (trace->ds.flags & BTS_TIMESTAMPS)
-		cfg.flags |= PTRACE_BTS_O_SCHED;
-
-	if (copy_to_user(ucfg, &cfg, sizeof(cfg)))
-		return -EFAULT;
-
-	return sizeof(cfg);
-}
-
-static int ptrace_bts_clear(struct task_struct *child)
-{
-	struct bts_context *context;
-	const struct bts_trace *trace;
-
-	context = child->bts;
-	if (!context)
-		return -ESRCH;
-
-	trace = ds_read_bts(context->tracer);
-	if (!trace)
-		return -ESRCH;
-
-	memset(trace->ds.begin, 0, trace->ds.n * trace->ds.size);
-
-	return ds_reset_bts(context->tracer);
-}
-
-static int ptrace_bts_size(struct task_struct *child)
-{
-	struct bts_context *context;
-	const struct bts_trace *trace;
-
-	context = child->bts;
-	if (!context)
-		return -ESRCH;
-
-	trace = ds_read_bts(context->tracer);
-	if (!trace)
-		return -ESRCH;
-
-	return (trace->ds.top - trace->ds.begin) / trace->ds.size;
-}
-
-/*
- * Called from __ptrace_unlink() after the child has been moved back
- * to its original parent.
- */
-void ptrace_bts_untrace(struct task_struct *child)
-{
-	if (unlikely(child->bts)) {
-		free_bts_context(child->bts);
-		child->bts = NULL;
-	}
-}
-#endif /* CONFIG_X86_PTRACE_BTS */
-
 /*
  * Called by kernel/ptrace.c when detaching..
  *

@@ -1252,39 +911,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 		break;
 #endif
 
-	/*
-	 * These bits need more cooking - not enabled yet:
-	 */
-#ifdef CONFIG_X86_PTRACE_BTS
-	case PTRACE_BTS_CONFIG:
-		ret = ptrace_bts_config
-			(child, data, (struct ptrace_bts_config __user *)addr);
-		break;
-
-	case PTRACE_BTS_STATUS:
-		ret = ptrace_bts_status
-			(child, data, (struct ptrace_bts_config __user *)addr);
-		break;
-
-	case PTRACE_BTS_SIZE:
-		ret = ptrace_bts_size(child);
-		break;
-
-	case PTRACE_BTS_GET:
-		ret = ptrace_bts_read_record
-			(child, data, (struct bts_struct __user *) addr);
-		break;
-
-	case PTRACE_BTS_CLEAR:
-		ret = ptrace_bts_clear(child);
-		break;
-
-	case PTRACE_BTS_DRAIN:
-		ret = ptrace_bts_drain
-			(child, data, (struct bts_struct __user *) addr);
-		break;
-#endif /* CONFIG_X86_PTRACE_BTS */
-
 	default:
 		ret = ptrace_request(child, request, addr, data);
 		break;

@@ -1544,14 +1170,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 
 	case PTRACE_GET_THREAD_AREA:
 	case PTRACE_SET_THREAD_AREA:
-#ifdef CONFIG_X86_PTRACE_BTS
-	case PTRACE_BTS_CONFIG:
-	case PTRACE_BTS_STATUS:
-	case PTRACE_BTS_SIZE:
-	case PTRACE_BTS_GET:
-	case PTRACE_BTS_CLEAR:
-	case PTRACE_BTS_DRAIN:
-#endif /* CONFIG_X86_PTRACE_BTS */
 		return arch_ptrace(child, request, addr, data);
 
 	default:

@@ -157,22 +157,6 @@ static int enable_single_step(struct task_struct *child)
 	return 1;
 }
 
-/*
- * Install this value in MSR_IA32_DEBUGCTLMSR whenever child is running.
- */
-static void write_debugctlmsr(struct task_struct *child, unsigned long val)
-{
-	if (child->thread.debugctlmsr == val)
-		return;
-
-	child->thread.debugctlmsr = val;
-
-	if (child != current)
-		return;
-
-	update_debugctlmsr(val);
-}
-
 /*
  * Enable single or block step.
  */

@@ -186,15 +170,17 @@ static void enable_step(struct task_struct *child, bool block)
 	 * that uses user-mode single stepping itself.
 	 */
 	if (enable_single_step(child) && block) {
-		set_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
-		write_debugctlmsr(child,
-				  child->thread.debugctlmsr | DEBUGCTLMSR_BTF);
-	} else {
-		write_debugctlmsr(child,
-				  child->thread.debugctlmsr & ~DEBUGCTLMSR_BTF);
+		unsigned long debugctl = get_debugctlmsr();
 
-		if (!child->thread.debugctlmsr)
-			clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
+		debugctl |= DEBUGCTLMSR_BTF;
+		update_debugctlmsr(debugctl);
+		set_tsk_thread_flag(child, TIF_BLOCKSTEP);
+	} else if (test_tsk_thread_flag(child, TIF_BLOCKSTEP)) {
+		unsigned long debugctl = get_debugctlmsr();
+
+		debugctl &= ~DEBUGCTLMSR_BTF;
+		update_debugctlmsr(debugctl);
+		clear_tsk_thread_flag(child, TIF_BLOCKSTEP);
 	}
 }

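(Not part of the commit: a tiny user-space model of the DEBUGCTLMSR_BTF toggling the hunk above performs through get_debugctlmsr()/update_debugctlmsr(). The bit position follows the x86 IA32_DEBUGCTL layout; the MSR access itself cannot be done from user space, so a plain variable stands in.)

#include <stdio.h>

#define DEBUGCTLMSR_BTF (1UL << 1)	/* Branch Trap Flag, bit 1 of IA32_DEBUGCTL */

int main(void)
{
	unsigned long debugctl = 0;	/* stands in for the value read from the MSR */

	debugctl |= DEBUGCTLMSR_BTF;	/* block step: trap on branches, not instructions */
	printf("enabled:  %#lx\n", debugctl);

	debugctl &= ~DEBUGCTLMSR_BTF;	/* back to plain single stepping */
	printf("disabled: %#lx\n", debugctl);
	return 0;
}
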
@@ -213,11 +199,13 @@ void user_disable_single_step(struct task_struct *child)
 	/*
 	 * Make sure block stepping (BTF) is disabled.
 	 */
-	write_debugctlmsr(child,
-			  child->thread.debugctlmsr & ~DEBUGCTLMSR_BTF);
+	if (test_tsk_thread_flag(child, TIF_BLOCKSTEP)) {
+		unsigned long debugctl = get_debugctlmsr();
 
-	if (!child->thread.debugctlmsr)
-		clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
+		debugctl &= ~DEBUGCTLMSR_BTF;
+		update_debugctlmsr(debugctl);
+		clear_tsk_thread_flag(child, TIF_BLOCKSTEP);
+	}
 
 	/* Always clear TIF_SINGLESTEP... */
 	clear_tsk_thread_flag(child, TIF_SINGLESTEP);

@@ -543,11 +543,11 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
 
 	/* DR6 may or may not be cleared by the CPU */
 	set_debugreg(0, 6);
+
 	/*
 	 * The processor cleared BTF, so don't mark that we need it set.
 	 */
-	clear_tsk_thread_flag(tsk, TIF_DEBUGCTLMSR);
-	tsk->thread.debugctlmsr = 0;
+	clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);
 
 	/* Store the virtualized DR6 value */
 	tsk->thread.debugreg6 = dr6;

@@ -3652,8 +3652,11 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
 
 	/* We need to handle NMIs before interrupts are enabled */
 	if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
-	    (exit_intr_info & INTR_INFO_VALID_MASK))
+	    (exit_intr_info & INTR_INFO_VALID_MASK)) {
+		kvm_before_handle_nmi(&vmx->vcpu);
 		asm("int $2");
+		kvm_after_handle_nmi(&vmx->vcpu);
+	}
 
 	idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
 

@@ -40,6 +40,7 @@
 #include <linux/user-return-notifier.h>
 #include <linux/srcu.h>
 #include <linux/slab.h>
+#include <linux/perf_event.h>
 #include <trace/events/kvm.h>
 
 #define CREATE_TRACE_POINTS

@@ -3955,6 +3956,47 @@ static void kvm_timer_init(void)
 	}
 }
 
+static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
+
+static int kvm_is_in_guest(void)
+{
+	return percpu_read(current_vcpu) != NULL;
+}
+
+static int kvm_is_user_mode(void)
+{
+	int user_mode = 3;
+
+	if (percpu_read(current_vcpu))
+		user_mode = kvm_x86_ops->get_cpl(percpu_read(current_vcpu));
+
+	return user_mode != 0;
+}
+
+static unsigned long kvm_get_guest_ip(void)
+{
+	unsigned long ip = 0;
+
+	if (percpu_read(current_vcpu))
+		ip = kvm_rip_read(percpu_read(current_vcpu));
+
+	return ip;
+}
+
+static struct perf_guest_info_callbacks kvm_guest_cbs = {
+	.is_in_guest		= kvm_is_in_guest,
+	.is_user_mode		= kvm_is_user_mode,
+	.get_guest_ip		= kvm_get_guest_ip,
+};
+
+void kvm_before_handle_nmi(struct kvm_vcpu *vcpu)
+{
+	percpu_write(current_vcpu, vcpu);
+}
+EXPORT_SYMBOL_GPL(kvm_before_handle_nmi);
+
+void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
+{
+	percpu_write(current_vcpu, NULL);
+}
+EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
+
 int kvm_arch_init(void *opaque)
 {
 	int r;

@@ -3991,6 +4033,8 @@ int kvm_arch_init(void *opaque)
 
 	kvm_timer_init();
 
+	perf_register_guest_info_callbacks(&kvm_guest_cbs);
+
 	return 0;
 
 out:

@@ -3999,6 +4043,8 @@ out:
 
 void kvm_arch_exit(void)
 {
+	perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
+
 	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
 		cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
 					    CPUFREQ_TRANSITION_NOTIFIER);

@@ -65,4 +65,7 @@ static inline int is_paging(struct kvm_vcpu *vcpu)
 	return kvm_read_cr0_bits(vcpu, X86_CR0_PG);
 }
 
+void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
+void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
+
 #endif

@@ -20,7 +20,7 @@ lib-y := delay.o
 lib-y += thunk_$(BITS).o
 lib-y += usercopy_$(BITS).o getuser.o putuser.o
 lib-y += memcpy_$(BITS).o
-lib-$(CONFIG_KPROBES) += insn.o inat.o
+lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o
 
 obj-y += msr.o msr-reg.o msr-reg-export.o
 

@@ -239,11 +239,11 @@ static void arch_perfmon_setup_counters(void)
 	if (eax.split.version_id == 0 && current_cpu_data.x86 == 6 &&
 		current_cpu_data.x86_model == 15) {
 		eax.split.version_id = 2;
-		eax.split.num_events = 2;
+		eax.split.num_counters = 2;
 		eax.split.bit_width = 40;
 	}
 
-	num_counters = eax.split.num_events;
+	num_counters = eax.split.num_counters;
 
 	op_arch_perfmon_spec.num_counters = num_counters;
 	op_arch_perfmon_spec.num_controls = num_counters;

@@ -504,18 +504,6 @@ extern int ftrace_dump_on_oops;
 #define INIT_TRACE_RECURSION
 #endif
 
-#ifdef CONFIG_HW_BRANCH_TRACER
-
-void trace_hw_branch(u64 from, u64 to);
-void trace_hw_branch_oops(void);
-
-#else /* CONFIG_HW_BRANCH_TRACER */
-
-static inline void trace_hw_branch(u64 from, u64 to) {}
-static inline void trace_hw_branch_oops(void) {}
-
-#endif /* CONFIG_HW_BRANCH_TRACER */
-
 #ifdef CONFIG_FTRACE_SYSCALLS
 
 unsigned long arch_syscall_addr(int nr);

@@ -19,7 +19,6 @@ struct anon_vma;
 struct file_ra_state;
 struct user_struct;
 struct writeback_control;
-struct rlimit;
 
 #ifndef CONFIG_DISCONTIGMEM          /* Don't use mapnrs, do it properly */
 extern unsigned long max_mapnr;

@@ -1449,9 +1448,6 @@ int vmemmap_populate_basepages(struct page *start_page,
 int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
 void vmemmap_populate_print_last(void);
 
-extern int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
-				 size_t size);
-extern void refund_locked_memory(struct mm_struct *mm, size_t size);
-
 enum mf_flags {
 	MF_COUNT_INCREASED = 1 << 0,

@@ -203,8 +203,9 @@ struct perf_event_attr {
 				enable_on_exec :  1, /* next exec enables     */
 				task           :  1, /* trace fork/exit       */
 				watermark      :  1, /* wakeup_watermark      */
+				precise        :  1, /* OoO invariant counter */
 
-				__reserved_1   : 49;
+				__reserved_1   : 48;
 
 	union {
 		__u32		wakeup_events;	  /* wakeup every n events */

@@ -287,11 +288,19 @@ struct perf_event_mmap_page {
 	__u64	data_tail;		/* user-space written tail */
 };
 
-#define PERF_RECORD_MISC_CPUMODE_MASK		(3 << 0)
+#define PERF_RECORD_MISC_CPUMODE_MASK		(7 << 0)
 #define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
 #define PERF_RECORD_MISC_KERNEL			(1 << 0)
 #define PERF_RECORD_MISC_USER			(2 << 0)
 #define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
+#define PERF_RECORD_MISC_GUEST_KERNEL		(4 << 0)
+#define PERF_RECORD_MISC_GUEST_USER		(5 << 0)
+
+#define PERF_RECORD_MISC_EXACT			(1 << 14)
+/*
+ * Reserve the last bit to indicate some extended misc field
+ */
+#define PERF_RECORD_MISC_EXT_RESERVED		(1 << 15)
 
 struct perf_event_header {
 	__u32	type;

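(Illustrative sketch, not from the commit: how a user-space consumer might decode the widened cpumode field defined above. The constants are copied from the hunk; sample_misc is a made-up input.)

#include <stdio.h>

#define PERF_RECORD_MISC_CPUMODE_MASK	(7 << 0)
#define PERF_RECORD_MISC_KERNEL		(1 << 0)
#define PERF_RECORD_MISC_USER		(2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR	(3 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL	(4 << 0)
#define PERF_RECORD_MISC_GUEST_USER	(5 << 0)

int main(void)
{
	unsigned short sample_misc = PERF_RECORD_MISC_GUEST_KERNEL;	/* example input */

	/* The mask widened from 3 to 7 so the two new guest modes fit. */
	switch (sample_misc & PERF_RECORD_MISC_CPUMODE_MASK) {
	case PERF_RECORD_MISC_KERNEL:		puts("host kernel");	break;
	case PERF_RECORD_MISC_USER:		puts("host user");	break;
	case PERF_RECORD_MISC_HYPERVISOR:	puts("hypervisor");	break;
	case PERF_RECORD_MISC_GUEST_KERNEL:	puts("guest kernel");	break;
	case PERF_RECORD_MISC_GUEST_USER:	puts("guest user");	break;
	default:				puts("unknown");	break;
	}
	return 0;
}
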
@@ -439,6 +448,12 @@ enum perf_callchain_context {
 # include <asm/perf_event.h>
 #endif
 
+struct perf_guest_info_callbacks {
+	int (*is_in_guest) (void);
+	int (*is_user_mode) (void);
+	unsigned long (*get_guest_ip) (void);
+};
+
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 #include <asm/hw_breakpoint.h>
 #endif

@@ -468,6 +483,17 @@ struct perf_raw_record {
 	void				*data;
 };
 
+struct perf_branch_entry {
+	__u64				from;
+	__u64				to;
+	__u64				flags;
+};
+
+struct perf_branch_stack {
+	__u64				nr;
+	struct perf_branch_entry	entries[0];
+};
+
 struct task_struct;
 
 /**

@@ -571,6 +597,14 @@ enum perf_group_flag {
 	PERF_GROUP_SOFTWARE = 0x1,
 };
 
+#define SWEVENT_HLIST_BITS	8
+#define SWEVENT_HLIST_SIZE	(1 << SWEVENT_HLIST_BITS)
+
+struct swevent_hlist {
+	struct hlist_head	heads[SWEVENT_HLIST_SIZE];
+	struct rcu_head		rcu_head;
+};
+
 /**
  * struct perf_event - performance event kernel representation:
  */

@@ -579,6 +613,7 @@ struct perf_event {
 	struct list_head		group_entry;
 	struct list_head		event_entry;
 	struct list_head		sibling_list;
+	struct hlist_node		hlist_entry;
 	int				nr_siblings;
 	int				group_flags;
 	struct perf_event		*group_leader;

@@ -726,6 +761,9 @@ struct perf_cpu_context {
 	int				active_oncpu;
 	int				max_pertask;
 	int				exclusive;
+	struct swevent_hlist		*swevent_hlist;
+	struct mutex			hlist_mutex;
+	int				hlist_refcount;
 
 	/*
 	 * Recursion avoidance:

@@ -902,6 +940,12 @@ static inline void perf_event_mmap(struct vm_area_struct *vma)
 	__perf_event_mmap(vma);
 }
 
+extern struct perf_guest_info_callbacks *perf_guest_cbs;
+extern int perf_register_guest_info_callbacks(
+				struct perf_guest_info_callbacks *);
+extern int perf_unregister_guest_info_callbacks(
+				struct perf_guest_info_callbacks *);
+
 extern void perf_event_comm(struct task_struct *tsk);
 extern void perf_event_fork(struct task_struct *tsk);

@@ -971,6 +1015,11 @@ perf_sw_event(u32 event_id, u64 nr, int nmi,
 static inline void
 perf_bp_event(struct perf_event *event, void *data)			{ }
 
+static inline int perf_register_guest_info_callbacks
+(struct perf_guest_info_callbacks *) {return 0; }
+static inline int perf_unregister_guest_info_callbacks
+(struct perf_guest_info_callbacks *) {return 0; }
+
 static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
 static inline void perf_event_comm(struct task_struct *tsk)		{ }
 static inline void perf_event_fork(struct task_struct *tsk)		{ }

@@ -345,18 +345,6 @@ static inline void user_single_step_siginfo(struct task_struct *tsk,
 #define arch_ptrace_stop(code, info)		do { } while (0)
 #endif
 
-#ifndef arch_ptrace_untrace
-/*
- * Do machine-specific work before untracing child.
- *
- * This is called for a normal detach as well as from ptrace_exit()
- * when the tracing task dies.
- *
- * Called with write_lock(&tasklist_lock) held.
- */
-#define arch_ptrace_untrace(task)		do { } while (0)
-#endif
-
 extern int task_current_syscall(struct task_struct *target, long *callno,
 				unsigned long args[6], unsigned int maxargs,
 				unsigned long *sp, unsigned long *pc);

@@ -99,7 +99,6 @@ struct futex_pi_state;
 struct robust_list_head;
 struct bio_list;
 struct fs_struct;
-struct bts_context;
 struct perf_event_context;
 
 /*

@@ -1272,12 +1271,6 @@ struct task_struct {
 	struct list_head ptraced;
 	struct list_head ptrace_entry;
 
-	/*
-	 * This is the tracer handle for the ptrace BTS extension.
-	 * This field actually belongs to the ptracer task.
-	 */
-	struct bts_context *bts;
-
 	/* PID/PID hash table linkage. */
 	struct pid_link pids[PIDTYPE_MAX];
 	struct list_head thread_group;

@@ -2123,10 +2116,8 @@ extern void set_task_comm(struct task_struct *tsk, char *from);
 extern char *get_task_comm(char *to, struct task_struct *tsk);
 
 #ifdef CONFIG_SMP
-extern void wait_task_context_switch(struct task_struct *p);
 extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
 #else
-static inline void wait_task_context_switch(struct task_struct *p) {}
 static inline unsigned long wait_task_inactive(struct task_struct *p,
 					       long match_state)
 {

@@ -758,13 +758,12 @@ __attribute__((section("_ftrace_events"))) event_##call = {		\
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
 static notrace void							\
 perf_trace_templ_##call(struct ftrace_event_call *event_call,		\
-			proto)						\
+			struct pt_regs *__regs, proto)			\
 {									\
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
 	struct ftrace_raw_##call *entry;				\
 	u64 __addr = 0, __count = 1;					\
 	unsigned long irq_flags;					\
-	struct pt_regs *__regs;						\
 	int __entry_size;						\
 	int __data_size;						\
 	int rctx;							\

@@ -785,20 +784,22 @@ perf_trace_templ_##call(struct ftrace_event_call *event_call,		\
 									\
 	{ assign; }							\
 									\
-	__regs = &__get_cpu_var(perf_trace_regs);			\
-	perf_fetch_caller_regs(__regs, 2);				\
-									\
 	perf_trace_buf_submit(entry, __entry_size, rctx, __addr,	\
 			       __count, irq_flags, __regs);		\
 }
 
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, call, proto, args)		\
 static notrace void perf_trace_##call(proto)			\
 {								\
 	struct ftrace_event_call *event_call = &event_##call;	\
-								\
-	perf_trace_templ_##template(event_call, args);		\
+	struct pt_regs *__regs = &get_cpu_var(perf_trace_regs);	\
+								\
+	perf_fetch_caller_regs(__regs, 1);			\
+								\
+	perf_trace_templ_##template(event_call, __regs, args);	\
+								\
+	put_cpu_var(perf_trace_regs);				\
 }
 
 #undef DEFINE_EVENT_PRINT

@@ -1111,9 +1111,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->memcg_batch.do_batch = 0;
 	p->memcg_batch.memcg = NULL;
 #endif
-
-	p->bts = NULL;
-
 	p->stack_start = stack_start;
 
 	/* Perform scheduler related setup. Assign this task to a CPU. */

@@ -16,6 +16,7 @@
 #include <linux/file.h>
 #include <linux/poll.h>
 #include <linux/slab.h>
+#include <linux/hash.h>
 #include <linux/sysfs.h>
 #include <linux/dcache.h>
 #include <linux/percpu.h>

@@ -1367,6 +1368,8 @@ void perf_event_task_sched_in(struct task_struct *task)
 	if (cpuctx->task_ctx == ctx)
 		return;
 
+	perf_disable();
+
 	/*
 	 * We want to keep the following priority order:
 	 * cpu pinned (that don't need to move), task pinned,

@@ -1379,6 +1382,8 @@ void perf_event_task_sched_in(struct task_struct *task)
 	ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
 
 	cpuctx->task_ctx = ctx;
+
+	perf_enable();
 }
 
 #define MAX_INTERRUPTS (~0ULL)

@@ -2642,6 +2647,7 @@ static int perf_fasync(int fd, struct file *filp, int on)
 }
 
 static const struct file_operations perf_fops = {
+	.llseek			= no_llseek,
 	.release		= perf_release,
 	.read			= perf_read,
 	.poll			= perf_poll,

@@ -2791,6 +2797,27 @@ void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
 }
 
+/*
+ * We assume there is only KVM supporting the callbacks.
+ * Later on, we might change it to a list if there is
+ * another virtualization implementation supporting the callbacks.
+ */
+struct perf_guest_info_callbacks *perf_guest_cbs;
+
+int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
+{
+	perf_guest_cbs = cbs;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
+
+int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
+{
+	perf_guest_cbs = NULL;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
+
 /*
  * Output
  */

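(Illustrative sketch, not from the commit: a self-contained user-space model of the single-consumer callback registration above, showing how a PMU interrupt path can attribute a sample once KVM has registered its callbacks. Everything named fake_* is a stand-in.)

#include <stddef.h>
#include <stdio.h>

/* Modeled after struct perf_guest_info_callbacks in the hunks above. */
struct guest_cbs {
	int (*is_in_guest)(void);
	int (*is_user_mode)(void);
	unsigned long (*get_guest_ip)(void);
};

static struct guest_cbs *guest_cbs;	/* one consumer, as the kernel comment assumes */

static int fake_in_guest(void)		{ return 1; }	/* pretend a vCPU is loaded */
static int fake_user_mode(void)		{ return 0; }
static unsigned long fake_ip(void)	{ return 0xc0de; }

static struct guest_cbs kvm_like_cbs = {
	.is_in_guest	= fake_in_guest,
	.is_user_mode	= fake_user_mode,
	.get_guest_ip	= fake_ip,
};

/* Roughly the question a PMU interrupt handler can now ask. */
static const char *sample_origin(void)
{
	if (guest_cbs && guest_cbs->is_in_guest())
		return guest_cbs->is_user_mode() ? "guest user" : "guest kernel";
	return "host";
}

int main(void)
{
	guest_cbs = &kvm_like_cbs;	/* perf_register_guest_info_callbacks() */
	printf("sample attributed to: %s\n", sample_origin());
	guest_cbs = NULL;		/* perf_unregister_guest_info_callbacks() */
	return 0;
}
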
@@ -3743,7 +3770,7 @@ void __perf_event_mmap(struct vm_area_struct *vma)
 		.event_id  = {
 			.header = {
 				.type = PERF_RECORD_MMAP,
-				.misc = 0,
+				.misc = PERF_RECORD_MISC_USER,
 				/* .size */
 			},
 			/* .pid */

@@ -3961,36 +3988,6 @@ static void perf_swevent_add(struct perf_event *event, u64 nr,
 	perf_swevent_overflow(event, 0, nmi, data, regs);
 }
 
-static int perf_swevent_is_counting(struct perf_event *event)
-{
-	/*
-	 * The event is active, we're good!
-	 */
-	if (event->state == PERF_EVENT_STATE_ACTIVE)
-		return 1;
-
-	/*
-	 * The event is off/error, not counting.
-	 */
-	if (event->state != PERF_EVENT_STATE_INACTIVE)
-		return 0;
-
-	/*
-	 * The event is inactive, if the context is active
-	 * we're part of a group that didn't make it on the 'pmu',
-	 * not counting.
-	 */
-	if (event->ctx->is_active)
-		return 0;
-
-	/*
-	 * We're inactive and the context is too, this means the
-	 * task is scheduled out, we're counting events that happen
-	 * to us, like migration events.
-	 */
-	return 1;
-}
-
 static int perf_tp_event_match(struct perf_event *event,
 				struct perf_sample_data *data);
 

@@ -4014,12 +4011,6 @@ static int perf_swevent_match(struct perf_event *event,
 				struct perf_sample_data *data,
 				struct pt_regs *regs)
 {
-	if (event->cpu != -1 && event->cpu != smp_processor_id())
-		return 0;
-
-	if (!perf_swevent_is_counting(event))
-		return 0;
-
 	if (event->attr.type != type)
 		return 0;
 

@@ -4036,18 +4027,53 @@ static int perf_swevent_match(struct perf_event *event,
 	return 1;
 }
 
-static void perf_swevent_ctx_event(struct perf_event_context *ctx,
-				   enum perf_type_id type,
-				   u32 event_id, u64 nr, int nmi,
-				   struct perf_sample_data *data,
-				   struct pt_regs *regs)
+static inline u64 swevent_hash(u64 type, u32 event_id)
 {
-	struct perf_event *event;
+	u64 val = event_id | (type << 32);
 
-	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
+	return hash_64(val, SWEVENT_HLIST_BITS);
+}
+
+static struct hlist_head *
+find_swevent_head(struct perf_cpu_context *ctx, u64 type, u32 event_id)
+{
+	u64 hash;
+	struct swevent_hlist *hlist;
+
+	hash = swevent_hash(type, event_id);
+
+	hlist = rcu_dereference(ctx->swevent_hlist);
+	if (!hlist)
+		return NULL;
+
+	return &hlist->heads[hash];
+}
+
+static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
+			     u64 nr, int nmi,
+			     struct perf_sample_data *data,
+			     struct pt_regs *regs)
+{
+	struct perf_cpu_context *cpuctx;
+	struct perf_event *event;
+	struct hlist_node *node;
+	struct hlist_head *head;
+
+	cpuctx = &__get_cpu_var(perf_cpu_context);
+
+	rcu_read_lock();
+
+	head = find_swevent_head(cpuctx, type, event_id);
+
+	if (!head)
+		goto end;
+
+	hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
 		if (perf_swevent_match(event, type, event_id, data, regs))
 			perf_swevent_add(event, nr, nmi, data, regs);
 	}
+end:
+	rcu_read_unlock();
 }
 
 int perf_swevent_get_recursion_context(void)

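(Illustrative sketch, not from the commit: the bucket selection performed by swevent_hash() above, with a multiplicative stand-in for the kernel's hash_64(). The golden-ratio constant here is an assumption for illustration, not the kernel's exact implementation.)

#include <stdio.h>
#include <stdint.h>

#define SWEVENT_HLIST_BITS 8

/* Stand-in for the kernel's hash_64(): multiplicative hashing by a
 * 64-bit golden-ratio constant, keeping only the top bits. */
static uint64_t hash_64(uint64_t val, unsigned int bits)
{
	return (val * 0x9e3779b97f4a7c15ULL) >> (64 - bits);
}

/* Mirrors swevent_hash() above: event type in the high word, id in the low. */
static uint64_t swevent_hash(uint64_t type, uint32_t event_id)
{
	uint64_t val = event_id | (type << 32);

	return hash_64(val, SWEVENT_HLIST_BITS);
}

int main(void)
{
	/* e.g. PERF_TYPE_SOFTWARE = 1, PERF_COUNT_SW_PAGE_FAULTS = 2 */
	printf("bucket = %llu of %d\n",
	       (unsigned long long)swevent_hash(1, 2), 1 << SWEVENT_HLIST_BITS);
	return 0;
}
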
@@ -4085,27 +4111,6 @@ void perf_swevent_put_recursion_context(int rctx)
 }
 EXPORT_SYMBOL_GPL(perf_swevent_put_recursion_context);
 
-static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
-			     u64 nr, int nmi,
-			     struct perf_sample_data *data,
-			     struct pt_regs *regs)
-{
-	struct perf_cpu_context *cpuctx;
-	struct perf_event_context *ctx;
-
-	cpuctx = &__get_cpu_var(perf_cpu_context);
-	rcu_read_lock();
-	perf_swevent_ctx_event(&cpuctx->ctx, type, event_id,
-				 nr, nmi, data, regs);
-	/*
-	 * doesn't really matter which of the child contexts the
-	 * events ends up in.
-	 */
-	ctx = rcu_dereference(current->perf_event_ctxp);
-	if (ctx)
-		perf_swevent_ctx_event(ctx, type, event_id, nr, nmi, data, regs);
-	rcu_read_unlock();
-}
-
 void __perf_sw_event(u32 event_id, u64 nr, int nmi,
 			    struct pt_regs *regs, u64 addr)

|
||||||
static int perf_swevent_enable(struct perf_event *event)
|
static int perf_swevent_enable(struct perf_event *event)
|
||||||
{
|
{
|
||||||
struct hw_perf_event *hwc = &event->hw;
|
struct hw_perf_event *hwc = &event->hw;
|
||||||
|
struct perf_cpu_context *cpuctx;
|
||||||
|
struct hlist_head *head;
|
||||||
|
|
||||||
|
cpuctx = &__get_cpu_var(perf_cpu_context);
|
||||||
|
|
||||||
if (hwc->sample_period) {
|
if (hwc->sample_period) {
|
||||||
hwc->last_period = hwc->sample_period;
|
hwc->last_period = hwc->sample_period;
|
||||||
perf_swevent_set_period(event);
|
perf_swevent_set_period(event);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
head = find_swevent_head(cpuctx, event->attr.type, event->attr.config);
|
||||||
|
if (WARN_ON_ONCE(!head))
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
hlist_add_head_rcu(&event->hlist_entry, head);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void perf_swevent_disable(struct perf_event *event)
|
static void perf_swevent_disable(struct perf_event *event)
|
||||||
{
|
{
|
||||||
|
hlist_del_rcu(&event->hlist_entry);
|
||||||
}
|
}
|
||||||
|
|
||||||
static const struct pmu perf_ops_generic = {
|
static const struct pmu perf_ops_generic = {
|
||||||
|
@@ -4168,15 +4185,8 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
 	perf_sample_data_init(&data, 0);
 	data.period = event->hw.last_period;
 	regs = get_irq_regs();
-	/*
-	 * In case we exclude kernel IPs or are somehow not in interrupt
-	 * context, provide the next best thing, the user IP.
-	 */
-	if ((event->attr.exclude_kernel || !regs) &&
-			!event->attr.exclude_user)
-		regs = task_pt_regs(current);
 
-	if (regs) {
+	if (regs && !perf_exclude_event(event, regs)) {
 		if (!(event->attr.exclude_idle && current->pid == 0))
 			if (perf_event_overflow(event, 0, &data, regs))
 				ret = HRTIMER_NORESTART;

@@ -4324,6 +4334,105 @@ static const struct pmu perf_ops_task_clock = {
 	.read		= task_clock_perf_event_read,
 };
 
+static void swevent_hlist_release_rcu(struct rcu_head *rcu_head)
+{
+	struct swevent_hlist *hlist;
+
+	hlist = container_of(rcu_head, struct swevent_hlist, rcu_head);
+	kfree(hlist);
+}
+
+static void swevent_hlist_release(struct perf_cpu_context *cpuctx)
+{
+	struct swevent_hlist *hlist;
+
+	if (!cpuctx->swevent_hlist)
+		return;
+
+	hlist = cpuctx->swevent_hlist;
+	rcu_assign_pointer(cpuctx->swevent_hlist, NULL);
+	call_rcu(&hlist->rcu_head, swevent_hlist_release_rcu);
+}
+
+static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
+{
+	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
+
+	mutex_lock(&cpuctx->hlist_mutex);
+
+	if (!--cpuctx->hlist_refcount)
+		swevent_hlist_release(cpuctx);
+
+	mutex_unlock(&cpuctx->hlist_mutex);
+}
+
+static void swevent_hlist_put(struct perf_event *event)
+{
+	int cpu;
+
+	if (event->cpu != -1) {
+		swevent_hlist_put_cpu(event, event->cpu);
+		return;
+	}
+
+	for_each_possible_cpu(cpu)
+		swevent_hlist_put_cpu(event, cpu);
+}
+
+static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
+{
+	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
+	int err = 0;
+
+	mutex_lock(&cpuctx->hlist_mutex);
+
+	if (!cpuctx->swevent_hlist && cpu_online(cpu)) {
+		struct swevent_hlist *hlist;
+
+		hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
+		if (!hlist) {
+			err = -ENOMEM;
+			goto exit;
+		}
+		rcu_assign_pointer(cpuctx->swevent_hlist, hlist);
+	}
+	cpuctx->hlist_refcount++;
+ exit:
+	mutex_unlock(&cpuctx->hlist_mutex);
+
+	return err;
+}
+
+static int swevent_hlist_get(struct perf_event *event)
+{
+	int err;
+	int cpu, failed_cpu;
+
+	if (event->cpu != -1)
+		return swevent_hlist_get_cpu(event, event->cpu);
+
+	get_online_cpus();
+	for_each_possible_cpu(cpu) {
+		err = swevent_hlist_get_cpu(event, cpu);
+		if (err) {
+			failed_cpu = cpu;
+			goto fail;
+		}
+	}
+	put_online_cpus();
+
+	return 0;
+ fail:
+	for_each_possible_cpu(cpu) {
+		if (cpu == failed_cpu)
+			break;
+		swevent_hlist_put_cpu(event, cpu);
+	}
+
+	put_online_cpus();
+	return err;
+}
+
 #ifdef CONFIG_EVENT_TRACING
 
 void perf_tp_event(int event_id, u64 addr, u64 count, void *record,

@@ -4357,10 +4466,13 @@ static int perf_tp_event_match(struct perf_event *event,
 static void tp_perf_event_destroy(struct perf_event *event)
 {
 	perf_trace_disable(event->attr.config);
+	swevent_hlist_put(event);
 }
 
 static const struct pmu *tp_perf_event_init(struct perf_event *event)
 {
+	int err;
+
 	/*
 	 * Raw tracepoint data is a severe data leak, only allow root to
 	 * have these.

@@ -4374,6 +4486,11 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event)
 		return NULL;
 
 	event->destroy = tp_perf_event_destroy;
+	err = swevent_hlist_get(event);
+	if (err) {
+		perf_trace_disable(event->attr.config);
+		return ERR_PTR(err);
+	}
 
 	return &perf_ops_generic;
 }

@@ -4474,6 +4591,7 @@ static void sw_perf_event_destroy(struct perf_event *event)
 	WARN_ON(event->parent);
 
 	atomic_dec(&perf_swevent_enabled[event_id]);
+	swevent_hlist_put(event);
 }
 
 static const struct pmu *sw_perf_event_init(struct perf_event *event)

@@ -4512,6 +4630,12 @@ static const struct pmu *sw_perf_event_init(struct perf_event *event)
 	case PERF_COUNT_SW_ALIGNMENT_FAULTS:
 	case PERF_COUNT_SW_EMULATION_FAULTS:
 		if (!event->parent) {
+			int err;
+
+			err = swevent_hlist_get(event);
+			if (err)
+				return ERR_PTR(err);
+
 			atomic_inc(&perf_swevent_enabled[event_id]);
 			event->destroy = sw_perf_event_destroy;
 		}

@@ -5384,6 +5508,7 @@ static void __init perf_event_init_all_cpus(void)
 
 	for_each_possible_cpu(cpu) {
 		cpuctx = &per_cpu(perf_cpu_context, cpu);
+		mutex_init(&cpuctx->hlist_mutex);
 		__perf_event_init_context(&cpuctx->ctx, NULL);
 	}
 }

@@ -5397,6 +5522,16 @@ static void __cpuinit perf_event_init_cpu(int cpu)
 	spin_lock(&perf_resource_lock);
 	cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
 	spin_unlock(&perf_resource_lock);
+
+	mutex_lock(&cpuctx->hlist_mutex);
+	if (cpuctx->hlist_refcount > 0) {
+		struct swevent_hlist *hlist;
+
+		hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
+		WARN_ON_ONCE(!hlist);
+		rcu_assign_pointer(cpuctx->swevent_hlist, hlist);
+	}
+	mutex_unlock(&cpuctx->hlist_mutex);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU

@@ -5416,6 +5551,10 @@ static void perf_event_exit_cpu(int cpu)
 	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
 	struct perf_event_context *ctx = &cpuctx->ctx;
 
+	mutex_lock(&cpuctx->hlist_mutex);
+	swevent_hlist_release(cpuctx);
+	mutex_unlock(&cpuctx->hlist_mutex);
+
 	mutex_lock(&ctx->mutex);
 	smp_call_function_single(cpu, __perf_event_exit_cpu, NULL, 1);
 	mutex_unlock(&ctx->mutex);

@@ -76,7 +76,6 @@ void __ptrace_unlink(struct task_struct *child)
 	child->parent = child->real_parent;
 	list_del_init(&child->ptrace_entry);
 
-	arch_ptrace_untrace(child);
 	if (task_is_traced(child))
 		ptrace_untrace(child);
 }

@@ -2077,49 +2077,6 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
 	return 1;
 }
 
-/*
- * wait_task_context_switch -	wait for a thread to complete at least one
- *				context switch.
- *
- * @p must not be current.
- */
-void wait_task_context_switch(struct task_struct *p)
-{
-	unsigned long nvcsw, nivcsw, flags;
-	int running;
-	struct rq *rq;
-
-	nvcsw	= p->nvcsw;
-	nivcsw	= p->nivcsw;
-	for (;;) {
-		/*
-		 * The runqueue is assigned before the actual context
-		 * switch. We need to take the runqueue lock.
-		 *
-		 * We could check initially without the lock but it is
-		 * very likely that we need to take the lock in every
-		 * iteration.
-		 */
-		rq = task_rq_lock(p, &flags);
-		running = task_running(rq, p);
-		task_rq_unlock(rq, &flags);
-
-		if (likely(!running))
-			break;
-		/*
-		 * The switch count is incremented before the actual
-		 * context switch. We thus wait for two switches to be
-		 * sure at least one completed.
-		 */
-		if ((p->nvcsw - nvcsw) > 1)
-			break;
-		if ((p->nivcsw - nivcsw) > 1)
-			break;
-
-		cpu_relax();
-	}
-}
-
 /*
  * wait_task_inactive - wait for a thread to unschedule.
  *

@@ -44,9 +44,6 @@ config HAVE_FTRACE_MCOUNT_RECORD
 	help
 	  See Documentation/trace/ftrace-design.txt
 
-config HAVE_HW_BRANCH_TRACER
-	bool
-
 config HAVE_SYSCALL_TRACEPOINTS
 	bool
 	help

@@ -374,14 +371,6 @@ config STACK_TRACER
 
 	  Say N if unsure.
 
-config HW_BRANCH_TRACER
-	depends on HAVE_HW_BRANCH_TRACER
-	bool "Trace hw branches"
-	select GENERIC_TRACER
-	help
-	  This tracer records all branches on the system in a circular
-	  buffer, giving access to the last N branches for each cpu.
-
 config KMEMTRACE
 	bool "Trace SLAB allocations"
 	select GENERIC_TRACER

@@ -41,7 +41,6 @@ obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
 obj-$(CONFIG_BOOT_TRACER) += trace_boot.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o
 obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o
-obj-$(CONFIG_HW_BRANCH_TRACER) += trace_hw_branches.o
 obj-$(CONFIG_KMEMTRACE) += kmemtrace.o
 obj-$(CONFIG_WORKQUEUE_TRACER) += trace_workqueue.o
 obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o

@@ -34,7 +34,6 @@ enum trace_type {
 	TRACE_GRAPH_RET,
 	TRACE_GRAPH_ENT,
 	TRACE_USER_STACK,
-	TRACE_HW_BRANCHES,
 	TRACE_KMEM_ALLOC,
 	TRACE_KMEM_FREE,
 	TRACE_BLK,

@@ -103,29 +102,17 @@ struct syscall_trace_exit {
 	long			ret;
 };
 
-struct kprobe_trace_entry {
+struct kprobe_trace_entry_head {
 	struct trace_entry	ent;
 	unsigned long		ip;
-	int			nargs;
-	unsigned long		args[];
 };
 
-#define SIZEOF_KPROBE_TRACE_ENTRY(n)			\
-	(offsetof(struct kprobe_trace_entry, args) +	\
-	(sizeof(unsigned long) * (n)))
-
-struct kretprobe_trace_entry {
+struct kretprobe_trace_entry_head {
 	struct trace_entry	ent;
 	unsigned long		func;
 	unsigned long		ret_ip;
-	int			nargs;
-	unsigned long		args[];
 };
 
-#define SIZEOF_KRETPROBE_TRACE_ENTRY(n)			\
-	(offsetof(struct kretprobe_trace_entry, args) +	\
-	(sizeof(unsigned long) * (n)))
-
 /*
  * trace_flag_type is an enumeration that holds different
  * states when a trace occurs. These are:

@ -229,7 +216,6 @@ extern void __ftrace_bad_type(void);
|
||||||
TRACE_GRAPH_ENT); \
|
TRACE_GRAPH_ENT); \
|
||||||
IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \
|
IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \
|
||||||
TRACE_GRAPH_RET); \
|
TRACE_GRAPH_RET); \
|
||||||
IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\
|
|
||||||
IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry, \
|
IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry, \
|
||||||
TRACE_KMEM_ALLOC); \
|
TRACE_KMEM_ALLOC); \
|
||||||
IF_ASSIGN(var, ent, struct kmemtrace_free_entry, \
|
IF_ASSIGN(var, ent, struct kmemtrace_free_entry, \
|
||||||
|
@ -467,8 +453,6 @@ extern int trace_selftest_startup_sysprof(struct tracer *trace,
|
||||||
struct trace_array *tr);
|
struct trace_array *tr);
|
||||||
extern int trace_selftest_startup_branch(struct tracer *trace,
|
extern int trace_selftest_startup_branch(struct tracer *trace,
|
||||||
struct trace_array *tr);
|
struct trace_array *tr);
|
||||||
extern int trace_selftest_startup_hw_branches(struct tracer *trace,
|
|
||||||
struct trace_array *tr);
|
|
||||||
extern int trace_selftest_startup_ksym(struct tracer *trace,
|
extern int trace_selftest_startup_ksym(struct tracer *trace,
|
||||||
struct trace_array *tr);
|
struct trace_array *tr);
|
||||||
#endif /* CONFIG_FTRACE_STARTUP_TEST */
|
#endif /* CONFIG_FTRACE_STARTUP_TEST */
|
||||||
|
|
|
@@ -318,18 +318,6 @@ FTRACE_ENTRY(branch, trace_branch,
 		  __entry->func, __entry->file, __entry->correct)
 );
 
-FTRACE_ENTRY(hw_branch, hw_branch_entry,
-
-	TRACE_HW_BRANCHES,
-
-	F_STRUCT(
-		__field(	u64,	from	)
-		__field(	u64,	to	)
-	),
-
-	F_printk("from: %llx to: %llx", __entry->from, __entry->to)
-);
-
 FTRACE_ENTRY(kmem_alloc, kmemtrace_alloc_entry,
 
 	TRACE_KMEM_ALLOC,
@@ -1,312 +0,0 @@
-/*
- * h/w branch tracer for x86 based on BTS
- *
- * Copyright (C) 2008-2009 Intel Corporation.
- * Markus Metzger <markus.t.metzger@gmail.com>, 2008-2009
- */
-#include <linux/kallsyms.h>
-#include <linux/debugfs.h>
-#include <linux/ftrace.h>
-#include <linux/module.h>
-#include <linux/cpu.h>
-#include <linux/smp.h>
-#include <linux/fs.h>
-
-#include <asm/ds.h>
-
-#include "trace_output.h"
-#include "trace.h"
-
-
-#define BTS_BUFFER_SIZE (1 << 13)
-
-static DEFINE_PER_CPU(struct bts_tracer *, hwb_tracer);
-static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], hwb_buffer);
-
-#define this_tracer per_cpu(hwb_tracer, smp_processor_id())
-
-static int trace_hw_branches_enabled __read_mostly;
-static int trace_hw_branches_suspended __read_mostly;
-static struct trace_array *hw_branch_trace __read_mostly;
-
-
-static void bts_trace_init_cpu(int cpu)
-{
-	per_cpu(hwb_tracer, cpu) =
-		ds_request_bts_cpu(cpu, per_cpu(hwb_buffer, cpu),
-				   BTS_BUFFER_SIZE, NULL, (size_t)-1,
-				   BTS_KERNEL);
-
-	if (IS_ERR(per_cpu(hwb_tracer, cpu)))
-		per_cpu(hwb_tracer, cpu) = NULL;
-}
-
-static int bts_trace_init(struct trace_array *tr)
-{
-	int cpu;
-
-	hw_branch_trace = tr;
-	trace_hw_branches_enabled = 0;
-
-	get_online_cpus();
-	for_each_online_cpu(cpu) {
-		bts_trace_init_cpu(cpu);
-
-		if (likely(per_cpu(hwb_tracer, cpu)))
-			trace_hw_branches_enabled = 1;
-	}
-	trace_hw_branches_suspended = 0;
-	put_online_cpus();
-
-	/* If we could not enable tracing on a single cpu, we fail. */
-	return trace_hw_branches_enabled ? 0 : -EOPNOTSUPP;
-}
-
-static void bts_trace_reset(struct trace_array *tr)
-{
-	int cpu;
-
-	get_online_cpus();
-	for_each_online_cpu(cpu) {
-		if (likely(per_cpu(hwb_tracer, cpu))) {
-			ds_release_bts(per_cpu(hwb_tracer, cpu));
-			per_cpu(hwb_tracer, cpu) = NULL;
-		}
-	}
-	trace_hw_branches_enabled = 0;
-	trace_hw_branches_suspended = 0;
-	put_online_cpus();
-}
-
-static void bts_trace_start(struct trace_array *tr)
-{
-	int cpu;
-
-	get_online_cpus();
-	for_each_online_cpu(cpu)
-		if (likely(per_cpu(hwb_tracer, cpu)))
-			ds_resume_bts(per_cpu(hwb_tracer, cpu));
-	trace_hw_branches_suspended = 0;
-	put_online_cpus();
-}
-
-static void bts_trace_stop(struct trace_array *tr)
-{
-	int cpu;
-
-	get_online_cpus();
-	for_each_online_cpu(cpu)
-		if (likely(per_cpu(hwb_tracer, cpu)))
-			ds_suspend_bts(per_cpu(hwb_tracer, cpu));
-	trace_hw_branches_suspended = 1;
-	put_online_cpus();
-}
-
-static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb,
-					unsigned long action, void *hcpu)
-{
-	int cpu = (long)hcpu;
-
-	switch (action) {
-	case CPU_ONLINE:
-	case CPU_DOWN_FAILED:
-		/* The notification is sent with interrupts enabled. */
-		if (trace_hw_branches_enabled) {
-			bts_trace_init_cpu(cpu);
-
-			if (trace_hw_branches_suspended &&
-			    likely(per_cpu(hwb_tracer, cpu)))
-				ds_suspend_bts(per_cpu(hwb_tracer, cpu));
-		}
-		break;
-
-	case CPU_DOWN_PREPARE:
-		/* The notification is sent with interrupts enabled. */
-		if (likely(per_cpu(hwb_tracer, cpu))) {
-			ds_release_bts(per_cpu(hwb_tracer, cpu));
-			per_cpu(hwb_tracer, cpu) = NULL;
-		}
-	}
-
-	return NOTIFY_DONE;
-}
-
-static struct notifier_block bts_hotcpu_notifier __cpuinitdata = {
-	.notifier_call = bts_hotcpu_handler
-};
-
-static void bts_trace_print_header(struct seq_file *m)
-{
-	seq_puts(m, "# CPU#        TO  <-  FROM\n");
-}
-
-static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
-{
-	unsigned long symflags = TRACE_ITER_SYM_OFFSET;
-	struct trace_entry *entry = iter->ent;
-	struct trace_seq *seq = &iter->seq;
-	struct hw_branch_entry *it;
-
-	trace_assign_type(it, entry);
-
-	if (entry->type == TRACE_HW_BRANCHES) {
-		if (trace_seq_printf(seq, "%4d  ", iter->cpu) &&
-		    seq_print_ip_sym(seq, it->to, symflags) &&
-		    trace_seq_printf(seq, "\t  <-  ") &&
-		    seq_print_ip_sym(seq, it->from, symflags) &&
-		    trace_seq_printf(seq, "\n"))
-			return TRACE_TYPE_HANDLED;
-		return TRACE_TYPE_PARTIAL_LINE;
-	}
-	return TRACE_TYPE_UNHANDLED;
-}
-
-void trace_hw_branch(u64 from, u64 to)
-{
-	struct ftrace_event_call *call = &event_hw_branch;
-	struct trace_array *tr = hw_branch_trace;
-	struct ring_buffer_event *event;
-	struct ring_buffer *buf;
-	struct hw_branch_entry *entry;
-	unsigned long irq1;
-	int cpu;
-
-	if (unlikely(!tr))
-		return;
-
-	if (unlikely(!trace_hw_branches_enabled))
-		return;
-
-	local_irq_save(irq1);
-	cpu = raw_smp_processor_id();
-	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
-		goto out;
-
-	buf = tr->buffer;
-	event = trace_buffer_lock_reserve(buf, TRACE_HW_BRANCHES,
-					  sizeof(*entry), 0, 0);
-	if (!event)
-		goto out;
-	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, 0, from);
-	entry->ent.type = TRACE_HW_BRANCHES;
-	entry->from = from;
-	entry->to = to;
-	if (!filter_check_discard(call, entry, buf, event))
-		trace_buffer_unlock_commit(buf, event, 0, 0);
-
- out:
-	atomic_dec(&tr->data[cpu]->disabled);
-	local_irq_restore(irq1);
-}
-
-static void trace_bts_at(const struct bts_trace *trace, void *at)
-{
-	struct bts_struct bts;
-	int err = 0;
-
-	WARN_ON_ONCE(!trace->read);
-	if (!trace->read)
-		return;
-
-	err = trace->read(this_tracer, at, &bts);
-	if (err < 0)
-		return;
-
-	switch (bts.qualifier) {
-	case BTS_BRANCH:
-		trace_hw_branch(bts.variant.lbr.from, bts.variant.lbr.to);
-		break;
-	}
-}
-
-/*
- * Collect the trace on the current cpu and write it into the ftrace buffer.
- *
- * pre: tracing must be suspended on the current cpu
- */
-static void trace_bts_cpu(void *arg)
-{
-	struct trace_array *tr = (struct trace_array *)arg;
-	const struct bts_trace *trace;
-	unsigned char *at;
-
-	if (unlikely(!tr))
-		return;
-
-	if (unlikely(atomic_read(&tr->data[raw_smp_processor_id()]->disabled)))
-		return;
-
-	if (unlikely(!this_tracer))
-		return;
-
-	trace = ds_read_bts(this_tracer);
-	if (!trace)
-		return;
-
-	for (at = trace->ds.top; (void *)at < trace->ds.end;
-	     at += trace->ds.size)
-		trace_bts_at(trace, at);
-
-	for (at = trace->ds.begin; (void *)at < trace->ds.top;
-	     at += trace->ds.size)
-		trace_bts_at(trace, at);
-}
-
-static void trace_bts_prepare(struct trace_iterator *iter)
-{
-	int cpu;
-
-	get_online_cpus();
-	for_each_online_cpu(cpu)
-		if (likely(per_cpu(hwb_tracer, cpu)))
-			ds_suspend_bts(per_cpu(hwb_tracer, cpu));
-	/*
-	 * We need to collect the trace on the respective cpu since ftrace
-	 * implicitly adds the record for the current cpu.
-	 * Once that is more flexible, we could collect the data from any cpu.
-	 */
-	on_each_cpu(trace_bts_cpu, iter->tr, 1);
-
-	for_each_online_cpu(cpu)
-		if (likely(per_cpu(hwb_tracer, cpu)))
-			ds_resume_bts(per_cpu(hwb_tracer, cpu));
-	put_online_cpus();
-}
-
-static void trace_bts_close(struct trace_iterator *iter)
-{
-	tracing_reset_online_cpus(iter->tr);
-}
-
-void trace_hw_branch_oops(void)
-{
-	if (this_tracer) {
-		ds_suspend_bts_noirq(this_tracer);
-		trace_bts_cpu(hw_branch_trace);
-		ds_resume_bts_noirq(this_tracer);
-	}
-}
-
-struct tracer bts_tracer __read_mostly =
-{
-	.name		= "hw-branch-tracer",
-	.init		= bts_trace_init,
-	.reset		= bts_trace_reset,
-	.print_header	= bts_trace_print_header,
-	.print_line	= bts_trace_print_line,
-	.start		= bts_trace_start,
-	.stop		= bts_trace_stop,
-	.open		= trace_bts_prepare,
-	.close		= trace_bts_close,
-#ifdef CONFIG_FTRACE_SELFTEST
-	.selftest	= trace_selftest_startup_hw_branches,
-#endif /* CONFIG_FTRACE_SELFTEST */
-};
-
-__init static int init_bts_trace(void)
-{
-	register_hotcpu_notifier(&bts_hotcpu_notifier);
-	return register_tracer(&bts_tracer);
-}
-device_initcall(init_bts_trace);
@@ -29,6 +29,8 @@
 #include <linux/ctype.h>
 #include <linux/ptrace.h>
 #include <linux/perf_event.h>
+#include <linux/stringify.h>
+#include <asm/bitsperlong.h>
 
 #include "trace.h"
 #include "trace_output.h"
@@ -40,7 +42,6 @@
 
 /* Reserved field names */
 #define FIELD_STRING_IP "__probe_ip"
-#define FIELD_STRING_NARGS "__probe_nargs"
 #define FIELD_STRING_RETIP "__probe_ret_ip"
 #define FIELD_STRING_FUNC "__probe_func"
 
@@ -52,56 +53,102 @@ const char *reserved_field_names[] = {
 	"common_tgid",
 	"common_lock_depth",
 	FIELD_STRING_IP,
-	FIELD_STRING_NARGS,
 	FIELD_STRING_RETIP,
 	FIELD_STRING_FUNC,
 };
 
-struct fetch_func {
-	unsigned long (*func)(struct pt_regs *, void *);
+/* Printing function type */
+typedef int (*print_type_func_t)(struct trace_seq *, const char *, void *);
+#define PRINT_TYPE_FUNC_NAME(type)	print_type_##type
+#define PRINT_TYPE_FMT_NAME(type)	print_type_format_##type
+
+/* Printing in basic type function template */
+#define DEFINE_BASIC_PRINT_TYPE_FUNC(type, fmt, cast)			\
+static __kprobes int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s,	\
+						const char *name, void *data)\
+{									\
+	return trace_seq_printf(s, " %s=" fmt, name, (cast)*(type *)data);\
+}									\
+static const char PRINT_TYPE_FMT_NAME(type)[] = fmt;
+
+DEFINE_BASIC_PRINT_TYPE_FUNC(u8, "%x", unsigned int)
+DEFINE_BASIC_PRINT_TYPE_FUNC(u16, "%x", unsigned int)
+DEFINE_BASIC_PRINT_TYPE_FUNC(u32, "%lx", unsigned long)
+DEFINE_BASIC_PRINT_TYPE_FUNC(u64, "%llx", unsigned long long)
+DEFINE_BASIC_PRINT_TYPE_FUNC(s8, "%d", int)
+DEFINE_BASIC_PRINT_TYPE_FUNC(s16, "%d", int)
+DEFINE_BASIC_PRINT_TYPE_FUNC(s32, "%ld", long)
+DEFINE_BASIC_PRINT_TYPE_FUNC(s64, "%lld", long long)
+
+/* Data fetch function type */
+typedef void (*fetch_func_t)(struct pt_regs *, void *, void *);
+
+struct fetch_param {
+	fetch_func_t	fn;
 	void *data;
 };
 
-static __kprobes unsigned long call_fetch(struct fetch_func *f,
-					  struct pt_regs *regs)
+static __kprobes void call_fetch(struct fetch_param *fprm,
+				 struct pt_regs *regs, void *dest)
 {
-	return f->func(regs, f->data);
+	return fprm->fn(regs, fprm->data, dest);
 }
 
-/* fetch handlers */
-static __kprobes unsigned long fetch_register(struct pt_regs *regs,
-					      void *offset)
-{
-	return regs_get_register(regs, (unsigned int)((unsigned long)offset));
-}
-
-static __kprobes unsigned long fetch_stack(struct pt_regs *regs,
-					   void *num)
-{
-	return regs_get_kernel_stack_nth(regs,
-					 (unsigned int)((unsigned long)num));
-}
-
-static __kprobes unsigned long fetch_memory(struct pt_regs *regs, void *addr)
-{
-	unsigned long retval;
-
-	if (probe_kernel_address(addr, retval))
-		return 0;
-	return retval;
-}
-
-static __kprobes unsigned long fetch_retvalue(struct pt_regs *regs,
-					      void *dummy)
-{
-	return regs_return_value(regs);
-}
-
-static __kprobes unsigned long fetch_stack_address(struct pt_regs *regs,
-						   void *dummy)
-{
-	return kernel_stack_pointer(regs);
-}
+#define FETCH_FUNC_NAME(kind, type)	fetch_##kind##_##type
+/*
+ * Define macro for basic types - we don't need to define s* types, because
+ * we have to care only about bitwidth at recording time.
+ */
+#define DEFINE_BASIC_FETCH_FUNCS(kind)	\
+DEFINE_FETCH_##kind(u8)			\
+DEFINE_FETCH_##kind(u16)		\
+DEFINE_FETCH_##kind(u32)		\
+DEFINE_FETCH_##kind(u64)
+
+#define CHECK_BASIC_FETCH_FUNCS(kind, fn)	\
+	((FETCH_FUNC_NAME(kind, u8) == fn) ||	\
+	 (FETCH_FUNC_NAME(kind, u16) == fn) ||	\
+	 (FETCH_FUNC_NAME(kind, u32) == fn) ||	\
+	 (FETCH_FUNC_NAME(kind, u64) == fn))
+
+/* Data fetch function templates */
+#define DEFINE_FETCH_reg(type)						\
+static __kprobes void FETCH_FUNC_NAME(reg, type)(struct pt_regs *regs,	\
+					void *offset, void *dest)	\
+{									\
+	*(type *)dest = (type)regs_get_register(regs,			\
+				(unsigned int)((unsigned long)offset));	\
+}
+DEFINE_BASIC_FETCH_FUNCS(reg)
+
+#define DEFINE_FETCH_stack(type)					\
+static __kprobes void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,\
+					  void *offset, void *dest)	\
+{									\
+	*(type *)dest = (type)regs_get_kernel_stack_nth(regs,		\
+				(unsigned int)((unsigned long)offset));	\
+}
+DEFINE_BASIC_FETCH_FUNCS(stack)
+
+#define DEFINE_FETCH_retval(type)					\
+static __kprobes void FETCH_FUNC_NAME(retval, type)(struct pt_regs *regs,\
+					  void *dummy, void *dest)	\
+{									\
+	*(type *)dest = (type)regs_return_value(regs);			\
+}
+DEFINE_BASIC_FETCH_FUNCS(retval)
+
+#define DEFINE_FETCH_memory(type)					\
+static __kprobes void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,\
+					  void *addr, void *dest)	\
+{									\
+	type retval;							\
+	if (probe_kernel_address(addr, retval))				\
+		*(type *)dest = 0;					\
+	else								\
+		*(type *)dest = retval;					\
+}
+DEFINE_BASIC_FETCH_FUNCS(memory)
 
 /* Memory fetching by symbol */
 struct symbol_cache {
@@ -145,51 +192,126 @@ static struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
 	return sc;
 }
 
-static __kprobes unsigned long fetch_symbol(struct pt_regs *regs, void *data)
-{
-	struct symbol_cache *sc = data;
-
-	if (sc->addr)
-		return fetch_memory(regs, (void *)sc->addr);
-	else
-		return 0;
-}
+#define DEFINE_FETCH_symbol(type)					\
+static __kprobes void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs,\
+					  void *data, void *dest)	\
+{									\
+	struct symbol_cache *sc = data;					\
+	if (sc->addr)							\
+		fetch_memory_##type(regs, (void *)sc->addr, dest);	\
+	else								\
+		*(type *)dest = 0;					\
+}
+DEFINE_BASIC_FETCH_FUNCS(symbol)
 
-/* Special indirect memory access interface */
-struct indirect_fetch_data {
-	struct fetch_func orig;
+/* Dereference memory access function */
+struct deref_fetch_param {
+	struct fetch_param orig;
 	long offset;
 };
 
-static __kprobes unsigned long fetch_indirect(struct pt_regs *regs, void *data)
-{
-	struct indirect_fetch_data *ind = data;
-	unsigned long addr;
-
-	addr = call_fetch(&ind->orig, regs);
-	if (addr) {
-		addr += ind->offset;
-		return fetch_memory(regs, (void *)addr);
-	} else
-		return 0;
-}
+#define DEFINE_FETCH_deref(type)					\
+static __kprobes void FETCH_FUNC_NAME(deref, type)(struct pt_regs *regs,\
+					    void *data, void *dest)	\
+{									\
+	struct deref_fetch_param *dprm = data;				\
+	unsigned long addr;						\
+	call_fetch(&dprm->orig, regs, &addr);				\
+	if (addr) {							\
+		addr += dprm->offset;					\
+		fetch_memory_##type(regs, (void *)addr, dest);		\
+	} else								\
+		*(type *)dest = 0;					\
+}
+DEFINE_BASIC_FETCH_FUNCS(deref)
 
-static __kprobes void free_indirect_fetch_data(struct indirect_fetch_data *data)
+static __kprobes void free_deref_fetch_param(struct deref_fetch_param *data)
 {
-	if (data->orig.func == fetch_indirect)
-		free_indirect_fetch_data(data->orig.data);
-	else if (data->orig.func == fetch_symbol)
+	if (CHECK_BASIC_FETCH_FUNCS(deref, data->orig.fn))
+		free_deref_fetch_param(data->orig.data);
+	else if (CHECK_BASIC_FETCH_FUNCS(symbol, data->orig.fn))
 		free_symbol_cache(data->orig.data);
 	kfree(data);
 }
 
+/* Default (unsigned long) fetch type */
+#define __DEFAULT_FETCH_TYPE(t) u##t
+#define _DEFAULT_FETCH_TYPE(t) __DEFAULT_FETCH_TYPE(t)
+#define DEFAULT_FETCH_TYPE _DEFAULT_FETCH_TYPE(BITS_PER_LONG)
+#define DEFAULT_FETCH_TYPE_STR __stringify(DEFAULT_FETCH_TYPE)
+
+#define ASSIGN_FETCH_FUNC(kind, type)	\
+	.kind = FETCH_FUNC_NAME(kind, type)
+
+#define ASSIGN_FETCH_TYPE(ptype, ftype, sign)	\
+	{.name = #ptype,			\
+	 .size = sizeof(ftype),			\
+	 .is_signed = sign,			\
+	 .print = PRINT_TYPE_FUNC_NAME(ptype),	\
+	 .fmt = PRINT_TYPE_FMT_NAME(ptype),	\
+ASSIGN_FETCH_FUNC(reg, ftype),			\
+ASSIGN_FETCH_FUNC(stack, ftype),		\
+ASSIGN_FETCH_FUNC(retval, ftype),		\
+ASSIGN_FETCH_FUNC(memory, ftype),		\
+ASSIGN_FETCH_FUNC(symbol, ftype),		\
+ASSIGN_FETCH_FUNC(deref, ftype),		\
+	}
+
+/* Fetch type information table */
+static const struct fetch_type {
+	const char	*name;		/* Name of type */
+	size_t		size;		/* Byte size of type */
+	int		is_signed;	/* Signed flag */
+	print_type_func_t	print;	/* Print functions */
+	const char	*fmt;		/* Fromat string */
+	/* Fetch functions */
+	fetch_func_t	reg;
+	fetch_func_t	stack;
+	fetch_func_t	retval;
+	fetch_func_t	memory;
+	fetch_func_t	symbol;
+	fetch_func_t	deref;
+} fetch_type_table[] = {
+	ASSIGN_FETCH_TYPE(u8,  u8,  0),
+	ASSIGN_FETCH_TYPE(u16, u16, 0),
+	ASSIGN_FETCH_TYPE(u32, u32, 0),
+	ASSIGN_FETCH_TYPE(u64, u64, 0),
+	ASSIGN_FETCH_TYPE(s8,  u8,  1),
+	ASSIGN_FETCH_TYPE(s16, u16, 1),
+	ASSIGN_FETCH_TYPE(s32, u32, 1),
+	ASSIGN_FETCH_TYPE(s64, u64, 1),
+};
+
+static const struct fetch_type *find_fetch_type(const char *type)
+{
+	int i;
+
+	if (!type)
+		type = DEFAULT_FETCH_TYPE_STR;
+
+	for (i = 0; i < ARRAY_SIZE(fetch_type_table); i++)
+		if (strcmp(type, fetch_type_table[i].name) == 0)
+			return &fetch_type_table[i];
+	return NULL;
+}
+
+/* Special function : only accept unsigned long */
+static __kprobes void fetch_stack_address(struct pt_regs *regs,
+					  void *dummy, void *dest)
+{
+	*(unsigned long *)dest = kernel_stack_pointer(regs);
+}
+
 /**
  * Kprobe event core functions
  */
 
 struct probe_arg {
-	struct fetch_func	fetch;
-	const char		*name;
+	struct fetch_param	fetch;
+	unsigned int		offset;	/* Offset from argument entry */
+	const char		*name;	/* Name of this argument */
+	const char		*comm;	/* Command of this argument */
+	const struct fetch_type	*type;	/* Type of this argument */
 };
 
 /* Flags for trace_probe */
|
||||||
const char *symbol; /* symbol name */
|
const char *symbol; /* symbol name */
|
||||||
struct ftrace_event_call call;
|
struct ftrace_event_call call;
|
||||||
struct trace_event event;
|
struct trace_event event;
|
||||||
|
ssize_t size; /* trace entry size */
|
||||||
unsigned int nr_args;
|
unsigned int nr_args;
|
||||||
struct probe_arg args[];
|
struct probe_arg args[];
|
||||||
};
|
};
|
||||||
|
@ -212,6 +335,7 @@ struct trace_probe {
|
||||||
(offsetof(struct trace_probe, args) + \
|
(offsetof(struct trace_probe, args) + \
|
||||||
(sizeof(struct probe_arg) * (n)))
|
(sizeof(struct probe_arg) * (n)))
|
||||||
|
|
||||||
|
|
||||||
static __kprobes int probe_is_return(struct trace_probe *tp)
|
static __kprobes int probe_is_return(struct trace_probe *tp)
|
||||||
{
|
{
|
||||||
return tp->rp.handler != NULL;
|
return tp->rp.handler != NULL;
|
||||||
|
@ -222,49 +346,6 @@ static __kprobes const char *probe_symbol(struct trace_probe *tp)
|
||||||
return tp->symbol ? tp->symbol : "unknown";
|
return tp->symbol ? tp->symbol : "unknown";
|
||||||
}
|
}
|
||||||
|
|
||||||
static int probe_arg_string(char *buf, size_t n, struct fetch_func *ff)
|
|
||||||
{
|
|
||||||
int ret = -EINVAL;
|
|
||||||
|
|
||||||
if (ff->func == fetch_register) {
|
|
||||||
const char *name;
|
|
||||||
name = regs_query_register_name((unsigned int)((long)ff->data));
|
|
||||||
ret = snprintf(buf, n, "%%%s", name);
|
|
||||||
} else if (ff->func == fetch_stack)
|
|
||||||
ret = snprintf(buf, n, "$stack%lu", (unsigned long)ff->data);
|
|
||||||
else if (ff->func == fetch_memory)
|
|
||||||
ret = snprintf(buf, n, "@0x%p", ff->data);
|
|
||||||
else if (ff->func == fetch_symbol) {
|
|
||||||
struct symbol_cache *sc = ff->data;
|
|
||||||
if (sc->offset)
|
|
||||||
ret = snprintf(buf, n, "@%s%+ld", sc->symbol,
|
|
||||||
sc->offset);
|
|
||||||
else
|
|
||||||
ret = snprintf(buf, n, "@%s", sc->symbol);
|
|
||||||
} else if (ff->func == fetch_retvalue)
|
|
||||||
ret = snprintf(buf, n, "$retval");
|
|
||||||
else if (ff->func == fetch_stack_address)
|
|
||||||
ret = snprintf(buf, n, "$stack");
|
|
||||||
else if (ff->func == fetch_indirect) {
|
|
||||||
struct indirect_fetch_data *id = ff->data;
|
|
||||||
size_t l = 0;
|
|
||||||
ret = snprintf(buf, n, "%+ld(", id->offset);
|
|
||||||
if (ret >= n)
|
|
||||||
goto end;
|
|
||||||
l += ret;
|
|
||||||
ret = probe_arg_string(buf + l, n - l, &id->orig);
|
|
||||||
if (ret < 0)
|
|
||||||
goto end;
|
|
||||||
l += ret;
|
|
||||||
ret = snprintf(buf + l, n - l, ")");
|
|
||||||
ret += l;
|
|
||||||
}
|
|
||||||
end:
|
|
||||||
if (ret >= n)
|
|
||||||
return -ENOSPC;
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int register_probe_event(struct trace_probe *tp);
|
static int register_probe_event(struct trace_probe *tp);
|
||||||
static void unregister_probe_event(struct trace_probe *tp);
|
static void unregister_probe_event(struct trace_probe *tp);
|
||||||
|
|
||||||
|
@@ -347,11 +428,12 @@ error:
 
 static void free_probe_arg(struct probe_arg *arg)
 {
-	if (arg->fetch.func == fetch_symbol)
+	if (CHECK_BASIC_FETCH_FUNCS(deref, arg->fetch.fn))
+		free_deref_fetch_param(arg->fetch.data);
+	else if (CHECK_BASIC_FETCH_FUNCS(symbol, arg->fetch.fn))
 		free_symbol_cache(arg->fetch.data);
-	else if (arg->fetch.func == fetch_indirect)
-		free_indirect_fetch_data(arg->fetch.data);
 	kfree(arg->name);
+	kfree(arg->comm);
 }
 
 static void free_trace_probe(struct trace_probe *tp)
@@ -457,28 +539,30 @@ static int split_symbol_offset(char *symbol, unsigned long *offset)
 #define PARAM_MAX_ARGS 16
 #define PARAM_MAX_STACK (THREAD_SIZE / sizeof(unsigned long))
 
-static int parse_probe_vars(char *arg, struct fetch_func *ff, int is_return)
+static int parse_probe_vars(char *arg, const struct fetch_type *t,
+			    struct fetch_param *f, int is_return)
 {
 	int ret = 0;
 	unsigned long param;
 
 	if (strcmp(arg, "retval") == 0) {
-		if (is_return) {
-			ff->func = fetch_retvalue;
-			ff->data = NULL;
-		} else
+		if (is_return)
+			f->fn = t->retval;
+		else
 			ret = -EINVAL;
 	} else if (strncmp(arg, "stack", 5) == 0) {
 		if (arg[5] == '\0') {
-			ff->func = fetch_stack_address;
-			ff->data = NULL;
+			if (strcmp(t->name, DEFAULT_FETCH_TYPE_STR) == 0)
+				f->fn = fetch_stack_address;
+			else
+				ret = -EINVAL;
 		} else if (isdigit(arg[5])) {
 			ret = strict_strtoul(arg + 5, 10, &param);
 			if (ret || param > PARAM_MAX_STACK)
 				ret = -EINVAL;
 			else {
-				ff->func = fetch_stack;
-				ff->data = (void *)param;
+				f->fn = t->stack;
+				f->data = (void *)param;
 			}
 		} else
 			ret = -EINVAL;
@@ -488,7 +572,8 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t,
 }
 
 /* Recursive argument parser */
-static int __parse_probe_arg(char *arg, struct fetch_func *ff, int is_return)
+static int __parse_probe_arg(char *arg, const struct fetch_type *t,
+			     struct fetch_param *f, int is_return)
 {
 	int ret = 0;
 	unsigned long param;
@@ -497,13 +582,13 @@ static int __parse_probe_arg(char *arg, const struct fetch_type *t,
 
 	switch (arg[0]) {
 	case '$':
-		ret = parse_probe_vars(arg + 1, ff, is_return);
+		ret = parse_probe_vars(arg + 1, t, f, is_return);
 		break;
 	case '%':	/* named register */
 		ret = regs_query_register_offset(arg + 1);
 		if (ret >= 0) {
-			ff->func = fetch_register;
-			ff->data = (void *)(unsigned long)ret;
+			f->fn = t->reg;
+			f->data = (void *)(unsigned long)ret;
 			ret = 0;
 		}
 		break;
@@ -512,26 +597,22 @@ static int __parse_probe_arg(char *arg, const struct fetch_type *t,
 			ret = strict_strtoul(arg + 1, 0, &param);
 			if (ret)
 				break;
-			ff->func = fetch_memory;
-			ff->data = (void *)param;
+			f->fn = t->memory;
+			f->data = (void *)param;
 		} else {
 			ret = split_symbol_offset(arg + 1, &offset);
 			if (ret)
 				break;
-			ff->data = alloc_symbol_cache(arg + 1, offset);
-			if (ff->data)
-				ff->func = fetch_symbol;
-			else
-				ret = -EINVAL;
+			f->data = alloc_symbol_cache(arg + 1, offset);
+			if (f->data)
+				f->fn = t->symbol;
 		}
 		break;
-	case '+':	/* indirect memory */
+	case '+':	/* deref memory */
 	case '-':
 		tmp = strchr(arg, '(');
-		if (!tmp) {
-			ret = -EINVAL;
+		if (!tmp)
 			break;
-		}
 		*tmp = '\0';
 		ret = strict_strtol(arg + 1, 0, &offset);
 		if (ret)
@@ -541,38 +622,58 @@ static int __parse_probe_arg(char *arg, const struct fetch_type *t,
 		arg = tmp + 1;
 		tmp = strrchr(arg, ')');
 		if (tmp) {
-			struct indirect_fetch_data *id;
+			struct deref_fetch_param *dprm;
+			const struct fetch_type *t2 = find_fetch_type(NULL);
 			*tmp = '\0';
-			id = kzalloc(sizeof(struct indirect_fetch_data),
-				     GFP_KERNEL);
-			if (!id)
+			dprm = kzalloc(sizeof(struct deref_fetch_param),
+				       GFP_KERNEL);
+			if (!dprm)
 				return -ENOMEM;
-			id->offset = offset;
-			ret = __parse_probe_arg(arg, &id->orig, is_return);
+			dprm->offset = offset;
+			ret = __parse_probe_arg(arg, t2, &dprm->orig,
+						is_return);
 			if (ret)
-				kfree(id);
+				kfree(dprm);
 			else {
-				ff->func = fetch_indirect;
-				ff->data = (void *)id;
+				f->fn = t->deref;
+				f->data = (void *)dprm;
 			}
-		} else
-			ret = -EINVAL;
+		}
 		break;
-	default:
-		/* TODO: support custom handler */
-		ret = -EINVAL;
 	}
+	if (!ret && !f->fn)
+		ret = -EINVAL;
 	return ret;
 }
 
 /* String length checking wrapper */
-static int parse_probe_arg(char *arg, struct fetch_func *ff, int is_return)
+static int parse_probe_arg(char *arg, struct trace_probe *tp,
+			   struct probe_arg *parg, int is_return)
 {
+	const char *t;
+
 	if (strlen(arg) > MAX_ARGSTR_LEN) {
 		pr_info("Argument is too long.: %s\n", arg);
 		return -ENOSPC;
 	}
-	return __parse_probe_arg(arg, ff, is_return);
+	parg->comm = kstrdup(arg, GFP_KERNEL);
+	if (!parg->comm) {
+		pr_info("Failed to allocate memory for command '%s'.\n", arg);
+		return -ENOMEM;
+	}
+	t = strchr(parg->comm, ':');
+	if (t) {
+		arg[t - parg->comm] = '\0';
+		t++;
+	}
+	parg->type = find_fetch_type(t);
+	if (!parg->type) {
+		pr_info("Unsupported type: %s\n", t);
+		return -EINVAL;
+	}
+	parg->offset = tp->size;
+	tp->size += parg->type->size;
+	return __parse_probe_arg(arg, parg->type, &parg->fetch, is_return);
 }
 
 /* Return 1 if name is reserved or already used by another argument */
@@ -602,15 +703,18 @@ static int create_trace_probe(int argc, char **argv)
  *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
  *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
  *  %REG	: fetch register REG
- * Indirect memory fetch:
+ * Dereferencing memory fetch:
  *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
  * Alias name of args:
  *  NAME=FETCHARG : set NAME as alias of FETCHARG.
+ * Type of args:
+ *  FETCHARG:TYPE : use TYPE instead of unsigned long.
  */
 	struct trace_probe *tp;
 	int i, ret = 0;
 	int is_return = 0, is_delete = 0;
-	char *symbol = NULL, *event = NULL, *arg = NULL, *group = NULL;
+	char *symbol = NULL, *event = NULL, *group = NULL;
+	char *arg, *tmp;
 	unsigned long offset = 0;
 	void *addr = NULL;
 	char buf[MAX_EVENT_NAME_LEN];
@@ -723,13 +827,6 @@ static int create_trace_probe(int argc, char **argv)
 		else
 			arg = argv[i];
 
-		if (conflict_field_name(argv[i], tp->args, i)) {
-			pr_info("Argument%d name '%s' conflicts with "
-				"another field.\n", i, argv[i]);
-			ret = -EINVAL;
-			goto error;
-		}
-
 		tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
 		if (!tp->args[i].name) {
 			pr_info("Failed to allocate argument%d name '%s'.\n",
@@ -737,9 +834,19 @@ static int create_trace_probe(int argc, char **argv)
 			ret = -ENOMEM;
 			goto error;
 		}
+		tmp = strchr(tp->args[i].name, ':');
+		if (tmp)
+			*tmp = '_';	/* convert : to _ */
+
+		if (conflict_field_name(tp->args[i].name, tp->args, i)) {
+			pr_info("Argument%d name '%s' conflicts with "
+				"another field.\n", i, argv[i]);
+			ret = -EINVAL;
+			goto error;
+		}
 
 		/* Parse fetch argument */
-		ret = parse_probe_arg(arg, &tp->args[i].fetch, is_return);
+		ret = parse_probe_arg(arg, tp, &tp->args[i], is_return);
 		if (ret) {
 			pr_info("Parse error at argument%d. (%d)\n", i, ret);
 			kfree(tp->args[i].name);
@@ -794,8 +901,7 @@ static void probes_seq_stop(struct seq_file *m, void *v)
 static int probes_seq_show(struct seq_file *m, void *v)
 {
 	struct trace_probe *tp = v;
-	int i, ret;
-	char buf[MAX_ARGSTR_LEN + 1];
+	int i;
 
 	seq_printf(m, "%c", probe_is_return(tp) ? 'r' : 'p');
 	seq_printf(m, ":%s/%s", tp->call.system, tp->call.name);
@@ -807,15 +913,10 @@ static int probes_seq_show(struct seq_file *m, void *v)
 	else
 		seq_printf(m, " %s", probe_symbol(tp));
 
-	for (i = 0; i < tp->nr_args; i++) {
-		ret = probe_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i].fetch);
-		if (ret < 0) {
-			pr_warning("Argument%d decoding error(%d).\n", i, ret);
-			return ret;
-		}
-		seq_printf(m, " %s=%s", tp->args[i].name, buf);
-	}
+	for (i = 0; i < tp->nr_args; i++)
+		seq_printf(m, " %s=%s", tp->args[i].name, tp->args[i].comm);
 	seq_printf(m, "\n");
 
 	return 0;
 }
@@ -945,9 +1046,10 @@ static const struct file_operations kprobe_profile_ops = {
 static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
 {
 	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
-	struct kprobe_trace_entry *entry;
+	struct kprobe_trace_entry_head *entry;
 	struct ring_buffer_event *event;
 	struct ring_buffer *buffer;
+	u8 *data;
 	int size, i, pc;
 	unsigned long irq_flags;
 	struct ftrace_event_call *call = &tp->call;
@@ -957,7 +1059,7 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
 	local_save_flags(irq_flags);
 	pc = preempt_count();
 
-	size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
+	size = sizeof(*entry) + tp->size;
 
 	event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
 						  irq_flags, pc);
@@ -965,10 +1067,10 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
 		return;
 
 	entry = ring_buffer_event_data(event);
-	entry->nargs = tp->nr_args;
 	entry->ip = (unsigned long)kp->addr;
+	data = (u8 *)&entry[1];
 	for (i = 0; i < tp->nr_args; i++)
-		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
+		call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset);
 
 	if (!filter_current_check_discard(buffer, call, entry, event))
 		trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
@@ -979,9 +1081,10 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
 					  struct pt_regs *regs)
 {
 	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
-	struct kretprobe_trace_entry *entry;
+	struct kretprobe_trace_entry_head *entry;
 	struct ring_buffer_event *event;
 	struct ring_buffer *buffer;
+	u8 *data;
 	int size, i, pc;
 	unsigned long irq_flags;
 	struct ftrace_event_call *call = &tp->call;
@@ -989,7 +1092,7 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
 	local_save_flags(irq_flags);
 	pc = preempt_count();
 
-	size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
+	size = sizeof(*entry) + tp->size;
 
 	event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
 						  irq_flags, pc);
@@ -997,11 +1100,11 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
 		return;
 
 	entry = ring_buffer_event_data(event);
-	entry->nargs = tp->nr_args;
 	entry->func = (unsigned long)tp->rp.kp.addr;
 	entry->ret_ip = (unsigned long)ri->ret_addr;
+	data = (u8 *)&entry[1];
 	for (i = 0; i < tp->nr_args; i++)
-		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
+		call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset);
 
 	if (!filter_current_check_discard(buffer, call, entry, event))
 		trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
@@ -1011,13 +1114,14 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
 enum print_line_t
 print_kprobe_event(struct trace_iterator *iter, int flags)
 {
-	struct kprobe_trace_entry *field;
+	struct kprobe_trace_entry_head *field;
 	struct trace_seq *s = &iter->seq;
 	struct trace_event *event;
 	struct trace_probe *tp;
+	u8 *data;
 	int i;
 
-	field = (struct kprobe_trace_entry *)iter->ent;
+	field = (struct kprobe_trace_entry_head *)iter->ent;
 	event = ftrace_find_event(field->ent.type);
 	tp = container_of(event, struct trace_probe, event);
 
@@ -1030,9 +1134,10 @@ print_kprobe_event(struct trace_iterator *iter, int flags)
 	if (!trace_seq_puts(s, ")"))
 		goto partial;
 
-	for (i = 0; i < field->nargs; i++)
-		if (!trace_seq_printf(s, " %s=%lx",
-				      tp->args[i].name, field->args[i]))
+	data = (u8 *)&field[1];
+	for (i = 0; i < tp->nr_args; i++)
+		if (!tp->args[i].type->print(s, tp->args[i].name,
+					     data + tp->args[i].offset))
 			goto partial;
 
 	if (!trace_seq_puts(s, "\n"))
@@ -1046,13 +1151,14 @@ partial:
 enum print_line_t
 print_kretprobe_event(struct trace_iterator *iter, int flags)
 {
-	struct kretprobe_trace_entry *field;
+	struct kretprobe_trace_entry_head *field;
 	struct trace_seq *s = &iter->seq;
 	struct trace_event *event;
 	struct trace_probe *tp;
+	u8 *data;
 	int i;
 
-	field = (struct kretprobe_trace_entry *)iter->ent;
+	field = (struct kretprobe_trace_entry_head *)iter->ent;
 	event = ftrace_find_event(field->ent.type);
 	tp = container_of(event, struct trace_probe, event);
 
@@ -1071,9 +1177,10 @@ print_kretprobe_event(struct trace_iterator *iter, int flags)
 	if (!trace_seq_puts(s, ")"))
 		goto partial;
 
-	for (i = 0; i < field->nargs; i++)
-		if (!trace_seq_printf(s, " %s=%lx",
-				      tp->args[i].name, field->args[i]))
+	data = (u8 *)&field[1];
+	for (i = 0; i < tp->nr_args; i++)
+		if (!tp->args[i].type->print(s, tp->args[i].name,
+					     data + tp->args[i].offset))
 			goto partial;
 
 	if (!trace_seq_puts(s, "\n"))
@@ -1129,29 +1236,43 @@ static int probe_event_raw_init(struct ftrace_event_call *event_call)
 static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
 {
 	int ret, i;
-	struct kprobe_trace_entry field;
+	struct kprobe_trace_entry_head field;
 	struct trace_probe *tp = (struct trace_probe *)event_call->data;
 
 	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
-	DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1);
 	/* Set argument names as fields */
-	for (i = 0; i < tp->nr_args; i++)
-		DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0);
+	for (i = 0; i < tp->nr_args; i++) {
+		ret = trace_define_field(event_call, tp->args[i].type->name,
+					 tp->args[i].name,
+					 sizeof(field) + tp->args[i].offset,
+					 tp->args[i].type->size,
+					 tp->args[i].type->is_signed,
+					 FILTER_OTHER);
+		if (ret)
+			return ret;
+	}
 	return 0;
 }
 
 static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
 {
 	int ret, i;
-	struct kretprobe_trace_entry field;
+	struct kretprobe_trace_entry_head field;
 	struct trace_probe *tp = (struct trace_probe *)event_call->data;
 
 	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
 	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
-	DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1);
 	/* Set argument names as fields */
-	for (i = 0; i < tp->nr_args; i++)
-		DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0);
+	for (i = 0; i < tp->nr_args; i++) {
+		ret = trace_define_field(event_call, tp->args[i].type->name,
					 tp->args[i].name,
+					 sizeof(field) + tp->args[i].offset,
+					 tp->args[i].type->size,
+					 tp->args[i].type->is_signed,
+					 FILTER_OTHER);
+		if (ret)
+			return ret;
+	}
 	return 0;
 }
 
@@ -1176,8 +1297,8 @@ static int __set_print_fmt(struct trace_probe *tp, char *buf, int len)
 	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", fmt);
 
 	for (i = 0; i < tp->nr_args; i++) {
-		pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%%lx",
-				tp->args[i].name);
+		pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%s",
+				tp->args[i].name, tp->args[i].type->fmt);
 	}
 
 	pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg);
@@ -1219,12 +1340,13 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp,
 {
 	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
 	struct ftrace_event_call *call = &tp->call;
-	struct kprobe_trace_entry *entry;
+	struct kprobe_trace_entry_head *entry;
+	u8 *data;
 	int size, __size, i;
 	unsigned long irq_flags;
 	int rctx;
 
-	__size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
+	__size = sizeof(*entry) + tp->size;
 	size = ALIGN(__size + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
 	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
@@ -1235,10 +1357,10 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp,
 	if (!entry)
 		return;
 
-	entry->nargs = tp->nr_args;
 	entry->ip = (unsigned long)kp->addr;
+	data = (u8 *)&entry[1];
 	for (i = 0; i < tp->nr_args; i++)
-		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
+		call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset);
 
 	perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs);
 }
@@ -1249,12 +1371,13 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
 {
 	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
 	struct ftrace_event_call *call = &tp->call;
-	struct kretprobe_trace_entry *entry;
+	struct kretprobe_trace_entry_head *entry;
+	u8 *data;
 	int size, __size, i;
 	unsigned long irq_flags;
 	int rctx;
 
-	__size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
+	__size = sizeof(*entry) + tp->size;
 	size = ALIGN(__size + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
 	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
@@ -1265,11 +1388,11 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
 	if (!entry)
 		return;
 
-	entry->nargs = tp->nr_args;
 	entry->func = (unsigned long)tp->rp.kp.addr;
 	entry->ret_ip = (unsigned long)ri->ret_addr;
+	data = (u8 *)&entry[1];
 	for (i = 0; i < tp->nr_args; i++)
-		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
+		call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset);
 
 	perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1,
 			      irq_flags, regs);
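Taken together, the trace_kprobe.c changes above implement the FETCHARG:TYPE
syntax described in the updated comment block: each argument now records its
fetch type's byte size at a fixed offset behind the entry header instead of in
a fixed unsigned long slot. A minimal usage sketch follows; the probed
function, register, and stack offset are illustrative x86 choices, and debugfs
is assumed to be mounted at /sys/kernel/debug:

	# Probe do_sys_open; fetch one arg as signed 32-bit, one as
	# unsigned 16-bit.
	echo 'p:myprobe do_sys_open dfd=%ax:s32 mode=+4($stack):u16' \
		> /sys/kernel/debug/tracing/kprobe_events
	echo 1 > /sys/kernel/debug/tracing/events/kprobes/myprobe/enable
	cat /sys/kernel/debug/tracing/trace

An unknown type name is rejected by find_fetch_type() with an "Unsupported
type:" message, and omitting :TYPE keeps the old behavior through the default
u32/u64 (BITS_PER_LONG) fetch type.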
@@ -17,7 +17,6 @@ static inline int trace_valid_entry(struct trace_entry *entry)
 	case TRACE_BRANCH:
 	case TRACE_GRAPH_ENT:
 	case TRACE_GRAPH_RET:
-	case TRACE_HW_BRANCHES:
 	case TRACE_KSYM:
 		return 1;
 	}
@@ -755,62 +754,6 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
 }
 #endif /* CONFIG_BRANCH_TRACER */
 
-#ifdef CONFIG_HW_BRANCH_TRACER
-int
-trace_selftest_startup_hw_branches(struct tracer *trace,
-				   struct trace_array *tr)
-{
-	struct trace_iterator *iter;
-	struct tracer tracer;
-	unsigned long count;
-	int ret;
-
-	if (!trace->open) {
-		printk(KERN_CONT "missing open function...");
-		return -1;
-	}
-
-	ret = tracer_init(trace, tr);
-	if (ret) {
-		warn_failed_init_tracer(trace, ret);
-		return ret;
-	}
-
-	/*
-	 * The hw-branch tracer needs to collect the trace from the various
-	 * cpu trace buffers - before tracing is stopped.
-	 */
-	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
-	if (!iter)
-		return -ENOMEM;
-
-	memcpy(&tracer, trace, sizeof(tracer));
-
-	iter->trace = &tracer;
-	iter->tr = tr;
-	iter->pos = -1;
-	mutex_init(&iter->mutex);
-
-	trace->open(iter);
-
-	mutex_destroy(&iter->mutex);
-	kfree(iter);
-
-	tracing_stop();
-
-	ret = trace_test_buffer(tr, &count);
-	trace->reset(tr);
-	tracing_start();
-
-	if (!ret && !count) {
-		printk(KERN_CONT "no entries found..");
-		ret = -1;
-	}
-
-	return ret;
-}
-#endif /* CONFIG_HW_BRANCH_TRACER */
-
 #ifdef CONFIG_KSYM_TRACER
 static int ksym_selftest_dummy;
 
mm/mlock.c (41 changed lines)
@@ -607,44 +607,3 @@ void user_shm_unlock(size_t size, struct user_struct *user)
 	spin_unlock(&shmlock_user_lock);
 	free_uid(user);
 }
-
-int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
-			  size_t size)
-{
-	unsigned long lim, vm, pgsz;
-	int error = -ENOMEM;
-
-	pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
-	down_write(&mm->mmap_sem);
-
-	lim = ACCESS_ONCE(rlim[RLIMIT_AS].rlim_cur) >> PAGE_SHIFT;
-	vm = mm->total_vm + pgsz;
-	if (lim < vm)
-		goto out;
-
-	lim = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur) >> PAGE_SHIFT;
-	vm = mm->locked_vm + pgsz;
-	if (lim < vm)
-		goto out;
-
-	mm->total_vm += pgsz;
-	mm->locked_vm += pgsz;
-
-	error = 0;
-out:
-	up_write(&mm->mmap_sem);
-	return error;
-}
-
-void refund_locked_memory(struct mm_struct *mm, size_t size)
-{
-	unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
-	down_write(&mm->mmap_sem);
-
-	mm->total_vm -= pgsz;
-	mm->locked_vm -= pgsz;
-
-	up_write(&mm->mmap_sem);
-}
@@ -19,12 +19,12 @@ COMMON OPTIONS
 -f::
 --format=::
 Specify format style.
-Current available format styles are,
+Current available format styles are:
 
 'default'::
 Default style. This is mainly for human reading.
 ---------------------
-% perf bench sched pipe # with no style specify
+% perf bench sched pipe # with no style specified
 (executing 1000000 pipe operations between two tasks)
 Total time:5.855 sec
 5.855061 usecs/op
@@ -79,7 +79,7 @@ options (20 sender and receiver processes per group)
 
 Total time:0.308 sec
 
-% perf bench sched messaging -t -g 20 # be multi-thread,with 20 groups
+% perf bench sched messaging -t -g 20 # be multi-thread, with 20 groups
 (20 sender and receiver threads per group)
 (20 groups == 800 threads run)
 
@@ -0,0 +1,67 @@
+perf-kvm(1)
+===========
+
+NAME
+----
+perf-kvm - Tool to trace/measure kvm guest os
+
+SYNOPSIS
+--------
+[verse]
+'perf kvm' [--host] [--guest] [--guestmount=<path>
+	[--guestkallsyms=<path> --guestmodules=<path> | --guestvmlinux=<path>]]
+	{top|record|report|diff|buildid-list}
+'perf kvm' [--host] [--guest] [--guestkallsyms=<path> --guestmodules=<path>
+	| --guestvmlinux=<path>] {top|record|report|diff|buildid-list}
+
+DESCRIPTION
+-----------
+There are a couple of variants of perf kvm:
+
+  'perf kvm [options] top <command>' generates and displays
+  a performance counter profile of a guest os in realtime
+  of an arbitrary workload.
+
+  'perf kvm record <command>' records the performance counter profile
+  of an arbitrary workload and saves it into a perf data file. If both
+  --host and --guest are input, the perf data file name is perf.data.kvm.
+  If there is no --host but --guest, the file name is perf.data.guest.
+  If there is no --guest but --host, the file name is perf.data.host.
+
+  'perf kvm report' displays the performance counter profile information
+  recorded via perf kvm record.
+
+  'perf kvm diff' displays the performance difference between two perf.data
+  files captured via perf record.
+
+  'perf kvm buildid-list' displays the buildids found in a perf data file,
+  so that other tools can be used to fetch packages with matching symbol tables
+  for use by perf report.
+
+OPTIONS
+-------
+--host=::
+	Collect host side performance profile.
+--guest=::
+	Collect guest side performance profile.
+--guestmount=<path>::
+	Guest os root file system mount directory. Users mount guest os
+	root directories under <path> by a specific filesystem access method,
+	typically sshfs. For example, start two guest os; one's pid is 8888
+	and the other's is 9999.
+	#mkdir ~/guestmount; cd ~/guestmount
+	#sshfs -o allow_other,direct_io -p 5551 localhost:/ 8888/
+	#sshfs -o allow_other,direct_io -p 5552 localhost:/ 9999/
+	#perf kvm --host --guest --guestmount=~/guestmount top
+--guestkallsyms=<path>::
+	Guest os /proc/kallsyms file copy. 'perf kvm' reads it to get guest
+	kernel symbols. Users copy it out from the guest os.
+--guestmodules=<path>::
+	Guest os /proc/modules file copy. 'perf kvm' reads it to get guest
+	kernel module information. Users copy it out from the guest os.
+--guestvmlinux=<path>::
+	Guest os kernel vmlinux.
+
+SEE ALSO
+--------
+linkperf:perf-top[1] perf-record[1] perf-report[1] perf-diff[1] perf-buildid-list[1]
@@ -57,6 +57,11 @@ OPTIONS
 --force::
 	Forcibly add events with existing name.
 
+-n::
+--dry-run::
+	Dry run. With this option, --add and --del don't execute the actual
+	adding and removal operations.
+
 PROBE SYNTAX
 ------------
 Probe points are defined by the following syntax.
@@ -74,13 +79,22 @@ Probe points are defined by the following syntax.
 'EVENT' specifies the name of the new event; if omitted, it will be set to the name of the probed function. Currently, the event group name is set as 'probe'.
 'FUNC' specifies a probed function name, and it may have one of the following options; '+OFFS' is the offset from the function entry address in bytes, ':RLN' is the relative-line number from the function entry line, and '%return' means that it probes the function return. And ';PTN' means a lazy matching pattern (see LAZY MATCHING). Note that ';PTN' must be the end of the probe point definition. In addition, '@SRC' specifies a source file which has that function.
 It is also possible to specify a probe point by the source line number or lazy matching by using 'SRC:ALN' or 'SRC;PTN' syntax, where 'SRC' is the source file path, ':ALN' is the line number and ';PTN' is the lazy matching pattern.
-'ARG' specifies the arguments of this probe point. You can use the name of local variable, or kprobe-tracer argument format (e.g. $retval, %ax, etc).
+'ARG' specifies the arguments of this probe point (see PROBE ARGUMENT).
 
+PROBE ARGUMENT
+--------------
+Each probe argument follows the syntax below.
+
+ [NAME=]LOCALVAR|$retval|%REG|@SYMBOL[:TYPE]
+
+'NAME' specifies the name of this argument (optional). You can use the name of a local variable, a local data structure member (e.g. var->field, var.field2), or the kprobe-tracer argument format (e.g. $retval, %ax, etc). Note that the name of this argument will be set to the last member name if you specify a local data structure member (e.g. field2 for 'var->field1.field2'.)
+'TYPE' casts the type of this argument (optional). If omitted, perf probe automatically sets the type based on debuginfo.
+
 LINE SYNTAX
 -----------
 Line range is described by the following syntax.
 
-"FUNC[:RLN[+NUM|:RLN2]]|SRC:ALN[+NUM|:ALN2]"
+"FUNC[:RLN[+NUM|-RLN2]]|SRC:ALN[+NUM|-ALN2]"
 
 FUNC specifies the function name of showing lines. 'RLN' is the start line
 number from function entry line, and 'RLN2' is the end line number. As same as
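To make the new PROBE ARGUMENT rules concrete, here is a hypothetical target; the struct, field, and function names below are invented, and only the syntax comes from the documentation above. Probing 'do_io req->hdr.len:u16' would fetch the field as an unsigned 16-bit value, and because the last member name is used, the event argument would be reported as 'len'.

    /* Hypothetical probed code, for illustration only. */
    struct io_hdr { unsigned short len; };
    struct io_req { struct io_hdr hdr; };

    int do_io(struct io_req *req);

    /* An assumed probe definition following the syntax above:
     *
     *     perf probe 'do_io req->hdr.len:u16'
     */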
@@ -58,7 +58,7 @@ OPTIONS
 
 -f::
 --force::
-	Overwrite existing data file.
+	Overwrite existing data file. (deprecated)
 
 -c::
 --count=::
@@ -101,7 +101,7 @@ OPTIONS
 
 -R::
 --raw-samples::
-	Collect raw sample records from all opened counters (typically for tracepoint counters).
+	Collect raw sample records from all opened counters (default for tracepoint counters).
 
 SEE ALSO
 --------
@@ -12,7 +12,7 @@ SYNOPSIS
 
 DESCRIPTION
 -----------
-There's four variants of perf sched:
+There are four variants of perf sched:
 
   'perf sched record <command>' to record the scheduling events
   of an arbitrary workload.
@@ -27,7 +27,7 @@ There's four variants of perf sched:
   via perf sched record. (this is done by starting up mockup threads
   that mimic the workload based on the events in the trace. These
   threads can then replay the timings (CPU runtime and sleep patterns)
-  of the workload as it occured when it was recorded - and can repeat
+  of the workload as it occurred when it was recorded - and can repeat
   it a number of times, measuring its performance.)
 
 OPTIONS
@@ -1,3 +1,7 @@
+ifeq ("$(origin O)", "command line")
+	OUTPUT := $(O)/
+endif
+
 # The default target of this Makefile is...
 all::
 
@@ -150,10 +154,17 @@ all::
 # Define LDFLAGS=-static to build a static binary.
 #
 # Define EXTRA_CFLAGS=-m64 or EXTRA_CFLAGS=-m32 as appropriate for cross-builds.
+#
+# Define NO_DWARF if you do not want debug-info analysis feature at all.
 
-PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE
-	@$(SHELL_PATH) util/PERF-VERSION-GEN
--include PERF-VERSION-FILE
+$(shell sh -c 'mkdir -p $(OUTPUT)scripts/python/Perf-Trace-Util/' 2> /dev/null)
+$(shell sh -c 'mkdir -p $(OUTPUT)scripts/perl/Perf-Trace-Util/' 2> /dev/null)
+$(shell sh -c 'mkdir -p $(OUTPUT)util/scripting-engines/' 2> /dev/null)
+$(shell sh -c 'mkdir $(OUTPUT)bench' 2> /dev/null)
+
+$(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE
+	@$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT)
+-include $(OUTPUT)PERF-VERSION-FILE
 
 uname_S := $(shell sh -c 'uname -s 2>/dev/null || echo not')
 uname_M := $(shell sh -c 'uname -m 2>/dev/null || echo not')
@@ -308,7 +319,7 @@ PROGRAMS += $(EXTRA_PROGRAMS)
 #
 # Single 'perf' binary right now:
 #
-PROGRAMS += perf
+PROGRAMS += $(OUTPUT)perf
 
 # List built-in command $C whose implementation cmd_$C() is not in
 # builtin-$C.o but is linked in as part of some other command.
@@ -318,7 +329,7 @@ PROGRAMS += perf
 ALL_PROGRAMS = $(PROGRAMS) $(SCRIPTS)
 
 # what 'all' will build but not install in perfexecdir
-OTHER_PROGRAMS = perf$X
+OTHER_PROGRAMS = $(OUTPUT)perf$X
 
 # Set paths to tools early so that they can be used for version tests.
 ifndef SHELL_PATH
@@ -330,7 +341,7 @@ endif
 
 export PERL_PATH
 
-LIB_FILE=libperf.a
+LIB_FILE=$(OUTPUT)libperf.a
 
 LIB_H += ../../include/linux/perf_event.h
 LIB_H += ../../include/linux/rbtree.h
@@ -375,7 +386,6 @@ LIB_H += util/header.h
 LIB_H += util/help.h
 LIB_H += util/session.h
 LIB_H += util/strbuf.h
-LIB_H += util/string.h
 LIB_H += util/strlist.h
 LIB_H += util/svghelper.h
 LIB_H += util/run-command.h
@@ -391,77 +401,78 @@ LIB_H += util/probe-finder.h
 LIB_H += util/probe-event.h
 LIB_H += util/cpumap.h
 
-LIB_OBJS += util/abspath.o
+LIB_OBJS += $(OUTPUT)util/abspath.o
-LIB_OBJS += util/alias.o
+LIB_OBJS += $(OUTPUT)util/alias.o
-LIB_OBJS += util/build-id.o
+LIB_OBJS += $(OUTPUT)util/build-id.o
-LIB_OBJS += util/config.o
+LIB_OBJS += $(OUTPUT)util/config.o
-LIB_OBJS += util/ctype.o
+LIB_OBJS += $(OUTPUT)util/ctype.o
-LIB_OBJS += util/debugfs.o
+LIB_OBJS += $(OUTPUT)util/debugfs.o
-LIB_OBJS += util/environment.o
+LIB_OBJS += $(OUTPUT)util/environment.o
-LIB_OBJS += util/event.o
+LIB_OBJS += $(OUTPUT)util/event.o
-LIB_OBJS += util/exec_cmd.o
+LIB_OBJS += $(OUTPUT)util/exec_cmd.o
-LIB_OBJS += util/help.o
+LIB_OBJS += $(OUTPUT)util/help.o
-LIB_OBJS += util/levenshtein.o
+LIB_OBJS += $(OUTPUT)util/levenshtein.o
-LIB_OBJS += util/parse-options.o
+LIB_OBJS += $(OUTPUT)util/parse-options.o
-LIB_OBJS += util/parse-events.o
+LIB_OBJS += $(OUTPUT)util/parse-events.o
-LIB_OBJS += util/path.o
+LIB_OBJS += $(OUTPUT)util/path.o
-LIB_OBJS += util/rbtree.o
+LIB_OBJS += $(OUTPUT)util/rbtree.o
-LIB_OBJS += util/bitmap.o
+LIB_OBJS += $(OUTPUT)util/bitmap.o
-LIB_OBJS += util/hweight.o
+LIB_OBJS += $(OUTPUT)util/hweight.o
-LIB_OBJS += util/find_next_bit.o
+LIB_OBJS += $(OUTPUT)util/find_next_bit.o
-LIB_OBJS += util/run-command.o
+LIB_OBJS += $(OUTPUT)util/run-command.o
-LIB_OBJS += util/quote.o
+LIB_OBJS += $(OUTPUT)util/quote.o
-LIB_OBJS += util/strbuf.o
+LIB_OBJS += $(OUTPUT)util/strbuf.o
-LIB_OBJS += util/string.o
+LIB_OBJS += $(OUTPUT)util/string.o
-LIB_OBJS += util/strlist.o
+LIB_OBJS += $(OUTPUT)util/strlist.o
-LIB_OBJS += util/usage.o
+LIB_OBJS += $(OUTPUT)util/usage.o
-LIB_OBJS += util/wrapper.o
+LIB_OBJS += $(OUTPUT)util/wrapper.o
-LIB_OBJS += util/sigchain.o
+LIB_OBJS += $(OUTPUT)util/sigchain.o
-LIB_OBJS += util/symbol.o
+LIB_OBJS += $(OUTPUT)util/symbol.o
-LIB_OBJS += util/color.o
+LIB_OBJS += $(OUTPUT)util/color.o
-LIB_OBJS += util/pager.o
+LIB_OBJS += $(OUTPUT)util/pager.o
-LIB_OBJS += util/header.o
+LIB_OBJS += $(OUTPUT)util/header.o
-LIB_OBJS += util/callchain.o
+LIB_OBJS += $(OUTPUT)util/callchain.o
-LIB_OBJS += util/values.o
+LIB_OBJS += $(OUTPUT)util/values.o
-LIB_OBJS += util/debug.o
+LIB_OBJS += $(OUTPUT)util/debug.o
-LIB_OBJS += util/map.o
+LIB_OBJS += $(OUTPUT)util/map.o
-LIB_OBJS += util/session.o
+LIB_OBJS += $(OUTPUT)util/session.o
-LIB_OBJS += util/thread.o
+LIB_OBJS += $(OUTPUT)util/thread.o
-LIB_OBJS += util/trace-event-parse.o
+LIB_OBJS += $(OUTPUT)util/trace-event-parse.o
-LIB_OBJS += util/trace-event-read.o
+LIB_OBJS += $(OUTPUT)util/trace-event-read.o
-LIB_OBJS += util/trace-event-info.o
+LIB_OBJS += $(OUTPUT)util/trace-event-info.o
-LIB_OBJS += util/trace-event-scripting.o
+LIB_OBJS += $(OUTPUT)util/trace-event-scripting.o
-LIB_OBJS += util/svghelper.o
+LIB_OBJS += $(OUTPUT)util/svghelper.o
-LIB_OBJS += util/sort.o
+LIB_OBJS += $(OUTPUT)util/sort.o
-LIB_OBJS += util/hist.o
+LIB_OBJS += $(OUTPUT)util/hist.o
-LIB_OBJS += util/probe-event.o
+LIB_OBJS += $(OUTPUT)util/probe-event.o
-LIB_OBJS += util/util.o
+LIB_OBJS += $(OUTPUT)util/util.o
-LIB_OBJS += util/cpumap.o
+LIB_OBJS += $(OUTPUT)util/cpumap.o
 
-BUILTIN_OBJS += builtin-annotate.o
+BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o
 
-BUILTIN_OBJS += builtin-bench.o
+BUILTIN_OBJS += $(OUTPUT)builtin-bench.o
 
 # Benchmark modules
-BUILTIN_OBJS += bench/sched-messaging.o
+BUILTIN_OBJS += $(OUTPUT)bench/sched-messaging.o
-BUILTIN_OBJS += bench/sched-pipe.o
+BUILTIN_OBJS += $(OUTPUT)bench/sched-pipe.o
-BUILTIN_OBJS += bench/mem-memcpy.o
+BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy.o
 
-BUILTIN_OBJS += builtin-diff.o
+BUILTIN_OBJS += $(OUTPUT)builtin-diff.o
-BUILTIN_OBJS += builtin-help.o
+BUILTIN_OBJS += $(OUTPUT)builtin-help.o
-BUILTIN_OBJS += builtin-sched.o
+BUILTIN_OBJS += $(OUTPUT)builtin-sched.o
-BUILTIN_OBJS += builtin-buildid-list.o
+BUILTIN_OBJS += $(OUTPUT)builtin-buildid-list.o
-BUILTIN_OBJS += builtin-buildid-cache.o
+BUILTIN_OBJS += $(OUTPUT)builtin-buildid-cache.o
-BUILTIN_OBJS += builtin-list.o
+BUILTIN_OBJS += $(OUTPUT)builtin-list.o
-BUILTIN_OBJS += builtin-record.o
+BUILTIN_OBJS += $(OUTPUT)builtin-record.o
-BUILTIN_OBJS += builtin-report.o
+BUILTIN_OBJS += $(OUTPUT)builtin-report.o
-BUILTIN_OBJS += builtin-stat.o
+BUILTIN_OBJS += $(OUTPUT)builtin-stat.o
-BUILTIN_OBJS += builtin-timechart.o
+BUILTIN_OBJS += $(OUTPUT)builtin-timechart.o
-BUILTIN_OBJS += builtin-top.o
+BUILTIN_OBJS += $(OUTPUT)builtin-top.o
-BUILTIN_OBJS += builtin-trace.o
+BUILTIN_OBJS += $(OUTPUT)builtin-trace.o
-BUILTIN_OBJS += builtin-probe.o
+BUILTIN_OBJS += $(OUTPUT)builtin-probe.o
-BUILTIN_OBJS += builtin-kmem.o
+BUILTIN_OBJS += $(OUTPUT)builtin-kmem.o
-BUILTIN_OBJS += builtin-lock.o
+BUILTIN_OBJS += $(OUTPUT)builtin-lock.o
+BUILTIN_OBJS += $(OUTPUT)builtin-kvm.o
 
 PERFLIBS = $(LIB_FILE)
 
@@ -492,6 +503,10 @@ ifeq ($(uname_S),Darwin)
 	PTHREAD_LIBS =
 endif
 
+ifneq ($(OUTPUT),)
+	BASIC_CFLAGS += -I$(OUTPUT)
+endif
+
 ifeq ($(shell sh -c "(echo '\#include <libelf.h>'; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
 ifneq ($(shell sh -c "(echo '\#include <gnu/libc-version.h>'; echo 'int main(void) { const char * version = gnu_get_libc_version(); return (long)version; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
 	msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]/glibc-static);
@@ -506,11 +521,20 @@ endif
 
 ifneq ($(shell sh -c "(echo '\#include <dwarf.h>'; echo '\#include <libdw.h>'; echo 'int main(void) { Dwarf *dbg; dbg = dwarf_begin(0, DWARF_C_READ); return (long)dbg; }') | $(CC) -x c - $(ALL_CFLAGS) -I/usr/include/elfutils -ldw -lelf -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
 	msg := $(warning No libdw.h found or old libdw.h found, disables dwarf support. Please install elfutils-devel/elfutils-dev);
-	BASIC_CFLAGS += -DNO_DWARF_SUPPORT
 else
-	BASIC_CFLAGS += -I/usr/include/elfutils
+ifndef NO_DWARF
+	BASIC_CFLAGS += -I/usr/include/elfutils -DDWARF_SUPPORT
 	EXTLIBS += -lelf -ldw
-	LIB_OBJS += util/probe-finder.o
+	LIB_OBJS += $(OUTPUT)util/probe-finder.o
+endif
+endif
+
+ifneq ($(shell sh -c "(echo '\#include <newt.h>'; echo 'int main(void) { newtInit(); newtCls(); return newtFinished(); }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -lnewt -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
+	msg := $(warning newt not found, disables TUI support. Please install newt-devel or libnewt-dev);
+	BASIC_CFLAGS += -DNO_NEWT_SUPPORT
+else
+	EXTLIBS += -lnewt
+	LIB_OBJS += $(OUTPUT)util/newt.o
 endif
 
 ifndef NO_LIBPERL
@@ -522,8 +546,8 @@ ifneq ($(shell sh -c "(echo '\#include <EXTERN.h>'; echo '\#include <perl.h>'; e
 	BASIC_CFLAGS += -DNO_LIBPERL
 else
 	ALL_LDFLAGS += $(PERL_EMBED_LDOPTS)
-	LIB_OBJS += util/scripting-engines/trace-event-perl.o
+	LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-perl.o
-	LIB_OBJS += scripts/perl/Perf-Trace-Util/Context.o
+	LIB_OBJS += $(OUTPUT)scripts/perl/Perf-Trace-Util/Context.o
 endif
 
 ifndef NO_LIBPYTHON
@@ -531,12 +555,12 @@ PYTHON_EMBED_LDOPTS = `python-config --ldflags 2>/dev/null`
 PYTHON_EMBED_CCOPTS = `python-config --cflags 2>/dev/null`
 endif
 
-ifneq ($(shell sh -c "(echo '\#include <Python.h>'; echo 'int main(void) { Py_Initialize(); return 0; }') | $(CC) -x c - $(PYTHON_EMBED_CCOPTS) -o /dev/null $(PYTHON_EMBED_LDOPTS) > /dev/null 2>&1 && echo y"), y)
+ifneq ($(shell sh -c "(echo '\#include <Python.h>'; echo 'int main(void) { Py_Initialize(); return 0; }') | $(CC) -x c - $(PYTHON_EMBED_CCOPTS) -o $(BITBUCKET) $(PYTHON_EMBED_LDOPTS) > /dev/null 2>&1 && echo y"), y)
 	BASIC_CFLAGS += -DNO_LIBPYTHON
 else
 	ALL_LDFLAGS += $(PYTHON_EMBED_LDOPTS)
-	LIB_OBJS += util/scripting-engines/trace-event-python.o
+	LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-python.o
-	LIB_OBJS += scripts/python/Perf-Trace-Util/Context.o
+	LIB_OBJS += $(OUTPUT)scripts/python/Perf-Trace-Util/Context.o
 endif
 
 ifdef NO_DEMANGLE
@@ -607,53 +631,53 @@ ifdef NO_C99_FORMAT
 endif
 ifdef SNPRINTF_RETURNS_BOGUS
 	COMPAT_CFLAGS += -DSNPRINTF_RETURNS_BOGUS
-	COMPAT_OBJS += compat/snprintf.o
+	COMPAT_OBJS += $(OUTPUT)compat/snprintf.o
 endif
 ifdef FREAD_READS_DIRECTORIES
 	COMPAT_CFLAGS += -DFREAD_READS_DIRECTORIES
-	COMPAT_OBJS += compat/fopen.o
+	COMPAT_OBJS += $(OUTPUT)compat/fopen.o
 endif
 ifdef NO_SYMLINK_HEAD
 	BASIC_CFLAGS += -DNO_SYMLINK_HEAD
 endif
 ifdef NO_STRCASESTR
 	COMPAT_CFLAGS += -DNO_STRCASESTR
-	COMPAT_OBJS += compat/strcasestr.o
+	COMPAT_OBJS += $(OUTPUT)compat/strcasestr.o
 endif
 ifdef NO_STRTOUMAX
 	COMPAT_CFLAGS += -DNO_STRTOUMAX
-	COMPAT_OBJS += compat/strtoumax.o
+	COMPAT_OBJS += $(OUTPUT)compat/strtoumax.o
 endif
 ifdef NO_STRTOULL
 	COMPAT_CFLAGS += -DNO_STRTOULL
 endif
 ifdef NO_SETENV
 	COMPAT_CFLAGS += -DNO_SETENV
-	COMPAT_OBJS += compat/setenv.o
+	COMPAT_OBJS += $(OUTPUT)compat/setenv.o
 endif
 ifdef NO_MKDTEMP
 	COMPAT_CFLAGS += -DNO_MKDTEMP
-	COMPAT_OBJS += compat/mkdtemp.o
+	COMPAT_OBJS += $(OUTPUT)compat/mkdtemp.o
 endif
 ifdef NO_UNSETENV
 	COMPAT_CFLAGS += -DNO_UNSETENV
-	COMPAT_OBJS += compat/unsetenv.o
+	COMPAT_OBJS += $(OUTPUT)compat/unsetenv.o
 endif
 ifdef NO_SYS_SELECT_H
 	BASIC_CFLAGS += -DNO_SYS_SELECT_H
 endif
 ifdef NO_MMAP
 	COMPAT_CFLAGS += -DNO_MMAP
-	COMPAT_OBJS += compat/mmap.o
+	COMPAT_OBJS += $(OUTPUT)compat/mmap.o
 else
 	ifdef USE_WIN32_MMAP
 		COMPAT_CFLAGS += -DUSE_WIN32_MMAP
-		COMPAT_OBJS += compat/win32mmap.o
+		COMPAT_OBJS += $(OUTPUT)compat/win32mmap.o
 	endif
 endif
 ifdef NO_PREAD
 	COMPAT_CFLAGS += -DNO_PREAD
-	COMPAT_OBJS += compat/pread.o
+	COMPAT_OBJS += $(OUTPUT)compat/pread.o
 endif
 ifdef NO_FAST_WORKING_DIRECTORY
 	BASIC_CFLAGS += -DNO_FAST_WORKING_DIRECTORY
@@ -675,10 +699,10 @@ else
 	endif
 endif
 ifdef NO_INET_NTOP
-	LIB_OBJS += compat/inet_ntop.o
+	LIB_OBJS += $(OUTPUT)compat/inet_ntop.o
 endif
 ifdef NO_INET_PTON
-	LIB_OBJS += compat/inet_pton.o
+	LIB_OBJS += $(OUTPUT)compat/inet_pton.o
 endif
 
 ifdef NO_ICONV
@@ -695,15 +719,15 @@ endif
 
 ifdef PPC_SHA1
 	SHA1_HEADER = "ppc/sha1.h"
-	LIB_OBJS += ppc/sha1.o ppc/sha1ppc.o
+	LIB_OBJS += $(OUTPUT)ppc/sha1.o ppc/sha1ppc.o
 else
 ifdef ARM_SHA1
 	SHA1_HEADER = "arm/sha1.h"
-	LIB_OBJS += arm/sha1.o arm/sha1_arm.o
+	LIB_OBJS += $(OUTPUT)arm/sha1.o $(OUTPUT)arm/sha1_arm.o
 else
 ifdef MOZILLA_SHA1
 	SHA1_HEADER = "mozilla-sha1/sha1.h"
-	LIB_OBJS += mozilla-sha1/sha1.o
+	LIB_OBJS += $(OUTPUT)mozilla-sha1/sha1.o
 else
 	SHA1_HEADER = <openssl/sha.h>
 	EXTLIBS += $(LIB_4_CRYPTO)
@@ -715,15 +739,15 @@ ifdef NO_PERL_MAKEMAKER
 endif
 ifdef NO_HSTRERROR
 	COMPAT_CFLAGS += -DNO_HSTRERROR
-	COMPAT_OBJS += compat/hstrerror.o
+	COMPAT_OBJS += $(OUTPUT)compat/hstrerror.o
 endif
 ifdef NO_MEMMEM
 	COMPAT_CFLAGS += -DNO_MEMMEM
-	COMPAT_OBJS += compat/memmem.o
+	COMPAT_OBJS += $(OUTPUT)compat/memmem.o
 endif
 ifdef INTERNAL_QSORT
 	COMPAT_CFLAGS += -DINTERNAL_QSORT
-	COMPAT_OBJS += compat/qsort.o
+	COMPAT_OBJS += $(OUTPUT)compat/qsort.o
 endif
 ifdef RUNTIME_PREFIX
 	COMPAT_CFLAGS += -DRUNTIME_PREFIX
@@ -803,7 +827,7 @@ export TAR INSTALL DESTDIR SHELL_PATH
 
 SHELL = $(SHELL_PATH)
 
-all:: .perf.dev.null shell_compatibility_test $(ALL_PROGRAMS) $(BUILT_INS) $(OTHER_PROGRAMS) PERF-BUILD-OPTIONS
+all:: .perf.dev.null shell_compatibility_test $(ALL_PROGRAMS) $(BUILT_INS) $(OTHER_PROGRAMS) $(OUTPUT)PERF-BUILD-OPTIONS
 ifneq (,$X)
 	$(foreach p,$(patsubst %$X,%,$(filter %$X,$(ALL_PROGRAMS) $(BUILT_INS) perf$X)), test '$p' -ef '$p$X' || $(RM) '$p';)
 endif
@@ -815,39 +839,39 @@ please_set_SHELL_PATH_to_a_more_modern_shell:
 
 shell_compatibility_test: please_set_SHELL_PATH_to_a_more_modern_shell
 
-strip: $(PROGRAMS) perf$X
+strip: $(PROGRAMS) $(OUTPUT)perf$X
-	$(STRIP) $(STRIP_OPTS) $(PROGRAMS) perf$X
+	$(STRIP) $(STRIP_OPTS) $(PROGRAMS) $(OUTPUT)perf$X
 
-perf.o: perf.c common-cmds.h PERF-CFLAGS
+$(OUTPUT)perf.o: perf.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS
 	$(QUIET_CC)$(CC) -DPERF_VERSION='"$(PERF_VERSION)"' \
 		'-DPERF_HTML_PATH="$(htmldir_SQ)"' \
-		$(ALL_CFLAGS) -c $(filter %.c,$^)
+		$(ALL_CFLAGS) -c $(filter %.c,$^) -o $@
 
-perf$X: perf.o $(BUILTIN_OBJS) $(PERFLIBS)
+$(OUTPUT)perf$X: $(OUTPUT)perf.o $(BUILTIN_OBJS) $(PERFLIBS)
-	$(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ perf.o \
+	$(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(OUTPUT)perf.o \
 		$(BUILTIN_OBJS) $(ALL_LDFLAGS) $(LIBS)
 
-builtin-help.o: builtin-help.c common-cmds.h PERF-CFLAGS
+$(OUTPUT)builtin-help.o: builtin-help.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS
-	$(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) \
+	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) \
		'-DPERF_HTML_PATH="$(htmldir_SQ)"' \
		'-DPERF_MAN_PATH="$(mandir_SQ)"' \
		'-DPERF_INFO_PATH="$(infodir_SQ)"' $<
 
-builtin-timechart.o: builtin-timechart.c common-cmds.h PERF-CFLAGS
+$(OUTPUT)builtin-timechart.o: builtin-timechart.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS
-	$(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) \
+	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) \
		'-DPERF_HTML_PATH="$(htmldir_SQ)"' \
		'-DPERF_MAN_PATH="$(mandir_SQ)"' \
		'-DPERF_INFO_PATH="$(infodir_SQ)"' $<
 
-$(BUILT_INS): perf$X
+$(BUILT_INS): $(OUTPUT)perf$X
 	$(QUIET_BUILT_IN)$(RM) $@ && \
	ln perf$X $@ 2>/dev/null || \
	ln -s perf$X $@ 2>/dev/null || \
	cp perf$X $@
 
-common-cmds.h: util/generate-cmdlist.sh command-list.txt
+$(OUTPUT)common-cmds.h: util/generate-cmdlist.sh command-list.txt
 
-common-cmds.h: $(wildcard Documentation/perf-*.txt)
+$(OUTPUT)common-cmds.h: $(wildcard Documentation/perf-*.txt)
 	$(QUIET_GEN). util/generate-cmdlist.sh > $@+ && mv $@+ $@
 
 $(patsubst %.sh,%,$(SCRIPT_SH)) : % : %.sh
@@ -859,7 +883,7 @@ $(patsubst %.sh,%,$(SCRIPT_SH)) : % : %.sh
	    -e 's/@@NO_CURL@@/$(NO_CURL)/g' \
	    $@.sh >$@+ && \
	chmod +x $@+ && \
-	mv $@+ $@
+	mv $@+ $(OUTPUT)$@
 
 configure: configure.ac
 	$(QUIET_GEN)$(RM) $@ $<+ && \
@@ -869,60 +893,60 @@ configure: configure.ac
	$(RM) $<+
 
 # These can record PERF_VERSION
-perf.o perf.spec \
+$(OUTPUT)perf.o perf.spec \
	$(patsubst %.sh,%,$(SCRIPT_SH)) \
	$(patsubst %.perl,%,$(SCRIPT_PERL)) \
-	: PERF-VERSION-FILE
+	: $(OUTPUT)PERF-VERSION-FILE
 
-%.o: %.c PERF-CFLAGS
+$(OUTPUT)%.o: %.c $(OUTPUT)PERF-CFLAGS
-	$(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) $<
+	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $<
-%.s: %.c PERF-CFLAGS
+$(OUTPUT)%.s: %.c $(OUTPUT)PERF-CFLAGS
 	$(QUIET_CC)$(CC) -S $(ALL_CFLAGS) $<
-%.o: %.S
+$(OUTPUT)%.o: %.S
-	$(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) $<
+	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $<
 
-util/exec_cmd.o: util/exec_cmd.c PERF-CFLAGS
+$(OUTPUT)util/exec_cmd.o: util/exec_cmd.c $(OUTPUT)PERF-CFLAGS
-	$(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) \
+	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) \
		'-DPERF_EXEC_PATH="$(perfexecdir_SQ)"' \
		'-DBINDIR="$(bindir_relative_SQ)"' \
		'-DPREFIX="$(prefix_SQ)"' \
		$<
 
-builtin-init-db.o: builtin-init-db.c PERF-CFLAGS
+$(OUTPUT)builtin-init-db.o: builtin-init-db.c $(OUTPUT)PERF-CFLAGS
-	$(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) -DDEFAULT_PERF_TEMPLATE_DIR='"$(template_dir_SQ)"' $<
+	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DDEFAULT_PERF_TEMPLATE_DIR='"$(template_dir_SQ)"' $<
 
-util/config.o: util/config.c PERF-CFLAGS
+$(OUTPUT)util/config.o: util/config.c $(OUTPUT)PERF-CFLAGS
-	$(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
+	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
 
-util/rbtree.o: ../../lib/rbtree.c PERF-CFLAGS
+$(OUTPUT)util/rbtree.o: ../../lib/rbtree.c $(OUTPUT)PERF-CFLAGS
-	$(QUIET_CC)$(CC) -o util/rbtree.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
+	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
 
 # some perf warning policies can't fit to lib/bitmap.c, eg: it warns about variable shadowing
 # from <string.h> that comes from kernel headers wrapping.
 KBITMAP_FLAGS=`echo $(ALL_CFLAGS) | sed s/-Wshadow// | sed s/-Wswitch-default// | sed s/-Wextra//`
 
-util/bitmap.o: ../../lib/bitmap.c PERF-CFLAGS
+$(OUTPUT)util/bitmap.o: ../../lib/bitmap.c $(OUTPUT)PERF-CFLAGS
-	$(QUIET_CC)$(CC) -o util/bitmap.o -c $(KBITMAP_FLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
+	$(QUIET_CC)$(CC) -o $@ -c $(KBITMAP_FLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
 
-util/hweight.o: ../../lib/hweight.c PERF-CFLAGS
+$(OUTPUT)util/hweight.o: ../../lib/hweight.c $(OUTPUT)PERF-CFLAGS
-	$(QUIET_CC)$(CC) -o util/hweight.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
+	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
 
-util/find_next_bit.o: ../../lib/find_next_bit.c PERF-CFLAGS
+$(OUTPUT)util/find_next_bit.o: ../../lib/find_next_bit.c $(OUTPUT)PERF-CFLAGS
-	$(QUIET_CC)$(CC) -o util/find_next_bit.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
+	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
 
-util/scripting-engines/trace-event-perl.o: util/scripting-engines/trace-event-perl.c PERF-CFLAGS
+$(OUTPUT)util/scripting-engines/trace-event-perl.o: util/scripting-engines/trace-event-perl.c $(OUTPUT)PERF-CFLAGS
-	$(QUIET_CC)$(CC) -o util/scripting-engines/trace-event-perl.o -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow $<
+	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow $<
 
-scripts/perl/Perf-Trace-Util/Context.o: scripts/perl/Perf-Trace-Util/Context.c PERF-CFLAGS
+$(OUTPUT)scripts/perl/Perf-Trace-Util/Context.o: scripts/perl/Perf-Trace-Util/Context.c $(OUTPUT)PERF-CFLAGS
-	$(QUIET_CC)$(CC) -o scripts/perl/Perf-Trace-Util/Context.o -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs $<
+	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs $<
 
-util/scripting-engines/trace-event-python.o: util/scripting-engines/trace-event-python.c PERF-CFLAGS
+$(OUTPUT)util/scripting-engines/trace-event-python.o: util/scripting-engines/trace-event-python.c $(OUTPUT)PERF-CFLAGS
-	$(QUIET_CC)$(CC) -o util/scripting-engines/trace-event-python.o -c $(ALL_CFLAGS) $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow $<
+	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow $<
 
-scripts/python/Perf-Trace-Util/Context.o: scripts/python/Perf-Trace-Util/Context.c PERF-CFLAGS
+$(OUTPUT)scripts/python/Perf-Trace-Util/Context.o: scripts/python/Perf-Trace-Util/Context.c $(OUTPUT)PERF-CFLAGS
-	$(QUIET_CC)$(CC) -o scripts/python/Perf-Trace-Util/Context.o -c $(ALL_CFLAGS) $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs $<
+	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs $<
 
-perf-%$X: %.o $(PERFLIBS)
+$(OUTPUT)perf-%$X: %.o $(PERFLIBS)
 	$(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(LIBS)
 
 $(LIB_OBJS) $(BUILTIN_OBJS): $(LIB_H)
@@ -963,17 +987,17 @@ cscope:
 TRACK_CFLAGS = $(subst ','\'',$(ALL_CFLAGS)):\
	$(bindir_SQ):$(perfexecdir_SQ):$(template_dir_SQ):$(prefix_SQ)
 
-PERF-CFLAGS: .FORCE-PERF-CFLAGS
+$(OUTPUT)PERF-CFLAGS: .FORCE-PERF-CFLAGS
 	@FLAGS='$(TRACK_CFLAGS)'; \
-	    if test x"$$FLAGS" != x"`cat PERF-CFLAGS 2>/dev/null`" ; then \
+	    if test x"$$FLAGS" != x"`cat $(OUTPUT)PERF-CFLAGS 2>/dev/null`" ; then \
		echo 1>&2 " * new build flags or prefix"; \
-		echo "$$FLAGS" >PERF-CFLAGS; \
+		echo "$$FLAGS" >$(OUTPUT)PERF-CFLAGS; \
	    fi
 
 # We need to apply sq twice, once to protect from the shell
-# that runs PERF-BUILD-OPTIONS, and then again to protect it
+# that runs $(OUTPUT)PERF-BUILD-OPTIONS, and then again to protect it
 # and the first level quoting from the shell that runs "echo".
-PERF-BUILD-OPTIONS: .FORCE-PERF-BUILD-OPTIONS
+$(OUTPUT)PERF-BUILD-OPTIONS: .FORCE-PERF-BUILD-OPTIONS
 	@echo SHELL_PATH=\''$(subst ','\'',$(SHELL_PATH_SQ))'\' >$@
	@echo TAR=\''$(subst ','\'',$(subst ','\'',$(TAR)))'\' >>$@
	@echo NO_CURL=\''$(subst ','\'',$(subst ','\'',$(NO_CURL)))'\' >>$@
@@ -994,7 +1018,7 @@ all:: $(TEST_PROGRAMS)
 
 export NO_SVN_TESTS
 
-check: common-cmds.h
+check: $(OUTPUT)common-cmds.h
 	if sparse; \
	then \
		for i in *.c */*.c; \
@@ -1028,10 +1052,10 @@ export perfexec_instdir
 
 install: all
 	$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)'
-	$(INSTALL) perf$X '$(DESTDIR_SQ)$(bindir_SQ)'
+	$(INSTALL) $(OUTPUT)perf$X '$(DESTDIR_SQ)$(bindir_SQ)'
	$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'
	$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin'
-	$(INSTALL) perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
+	$(INSTALL) $(OUTPUT)perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
	$(INSTALL) scripts/perl/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'
	$(INSTALL) scripts/perl/*.pl -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl'
	$(INSTALL) scripts/perl/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin'
@@ -1045,7 +1069,7 @@ ifdef BUILT_INS
 	$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
	$(INSTALL) $(BUILT_INS) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
 ifneq (,$X)
-	$(foreach p,$(patsubst %$X,%,$(filter %$X,$(ALL_PROGRAMS) $(BUILT_INS) perf$X)), $(RM) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/$p';)
+	$(foreach p,$(patsubst %$X,%,$(filter %$X,$(ALL_PROGRAMS) $(BUILT_INS) $(OUTPUT)perf$X)), $(RM) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/$p';)
 endif
 endif
 
@@ -1129,14 +1153,14 @@ clean:
 	$(RM) *.o */*.o */*/*.o */*/*/*.o $(LIB_FILE)
	$(RM) $(ALL_PROGRAMS) $(BUILT_INS) perf$X
	$(RM) $(TEST_PROGRAMS)
-	$(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo common-cmds.h TAGS tags cscope*
+	$(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope*
	$(RM) -r autom4te.cache
	$(RM) config.log config.mak.autogen config.mak.append config.status config.cache
	$(RM) -r $(PERF_TARNAME) .doc-tmp-dir
	$(RM) $(PERF_TARNAME).tar.gz perf-core_$(PERF_VERSION)-*.tar.gz
	$(RM) $(htmldocs).tar.gz $(manpages).tar.gz
	$(MAKE) -C Documentation/ clean
-	$(RM) PERF-VERSION-FILE PERF-CFLAGS PERF-BUILD-OPTIONS
+	$(RM) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS $(OUTPUT)PERF-BUILD-OPTIONS
 
 .PHONY: all install clean strip
 .PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell
@@ -10,7 +10,6 @@
 #include "../perf.h"
 #include "../util/util.h"
 #include "../util/parse-options.h"
-#include "../util/string.h"
 #include "../util/header.h"
 #include "bench.h"
 
@@ -24,7 +23,7 @@
 
 static const char *length_str = "1MB";
 static const char *routine = "default";
-static int use_clock = 0;
+static bool use_clock = false;
 static int clock_fd;
 
 static const struct option options[] = {
@@ -31,9 +31,9 @@
 
 #define DATASIZE 100
 
-static int use_pipes = 0;
+static bool use_pipes = false;
 static unsigned int loops = 100;
-static unsigned int thread_mode = 0;
+static bool thread_mode = false;
 static unsigned int num_groups = 10;
 
 struct sender_context {
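The int-to-bool conversions above (and the OPT_BOOLEAN to OPT_INCR switch at the end of this commit) separate two kinds of command-line flags. The stand-alone sketch below, which is not perf's actual option parser, shows the distinction: an on/off switch is naturally a bool, while a repeatable flag such as -v -v needs a counter and must stay an int.

    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static bool use_pipes = false;	/* OPT_BOOLEAN-style on/off switch */
    static int  verbose;		/* OPT_INCR-style counter          */

    int main(int argc, char **argv)
    {
    	int c;

    	while ((c = getopt(argc, argv, "pv")) != -1) {
    		switch (c) {
    		case 'p':
    			use_pipes = true;	/* repeating -p stays true */
    			break;
    		case 'v':
    			verbose++;		/* each -v raises the level */
    			break;
    		default:
    			return 1;
    		}
    	}
    	printf("pipes=%d verbosity=%d\n", use_pipes, verbose);
    	return 0;
    }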
@@ -93,7 +93,7 @@ int bench_sched_pipe(int argc, const char **argv,
 
 	switch (bench_format) {
 	case BENCH_FORMAT_DEFAULT:
-		printf("# Extecuted %d pipe operations between two tasks\n\n",
+		printf("# Executed %d pipe operations between two tasks\n\n",
			loops);
 
		result_usec = diff.tv_sec * 1000000;
@@ -14,7 +14,6 @@
 #include "util/cache.h"
 #include <linux/rbtree.h>
 #include "util/symbol.h"
-#include "util/string.h"
 
 #include "perf.h"
 #include "util/debug.h"
@@ -29,11 +28,11 @@
 
 static char const *input_name = "perf.data";
 
-static int force;
+static bool force;
 
-static int full_paths;
+static bool full_paths;
 
-static int print_line;
+static bool print_line;
 
 struct sym_hist {
 	u64 sum;
@@ -69,13 +68,13 @@ static int sym__alloc_hist(struct symbol *self)
 static int annotate__hist_hit(struct hist_entry *he, u64 ip)
 {
 	unsigned int sym_size, offset;
-	struct symbol *sym = he->sym;
+	struct symbol *sym = he->ms.sym;
 	struct sym_priv *priv;
 	struct sym_hist *h;
 
 	he->count++;
 
-	if (!sym || !he->map)
+	if (!sym || !he->ms.map)
 		return 0;
 
 	priv = symbol__priv(sym);
@@ -85,7 +84,7 @@ static int annotate__hist_hit(struct hist_entry *he, u64 ip)
 	sym_size = sym->end - sym->start;
 	offset = ip - sym->start;
 
-	pr_debug3("%s: ip=%#Lx\n", __func__, he->map->unmap_ip(he->map, ip));
+	pr_debug3("%s: ip=%#Lx\n", __func__, he->ms.map->unmap_ip(he->ms.map, ip));
 
 	if (offset >= sym_size)
 		return 0;
@@ -94,8 +93,8 @@ static int annotate__hist_hit(struct hist_entry *he, u64 ip)
 	h->sum++;
 	h->ip[offset]++;
 
-	pr_debug3("%#Lx %s: count++ [ip: %#Lx, %#Lx] => %Ld\n", he->sym->start,
-		  he->sym->name, ip, ip - he->sym->start, h->ip[offset]);
+	pr_debug3("%#Lx %s: count++ [ip: %#Lx, %#Lx] => %Ld\n", he->ms.sym->start,
+		  he->ms.sym->name, ip, ip - he->ms.sym->start, h->ip[offset]);
 	return 0;
 }
 
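The repeated he->sym to he->ms.sym and he->map to he->ms.map substitutions in the hunks above and below suggest that hist_entry now keeps its map and symbol in an embedded sub-struct; a minimal sketch of that shape follows, with the struct names assumed from the ->ms accessor alone.

    struct map;
    struct symbol;

    /* Assumed shape: a map/symbol pairing shared by all call sites. */
    struct map_symbol {
    	struct map    *map;
    	struct symbol *sym;
    };

    struct hist_entry {
    	struct map_symbol ms;	/* was: separate map and sym members */
    	unsigned int count;	/* other fields omitted */
    };

    /* Call sites then change mechanically:
     *     he->sym -> he->ms.sym
     *     he->map -> he->ms.map
     */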
@@ -187,7 +186,7 @@ static struct objdump_line *objdump__get_next_ip_line(struct list_head *head,
 static int parse_line(FILE *file, struct hist_entry *he,
		      struct list_head *head)
 {
-	struct symbol *sym = he->sym;
+	struct symbol *sym = he->ms.sym;
 	struct objdump_line *objdump_line;
 	char *line = NULL, *tmp, *tmp2;
 	size_t line_len;
@@ -226,7 +225,7 @@ static int parse_line(FILE *file, struct hist_entry *he,
 	}
 
 	if (line_ip != -1) {
-		u64 start = map__rip_2objdump(he->map, sym->start);
+		u64 start = map__rip_2objdump(he->ms.map, sym->start);
		offset = line_ip - start;
 	}
 
@@ -244,7 +243,7 @@ static int objdump_line__print(struct objdump_line *self,
			       struct list_head *head,
			       struct hist_entry *he, u64 len)
 {
-	struct symbol *sym = he->sym;
+	struct symbol *sym = he->ms.sym;
 	static const char *prev_line;
 	static const char *prev_color;
 
@@ -327,7 +326,7 @@ static void insert_source_line(struct sym_ext *sym_ext)
 
 static void free_source_line(struct hist_entry *he, int len)
 {
-	struct sym_priv *priv = symbol__priv(he->sym);
+	struct sym_priv *priv = symbol__priv(he->ms.sym);
 	struct sym_ext *sym_ext = priv->ext;
 	int i;
 
@@ -346,7 +345,7 @@ static void free_source_line(struct hist_entry *he, int len)
 static void
 get_source_line(struct hist_entry *he, int len, const char *filename)
 {
-	struct symbol *sym = he->sym;
+	struct symbol *sym = he->ms.sym;
 	u64 start;
 	int i;
 	char cmd[PATH_MAX * 2];
@@ -361,7 +360,7 @@ get_source_line(struct hist_entry *he, int len, const char *filename)
 	if (!priv->ext)
		return;
 
-	start = he->map->unmap_ip(he->map, sym->start);
+	start = he->ms.map->unmap_ip(he->ms.map, sym->start);
 
 	for (i = 0; i < len; i++) {
		char *path = NULL;
@@ -425,7 +424,7 @@ static void print_summary(const char *filename)
 
 static void hist_entry__print_hits(struct hist_entry *self)
 {
-	struct symbol *sym = self->sym;
+	struct symbol *sym = self->ms.sym;
 	struct sym_priv *priv = symbol__priv(sym);
 	struct sym_hist *h = priv->hist;
 	u64 len = sym->end - sym->start, offset;
@@ -439,9 +438,9 @@ static void hist_entry__print_hits(struct hist_entry *self)
 
 static void annotate_sym(struct hist_entry *he)
 {
-	struct map *map = he->map;
+	struct map *map = he->ms.map;
 	struct dso *dso = map->dso;
-	struct symbol *sym = he->sym;
+	struct symbol *sym = he->ms.sym;
 	const char *filename = dso->long_name, *d_filename;
 	u64 len;
 	char command[PATH_MAX*2];
@@ -452,6 +451,16 @@ static void annotate_sym(struct hist_entry *he)
 	if (!filename)
		return;
 
+	if (dso->origin == DSO__ORIG_KERNEL) {
+		if (dso->annotate_warned)
+			return;
+		dso->annotate_warned = 1;
+		pr_err("Can't annotate %s: No vmlinux file was found in the "
+		       "path:\n", sym->name);
+		vmlinux_path__fprintf(stderr);
+		return;
+	}
+
 	pr_debug("%s: filename=%s, sym=%s, start=%#Lx, end=%#Lx\n", __func__,
		 filename, sym->name, map->unmap_ip(map, sym->start),
		 map->unmap_ip(map, sym->end));
@@ -516,17 +525,17 @@ static void perf_session__find_annotations(struct perf_session *self)
 		struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
		struct sym_priv *priv;
 
-		if (he->sym == NULL)
+		if (he->ms.sym == NULL)
			continue;
 
-		priv = symbol__priv(he->sym);
+		priv = symbol__priv(he->ms.sym);
		if (priv->hist == NULL)
			continue;
 
		annotate_sym(he);
		/*
		 * Since we have a hist_entry per IP for the same symbol, free
-		 * he->sym->hist to signal we already processed this symbol.
+		 * he->ms.sym->hist to signal we already processed this symbol.
		 */
		free(priv->hist);
		priv->hist = NULL;
@@ -562,7 +571,7 @@ static int __cmd_annotate(void)
 	perf_session__fprintf(session, stdout);
 
 	if (verbose > 2)
-		dsos__fprintf(stdout);
+		dsos__fprintf(&session->kerninfo_root, stdout);
 
 	perf_session__collapse_resort(&session->hists);
 	perf_session__output_resort(&session->hists, session->event_total[0]);
@@ -581,10 +590,12 @@ static const char * const annotate_usage[] = {
 static const struct option options[] = {
 	OPT_STRING('i', "input", &input_name, "file",
		   "input file name"),
+	OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
+		   "only consider symbols in these dsos"),
	OPT_STRING('s', "symbol", &sym_hist_filter, "symbol",
		   "symbol to annotate"),
	OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
-	OPT_BOOLEAN('v', "verbose", &verbose,
+	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
|
"dump raw trace in ASCII"),
|
||||||
|
|
|
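Worth noting while reading these hunks: the recurring OPT_BOOLEAN -> OPT_INCR switch for -v means each repetition of the flag now bumps the counter, so thresholds like the "verbose > 2" test above become reachable with -vv or -vvv. A standalone sketch of the two semantics (illustrative only, not the parse-options implementation):

	/* Standalone sketch of the two -v semantics; illustrative only,
	 * not taken from tools/perf/util/parse-options.c. */
	#include <stdio.h>
	#include <string.h>

	int main(int argc, char **argv)
	{
		int verbose_bool = 0;	/* OPT_BOOLEAN-like: set to 1, never higher */
		int verbose_incr = 0;	/* OPT_INCR-like: bumped on every -v */
		int i;

		for (i = 1; i < argc; i++) {
			if (strcmp(argv[i], "-v") == 0) {
				verbose_bool = 1;
				verbose_incr++;
			}
		}
		/* "prog -v -v -v" prints bool=1 incr=3 */
		printf("bool=%d incr=%d\n", verbose_bool, verbose_incr);
		return 0;
	}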
tools/perf/builtin-buildid-cache.c

@@ -27,7 +27,7 @@ static const struct option buildid_cache_options[] = {
 		   "file list", "file(s) to add"),
 	OPT_STRING('r', "remove", &remove_name_list_str, "file list",
 		   "file(s) to remove"),
-	OPT_BOOLEAN('v', "verbose", &verbose, "be more verbose"),
+	OPT_INCR('v', "verbose", &verbose, "be more verbose"),
 	OPT_END()
 };

tools/perf/builtin-buildid-list.c

@@ -16,7 +16,7 @@
 #include "util/symbol.h"

 static char const *input_name = "perf.data";
-static int force;
+static bool force;
 static bool with_hits;

 static const char * const buildid_list_usage[] = {
@@ -29,7 +29,7 @@ static const struct option options[] = {
 	OPT_STRING('i', "input", &input_name, "file",
 		   "input file name"),
 	OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
-	OPT_BOOLEAN('v', "verbose", &verbose,
+	OPT_INCR('v', "verbose", &verbose,
 		    "be more verbose"),
 	OPT_END()
 };
@@ -46,7 +46,7 @@ static int __cmd_buildid_list(void)
 	if (with_hits)
 		perf_session__process_events(session, &build_id__mark_dso_hit_ops);

-	dsos__fprintf_buildid(stdout, with_hits);
+	dsos__fprintf_buildid(&session->kerninfo_root, stdout, with_hits);

 	perf_session__delete(session);
 	return err;

tools/perf/builtin-diff.c

@@ -19,7 +19,7 @@
 static char const *input_old = "perf.data.old",
 		  *input_new = "perf.data";
 static char diff__default_sort_order[] = "dso,symbol";
-static int force;
+static bool force;
 static bool show_displacement;

 static int perf_session__add_hist_entry(struct perf_session *self,
@@ -33,7 +33,7 @@ static int perf_session__add_hist_entry(struct perf_session *self,
 		return -ENOMEM;

 	if (hit)
-		he->count += count;
+		__perf_session__add_count(he, al, count);

 	return 0;
 }
@@ -188,7 +188,7 @@ static const char * const diff_usage[] = {
 };

 static const struct option options[] = {
-	OPT_BOOLEAN('v', "verbose", &verbose,
+	OPT_INCR('v', "verbose", &verbose,
 		    "be more verbose (show symbol address, etc)"),
 	OPT_BOOLEAN('m', "displacement", &show_displacement,
 		    "Show position displacement relative to baseline"),
@@ -225,6 +225,10 @@ int cmd_diff(int argc, const char **argv, const char *prefix __used)
 			input_new = argv[1];
 		} else
 			input_new = argv[0];
+	} else if (symbol_conf.default_guest_vmlinux_name ||
+		   symbol_conf.default_guest_kallsyms) {
+		input_old = "perf.data.host";
+		input_new = "perf.data.guest";
 	}

 	symbol_conf.exclude_other = false;

tools/perf/builtin-help.c

@@ -29,7 +29,7 @@ enum help_format {
 	HELP_FORMAT_WEB,
 };

-static int show_all = 0;
+static bool show_all = false;
 static enum help_format help_format = HELP_FORMAT_MAN;
 static struct option builtin_help_options[] = {
 	OPT_BOOLEAN('a', "all", &show_all, "print all available commands"),

tools/perf/builtin-kmem.c

@@ -351,6 +351,7 @@ static void __print_result(struct rb_root *root, struct perf_session *session,
 			   int n_lines, int is_caller)
 {
 	struct rb_node *next;
+	struct kernel_info *kerninfo;

 	printf("%.102s\n", graph_dotted_line);
 	printf(" %-34s |", is_caller ? "Callsite": "Alloc Ptr");
@@ -359,23 +360,31 @@ static void __print_result(struct rb_root *root, struct perf_session *session,

 	next = rb_first(root);

+	kerninfo = kerninfo__findhost(&session->kerninfo_root);
+	if (!kerninfo) {
+		pr_err("__print_result: couldn't find kernel information\n");
+		return;
+	}
 	while (next && n_lines--) {
 		struct alloc_stat *data = rb_entry(next, struct alloc_stat,
 						   node);
 		struct symbol *sym = NULL;
+		struct map_groups *kmaps = &kerninfo->kmaps;
+		struct map *map;
 		char buf[BUFSIZ];
 		u64 addr;

 		if (is_caller) {
 			addr = data->call_site;
 			if (!raw_ip)
-				sym = map_groups__find_function(&session->kmaps, addr, NULL);
+				sym = map_groups__find_function(kmaps, addr,
+								&map, NULL);
 		} else
 			addr = data->ptr;

 		if (sym != NULL)
 			snprintf(buf, sizeof(buf), "%s+%Lx", sym->name,
-				 addr - sym->start);
+				 addr - map->unmap_ip(map, sym->start));
 		else
 			snprintf(buf, sizeof(buf), "%#Lx", addr);
 		printf(" %-34s |", buf);
@@ -488,6 +497,9 @@ static int __cmd_kmem(void)
 	if (session == NULL)
 		return -ENOMEM;

+	if (perf_session__create_kernel_maps(session) < 0)
+		goto out_delete;
+
 	if (!perf_session__has_traces(session, "kmem record"))
 		goto out_delete;

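The kmem hunks above show the pattern this series applies throughout: kernel maps now hang off a kernel_info found via session->kerninfo_root, and a symbol's start is unmapped through its owning map before being subtracted from an absolute address (module symbols keep map-relative starts). A condensed sketch of that flow, using only calls visible in this diff; the signatures are as they appear here and may differ in later perf versions:

	/* Sketch: resolve an absolute kernel address to "sym+offset",
	 * following the __print_result() changes above. Assumes perf's
	 * internal headers (util/session.h, util/symbol.h). */
	static int fmt_kernel_addr(struct perf_session *session, u64 addr,
				   char *buf, size_t sz)
	{
		struct kernel_info *kerninfo;
		struct map *map;
		struct symbol *sym;

		kerninfo = kerninfo__findhost(&session->kerninfo_root);
		if (!kerninfo)
			return -1;	/* no host kernel info in this session */

		sym = map_groups__find_function(&kerninfo->kmaps, addr, &map, NULL);
		if (sym)	/* sym->start is map-relative, so unmap it first */
			return snprintf(buf, sz, "%s+%Lx", sym->name,
					addr - map->unmap_ip(map, sym->start));
		return snprintf(buf, sz, "%#Lx", addr);
	}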
tools/perf/builtin-kvm.c (new file)

@@ -0,0 +1,144 @@
+#include "builtin.h"
+#include "perf.h"
+
+#include "util/util.h"
+#include "util/cache.h"
+#include "util/symbol.h"
+#include "util/thread.h"
+#include "util/header.h"
+#include "util/session.h"
+
+#include "util/parse-options.h"
+#include "util/trace-event.h"
+
+#include "util/debug.h"
+
+#include <sys/prctl.h>
+
+#include <semaphore.h>
+#include <pthread.h>
+#include <math.h>
+
+static char *file_name;
+static char name_buffer[256];
+
+int perf_host = 1;
+int perf_guest;
+
+static const char * const kvm_usage[] = {
+	"perf kvm [<options>] {top|record|report|diff|buildid-list}",
+	NULL
+};
+
+static const struct option kvm_options[] = {
+	OPT_STRING('i', "input", &file_name, "file",
+		   "Input file name"),
+	OPT_STRING('o', "output", &file_name, "file",
+		   "Output file name"),
+	OPT_BOOLEAN(0, "guest", &perf_guest,
+		    "Collect guest os data"),
+	OPT_BOOLEAN(0, "host", &perf_host,
+		    "Collect guest os data"),
+	OPT_STRING(0, "guestmount", &symbol_conf.guestmount, "directory",
+		   "guest mount directory under which every guest os"
+		   " instance has a subdir"),
+	OPT_STRING(0, "guestvmlinux", &symbol_conf.default_guest_vmlinux_name,
+		   "file", "file saving guest os vmlinux"),
+	OPT_STRING(0, "guestkallsyms", &symbol_conf.default_guest_kallsyms,
+		   "file", "file saving guest os /proc/kallsyms"),
+	OPT_STRING(0, "guestmodules", &symbol_conf.default_guest_modules,
+		   "file", "file saving guest os /proc/modules"),
+	OPT_END()
+};
+
+static int __cmd_record(int argc, const char **argv)
+{
+	int rec_argc, i = 0, j;
+	const char **rec_argv;
+
+	rec_argc = argc + 2;
+	rec_argv = calloc(rec_argc + 1, sizeof(char *));
+	rec_argv[i++] = strdup("record");
+	rec_argv[i++] = strdup("-o");
+	rec_argv[i++] = strdup(file_name);
+	for (j = 1; j < argc; j++, i++)
+		rec_argv[i] = argv[j];
+
+	BUG_ON(i != rec_argc);
+
+	return cmd_record(i, rec_argv, NULL);
+}
+
+static int __cmd_report(int argc, const char **argv)
+{
+	int rec_argc, i = 0, j;
+	const char **rec_argv;
+
+	rec_argc = argc + 2;
+	rec_argv = calloc(rec_argc + 1, sizeof(char *));
+	rec_argv[i++] = strdup("report");
+	rec_argv[i++] = strdup("-i");
+	rec_argv[i++] = strdup(file_name);
+	for (j = 1; j < argc; j++, i++)
+		rec_argv[i] = argv[j];
+
+	BUG_ON(i != rec_argc);
+
+	return cmd_report(i, rec_argv, NULL);
+}
+
+static int __cmd_buildid_list(int argc, const char **argv)
+{
+	int rec_argc, i = 0, j;
+	const char **rec_argv;
+
+	rec_argc = argc + 2;
+	rec_argv = calloc(rec_argc + 1, sizeof(char *));
+	rec_argv[i++] = strdup("buildid-list");
+	rec_argv[i++] = strdup("-i");
+	rec_argv[i++] = strdup(file_name);
+	for (j = 1; j < argc; j++, i++)
+		rec_argv[i] = argv[j];
+
+	BUG_ON(i != rec_argc);
+
+	return cmd_buildid_list(i, rec_argv, NULL);
+}
+
+int cmd_kvm(int argc, const char **argv, const char *prefix __used)
+{
+	perf_host = perf_guest = 0;
+
+	argc = parse_options(argc, argv, kvm_options, kvm_usage,
+			PARSE_OPT_STOP_AT_NON_OPTION);
+	if (!argc)
+		usage_with_options(kvm_usage, kvm_options);
+
+	if (!perf_host)
+		perf_guest = 1;
+
+	if (!file_name) {
+		if (perf_host && !perf_guest)
+			sprintf(name_buffer, "perf.data.host");
+		else if (!perf_host && perf_guest)
+			sprintf(name_buffer, "perf.data.guest");
+		else
+			sprintf(name_buffer, "perf.data.kvm");
+		file_name = name_buffer;
+	}
+
+	if (!strncmp(argv[0], "rec", 3))
+		return __cmd_record(argc, argv);
+	else if (!strncmp(argv[0], "rep", 3))
+		return __cmd_report(argc, argv);
+	else if (!strncmp(argv[0], "diff", 4))
+		return cmd_diff(argc, argv, NULL);
+	else if (!strncmp(argv[0], "top", 3))
+		return cmd_top(argc, argv, NULL);
+	else if (!strncmp(argv[0], "buildid-list", 12))
+		return __cmd_buildid_list(argc, argv);
+	else
+		usage_with_options(kvm_usage, kvm_options);
+
+	return 0;
+}
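The new builtin is a thin dispatcher: it picks a default data file (perf.data.host, perf.data.guest, or perf.data.kvm), then re-invokes the existing subcommands with -i/-o injected. The rec_argc = argc + 2 arithmetic holds because argv[0] (the subcommand name) is replaced by three entries. A toy model of the forwarding, outside perf, with hypothetical names:

	/* Toy model of the argv forwarding done by __cmd_record() above. */
	#include <assert.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	static const char **forward(int argc, const char **argv, const char *file)
	{
		int rec_argc = argc + 2, i = 0, j;	/* argv[0] becomes 3 entries */
		const char **rec_argv = calloc(rec_argc + 1, sizeof(char *));

		rec_argv[i++] = strdup("record");
		rec_argv[i++] = strdup("-o");
		rec_argv[i++] = strdup(file);
		for (j = 1; j < argc; j++, i++)
			rec_argv[i] = argv[j];
		assert(i == rec_argc);
		return rec_argv;
	}

	int main(void)
	{
		const char *argv[] = { "record", "-a", "-g" };
		const char **out = forward(3, argv, "perf.data.kvm");
		int k;

		/* prints: record -o perf.data.kvm -a -g */
		for (k = 0; out[k]; k++)
			printf("%s ", out[k]);
		printf("\n");
		return 0;
	}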
tools/perf/builtin-lock.c

@@ -744,7 +744,7 @@ static const char * const lock_usage[] = {

 static const struct option lock_options[] = {
 	OPT_STRING('i', "input", &input_name, "file", "input file name"),
-	OPT_BOOLEAN('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
+	OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
 	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"),
 	OPT_END()
 };

tools/perf/builtin-probe.c

@@ -36,13 +36,10 @@
 #include "builtin.h"
 #include "util/util.h"
 #include "util/strlist.h"
-#include "util/event.h"
+#include "util/symbol.h"
 #include "util/debug.h"
 #include "util/debugfs.h"
-#include "util/symbol.h"
-#include "util/thread.h"
 #include "util/parse-options.h"
-#include "util/parse-events.h"	/* For debugfs_path */
 #include "util/probe-finder.h"
 #include "util/probe-event.h"

@@ -50,103 +47,83 @@

 /* Session management structure */
 static struct {
-	bool need_dwarf;
 	bool list_events;
 	bool force_add;
 	bool show_lines;
-	int nr_probe;
-	struct probe_point probes[MAX_PROBES];
+	int nevents;
+	struct perf_probe_event events[MAX_PROBES];
 	struct strlist *dellist;
-	struct map_groups kmap_groups;
-	struct map *kmaps[MAP__NR_TYPES];
 	struct line_range line_range;
-} session;
+} params;


 /* Parse an event definition. Note that any error must die. */
-static void parse_probe_event(const char *str)
+static int parse_probe_event(const char *str)
 {
-	struct probe_point *pp = &session.probes[session.nr_probe];
+	struct perf_probe_event *pev = &params.events[params.nevents];
+	int ret;

-	pr_debug("probe-definition(%d): %s\n", session.nr_probe, str);
-	if (++session.nr_probe == MAX_PROBES)
+	pr_debug("probe-definition(%d): %s\n", params.nevents, str);
+	if (++params.nevents == MAX_PROBES)
 		die("Too many probes (> %d) are specified.", MAX_PROBES);

-	/* Parse perf-probe event into probe_point */
-	parse_perf_probe_event(str, pp, &session.need_dwarf);
+	/* Parse a perf-probe command into event */
+	ret = parse_perf_probe_command(str, pev);
+	pr_debug("%d arguments\n", pev->nargs);

-	pr_debug("%d arguments\n", pp->nr_args);
+	return ret;
 }

-static void parse_probe_event_argv(int argc, const char **argv)
+static int parse_probe_event_argv(int argc, const char **argv)
 {
-	int i, len;
+	int i, len, ret;
 	char *buf;

 	/* Bind up rest arguments */
 	len = 0;
 	for (i = 0; i < argc; i++)
 		len += strlen(argv[i]) + 1;
-	buf = zalloc(len + 1);
-	if (!buf)
-		die("Failed to allocate memory for binding arguments.");
+	buf = xzalloc(len + 1);
 	len = 0;
 	for (i = 0; i < argc; i++)
 		len += sprintf(&buf[len], "%s ", argv[i]);
-	parse_probe_event(buf);
+	ret = parse_probe_event(buf);
 	free(buf);
+	return ret;
 }

 static int opt_add_probe_event(const struct option *opt __used,
 			       const char *str, int unset __used)
 {
 	if (str)
-		parse_probe_event(str);
-	return 0;
+		return parse_probe_event(str);
+	else
+		return 0;
 }

 static int opt_del_probe_event(const struct option *opt __used,
 			       const char *str, int unset __used)
 {
 	if (str) {
-		if (!session.dellist)
-			session.dellist = strlist__new(true, NULL);
-		strlist__add(session.dellist, str);
+		if (!params.dellist)
+			params.dellist = strlist__new(true, NULL);
+		strlist__add(params.dellist, str);
 	}
 	return 0;
 }

-/* Currently just checking function name from symbol map */
-static void evaluate_probe_point(struct probe_point *pp)
-{
-	struct symbol *sym;
-	sym = map__find_symbol_by_name(session.kmaps[MAP__FUNCTION],
-				       pp->function, NULL);
-	if (!sym)
-		die("Kernel symbol \'%s\' not found - probe not added.",
-		    pp->function);
-}
-
-#ifndef NO_DWARF_SUPPORT
-static int open_vmlinux(void)
-{
-	if (map__load(session.kmaps[MAP__FUNCTION], NULL) < 0) {
-		pr_debug("Failed to load kernel map.\n");
-		return -EINVAL;
-	}
-	pr_debug("Try to open %s\n",
-		 session.kmaps[MAP__FUNCTION]->dso->long_name);
-	return open(session.kmaps[MAP__FUNCTION]->dso->long_name, O_RDONLY);
-}
-
+#ifdef DWARF_SUPPORT
 static int opt_show_lines(const struct option *opt __used,
 			  const char *str, int unset __used)
 {
+	int ret = 0;
+
 	if (str)
-		parse_line_range_desc(str, &session.line_range);
-	INIT_LIST_HEAD(&session.line_range.line_list);
-	session.show_lines = true;
-	return 0;
+		ret = parse_line_range_desc(str, &params.line_range);
+	INIT_LIST_HEAD(&params.line_range.line_list);
+	params.show_lines = true;
+
+	return ret;
 }
 #endif

@@ -155,29 +132,25 @@ static const char * const probe_usage[] = {
 	"perf probe [<options>] --add 'PROBEDEF' [--add 'PROBEDEF' ...]",
 	"perf probe [<options>] --del '[GROUP:]EVENT' ...",
 	"perf probe --list",
-#ifndef NO_DWARF_SUPPORT
+#ifdef DWARF_SUPPORT
 	"perf probe --line 'LINEDESC'",
 #endif
 	NULL
 };

 static const struct option options[] = {
-	OPT_BOOLEAN('v', "verbose", &verbose,
+	OPT_INCR('v', "verbose", &verbose,
 		    "be more verbose (show parsed arguments, etc)"),
-#ifndef NO_DWARF_SUPPORT
-	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
-		   "file", "vmlinux pathname"),
-#endif
-	OPT_BOOLEAN('l', "list", &session.list_events,
+	OPT_BOOLEAN('l', "list", &params.list_events,
 		    "list up current probe events"),
 	OPT_CALLBACK('d', "del", NULL, "[GROUP:]EVENT", "delete a probe event.",
 		     opt_del_probe_event),
 	OPT_CALLBACK('a', "add", NULL,
-#ifdef NO_DWARF_SUPPORT
-		"[EVENT=]FUNC[+OFF|%return] [ARG ...]",
-#else
+#ifdef DWARF_SUPPORT
 		"[EVENT=]FUNC[@SRC][+OFF|%return|:RL|;PT]|SRC:AL|SRC;PT"
-		" [ARG ...]",
+		" [[NAME=]ARG ...]",
+#else
+		"[EVENT=]FUNC[+OFF|%return] [[NAME=]ARG ...]",
 #endif
 		"probe point definition, where\n"
 		"\t\tGROUP:\tGroup name (optional)\n"
@@ -185,51 +158,33 @@ static const struct option options[] = {
 		"\t\tFUNC:\tFunction name\n"
 		"\t\tOFF:\tOffset from function entry (in byte)\n"
 		"\t\t%return:\tPut the probe at function return\n"
-#ifdef NO_DWARF_SUPPORT
-		"\t\tARG:\tProbe argument (only \n"
-#else
+#ifdef DWARF_SUPPORT
 		"\t\tSRC:\tSource code path\n"
 		"\t\tRL:\tRelative line number from function entry.\n"
 		"\t\tAL:\tAbsolute line number in file.\n"
 		"\t\tPT:\tLazy expression of line code.\n"
 		"\t\tARG:\tProbe argument (local variable name or\n"
-#endif
 		"\t\t\tkprobe-tracer argument format.)\n",
-		opt_add_probe_event),
-	OPT_BOOLEAN('f', "force", &session.force_add, "forcibly add events"
-		    " with existing name"),
-#ifndef NO_DWARF_SUPPORT
-	OPT_CALLBACK('L', "line", NULL,
-		     "FUNC[:RLN[+NUM|:RLN2]]|SRC:ALN[+NUM|:ALN2]",
-		     "Show source code lines.", opt_show_lines),
+#else
+		"\t\tARG:\tProbe argument (kprobe-tracer argument format.)\n",
 #endif
+		opt_add_probe_event),
+	OPT_BOOLEAN('f', "force", &params.force_add, "forcibly add events"
+		    " with existing name"),
+#ifdef DWARF_SUPPORT
+	OPT_CALLBACK('L', "line", NULL,
+		     "FUNC[:RLN[+NUM|-RLN2]]|SRC:ALN[+NUM|-ALN2]",
+		     "Show source code lines.", opt_show_lines),
+	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
+		   "file", "vmlinux pathname"),
+#endif
+	OPT__DRY_RUN(&probe_event_dry_run),
 	OPT_END()
 };

-/* Initialize symbol maps for vmlinux */
-static void init_vmlinux(void)
-{
-	symbol_conf.sort_by_name = true;
-	if (symbol_conf.vmlinux_name == NULL)
-		symbol_conf.try_vmlinux_path = true;
-	else
-		pr_debug("Use vmlinux: %s\n", symbol_conf.vmlinux_name);
-	if (symbol__init() < 0)
-		die("Failed to init symbol map.");
-
-	map_groups__init(&session.kmap_groups);
-	if (map_groups__create_kernel_maps(&session.kmap_groups,
-					   session.kmaps) < 0)
-		die("Failed to create kernel maps.");
-}
-
 int cmd_probe(int argc, const char **argv, const char *prefix __used)
 {
-	int i, ret;
-#ifndef NO_DWARF_SUPPORT
-	int fd;
-#endif
-	struct probe_point *pp;
+	int ret;

 	argc = parse_options(argc, argv, options, probe_usage,
 			     PARSE_OPT_STOP_AT_NON_OPTION);
@@ -238,123 +193,65 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
 			pr_warning(" Error: '-' is not supported.\n");
 			usage_with_options(probe_usage, options);
 		}
-		parse_probe_event_argv(argc, argv);
+		ret = parse_probe_event_argv(argc, argv);
+		if (ret < 0) {
+			pr_err(" Error: Parse Error. (%d)\n", ret);
+			return ret;
+		}
 	}

-	if ((!session.nr_probe && !session.dellist && !session.list_events &&
-	     !session.show_lines))
+	if ((!params.nevents && !params.dellist && !params.list_events &&
+	     !params.show_lines))
 		usage_with_options(probe_usage, options);

-	if (debugfs_valid_mountpoint(debugfs_path) < 0)
-		die("Failed to find debugfs path.");
-
-	if (session.list_events) {
-		if (session.nr_probe != 0 || session.dellist) {
-			pr_warning(" Error: Don't use --list with"
-				   " --add/--del.\n");
+	if (params.list_events) {
+		if (params.nevents != 0 || params.dellist) {
+			pr_err(" Error: Don't use --list with --add/--del.\n");
 			usage_with_options(probe_usage, options);
 		}
-		if (session.show_lines) {
-			pr_warning(" Error: Don't use --list with --line.\n");
+		if (params.show_lines) {
+			pr_err(" Error: Don't use --list with --line.\n");
 			usage_with_options(probe_usage, options);
 		}
-		show_perf_probe_events();
-		return 0;
+		ret = show_perf_probe_events();
+		if (ret < 0)
+			pr_err(" Error: Failed to show event list. (%d)\n",
+			       ret);
+		return ret;
 	}

-#ifndef NO_DWARF_SUPPORT
-	if (session.show_lines) {
-		if (session.nr_probe != 0 || session.dellist) {
+#ifdef DWARF_SUPPORT
+	if (params.show_lines) {
+		if (params.nevents != 0 || params.dellist) {
 			pr_warning(" Error: Don't use --line with"
 				   " --add/--del.\n");
 			usage_with_options(probe_usage, options);
 		}
-		init_vmlinux();
-		fd = open_vmlinux();
-		if (fd < 0)
-			die("Could not open debuginfo file.");
-		ret = find_line_range(fd, &session.line_range);
-		if (ret <= 0)
-			die("Source line is not found.\n");
-		close(fd);
-		show_line_range(&session.line_range);
-		return 0;
+
+		ret = show_line_range(&params.line_range);
+		if (ret < 0)
+			pr_err(" Error: Failed to show lines. (%d)\n", ret);
+		return ret;
 	}
 #endif

-	if (session.dellist) {
-		del_trace_kprobe_events(session.dellist);
-		strlist__delete(session.dellist);
-		if (session.nr_probe == 0)
-			return 0;
-	}
-
-	/* Add probes */
-	init_vmlinux();
-
-	if (session.need_dwarf)
-#ifdef NO_DWARF_SUPPORT
-		die("Debuginfo-analysis is not supported");
-#else	/* !NO_DWARF_SUPPORT */
-		pr_debug("Some probes require debuginfo.\n");
-
-	fd = open_vmlinux();
-	if (fd < 0) {
-		if (session.need_dwarf)
-			die("Could not open debuginfo file.");
-
-		pr_debug("Could not open vmlinux/module file."
-			 " Try to use symbols.\n");
-		goto end_dwarf;
-	}
-
-	/* Searching probe points */
-	for (i = 0; i < session.nr_probe; i++) {
-		pp = &session.probes[i];
-		if (pp->found)
-			continue;
-
-		lseek(fd, SEEK_SET, 0);
-		ret = find_probe_point(fd, pp);
-		if (ret > 0)
-			continue;
-		if (ret == 0) {	/* No error but failed to find probe point. */
-			synthesize_perf_probe_point(pp);
-			die("Probe point '%s' not found. - probe not added.",
-			    pp->probes[0]);
-		}
-		/* Error path */
-		if (session.need_dwarf) {
-			if (ret == -ENOENT)
-				pr_warning("No dwarf info found in the vmlinux - please rebuild with CONFIG_DEBUG_INFO=y.\n");
-			die("Could not analyze debuginfo.");
-		}
-		pr_debug("An error occurred in debuginfo analysis."
-			 " Try to use symbols.\n");
-		break;
-	}
-	close(fd);
-
-end_dwarf:
-#endif /* !NO_DWARF_SUPPORT */
-
-	/* Synthesize probes without dwarf */
-	for (i = 0; i < session.nr_probe; i++) {
-		pp = &session.probes[i];
-		if (pp->found)	/* This probe is already found. */
-			continue;
-
-		evaluate_probe_point(pp);
-		ret = synthesize_trace_kprobe_event(pp);
-		if (ret == -E2BIG)
-			die("probe point definition becomes too long.");
-		else if (ret < 0)
-			die("Failed to synthesize a probe point.");
-	}
-
-	/* Settng up probe points */
-	add_trace_kprobe_events(session.probes, session.nr_probe,
-				session.force_add);
+	if (params.dellist) {
+		ret = del_perf_probe_events(params.dellist);
+		strlist__delete(params.dellist);
+		if (ret < 0) {
+			pr_err(" Error: Failed to delete events. (%d)\n", ret);
+			return ret;
+		}
+	}
+
+	if (params.nevents) {
+		ret = add_perf_probe_events(params.events, params.nevents,
+					    params.force_add);
+		if (ret < 0) {
+			pr_err(" Error: Failed to add events. (%d)\n", ret);
+			return ret;
+		}
+	}
 	return 0;
 }

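Beyond the session -> params rename, the probe rewrite converts die()-style helpers into int-returning ones so cmd_probe() can report failures and propagate an exit status. A minimal sketch of that idiom, with generic names rather than perf code:

	/* Minimal sketch of the die() -> "return negative error" conversion. */
	#include <errno.h>
	#include <stdio.h>

	static int do_step(int fail)
	{
		return fail ? -EINVAL : 0;	/* was: die("...") on failure */
	}

	static int run_command(int fail)
	{
		int ret = do_step(fail);

		if (ret < 0) {
			fprintf(stderr, " Error: step failed. (%d)\n", ret);
			return ret;	/* caller turns this into an exit status */
		}
		return 0;
	}

	int main(void)
	{
		run_command(0);			/* succeeds silently */
		return run_command(1) < 0;	/* reports and exits non-zero */
	}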
tools/perf/builtin-record.c

@@ -15,7 +15,6 @@
 #include "util/util.h"
 #include "util/parse-options.h"
 #include "util/parse-events.h"
-#include "util/string.h"

 #include "util/header.h"
 #include "util/event.h"
@@ -27,31 +26,41 @@
 #include <unistd.h>
 #include <sched.h>

-static int fd[MAX_NR_CPUS][MAX_COUNTERS];
+enum write_mode_t {
+	WRITE_FORCE,
+	WRITE_APPEND
+};
+
+static int *fd[MAX_NR_CPUS][MAX_COUNTERS];
+
+static unsigned int user_interval = UINT_MAX;
 static long default_interval = 0;

 static int nr_cpus = 0;
 static unsigned int page_size;
 static unsigned int mmap_pages = 128;
+static unsigned int user_freq = UINT_MAX;
 static int freq = 1000;
 static int output;
+static int pipe_output = 0;
 static const char *output_name = "perf.data";
 static int group = 0;
 static unsigned int realtime_prio = 0;
-static int raw_samples = 0;
-static int system_wide = 0;
+static bool raw_samples = false;
+static bool system_wide = false;
 static int profile_cpu = -1;
 static pid_t target_pid = -1;
+static pid_t target_tid = -1;
+static pid_t *all_tids = NULL;
+static int thread_num = 0;
 static pid_t child_pid = -1;
-static int inherit = 1;
-static int force = 0;
-static int append_file = 0;
-static int call_graph = 0;
-static int inherit_stat = 0;
-static int no_samples = 0;
-static int sample_address = 0;
-static int multiplex = 0;
+static bool inherit = true;
+static enum write_mode_t write_mode = WRITE_FORCE;
+static bool call_graph = false;
+static bool inherit_stat = false;
+static bool no_samples = false;
+static bool sample_address = false;
+static bool multiplex = false;
 static int multiplex_fd = -1;

 static long samples = 0;
@@ -60,7 +69,7 @@ static struct timeval this_read;

 static u64 bytes_written = 0;

-static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS];
+static struct pollfd *event_array;

 static int nr_poll = 0;
 static int nr_cpu = 0;
@@ -77,7 +86,7 @@ struct mmap_data {
 	unsigned int prev;
 };

-static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS];
+static struct mmap_data *mmap_array[MAX_NR_CPUS][MAX_COUNTERS];

 static unsigned long mmap_read_head(struct mmap_data *md)
 {
@@ -101,6 +110,11 @@ static void mmap_write_tail(struct mmap_data *md, unsigned long tail)
 	pc->data_tail = tail;
 }

+static void advance_output(size_t size)
+{
+	bytes_written += size;
+}
+
 static void write_output(void *buf, size_t size)
 {
 	while (size) {
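fd and mmap_array become per-thread pointer tables here, event_array becomes dynamically sized, and advance_output() accounts for bytes a synthesis helper has already written straight to the output fd, so the header's data_size bookkeeping stays correct. The actual allocation of these tables is not in this excerpt; a hypothetical sizing, assuming the surrounding builtin-record.c declarations plus <stdlib.h> and <poll.h>:

	/* Hypothetical sizing of the per-thread tables declared above; the
	 * real allocation happens elsewhere in cmd_record(), outside this
	 * excerpt. */
	static int alloc_fd_tables(void)
	{
		int i, j;

		for (i = 0; i < MAX_NR_CPUS; i++) {
			for (j = 0; j < MAX_COUNTERS; j++) {
				fd[i][j] = malloc(sizeof(int) * thread_num);
				if (!fd[i][j])
					return -1;
				mmap_array[i][j] = calloc(thread_num,
							  sizeof(struct mmap_data));
				if (!mmap_array[i][j])
					return -1;
			}
		}
		event_array = malloc(sizeof(struct pollfd) *
				     MAX_NR_CPUS * MAX_COUNTERS * thread_num);
		return event_array ? 0 : -1;
	}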
@@ -225,12 +239,13 @@ static struct perf_header_attr *get_header_attr(struct perf_event_attr *a, int n
 	return h_attr;
 }

-static void create_counter(int counter, int cpu, pid_t pid)
+static void create_counter(int counter, int cpu)
 {
 	char *filter = filters[counter];
 	struct perf_event_attr *attr = attrs + counter;
 	struct perf_header_attr *h_attr;
 	int track = !counter; /* only the first counter needs these */
+	int thread_index;
 	int ret;
 	struct {
 		u64 count;
@@ -248,10 +263,19 @@ static void create_counter(int counter, int cpu)
 	if (nr_counters > 1)
 		attr->sample_type |= PERF_SAMPLE_ID;

-	if (freq) {
-		attr->sample_type |= PERF_SAMPLE_PERIOD;
-		attr->freq = 1;
-		attr->sample_freq = freq;
+	/*
+	 * We default some events to a 1 default interval. But keep
+	 * it a weak assumption overridable by the user.
+	 */
+	if (!attr->sample_period || (user_freq != UINT_MAX &&
+				     user_interval != UINT_MAX)) {
+		if (freq) {
+			attr->sample_type |= PERF_SAMPLE_PERIOD;
+			attr->freq = 1;
+			attr->sample_freq = freq;
+		} else {
+			attr->sample_period = default_interval;
+		}
 	}

 	if (no_samples)
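The comment in the hunk above carries the key rule: an event that arrives with its own sample_period keeps it, unless the user explicitly supplied both a frequency and an interval. Restated as a pure function for clarity (illustrative only, mirroring the hunk's condition):

	/* The defaulting rule above, isolated; UINT_MAX means "the user did
	 * not pass -F/-c". Illustrative only. */
	#include <limits.h>

	struct sampling { int use_freq; long period; int freq; };

	static struct sampling pick(unsigned long long event_period,
				    unsigned int user_freq,
				    unsigned int user_interval,
				    int freq, long default_interval)
	{
		struct sampling s = { 0, (long)event_period, 0 };

		/* an event's own period is kept unless the user overrode it */
		if (!event_period || (user_freq != UINT_MAX &&
				      user_interval != UINT_MAX)) {
			if (freq) {
				s.use_freq = 1;
				s.freq = freq;
			} else {
				s.period = default_interval;
			}
		}
		return s;
	}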
@@ -275,118 +299,129 @@ static void create_counter(int counter, int cpu)
 	attr->mmap = track;
 	attr->comm = track;
 	attr->inherit = inherit;
-	attr->disabled = 1;
-
-try_again:
-	fd[nr_cpu][counter] = sys_perf_event_open(attr, pid, cpu, group_fd, 0);
-
-	if (fd[nr_cpu][counter] < 0) {
-		int err = errno;
-
-		if (err == EPERM || err == EACCES)
-			die("Permission error - are you root?\n");
-		else if (err == ENODEV && profile_cpu != -1)
-			die("No such device - did you specify an out-of-range profile CPU?\n");
-
-		/*
-		 * If it's cycles then fall back to hrtimer
-		 * based cpu-clock-tick sw counter, which
-		 * is always available even if no PMU support:
-		 */
-		if (attr->type == PERF_TYPE_HARDWARE
-			&& attr->config == PERF_COUNT_HW_CPU_CYCLES) {
-
-			if (verbose)
-				warning(" ... trying to fall back to cpu-clock-ticks\n");
-			attr->type = PERF_TYPE_SOFTWARE;
-			attr->config = PERF_COUNT_SW_CPU_CLOCK;
-			goto try_again;
-		}
-		printf("\n");
-		error("perfcounter syscall returned with %d (%s)\n",
-			fd[nr_cpu][counter], strerror(err));
-
-#if defined(__i386__) || defined(__x86_64__)
-		if (attr->type == PERF_TYPE_HARDWARE && err == EOPNOTSUPP)
-			die("No hardware sampling interrupt available. No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it.\n");
-#endif
-
-		die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
-		exit(-1);
-	}
-
-	h_attr = get_header_attr(attr, counter);
-	if (h_attr == NULL)
-		die("nomem\n");
-
-	if (!file_new) {
-		if (memcmp(&h_attr->attr, attr, sizeof(*attr))) {
-			fprintf(stderr, "incompatible append\n");
-			exit(-1);
-		}
-	}
-
-	if (read(fd[nr_cpu][counter], &read_data, sizeof(read_data)) == -1) {
-		perror("Unable to read perf file descriptor\n");
-		exit(-1);
-	}
-
-	if (perf_header_attr__add_id(h_attr, read_data.id) < 0) {
-		pr_warning("Not enough memory to add id\n");
-		exit(-1);
-	}
-
-	assert(fd[nr_cpu][counter] >= 0);
-	fcntl(fd[nr_cpu][counter], F_SETFL, O_NONBLOCK);
-
-	/*
-	 * First counter acts as the group leader:
-	 */
-	if (group && group_fd == -1)
-		group_fd = fd[nr_cpu][counter];
-	if (multiplex && multiplex_fd == -1)
-		multiplex_fd = fd[nr_cpu][counter];
-
-	if (multiplex && fd[nr_cpu][counter] != multiplex_fd) {
-
-		ret = ioctl(fd[nr_cpu][counter], PERF_EVENT_IOC_SET_OUTPUT, multiplex_fd);
-		assert(ret != -1);
-	} else {
-		event_array[nr_poll].fd = fd[nr_cpu][counter];
-		event_array[nr_poll].events = POLLIN;
-		nr_poll++;
-
-		mmap_array[nr_cpu][counter].counter = counter;
-		mmap_array[nr_cpu][counter].prev = 0;
-		mmap_array[nr_cpu][counter].mask = mmap_pages*page_size - 1;
-		mmap_array[nr_cpu][counter].base = mmap(NULL, (mmap_pages+1)*page_size,
-				PROT_READ|PROT_WRITE, MAP_SHARED, fd[nr_cpu][counter], 0);
-		if (mmap_array[nr_cpu][counter].base == MAP_FAILED) {
-			error("failed to mmap with %d (%s)\n", errno, strerror(errno));
-			exit(-1);
-		}
-	}
-
-	if (filter != NULL) {
-		ret = ioctl(fd[nr_cpu][counter],
-			    PERF_EVENT_IOC_SET_FILTER, filter);
-		if (ret) {
-			error("failed to set filter with %d (%s)\n", errno,
-			      strerror(errno));
-			exit(-1);
-		}
-	}
-
-	ioctl(fd[nr_cpu][counter], PERF_EVENT_IOC_ENABLE);
+	if (target_pid == -1 && !system_wide) {
+		attr->disabled = 1;
+		attr->enable_on_exec = 1;
+	}
+
+	for (thread_index = 0; thread_index < thread_num; thread_index++) {
+try_again:
+		fd[nr_cpu][counter][thread_index] = sys_perf_event_open(attr,
+				all_tids[thread_index], cpu, group_fd, 0);
+
+		if (fd[nr_cpu][counter][thread_index] < 0) {
+			int err = errno;
+
+			if (err == EPERM || err == EACCES)
+				die("Permission error - are you root?\n"
+					"\t Consider tweaking"
+					" /proc/sys/kernel/perf_event_paranoid.\n");
+			else if (err == ENODEV && profile_cpu != -1) {
+				die("No such device - did you specify"
+					" an out-of-range profile CPU?\n");
+			}
+
+			/*
+			 * If it's cycles then fall back to hrtimer
+			 * based cpu-clock-tick sw counter, which
+			 * is always available even if no PMU support:
+			 */
+			if (attr->type == PERF_TYPE_HARDWARE
+				&& attr->config == PERF_COUNT_HW_CPU_CYCLES) {
+
+				if (verbose)
+					warning(" ... trying to fall back to cpu-clock-ticks\n");
+				attr->type = PERF_TYPE_SOFTWARE;
+				attr->config = PERF_COUNT_SW_CPU_CLOCK;
+				goto try_again;
+			}
+			printf("\n");
+			error("perfcounter syscall returned with %d (%s)\n",
+				fd[nr_cpu][counter][thread_index], strerror(err));
+
+#if defined(__i386__) || defined(__x86_64__)
+			if (attr->type == PERF_TYPE_HARDWARE && err == EOPNOTSUPP)
+				die("No hardware sampling interrupt available."
+				    " No APIC? If so then you can boot the kernel"
+				    " with the \"lapic\" boot parameter to"
+				    " force-enable it.\n");
+#endif
+
+			die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
+			exit(-1);
+		}
+
+		h_attr = get_header_attr(attr, counter);
+		if (h_attr == NULL)
+			die("nomem\n");
+
+		if (!file_new) {
+			if (memcmp(&h_attr->attr, attr, sizeof(*attr))) {
+				fprintf(stderr, "incompatible append\n");
+				exit(-1);
+			}
+		}
+
+		if (read(fd[nr_cpu][counter][thread_index], &read_data, sizeof(read_data)) == -1) {
+			perror("Unable to read perf file descriptor\n");
+			exit(-1);
+		}
+
+		if (perf_header_attr__add_id(h_attr, read_data.id) < 0) {
+			pr_warning("Not enough memory to add id\n");
+			exit(-1);
+		}
+
+		assert(fd[nr_cpu][counter][thread_index] >= 0);
+		fcntl(fd[nr_cpu][counter][thread_index], F_SETFL, O_NONBLOCK);
+
+		/*
+		 * First counter acts as the group leader:
+		 */
+		if (group && group_fd == -1)
+			group_fd = fd[nr_cpu][counter][thread_index];
+		if (multiplex && multiplex_fd == -1)
+			multiplex_fd = fd[nr_cpu][counter][thread_index];
+
+		if (multiplex && fd[nr_cpu][counter][thread_index] != multiplex_fd) {
+
+			ret = ioctl(fd[nr_cpu][counter][thread_index], PERF_EVENT_IOC_SET_OUTPUT, multiplex_fd);
+			assert(ret != -1);
+		} else {
+			event_array[nr_poll].fd = fd[nr_cpu][counter][thread_index];
+			event_array[nr_poll].events = POLLIN;
+			nr_poll++;
+
+			mmap_array[nr_cpu][counter][thread_index].counter = counter;
+			mmap_array[nr_cpu][counter][thread_index].prev = 0;
+			mmap_array[nr_cpu][counter][thread_index].mask = mmap_pages*page_size - 1;
+			mmap_array[nr_cpu][counter][thread_index].base = mmap(NULL, (mmap_pages+1)*page_size,
+					PROT_READ|PROT_WRITE, MAP_SHARED, fd[nr_cpu][counter][thread_index], 0);
+			if (mmap_array[nr_cpu][counter][thread_index].base == MAP_FAILED) {
+				error("failed to mmap with %d (%s)\n", errno, strerror(errno));
+				exit(-1);
+			}
+		}
+
+		if (filter != NULL) {
+			ret = ioctl(fd[nr_cpu][counter][thread_index],
+				    PERF_EVENT_IOC_SET_FILTER, filter);
+			if (ret) {
+				error("failed to set filter with %d (%s)\n", errno,
+				      strerror(errno));
+				exit(-1);
+			}
+		}
+	}
 }

-static void open_counters(int cpu, pid_t pid)
+static void open_counters(int cpu)
 {
 	int counter;

 	group_fd = -1;
 	for (counter = 0; counter < nr_counters; counter++)
-		create_counter(counter, cpu, pid);
+		create_counter(counter, cpu);

 	nr_cpu++;
 }
@ -406,10 +441,65 @@ static int process_buildids(void)
|
||||||
|
|
||||||
static void atexit_header(void)
|
static void atexit_header(void)
|
||||||
{
|
{
|
||||||
session->header.data_size += bytes_written;
|
if (!pipe_output) {
|
||||||
|
session->header.data_size += bytes_written;
|
||||||
|
|
||||||
process_buildids();
|
process_buildids();
|
||||||
perf_header__write(&session->header, output, true);
|
perf_header__write(&session->header, output, true);
|
||||||
|
} else {
|
||||||
|
int err;
|
||||||
|
|
||||||
|
err = event__synthesize_build_ids(process_synthesized_event,
|
||||||
|
session);
|
||||||
|
if (err < 0)
|
||||||
|
pr_err("Couldn't synthesize build ids.\n");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static void event__synthesize_guest_os(struct kernel_info *kerninfo,
|
||||||
|
void *data __attribute__((unused)))
|
||||||
|
{
|
||||||
|
int err;
|
||||||
|
char *guest_kallsyms;
|
||||||
|
char path[PATH_MAX];
|
||||||
|
|
||||||
|
if (is_host_kernel(kerninfo))
|
||||||
|
return;
|
||||||
|
|
||||||
|
/*
|
||||||
|
*As for guest kernel when processing subcommand record&report,
|
||||||
|
*we arrange module mmap prior to guest kernel mmap and trigger
|
||||||
|
*a preload dso because default guest module symbols are loaded
|
||||||
|
*from guest kallsyms instead of /lib/modules/XXX/XXX. This
|
||||||
|
*method is used to avoid symbol missing when the first addr is
|
||||||
|
*in module instead of in guest kernel.
|
||||||
|
*/
|
||||||
|
err = event__synthesize_modules(process_synthesized_event,
|
||||||
|
session,
|
||||||
|
kerninfo);
|
||||||
|
if (err < 0)
|
||||||
|
pr_err("Couldn't record guest kernel [%d]'s reference"
|
||||||
|
" relocation symbol.\n", kerninfo->pid);
|
||||||
|
|
||||||
|
if (is_default_guest(kerninfo))
|
||||||
|
guest_kallsyms = (char *) symbol_conf.default_guest_kallsyms;
|
||||||
|
else {
|
||||||
|
sprintf(path, "%s/proc/kallsyms", kerninfo->root_dir);
|
||||||
|
guest_kallsyms = path;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* We use _stext for guest kernel because guest kernel's /proc/kallsyms
|
||||||
|
* have no _text sometimes.
|
||||||
|
*/
|
||||||
|
err = event__synthesize_kernel_mmap(process_synthesized_event,
|
||||||
|
session, kerninfo, "_text");
|
||||||
|
if (err < 0)
|
||||||
|
err = event__synthesize_kernel_mmap(process_synthesized_event,
|
||||||
|
session, kerninfo, "_stext");
|
||||||
|
if (err < 0)
|
||||||
|
pr_err("Couldn't record guest kernel [%d]'s reference"
|
||||||
|
" relocation symbol.\n", kerninfo->pid);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int __cmd_record(int argc, const char **argv)
|
static int __cmd_record(int argc, const char **argv)
|
||||||
@@ -421,8 +511,9 @@ static int __cmd_record(int argc, const char **argv)
 	int err;
 	unsigned long waking = 0;
 	int child_ready_pipe[2], go_pipe[2];
-	const bool forks = target_pid == -1 && argc > 0;
+	const bool forks = argc > 0;
 	char buf;
+	struct kernel_info *kerninfo;

 	page_size = sysconf(_SC_PAGE_SIZE);

@@ -435,45 +526,44 @@ static int __cmd_record(int argc, const char **argv)
 		exit(-1);
 	}

-	if (!stat(output_name, &st) && st.st_size) {
-		if (!force) {
-			if (!append_file) {
-				pr_err("Error, output file %s exists, use -A "
-				       "to append or -f to overwrite.\n",
-				       output_name);
-				exit(-1);
-			}
-		} else {
+	if (!strcmp(output_name, "-"))
+		pipe_output = 1;
+	else if (!stat(output_name, &st) && st.st_size) {
+		if (write_mode == WRITE_FORCE) {
 			char oldname[PATH_MAX];
 			snprintf(oldname, sizeof(oldname), "%s.old",
 				 output_name);
 			unlink(oldname);
 			rename(output_name, oldname);
 		}
-	} else {
-		append_file = 0;
+	} else if (write_mode == WRITE_APPEND) {
+		write_mode = WRITE_FORCE;
 	}

 	flags = O_CREAT|O_RDWR;
-	if (append_file)
+	if (write_mode == WRITE_APPEND)
 		file_new = 0;
 	else
 		flags |= O_TRUNC;

-	output = open(output_name, flags, S_IRUSR|S_IWUSR);
+	if (pipe_output)
+		output = STDOUT_FILENO;
+	else
+		output = open(output_name, flags, S_IRUSR | S_IWUSR);
 	if (output < 0) {
 		perror("failed to create output file");
 		exit(-1);
 	}

-	session = perf_session__new(output_name, O_WRONLY, force);
+	session = perf_session__new(output_name, O_WRONLY,
+				    write_mode == WRITE_FORCE);
 	if (session == NULL) {
 		pr_err("Not enough memory for reading perf file header\n");
 		return -1;
 	}

 	if (!file_new) {
-		err = perf_header__read(&session->header, output);
+		err = perf_header__read(session, output);
 		if (err < 0)
 			return err;
 	}
@@ -492,13 +582,15 @@ static int __cmd_record(int argc, const char **argv)
 	atexit(atexit_header);

 	if (forks) {
-		pid = fork();
+		child_pid = fork();
 		if (pid < 0) {
 			perror("failed to fork");
 			exit(-1);
 		}

-		if (!pid) {
+		if (!child_pid) {
+			if (pipe_output)
+				dup2(2, 1);
 			close(child_ready_pipe[0]);
 			close(go_pipe[1]);
 			fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
@@ -527,10 +619,8 @@ static int __cmd_record(int argc, const char **argv)
 			exit(-1);
 		}

-		child_pid = pid;
-		if (!system_wide)
-			target_pid = pid;
+		if (!system_wide && target_tid == -1 && target_pid == -1)
+			all_tids[0] = child_pid;

 		close(child_ready_pipe[1]);
 		close(go_pipe[0]);
@@ -544,16 +634,19 @@ static int __cmd_record(int argc, const char **argv)
 		close(child_ready_pipe[0]);
 	}


 	if ((!system_wide && !inherit) || profile_cpu != -1) {
-		open_counters(profile_cpu, target_pid);
+		open_counters(profile_cpu);
 	} else {
 		nr_cpus = read_cpu_map();
 		for (i = 0; i < nr_cpus; i++)
-			open_counters(cpumap[i], target_pid);
+			open_counters(cpumap[i]);
 	}

-	if (file_new) {
+	if (pipe_output) {
+		err = perf_header__write_pipe(output);
+		if (err < 0)
+			return err;
+	} else if (file_new) {
 		err = perf_header__write(&session->header, output, false);
 		if (err < 0)
 			return err;
|
@ -561,21 +654,62 @@ static int __cmd_record(int argc, const char **argv)
|
||||||
|
|
||||||
post_processing_offset = lseek(output, 0, SEEK_CUR);
|
post_processing_offset = lseek(output, 0, SEEK_CUR);
|
||||||
|
|
||||||
|
if (pipe_output) {
|
||||||
|
err = event__synthesize_attrs(&session->header,
|
||||||
|
process_synthesized_event,
|
||||||
|
session);
|
||||||
|
if (err < 0) {
|
||||||
|
pr_err("Couldn't synthesize attrs.\n");
|
||||||
|
return err;
|
||||||
|
}
|
||||||
|
|
||||||
|
err = event__synthesize_event_types(process_synthesized_event,
|
||||||
|
session);
|
||||||
|
if (err < 0) {
|
||||||
|
pr_err("Couldn't synthesize event_types.\n");
|
||||||
|
return err;
|
||||||
|
}
|
||||||
|
|
||||||
|
err = event__synthesize_tracing_data(output, attrs,
|
||||||
|
nr_counters,
|
||||||
|
process_synthesized_event,
|
||||||
|
session);
|
||||||
|
if (err <= 0) {
|
||||||
|
pr_err("Couldn't record tracing data.\n");
|
||||||
|
return err;
|
||||||
|
}
|
||||||
|
|
||||||
|
advance_output(err);
|
||||||
|
}
|
||||||
|
|
||||||
|
kerninfo = kerninfo__findhost(&session->kerninfo_root);
|
||||||
|
if (!kerninfo) {
|
||||||
|
pr_err("Couldn't find native kernel information.\n");
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
err = event__synthesize_kernel_mmap(process_synthesized_event,
|
err = event__synthesize_kernel_mmap(process_synthesized_event,
|
||||||
session, "_text");
|
session, kerninfo, "_text");
|
||||||
|
if (err < 0)
|
||||||
|
err = event__synthesize_kernel_mmap(process_synthesized_event,
|
||||||
|
session, kerninfo, "_stext");
|
||||||
if (err < 0) {
|
if (err < 0) {
|
||||||
pr_err("Couldn't record kernel reference relocation symbol.\n");
|
pr_err("Couldn't record kernel reference relocation symbol.\n");
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
err = event__synthesize_modules(process_synthesized_event, session);
|
err = event__synthesize_modules(process_synthesized_event,
|
||||||
|
session, kerninfo);
|
||||||
if (err < 0) {
|
if (err < 0) {
|
||||||
pr_err("Couldn't record kernel reference relocation symbol.\n");
|
pr_err("Couldn't record kernel reference relocation symbol.\n");
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
if (perf_guest)
|
||||||
|
kerninfo__process_allkernels(&session->kerninfo_root,
|
||||||
|
event__synthesize_guest_os, session);
|
||||||
|
|
||||||
if (!system_wide && profile_cpu == -1)
|
if (!system_wide && profile_cpu == -1)
|
||||||
event__synthesize_thread(target_pid, process_synthesized_event,
|
event__synthesize_thread(target_tid, process_synthesized_event,
|
||||||
session);
|
session);
|
||||||
else
|
else
|
||||||
event__synthesize_threads(process_synthesized_event, session);
|
event__synthesize_threads(process_synthesized_event, session);
|
||||||
|
@ -598,11 +732,16 @@ static int __cmd_record(int argc, const char **argv)
|
||||||
|
|
||||||
for (;;) {
|
for (;;) {
|
||||||
int hits = samples;
|
int hits = samples;
|
||||||
|
int thread;
|
||||||
|
|
||||||
for (i = 0; i < nr_cpu; i++) {
|
for (i = 0; i < nr_cpu; i++) {
|
||||||
for (counter = 0; counter < nr_counters; counter++) {
|
for (counter = 0; counter < nr_counters; counter++) {
|
||||||
if (mmap_array[i][counter].base)
|
for (thread = 0;
|
||||||
mmap_read(&mmap_array[i][counter]);
|
thread < thread_num; thread++) {
|
||||||
|
if (mmap_array[i][counter][thread].base)
|
||||||
|
mmap_read(&mmap_array[i][counter][thread]);
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -615,8 +754,15 @@ static int __cmd_record(int argc, const char **argv)
|
||||||
|
|
||||||
if (done) {
|
if (done) {
|
||||||
for (i = 0; i < nr_cpu; i++) {
|
for (i = 0; i < nr_cpu; i++) {
|
||||||
for (counter = 0; counter < nr_counters; counter++)
|
for (counter = 0;
|
||||||
ioctl(fd[i][counter], PERF_EVENT_IOC_DISABLE);
|
counter < nr_counters;
|
||||||
|
counter++) {
|
||||||
|
for (thread = 0;
|
||||||
|
thread < thread_num;
|
||||||
|
thread++)
|
||||||
|
ioctl(fd[i][counter][thread],
|
||||||
|
PERF_EVENT_IOC_DISABLE);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
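The `dup2(2, 1)` added in the forked child above is what makes piped output usable: the workload's stdout is re-pointed at stderr so that only the perf data stream reaches the pipe reader. A minimal standalone sketch of the same trick (not perf code):

    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        /* After this, fd 1 (stdout) refers to the same file as fd 2
         * (stderr); anything the exec'd workload prints goes to stderr,
         * leaving the real stdout free for the binary data stream. */
        if (dup2(2, 1) < 0) {
            perror("dup2");
            return 1;
        }
        printf("this now appears on stderr\n");
        return 0;
    }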
@@ -641,6 +787,8 @@ static const char * const record_usage[] = {
    NULL
};

+static bool force, append_file;
+
static const struct option options[] = {
    OPT_CALLBACK('e', "event", NULL, "event",
                 "event selector. use 'perf list' to list available events",
@@ -648,7 +796,9 @@ static const struct option options[] = {
    OPT_CALLBACK(0, "filter", NULL, "filter",
                 "event filter", parse_filter),
    OPT_INTEGER('p', "pid", &target_pid,
-                "record events on existing pid"),
+                "record events on existing process id"),
+    OPT_INTEGER('t', "tid", &target_tid,
+                "record events on existing thread id"),
    OPT_INTEGER('r', "realtime", &realtime_prio,
                "collect data with this RT SCHED_FIFO priority"),
    OPT_BOOLEAN('R', "raw-samples", &raw_samples,
@@ -660,20 +810,20 @@ static const struct option options[] = {
    OPT_INTEGER('C', "profile_cpu", &profile_cpu,
                "CPU to profile on"),
    OPT_BOOLEAN('f', "force", &force,
-                "overwrite existing data file"),
-    OPT_LONG('c', "count", &default_interval,
+                "overwrite existing data file (deprecated)"),
+    OPT_LONG('c', "count", &user_interval,
             "event period to sample"),
    OPT_STRING('o', "output", &output_name, "file",
               "output file name"),
    OPT_BOOLEAN('i', "inherit", &inherit,
                "child tasks inherit counters"),
-    OPT_INTEGER('F', "freq", &freq,
+    OPT_INTEGER('F', "freq", &user_freq,
                "profile at this frequency"),
    OPT_INTEGER('m', "mmap-pages", &mmap_pages,
                "number of mmap data pages"),
    OPT_BOOLEAN('g', "call-graph", &call_graph,
                "do call-graph (stack chain/backtrace) recording"),
-    OPT_BOOLEAN('v', "verbose", &verbose,
+    OPT_INCR('v', "verbose", &verbose,
                "be more verbose (show counter open errors, etc)"),
    OPT_BOOLEAN('s', "stat", &inherit_stat,
                "per thread counts"),
@@ -688,13 +838,24 @@ static const struct option options[] = {

int cmd_record(int argc, const char **argv, const char *prefix __used)
{
-    int counter;
+    int i,j;

    argc = parse_options(argc, argv, options, record_usage,
                         PARSE_OPT_STOP_AT_NON_OPTION);
-    if (!argc && target_pid == -1 && !system_wide && profile_cpu == -1)
+    if (!argc && target_pid == -1 && target_tid == -1 &&
+        !system_wide && profile_cpu == -1)
        usage_with_options(record_usage, options);

+    if (force && append_file) {
+        fprintf(stderr, "Can't overwrite and append at the same time."
+                " You need to choose between -f and -A");
+        usage_with_options(record_usage, options);
+    } else if (append_file) {
+        write_mode = WRITE_APPEND;
+    } else {
+        write_mode = WRITE_FORCE;
+    }
+
    symbol__init();

    if (!nr_counters) {
@@ -703,6 +864,42 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
        attrs[0].config = PERF_COUNT_HW_CPU_CYCLES;
    }

+    if (target_pid != -1) {
+        target_tid = target_pid;
+        thread_num = find_all_tid(target_pid, &all_tids);
+        if (thread_num <= 0) {
+            fprintf(stderr, "Can't find all threads of pid %d\n",
+                    target_pid);
+            usage_with_options(record_usage, options);
+        }
+    } else {
+        all_tids=malloc(sizeof(pid_t));
+        if (!all_tids)
+            return -ENOMEM;
+
+        all_tids[0] = target_tid;
+        thread_num = 1;
+    }
+
+    for (i = 0; i < MAX_NR_CPUS; i++) {
+        for (j = 0; j < MAX_COUNTERS; j++) {
+            fd[i][j] = malloc(sizeof(int)*thread_num);
+            mmap_array[i][j] = zalloc(
+                sizeof(struct mmap_data)*thread_num);
+            if (!fd[i][j] || !mmap_array[i][j])
+                return -ENOMEM;
+        }
+    }
+    event_array = malloc(
+        sizeof(struct pollfd)*MAX_NR_CPUS*MAX_COUNTERS*thread_num);
+    if (!event_array)
+        return -ENOMEM;
+
+    if (user_interval != UINT_MAX)
+        default_interval = user_interval;
+    if (user_freq != UINT_MAX)
+        freq = user_freq;
+
    /*
     * User specified count overrides default frequency.
     */
@@ -715,12 +912,5 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
        exit(EXIT_FAILURE);
    }

-    for (counter = 0; counter < nr_counters; counter++) {
-        if (attrs[counter].sample_period)
-            continue;
-
-        attrs[counter].sample_period = default_interval;
-    }
-
    return __cmd_record(argc, argv);
}

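The new `user_interval` and `user_freq` variables act as "unset" sentinels: they stay at UINT_MAX unless -c or -F was parsed, and only then override the built-in defaults. A small standalone sketch of the pattern (names are illustrative, not the perf ones):

    #include <limits.h>

    static unsigned int user_freq = UINT_MAX; /* UINT_MAX == "not set by user" */
    static unsigned int freq = 1000;          /* built-in default */

    static void apply_user_freq(void)
    {
        /* Only clobber the default when the option was actually given. */
        if (user_freq != UINT_MAX)
            freq = user_freq;
    }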
@@ -14,7 +14,6 @@
#include "util/cache.h"
#include <linux/rbtree.h>
#include "util/symbol.h"
-#include "util/string.h"
#include "util/callchain.h"
#include "util/strlist.h"
#include "util/values.h"
@@ -33,11 +32,11 @@

static char const *input_name = "perf.data";

-static int force;
+static bool force;
static bool hide_unresolved;
static bool dont_use_callchains;

-static int show_threads;
+static bool show_threads;
static struct perf_read_values show_threads_values;

static char default_pretty_printing_style[] = "normal";
@@ -81,15 +80,20 @@ static int perf_session__add_hist_entry(struct perf_session *self,
                                        struct addr_location *al,
                                        struct sample_data *data)
{
-    struct symbol **syms = NULL, *parent = NULL;
+    struct map_symbol *syms = NULL;
+    struct symbol *parent = NULL;
    bool hit;
+    int err;
    struct hist_entry *he;
    struct event_stat_id *stats;
    struct perf_event_attr *attr;

-    if ((sort__has_parent || symbol_conf.use_callchain) && data->callchain)
+    if ((sort__has_parent || symbol_conf.use_callchain) && data->callchain) {
        syms = perf_session__resolve_callchain(self, al->thread,
                                               data->callchain, &parent);
+        if (syms == NULL)
+            return -ENOMEM;
+    }

    attr = perf_header__find_attr(data->id, &self->header);
    if (attr)
@@ -104,13 +108,16 @@ static int perf_session__add_hist_entry(struct perf_session *self,
        return -ENOMEM;

    if (hit)
-        he->count += data->period;
+        __perf_session__add_count(he, al, data->period);

    if (symbol_conf.use_callchain) {
        if (!hit)
-            callchain_init(&he->callchain);
-        append_chain(&he->callchain, data->callchain, syms);
+            callchain_init(he->callchain);
+        err = append_chain(he->callchain, data->callchain, syms);
        free(syms);
+
+        if (err)
+            return err;
    }

    return 0;
@@ -260,13 +267,27 @@ static struct perf_event_ops event_ops = {
    .fork = event__process_task,
    .lost = event__process_lost,
    .read = process_read_event,
+    .attr = event__process_attr,
+    .event_type = event__process_event_type,
+    .tracing_data = event__process_tracing_data,
+    .build_id = event__process_build_id,
};

+extern volatile int session_done;
+
+static void sig_handler(int sig __attribute__((__unused__)))
+{
+    session_done = 1;
+}
+
static int __cmd_report(void)
{
    int ret = -EINVAL;
    struct perf_session *session;
    struct rb_node *next;
+    const char *help = "For a higher level overview, try: perf report --sort comm,dso";
+
+    signal(SIGINT, sig_handler);

    session = perf_session__new(input_name, O_RDONLY, force);
    if (session == NULL)
@@ -292,39 +313,49 @@ static int __cmd_report(void)
    perf_session__fprintf(session, stdout);

    if (verbose > 2)
-        dsos__fprintf(stdout);
+        dsos__fprintf(&session->kerninfo_root, stdout);

    next = rb_first(&session->stats_by_id);
    while (next) {
        struct event_stat_id *stats;
+        u64 nr_hists;

        stats = rb_entry(next, struct event_stat_id, rb_node);
        perf_session__collapse_resort(&stats->hists);
-        perf_session__output_resort(&stats->hists, stats->stats.total);
-        if (rb_first(&session->stats_by_id) ==
-            rb_last(&session->stats_by_id))
-            fprintf(stdout, "# Samples: %Ld\n#\n",
-                    stats->stats.total);
-        else
-            fprintf(stdout, "# Samples: %Ld %s\n#\n",
-                    stats->stats.total,
-                    __event_name(stats->type, stats->config));
+        nr_hists = perf_session__output_resort(&stats->hists,
+                                               stats->stats.total);
+        if (use_browser)
+            perf_session__browse_hists(&stats->hists, nr_hists,
+                                       stats->stats.total, help,
+                                       input_name);
+        else {
+            if (rb_first(&session->stats_by_id) ==
+                rb_last(&session->stats_by_id))
+                fprintf(stdout, "# Samples: %Ld\n#\n",
+                        stats->stats.total);
+            else
+                fprintf(stdout, "# Samples: %Ld %s\n#\n",
+                        stats->stats.total,
+                        __event_name(stats->type, stats->config));

            perf_session__fprintf_hists(&stats->hists, NULL, false, stdout,
                                        stats->stats.total);
            fprintf(stdout, "\n\n");
+        }

        next = rb_next(&stats->rb_node);
    }

-    if (sort_order == default_sort_order &&
-        parent_pattern == default_parent_pattern)
-        fprintf(stdout, "#\n# (For a higher level overview, try: perf report --sort comm,dso)\n#\n");
+    if (!use_browser && sort_order == default_sort_order &&
+        parent_pattern == default_parent_pattern) {
+        fprintf(stdout, "#\n# (%s)\n#\n", help);

        if (show_threads) {
-            bool raw_printing_style = !strcmp(pretty_printing_style, "raw");
+            bool style = !strcmp(pretty_printing_style, "raw");
            perf_read_values_display(stdout, &show_threads_values,
-                                     raw_printing_style);
+                                     style);
            perf_read_values_destroy(&show_threads_values);
+        }
    }
out_delete:
    perf_session__delete(session);
@@ -400,7 +431,7 @@ static const char * const report_usage[] = {
static const struct option options[] = {
    OPT_STRING('i', "input", &input_name, "file",
               "input file name"),
-    OPT_BOOLEAN('v', "verbose", &verbose,
+    OPT_INCR('v', "verbose", &verbose,
                "be more verbose (show symbol address, etc)"),
    OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
                "dump raw trace in ASCII"),
@@ -419,6 +450,8 @@ static const struct option options[] = {
               "sort by key(s): pid, comm, dso, symbol, parent"),
    OPT_BOOLEAN('P', "full-paths", &symbol_conf.full_paths,
                "Don't shorten the pathnames taking into account the cwd"),
+    OPT_BOOLEAN(0, "showcpuutilization", &symbol_conf.show_cpu_utilization,
+                "Show sample percentage for different cpu modes"),
    OPT_STRING('p', "parent", &parent_pattern, "regex",
               "regex filter to identify parent, see: '--sort parent'"),
    OPT_BOOLEAN('x', "exclude-other", &symbol_conf.exclude_other,
@@ -447,7 +480,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
{
    argc = parse_options(argc, argv, options, report_usage, 0);

-    setup_pager();
+    if (strcmp(input_name, "-") != 0)
+        setup_browser();

    if (symbol__init() < 0)
        return -1;
@@ -455,7 +489,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
    setup_sorting(report_usage, options);

    if (parent_pattern != default_parent_pattern) {
-        sort_dimension__add("parent");
+        if (sort_dimension__add("parent") < 0)
+            return -1;
        sort_parent.elide = 1;
    } else
        symbol_conf.exclude_other = false;

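cmd_report now only starts the browser when the input is a seekable file: the conventional name "-" selects stdin, where an interactive UI cannot work. A sketch of that convention in isolation:

    #include <stdio.h>
    #include <string.h>

    /* Open a named input, treating "-" as "read from stdin" -- the same
     * test cmd_report uses above to decide whether a browser makes sense. */
    static FILE *open_input(const char *name)
    {
        if (strcmp(name, "-") == 0)
            return stdin;
        return fopen(name, "r");
    }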
@@ -68,10 +68,10 @@ enum sched_event_type {

struct sched_atom {
    enum sched_event_type type;
+    int specific_wait;
    u64 timestamp;
    u64 duration;
    unsigned long nr;
-    int specific_wait;
    sem_t *wait_sem;
    struct task_desc *wakee;
};
@@ -1790,7 +1790,7 @@ static const char * const sched_usage[] = {
static const struct option sched_options[] = {
    OPT_STRING('i', "input", &input_name, "file",
               "input file name"),
-    OPT_BOOLEAN('v', "verbose", &verbose,
+    OPT_INCR('v', "verbose", &verbose,
                "be more verbose (show symbol address, etc)"),
    OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
                "dump raw trace in ASCII"),
@@ -1805,7 +1805,7 @@ static const char * const latency_usage[] = {
static const struct option latency_options[] = {
    OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
               "sort by key(s): runtime, switch, avg, max"),
-    OPT_BOOLEAN('v', "verbose", &verbose,
+    OPT_INCR('v', "verbose", &verbose,
                "be more verbose (show symbol address, etc)"),
    OPT_INTEGER('C', "CPU", &profile_cpu,
                "CPU to profile on"),
@@ -1822,7 +1822,7 @@ static const char * const replay_usage[] = {
static const struct option replay_options[] = {
    OPT_INTEGER('r', "repeat", &replay_repeat,
                "repeat the workload replay N times (-1: infinite)"),
-    OPT_BOOLEAN('v', "verbose", &verbose,
+    OPT_INCR('v', "verbose", &verbose,
                "be more verbose (show symbol address, etc)"),
    OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
                "dump raw trace in ASCII"),

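The OPT_BOOLEAN to OPT_INCR switch repeated across these option tables turns -v from a flag into a counter: each occurrence bumps `verbose`, so `-vv` yields 2 and code can test `verbose > 1`. A minimal hand-rolled sketch of the semantics (not the parse-options implementation):

    #include <stdio.h>
    #include <string.h>

    int main(int argc, char **argv)
    {
        int verbose = 0;

        /* Every "-v" increments the level instead of just setting it. */
        for (int i = 1; i < argc; i++)
            if (strcmp(argv[i], "-v") == 0)
                verbose++;

        if (verbose > 1)
            printf("extra noisy: level %d\n", verbose);
        return 0;
    }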
@@ -46,6 +46,7 @@
#include "util/debug.h"
#include "util/header.h"
#include "util/cpumap.h"
+#include "util/thread.h"

#include <sys/prctl.h>
#include <math.h>
@@ -66,18 +67,21 @@ static struct perf_event_attr default_attrs[] = {

};

-static int system_wide = 0;
+static bool system_wide = false;
static unsigned int nr_cpus = 0;
static int run_idx = 0;

static int run_count = 1;
-static int inherit = 1;
-static int scale = 1;
+static bool inherit = true;
+static bool scale = true;
static pid_t target_pid = -1;
+static pid_t target_tid = -1;
+static pid_t *all_tids = NULL;
+static int thread_num = 0;
static pid_t child_pid = -1;
-static int null_run = 0;
+static bool null_run = false;

-static int fd[MAX_NR_CPUS][MAX_COUNTERS];
+static int *fd[MAX_NR_CPUS][MAX_COUNTERS];

static int event_scaled[MAX_COUNTERS];

@@ -140,9 +144,11 @@ struct stats runtime_branches_stats;
#define ERR_PERF_OPEN \
"Error: counter %d, sys_perf_event_open() syscall returned with %d (%s)\n"

-static void create_perf_stat_counter(int counter, int pid)
+static int create_perf_stat_counter(int counter)
{
    struct perf_event_attr *attr = attrs + counter;
+    int thread;
+    int ncreated = 0;

    if (scale)
        attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
@@ -152,21 +158,33 @@ static void create_perf_stat_counter(int counter, int pid)
        unsigned int cpu;

        for (cpu = 0; cpu < nr_cpus; cpu++) {
-            fd[cpu][counter] = sys_perf_event_open(attr, -1, cpumap[cpu], -1, 0);
-            if (fd[cpu][counter] < 0 && verbose)
-                fprintf(stderr, ERR_PERF_OPEN, counter,
-                        fd[cpu][counter], strerror(errno));
+            fd[cpu][counter][0] = sys_perf_event_open(attr,
+                    -1, cpumap[cpu], -1, 0);
+            if (fd[cpu][counter][0] < 0)
+                pr_debug(ERR_PERF_OPEN, counter,
+                         fd[cpu][counter][0], strerror(errno));
+            else
+                ++ncreated;
        }
    } else {
        attr->inherit = inherit;
-        attr->disabled = 1;
-        attr->enable_on_exec = 1;
-        fd[0][counter] = sys_perf_event_open(attr, pid, -1, -1, 0);
-        if (fd[0][counter] < 0 && verbose)
-            fprintf(stderr, ERR_PERF_OPEN, counter,
-                    fd[0][counter], strerror(errno));
+        if (target_pid == -1) {
+            attr->disabled = 1;
+            attr->enable_on_exec = 1;
+        }
+        for (thread = 0; thread < thread_num; thread++) {
+            fd[0][counter][thread] = sys_perf_event_open(attr,
+                all_tids[thread], -1, -1, 0);
+            if (fd[0][counter][thread] < 0)
+                pr_debug(ERR_PERF_OPEN, counter,
+                         fd[0][counter][thread],
+                         strerror(errno));
+            else
+                ++ncreated;
+        }
    }

+    return ncreated;
}

/*
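When create_perf_stat_counter() reports zero opened counters, the new error path (below) points the user at /proc/sys/kernel/perf_event_paranoid, the sysctl that gates unprivileged access to the perf_event_open syscall. A sketch of reading the current setting:

    #include <stdio.h>

    /* Returns the perf_event_paranoid level, or -1 if it can't be read.
     * Higher values restrict unprivileged perf usage further. */
    static int read_perf_paranoid(void)
    {
        int level = -1;
        FILE *f = fopen("/proc/sys/kernel/perf_event_paranoid", "r");

        if (f) {
            if (fscanf(f, "%d", &level) != 1)
                level = -1;
            fclose(f);
        }
        return level;
    }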
@@ -190,25 +208,28 @@ static void read_counter(int counter)
    unsigned int cpu;
    size_t res, nv;
    int scaled;
-    int i;
+    int i, thread;

    count[0] = count[1] = count[2] = 0;

    nv = scale ? 3 : 1;
    for (cpu = 0; cpu < nr_cpus; cpu++) {
-        if (fd[cpu][counter] < 0)
-            continue;
+        for (thread = 0; thread < thread_num; thread++) {
+            if (fd[cpu][counter][thread] < 0)
+                continue;

-        res = read(fd[cpu][counter], single_count, nv * sizeof(u64));
-        assert(res == nv * sizeof(u64));
+            res = read(fd[cpu][counter][thread],
+                       single_count, nv * sizeof(u64));
+            assert(res == nv * sizeof(u64));

-        close(fd[cpu][counter]);
-        fd[cpu][counter] = -1;
+            close(fd[cpu][counter][thread]);
+            fd[cpu][counter][thread] = -1;

            count[0] += single_count[0];
            if (scale) {
                count[1] += single_count[1];
                count[2] += single_count[2];
+            }
        }
    }

@@ -250,10 +271,9 @@ static int run_perf_stat(int argc __used, const char **argv)
{
    unsigned long long t0, t1;
    int status = 0;
-    int counter;
-    int pid = target_pid;
+    int counter, ncreated = 0;
    int child_ready_pipe[2], go_pipe[2];
-    const bool forks = (target_pid == -1 && argc > 0);
+    const bool forks = (argc > 0);
    char buf;

    if (!system_wide)
@@ -265,10 +285,10 @@ static int run_perf_stat(int argc __used, const char **argv)
    }

    if (forks) {
-        if ((pid = fork()) < 0)
+        if ((child_pid = fork()) < 0)
            perror("failed to fork");

-        if (!pid) {
+        if (!child_pid) {
            close(child_ready_pipe[0]);
            close(go_pipe[1]);
            fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
@@ -297,7 +317,8 @@ static int run_perf_stat(int argc __used, const char **argv)
            exit(-1);
        }

-        child_pid = pid;
+        if (target_tid == -1 && target_pid == -1 && !system_wide)
+            all_tids[0] = child_pid;

        /*
         * Wait for the child to be ready to exec.
@@ -310,7 +331,16 @@ static int run_perf_stat(int argc __used, const char **argv)
    }

    for (counter = 0; counter < nr_counters; counter++)
-        create_perf_stat_counter(counter, pid);
+        ncreated += create_perf_stat_counter(counter);
+
+    if (ncreated == 0) {
+        pr_err("No permission to collect %sstats.\n"
+               "Consider tweaking /proc/sys/kernel/perf_event_paranoid.\n",
+               system_wide ? "system-wide " : "");
+        if (child_pid != -1)
+            kill(child_pid, SIGTERM);
+        return -1;
+    }

    /*
     * Enable counters and exec the command:
@@ -321,7 +351,7 @@ static int run_perf_stat(int argc __used, const char **argv)
        close(go_pipe[1]);
        wait(&status);
    } else {
-        while(!done);
+        while(!done) sleep(1);
    }

    t1 = rdclock();
@@ -429,12 +459,14 @@ static void print_stat(int argc, const char **argv)

    fprintf(stderr, "\n");
    fprintf(stderr, " Performance counter stats for ");
-    if(target_pid == -1) {
+    if(target_pid == -1 && target_tid == -1) {
        fprintf(stderr, "\'%s", argv[0]);
        for (i = 1; i < argc; i++)
            fprintf(stderr, " %s", argv[i]);
-    }else
-        fprintf(stderr, "task pid \'%d", target_pid);
+    } else if (target_pid != -1)
+        fprintf(stderr, "process id \'%d", target_pid);
+    else
+        fprintf(stderr, "thread id \'%d", target_tid);

    fprintf(stderr, "\'");
    if (run_count > 1)
@@ -459,7 +491,7 @@ static volatile int signr = -1;

static void skip_signal(int signo)
{
-    if(target_pid != -1)
+    if(child_pid == -1)
        done = 1;

    signr = signo;
@@ -489,12 +521,14 @@ static const struct option options[] = {
    OPT_BOOLEAN('i', "inherit", &inherit,
                "child tasks inherit counters"),
    OPT_INTEGER('p', "pid", &target_pid,
-                "stat events on existing pid"),
+                "stat events on existing process id"),
+    OPT_INTEGER('t', "tid", &target_tid,
+                "stat events on existing thread id"),
    OPT_BOOLEAN('a', "all-cpus", &system_wide,
                "system-wide collection from all CPUs"),
    OPT_BOOLEAN('c', "scale", &scale,
                "scale/normalize counters"),
-    OPT_BOOLEAN('v', "verbose", &verbose,
+    OPT_INCR('v', "verbose", &verbose,
                "be more verbose (show counter open errors, etc)"),
    OPT_INTEGER('r', "repeat", &run_count,
                "repeat command and print average + stddev (max: 100)"),
@@ -506,10 +540,11 @@ static const struct option options[] = {
int cmd_stat(int argc, const char **argv, const char *prefix __used)
{
    int status;
+    int i,j;

    argc = parse_options(argc, argv, options, stat_usage,
        PARSE_OPT_STOP_AT_NON_OPTION);
-    if (!argc && target_pid == -1)
+    if (!argc && target_pid == -1 && target_tid == -1)
        usage_with_options(stat_usage, options);
    if (run_count <= 0)
        usage_with_options(stat_usage, options);
@@ -525,6 +560,31 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
    else
        nr_cpus = 1;

+    if (target_pid != -1) {
+        target_tid = target_pid;
+        thread_num = find_all_tid(target_pid, &all_tids);
+        if (thread_num <= 0) {
+            fprintf(stderr, "Can't find all threads of pid %d\n",
+                    target_pid);
+            usage_with_options(stat_usage, options);
+        }
+    } else {
+        all_tids=malloc(sizeof(pid_t));
+        if (!all_tids)
+            return -ENOMEM;
+
+        all_tids[0] = target_tid;
+        thread_num = 1;
+    }
+
+    for (i = 0; i < MAX_NR_CPUS; i++) {
+        for (j = 0; j < MAX_COUNTERS; j++) {
+            fd[i][j] = malloc(sizeof(int)*thread_num);
+            if (!fd[i][j])
+                return -ENOMEM;
+        }
+    }

    /*
     * We dont want to block the signals - that would cause
     * child tasks to inherit that and Ctrl-C would not work.
@@ -543,7 +603,8 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
        status = run_perf_stat(argc, argv);
    }

-    print_stat(argc, argv);
+    if (status != -1)
+        print_stat(argc, argv);

    return status;
}

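find_all_tid() (from the newly included util/thread.h) expands a process id into all of its thread ids so that one counter fd can be opened per thread. Its implementation is not part of this diff; a plausible sketch, assuming the usual /proc/<pid>/task layout, might look like this (hypothetical re-implementation, the real helper may differ in detail):

    #include <dirent.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/types.h>

    /* Count the entries of /proc/<pid>/task and return them in a
     * malloc'ed array; returns the thread count, or -1 on error. */
    static int find_all_tid_sketch(int pid, pid_t **all_tid)
    {
        char path[64];
        DIR *dir;
        struct dirent *de;
        int nr = 0;
        pid_t *tids = NULL;

        snprintf(path, sizeof(path), "/proc/%d/task", pid);
        dir = opendir(path);
        if (!dir)
            return -1;

        while ((de = readdir(dir)) != NULL) {
            pid_t *tmp;

            if (de->d_name[0] == '.') /* skip "." and ".." */
                continue;
            tmp = realloc(tids, (nr + 1) * sizeof(*tids));
            if (!tmp) {
                free(tids);
                closedir(dir);
                return -1;
            }
            tids = tmp;
            tids[nr++] = atoi(de->d_name);
        }
        closedir(dir);

        *all_tid = tids;
        return nr;
    }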
@@ -21,7 +21,6 @@
#include "util/cache.h"
#include <linux/rbtree.h>
#include "util/symbol.h"
-#include "util/string.h"
#include "util/callchain.h"
#include "util/strlist.h"

@@ -43,7 +42,7 @@ static u64 turbo_frequency;

static u64 first_time, last_time;

-static int power_only;
+static bool power_only;


struct per_pid;
@@ -78,8 +77,6 @@ struct per_pid {

    struct per_pidcomm *all;
    struct per_pidcomm *current;
-
-    int painted;
};


@@ -55,9 +55,9 @@
#include <linux/unistd.h>
#include <linux/types.h>

-static int fd[MAX_NR_CPUS][MAX_COUNTERS];
+static int *fd[MAX_NR_CPUS][MAX_COUNTERS];

-static int system_wide = 0;
+static bool system_wide = false;

static int default_interval = 0;

@@ -65,18 +65,21 @@ static int count_filter = 5;
static int print_entries;

static int target_pid = -1;
-static int inherit = 0;
+static int target_tid = -1;
+static pid_t *all_tids = NULL;
+static int thread_num = 0;
+static bool inherit = false;
static int profile_cpu = -1;
static int nr_cpus = 0;
static unsigned int realtime_prio = 0;
-static int group = 0;
+static bool group = false;
static unsigned int page_size;
static unsigned int mmap_pages = 16;
static int freq = 1000; /* 1 KHz */

static int delay_secs = 2;
-static int zero = 0;
-static int dump_symtab = 0;
+static bool zero = false;
+static bool dump_symtab = false;

static bool hide_kernel_symbols = false;
static bool hide_user_symbols = false;
@@ -133,7 +136,7 @@ static inline struct symbol *sym_entry__symbol(struct sym_entry *self)
    return ((void *)self) + symbol_conf.priv_size;
}

-static void get_term_dimensions(struct winsize *ws)
+void get_term_dimensions(struct winsize *ws)
{
    char *s = getenv("LINES");

@@ -169,7 +172,7 @@ static void sig_winch_handler(int sig __used)
    update_print_entries(&winsize);
}

-static void parse_source(struct sym_entry *syme)
+static int parse_source(struct sym_entry *syme)
{
    struct symbol *sym;
    struct sym_entry_source *source;
@@ -180,12 +183,21 @@ static void parse_source(struct sym_entry *syme)
    u64 len;

    if (!syme)
-        return;
+        return -1;

+    sym = sym_entry__symbol(syme);
+    map = syme->map;
+
+    /*
+     * We can't annotate with just /proc/kallsyms
+     */
+    if (map->dso->origin == DSO__ORIG_KERNEL)
+        return -1;
+
    if (syme->src == NULL) {
        syme->src = zalloc(sizeof(*source));
        if (syme->src == NULL)
-            return;
+            return -1;
        pthread_mutex_init(&syme->src->lock, NULL);
    }

@@ -195,9 +207,6 @@ static void parse_source(struct sym_entry *syme)
        pthread_mutex_lock(&source->lock);
        goto out_assign;
    }
-
-    sym = sym_entry__symbol(syme);
-    map = syme->map;
    path = map->dso->long_name;

    len = sym->end - sym->start;
@@ -209,7 +218,7 @@ static void parse_source(struct sym_entry *syme)

    file = popen(command, "r");
    if (!file)
-        return;
+        return -1;

    pthread_mutex_lock(&source->lock);
    source->lines_tail = &source->lines;
@@ -245,6 +254,7 @@ static void parse_source(struct sym_entry *syme)
out_assign:
    sym_filter_entry = syme;
    pthread_mutex_unlock(&source->lock);
+    return 0;
}

static void __zero_source_counters(struct sym_entry *syme)
@@ -410,7 +420,9 @@ static double sym_weight(const struct sym_entry *sym)
}

static long samples;
-static long userspace_samples;
+static long kernel_samples, us_samples;
+static long exact_samples;
+static long guest_us_samples, guest_kernel_samples;
static const char CONSOLE_CLEAR[] = "[H[2J";

static void __list_insert_active_sym(struct sym_entry *syme)
@@ -450,7 +462,11 @@ static void print_sym_table(void)
    int printed = 0, j;
    int counter, snap = !display_weighted ? sym_counter : 0;
    float samples_per_sec = samples/delay_secs;
-    float ksamples_per_sec = (samples-userspace_samples)/delay_secs;
+    float ksamples_per_sec = kernel_samples/delay_secs;
+    float us_samples_per_sec = (us_samples)/delay_secs;
+    float guest_kernel_samples_per_sec = (guest_kernel_samples)/delay_secs;
+    float guest_us_samples_per_sec = (guest_us_samples)/delay_secs;
+    float esamples_percent = (100.0*exact_samples)/samples;
    float sum_ksamples = 0.0;
    struct sym_entry *syme, *n;
    struct rb_root tmp = RB_ROOT;
@@ -458,7 +474,8 @@ static void print_sym_table(void)
    int sym_width = 0, dso_width = 0, dso_short_width = 0;
    const int win_width = winsize.ws_col - 1;

-    samples = userspace_samples = 0;
+    samples = us_samples = kernel_samples = exact_samples = 0;
+    guest_kernel_samples = guest_us_samples = 0;

    /* Sort the active symbols */
    pthread_mutex_lock(&active_symbols_lock);
@@ -489,9 +506,30 @@ static void print_sym_table(void)
    puts(CONSOLE_CLEAR);

    printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
-    printf( " PerfTop:%8.0f irqs/sec kernel:%4.1f%% [",
-        samples_per_sec,
-        100.0 - (100.0*((samples_per_sec-ksamples_per_sec)/samples_per_sec)));
+    if (!perf_guest) {
+        printf(" PerfTop:%8.0f irqs/sec kernel:%4.1f%%"
+               " exact: %4.1f%% [",
+               samples_per_sec,
+               100.0 - (100.0 * ((samples_per_sec - ksamples_per_sec) /
+                                 samples_per_sec)),
+               esamples_percent);
+    } else {
+        printf(" PerfTop:%8.0f irqs/sec kernel:%4.1f%% us:%4.1f%%"
+               " guest kernel:%4.1f%% guest us:%4.1f%%"
+               " exact: %4.1f%% [",
+               samples_per_sec,
+               100.0 - (100.0 * ((samples_per_sec-ksamples_per_sec) /
+                                 samples_per_sec)),
+               100.0 - (100.0 * ((samples_per_sec-us_samples_per_sec) /
+                                 samples_per_sec)),
+               100.0 - (100.0 * ((samples_per_sec -
+                                  guest_kernel_samples_per_sec) /
+                                 samples_per_sec)),
+               100.0 - (100.0 * ((samples_per_sec -
+                                  guest_us_samples_per_sec) /
+                                 samples_per_sec)),
+               esamples_percent);
+    }

    if (nr_counters == 1 || !display_weighted) {
        printf("%Ld", (u64)attrs[0].sample_period);
@@ -514,13 +552,15 @@ static void print_sym_table(void)

    if (target_pid != -1)
        printf(" (target_pid: %d", target_pid);
+    else if (target_tid != -1)
+        printf(" (target_tid: %d", target_tid);
    else
        printf(" (all");

    if (profile_cpu != -1)
        printf(", cpu: %d)\n", profile_cpu);
    else {
-        if (target_pid != -1)
+        if (target_tid != -1)
            printf(")\n");
        else
            printf(", %d CPUs)\n", nr_cpus);
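Note on the header arithmetic above: every share is written as 100.0 - 100.0 * ((samples_per_sec - X_per_sec) / samples_per_sec), which algebraically reduces to 100.0 * X_per_sec / samples_per_sec, i.e. each class's fraction of the total sample rate; the delay_secs divisor cancels, so the same value could be computed straight from the raw counts:

    /* Equivalent, simpler form of the share computation used above:
     * both reduce to 100 * class_samples / samples. */
    static float share(long class_samples, long total_samples)
    {
        return 100.0f * (float)class_samples / (float)total_samples;
    }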
@@ -582,7 +622,6 @@ static void print_sym_table(void)

        syme = rb_entry(nd, struct sym_entry, rb_node);
        sym = sym_entry__symbol(syme);
-
        if (++printed > print_entries || (int)syme->snap_count < count_filter)
            continue;

@@ -746,7 +785,7 @@ static int key_mapped(int c)
    return 0;
}

-static void handle_keypress(int c)
+static void handle_keypress(struct perf_session *session, int c)
{
    if (!key_mapped(c)) {
        struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
@@ -815,7 +854,7 @@ static void handle_keypress(int c)
        case 'Q':
            printf("exiting.\n");
            if (dump_symtab)
-                dsos__fprintf(stderr);
+                dsos__fprintf(&session->kerninfo_root, stderr);
            exit(0);
        case 's':
            prompt_symbol(&sym_filter_entry, "Enter details symbol");
@@ -839,7 +878,7 @@ static void handle_keypress(int c)
            display_weighted = ~display_weighted;
            break;
        case 'z':
-            zero = ~zero;
+            zero = !zero;
            break;
        default:
            break;
@@ -851,6 +890,7 @@ static void *display_thread(void *arg __used)
    struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
    struct termios tc, save;
    int delay_msecs, c;
+    struct perf_session *session = (struct perf_session *) arg;

    tcgetattr(0, &save);
    tc = save;
@@ -871,7 +911,7 @@ repeat:
    c = getc(stdin);
    tcsetattr(0, TCSAFLUSH, &save);

-    handle_keypress(c);
+    handle_keypress(session, c);
    goto repeat;

    return NULL;
@@ -942,24 +982,49 @@ static void event__process_sample(const event_t *self,
    u64 ip = self->ip.ip;
    struct sym_entry *syme;
    struct addr_location al;
+    struct kernel_info *kerninfo;
    u8 origin = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

    ++samples;

    switch (origin) {
    case PERF_RECORD_MISC_USER:
-        ++userspace_samples;
+        ++us_samples;
        if (hide_user_symbols)
            return;
+        kerninfo = kerninfo__findhost(&session->kerninfo_root);
        break;
    case PERF_RECORD_MISC_KERNEL:
+        ++kernel_samples;
        if (hide_kernel_symbols)
            return;
+        kerninfo = kerninfo__findhost(&session->kerninfo_root);
        break;
+    case PERF_RECORD_MISC_GUEST_KERNEL:
+        ++guest_kernel_samples;
+        kerninfo = kerninfo__find(&session->kerninfo_root,
+                                  self->ip.pid);
+        break;
+    case PERF_RECORD_MISC_GUEST_USER:
+        ++guest_us_samples;
+        /*
+         * TODO: we don't process guest user from host side
+         * except simple counting.
+         */
+        return;
    default:
        return;
    }

+    if (!kerninfo && perf_guest) {
+        pr_err("Can't find guest [%d]'s kernel information\n",
+               self->ip.pid);
+        return;
+    }
+
+    if (self->header.misc & PERF_RECORD_MISC_EXACT)
+        exact_samples++;
+
    if (event__preprocess_sample(self, session, &al, symbol_filter) < 0 ||
        al.filtered)
        return;
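The switch above keys off the cpumode bits of the sample header; PERF_RECORD_MISC_GUEST_KERNEL and PERF_RECORD_MISC_GUEST_USER are the values that let the host-side tool tell guest samples apart from its own. A compact sketch of the same classification, assuming a linux/perf_event.h that already defines the guest cpumode values:

    #include <linux/perf_event.h>

    enum sample_class {
        CLS_USER, CLS_KERNEL, CLS_GUEST_KERNEL, CLS_GUEST_USER, CLS_OTHER
    };

    /* Map a sample header's misc field to a coarse origin class, the way
     * event__process_sample() does above. */
    static enum sample_class classify(unsigned short misc)
    {
        switch (misc & PERF_RECORD_MISC_CPUMODE_MASK) {
        case PERF_RECORD_MISC_USER:         return CLS_USER;
        case PERF_RECORD_MISC_KERNEL:       return CLS_KERNEL;
        case PERF_RECORD_MISC_GUEST_KERNEL: return CLS_GUEST_KERNEL;
        case PERF_RECORD_MISC_GUEST_USER:   return CLS_GUEST_USER;
        default:                            return CLS_OTHER;
        }
    }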
@@ -976,7 +1041,7 @@ static void event__process_sample(const event_t *self,
     * --hide-kernel-symbols, even if the user specifies an
     * invalid --vmlinux ;-)
     */
-    if (al.map == session->vmlinux_maps[MAP__FUNCTION] &&
+    if (al.map == kerninfo->vmlinux_maps[MAP__FUNCTION] &&
        RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) {
        pr_err("The %s file can't be used\n",
               symbol_conf.vmlinux_name);
@@ -990,7 +1055,17 @@ static void event__process_sample(const event_t *self,
    if (sym_filter_entry_sched) {
        sym_filter_entry = sym_filter_entry_sched;
        sym_filter_entry_sched = NULL;
-        parse_source(sym_filter_entry);
+        if (parse_source(sym_filter_entry) < 0) {
+            struct symbol *sym = sym_entry__symbol(sym_filter_entry);
+
+            pr_err("Can't annotate %s", sym->name);
+            if (sym_filter_entry->map->dso->origin == DSO__ORIG_KERNEL) {
+                pr_err(": No vmlinux file was found in the path:\n");
+                vmlinux_path__fprintf(stderr);
+            } else
+                pr_err(".\n");
+            exit(1);
+        }
    }

    syme = symbol__priv(al.sym);
@@ -1106,16 +1181,21 @@ static void perf_session__mmap_read_counter(struct perf_session *self,
    md->prev = old;
}

-static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS];
-static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS];
+static struct pollfd *event_array;
+static struct mmap_data *mmap_array[MAX_NR_CPUS][MAX_COUNTERS];

static void perf_session__mmap_read(struct perf_session *self)
{
-    int i, counter;
+    int i, counter, thread_index;

    for (i = 0; i < nr_cpus; i++) {
        for (counter = 0; counter < nr_counters; counter++)
-            perf_session__mmap_read_counter(self, &mmap_array[i][counter]);
+            for (thread_index = 0;
+                 thread_index < thread_num;
+                 thread_index++) {
+                perf_session__mmap_read_counter(self,
+                    &mmap_array[i][counter][thread_index]);
+            }
    }
}

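Each (cpu, counter, thread) triple now gets its own mmap'ed sample buffer, which is why mmap_array grew a third dimension. The perf mmap ABI wants a power-of-two number of data pages plus one metadata page, which is what makes the cheap AND-mask wrap in the reader possible. A sketch of the size/mask computation, assuming mmap_pages is a power of two:

    #include <assert.h>
    #include <stddef.h>

    struct ring_geometry {
        size_t map_len; /* bytes to pass to mmap(): data + 1 control page */
        size_t mask;    /* AND-mask used to wrap offsets into the data area */
    };

    static struct ring_geometry ring_geometry(size_t mmap_pages, size_t page_size)
    {
        struct ring_geometry g;

        /* The kernel requires a power-of-two number of data pages. */
        assert((mmap_pages & (mmap_pages - 1)) == 0);

        g.map_len = (mmap_pages + 1) * page_size;
        g.mask = mmap_pages * page_size - 1;
        return g;
    }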
|
@ -1126,9 +1206,10 @@ static void start_counter(int i, int counter)
|
||||||
{
|
{
|
||||||
struct perf_event_attr *attr;
|
struct perf_event_attr *attr;
|
||||||
int cpu;
|
int cpu;
|
||||||
|
int thread_index;
|
||||||
|
|
||||||
cpu = profile_cpu;
|
cpu = profile_cpu;
|
||||||
if (target_pid == -1 && profile_cpu == -1)
|
if (target_tid == -1 && profile_cpu == -1)
|
||||||
cpu = cpumap[i];
|
cpu = cpumap[i];
|
||||||
|
|
||||||
attr = attrs + counter;
|
attr = attrs + counter;
|
||||||
|
@ -1144,55 +1225,58 @@ static void start_counter(int i, int counter)
|
||||||
attr->inherit = (cpu < 0) && inherit;
|
attr->inherit = (cpu < 0) && inherit;
|
||||||
attr->mmap = 1;
|
attr->mmap = 1;
|
||||||
|
|
||||||
|
for (thread_index = 0; thread_index < thread_num; thread_index++) {
|
||||||
try_again:
|
try_again:
|
||||||
fd[i][counter] = sys_perf_event_open(attr, target_pid, cpu, group_fd, 0);
|
fd[i][counter][thread_index] = sys_perf_event_open(attr,
|
||||||
|
all_tids[thread_index], cpu, group_fd, 0);
|
||||||
|
|
||||||
if (fd[i][counter] < 0) {
|
if (fd[i][counter][thread_index] < 0) {
|
||||||
int err = errno;
|
int err = errno;
|
||||||
|
|
||||||
if (err == EPERM || err == EACCES)
|
if (err == EPERM || err == EACCES)
|
||||||
die("No permission - are you root?\n");
|
die("No permission - are you root?\n");
|
||||||
/*
|
/*
|
||||||
* If it's cycles then fall back to hrtimer
|
* If it's cycles then fall back to hrtimer
|
||||||
* based cpu-clock-tick sw counter, which
|
* based cpu-clock-tick sw counter, which
|
||||||
* is always available even if no PMU support:
|
* is always available even if no PMU support:
|
||||||
*/
|
*/
|
||||||
if (attr->type == PERF_TYPE_HARDWARE
|
if (attr->type == PERF_TYPE_HARDWARE
|
||||||
&& attr->config == PERF_COUNT_HW_CPU_CYCLES) {
|
&& attr->config == PERF_COUNT_HW_CPU_CYCLES) {
|
||||||
|
|
||||||
if (verbose)
|
if (verbose)
|
||||||
warning(" ... trying to fall back to cpu-clock-ticks\n");
|
warning(" ... trying to fall back to cpu-clock-ticks\n");
|
||||||
|
|
||||||
attr->type = PERF_TYPE_SOFTWARE;
|
attr->type = PERF_TYPE_SOFTWARE;
|
||||||
attr->config = PERF_COUNT_SW_CPU_CLOCK;
|
attr->config = PERF_COUNT_SW_CPU_CLOCK;
|
||||||
goto try_again;
|
goto try_again;
|
||||||
|
}
|
||||||
|
printf("\n");
|
||||||
|
error("perfcounter syscall returned with %d (%s)\n",
|
||||||
|
fd[i][counter][thread_index], strerror(err));
|
||||||
|
die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
|
||||||
|
exit(-1);
|
||||||
}
|
}
|
||||||
printf("\n");
|
assert(fd[i][counter][thread_index] >= 0);
|
||||||
error("perfcounter syscall returned with %d (%s)\n",
|
fcntl(fd[i][counter][thread_index], F_SETFL, O_NONBLOCK);
|
||||||
fd[i][counter], strerror(err));
|
|
||||||
die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
|
/*
|
||||||
exit(-1);
|
* First counter acts as the group leader:
|
||||||
|
*/
|
||||||
|
if (group && group_fd == -1)
|
||||||
|
group_fd = fd[i][counter][thread_index];
|
||||||
|
|
||||||
|
event_array[nr_poll].fd = fd[i][counter][thread_index];
|
||||||
|
event_array[nr_poll].events = POLLIN;
|
||||||
|
nr_poll++;
|
||||||
|
|
||||||
|
mmap_array[i][counter][thread_index].counter = counter;
|
||||||
|
mmap_array[i][counter][thread_index].prev = 0;
|
||||||
|
mmap_array[i][counter][thread_index].mask = mmap_pages*page_size - 1;
|
||||||
|
mmap_array[i][counter][thread_index].base = mmap(NULL, (mmap_pages+1)*page_size,
|
||||||
|
PROT_READ, MAP_SHARED, fd[i][counter][thread_index], 0);
|
||||||
|
if (mmap_array[i][counter][thread_index].base == MAP_FAILED)
|
||||||
|
die("failed to mmap with %d (%s)\n", errno, strerror(errno));
|
||||||
}
|
}
|
||||||
assert(fd[i][counter] >= 0);
|
|
||||||
fcntl(fd[i][counter], F_SETFL, O_NONBLOCK);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* First counter acts as the group leader:
|
|
||||||
*/
|
|
||||||
if (group && group_fd == -1)
|
|
||||||
group_fd = fd[i][counter];
|
|
||||||
|
|
||||||
event_array[nr_poll].fd = fd[i][counter];
|
|
||||||
event_array[nr_poll].events = POLLIN;
|
|
||||||
nr_poll++;
|
|
||||||
|
|
||||||
mmap_array[i][counter].counter = counter;
|
|
||||||
mmap_array[i][counter].prev = 0;
|
|
||||||
mmap_array[i][counter].mask = mmap_pages*page_size - 1;
|
|
||||||
mmap_array[i][counter].base = mmap(NULL, (mmap_pages+1)*page_size,
|
|
||||||
PROT_READ, MAP_SHARED, fd[i][counter], 0);
|
|
||||||
if (mmap_array[i][counter].base == MAP_FAILED)
|
|
||||||
die("failed to mmap with %d (%s)\n", errno, strerror(errno));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int __cmd_top(void)
|
static int __cmd_top(void)
|
||||||
|
@@ -1208,8 +1292,8 @@ static int __cmd_top(void)
 	if (session == NULL)
 		return -ENOMEM;

-	if (target_pid != -1)
-		event__synthesize_thread(target_pid, event__process, session);
+	if (target_tid != -1)
+		event__synthesize_thread(target_tid, event__process, session);
 	else
 		event__synthesize_threads(event__process, session);
@@ -1220,11 +1304,11 @@ static int __cmd_top(void)
 	}

 	/* Wait for a minimal set of events before starting the snapshot */
-	poll(event_array, nr_poll, 100);
+	poll(&event_array[0], nr_poll, 100);

 	perf_session__mmap_read(session);

-	if (pthread_create(&thread, NULL, display_thread, NULL)) {
+	if (pthread_create(&thread, NULL, display_thread, session)) {
 		printf("Could not create display thread.\n");
 		exit(-1);
 	}
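Passing session instead of NULL lets display_thread() reach its data through its argument rather than a global. A minimal sketch of that pattern, with a hypothetical struct standing in for the perf session:

	#include <pthread.h>

	struct session { int dummy; };	/* placeholder for illustration */

	static void *display_thread(void *arg)
	{
		struct session *self = arg;	/* was NULL before this change */
		(void)self;
		/* ... render periodically from the session ... */
		return NULL;
	}

	static int start_display(struct session *s)
	{
		pthread_t thread;
		return pthread_create(&thread, NULL, display_thread, s);
	}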
@@ -1263,7 +1347,9 @@ static const struct option options[] = {
 	OPT_INTEGER('c', "count", &default_interval,
 		    "event period to sample"),
 	OPT_INTEGER('p', "pid", &target_pid,
-		    "profile events on existing pid"),
+		    "profile events on existing process id"),
+	OPT_INTEGER('t', "tid", &target_tid,
+		    "profile events on existing thread id"),
 	OPT_BOOLEAN('a', "all-cpus", &system_wide,
 		    "system-wide collection from all CPUs"),
 	OPT_INTEGER('C', "CPU", &profile_cpu,
@@ -1296,7 +1382,7 @@ static const struct option options[] = {
 		    "display this many functions"),
 	OPT_BOOLEAN('U', "hide_user_symbols", &hide_user_symbols,
 		    "hide user symbols"),
-	OPT_BOOLEAN('v', "verbose", &verbose,
+	OPT_INCR('v', "verbose", &verbose,
 		    "be more verbose (show counter open errors, etc)"),
 	OPT_END()
 };
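The OPT_BOOLEAN to OPT_INCR switch makes repeated -v flags additive, so -vv raises the verbosity level instead of clamping it to 1. perf's own parse-options library implements this internally; a getopt-based sketch of the same counting-flag behavior, for illustration only:

	#include <stdio.h>
	#include <unistd.h>

	int main(int argc, char **argv)
	{
		int verbose = 0, c;

		while ((c = getopt(argc, argv, "v")) != -1)
			if (c == 'v')
				verbose++;	/* -v -v  =>  verbose == 2 */

		printf("verbosity level %d\n", verbose);
		return 0;
	}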
@@ -1304,6 +1390,7 @@ static const struct option options[] = {
 int cmd_top(int argc, const char **argv, const char *prefix __used)
 {
 	int counter;
+	int i,j;

 	page_size = sysconf(_SC_PAGE_SIZE);

@@ -1311,8 +1398,39 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
 	if (argc)
 		usage_with_options(top_usage, options);

+	if (target_pid != -1) {
+		target_tid = target_pid;
+		thread_num = find_all_tid(target_pid, &all_tids);
+		if (thread_num <= 0) {
+			fprintf(stderr, "Can't find all threads of pid %d\n",
+				target_pid);
+			usage_with_options(top_usage, options);
+		}
+	} else {
+		all_tids=malloc(sizeof(pid_t));
+		if (!all_tids)
+			return -ENOMEM;
+
+		all_tids[0] = target_tid;
+		thread_num = 1;
+	}
+
+	for (i = 0; i < MAX_NR_CPUS; i++) {
+		for (j = 0; j < MAX_COUNTERS; j++) {
+			fd[i][j] = malloc(sizeof(int)*thread_num);
+			mmap_array[i][j] = zalloc(
+				sizeof(struct mmap_data)*thread_num);
+			if (!fd[i][j] || !mmap_array[i][j])
+				return -ENOMEM;
+		}
+	}
+	event_array = malloc(
+		sizeof(struct pollfd)*MAX_NR_CPUS*MAX_COUNTERS*thread_num);
+	if (!event_array)
+		return -ENOMEM;
+
 	/* CPU and PID are mutually exclusive */
-	if (target_pid != -1 && profile_cpu != -1) {
+	if (target_tid > 0 && profile_cpu != -1) {
 		printf("WARNING: PID switch overriding CPU\n");
 		sleep(1);
 		profile_cpu = -1;
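find_all_tid() is expected to return every thread id belonging to the given pid, sizing the per-thread fd and mmap arrays above. The hunk only calls it; a plausible stand-alone sketch reading /proc/<pid>/task — an assumption for illustration, not the helper's actual body:

	#include <dirent.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/types.h>

	static int find_all_tid(pid_t pid, pid_t **all_tids)
	{
		char path[64];
		struct dirent *de;
		DIR *dir;
		pid_t *tids = NULL;
		int n = 0;

		/* each thread of a process appears under /proc/<pid>/task */
		snprintf(path, sizeof(path), "/proc/%d/task", (int)pid);
		dir = opendir(path);
		if (!dir)
			return -1;

		while ((de = readdir(dir)) != NULL) {
			if (de->d_name[0] == '.')
				continue;	/* skip "." and ".." */
			tids = realloc(tids, (n + 1) * sizeof(*tids));
			if (!tids)
				break;
			tids[n++] = (pid_t)atoi(de->d_name);
		}
		closedir(dir);

		*all_tids = tids;
		return n;	/* thread count, <= 0 on failure */
	}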
@@ -1353,7 +1471,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
 		attrs[counter].sample_period = default_interval;
 	}

-	if (target_pid != -1 || profile_cpu != -1)
+	if (target_tid != -1 || profile_cpu != -1)
 		nr_cpus = 1;
 	else
 		nr_cpus = read_cpu_map();
@@ -104,10 +104,23 @@ static int process_sample_event(event_t *event, struct perf_session *session)
 static struct perf_event_ops event_ops = {
 	.sample	= process_sample_event,
 	.comm	= event__process_comm,
+	.attr	= event__process_attr,
+	.event_type = event__process_event_type,
+	.tracing_data = event__process_tracing_data,
+	.build_id = event__process_build_id,
 };

+extern volatile int session_done;
+
+static void sig_handler(int sig __unused)
+{
+	session_done = 1;
+}
+
 static int __cmd_trace(struct perf_session *session)
 {
+	signal(SIGINT, sig_handler);
+
 	return perf_session__process_events(session, &event_ops);
 }
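The added SIGINT handler does nothing but set a flag the event loop polls, which keeps the handler async-signal-safe and lets Ctrl-C end a live trace cleanly. A self-contained sketch of the pattern, with session_done declared locally rather than extern as in the hunk:

	#include <signal.h>

	static volatile sig_atomic_t session_done;

	static void sig_handler(int sig)
	{
		(void)sig;
		session_done = 1;	/* only flip a flag in the handler */
	}

	/* in the command entry point:
	 *	signal(SIGINT, sig_handler);
	 *	while (!session_done)
	 *		process_more_events();
	 */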
@@ -505,7 +518,7 @@ static const char * const trace_usage[] = {
 static const struct option options[] = {
 	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
 		    "dump raw trace in ASCII"),
-	OPT_BOOLEAN('v', "verbose", &verbose,
+	OPT_INCR('v', "verbose", &verbose,
 		    "be more verbose (show symbol address, etc)"),
 	OPT_BOOLEAN('L', "Latency", &latency_format,
 		    "show latency attributes (irqs/preemption disabled, etc)"),
@@ -548,6 +561,65 @@ int cmd_trace(int argc, const char **argv, const char *prefix __used)
 		suffix = REPORT_SUFFIX;
 	}

+	if (!suffix && argc >= 2 && strncmp(argv[1], "-", strlen("-")) != 0) {
+		char *record_script_path, *report_script_path;
+		int live_pipe[2];
+		pid_t pid;
+
+		record_script_path = get_script_path(argv[1], RECORD_SUFFIX);
+		if (!record_script_path) {
+			fprintf(stderr, "record script not found\n");
+			return -1;
+		}
+
+		report_script_path = get_script_path(argv[1], REPORT_SUFFIX);
+		if (!report_script_path) {
+			fprintf(stderr, "report script not found\n");
+			return -1;
+		}
+
+		if (pipe(live_pipe) < 0) {
+			perror("failed to create pipe");
+			exit(-1);
+		}
+
+		pid = fork();
+		if (pid < 0) {
+			perror("failed to fork");
+			exit(-1);
+		}
+
+		if (!pid) {
+			dup2(live_pipe[1], 1);
+			close(live_pipe[0]);
+
+			__argv = malloc(5 * sizeof(const char *));
+			__argv[0] = "/bin/sh";
+			__argv[1] = record_script_path;
+			__argv[2] = "-o";
+			__argv[3] = "-";
+			__argv[4] = NULL;
+
+			execvp("/bin/sh", (char **)__argv);
+			exit(-1);
+		}
+
+		dup2(live_pipe[0], 0);
+		close(live_pipe[1]);
+
+		__argv = malloc((argc + 3) * sizeof(const char *));
+		__argv[0] = "/bin/sh";
+		__argv[1] = report_script_path;
+		for (i = 2; i < argc; i++)
+			__argv[i] = argv[i];
+		__argv[i++] = "-i";
+		__argv[i++] = "-";
+		__argv[i++] = NULL;
+
+		execvp("/bin/sh", (char **)__argv);
+		exit(-1);
+	}
+
 	if (suffix) {
 		script_path = get_script_path(argv[2], suffix);
 		if (!script_path) {
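This block gives "perf trace <script>" a live mode: a forked child runs the record script with stdout on a pipe, and the parent execs the report script with stdin on the other end — effectively record | report without a temporary perf.data. A condensed sketch of the same fork/dup2/exec plumbing, using execlp instead of the malloc'd argv of the real hunk:

	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/types.h>
	#include <unistd.h>

	static void run_live(const char *record_script, const char *report_script)
	{
		int live_pipe[2];
		pid_t pid;

		if (pipe(live_pipe) < 0) {
			perror("pipe");
			exit(1);
		}

		pid = fork();
		if (pid < 0) {
			perror("fork");
			exit(1);
		}

		if (pid == 0) {
			/* child: record script writes the stream to the pipe */
			dup2(live_pipe[1], STDOUT_FILENO);
			close(live_pipe[0]);
			execlp("/bin/sh", "sh", record_script, "-o", "-",
			       (char *)NULL);
			_exit(1);
		}

		/* parent: report script reads the stream from stdin ("-i -") */
		dup2(live_pipe[0], STDIN_FILENO);
		close(live_pipe[1]);
		execlp("/bin/sh", "sh", report_script, "-i", "-", (char *)NULL);
		_exit(1);
	}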
@@ -580,7 +652,8 @@ int cmd_trace(int argc, const char **argv, const char *prefix __used)
 	if (session == NULL)
 		return -ENOMEM;

-	if (!perf_session__has_traces(session, "record -R"))
+	if (strcmp(input_name, "-") &&
+	    !perf_session__has_traces(session, "record -R"))
 		return -EINVAL;

 	if (generate_script_lang) {
@@ -32,5 +32,6 @@ extern int cmd_version(int argc, const char **argv, const char *prefix);
 extern int cmd_probe(int argc, const char **argv, const char *prefix);
 extern int cmd_kmem(int argc, const char **argv, const char *prefix);
 extern int cmd_lock(int argc, const char **argv, const char *prefix);
+extern int cmd_kvm(int argc, const char **argv, const char *prefix);

 #endif
@@ -19,3 +19,4 @@ perf-trace mainporcelain common
 perf-probe mainporcelain common
 perf-kmem mainporcelain common
 perf-lock mainporcelain common
+perf-kvm mainporcelain common
@@ -30,4 +30,7 @@ done

 tar cfj $PERF_DATA.tar.bz2 -C $DEBUGDIR -T $MANIFEST
 rm -f $MANIFEST $BUILDIDS
+echo -e "Now please run:\n"
+echo -e "$ tar xvf $PERF_DATA.tar.bz2 -C ~/.debug\n"
+echo "wherever you need to run 'perf report' on."
 exit 0
@@ -13,9 +13,10 @@
 #include "util/quote.h"
 #include "util/run-command.h"
 #include "util/parse-events.h"
-#include "util/string.h"
 #include "util/debugfs.h"

+bool use_browser;
+
 const char perf_usage_string[] =
 	"perf [--version] [--help] COMMAND [ARGS]";

|
||||||
set_debugfs_path();
|
set_debugfs_path();
|
||||||
|
|
||||||
status = p->fn(argc, argv, prefix);
|
status = p->fn(argc, argv, prefix);
|
||||||
|
exit_browser(status);
|
||||||
|
|
||||||
if (status)
|
if (status)
|
||||||
return status & 0xff;
|
return status & 0xff;
|
||||||
|
|
||||||
|
@@ -304,6 +307,7 @@ static void handle_internal_command(int argc, const char **argv)
 		{ "probe",	cmd_probe,	0 },
 		{ "kmem",	cmd_kmem,	0 },
 		{ "lock",	cmd_lock,	0 },
+		{ "kvm",	cmd_kvm,	0 },
 	};
 	unsigned int i;
 	static const char ext[] = STRIP_EXTENSION;
@@ -1,6 +1,10 @@
 #ifndef _PERF_PERF_H
 #define _PERF_PERF_H

+struct winsize;
+
+void get_term_dimensions(struct winsize *ws);
+
 #if defined(__i386__)
 #include "../../arch/x86/include/asm/unistd.h"
 #define rmb()		asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
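perf.h now forward-declares get_term_dimensions() against an opaque struct winsize, so the display code can size itself to the terminal. The definition lives elsewhere in this merge and may differ; a plausible TIOCGWINSZ-based sketch, offered only as an assumption about its shape:

	#include <sys/ioctl.h>
	#include <unistd.h>

	void get_term_dimensions(struct winsize *ws)
	{
		/* ask the tty driver for the current window size */
		if (ioctl(STDOUT_FILENO, TIOCGWINSZ, ws) < 0) {
			/* fall back to a conventional 80x24 terminal */
			ws->ws_row = 24;
			ws->ws_col = 80;
		}
	}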
@@ -102,8 +106,6 @@ static inline unsigned long long rdclock(void)
 #define __user
 #define asmlinkage

-#define __used		__attribute__((__unused__))
-
 #define unlikely(x)	__builtin_expect(!!(x), 0)
 #define min(x, y) ({				\
 	typeof(x) _min1 = (x);			\
|
||||||
u64 ips[0];
|
u64 ips[0];
|
||||||
};
|
};
|
||||||
|
|
||||||
|
extern int perf_host, perf_guest;
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
|
@@ -15,6 +15,7 @@ our @EXPORT_OK = ( @{ $EXPORT_TAGS{'all'} } );

 our @EXPORT = qw(
 avg nsecs nsecs_secs nsecs_nsecs nsecs_usecs print_nsecs
+clear_term
 );

 our $VERSION = '0.01';
@@ -55,6 +56,11 @@ sub nsecs_str {
 	return $str;
 }

+sub clear_term
+{
+    print "\x1b[H\x1b[2J";
+}
+
 1;
 __END__
 =head1 NAME
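clear_term() emits ESC[H (move the cursor home) followed by ESC[2J (erase the whole display) — the classic ANSI clear-screen pair, which lets the rwtop script repaint in place. The same sequence in C, for reference:

	#include <stdio.h>

	static void clear_term(void)
	{
		printf("\x1b[H\x1b[2J");	/* cursor home, then erase display */
		fflush(stdout);
	}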
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -c 1 -f -a -M -R -e raw_syscalls:sys_exit
+perf record -c 1 -f -a -M -R -e raw_syscalls:sys_exit $@
@@ -1,4 +1,10 @@
 #!/bin/bash
 # description: system-wide failed syscalls
 # args: [comm]
-perf trace -s ~/libexec/perf-core/scripts/perl/failed-syscalls.pl $1
+if [ $# -gt 0 ] ; then
+    if ! expr match "$1" "-" ; then
+	comm=$1
+	shift
+    fi
+fi
+perf trace $@ -s ~/libexec/perf-core/scripts/perl/failed-syscalls.pl $comm
@@ -1,2 +1,3 @@
 #!/bin/bash
-perf record -c 1 -f -a -M -R -e syscalls:sys_enter_read -e syscalls:sys_enter_write
+perf record -c 1 -f -a -M -R -e syscalls:sys_enter_read -e syscalls:sys_enter_write $@
@@ -1,7 +1,13 @@
 #!/bin/bash
 # description: r/w activity for a program, by file
 # args: <comm>
-perf trace -s ~/libexec/perf-core/scripts/perl/rw-by-file.pl $1
+if [ $# -lt 1 ] ; then
+    echo "usage: rw-by-file <comm>"
+    exit
+fi
+comm=$1
+shift
+perf trace $@ -s ~/libexec/perf-core/scripts/perl/rw-by-file.pl $comm
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -c 1 -f -a -M -R -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write
+perf record -c 1 -f -a -M -R -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write $@
@@ -1,6 +1,6 @@
 #!/bin/bash
 # description: system-wide r/w activity
-perf trace -s ~/libexec/perf-core/scripts/perl/rw-by-pid.pl
+perf trace $@ -s ~/libexec/perf-core/scripts/perl/rw-by-pid.pl
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -c 1 -f -a -M -R -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write $@
@@ -0,0 +1,23 @@
+#!/bin/bash
+# description: system-wide r/w top
+# args: [interval]
+n_args=0
+for i in "$@"
+do
+    if expr match "$i" "-" > /dev/null ; then
+	break
+    fi
+    n_args=$(( $n_args + 1 ))
+done
+if [ "$n_args" -gt 1 ] ; then
+    echo "usage: rwtop-report [interval]"
+    exit
+fi
+if [ "$n_args" -gt 0 ] ; then
+    interval=$1
+    shift
+fi
+perf trace $@ -s ~/libexec/perf-core/scripts/perl/rwtop.pl $interval
Some files were not shown because too many files have changed in this diff.