perf events: Clean up definitions and initializers, update copyrights
Fix a few inconsistent style bits that were added over the past few months.

Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/n/tip-yv4hwf9yhnzoada8pcpb3a97@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit e7e7ee2eab
parent 48dbb6dc86
@@ -2,8 +2,8 @@
  * Performance events:
  *
  *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
- *    Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
- *    Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra
+ *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
+ *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
  *
  * Data type definitions, declarations, prototypes.
  *
@@ -484,9 +484,9 @@ enum perf_callchain_context {
 #endif
 
 struct perf_guest_info_callbacks {
-        int (*is_in_guest) (void);
-        int (*is_user_mode) (void);
-        unsigned long (*get_guest_ip) (void);
+        int (*is_in_guest)(void);
+        int (*is_user_mode)(void);
+        unsigned long (*get_guest_ip)(void);
 };
 
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
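
For context, a minimal sketch (not part of this commit) of how a hypervisor module is expected to use this callback table; the my_* names are illustrative, while perf_register_guest_info_callbacks() is the existing registration hook:

static int my_is_in_guest(void)
{
        return 0;       /* would test whether a vCPU is currently running */
}

static int my_is_user_mode(void)
{
        return 0;       /* would test the guest's privilege level */
}

static unsigned long my_get_guest_ip(void)
{
        return 0;       /* would return the guest instruction pointer */
}

static struct perf_guest_info_callbacks my_guest_cbs = {
        .is_in_guest    = my_is_in_guest,
        .is_user_mode   = my_is_user_mode,
        .get_guest_ip   = my_get_guest_ip,
};

/* registered once at module init: perf_register_guest_info_callbacks(&my_guest_cbs); */
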
@@ -1001,8 +1001,7 @@ struct perf_sample_data {
         struct perf_raw_record *raw;
 };
 
-static inline
-void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
+static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
 {
         data->addr = addr;
         data->raw  = NULL;
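
As a usage note (again not part of this commit), this is roughly how a PMU interrupt handler of this kernel generation fills the on-stack sample record before handing it to the overflow path; the handler name is made up, and the perf_event_overflow() call assumes the signature of this era, which still carries an 'nmi' argument:

static void my_pmu_handle_overflow(struct perf_event *event, struct pt_regs *regs)
{
        struct perf_sample_data data;

        perf_sample_data_init(&data, 0);        /* sets data.addr = 0, data.raw = NULL */
        data.period = event->hw.last_period;

        if (perf_event_overflow(event, 1, &data, regs))
                ;       /* limit hit: a real driver would stop the counter here */
}
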
@@ -1039,8 +1038,7 @@ extern struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
 extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);
 
 #ifndef perf_arch_fetch_caller_regs
-static inline void
-perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
+static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
 #endif
 
 /*
@@ -1080,8 +1078,7 @@ static inline void perf_event_task_sched_in(struct task_struct *task)
         __perf_event_task_sched_in(task);
 }
 
-static inline
-void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
+static inline void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
 {
         perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
 
@@ -1099,14 +1096,10 @@ extern void perf_event_fork(struct task_struct *tsk);
 /* Callchains */
 DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
 
-extern void perf_callchain_user(struct perf_callchain_entry *entry,
-                                struct pt_regs *regs);
-extern void perf_callchain_kernel(struct perf_callchain_entry *entry,
-                                  struct pt_regs *regs);
-
+extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
+extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);
 
-static inline void
-perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
+static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
 {
         if (entry->nr < PERF_MAX_STACK_DEPTH)
                 entry->ip[entry->nr++] = ip;
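
A brief sketch of the consumer side (not from this commit): each architecture provides perf_callchain_kernel()/perf_callchain_user() and feeds every recovered return address to perf_callchain_store(); the stack walk itself is arch-specific and only hinted at here:

void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
        /* mark the start of the kernel portion of the chain */
        perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
        /* record the interrupted instruction pointer first */
        perf_callchain_store(entry, instruction_pointer(regs));

        /*
         * ... arch-specific frame walk goes here, calling
         * perf_callchain_store(entry, return_address) per frame;
         * the helper silently stops at PERF_MAX_STACK_DEPTH.
         */
}
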
@@ -1142,9 +1135,9 @@ extern void perf_tp_event(u64 addr, u64 count, void *record,
 extern void perf_bp_event(struct perf_event *event, void *data);
 
 #ifndef perf_misc_flags
-#define perf_misc_flags(regs)  (user_mode(regs) ? PERF_RECORD_MISC_USER : \
-                                PERF_RECORD_MISC_KERNEL)
-#define perf_instruction_pointer(regs) instruction_pointer(regs)
+# define perf_misc_flags(regs) \
+                (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
+# define perf_instruction_pointer(regs)        instruction_pointer(regs)
 #endif
 
 extern int perf_output_begin(struct perf_output_handle *handle,
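
For reference (not part of the change), the sampling core consumes these two hooks when it prepares a sample header; the helper below is a hypothetical condensation of that step:

static void fill_sample_ip_and_misc(struct perf_event_header *header,
                                    struct perf_sample_data *data,
                                    struct pt_regs *regs)
{
        header->misc |= perf_misc_flags(regs);          /* tag user vs. kernel origin */
        data->ip      = perf_instruction_pointer(regs); /* sampled instruction pointer */
}
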
@@ -1194,8 +1187,7 @@ static inline void perf_event_disable(struct perf_event *event) { }
 static inline void perf_event_task_tick(void) { }
 #endif
 
-#define perf_output_put(handle, x) \
-        perf_output_copy((handle), &(x), sizeof(x))
+#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
 
 /*
  * This has to have a higher priority than migration_notifier in sched.c.

@@ -2,8 +2,8 @@
  * Performance events core code:
  *
  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
- *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
- *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
+ *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
  *
  * For licensing details see kernel-base/COPYING