Merge branch 'tip/perf/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into perf/urgent
commit d6a72fe465
@@ -16,7 +16,7 @@ static int validate_memory_access_address(unsigned long addr, int size)
 	return bfin_mem_access_type(addr, size);
 }
 
-long probe_kernel_read(void *dst, void *src, size_t size)
+long probe_kernel_read(void *dst, const void *src, size_t size)
 {
 	unsigned long lsrc = (unsigned long)src;
 	int mem_type;
@@ -55,7 +55,7 @@ long probe_kernel_read(void *dst, void *src, size_t size)
 	return -EFAULT;
 }
 
-long probe_kernel_write(void *dst, void *src, size_t size)
+long probe_kernel_write(void *dst, const void *src, size_t size)
 {
 	unsigned long ldst = (unsigned long)dst;
 	int mem_type;
@@ -19,7 +19,7 @@
  * using the stura instruction.
  * Returns the number of bytes copied or -EFAULT.
  */
-static long probe_kernel_write_odd(void *dst, void *src, size_t size)
+static long probe_kernel_write_odd(void *dst, const void *src, size_t size)
 {
 	unsigned long count, aligned;
 	int offset, mask;
@@ -45,7 +45,7 @@ static long probe_kernel_write_odd(void *dst, void *src, size_t size)
 	return rc ? rc : count;
 }
 
-long probe_kernel_write(void *dst, void *src, size_t size)
+long probe_kernel_write(void *dst, const void *src, size_t size)
 {
 	long copied = 0;
 
@@ -123,7 +123,7 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 static atomic_t nmi_running = ATOMIC_INIT(0);
 static int mod_code_status;		/* holds return value of text write */
 static void *mod_code_ip;		/* holds the IP to write to */
-static void *mod_code_newcode;		/* holds the text to write to the IP */
+static const void *mod_code_newcode;	/* holds the text to write to the IP */
 
 static unsigned nmi_wait_count;
 static atomic_t nmi_update_count = ATOMIC_INIT(0);
@@ -225,7 +225,7 @@ within(unsigned long addr, unsigned long start, unsigned long end)
 }
 
 static int
-do_ftrace_mod_code(unsigned long ip, void *new_code)
+do_ftrace_mod_code(unsigned long ip, const void *new_code)
 {
 	/*
 	 * On x86_64, kernel text mappings are mapped read-only with
@@ -266,8 +266,8 @@ static const unsigned char *ftrace_nop_replace(void)
 }
 
 static int
-ftrace_modify_code(unsigned long ip, unsigned char *old_code,
-		   unsigned char *new_code)
+ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
+		   unsigned const char *new_code)
 {
 	unsigned char replaced[MCOUNT_INSN_SIZE];
 
@@ -301,7 +301,7 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 int ftrace_make_nop(struct module *mod,
 		    struct dyn_ftrace *rec, unsigned long addr)
 {
-	unsigned char *new, *old;
+	unsigned const char *new, *old;
 	unsigned long ip = rec->ip;
 
 	old = ftrace_call_replace(ip, addr);
@@ -312,7 +312,7 @@ int ftrace_make_nop(struct module *mod,
 
 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 {
-	unsigned char *new, *old;
+	unsigned const char *new, *old;
 	unsigned long ip = rec->ip;
 
 	old = ftrace_nop_replace();
@@ -16,6 +16,11 @@ struct trace_print_flags {
 	const char		*name;
 };
 
+struct trace_print_flags_u64 {
+	unsigned long long	mask;
+	const char		*name;
+};
+
 const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
 				   unsigned long flags,
 				   const struct trace_print_flags *flag_array);
@@ -23,6 +28,13 @@ const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
 const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
 				     const struct trace_print_flags *symbol_array);
 
+#if BITS_PER_LONG == 32
+const char *ftrace_print_symbols_seq_u64(struct trace_seq *p,
+					 unsigned long long val,
+					 const struct trace_print_flags_u64
+					 *symbol_array);
+#endif
+
 const char *ftrace_print_hex_seq(struct trace_seq *p,
 				 const unsigned char *buf, int len);
 
@@ -1535,7 +1535,7 @@ struct task_struct {
 #ifdef CONFIG_TRACING
 	/* state flags for use by tracers */
 	unsigned long trace;
-	/* bitmask of trace recursion */
+	/* bitmask and counter of trace recursion */
 	unsigned long trace_recursion;
 #endif /* CONFIG_TRACING */
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
@@ -93,8 +93,8 @@ static inline unsigned long __copy_from_user_nocache(void *to,
  * Safely read from address @src to the buffer at @dst. If a kernel fault
  * happens, handle that and return -EFAULT.
  */
-extern long probe_kernel_read(void *dst, void *src, size_t size);
-extern long __probe_kernel_read(void *dst, void *src, size_t size);
+extern long probe_kernel_read(void *dst, const void *src, size_t size);
+extern long __probe_kernel_read(void *dst, const void *src, size_t size);
 
 /*
  * probe_kernel_write(): safely attempt to write to a location
@@ -105,7 +105,7 @@ extern long __probe_kernel_read(void *dst, void *src, size_t size);
  * Safely write to address @dst from the buffer at @src. If a kernel fault
  * happens, handle that and return -EFAULT.
  */
-extern long notrace probe_kernel_write(void *dst, void *src, size_t size);
-extern long notrace __probe_kernel_write(void *dst, void *src, size_t size);
+extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
+extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
 
 #endif		/* __LINUX_UACCESS_H__ */
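
For context, probe_kernel_read() and probe_kernel_write() return 0 on success and -EFAULT when the access faults; the hunks above only tighten the qualifier on the source pointer, the semantics are unchanged. Below is a minimal caller sketch assuming only the <linux/uaccess.h> API shown above; the helper name peek_word is made up for illustration and is not part of this series.

#include <linux/uaccess.h>

/* Hypothetical helper: safely read one word from an address that may fault. */
static int peek_word(const void *addr, unsigned long *out)
{
	unsigned long val;

	/* src is const-qualified now, so a const pointer needs no cast */
	if (probe_kernel_read(&val, addr, sizeof(val)))
		return -EFAULT;	/* the fault was caught inside probe_kernel_read() */

	*out = val;
	return 0;
}
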
@@ -28,7 +28,7 @@ struct extent_buffer;
 		{ BTRFS_SHARED_DATA_REF_KEY, "SHARED_DATA_REF" })
 
 #define __show_root_type(obj)						\
-	__print_symbolic(obj,						\
+	__print_symbolic_u64(obj,					\
 		{ BTRFS_ROOT_TREE_OBJECTID, "ROOT_TREE" },		\
 		{ BTRFS_EXTENT_TREE_OBJECTID, "EXTENT_TREE" },		\
 		{ BTRFS_CHUNK_TREE_OBJECTID, "CHUNK_TREE" },		\
@@ -125,7 +125,7 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict,
 );
 
 #define __show_map_type(type)						\
-	__print_symbolic(type,						\
+	__print_symbolic_u64(type,					\
 		{ EXTENT_MAP_LAST_BYTE, "LAST_BYTE" },			\
 		{ EXTENT_MAP_HOLE, "HOLE" },				\
 		{ EXTENT_MAP_INLINE, "INLINE" },			\
@@ -205,6 +205,19 @@
 		ftrace_print_symbols_seq(p, value, symbols);		\
 	})
 
+#undef __print_symbolic_u64
+#if BITS_PER_LONG == 32
+#define __print_symbolic_u64(value, symbol_array...)			\
+	({								\
+		static const struct trace_print_flags_u64 symbols[] =	\
+			{ symbol_array, { -1, NULL } };			\
+		ftrace_print_symbols_seq_u64(p, value, symbols);	\
+	})
+#else
+#define __print_symbolic_u64(value, symbol_array...)			\
+			__print_symbolic(value, symbol_array)
+#endif
+
 #undef __print_hex
 #define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len)
 
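
To show how the new macro is meant to be used from an event definition, here is a hypothetical tracepoint sketch (the foo_* names are invented, not part of this diff): __print_symbolic_u64() in TP_printk maps a 64-bit value to a name, expanding to ftrace_print_symbols_seq_u64() on 32-bit kernels and falling back to plain __print_symbolic() on 64-bit ones.

/* Hypothetical event, for illustration only. */
TRACE_EVENT(foo_objectid,

	TP_PROTO(u64 objectid),

	TP_ARGS(objectid),

	TP_STRUCT__entry(
		__field(u64, objectid)
	),

	TP_fast_assign(
		__entry->objectid = objectid;
	),

	/* prints the symbolic name, or "0x..." if no entry matches */
	TP_printk("objectid = %s",
		  __print_symbolic_u64(__entry->objectid,
				       { 1ULL, "ROOT_TREE" },
				       { 2ULL, "EXTENT_TREE" }))
);
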
@@ -105,9 +105,12 @@ static int __jump_label_text_reserved(struct jump_entry *iter_start,
 }
 
 static void __jump_label_update(struct jump_label_key *key,
-				struct jump_entry *entry, int enable)
+				struct jump_entry *entry,
+				struct jump_entry *stop, int enable)
 {
-	for (; entry->key == (jump_label_t)(unsigned long)key; entry++) {
+	for (; (entry < stop) &&
+	      (entry->key == (jump_label_t)(unsigned long)key);
+	      entry++) {
 		/*
 		 * entry->code set to 0 invalidates module init text sections
 		 * kernel_text_address() verifies we are not in core kernel
@@ -181,7 +184,11 @@ static void __jump_label_mod_update(struct jump_label_key *key, int enable)
 	struct jump_label_mod *mod = key->next;
 
 	while (mod) {
-		__jump_label_update(key, mod->entries, enable);
+		struct module *m = mod->mod;
+
+		__jump_label_update(key, mod->entries,
+				    m->jump_entries + m->num_jump_entries,
+				    enable);
 		mod = mod->next;
 	}
 }
@@ -245,7 +252,8 @@ static int jump_label_add_module(struct module *mod)
 		key->next = jlm;
 
 		if (jump_label_enabled(key))
-			__jump_label_update(key, iter, JUMP_LABEL_ENABLE);
+			__jump_label_update(key, iter, iter_stop,
+					    JUMP_LABEL_ENABLE);
 	}
 
 	return 0;
@@ -371,7 +379,7 @@ static void jump_label_update(struct jump_label_key *key, int enable)
 
 	/* if there are no users, entry can be NULL */
 	if (entry)
-		__jump_label_update(key, entry, enable);
+		__jump_label_update(key, entry, __stop___jump_table, enable);
 
 #ifdef CONFIG_MODULES
 	__jump_label_mod_update(key, enable);
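
The point of the new stop argument is that the key comparison alone cannot be trusted to terminate the walk: a module's jump-entry table simply ends at jump_entries + num_jump_entries (and the core kernel's at __stop___jump_table), so the loop must also be bounded by an end pointer. A standalone, userspace illustration of that bounded-walk idiom follows; the types and names are simplified stand-ins, not the kernel's.

#include <stdio.h>

/* Simplified stand-in for a jump entry. */
struct entry { unsigned long key; const char *name; };

/* Walk entries for 'key', but never past 'stop' (one past the last entry). */
static void update_range(unsigned long key, struct entry *e, struct entry *stop)
{
	for (; (e < stop) && (e->key == key); e++)
		printf("updating %s\n", e->name);
}

int main(void)
{
	struct entry table[] = {
		{ 1, "a" }, { 1, "b" }, { 2, "c" },
	};

	/* bounded by both the key match and the end of the array */
	update_range(1, table, table + 3);	/* prints a and b */
	return 0;
}
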
@@ -109,12 +109,18 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
 static void ftrace_global_list_func(unsigned long ip,
 				    unsigned long parent_ip)
 {
-	struct ftrace_ops *op = rcu_dereference_raw(ftrace_global_list); /*see above*/
+	struct ftrace_ops *op;
 
+	if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
+		return;
+
+	trace_recursion_set(TRACE_GLOBAL_BIT);
+	op = rcu_dereference_raw(ftrace_global_list); /*see above*/
 	while (op != &ftrace_list_end) {
 		op->func(ip, parent_ip);
 		op = rcu_dereference_raw(op->next); /*see above*/
 	};
+	trace_recursion_clear(TRACE_GLOBAL_BIT);
 }
 
 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
@@ -1638,12 +1644,12 @@ static void ftrace_startup_enable(int command)
 	ftrace_run_update_code(command);
 }
 
-static void ftrace_startup(struct ftrace_ops *ops, int command)
+static int ftrace_startup(struct ftrace_ops *ops, int command)
 {
 	bool hash_enable = true;
 
 	if (unlikely(ftrace_disabled))
-		return;
+		return -ENODEV;
 
 	ftrace_start_up++;
 	command |= FTRACE_ENABLE_CALLS;
@@ -1662,6 +1668,8 @@ static void ftrace_startup(struct ftrace_ops *ops, int command)
 		ftrace_hash_rec_enable(ops, 1);
 
 	ftrace_startup_enable(command);
+
+	return 0;
 }
 
 static void ftrace_shutdown(struct ftrace_ops *ops, int command)
@@ -2501,7 +2509,7 @@ static void __enable_ftrace_function_probe(void)
 
 	ret = __register_ftrace_function(&trace_probe_ops);
 	if (!ret)
-		ftrace_startup(&trace_probe_ops, 0);
+		ret = ftrace_startup(&trace_probe_ops, 0);
 
 	ftrace_probe_registered = 1;
 }
@@ -3466,7 +3474,11 @@ device_initcall(ftrace_nodyn_init);
 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
 static inline void ftrace_startup_enable(int command) { }
 /* Keep as macros so we do not need to define the commands */
-# define ftrace_startup(ops, command)	do { } while (0)
+# define ftrace_startup(ops, command)			\
+	({						\
+		(ops)->flags |= FTRACE_OPS_FL_ENABLED;	\
+		0;					\
+	})
 # define ftrace_shutdown(ops, command)	do { } while (0)
 # define ftrace_startup_sysctl()	do { } while (0)
 # define ftrace_shutdown_sysctl()	do { } while (0)
@@ -3484,6 +3496,10 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
 {
 	struct ftrace_ops *op;
 
+	if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
+		return;
+
+	trace_recursion_set(TRACE_INTERNAL_BIT);
 	/*
 	 * Some of the ops may be dynamically allocated,
 	 * they must be freed after a synchronize_sched().
@@ -3496,6 +3512,7 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
 		op = rcu_dereference_raw(op->next);
 	};
 	preempt_enable_notrace();
+	trace_recursion_clear(TRACE_INTERNAL_BIT);
 }
 
 static void clear_ftrace_swapper(void)
@@ -3799,7 +3816,7 @@ int register_ftrace_function(struct ftrace_ops *ops)
 
 	ret = __register_ftrace_function(ops);
 	if (!ret)
-		ftrace_startup(ops, 0);
+		ret = ftrace_startup(ops, 0);
 
 
  out_unlock:
@@ -4045,7 +4062,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 	ftrace_graph_return = retfunc;
 	ftrace_graph_entry = entryfunc;
 
-	ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
+	ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
 
 out:
 	mutex_unlock(&ftrace_lock);
@@ -2216,7 +2216,7 @@ static noinline void trace_recursive_fail(void)
 
 	printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
 		    "HC[%lu]:SC[%lu]:NMI[%lu]\n",
-		    current->trace_recursion,
+		    trace_recursion_buffer(),
 		    hardirq_count() >> HARDIRQ_SHIFT,
 		    softirq_count() >> SOFTIRQ_SHIFT,
 		    in_nmi());
@@ -2226,9 +2226,9 @@ static noinline void trace_recursive_fail(void)
 
 static inline int trace_recursive_lock(void)
 {
-	current->trace_recursion++;
+	trace_recursion_inc();
 
-	if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
+	if (likely(trace_recursion_buffer() < TRACE_RECURSIVE_DEPTH))
 		return 0;
 
 	trace_recursive_fail();
@@ -2238,9 +2238,9 @@ static inline int trace_recursive_lock(void)
 
 static inline void trace_recursive_unlock(void)
 {
-	WARN_ON_ONCE(!current->trace_recursion);
+	WARN_ON_ONCE(!trace_recursion_buffer());
 
-	current->trace_recursion--;
+	trace_recursion_dec();
 }
 
 #else
@@ -784,4 +784,19 @@ extern const char *__stop___trace_bprintk_fmt[];
 	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
 #include "trace_entries.h"
 
+/* Only current can touch trace_recursion */
+#define trace_recursion_inc() do { (current)->trace_recursion++; } while (0)
+#define trace_recursion_dec() do { (current)->trace_recursion--; } while (0)
+
+/* Ring buffer has the 10 LSB bits to count */
+#define trace_recursion_buffer() ((current)->trace_recursion & 0x3ff)
+
+/* for function tracing recursion */
+#define TRACE_INTERNAL_BIT		(1<<11)
+#define TRACE_GLOBAL_BIT		(1<<12)
+
+#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (bit); } while (0)
+#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(bit); } while (0)
+#define trace_recursion_test(bit)	((current)->trace_recursion & (bit))
+
 #endif /* _LINUX_KERNEL_TRACE_H */
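
These macros carve the existing per-task trace_recursion word into two regions: bits 0-9 stay the ring-buffer recursion counter (hence the 0x3ff mask in trace_recursion_buffer()), while bits 11 and 12 serve as independent re-entry flags for the two function-tracing loops patched above. The following standalone sketch (userspace, with a local variable standing in for current->trace_recursion) simply demonstrates that the counter and the flag bits do not disturb each other:

#include <assert.h>
#include <stdio.h>

#define INTERNAL_BIT	(1 << 11)	/* mirrors TRACE_INTERNAL_BIT */
#define GLOBAL_BIT	(1 << 12)	/* mirrors TRACE_GLOBAL_BIT */
#define BUFFER_MASK	0x3ff		/* low 10 bits: ring-buffer counter */

int main(void)
{
	unsigned long recursion = 0;	/* stands in for current->trace_recursion */

	recursion |= INTERNAL_BIT;	/* trace_recursion_set() */
	recursion++;			/* trace_recursion_inc() */
	recursion++;

	assert((recursion & BUFFER_MASK) == 2);	/* trace_recursion_buffer() */
	assert(recursion & INTERNAL_BIT);	/* trace_recursion_test() */
	assert(!(recursion & GLOBAL_BIT));

	recursion &= ~INTERNAL_BIT;	/* trace_recursion_clear() */
	recursion -= 2;			/* trace_recursion_dec() */
	assert(recursion == 0);

	printf("counter bits and flag bits stay independent\n");
	return 0;
}
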
@@ -1657,7 +1657,12 @@ static struct ftrace_ops trace_ops __initdata =
 
 static __init void event_trace_self_test_with_function(void)
 {
-	register_ftrace_function(&trace_ops);
+	int ret;
+	ret = register_ftrace_function(&trace_ops);
+	if (WARN_ON(ret < 0)) {
+		pr_info("Failed to enable function tracer for event tests\n");
+		return;
+	}
 	pr_info("Running tests again, along with the function tracer\n");
 	event_trace_self_tests();
 	unregister_ftrace_function(&trace_ops);
@@ -353,6 +353,33 @@ ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
 }
 EXPORT_SYMBOL(ftrace_print_symbols_seq);
 
+#if BITS_PER_LONG == 32
+const char *
+ftrace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
+			 const struct trace_print_flags_u64 *symbol_array)
+{
+	int i;
+	const char *ret = p->buffer + p->len;
+
+	for (i = 0; symbol_array[i].name; i++) {
+
+		if (val != symbol_array[i].mask)
+			continue;
+
+		trace_seq_puts(p, symbol_array[i].name);
+		break;
+	}
+
+	if (!p->len)
+		trace_seq_printf(p, "0x%llx", val);
+
+	trace_seq_putc(p, 0);
+
+	return ret;
+}
+EXPORT_SYMBOL(ftrace_print_symbols_seq_u64);
+#endif
+
 const char *
 ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
 {
@@ -15,10 +15,10 @@
  * happens, handle that and return -EFAULT.
  */
 
-long __weak probe_kernel_read(void *dst, void *src, size_t size)
+long __weak probe_kernel_read(void *dst, const void *src, size_t size)
 	__attribute__((alias("__probe_kernel_read")));
 
-long __probe_kernel_read(void *dst, void *src, size_t size)
+long __probe_kernel_read(void *dst, const void *src, size_t size)
 {
 	long ret;
 	mm_segment_t old_fs = get_fs();
@@ -43,10 +43,10 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
  * Safely write to address @dst from the buffer at @src. If a kernel fault
  * happens, handle that and return -EFAULT.
  */
-long __weak probe_kernel_write(void *dst, void *src, size_t size)
+long __weak probe_kernel_write(void *dst, const void *src, size_t size)
 	__attribute__((alias("__probe_kernel_write")));
 
-long __probe_kernel_write(void *dst, void *src, size_t size)
+long __probe_kernel_write(void *dst, const void *src, size_t size)
 {
 	long ret;
 	mm_segment_t old_fs = get_fs();
@@ -43,6 +43,7 @@
 #undef ELF_R_INFO
 #undef Elf_r_info
 #undef ELF_ST_BIND
+#undef ELF_ST_TYPE
 #undef fn_ELF_R_SYM
 #undef fn_ELF_R_INFO
 #undef uint_t
@@ -76,6 +77,7 @@
 # define ELF_R_INFO		ELF64_R_INFO
 # define Elf_r_info		Elf64_r_info
 # define ELF_ST_BIND		ELF64_ST_BIND
+# define ELF_ST_TYPE		ELF64_ST_TYPE
 # define fn_ELF_R_SYM		fn_ELF64_R_SYM
 # define fn_ELF_R_INFO		fn_ELF64_R_INFO
 # define uint_t			uint64_t
@@ -108,6 +110,7 @@
 # define ELF_R_INFO		ELF32_R_INFO
 # define Elf_r_info		Elf32_r_info
 # define ELF_ST_BIND		ELF32_ST_BIND
+# define ELF_ST_TYPE		ELF32_ST_TYPE
 # define fn_ELF_R_SYM		fn_ELF32_R_SYM
 # define fn_ELF_R_INFO		fn_ELF32_R_INFO
 # define uint_t			uint32_t
@@ -427,6 +430,11 @@ static unsigned find_secsym_ndx(unsigned const txtndx,
 		if (txtndx == w2(symp->st_shndx)
 			/* avoid STB_WEAK */
 		    && (STB_LOCAL == st_bind || STB_GLOBAL == st_bind)) {
+			/* function symbols on ARM have quirks, avoid them */
+			if (w2(ehdr->e_machine) == EM_ARM
+				&& ELF_ST_TYPE(symp->st_info) == STT_FUNC)
+				continue;
+
 			*recvalp = _w(symp->st_value);
 			return symp - sym0;
 		}
@@ -132,7 +132,7 @@ exuberant()
 	--regex-asm='/^ENTRY\(([^)]*)\).*/\1/' \
 	--regex-c='/^SYSCALL_DEFINE[[:digit:]]?\(([^,)]*).*/sys_\1/' \
 	--regex-c++='/^TRACE_EVENT\(([^,)]*).*/trace_\1/' \
-	--regex-c++='/^DEFINE_EVENT\(([^,)]*).*/trace_\1/'
+	--regex-c++='/^DEFINE_EVENT\([^,)]*, *([^,)]*).*/trace_\1/'
 
 	all_kconfigs | xargs $1 -a \
 	--langdef=kconfig --language-force=kconfig \
@@ -152,7 +152,9 @@ emacs()
 {
 	all_sources | xargs $1 -a \
 	--regex='/^ENTRY(\([^)]*\)).*/\1/' \
-	--regex='/^SYSCALL_DEFINE[0-9]?(\([^,)]*\).*/sys_\1/'
+	--regex='/^SYSCALL_DEFINE[0-9]?(\([^,)]*\).*/sys_\1/' \
+	--regex='/^TRACE_EVENT(\([^,)]*\).*/trace_\1/' \
+	--regex='/^DEFINE_EVENT([^,)]*, *\([^,)]*\).*/trace_\1/'
 
 	all_kconfigs | xargs $1 -a \
 	--regex='/^[ \t]*\(\(menu\)*config\)[ \t]+\([a-zA-Z0-9_]+\)/\3/'