tracing: Remove most or all of stack tracer stack size from stack_max_size
Currently, the depth reported in the stack tracer stack_trace file does not
match the stack_max_size file. This is because stack_max_size includes the
overhead of the stack tracer itself, while the depth does not.

The first time a max is triggered, a calculation is now performed that
figures out the overhead of the stack tracer and subtracts it from the
stack_max_size variable. The overhead is stored and is subtracted from the
reported stack size when comparing for a new max. Now stack_max_size
corresponds to the reported depth:

 # cat stack_max_size
 4640

 # cat stack_trace
         Depth    Size   Location    (48 entries)
         -----    ----   --------
   0)     4640      32   _raw_spin_lock+0x18/0x24
   1)     4608     112   ____cache_alloc+0xb7/0x22d
   2)     4496      80   kmem_cache_alloc+0x63/0x12f
   3)     4416      16   mempool_alloc_slab+0x15/0x17
 [...]

While testing against an older gcc on x86 that uses mcount instead of
fentry, I found that passing in ip + MCOUNT_INSN_SIZE lets the stack trace
show one more function deep which was missing before.

Cc: stable@vger.kernel.org
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
commit 4df297129f
parent d4ecbfc49b
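Before the diff itself, here is a minimal, single-threaded userspace sketch of
the bookkeeping described above. Everything in it (check_stack_sketch, the fake
stack contents, the addresses) is hypothetical illustration, not the kernel
code; the real change is in the hunks that follow.

/*
 * Sketch of the accounting idea: on the first new max, find where the
 * passed-in ip (the traced function's return address) sits on the stack
 * and treat everything below it as the stack tracer's own overhead.
 */
#include <stdio.h>

#define THREAD_SIZE 8192UL

static unsigned long max_stack_size;
static int tracer_frame;        /* cached tracer overhead, in bytes */

static void check_stack_sketch(unsigned long sp, unsigned long ip,
                               const unsigned long *stack, int nr_words)
{
        unsigned long this_size = THREAD_SIZE - (sp & (THREAD_SIZE - 1));

        /* Remove the already-known tracer overhead before comparing */
        this_size -= tracer_frame;
        if (this_size <= max_stack_size)
                return;

        max_stack_size = this_size;

        /* First max only: learn the overhead from the ip's position */
        for (int i = 0; !tracer_frame && i < nr_words; i++) {
                if (stack[i] == ip) {
                        tracer_frame = i * sizeof(unsigned long);
                        max_stack_size -= tracer_frame;
                }
        }
}

int main(void)
{
        /* Fake stack: the traced function's return address (0xfeed)
         * is found three words above the current stack pointer. */
        unsigned long stack[] = { 0x1, 0x2, 0x3, 0xfeed, 0x4 };

        check_stack_sketch(0x7000, 0xfeed, stack, 5);
        printf("max_stack_size=%lu tracer_frame=%d\n",
               max_stack_size, tracer_frame);
        return 0;
}

Run, this prints max_stack_size=4072 tracer_frame=24: the three words (24
bytes) the "tracer" occupied below the traced function's return address are
measured once and then excluded, which is the role tracer_frame and frame_size
play in the patch below.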
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -20,27 +20,24 @@
 
 #define STACK_TRACE_ENTRIES 500
 
-/*
- * If fentry is used, then the function being traced will
- * jump to fentry directly before it sets up its stack frame.
- * We need to ignore that one and record the parent. Since
- * the stack frame for the traced function wasn't set up yet,
- * the stack_trace wont see the parent. That needs to be added
- * manually to stack_dump_trace[] as the first element.
- */
 #ifdef CC_USING_FENTRY
-# define add_func       1
+# define fentry         1
 #else
-# define add_func       0
+# define fentry         0
 #endif
 
 static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
          { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
 static unsigned stack_dump_index[STACK_TRACE_ENTRIES];
 
+/*
+ * Reserve one entry for the passed in ip. This will allow
+ * us to remove most or all of the stack size overhead
+ * added by the stack tracer itself.
+ */
 static struct stack_trace max_stack_trace = {
-        .max_entries            = STACK_TRACE_ENTRIES - add_func,
-        .entries                = &stack_dump_trace[add_func],
+        .max_entries            = STACK_TRACE_ENTRIES - 1,
+        .entries                = &stack_dump_trace[1],
 };
 
 static unsigned long max_stack_size;
@@ -58,10 +55,14 @@ check_stack(unsigned long ip, unsigned long *stack)
 {
         unsigned long this_size, flags;
         unsigned long *p, *top, *start;
+        static int tracer_frame;
+        int frame_size = ACCESS_ONCE(tracer_frame);
         int i;
 
         this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
         this_size = THREAD_SIZE - this_size;
+        /* Remove the frame of the tracer */
+        this_size -= frame_size;
 
         if (this_size <= max_stack_size)
                 return;
@@ -73,6 +74,10 @@ check_stack(unsigned long ip, unsigned long *stack)
         local_irq_save(flags);
         arch_spin_lock(&max_stack_lock);
 
+        /* In case another CPU set the tracer_frame on us */
+        if (unlikely(!frame_size))
+                this_size -= tracer_frame;
+
         /* a race could have already updated it */
         if (this_size <= max_stack_size)
                 goto out;
@@ -85,15 +90,12 @@ check_stack(unsigned long ip, unsigned long *stack)
         save_stack_trace(&max_stack_trace);
 
         /*
-         * When fentry is used, the traced function does not get
-         * its stack frame set up, and we lose the parent.
-         * Add that one in manally. We set up save_stack_trace()
-         * to not touch the first element in this case.
+         * Add the passed in ip from the function tracer.
+         * Searching for this on the stack will skip over
+         * most of the overhead from the stack tracer itself.
          */
-        if (add_func) {
-                stack_dump_trace[0] = ip;
-                max_stack_trace.nr_entries++;
-        }
+        stack_dump_trace[0] = ip;
+        max_stack_trace.nr_entries++;
 
         /*
          * Now find where in the stack these are.
@@ -123,6 +125,18 @@ check_stack(unsigned long ip, unsigned long *stack)
                                 found = 1;
                                 /* Start the search from here */
                                 start = p + 1;
+                                /*
+                                 * We do not want to show the overhead
+                                 * of the stack tracer stack in the
+                                 * max stack. If we haven't figured
+                                 * out what that is, then figure it out
+                                 * now.
+                                 */
+                                if (unlikely(!tracer_frame) && i == 1) {
+                                        tracer_frame = (p - stack) *
+                                                sizeof(unsigned long);
+                                        max_stack_size -= tracer_frame;
+                                }
                         }
                 }
 
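To put hypothetical numbers on the calculation added above: suppose the word
matching stack_dump_trace[0] (the passed-in ip) turns up 14 words above the
stack pointer that check_stack() was handed, on a 64-bit kernel. Then

    tracer_frame   = (p - stack) * sizeof(unsigned long) = 14 * 8 = 112 bytes
    max_stack_size = max_stack_size - 112

and every later sample subtracts the cached 112 bytes up front through
frame_size, so the tracer's own frames are measured once and never inflate
the reported max again.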
@@ -149,7 +163,26 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
         if (per_cpu(trace_active, cpu)++ != 0)
                 goto out;
 
-        check_stack(parent_ip, &stack);
+        /*
+         * When fentry is used, the traced function does not get
+         * its stack frame set up, and we lose the parent.
+         * The ip is pretty useless because the function tracer
+         * was called before that function set up its stack frame.
+         * In this case, we use the parent ip.
+         *
+         * By adding the return address of either the parent ip
+         * or the current ip we can disregard most of the stack usage
+         * caused by the stack tracer itself.
+         *
+         * The function tracer always reports the address of where the
+         * mcount call was, but the stack will hold the return address.
+         */
+        if (fentry)
+                ip = parent_ip;
+        else
+                ip += MCOUNT_INSN_SIZE;
+
+        check_stack(ip, &stack);
 
  out:
         per_cpu(trace_active, cpu)--;
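As a reading aid for the mcount branch above, here is a hypothetical x86
call-site layout (the addresses are invented; MCOUNT_INSN_SIZE is the length
of the call instruction, 5 bytes on x86):

/*
 * traced_func:
 *     push %rbp                  prologue: the frame is already set up
 *     ...
 *     c04812a0:  call mcount     ftrace reports ip = 0xc04812a0
 *     c04812a5:  ...             the return address pushed by that call
 *                                is ip + MCOUNT_INSN_SIZE, and that is
 *                                the value actually sitting on the stack
 *                                for check_stack() to find
 */

With fentry, the call happens before the prologue, so the only stable marker
already on the stack is the caller's return address, which is why that branch
passes parent_ip instead.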