x86/dumpstack: Add get_stack_info() interface
valid_stack_ptr() is buggy: it assumes that all stacks are of size THREAD_SIZE, which is not true for exception stacks. So the walk_stack() callbacks will need to know the location of the beginning of the stack as well as the end.

Another issue is that in general the various features of a stack (type, size, next stack pointer, description string) are scattered around in various places throughout the stack dump code.

Encapsulate all that information in a single place with a new stack_info struct and a get_stack_info() interface.

Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Byungchul Park <byungchul.park@lge.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Nilay Vaish <nilayvaish@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/8164dd0db96b7e6a279fa17ae5e6dc375eecb4a9.1473905218.git.jpoimboe@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 9c00390757
commit cb76c93982
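The interface this commit adds revolves around struct stack_info: get_stack_info() classifies a stack pointer, records the enclosing stack's type and bounds, and fills next_sp with a link to the stack that was interrupted, so dump_trace() can hop from stack to stack until it reaches the task stack. Below is a minimal, self-contained userspace sketch of that walking pattern. It only mirrors the names and shapes introduced by the patch; the fixed-size arrays, word counts and the link-stored-at-the-base convention are illustrative assumptions, not kernel code.

/* stack_info_walk.c -- illustrative sketch only; build with: gcc -Wall -o stack_info_walk stack_info_walk.c */
#include <stdio.h>

/* Mirrors the enum and struct added to asm/stacktrace.h by this patch. */
enum stack_type {
	STACK_TYPE_UNKNOWN,
	STACK_TYPE_TASK,
	STACK_TYPE_IRQ,
};

struct stack_info {
	enum stack_type type;
	unsigned long *begin, *end, *next_sp;
};

#define WORDS 32

/* Two fake stacks; the "IRQ" stack's base word will point into the task stack. */
static unsigned long task_stack[WORDS];
static unsigned long irq_stack[WORDS];

/* Toy classifier in the spirit of get_stack_info(): fill in type, bounds, next_sp. */
static int get_stack_info(unsigned long *stack, struct stack_info *info)
{
	if (stack >= task_stack && stack < task_stack + WORDS) {
		info->type    = STACK_TYPE_TASK;
		info->begin   = task_stack;
		info->end     = task_stack + WORDS;
		info->next_sp = NULL;				/* last stack in the chain */
		return 0;
	}
	if (stack >= irq_stack && stack < irq_stack + WORDS) {
		info->type    = STACK_TYPE_IRQ;
		info->begin   = irq_stack;
		info->end     = irq_stack + WORDS;
		info->next_sp = (unsigned long *)irq_stack[0];	/* link saved at the base */
		return 0;
	}
	info->type = STACK_TYPE_UNKNOWN;
	return -1;
}

int main(void)
{
	struct stack_info info;
	unsigned long *sp;

	/* Chain the stacks: the IRQ stack's base word points back at the task stack. */
	irq_stack[0] = (unsigned long)&task_stack[WORDS / 2];

	/* Walk from the IRQ stack to the task stack, like the reworked dump_trace() loop. */
	for (sp = &irq_stack[4]; sp; sp = info.next_sp) {
		if (get_stack_info(sp, &info))
			break;
		printf("on stack type %d: [%p - %p)\n",
		       info.type, (void *)info.begin, (void *)info.end);
	}
	return 0;
}

Run as is, it prints two ranges, the IRQ stack first and then the task stack, which is the order the reworked dump_trace() visits them in.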
@@ -2251,7 +2251,7 @@ void arch_perf_update_userpage(struct perf_event *event,
  * callchain support
  */

-static int backtrace_stack(void *data, char *name)
+static int backtrace_stack(void *data, const char *name)
 {
 	return 0;
 }
@@ -10,6 +10,39 @@
 #include <linux/ptrace.h>
 #include <asm/switch_to.h>

+enum stack_type {
+	STACK_TYPE_UNKNOWN,
+	STACK_TYPE_TASK,
+	STACK_TYPE_IRQ,
+	STACK_TYPE_SOFTIRQ,
+	STACK_TYPE_EXCEPTION,
+	STACK_TYPE_EXCEPTION_LAST = STACK_TYPE_EXCEPTION + N_EXCEPTION_STACKS-1,
+};
+
+struct stack_info {
+	enum stack_type type;
+	unsigned long *begin, *end, *next_sp;
+};
+
+bool in_task_stack(unsigned long *stack, struct task_struct *task,
+		   struct stack_info *info);
+
+int get_stack_info(unsigned long *stack, struct task_struct *task,
+		   struct stack_info *info, unsigned long *visit_mask);
+
+void stack_type_str(enum stack_type type, const char **begin,
+		    const char **end);
+
+static inline bool on_stack(struct stack_info *info, void *addr, size_t len)
+{
+	void *begin = info->begin;
+	void *end   = info->end;
+
+	return (info->type != STACK_TYPE_UNKNOWN &&
+		addr >= begin && addr < end &&
+		addr + len > begin && addr + len <= end);
+}
+
 extern int kstack_depth_to_print;

 struct thread_info;
@@ -20,27 +53,27 @@ typedef unsigned long (*walk_stack_t)(struct task_struct *task,
				      unsigned long bp,
				      const struct stacktrace_ops *ops,
				      void *data,
-				      unsigned long *end,
+				      struct stack_info *info,
				      int *graph);

 extern unsigned long
 print_context_stack(struct task_struct *task,
		    unsigned long *stack, unsigned long bp,
		    const struct stacktrace_ops *ops, void *data,
-		    unsigned long *end, int *graph);
+		    struct stack_info *info, int *graph);

 extern unsigned long
 print_context_stack_bp(struct task_struct *task,
		       unsigned long *stack, unsigned long bp,
		       const struct stacktrace_ops *ops, void *data,
-		       unsigned long *end, int *graph);
+		       struct stack_info *info, int *graph);

 /* Generic stack tracer with callbacks */

 struct stacktrace_ops {
	int (*address)(void *data, unsigned long address, int reliable);
	/* On negative return stop dumping */
-	int (*stack)(void *data, char *name);
+	int (*stack)(void *data, const char *name);
	walk_stack_t walk_stack;
 };
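The on_stack() helper declared above replaces the old ad-hoc range checks: it succeeds only when the whole object [addr, addr + len) lies inside the [begin, end) window of a known stack type. Here is a standalone sketch of the same predicate; the char casts are added so it also builds as plain userspace C (the kernel relies on GCC's void-pointer arithmetic), and the 16-word array is an arbitrary assumption.

/* on_stack_check.c -- illustrative sketch only; build with: gcc -Wall -o on_stack_check on_stack_check.c */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum stack_type { STACK_TYPE_UNKNOWN, STACK_TYPE_TASK };

struct stack_info {
	enum stack_type type;
	unsigned long *begin, *end, *next_sp;
};

/* Same containment test as the on_stack() inline added to asm/stacktrace.h. */
static bool on_stack(struct stack_info *info, void *addr, size_t len)
{
	void *begin = info->begin;
	void *end   = info->end;

	return (info->type != STACK_TYPE_UNKNOWN &&
		addr >= begin && addr < end &&
		(char *)addr + len > (char *)begin &&
		(char *)addr + len <= (char *)end);
}

int main(void)
{
	unsigned long stack[16];
	struct stack_info info = {
		.type  = STACK_TYPE_TASK,
		.begin = stack,
		.end   = stack + 16,
	};

	/* A word fully inside the range passes ... */
	printf("inside  : %d\n", on_stack(&info, &stack[4], sizeof(long)));
	/* ... an object straddling the upper bound does not ... */
	printf("straddle: %d\n", on_stack(&info, &stack[15], 2 * sizeof(long)));
	/* ... and an unknown stack type always fails. */
	info.type = STACK_TYPE_UNKNOWN;
	printf("unknown : %d\n", on_stack(&info, &stack[4], sizeof(long)));
	return 0;
}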
@@ -25,6 +25,23 @@ unsigned int code_bytes = 64;
 int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE;
 static int die_counter;

+bool in_task_stack(unsigned long *stack, struct task_struct *task,
+		   struct stack_info *info)
+{
+	unsigned long *begin = task_stack_page(task);
+	unsigned long *end   = task_stack_page(task) + THREAD_SIZE;
+
+	if (stack < begin || stack >= end)
+		return false;
+
+	info->type	= STACK_TYPE_TASK;
+	info->begin	= begin;
+	info->end	= end;
+	info->next_sp	= NULL;
+
+	return true;
+}
+
 static void printk_stack_address(unsigned long address, int reliable,
				 char *log_lvl)
 {
@@ -46,24 +63,11 @@ void printk_address(unsigned long address)
  * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
  */

-static inline int valid_stack_ptr(struct task_struct *task,
-			void *p, unsigned int size, void *end)
-{
-	void *t = task_stack_page(task);
-	if (end) {
-		if (p < end && p >= (end-THREAD_SIZE))
-			return 1;
-		else
-			return 0;
-	}
-	return p >= t && p < t + THREAD_SIZE - size;
-}
-
 unsigned long
 print_context_stack(struct task_struct *task,
		unsigned long *stack, unsigned long bp,
		const struct stacktrace_ops *ops, void *data,
-		unsigned long *end, int *graph)
+		struct stack_info *info, int *graph)
 {
	struct stack_frame *frame = (struct stack_frame *)bp;

@@ -75,7 +79,7 @@ print_context_stack(struct task_struct *task,
			    PAGE_SIZE)
		stack = (unsigned long *)task_stack_page(task);

-	while (valid_stack_ptr(task, stack, sizeof(*stack), end)) {
+	while (on_stack(info, stack, sizeof(*stack))) {
		unsigned long addr = *stack;

		if (__kernel_text_address(addr)) {
@@ -114,12 +118,12 @@ unsigned long
 print_context_stack_bp(struct task_struct *task,
		       unsigned long *stack, unsigned long bp,
		       const struct stacktrace_ops *ops, void *data,
-		       unsigned long *end, int *graph)
+		       struct stack_info *info, int *graph)
 {
	struct stack_frame *frame = (struct stack_frame *)bp;
	unsigned long *retp = &frame->return_address;

-	while (valid_stack_ptr(task, retp, sizeof(*retp), end)) {
+	while (on_stack(info, stack, sizeof(*stack) * 2)) {
		unsigned long addr = *retp;
		unsigned long real_addr;

@@ -138,7 +142,7 @@ print_context_stack_bp(struct task_struct *task,
 }
 EXPORT_SYMBOL_GPL(print_context_stack_bp);

-static int print_trace_stack(void *data, char *name)
+static int print_trace_stack(void *data, const char *name)
 {
	printk("%s <%s> ", (char *)data, name);
	return 0;
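The hunks above are the heart of the fix the changelog describes: valid_stack_ptr() derived the base of a stack by subtracting THREAD_SIZE from its end, which over-approximates the bounds of the much smaller exception stacks, whereas on_stack() uses the real begin/end recorded in struct stack_info. A standalone illustration follows; the concrete sizes and addresses are assumptions chosen only to make the over-approximation visible.

/* thread_size_bug.c -- illustrative sketch only; build with: gcc -Wall -o thread_size_bug thread_size_bug.c */
#include <stdbool.h>
#include <stdio.h>

#define THREAD_SIZE	(4UL * 4096)	/* assumed task-stack size */
#define EXCEPTION_STKSZ	(1UL * 4096)	/* assumed (smaller) exception-stack size */

/* Old-style check: guess the base by assuming every stack is THREAD_SIZE bytes. */
static bool old_valid_stack_ptr(unsigned long p, unsigned long end)
{
	return p < end && p >= end - THREAD_SIZE;
}

/* New-style check: use the real begin/end that get_stack_info() records. */
static bool new_on_stack(unsigned long p, unsigned long begin, unsigned long end)
{
	return p >= begin && p < end;
}

int main(void)
{
	/* A pretend exception stack ending at 0x10000 and only 4K deep. */
	unsigned long end   = 0x10000;
	unsigned long begin = end - EXCEPTION_STKSZ;
	unsigned long bogus = end - THREAD_SIZE + 8;	/* far below the real base */

	printf("old check accepts an out-of-stack pointer: %d\n",
	       old_valid_stack_ptr(bogus, end));
	printf("new check rejects it:                      %d\n",
	       !new_on_stack(bogus, begin, end));
	return 0;
}

The first line prints 1, i.e. the old check would happily walk memory that lies below a 4K exception stack.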
@@ -16,61 +16,117 @@
 #include <asm/stacktrace.h>

-static void *is_irq_stack(void *p, void *irq)
+void stack_type_str(enum stack_type type, const char **begin, const char **end)
 {
-	if (p < irq || p >= (irq + THREAD_SIZE))
-		return NULL;
-	return irq + THREAD_SIZE;
+	switch (type) {
+	case STACK_TYPE_IRQ:
+	case STACK_TYPE_SOFTIRQ:
+		*begin = "IRQ";
+		*end   = "EOI";
+		break;
+	default:
+		*begin = NULL;
+		*end   = NULL;
+	}
 }

-
-static void *is_hardirq_stack(unsigned long *stack)
+static bool in_hardirq_stack(unsigned long *stack, struct stack_info *info)
 {
-	void *irq = this_cpu_read(hardirq_stack);
+	unsigned long *begin = (unsigned long *)this_cpu_read(hardirq_stack);
+	unsigned long *end   = begin + (THREAD_SIZE / sizeof(long));

-	return is_irq_stack(stack, irq);
+	if (stack < begin || stack >= end)
+		return false;
+
+	info->type	= STACK_TYPE_IRQ;
+	info->begin	= begin;
+	info->end	= end;
+
+	/*
+	 * See irq_32.c -- the next stack pointer is stored at the beginning of
+	 * the stack.
+	 */
+	info->next_sp	= (unsigned long *)*begin;
+
+	return true;
 }

-static void *is_softirq_stack(unsigned long *stack)
+static bool in_softirq_stack(unsigned long *stack, struct stack_info *info)
 {
-	void *irq = this_cpu_read(softirq_stack);
+	unsigned long *begin = (unsigned long *)this_cpu_read(softirq_stack);
+	unsigned long *end   = begin + (THREAD_SIZE / sizeof(long));

-	return is_irq_stack(stack, irq);
+	if (stack < begin || stack >= end)
+		return false;
+
+	info->type	= STACK_TYPE_SOFTIRQ;
+	info->begin	= begin;
+	info->end	= end;
+
+	/*
+	 * The next stack pointer is stored at the beginning of the stack.
+	 * See irq_32.c.
+	 */
+	info->next_sp	= (unsigned long *)*begin;
+
+	return true;
+}
+
+int get_stack_info(unsigned long *stack, struct task_struct *task,
+		   struct stack_info *info, unsigned long *visit_mask)
+{
+	if (!stack)
+		goto unknown;
+
+	task = task ? : current;
+
+	if (in_task_stack(stack, task, info))
+		return 0;
+
+	if (task != current)
+		goto unknown;
+
+	if (in_hardirq_stack(stack, info))
+		return 0;
+
+	if (in_softirq_stack(stack, info))
+		return 0;
+
+unknown:
+	info->type = STACK_TYPE_UNKNOWN;
+	return -EINVAL;
 }

 void dump_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp,
		const struct stacktrace_ops *ops, void *data)
 {
+	unsigned long visit_mask = 0;
	int graph = 0;
-	u32 *prev_esp;

	task = task ? : current;
	stack = stack ? : get_stack_pointer(task, regs);
	bp = bp ? : (unsigned long)get_frame_pointer(task, regs);

	for (;;) {
-		void *end_stack;
+		const char *begin_str, *end_str;
+		struct stack_info info;

-		end_stack = is_hardirq_stack(stack);
-		if (!end_stack)
-			end_stack = is_softirq_stack(stack);
-
-		bp = ops->walk_stack(task, stack, bp, ops, data,
-				     end_stack, &graph);
-
-		/* Stop if not on irq stack */
-		if (!end_stack)
+		if (get_stack_info(stack, task, &info, &visit_mask))
			break;

-		/* The previous esp is saved on the bottom of the stack */
-		prev_esp = (u32 *)(end_stack - THREAD_SIZE);
-		stack = (unsigned long *)*prev_esp;
-		if (!stack)
+		stack_type_str(info.type, &begin_str, &end_str);
+
+		if (begin_str && ops->stack(data, begin_str) < 0)
			break;

-		if (ops->stack(data, "IRQ") < 0)
+		bp = ops->walk_stack(task, stack, bp, ops, data, &info, &graph);
+
+		if (end_str && ops->stack(data, end_str) < 0)
			break;

+		stack = info.next_sp;
+
		touch_nmi_watchdog();
	}
 }
@@ -28,76 +28,109 @@ static unsigned long exception_stack_sizes[N_EXCEPTION_STACKS] = {
	[DEBUG_STACK - 1]			= DEBUG_STKSZ
 };

-static unsigned long *in_exception_stack(unsigned long stack, unsigned *usedp,
-					 char **idp)
+void stack_type_str(enum stack_type type, const char **begin, const char **end)
 {
-	unsigned long begin, end;
+	BUILD_BUG_ON(N_EXCEPTION_STACKS != 4);
+
+	switch (type) {
+	case STACK_TYPE_IRQ:
+		*begin = "IRQ";
+		*end   = "EOI";
+		break;
+	case STACK_TYPE_EXCEPTION ... STACK_TYPE_EXCEPTION_LAST:
+		*begin = exception_stack_names[type - STACK_TYPE_EXCEPTION];
+		*end   = "EOE";
+		break;
+	default:
+		*begin = NULL;
+		*end   = NULL;
+	}
+}
+
+static bool in_exception_stack(unsigned long *stack, struct stack_info *info,
+			       unsigned long *visit_mask)
+{
+	unsigned long *begin, *end;
+	struct pt_regs *regs;
	unsigned k;

	BUILD_BUG_ON(N_EXCEPTION_STACKS != 4);

	for (k = 0; k < N_EXCEPTION_STACKS; k++) {
-		end   = raw_cpu_ptr(&orig_ist)->ist[k];
-		begin = end - exception_stack_sizes[k];
+		end   = (unsigned long *)raw_cpu_ptr(&orig_ist)->ist[k];
+		begin = end - (exception_stack_sizes[k] / sizeof(long));
+		regs  = (struct pt_regs *)end - 1;

		if (stack < begin || stack >= end)
			continue;

		/*
-		 * Make sure we only iterate through an exception stack once.
-		 * If it comes up for the second time then there's something
-		 * wrong going on - just break and return NULL:
+		 * Make sure we don't iterate through an exception stack more
+		 * than once.  If it comes up a second time then there's
+		 * something wrong going on - just break out and report an
+		 * unknown stack type.
		 */
-		if (*usedp & (1U << k))
+		if (*visit_mask & (1U << k))
			break;
-		*usedp |= 1U << k;
+		*visit_mask |= 1U << k;

-		*idp = exception_stack_names[k];
-		return (unsigned long *)end;
+		info->type	= STACK_TYPE_EXCEPTION + k;
+		info->begin	= begin;
+		info->end	= end;
+		info->next_sp	= (unsigned long *)regs->sp;
+
+		return true;
	}

-	return NULL;
+	return false;
 }

-static inline int
-in_irq_stack(unsigned long *stack, unsigned long *irq_stack,
-		unsigned long *irq_stack_end)
+static bool in_irq_stack(unsigned long *stack, struct stack_info *info)
 {
-	return (stack >= irq_stack && stack < irq_stack_end);
+	unsigned long *end   = (unsigned long *)this_cpu_read(irq_stack_ptr);
+	unsigned long *begin = end - (IRQ_STACK_SIZE / sizeof(long));
+
+	if (stack < begin || stack >= end)
+		return false;
+
+	info->type	= STACK_TYPE_IRQ;
+	info->begin	= begin;
+	info->end	= end;
+
+	/*
+	 * The next stack pointer is the first thing pushed by the entry code
+	 * after switching to the irq stack.
+	 */
+	info->next_sp = (unsigned long *)*(end - 1);
+
+	return true;
 }

-enum stack_type {
-	STACK_IS_UNKNOWN,
-	STACK_IS_NORMAL,
-	STACK_IS_EXCEPTION,
-	STACK_IS_IRQ,
-};
-
-static enum stack_type
-analyze_stack(struct task_struct *task, unsigned long *stack,
-	      unsigned long **stack_end, unsigned long *irq_stack,
-	      unsigned *used, char **id)
+int get_stack_info(unsigned long *stack, struct task_struct *task,
+		   struct stack_info *info, unsigned long *visit_mask)
 {
-	unsigned long addr;
+	if (!stack)
+		goto unknown;

-	addr = ((unsigned long)stack & (~(THREAD_SIZE - 1)));
-	if ((unsigned long)task_stack_page(task) == addr)
-		return STACK_IS_NORMAL;
+	task = task ? : current;

-	*stack_end = in_exception_stack((unsigned long)stack, used, id);
-	if (*stack_end)
-		return STACK_IS_EXCEPTION;
+	if (in_task_stack(stack, task, info))
+		return 0;

-	if (!irq_stack)
-		return STACK_IS_NORMAL;
+	if (task != current)
+		goto unknown;

-	*stack_end = irq_stack;
-	irq_stack -= (IRQ_STACK_SIZE / sizeof(long));
+	if (in_exception_stack(stack, info, visit_mask))
+		return 0;

-	if (in_irq_stack(stack, irq_stack, *stack_end))
-		return STACK_IS_IRQ;
+	if (in_irq_stack(stack, info))
+		return 0;

-	return STACK_IS_UNKNOWN;
+	return 0;
+
+unknown:
+	info->type = STACK_TYPE_UNKNOWN;
+	return -EINVAL;
 }

 /*
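in_exception_stack() above keeps the recursion guard from the old code, renaming usedp to visit_mask: each of the N_EXCEPTION_STACKS may be entered at most once per trace, and a second hit means the unwind went wrong, so the stack ends up reported as unknown. A tiny standalone sketch of that bitmask guard:

/* visit_mask.c -- illustrative sketch only; build with: gcc -Wall -o visit_mask visit_mask.c */
#include <stdbool.h>
#include <stdio.h>

#define N_EXCEPTION_STACKS 4

/* Allow each exception-stack index to be visited once; refuse a second visit. */
static bool visit_once(unsigned int k, unsigned long *visit_mask)
{
	if (k >= N_EXCEPTION_STACKS || (*visit_mask & (1U << k)))
		return false;
	*visit_mask |= 1U << k;
	return true;
}

int main(void)
{
	unsigned long visit_mask = 0;

	printf("first visit of stack 2 : %d\n", visit_once(2, &visit_mask));
	printf("first visit of stack 0 : %d\n", visit_once(0, &visit_mask));
	printf("second visit of stack 2: %d\n", visit_once(2, &visit_mask));
	printf("visit_mask is now 0x%lx\n", visit_mask);
	return 0;
}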
@@ -111,8 +144,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp,
		const struct stacktrace_ops *ops, void *data)
 {
-	unsigned long *irq_stack = (unsigned long *)this_cpu_read(irq_stack_ptr);
-	unsigned used = 0;
+	unsigned long visit_mask = 0;
+	struct stack_info info;
	int graph = 0;
	int done = 0;

@@ -126,57 +159,37 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
	 * exceptions
	 */
	while (!done) {
-		unsigned long *stack_end;
-		enum stack_type stype;
-		char *id;
+		const char *begin_str, *end_str;

-		stype = analyze_stack(task, stack, &stack_end, irq_stack, &used,
-				      &id);
+		get_stack_info(stack, task, &info, &visit_mask);

		/* Default finish unless specified to continue */
		done = 1;

-		switch (stype) {
+		switch (info.type) {

		/* Break out early if we are on the thread stack */
-		case STACK_IS_NORMAL:
+		case STACK_TYPE_TASK:
			break;

-		case STACK_IS_EXCEPTION:
+		case STACK_TYPE_IRQ:
+		case STACK_TYPE_EXCEPTION ... STACK_TYPE_EXCEPTION_LAST:

-			if (ops->stack(data, id) < 0)
+			stack_type_str(info.type, &begin_str, &end_str);
+
+			if (ops->stack(data, begin_str) < 0)
				break;

			bp = ops->walk_stack(task, stack, bp, ops,
-					     data, stack_end, &graph);
-			ops->stack(data, "EOE");
-			/*
-			 * We link to the next stack via the
-			 * second-to-last pointer (index -2 to end) in the
-			 * exception stack:
-			 */
-			stack = (unsigned long *) stack_end[-2];
+					     data, &info, &graph);
+
+			ops->stack(data, end_str);
+
+			stack = info.next_sp;
			done = 0;
			break;

-		case STACK_IS_IRQ:
-
-			if (ops->stack(data, "IRQ") < 0)
-				break;
-			bp = ops->walk_stack(task, stack, bp,
-				     ops, data, stack_end, &graph);
-			/*
-			 * We link to the next stack (which would be
-			 * the process stack normally) the last
-			 * pointer (index -1 to end) in the IRQ stack:
-			 */
-			stack = (unsigned long *) (stack_end[-1]);
-			irq_stack = NULL;
-			ops->stack(data, "EOI");
-			done = 0;
-			break;
-
-		case STACK_IS_UNKNOWN:
+		default:
			ops->stack(data, "UNK");
			break;
		}
@@ -185,7 +198,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
	/*
	 * This handles the process stack:
	 */
-	bp = ops->walk_stack(task, stack, bp, ops, data, NULL, &graph);
+	bp = ops->walk_stack(task, stack, bp, ops, data, &info, &graph);
 }
 EXPORT_SYMBOL(dump_trace);

@@ -9,7 +9,7 @@
 #include <linux/uaccess.h>
 #include <asm/stacktrace.h>

-static int save_stack_stack(void *data, char *name)
+static int save_stack_stack(void *data, const char *name)
 {
	return 0;
 }
@@ -17,7 +17,7 @@
 #include <asm/ptrace.h>
 #include <asm/stacktrace.h>

-static int backtrace_stack(void *data, char *name)
+static int backtrace_stack(void *data, const char *name)
 {
	/* Yes, we want all stacks */
	return 0;