Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6:
  sparc64: Fix hardirq tracing in trap return path.
  sparc64: Use correct pt_regs in decode_access_size() error paths.
  sparc64: Fix PREEMPT_ACTIVE value.
  sparc64: Run NMIs on the hardirq stack.
  sparc64: Allocate sufficient stack space in ftrace stubs.
  sparc: Fix forgotten kmemleak headers inclusion
commit ac8bf56430
arch/sparc/include/asm/thread_info_64.h
@@ -111,7 +111,7 @@ struct thread_info {
 #define THREAD_SHIFT PAGE_SHIFT
 #endif /* PAGE_SHIFT == 13 */
 
-#define PREEMPT_ACTIVE		0x4000000
+#define PREEMPT_ACTIVE		0x10000000
 
 /*
  * macros/functions for gaining access to the thread information structure
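Note on the PREEMPT_ACTIVE change: kernels of this era pack preemption, softirq, hardirq and NMI state into one preempt_count word, and 0x4000000 is bit 26 — the bit <linux/hardirq.h> uses for NMI_MASK, which matters now that NMIs run on the hardirq stack (see nmi.c below). Moving the flag to 0x10000000 (bit 28) keeps it clear of every field. A small standalone C check of the collision; the mask values are recalled from the 2.6.34-era hardirq.h and should be treated as illustrative:

	#include <stdio.h>

	/* Assumed preempt_count layout (2.6.34-era <linux/hardirq.h>):
	 *   PREEMPT_MASK 0x000000ff, SOFTIRQ_MASK 0x0000ff00,
	 *   HARDIRQ_MASK 0x03ff0000, NMI_MASK 0x04000000 (bit 26).
	 */
	int main(void)
	{
		unsigned int nmi_mask = 0x04000000;

		printf("old & NMI_MASK = 0x%08x\n", 0x4000000u  & nmi_mask); /* non-zero: collides */
		printf("new & NMI_MASK = 0x%08x\n", 0x10000000u & nmi_mask); /* zero: disjoint */
		return 0;
	}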
arch/sparc/kernel/irq_64.c
@@ -22,6 +22,7 @@
 #include <linux/seq_file.h>
 #include <linux/ftrace.h>
 #include <linux/irq.h>
+#include <linux/kmemleak.h>
 
 #include <asm/ptrace.h>
 #include <asm/processor.h>
@@ -46,6 +47,7 @@
 
 #include "entry.h"
 #include "cpumap.h"
+#include "kstack.h"
 
 #define NUM_IVECS	(IMAP_INR + 1)
 
@@ -712,24 +714,6 @@ void ack_bad_irq(unsigned int virt_irq)
 void *hardirq_stack[NR_CPUS];
 void *softirq_stack[NR_CPUS];
 
-static __attribute__((always_inline)) void *set_hardirq_stack(void)
-{
-	void *orig_sp, *sp = hardirq_stack[smp_processor_id()];
-
-	__asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
-	if (orig_sp < sp ||
-	    orig_sp > (sp + THREAD_SIZE)) {
-		sp += THREAD_SIZE - 192 - STACK_BIAS;
-		__asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
-	}
-
-	return orig_sp;
-}
-static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
-{
-	__asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
-}
-
 void __irq_entry handler_irq(int irq, struct pt_regs *regs)
 {
 	unsigned long pstate, bucket_pa;
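Two independent changes land in this file: <linux/kmemleak.h> becomes an explicit include rather than a transitive one (the "forgotten kmemleak headers" fix — this file presumably uses kmemleak annotations and stopped compiling once the header no longer arrived indirectly), and the stack-switching helpers are deleted here because they move, essentially verbatim, into kstack.h below so nmi.c can share them. A generic sketch of the include fix's pattern — annotating a boot-time allocation that is intentionally never freed; the helper name is hypothetical, only kmemleak_not_leak() is the real API:

	#include <linux/slab.h>
	#include <linux/kmemleak.h>	/* include what you use, not what you inherit */

	/* hypothetical example: a table allocated once at boot and kept forever */
	static void *alloc_boot_table(size_t len)
	{
		void *p = kzalloc(len, GFP_KERNEL);

		if (p)
			kmemleak_not_leak(p);	/* tell kmemleak this is not a leak */
		return p;
	}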
arch/sparc/kernel/kstack.h
@@ -61,4 +61,23 @@ check_magic:
 
 }
 
+static inline __attribute__((always_inline)) void *set_hardirq_stack(void)
+{
+	void *orig_sp, *sp = hardirq_stack[smp_processor_id()];
+
+	__asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
+	if (orig_sp < sp ||
+	    orig_sp > (sp + THREAD_SIZE)) {
+		sp += THREAD_SIZE - 192 - STACK_BIAS;
+		__asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
+	}
+
+	return orig_sp;
+}
+
+static inline __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
+{
+	__asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
+}
+
 #endif /* _KSTACK_H */
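The moved helpers gain a spelling change: `static inline __attribute__((always_inline))`. The `static inline` gives them valid linkage in a header included from several .c files, and always_inline remains essential because a genuine out-of-line call would push a return frame on the very stack being switched away from. A rough user-space model of the range check — THREAD_SIZE is assumed here, 2047 is the sparc64 ABI stack bias, 192 the register save area, and the model ignores that a real %sp is biased:

	#include <stdio.h>

	#define THREAD_SIZE	(16 * 1024)	/* assumed; the kernel takes this from thread_info.h */
	#define STACK_BIAS	2047		/* sparc64 ABI stack bias */
	#define SAVE_AREA	192		/* register window save area */

	static char irq_stack[THREAD_SIZE];	/* stand-in for hardirq_stack[cpu] */

	/* Switch to the top of the per-cpu IRQ stack unless the current
	 * stack pointer already lies inside it (i.e. we are nested). */
	static char *pick_sp(char *cur_sp)
	{
		if (cur_sp < irq_stack || cur_sp > irq_stack + THREAD_SIZE)
			return irq_stack + THREAD_SIZE - SAVE_AREA - STACK_BIAS;
		return cur_sp;
	}

	int main(void)
	{
		char outside;

		printf("from outside: %p\n", (void *)pick_sp(&outside));
		printf("nested:       %p\n", (void *)pick_sp(irq_stack + 64));
		return 0;
	}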
arch/sparc/kernel/nmi.c
@@ -23,6 +23,8 @@
 #include <asm/ptrace.h>
 #include <asm/pcr.h>
 
+#include "kstack.h"
+
 /* We don't have a real NMI on sparc64, but we can fake one
  * up using profiling counter overflow interrupts and interrupt
  * levels.
@@ -92,6 +94,7 @@ static void die_nmi(const char *str, struct pt_regs *regs, int do_panic)
 notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
 {
 	unsigned int sum, touched = 0;
+	void *orig_sp;
 
 	clear_softint(1 << irq);
 
@@ -99,6 +102,8 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
 
 	nmi_enter();
 
+	orig_sp = set_hardirq_stack();
+
 	if (notify_die(DIE_NMI, "nmi", regs, 0,
 		       pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
 		touched = 1;
@@ -124,6 +129,8 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
 		pcr_ops->write(pcr_enable);
 	}
 
+	restore_hardirq_stack(orig_sp);
+
 	nmi_exit();
 }
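Condensed, the nmi.c change is a bracket: the NMI body now runs between a switch to the per-cpu hardirq stack and a restore, so profiling NMIs no longer pile frames on whatever kernel stack they happened to interrupt. The shape, condensed from the four hunks above (not a standalone-compilable program, just the skeleton of perfctr_irq after the patch):

	notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
	{
		void *orig_sp;

		clear_softint(1 << irq);
		nmi_enter();
		orig_sp = set_hardirq_stack();	/* no-op if already on the IRQ stack */

		/* ... notify_die(DIE_NMI, ...), watchdog checks, PCR rewrite ... */

		restore_hardirq_stack(orig_sp);	/* back to the interrupted stack */
		nmi_exit();
	}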
arch/sparc/kernel/rtrap_64.S
@@ -130,7 +130,17 @@ rtrap_xcall:
 		 nop
 		call			trace_hardirqs_on
 		 nop
-		wrpr			%l4, %pil
+		/* Do not actually set the %pil here.  We will do that
+		 * below after we clear PSTATE_IE in the %pstate register.
+		 * If we re-enable interrupts here, we can recurse down
+		 * the hardirq stack potentially endlessly, causing a
+		 * stack overflow.
+		 *
+		 * It is tempting to put this test and trace_hardirqs_on
+		 * call at the 'rt_continue' label, but that will not work
+		 * as that path hits unconditionally and we do not want to
+		 * execute this in NMI return paths, for example.
+		 */
 #endif
+rtrap_no_irq_enable:
 		andcc			%l1, TSTATE_PRIV, %l3
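The new in-diff comment explains the bug; restated as a toy model, the fix is pure ordering. On sparc64 there are two interrupt controls: the %pil priority level and the PSTATE_IE enable bit. Lowering %pil while IE is still set opens a window in which a pending interrupt traps out of the trap-return path itself, recursing onto the hardirq stack. A compilable C model of the fixed ordering — the helper names stand in for the two assembly operations and are not real kernel APIs:

	#include <stdio.h>

	static void trace_hardirqs_on(void) { puts("lockdep: irqs-on recorded"); }
	static void clear_pstate_ie(void)   { puts("PSTATE.IE cleared: fully masked"); }
	static void write_pil(int pil)      { printf("%%pil lowered to %d\n", pil); }

	int main(void)
	{
		trace_hardirqs_on();	/* bookkeeping only, no hardware effect */
		/* old order: write_pil() here could take an interrupt while
		 * still in the trap-return path and recurse onto the
		 * hardirq stack */
		clear_pstate_ie();	/* fix: mask everything first ... */
		write_pil(0);		/* ... then lowering %pil cannot trap */
		return 0;
	}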
arch/sparc/kernel/unaligned_64.c
@@ -50,7 +50,7 @@ static inline enum direction decode_direction(unsigned int insn)
 }
 
 /* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */
-static inline int decode_access_size(unsigned int insn)
+static inline int decode_access_size(struct pt_regs *regs, unsigned int insn)
 {
 	unsigned int tmp;
 
@@ -66,7 +66,7 @@ static inline int decode_access_size(unsigned int insn)
 		return 2;
 	else {
 		printk("Impossible unaligned trap. insn=%08x\n", insn);
-		die_if_kernel("Byte sized unaligned access?!?!", current_thread_info()->kregs);
+		die_if_kernel("Byte sized unaligned access?!?!", regs);
 
 		/* GCC should never warn that control reaches the end
 		 * of this function without returning a value because
@@ -286,7 +286,7 @@ static void log_unaligned(struct pt_regs *regs)
 asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
 {
 	enum direction dir = decode_direction(insn);
-	int size = decode_access_size(insn);
+	int size = decode_access_size(regs, insn);
 	int orig_asi, asi;
 
 	current_thread_info()->kern_una_regs = regs;
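The unaligned_64.c fix is a plumbing change: the error path in decode_access_size() passed current_thread_info()->kregs to die_if_kernel(), but those need not be the registers of the trap being handled, so the crash report could show stale state. Threading the trapping pt_regs through explicitly fixes the report. The general shape of the fix, with hypothetical names and an invented size encoding purely for illustration:

	#include <stdio.h>

	struct regs { unsigned long pc; };

	/* the error path reports the context it was handed, not global
	 * state that may not describe this trap yet */
	static int decode_size(struct regs *regs, unsigned int code)
	{
		if (code > 3) {
			fprintf(stderr, "impossible size, pc=%#lx\n", regs->pc);
			return -1;
		}
		return 1 << code;	/* hypothetical encoding: 1, 2, 4, 8 bytes */
	}

	int main(void)
	{
		struct regs r = { 0xdeadbeef };

		printf("size=%d\n", decode_size(&r, 2));
		return 0;
	}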
arch/sparc/lib/mcount.S
@@ -34,7 +34,7 @@ mcount:
 		cmp		%g1, %g2
 		be,pn		%icc, 1f
 		 mov		%i7, %g3
-		save		%sp, -128, %sp
+		save		%sp, -176, %sp
 		mov		%g3, %o1
 		jmpl		%g1, %o7
 		 mov		%i7, %o0
@@ -56,7 +56,7 @@ mcount:
 		 nop
 5:		mov		%i7, %g2
 		mov		%fp, %g3
-		save		%sp, -128, %sp
+		save		%sp, -176, %sp
 		mov		%g2, %l0
 		ba,pt		%xcc, ftrace_graph_caller
 		 mov		%g3, %l1
@@ -85,7 +85,7 @@ ftrace_caller:
 		lduw		[%g1 + %lo(function_trace_stop)], %g1
 		brnz,pn		%g1, ftrace_stub
 		 mov		%fp, %g3
-		save		%sp, -128, %sp
+		save		%sp, -176, %sp
 		mov		%g2, %o1
 		mov		%g2, %l0
 		mov		%g3, %l1
@@ -120,7 +120,7 @@ ENTRY(ftrace_graph_caller)
 END(ftrace_graph_caller)
 
 ENTRY(return_to_handler)
-		save		%sp, -128, %sp
+		save		%sp, -176, %sp
 		call		ftrace_return_to_handler
 		 mov		%fp, %o0
 		jmpl		%o0 + 8, %g0
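Why every stub's frame grows from -128 to -176: the 64-bit sparc ABI requires each stack frame to provide 16 eight-byte slots for the register window spill plus 6 eight-byte outgoing-argument slots. 128 bytes covers only the window, so any C function called from these ftrace stubs could legally clobber the 48 bytes beyond the undersized frame. The arithmetic as a standalone check:

	#include <stdio.h>

	int main(void)
	{
		int slot   = 8;			/* 64-bit registers */
		int window = 16 * slot;		/* register window spill area: 128 */
		int args   = 6 * slot;		/* outgoing argument slots:     48 */

		printf("minimum sparc64 frame: %d bytes\n", window + args);	/* 176 */
		return 0;
	}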