Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/perfcounters into perfcounters/core
commit 8178d00050
@@ -104,8 +104,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 	else
 		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
 
-#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP)
-	/* Second case is 32-bit with 64-bit PTE in SMP mode. In this case, we
+#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
+	/* Second case is 32-bit with 64-bit PTE. In this case, we
 	 * can just store as long as we do the two halves in the right order
 	 * with a barrier in between. This is possible because we take care,
 	 * in the hash code, to pre-invalidate if the PTE was already hashed,
@@ -140,7 +140,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 
 #else
 	/* Anything else just stores the PTE normally. That covers all 64-bit
-	 * cases, and 32-bit non-hash with 64-bit PTEs in UP mode
+	 * cases, and 32-bit non-hash with 32-bit PTEs.
 	 */
 	*ptep = pte;
 #endif
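The comment in this hunk relies on an ordering rule worth spelling out: with 64-bit PTEs on a 32-bit CPU, the PTE is written as two word-sized stores, and the half containing the valid bit must land last, with a barrier in between, so a concurrent walker never sees a half-written but valid-looking entry. A rough standalone sketch of the idea (the names, the fence choice, and which half carries the valid bit are illustrative; the kernel does this in inline assembly with stw/eieio/stw):

#include <stdint.h>

/* Sketch: store a 64-bit PTE as two 32-bit halves in a safe order. */
void store_pte_64_on_32(volatile uint32_t *half_without_valid,
                        volatile uint32_t *half_with_valid,
                        uint64_t new_pte)
{
        *half_without_valid = (uint32_t)(new_pte >> 32);
        __atomic_thread_fence(__ATOMIC_RELEASE); /* order the two stores */
        *half_with_valid = (uint32_t)new_pte;    /* valid bit lands last */
}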
@@ -97,7 +97,7 @@ obj64-$(CONFIG_AUDIT) += compat_audit.o
 
 obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
-obj-$(CONFIG_PPC_PERF_CTRS) += perf_counter.o
+obj-$(CONFIG_PPC_PERF_CTRS) += perf_counter.o perf_callchain.o
 obj64-$(CONFIG_PPC_PERF_CTRS) += power4-pmu.o ppc970-pmu.o power5-pmu.o \
 				   power5+-pmu.o power6-pmu.o power7-pmu.o
 obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o
@@ -67,6 +67,8 @@ int main(void)
 	DEFINE(MMCONTEXTID, offsetof(struct mm_struct, context.id));
 #ifdef CONFIG_PPC64
 	DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context));
+	DEFINE(SIGSEGV, SIGSEGV);
+	DEFINE(NMI_MASK, NMI_MASK);
 #else
 	DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
 #endif /* CONFIG_PPC64 */
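For readers unfamiliar with asm-offsets.c: the DEFINE() entries added here are not ordinary C definitions. The file is only ever compiled with -S, and kbuild scrapes marker lines out of the generated assembly to produce asm-offsets.h, which is what lets the assembly below refer to SIGSEGV and NMI_MASK by name. A sketch of the mechanism (the DEFINE macro approximates include/linux/kbuild.h; the struct and the NMI_MASK value are stand-ins):

#include <stddef.h>

#define DEFINE(sym, val) \
        asm volatile("\n->" #sym " %0 " #val : : "i" (val))

struct task_demo {              /* stand-in for the real task_struct */
        long state;
        void *stack;
};

int main(void)
{
        DEFINE(THREAD_INFO, offsetof(struct task_demo, stack));
        DEFINE(NMI_MASK, 0x04000000);   /* value assumed, for illustration */
        return 0;
}

Compiling this with gcc -S and grepping the output for "->" shows the name/value pairs that would be turned into #defines.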
@@ -729,6 +729,11 @@ BEGIN_FTR_SECTION
 	bne-	do_ste_alloc		/* If so handle it */
 END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
 
+	clrrdi	r11,r1,THREAD_SHIFT
+	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
+	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
+	bne	77f			/* then don't call hash_page now */
+
 	/*
 	 * On iSeries, we soft-disable interrupts here, then
 	 * hard-enable interrupts so that the hash_page code can spin on
@@ -833,6 +838,20 @@ handle_page_fault:
 	bl	.low_hash_fault
 	b	.ret_from_except
 
+/*
+ * We come here as a result of a DSI at a point where we don't want
+ * to call hash_page, such as when we are accessing memory (possibly
+ * user memory) inside a PMU interrupt that occurred while interrupts
+ * were soft-disabled. We want to invoke the exception handler for
+ * the access, or panic if there isn't a handler.
+ */
+77:	bl	.save_nvgprs
+	mr	r4,r3
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	li	r5,SIGSEGV
+	bl	.bad_page_fault
+	b	.ret_from_except
+
 /* here we have a segment miss */
 do_ste_alloc:
 	bl	.ste_allocate		/* try to insert stab entry */
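The instructions added at +729 read the preempt count out of thread_info (found by rounding the stack pointer down to a THREAD_SIZE boundary, the clrrdi) and test the NMI_MASK bits: if the fault happened inside a PMU interrupt that fired while interrupts were only soft-disabled, hash_page must be skipped and the fault routed to bad_page_fault via label 77. A standalone model of just the bit test (the mask value is an assumption for illustration; see linux/hardirq.h for the real layout):

#include <stdio.h>

#define NMI_MASK 0x04000000u    /* assumed bit position, illustrative */

/* Mirrors "andis. r0,r0,NMI_MASK@h": are we in an NMI-like context? */
static int in_nmi_like_context(unsigned int preempt_count)
{
        return (preempt_count & NMI_MASK) != 0;
}

int main(void)
{
        printf("%d\n", in_nmi_like_context(0x04000001u)); /* 1: skip hash_page */
        printf("%d\n", in_nmi_like_context(0x00000001u)); /* 0: normal path */
        return 0;
}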
@@ -0,0 +1,527 @@
+/*
+ * Performance counter callchain support - powerpc architecture code
+ *
+ * Copyright © 2009 Paul Mackerras, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/perf_counter.h>
+#include <linux/percpu.h>
+#include <linux/uaccess.h>
+#include <linux/mm.h>
+#include <asm/ptrace.h>
+#include <asm/pgtable.h>
+#include <asm/sigcontext.h>
+#include <asm/ucontext.h>
+#include <asm/vdso.h>
+#ifdef CONFIG_PPC64
+#include "ppc32.h"
+#endif
+
+/*
+ * Store another value in a callchain_entry.
+ */
+static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
+{
+	unsigned int nr = entry->nr;
+
+	if (nr < PERF_MAX_STACK_DEPTH) {
+		entry->ip[nr] = ip;
+		entry->nr = nr + 1;
+	}
+}
+
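For context: callchain_store() appends into a flat, fixed-size buffer of u64 values in which PERF_CONTEXT_KERNEL and PERF_CONTEXT_USER act as sentinels marking where the kernel and user portions of one trace begin, so a single sample can carry both. A minimal userspace model of that buffer (the constant values are assumptions modelled on the perf_counter ABI of this period, not taken from this commit):

#include <stdio.h>
#include <stdint.h>

#define PERF_MAX_STACK_DEPTH    255
#define PERF_CONTEXT_KERNEL     ((uint64_t)-128)
#define PERF_CONTEXT_USER       ((uint64_t)-512)

struct callchain_entry {
        uint64_t nr;
        uint64_t ip[PERF_MAX_STACK_DEPTH];
};

static void callchain_store(struct callchain_entry *entry, uint64_t ip)
{
        if (entry->nr < PERF_MAX_STACK_DEPTH)
                entry->ip[entry->nr++] = ip;
}

int main(void)
{
        struct callchain_entry e = { 0 };

        callchain_store(&e, PERF_CONTEXT_KERNEL);       /* kernel frames follow */
        callchain_store(&e, 0xc000000000abcdefULL);
        callchain_store(&e, PERF_CONTEXT_USER);         /* user frames follow */
        callchain_store(&e, 0x10001234ULL);
        for (uint64_t i = 0; i < e.nr; i++)
                printf("0x%llx\n", (unsigned long long)e.ip[i]);
        return 0;
}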
+/*
+ * Is sp valid as the address of the next kernel stack frame after prev_sp?
+ * The next frame may be in a different stack area but should not go
+ * back down in the same stack area.
+ */
+static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
+{
+	if (sp & 0xf)
+		return 0;		/* must be 16-byte aligned */
+	if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
+		return 0;
+	if (sp >= prev_sp + STACK_FRAME_OVERHEAD)
+		return 1;
+	/*
+	 * sp could decrease when we jump off an interrupt stack
+	 * back to the regular process stack.
+	 */
+	if ((sp & ~(THREAD_SIZE - 1)) != (prev_sp & ~(THREAD_SIZE - 1)))
+		return 1;
+	return 0;
+}
+
+static void perf_callchain_kernel(struct pt_regs *regs,
+				  struct perf_callchain_entry *entry)
+{
+	unsigned long sp, next_sp;
+	unsigned long next_ip;
+	unsigned long lr;
+	long level = 0;
+	unsigned long *fp;
+
+	lr = regs->link;
+	sp = regs->gpr[1];
+	callchain_store(entry, PERF_CONTEXT_KERNEL);
+	callchain_store(entry, regs->nip);
+
+	if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
+		return;
+
+	for (;;) {
+		fp = (unsigned long *) sp;
+		next_sp = fp[0];
+
+		if (next_sp == sp + STACK_INT_FRAME_SIZE &&
+		    fp[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
+			/*
+			 * This looks like an interrupt frame for an
+			 * interrupt that occurred in the kernel
+			 */
+			regs = (struct pt_regs *)(sp + STACK_FRAME_OVERHEAD);
+			next_ip = regs->nip;
+			lr = regs->link;
+			level = 0;
+			callchain_store(entry, PERF_CONTEXT_KERNEL);
+
+		} else {
+			if (level == 0)
+				next_ip = lr;
+			else
+				next_ip = fp[STACK_FRAME_LR_SAVE];
+
+			/*
+			 * We can't tell which of the first two addresses
+			 * we get are valid, but we can filter out the
+			 * obviously bogus ones here. We replace them
+			 * with 0 rather than removing them entirely so
+			 * that userspace can tell which is which.
+			 */
+			if ((level == 1 && next_ip == lr) ||
+			    (level <= 1 && !kernel_text_address(next_ip)))
+				next_ip = 0;
+
+			++level;
+		}
+
+		callchain_store(entry, next_ip);
+		if (!valid_next_sp(next_sp, sp))
+			return;
+		sp = next_sp;
+	}
+}
+
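perf_callchain_kernel() leans entirely on the PowerPC stack-frame convention: word 0 of every frame holds the back chain (the caller's stack pointer), and a saved LR lives at a fixed word offset in the frame (STACK_FRAME_LR_SAVE; word 2 on 64-bit, word 1 on 32-bit). The same walk can be demonstrated from userspace. This sketch assumes it is built and run on PowerPC with frame pointers enabled; on other architectures the back-chain layout differs and the LR slot read would be meaningless:

#include <stdio.h>

#ifdef __powerpc64__
#define LR_SAVE_WORD 2          /* 64-bit ABI: LR save at sp + 16 */
#else
#define LR_SAVE_WORD 1          /* 32-bit ABI: LR save at sp + 4 */
#endif

static void __attribute__((noinline)) show_backtrace(void)
{
        unsigned long sp = (unsigned long)__builtin_frame_address(0);
        int level = 0;

        while (sp && level < 16) {
                unsigned long *fp = (unsigned long *)sp;
                unsigned long next_sp = fp[0];  /* word 0: back chain */

                if (level > 0)                  /* level 0 comes from LR */
                        printf("level %d: return address 0x%lx\n",
                               level, fp[LR_SAVE_WORD]);
                if (!next_sp)
                        break;
                sp = next_sp;
                level++;
        }
}

static void __attribute__((noinline)) middle(void)
{
        show_backtrace();
}

int main(void)
{
        middle();
        return 0;
}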
+#ifdef CONFIG_PPC64
+
+#ifdef CONFIG_HUGETLB_PAGE
+#define is_huge_psize(pagesize)	(HPAGE_SHIFT && mmu_huge_psizes[pagesize])
+#else
+#define is_huge_psize(pagesize)	0
+#endif
+
+/*
+ * On 64-bit we don't want to invoke hash_page on user addresses from
+ * interrupt context, so if the access faults, we read the page tables
+ * to find which page (if any) is mapped and access it directly.
+ */
+static int read_user_stack_slow(void __user *ptr, void *ret, int nb)
+{
+	pgd_t *pgdir;
+	pte_t *ptep, pte;
+	int pagesize;
+	unsigned long addr = (unsigned long) ptr;
+	unsigned long offset;
+	unsigned long pfn;
+	void *kaddr;
+
+	pgdir = current->mm->pgd;
+	if (!pgdir)
+		return -EFAULT;
+
+	pagesize = get_slice_psize(current->mm, addr);
+
+	/* align address to page boundary */
+	offset = addr & ((1ul << mmu_psize_defs[pagesize].shift) - 1);
+	addr -= offset;
+
+	if (is_huge_psize(pagesize))
+		ptep = huge_pte_offset(current->mm, addr);
+	else
+		ptep = find_linux_pte(pgdir, addr);
+
+	if (ptep == NULL)
+		return -EFAULT;
+	pte = *ptep;
+	if (!pte_present(pte) || !(pte_val(pte) & _PAGE_USER))
+		return -EFAULT;
+	pfn = pte_pfn(pte);
+	if (!page_is_ram(pfn))
+		return -EFAULT;
+
+	/* no highmem to worry about here */
+	kaddr = pfn_to_kaddr(pfn);
+	memcpy(ret, kaddr + offset, nb);
+	return 0;
+}
+
+static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
+{
+	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned long) ||
+	    ((unsigned long)ptr & 7))
+		return -EFAULT;
+
+	if (!__get_user_inatomic(*ret, ptr))
+		return 0;
+
+	return read_user_stack_slow(ptr, ret, 8);
+}
+
+static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
+{
+	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
+	    ((unsigned long)ptr & 3))
+		return -EFAULT;
+
+	if (!__get_user_inatomic(*ret, ptr))
+		return 0;
+
+	return read_user_stack_slow(ptr, ret, 4);
+}
+
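Both readers above follow the same shape: validate the pointer (range and natural alignment), try the cheap __get_user_inatomic() fast path, and only fall back to the page-table walk when that faults. A standalone model of the validation step (the TASK_SIZE value here is an assumed stand-in for the 64-bit user address limit):

#include <stdio.h>

#define TASK_SIZE 0x0000400000000000ULL /* assumed user address limit */

static int valid_user_read(unsigned long long addr, unsigned long long size)
{
        if (addr > TASK_SIZE - size)    /* also rejects the top of range */
                return 0;
        if (addr & (size - 1))          /* require natural alignment */
                return 0;
        return 1;
}

int main(void)
{
        printf("%d\n", valid_user_read(0x10000000, 8)); /* 1: ok */
        printf("%d\n", valid_user_read(0x10000004, 8)); /* 0: misaligned */
        printf("%d\n", valid_user_read(TASK_SIZE, 8));  /* 0: out of range */
        return 0;
}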
+static inline int valid_user_sp(unsigned long sp, int is_64)
+{
+	if (!sp || (sp & 7) || sp > (is_64 ? TASK_SIZE : 0x100000000UL) - 32)
+		return 0;
+	return 1;
+}
+
+/*
+ * 64-bit user processes use the same stack frame for RT and non-RT signals.
+ */
+struct signal_frame_64 {
+	char dummy[__SIGNAL_FRAMESIZE];
+	struct ucontext uc;
+	unsigned long unused[2];
+	unsigned int tramp[6];
+	struct siginfo *pinfo;
+	void *puc;
+	struct siginfo info;
+	char abigap[288];
+};
+
+static int is_sigreturn_64_address(unsigned long nip, unsigned long fp)
+{
+	if (nip == fp + offsetof(struct signal_frame_64, tramp))
+		return 1;
+	if (vdso64_rt_sigtramp && current->mm->context.vdso_base &&
+	    nip == current->mm->context.vdso_base + vdso64_rt_sigtramp)
+		return 1;
+	return 0;
+}
+
+/*
+ * Do some sanity checking on the signal frame pointed to by sp.
+ * We check the pinfo and puc pointers in the frame.
+ */
+static int sane_signal_64_frame(unsigned long sp)
+{
+	struct signal_frame_64 __user *sf;
+	unsigned long pinfo, puc;
+
+	sf = (struct signal_frame_64 __user *) sp;
+	if (read_user_stack_64((unsigned long __user *) &sf->pinfo, &pinfo) ||
+	    read_user_stack_64((unsigned long __user *) &sf->puc, &puc))
+		return 0;
+	return pinfo == (unsigned long) &sf->info &&
+	       puc == (unsigned long) &sf->uc;
+}
+
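The signal-frame detection is a heuristic with two prongs: the return address must match a sigreturn trampoline (either the copy inside the frame, located via offsetof, or the vDSO copy), and the frame's pinfo/puc pointers must point back into the frame itself. A userspace model of the offsetof prong (the mock layout is illustrative, not the real kernel frame):

#include <stdio.h>
#include <stddef.h>

struct mock_signal_frame {
        char dummy[128];        /* stand-in for __SIGNAL_FRAMESIZE */
        unsigned int tramp[6];  /* sigreturn trampoline instructions */
};

static int looks_like_sigreturn(unsigned long nip, unsigned long frame_base)
{
        return nip == frame_base + offsetof(struct mock_signal_frame, tramp);
}

int main(void)
{
        unsigned long base = 0x7fff0000;

        printf("%d\n", looks_like_sigreturn(base + 128, base)); /* 1 */
        printf("%d\n", looks_like_sigreturn(base + 64, base));  /* 0 */
        return 0;
}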
+static void perf_callchain_user_64(struct pt_regs *regs,
+				   struct perf_callchain_entry *entry)
+{
+	unsigned long sp, next_sp;
+	unsigned long next_ip;
+	unsigned long lr;
+	long level = 0;
+	struct signal_frame_64 __user *sigframe;
+	unsigned long __user *fp, *uregs;
+
+	next_ip = regs->nip;
+	lr = regs->link;
+	sp = regs->gpr[1];
+	callchain_store(entry, PERF_CONTEXT_USER);
+	callchain_store(entry, next_ip);
+
+	for (;;) {
+		fp = (unsigned long __user *) sp;
+		if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
+			return;
+		if (level > 0 && read_user_stack_64(&fp[2], &next_ip))
+			return;
+
+		/*
+		 * Note: the next_sp - sp >= signal frame size check
+		 * is true when next_sp < sp, which can happen when
+		 * transitioning from an alternate signal stack to the
+		 * normal stack.
+		 */
+		if (next_sp - sp >= sizeof(struct signal_frame_64) &&
+		    (is_sigreturn_64_address(next_ip, sp) ||
+		     (level <= 1 && is_sigreturn_64_address(lr, sp))) &&
+		    sane_signal_64_frame(sp)) {
+			/*
+			 * This looks like a signal frame
+			 */
+			sigframe = (struct signal_frame_64 __user *) sp;
+			uregs = sigframe->uc.uc_mcontext.gp_regs;
+			if (read_user_stack_64(&uregs[PT_NIP], &next_ip) ||
+			    read_user_stack_64(&uregs[PT_LNK], &lr) ||
+			    read_user_stack_64(&uregs[PT_R1], &sp))
+				return;
+			level = 0;
+			callchain_store(entry, PERF_CONTEXT_USER);
+			callchain_store(entry, next_ip);
+			continue;
+		}
+
+		if (level == 0)
+			next_ip = lr;
+		callchain_store(entry, next_ip);
+		++level;
+		sp = next_sp;
+	}
+}
+
+static inline int current_is_64bit(void)
+{
+	/*
+	 * We can't use test_thread_flag() here because we may be on an
+	 * interrupt stack, and the thread flags don't get copied over
+	 * from the thread_info on the main stack to the interrupt stack.
+	 */
+	return !test_ti_thread_flag(task_thread_info(current), TIF_32BIT);
+}
+
+#else /* CONFIG_PPC64 */
+
+/*
+ * On 32-bit we just access the address and let hash_page create a
+ * HPTE if necessary, so there is no need to fall back to reading
+ * the page tables. Since this is called at interrupt level,
+ * do_page_fault() won't treat a DSI as a page fault.
+ */
+static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
+{
+	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
+	    ((unsigned long)ptr & 3))
+		return -EFAULT;
+
+	return __get_user_inatomic(*ret, ptr);
+}
+
+static inline void perf_callchain_user_64(struct pt_regs *regs,
+					  struct perf_callchain_entry *entry)
+{
+}
+
+static inline int current_is_64bit(void)
+{
+	return 0;
+}
+
+static inline int valid_user_sp(unsigned long sp, int is_64)
+{
+	if (!sp || (sp & 7) || sp > TASK_SIZE - 32)
+		return 0;
+	return 1;
+}
+
+#define __SIGNAL_FRAMESIZE32	__SIGNAL_FRAMESIZE
+#define sigcontext32		sigcontext
+#define mcontext32		mcontext
+#define ucontext32		ucontext
+#define compat_siginfo_t	struct siginfo
+
+#endif /* CONFIG_PPC64 */
+
+/*
+ * Layout for non-RT signal frames
+ */
+struct signal_frame_32 {
+	char dummy[__SIGNAL_FRAMESIZE32];
+	struct sigcontext32 sctx;
+	struct mcontext32 mctx;
+	int abigap[56];
+};
+
+/*
+ * Layout for RT signal frames
+ */
+struct rt_signal_frame_32 {
+	char dummy[__SIGNAL_FRAMESIZE32 + 16];
+	compat_siginfo_t info;
+	struct ucontext32 uc;
+	int abigap[56];
+};
+
+static int is_sigreturn_32_address(unsigned int nip, unsigned int fp)
+{
+	if (nip == fp + offsetof(struct signal_frame_32, mctx.mc_pad))
+		return 1;
+	if (vdso32_sigtramp && current->mm->context.vdso_base &&
+	    nip == current->mm->context.vdso_base + vdso32_sigtramp)
+		return 1;
+	return 0;
+}
+
+static int is_rt_sigreturn_32_address(unsigned int nip, unsigned int fp)
+{
+	if (nip == fp + offsetof(struct rt_signal_frame_32,
+				 uc.uc_mcontext.mc_pad))
+		return 1;
+	if (vdso32_rt_sigtramp && current->mm->context.vdso_base &&
+	    nip == current->mm->context.vdso_base + vdso32_rt_sigtramp)
+		return 1;
+	return 0;
+}
+
+static int sane_signal_32_frame(unsigned int sp)
+{
+	struct signal_frame_32 __user *sf;
+	unsigned int regs;
+
+	sf = (struct signal_frame_32 __user *) (unsigned long) sp;
+	if (read_user_stack_32((unsigned int __user *) &sf->sctx.regs, &regs))
+		return 0;
+	return regs == (unsigned long) &sf->mctx;
+}
+
+static int sane_rt_signal_32_frame(unsigned int sp)
+{
+	struct rt_signal_frame_32 __user *sf;
+	unsigned int regs;
+
+	sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
+	if (read_user_stack_32((unsigned int __user *) &sf->uc.uc_regs, &regs))
+		return 0;
+	return regs == (unsigned long) &sf->uc.uc_mcontext;
+}
+
+static unsigned int __user *signal_frame_32_regs(unsigned int sp,
+				unsigned int next_sp, unsigned int next_ip)
+{
+	struct mcontext32 __user *mctx = NULL;
+	struct signal_frame_32 __user *sf;
+	struct rt_signal_frame_32 __user *rt_sf;
+
+	/*
+	 * Note: the next_sp - sp >= signal frame size check
+	 * is true when next_sp < sp, for example, when
+	 * transitioning from an alternate signal stack to the
+	 * normal stack.
+	 */
+	if (next_sp - sp >= sizeof(struct signal_frame_32) &&
+	    is_sigreturn_32_address(next_ip, sp) &&
+	    sane_signal_32_frame(sp)) {
+		sf = (struct signal_frame_32 __user *) (unsigned long) sp;
+		mctx = &sf->mctx;
+	}
+
+	if (!mctx && next_sp - sp >= sizeof(struct rt_signal_frame_32) &&
+	    is_rt_sigreturn_32_address(next_ip, sp) &&
+	    sane_rt_signal_32_frame(sp)) {
+		rt_sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
+		mctx = &rt_sf->uc.uc_mcontext;
+	}
+
+	if (!mctx)
+		return NULL;
+	return mctx->mc_gregs;
+}
+
+static void perf_callchain_user_32(struct pt_regs *regs,
+				   struct perf_callchain_entry *entry)
+{
+	unsigned int sp, next_sp;
+	unsigned int next_ip;
+	unsigned int lr;
+	long level = 0;
+	unsigned int __user *fp, *uregs;
+
+	next_ip = regs->nip;
+	lr = regs->link;
+	sp = regs->gpr[1];
+	callchain_store(entry, PERF_CONTEXT_USER);
+	callchain_store(entry, next_ip);
+
+	while (entry->nr < PERF_MAX_STACK_DEPTH) {
+		fp = (unsigned int __user *) (unsigned long) sp;
+		if (!valid_user_sp(sp, 0) || read_user_stack_32(fp, &next_sp))
+			return;
+		if (level > 0 && read_user_stack_32(&fp[1], &next_ip))
+			return;
+
+		uregs = signal_frame_32_regs(sp, next_sp, next_ip);
+		if (!uregs && level <= 1)
+			uregs = signal_frame_32_regs(sp, next_sp, lr);
+		if (uregs) {
+			/*
+			 * This looks like a signal frame, so restart
+			 * the stack trace with the values in it.
+			 */
+			if (read_user_stack_32(&uregs[PT_NIP], &next_ip) ||
+			    read_user_stack_32(&uregs[PT_LNK], &lr) ||
+			    read_user_stack_32(&uregs[PT_R1], &sp))
+				return;
+			level = 0;
+			callchain_store(entry, PERF_CONTEXT_USER);
+			callchain_store(entry, next_ip);
+			continue;
+		}
+
+		if (level == 0)
+			next_ip = lr;
+		callchain_store(entry, next_ip);
+		++level;
+		sp = next_sp;
+	}
+}
+
+/*
+ * Since we can't get PMU interrupts inside a PMU interrupt handler,
+ * we don't need separate irq and nmi entries here.
+ */
+static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);
+
+struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+{
+	struct perf_callchain_entry *entry = &__get_cpu_var(callchain);
+
+	entry->nr = 0;
+
+	if (current->pid == 0)		/* idle task? */
+		return entry;
+
+	if (!user_mode(regs)) {
+		perf_callchain_kernel(regs, entry);
+		if (current->mm)
+			regs = task_pt_regs(current);
+		else
+			regs = NULL;
+	}
+
+	if (regs) {
+		if (current_is_64bit())
+			perf_callchain_user_64(regs, entry);
+		else
+			perf_callchain_user_32(regs, entry);
+	}
+
+	return entry;
+}
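Reading perf_callchain() top to bottom: the kernel side is unwound first from the interrupt regs, then the walk restarts from the user registers saved at kernel entry (task_pt_regs), so a single buffer carries both halves, delimited by the context markers. A sketch of how a consumer might split such a buffer back apart (same illustrative constants as the earlier model; a 0 entry is an address the kernel filtered as bogus):

#include <stdio.h>
#include <stdint.h>

#define PERF_CONTEXT_KERNEL     ((uint64_t)-128)
#define PERF_CONTEXT_USER       ((uint64_t)-512)

static void print_chain(const uint64_t *ip, uint64_t nr)
{
        const char *ctx = "?";

        for (uint64_t i = 0; i < nr; i++) {
                if (ip[i] == PERF_CONTEXT_KERNEL) {
                        ctx = "kernel";
                        continue;
                }
                if (ip[i] == PERF_CONTEXT_USER) {
                        ctx = "user";
                        continue;
                }
                printf("%s: 0x%llx\n", ctx, (unsigned long long)ip[i]);
        }
}

int main(void)
{
        uint64_t chain[] = { PERF_CONTEXT_KERNEL, 0xc0000000001234ULL,
                             PERF_CONTEXT_USER, 0x10002468ULL, 0 };

        print_chain(chain, sizeof(chain) / sizeof(chain[0]));
        return 0;
}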
@@ -92,15 +92,13 @@ static inline void create_shadowed_slbe(unsigned long ea, int ssize,
 		     : "memory" );
 }
 
-void slb_flush_and_rebolt(void)
+static void __slb_flush_and_rebolt(void)
 {
 	/* If you change this make sure you change SLB_NUM_BOLTED
 	 * appropriately too. */
 	unsigned long linear_llp, vmalloc_llp, lflags, vflags;
 	unsigned long ksp_esid_data, ksp_vsid_data;
 
-	WARN_ON(!irqs_disabled());
-
 	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
 	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
 	lflags = SLB_VSID_KERNEL | linear_llp;
@@ -117,12 +115,6 @@ void slb_flush_and_rebolt(void)
 		ksp_vsid_data = get_slb_shadow()->save_area[2].vsid;
 	}
 
-	/*
-	 * We can't take a PMU exception in the following code, so hard
-	 * disable interrupts.
-	 */
-	hard_irq_disable();
-
 	/* We need to do this all in asm, so we're sure we don't touch
 	 * the stack between the slbia and rebolting it. */
 	asm volatile("isync\n"
@@ -139,6 +131,21 @@ void slb_flush_and_rebolt(void)
 		     : "memory");
 }
 
+void slb_flush_and_rebolt(void)
+{
+
+	WARN_ON(!irqs_disabled());
+
+	/*
+	 * We can't take a PMU exception in the following code, so hard
+	 * disable interrupts.
+	 */
+	hard_irq_disable();
+
+	__slb_flush_and_rebolt();
+	get_paca()->slb_cache_ptr = 0;
+}
+
 void slb_vmalloc_update(void)
 {
 	unsigned long vflags;
@@ -180,12 +187,20 @@ static inline int esids_match(unsigned long addr1, unsigned long addr2)
 /* Flush all user entries from the segment table of the current processor. */
 void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 {
-	unsigned long offset = get_paca()->slb_cache_ptr;
+	unsigned long offset;
 	unsigned long slbie_data = 0;
 	unsigned long pc = KSTK_EIP(tsk);
 	unsigned long stack = KSTK_ESP(tsk);
 	unsigned long unmapped_base;
 
+	/*
+	 * We need interrupts hard-disabled here, not just soft-disabled,
+	 * so that a PMU interrupt can't occur, which might try to access
+	 * user memory (to get a stack trace) and possibly cause an SLB miss
+	 * which would update the slb_cache/slb_cache_ptr fields in the PACA.
+	 */
+	hard_irq_disable();
+	offset = get_paca()->slb_cache_ptr;
 	if (!cpu_has_feature(CPU_FTR_NO_SLBIE_B) &&
 	    offset <= SLB_CACHE_ENTRIES) {
 		int i;
@@ -200,7 +215,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 		}
 		asm volatile("isync" : : : "memory");
 	} else {
-		slb_flush_and_rebolt();
+		__slb_flush_and_rebolt();
 	}
 
 	/* Workaround POWER5 < DD2.1 issue */
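The slb.c change is a standard wrapper/worker split: switch_slb(), which now hard-disables interrupts itself, calls the raw __slb_flush_and_rebolt() directly, while the public slb_flush_and_rebolt() keeps the WARN_ON, the hard_irq_disable() and the slb_cache_ptr reset. The shape of the refactor, as a generic runnable sketch (all names here are placeholders, not kernel API):

#include <stdio.h>

static int irqs_off;            /* placeholder for the real irq state */

static void __do_flush(void)    /* raw worker: assumes irqs already off */
{
        printf("flushing (irqs_off=%d)\n", irqs_off);
}

void do_flush(void)             /* public wrapper: establishes the guarantee */
{
        irqs_off = 1;           /* models hard_irq_disable() */
        __do_flush();
        irqs_off = 0;
        /* bookkeeping only the public entry point should do goes here */
}

int main(void)
{
        do_flush();
        return 0;
}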
@@ -164,7 +164,7 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
 {
 	struct stab_entry *stab = (struct stab_entry *) get_paca()->stab_addr;
 	struct stab_entry *ste;
-	unsigned long offset = __get_cpu_var(stab_cache_ptr);
+	unsigned long offset;
 	unsigned long pc = KSTK_EIP(tsk);
 	unsigned long stack = KSTK_ESP(tsk);
 	unsigned long unmapped_base;
@@ -172,6 +172,15 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
 	/* Force previous translations to complete. DRENG */
 	asm volatile("isync" : : : "memory");
 
+	/*
+	 * We need interrupts hard-disabled here, not just soft-disabled,
+	 * so that a PMU interrupt can't occur, which might try to access
+	 * user memory (to get a stack trace) and possibly cause an STAB miss
+	 * which would update the stab_cache/stab_cache_ptr per-cpu variables.
+	 */
+	hard_irq_disable();
+
+	offset = __get_cpu_var(stab_cache_ptr);
 	if (offset <= NR_STAB_CACHE_ENTRIES) {
 		int i;
 
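Both this hunk and the switch_slb() one exist because of how powerpc interrupt disabling is layered: local_irq_disable() only soft-disables (it sets a flag that ordinary interrupt delivery honours), while the PMU interrupt behaves like an NMI and ignores that flag; only hard_irq_disable(), which clears MSR[EE], actually keeps it out. A toy model of the distinction (illustrative only, not kernel API):

#include <stdio.h>

static int soft_enabled = 1;    /* models the PACA soft-enable flag */
static int msr_ee = 1;          /* models MSR[EE], the hardware enable bit */

static void deliver(const char *irq, int acts_like_nmi)
{
        if (!msr_ee)
                printf("%s: blocked in hardware\n", irq);
        else if (!soft_enabled && !acts_like_nmi)
                printf("%s: deferred by soft-disable\n", irq);
        else
                printf("%s: runs now\n", irq);
}

int main(void)
{
        soft_enabled = 0;       /* models local_irq_disable() (soft) */
        deliver("timer", 0);    /* deferred */
        deliver("PMU", 1);      /* still runs: this is the hazard */
        msr_ee = 0;             /* models hard_irq_disable() */
        deliver("PMU", 1);      /* now actually blocked */
        return 0;
}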
|
|
||||||
|
|
Loading…
Reference in New Issue