// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/extable.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/ptrace.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>

nds32: Perf porting

This commit ports perf to nds32.

1. Raw events:
Raw hardware events are specified with an 'r' prefix.
Usage:
perf stat -e rXYZ ./app
X:  the index of the performance counter.
YZ: the index of the event, in hexadecimal.
Example:
'perf stat -e r101 ./app' makes counter 1 count the instruction event.
The counter and event indexes are listed in the
"Andes System Privilege Architecture Version 3 Manual",
or run 'perf list' to see the symbolic names of the raw events.
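
As a rough illustration of the rXYZ encoding described above, here is a
small user-space sketch (hypothetical, not part of the kernel patch; it
assumes the split of X and YZ exactly as stated in the usage text):

  #include <stdio.h>

  /* Decode the number that follows 'r' in 'perf stat -e rXYZ':
   * X is the counter index, YZ is the event index in hexadecimal.
   * For r101 this prints "counter 1, event 0x01".
   */
  int main(void)
  {
          unsigned int raw = 0x101;        /* as typed in 'perf stat -e r101' */
          unsigned int counter = raw >> 8; /* X: performance counter index */
          unsigned int event = raw & 0xff; /* YZ: event index */

          printf("counter %u, event 0x%02x\n", counter, event);
          return 0;
  }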

2. Perf mmap2:
Fix an unexpected page fault on buffers mapped with perf mmap2().
When perf calls mmap2() and then reads from the mapped (legal) address
region in order to write the data out to a file descriptor, the write
fails with "failed to write." and -EFAULT.
The page fault handler returns VM_FAULT_SIGBUS, which should not happen
here because this is a read access.
See kernel/events/core.c:perf_mmap_fault(...): if
"(vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))" evaluates to true,
the fault is answered with VM_FAULT_SIGBUS.
However, this is not a write access; the flag describing why the page
fault happened was set incorrectly.
NDS32 SPAv3 cannot tell whether an access was a read or a write; it only
knows whether it was an instruction fetch or a data access.
Removing the bogus write-flag assignment (the hardware simply cannot
report the reason) fixes this bug.
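
A minimal stand-alone sketch of the failure mode described above (this is
an illustration only, not the kernel implementation; the flag value is a
made-up stand-in):

  #include <stdbool.h>
  #include <stdio.h>

  #define FAULT_FLAG_WRITE 0x01   /* hypothetical stand-in value */

  /* Mirrors the check quoted above from perf_mmap_fault(): a fault on a
   * non-zero page offset that carries FAULT_FLAG_WRITE is refused.
   */
  static bool perf_buf_fault_refused(unsigned long pgoff, unsigned int flags)
  {
          return pgoff && (flags & FAULT_FLAG_WRITE);
  }

  int main(void)
  {
          /* a read fault wrongly tagged as a write on data page 1: refused */
          printf("refused: %d\n", perf_buf_fault_refused(1, FAULT_FLAG_WRITE));
          /* the same fault without the bogus write flag: allowed */
          printf("refused: %d\n", perf_buf_fault_refused(1, 0));
          return 0;
  }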

3. Perf: multiple events mapped to the same counter.
When multiple events map to the same counter, the counter counts
inaccurately, because each counter can only count one event at a time;
events that share a counter must take turns in each context (for
example, r101 and r102 both use counter 1).
There are two possible solutions:
1. Print an error message when multiple events map to the same counter.
   However, printing the error message makes the program hang in a
   loop, and the LTP (Linux Test Project) suite then fails.
2. Don't print an error message. LTP passes, but users need to know not
   to count events that map to the same counter, or they will get
   inaccurate results.
We chose option 2.

Signed-off-by: Nickhu <nickhu@andestech.com>
Acked-by: Greentime Hu <greentime@andestech.com>
Signed-off-by: Greentime Hu <greentime@andestech.com>

#include <linux/perf_event.h>

#include <asm/tlbflush.h>

extern void die(const char *str, struct pt_regs *regs, long err);

/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
void show_pte(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;

        if (!mm)
                mm = &init_mm;

        pr_alert("pgd = %p\n", mm->pgd);
        pgd = pgd_offset(mm, addr);
        pr_alert("[%08lx] *pgd=%08lx", addr, pgd_val(*pgd));

        do {
                p4d_t *p4d;
                pud_t *pud;
                pmd_t *pmd;

                if (pgd_none(*pgd))
                        break;

                if (pgd_bad(*pgd)) {
                        pr_alert("(bad)");
                        break;
                }

                p4d = p4d_offset(pgd, addr);
                pud = pud_offset(p4d, addr);
                pmd = pmd_offset(pud, addr);
#if PTRS_PER_PMD != 1
                pr_alert(", *pmd=%08lx", pmd_val(*pmd));
#endif

                if (pmd_none(*pmd))
                        break;

                if (pmd_bad(*pmd)) {
                        pr_alert("(bad)");
                        break;
                }

                if (IS_ENABLED(CONFIG_HIGHMEM)) {
                        pte_t *pte;

                        /* We must not map this if we have highmem enabled */
                        pte = pte_offset_map(pmd, addr);
                        pr_alert(", *pte=%08lx", pte_val(*pte));
                        pte_unmap(pte);
                }
        } while (0);

        pr_alert("\n");
}

void do_page_fault(unsigned long entry, unsigned long addr,
                   unsigned int error_code, struct pt_regs *regs)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        int si_code;
        vm_fault_t fault;
        unsigned int mask = VM_ACCESS_FLAGS;
        unsigned int flags = FAULT_FLAG_DEFAULT;

        error_code = error_code & (ITYPE_mskINST | ITYPE_mskETYPE);
        tsk = current;
        mm = tsk->mm;
        si_code = SEGV_MAPERR;

        /*
         * We fault-in kernel-space virtual memory on-demand. The
         * 'reference' page table is init_mm.pgd.
         *
         * NOTE! We MUST NOT take any locks for this case. We may
         * be in an interrupt or a critical region, and should
         * only copy the information from the master page table,
         * nothing more.
         */
        if (addr >= TASK_SIZE) {
                if (user_mode(regs))
                        goto bad_area_nosemaphore;

                if (addr >= TASK_SIZE && addr < VMALLOC_END
                    && (entry == ENTRY_PTE_NOT_PRESENT))
                        goto vmalloc_fault;
                else
                        goto no_context;
        }

        /* Send a signal to the task for handling the unaligned access. */
        if (entry == ENTRY_GENERAL_EXCPETION
            && error_code == ETYPE_ALIGNMENT_CHECK) {
                if (user_mode(regs))
                        goto bad_area_nosemaphore;
                else
                        goto no_context;
        }

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
        if (unlikely(faulthandler_disabled() || !mm))
                goto no_context;

        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

        /*
         * As per x86, we may deadlock here. However, since the kernel only
         * validly references user space from well defined areas of the code,
         * we can bug out early if this is from code which shouldn't.
         */
        if (unlikely(!mmap_read_trylock(mm))) {
                if (!user_mode(regs) &&
                    !search_exception_tables(instruction_pointer(regs)))
                        goto no_context;
retry:
                mmap_read_lock(mm);
        } else {
                /*
                 * The above mmap_read_trylock() might have succeeded in
                 * which case, we'll have missed the might_sleep() from
                 * mmap_read_lock().
                 */
                might_sleep();
                if (IS_ENABLED(CONFIG_DEBUG_VM)) {
                        if (!user_mode(regs) &&
                            !search_exception_tables(instruction_pointer(regs)))
                                goto no_context;
                }
        }

        vma = find_vma(mm, addr);

        if (unlikely(!vma))
                goto bad_area;

        if (vma->vm_start <= addr)
                goto good_area;

        if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
                goto bad_area;

        if (unlikely(expand_stack(vma, addr)))
                goto bad_area;

        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */

good_area:
        si_code = SEGV_ACCERR;

        /* first do some preliminary protection checks */
        if (entry == ENTRY_PTE_NOT_PRESENT) {
                if (error_code & ITYPE_mskINST)
                        mask = VM_EXEC;
                else {
                        mask = VM_READ | VM_WRITE;
                }
        } else if (entry == ENTRY_TLB_MISC) {
                switch (error_code & ITYPE_mskETYPE) {
                case RD_PROT:
                        mask = VM_READ;
                        break;
                case WRT_PROT:
                        mask = VM_WRITE;
                        flags |= FAULT_FLAG_WRITE;
                        break;
                case NOEXEC:
                        mask = VM_EXEC;
                        break;
                case PAGE_MODIFY:
                        mask = VM_WRITE;
                        flags |= FAULT_FLAG_WRITE;
                        break;
                case ACC_BIT:
                        BUG();
                default:
                        break;
                }
        }

        if (!(vma->vm_flags & mask))
                goto bad_area;

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */

        fault = handle_mm_fault(vma, addr, flags, regs);

        /*
         * If we need to retry but a fatal signal is pending, handle the
         * signal first. We do not need to release the mmap_lock because it
         * would already be released in __lock_page_or_retry in mm/filemap.c.
         */
        if (fault_signal_pending(fault, regs)) {
                if (!user_mode(regs))
                        goto no_context;
                return;
        }

        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                else
                        goto bad_area;
        }

        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_RETRY) {
                        flags |= FAULT_FLAG_TRIED;

                        /* No need to mmap_read_unlock(mm) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */
                        goto retry;
                }
        }

        mmap_read_unlock(mm);
        return;

        /*
         * Something tried to access memory that isn't in our memory map..
         * Fix it, but check if it's kernel or user first..
         */
bad_area:
        mmap_read_unlock(mm);

bad_area_nosemaphore:

        /* User mode accesses just cause a SIGSEGV */

        if (user_mode(regs)) {
                tsk->thread.address = addr;
                tsk->thread.error_code = error_code;
                tsk->thread.trap_no = entry;
                force_sig_fault(SIGSEGV, si_code, (void __user *)addr);
                return;
        }

no_context:

        /* Are we prepared to handle this kernel fault?
         *
         * (The kernel has valid exception-points in the source
         *  when it accesses user-memory. When it fails in one
         *  of those points, we find it in a table and do a jump
         *  to some fixup code that loads an appropriate error
         *  code)
         */
        {
                const struct exception_table_entry *entry;

                if ((entry =
                     search_exception_tables(instruction_pointer(regs))) !=
                    NULL) {
                        /* Adjust the instruction pointer in the stackframe */
                        instruction_pointer(regs) = entry->fixup;
                        return;
                }
        }

        /*
         * Oops. The kernel tried to access some bad page. We'll have to
         * terminate things with extreme prejudice.
         */
        bust_spinlocks(1);
        pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
                 (addr < PAGE_SIZE) ? "NULL pointer dereference" :
                 "paging request", addr);

        show_pte(mm, addr);
        die("Oops", regs, error_code);
        bust_spinlocks(0);
        do_exit(SIGKILL);

        return;

        /*
         * We ran out of memory, or some other thing happened to us that made
         * us unable to handle the page fault gracefully.
         */
out_of_memory:
        mmap_read_unlock(mm);
        if (!user_mode(regs))
                goto no_context;
        pagefault_out_of_memory();
        return;

do_sigbus:
        mmap_read_unlock(mm);

        /* Kernel mode? Handle exceptions or die */
        if (!user_mode(regs))
                goto no_context;

        /*
         * Send a sigbus
         */
        tsk->thread.address = addr;
        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = entry;
        force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)addr);

        return;

vmalloc_fault:
        {
                /*
                 * Synchronize this task's top level page-table
                 * with the 'reference' page table.
                 *
                 * Use current_pgd instead of tsk->active_mm->pgd
                 * since the latter might be unavailable if this
                 * code is executed in a misfortunately run irq
                 * (like inside schedule() between switch_mm and
                 *  switch_to...).
                 */

                unsigned int index = pgd_index(addr);
                pgd_t *pgd, *pgd_k;
                p4d_t *p4d, *p4d_k;
                pud_t *pud, *pud_k;
                pmd_t *pmd, *pmd_k;
                pte_t *pte_k;

                pgd = (pgd_t *) __va(__nds32__mfsr(NDS32_SR_L1_PPTB)) + index;
                pgd_k = init_mm.pgd + index;

                if (!pgd_present(*pgd_k))
                        goto no_context;

                p4d = p4d_offset(pgd, addr);
                p4d_k = p4d_offset(pgd_k, addr);
                if (!p4d_present(*p4d_k))
                        goto no_context;

                pud = pud_offset(p4d, addr);
                pud_k = pud_offset(p4d_k, addr);
                if (!pud_present(*pud_k))
                        goto no_context;

                pmd = pmd_offset(pud, addr);
                pmd_k = pmd_offset(pud_k, addr);
                if (!pmd_present(*pmd_k))
                        goto no_context;

                if (!pmd_present(*pmd))
                        set_pmd(pmd, *pmd_k);
                else
                        BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));

                /*
                 * Since the vmalloc area is global, we don't
                 * need to copy individual PTEs; it is enough to
                 * copy the pgd pointer into the pte page of the
                 * root task. If that is there, we'll find our pte if
                 * it exists.
                 */

                /* Make sure the actual PTE exists as well to
                 * catch kernel vmalloc-area accesses to non-mapped
                 * addresses. If we don't do this, this will just
                 * silently loop forever.
                 */
                pte_k = pte_offset_kernel(pmd_k, addr);
                if (!pte_present(*pte_k))
                        goto no_context;

                return;
        }
}