// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * Added mmcra[slot] support:
 * Copyright (C) 2006-2007 Will Schmidt <willschm@us.ibm.com>, IBM
 */

#include <linux/oprofile.h>
#include <linux/smp.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/rtas.h>
#include <asm/oprofile_impl.h>
#include <asm/reg.h>

#define dbg(args...)

#define OPROFILE_PM_PMCSEL_MSK 0xffULL
#define OPROFILE_PM_UNIT_SHIFT 60
#define OPROFILE_PM_UNIT_MSK 0xfULL
#define OPROFILE_MAX_PMC_NUM 3
#define OPROFILE_PMSEL_FIELD_WIDTH 8
#define OPROFILE_UNIT_FIELD_WIDTH 4
#define MMCRA_SIAR_VALID_MASK 0x10000000ULL

static unsigned long reset_value[OP_MAX_COUNTER];

static int oprofile_running;
static int use_slot_nums;

/* mmcr values are set in power4_reg_setup, used in power4_cpu_setup */
static u32 mmcr0_val;
static u64 mmcr1_val;
static u64 mmcra_val;
static u32 cntr_marked_events;

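/*
 * power7_marked_instr_event() returns a bitmap with bit 'pmc' set when that
 * counter is programmed with a marked event.  As a rough sketch of the
 * field extraction below (derived only from the masks above, not from the
 * architecture documents), for example:
 *
 *	pmc 0: psel = (mmcr1 >> 24) & 0xfe;  unit = (mmcr1 >> 60) & 0xf;
 *	pmc 3: psel = (mmcr1 >>  0) & 0xfe;  unit = (mmcr1 >> 36) & 0xf;
 */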
static int power7_marked_instr_event(u64 mmcr1)
{
	u64 psel, unit;
	int pmc, cntr_marked_events = 0;

	/* Given the MMCR1 value, look at the field for each counter to
	 * determine if it is a marked event.  Code based on the function
	 * power7_marked_instr_event() in file arch/powerpc/perf/power7-pmu.c.
	 */
	for (pmc = 0; pmc < 4; pmc++) {
		psel = mmcr1 & (OPROFILE_PM_PMCSEL_MSK
				<< (OPROFILE_MAX_PMC_NUM - pmc)
				* OPROFILE_PMSEL_FIELD_WIDTH);
		psel = (psel >> ((OPROFILE_MAX_PMC_NUM - pmc)
				 * OPROFILE_PMSEL_FIELD_WIDTH)) & ~1ULL;
		unit = mmcr1 & (OPROFILE_PM_UNIT_MSK
				<< (OPROFILE_PM_UNIT_SHIFT
				    - (pmc * OPROFILE_PMSEL_FIELD_WIDTH)));
		unit = unit >> (OPROFILE_PM_UNIT_SHIFT
				- (pmc * OPROFILE_PMSEL_FIELD_WIDTH));

		switch (psel >> 4) {
		case 2:
			cntr_marked_events |= (pmc == 1 || pmc == 3) << pmc;
			break;
		case 3:
			if (psel == 0x3c) {
				cntr_marked_events |= (pmc == 0) << pmc;
				break;
			}

			if (psel == 0x3e) {
				cntr_marked_events |= (pmc != 1) << pmc;
				break;
			}

			cntr_marked_events |= 1 << pmc;
			break;
		case 4:
		case 5:
			cntr_marked_events |= (unit == 0xd) << pmc;
			break;
		case 6:
			if (psel == 0x64)
				cntr_marked_events |= (pmc >= 2) << pmc;
			break;
		case 8:
			cntr_marked_events |= (unit == 0xd) << pmc;
			break;
		}
	}
	return cntr_marked_events;
}

static int power4_reg_setup(struct op_counter_config *ctr,
			     struct op_system_config *sys,
			     int num_ctrs)
{
	int i;

	/*
	 * The performance counter event settings are given in the mmcr0,
	 * mmcr1 and mmcra values passed from the user in the
	 * op_system_config structure (sys variable).
	 */
	mmcr0_val = sys->mmcr0;
	mmcr1_val = sys->mmcr1;
	mmcra_val = sys->mmcra;

	/* Power 7+ and newer architectures:
	 * Determine which counter events in the group (the group of events is
	 * specified by the bit settings in the MMCR1 register) are marked
	 * events for use in the interrupt handler.  Do the calculation once
	 * before OProfile starts, since the information is used in the
	 * interrupt handler.  Starting with Power 7+ we only record the
	 * sample for marked events if the SIAR valid bit is set.  For
	 * non-marked events the sample is always recorded.
	 */
	if (pvr_version_is(PVR_POWER7p))
		cntr_marked_events = power7_marked_instr_event(mmcr1_val);
	else
		cntr_marked_events = 0; /* For older processors, set the bit
					 * map to zero so the sample is always
					 * recorded.
					 */

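	/*
	 * Preload each counter so that it overflows after roughly 'count'
	 * events: the classic PMCs are 32 bits wide and the overflow
	 * condition is bit 31 becoming set, so starting from
	 * 0x80000000 - count leaves 'count' events until overflow (see
	 * pmc_overflow() below, which treats a negative 32-bit value as
	 * overflowed).
	 */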
	for (i = 0; i < cur_cpu_spec->num_pmcs; ++i)
		reset_value[i] = 0x80000000UL - ctr[i].count;

	/* setup user and kernel profiling */
	if (sys->enable_kernel)
		mmcr0_val &= ~MMCR0_KERNEL_DISABLE;
	else
		mmcr0_val |= MMCR0_KERNEL_DISABLE;

	if (sys->enable_user)
		mmcr0_val &= ~MMCR0_PROBLEM_DISABLE;
	else
		mmcr0_val |= MMCR0_PROBLEM_DISABLE;

	if (pvr_version_is(PVR_POWER4) || pvr_version_is(PVR_POWER4p) ||
	    pvr_version_is(PVR_970) || pvr_version_is(PVR_970FX) ||
	    pvr_version_is(PVR_970MP) || pvr_version_is(PVR_970GX) ||
	    pvr_version_is(PVR_POWER5) || pvr_version_is(PVR_POWER5p))
		use_slot_nums = 1;

	return 0;
}

extern void ppc_enable_pmcs(void);

/*
 * Older CPUs require the MMCRA sample bit to be always set, but newer
 * CPUs only want it set for some groups. Eventually we will remove all
 * knowledge of this bit in the kernel; oprofile userspace should be
 * setting it when required.
 *
 * In order to keep current installations working we force the bit for
 * those older CPUs. Once everyone has updated their oprofile userspace we
 * can remove this hack.
 */
static inline int mmcra_must_set_sample(void)
{
	if (pvr_version_is(PVR_POWER4) || pvr_version_is(PVR_POWER4p) ||
	    pvr_version_is(PVR_970) || pvr_version_is(PVR_970FX) ||
	    pvr_version_is(PVR_970MP) || pvr_version_is(PVR_970GX))
		return 1;

	return 0;
}

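/*
 * Per-CPU setup (sketch based only on the code below): freeze the counters
 * with MMCR0_FC, apply the MMCR0 value prepared in power4_reg_setup() plus
 * the interrupt and freeze control bits, program MMCR1 and MMCRA, and leave
 * the counters frozen until power4_start() clears MMCR0_FC.
 */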
static int power4_cpu_setup(struct op_counter_config *ctr)
{
	unsigned int mmcr0 = mmcr0_val;
	unsigned long mmcra = mmcra_val;

	ppc_enable_pmcs();

	/* set the freeze bit */
	mmcr0 |= MMCR0_FC;
	mtspr(SPRN_MMCR0, mmcr0);

	mmcr0 |= MMCR0_FCM1|MMCR0_PMXE|MMCR0_FCECE;
	mmcr0 |= MMCR0_PMC1CE|MMCR0_PMCjCE;
	mtspr(SPRN_MMCR0, mmcr0);

	mtspr(SPRN_MMCR1, mmcr1_val);

	if (mmcra_must_set_sample())
		mmcra |= MMCRA_SAMPLE_ENABLE;
	mtspr(SPRN_MMCRA, mmcra);

	dbg("setup on cpu %d, mmcr0 %lx\n", smp_processor_id(),
	    mfspr(SPRN_MMCR0));
	dbg("setup on cpu %d, mmcr1 %lx\n", smp_processor_id(),
	    mfspr(SPRN_MMCR1));
	dbg("setup on cpu %d, mmcra %lx\n", smp_processor_id(),
	    mfspr(SPRN_MMCRA));

	return 0;
}

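/*
 * Start profiling on this CPU (sketch of the flow below): preload the
 * enabled counters with their reset values, zero the disabled ones, then
 * clear MMCR0_PMAO and MMCR0_FC so counting resumes once the return from
 * this exception clears MSR[PMM].
 */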
static int power4_start(struct op_counter_config *ctr)
{
	int i;
	unsigned int mmcr0;

	/* set the PMM bit (see comment below) */
	mtmsr(mfmsr() | MSR_PMM);

	for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) {
		if (ctr[i].enabled) {
			classic_ctr_write(i, reset_value[i]);
		} else {
			classic_ctr_write(i, 0);
		}
	}

	mmcr0 = mfspr(SPRN_MMCR0);

	/*
	 * We must clear the PMAO bit on some (GQ) chips. Just do it
	 * all the time
	 */
	mmcr0 &= ~MMCR0_PMAO;

	/*
	 * now clear the freeze bit, counting will not start until we
	 * rfid from this exception, because only at that point will
	 * the PMM bit be cleared
	 */
	mmcr0 &= ~MMCR0_FC;
	mtspr(SPRN_MMCR0, mmcr0);

	oprofile_running = 1;

	dbg("start on cpu %d, mmcr0 %x\n", smp_processor_id(), mmcr0);
	return 0;
}

static void power4_stop(void)
{
	unsigned int mmcr0;

	/* freeze counters */
	mmcr0 = mfspr(SPRN_MMCR0);
	mmcr0 |= MMCR0_FC;
	mtspr(SPRN_MMCR0, mmcr0);

	oprofile_running = 0;

	dbg("stop on cpu %d, mmcr0 %x\n", smp_processor_id(), mmcr0);

	mb();
}

/* Fake functions used by canonicalize_pc */
static void __used hypervisor_bucket(void)
{
}

static void __used rtas_bucket(void)
{
}

static void __used kernel_unknown_bucket(void)
{
}

/*
 * On GQ and newer the MMCRA stores the HV and PR bits at the time
 * the SIAR was sampled. We use that to work out if the SIAR was sampled in
 * the hypervisor, our exception vectors or RTAS.
 * If the MMCRA_SAMPLE_ENABLE bit is set, we can use the MMCRA[slot] bits
 * to more accurately identify the address of the sampled instruction. The
 * mmcra[slot] bits represent the slot number of a sampled instruction
 * within an instruction group.  The slot will contain a value between 1
 * and 5 if MMCRA_SAMPLE_ENABLE is set, otherwise 0.
 */
static unsigned long get_pc(struct pt_regs *regs)
{
	unsigned long pc = mfspr(SPRN_SIAR);
	unsigned long mmcra;
	unsigned long slot;

	/* Can't do much about it */
	if (!cur_cpu_spec->oprofile_mmcra_sihv)
		return pc;

	mmcra = mfspr(SPRN_MMCRA);

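	/*
	 * Only the older processors flagged via use_slot_nums in
	 * power4_reg_setup() carry slot information here.  As a worked
	 * example of the adjustment below: a slot value of 3 moves pc
	 * forward by 4 * (3 - 1) = 8 bytes, i.e. two instructions into the
	 * sampled group, presumably because SIAR latches the address of the
	 * first instruction of that group.
	 */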
	if (use_slot_nums && (mmcra & MMCRA_SAMPLE_ENABLE)) {
		slot = ((mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT);
		if (slot > 1)
			pc += 4 * (slot - 1);
	}

	/* Were we in the hypervisor? */
	if (firmware_has_feature(FW_FEATURE_LPAR) &&
	    (mmcra & cur_cpu_spec->oprofile_mmcra_sihv))
		/* function descriptor madness */
		return *((unsigned long *)hypervisor_bucket);

	/* We were in userspace, nothing to do */
	if (mmcra & cur_cpu_spec->oprofile_mmcra_sipr)
		return pc;

#ifdef CONFIG_PPC_RTAS
	/* Were we in RTAS? */
	if (pc >= rtas.base && pc < (rtas.base + rtas.size))
		/* function descriptor madness */
		return *((unsigned long *)rtas_bucket);
#endif

	/* Were we in our exception vectors or SLB real mode miss handler? */
	if (pc < 0x1000000UL)
		return (unsigned long)__va(pc);

	/* Not sure where we were */
	if (!is_kernel_addr(pc))
		/* function descriptor madness */
		return *((unsigned long *)kernel_unknown_bucket);

	return pc;
}

static int get_kernel(unsigned long pc, unsigned long mmcra)
{
	int is_kernel;

	if (!cur_cpu_spec->oprofile_mmcra_sihv) {
		is_kernel = is_kernel_addr(pc);
	} else {
		is_kernel = ((mmcra & cur_cpu_spec->oprofile_mmcra_sipr) == 0);
	}

	return is_kernel;
}

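/*
 * A counter has overflowed when its 32-bit value has gone negative, i.e.
 * bit 31 is set.  The counters are preloaded with 0x80000000 - count in
 * power4_reg_setup(), so this fires after roughly 'count' events.
 */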
static bool pmc_overflow(unsigned long val)
{
	if ((int)val < 0)
		return true;

	/*
	 * Events on POWER7 can roll back if a speculative event doesn't
	 * eventually complete. Unfortunately in some rare cases they will
	 * raise a performance monitor exception. We need to catch this to
	 * ensure we reset the PMC. In all cases the PMC will be 256 or less
	 * cycles from overflow.
	 *
	 * We only do this if the first pass fails to find any overflowing
	 * PMCs because a user might set a period of less than 256 and we
	 * don't want to mistakenly reset them.
	 */
	if (pvr_version_is(PVR_POWER7) && ((0x80000000 - val) <= 256))
		return true;

	return false;
}

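/*
 * Performance monitor exception handler (sketch of the flow below): work
 * out the sampled address and kernel/user mode, record a sample for each
 * counter that overflowed (for marked events only when MMCRA says the SIAR
 * is valid), reload those counters, then clear MMCR0_PMAO and MMCR0_FC so
 * counting restarts on return from the exception.
 */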
static void power4_handle_interrupt(struct pt_regs *regs,
				    struct op_counter_config *ctr)
{
	unsigned long pc;
	int is_kernel;
	int val;
	int i;
	unsigned int mmcr0;
	unsigned long mmcra;
	bool siar_valid = false;

	mmcra = mfspr(SPRN_MMCRA);

	pc = get_pc(regs);
	is_kernel = get_kernel(pc, mmcra);

	/* set the PMM bit (see comment below) */
	mtmsr(mfmsr() | MSR_PMM);

	/* Check that the SIAR valid bit in MMCRA is set to 1. */
	if ((mmcra & MMCRA_SIAR_VALID_MASK) == MMCRA_SIAR_VALID_MASK)
		siar_valid = true;

	for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) {
		val = classic_ctr_read(i);
		if (pmc_overflow(val)) {
			if (oprofile_running && ctr[i].enabled) {
				/* Power 7+ and newer architectures:
				 * If the event is a marked event, then only
				 * save the sample if the SIAR valid bit is
				 * set.  If the event is not marked, then
				 * always save the sample.
				 * Note, the Sample enable bit in the MMCRA
				 * register must be set to 1 if the group
				 * contains a marked event.
				 */
				if ((siar_valid &&
				     (cntr_marked_events & (1 << i)))
				    || !(cntr_marked_events & (1 << i)))
					oprofile_add_ext_sample(pc, regs, i,
								is_kernel);

				classic_ctr_write(i, reset_value[i]);
			} else {
				classic_ctr_write(i, 0);
			}
		}
	}

	mmcr0 = mfspr(SPRN_MMCR0);

	/* reset the perfmon trigger */
	mmcr0 |= MMCR0_PMXE;

	/*
	 * We must clear the PMAO bit on some (GQ) chips. Just do it
	 * all the time
	 */
	mmcr0 &= ~MMCR0_PMAO;

	/* Clear the appropriate bits in the MMCRA */
	mmcra &= ~cur_cpu_spec->oprofile_mmcra_clear;
	mtspr(SPRN_MMCRA, mmcra);

	/*
	 * now clear the freeze bit, counting will not start until we
	 * rfid from this exception, because only at that point will
	 * the PMM bit be cleared
	 */
	mmcr0 &= ~MMCR0_FC;
	mtspr(SPRN_MMCR0, mmcr0);
}

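/*
 * Model callbacks for the POWER4 family; the generic powerpc oprofile code
 * is expected to select this table when the CPU's oprofile type names the
 * "power4" model.
 */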
struct op_powerpc_model op_model_power4 = {
	.reg_setup		= power4_reg_setup,
	.cpu_setup		= power4_cpu_setup,
	.start			= power4_start,
	.stop			= power4_stop,
	.handle_interrupt	= power4_handle_interrupt,
};