// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "uncore.h"
#include "uncore_discovery.h"

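/*
 * Module parameter to opt out of the PerfMon discovery mechanism, e.g. when
 * a BIOS exposes broken discovery tables.
 */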
static bool uncore_no_discover;
module_param(uncore_no_discover, bool, 0);
MODULE_PARM_DESC(uncore_no_discover, "Don't enable the Intel uncore PerfMon discovery mechanism "
				     "(default: enable the discovery mechanism).");

struct intel_uncore_type *empty_uncore[] = { NULL, };
struct intel_uncore_type **uncore_msr_uncores = empty_uncore;
struct intel_uncore_type **uncore_pci_uncores = empty_uncore;
struct intel_uncore_type **uncore_mmio_uncores = empty_uncore;

static bool pcidrv_registered;
struct pci_driver *uncore_pci_driver;
/* The PCI driver for the device which the uncore doesn't own. */
struct pci_driver *uncore_pci_sub_driver;
/* pci bus to socket mapping */
DEFINE_RAW_SPINLOCK(pci2phy_map_lock);
struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head);
struct pci_extra_dev *uncore_extra_pci_dev;
int __uncore_max_dies;

/* mask of cpus that collect uncore events */
static cpumask_t uncore_cpu_mask;

/* constraint for the fixed counter */
static struct event_constraint uncore_constraint_fixed =
	EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
struct event_constraint uncore_constraint_empty =
	EVENT_CONSTRAINT(0, 0, 0);

MODULE_LICENSE("GPL");

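/* Map a PCI bus to the logical die id recorded in the pci2phy map, or -1. */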
int uncore_pcibus_to_dieid(struct pci_bus *bus)
{
	struct pci2phy_map *map;
	int die_id = -1;

	raw_spin_lock(&pci2phy_map_lock);
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == pci_domain_nr(bus)) {
			die_id = map->pbus_to_dieid[bus->number];
			break;
		}
	}
	raw_spin_unlock(&pci2phy_map_lock);

	return die_id;
}

int uncore_die_to_segment(int die)
{
	struct pci_bus *bus = NULL;

	/* Find first pci bus which attributes to specified die. */
	while ((bus = pci_find_next_bus(bus)) &&
	       (die != uncore_pcibus_to_dieid(bus)))
		;

	return bus ? pci_domain_nr(bus) : -EINVAL;
}

static void uncore_free_pcibus_map(void)
{
	struct pci2phy_map *map, *tmp;

	list_for_each_entry_safe(map, tmp, &pci2phy_map_head, list) {
		list_del(&map->list);
		kfree(map);
	}
}

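/*
 * Look up the pci2phy map entry for @segment, allocating one on first use.
 * Called with pci2phy_map_lock held; the lock is dropped around the
 * allocation and the lookup is retried afterwards.
 */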
struct pci2phy_map *__find_pci2phy_map(int segment)
{
	struct pci2phy_map *map, *alloc = NULL;
	int i;

	lockdep_assert_held(&pci2phy_map_lock);

lookup:
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == segment)
			goto end;
	}

	if (!alloc) {
		raw_spin_unlock(&pci2phy_map_lock);
		alloc = kmalloc(sizeof(struct pci2phy_map), GFP_KERNEL);
		raw_spin_lock(&pci2phy_map_lock);

		if (!alloc)
			return NULL;

		goto lookup;
	}

	map = alloc;
	alloc = NULL;
	map->segment = segment;
	for (i = 0; i < 256; i++)
		map->pbus_to_dieid[i] = -1;
	list_add_tail(&map->list, &pci2phy_map_head);

end:
	kfree(alloc);
	return map;
}

ssize_t uncore_event_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct uncore_event_desc *event =
		container_of(attr, struct uncore_event_desc, attr);
	return sprintf(buf, "%s", event->config);
}

struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
{
	unsigned int dieid = topology_logical_die_id(cpu);

	/*
	 * The unsigned check also catches the '-1' return value for non
	 * existent mappings in the topology map.
	 */
	return dieid < uncore_max_dies() ? pmu->boxes[dieid] : NULL;
}

u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 count;

	rdmsrl(event->hw.event_base, count);

	return count;
}

void uncore_mmio_exit_box(struct intel_uncore_box *box)
{
	if (box->io_addr)
		iounmap(box->io_addr);
}

u64 uncore_mmio_read_counter(struct intel_uncore_box *box,
			     struct perf_event *event)
{
	if (!box->io_addr)
		return 0;

	if (!uncore_mmio_is_valid_offset(box, event->hw.event_base))
		return 0;

	return readq(box->io_addr + event->hw.event_base);
}

/*
 * generic get constraint function for shared match/mask registers.
 */
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	unsigned long flags;
	bool ok = false;

	/*
	 * reg->alloc can be set due to existing state, so for fake box we
	 * need to ignore this, otherwise we might fail to allocate proper
	 * fake state for this extra reg constraint.
	 */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;

	er = &box->shared_regs[reg1->idx];
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!atomic_read(&er->ref) ||
	    (er->config1 == reg1->config && er->config2 == reg2->config)) {
		atomic_inc(&er->ref);
		er->config1 = reg1->config;
		er->config2 = reg2->config;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (ok) {
		if (!uncore_box_is_fake(box))
			reg1->alloc = 1;
		return NULL;
	}

	return &uncore_constraint_empty;
}

void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;

	/*
	 * Only put constraint if extra reg was actually allocated. Also
	 * takes care of events which do not use an extra shared reg.
	 *
	 * Also, if this is a fake box we shouldn't touch any event state
	 * (reg->alloc) and we don't care about leaving inconsistent box
	 * state either since it will be thrown out.
	 */
	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	er = &box->shared_regs[reg1->idx];
	atomic_dec(&er->ref);
	reg1->alloc = 0;
}

u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	u64 config;

	er = &box->shared_regs[idx];

	raw_spin_lock_irqsave(&er->lock, flags);
	config = er->config;
	raw_spin_unlock_irqrestore(&er->lock, flags);

	return config;
}

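/* Point the event's config/counter base registers at the counter selected by @idx. */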
static void uncore_assign_hw_event(struct intel_uncore_box *box,
				   struct perf_event *event, int idx)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = idx;
	hwc->last_tag = ++box->tags[idx];

	if (uncore_pmc_fixed(hwc->idx)) {
		hwc->event_base = uncore_fixed_ctr(box);
		hwc->config_base = uncore_fixed_ctl(box);
		return;
	}

	hwc->config_base = uncore_event_ctl(box, hwc->idx);
	hwc->event_base = uncore_perf_ctr(box, hwc->idx);
}

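/*
 * Fold the latest hardware counter value into event->count, shifting by the
 * counter width so that wrap-around of narrower counters is handled.
 */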
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 prev_count, new_count, delta;
	int shift;

	if (uncore_pmc_freerunning(event->hw.idx))
		shift = 64 - uncore_freerunning_bits(box, event);
	else if (uncore_pmc_fixed(event->hw.idx))
		shift = 64 - uncore_fixed_ctr_bits(box);
	else
		shift = 64 - uncore_perf_ctr_bits(box);

	/* the hrtimer might modify the previous event value */
again:
	prev_count = local64_read(&event->hw.prev_count);
	new_count = uncore_read_counter(box, event);
	if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
		goto again;

	delta = (new_count << shift) - (prev_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
}

/*
 * The overflow interrupt is unavailable for SandyBridge-EP, is broken
 * for SandyBridge. So we use hrtimer to periodically poll the counter
 * to avoid overflow.
 */
static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
{
	struct intel_uncore_box *box;
	struct perf_event *event;
	unsigned long flags;
	int bit;

	box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
	if (!box->n_active || box->cpu != smp_processor_id())
		return HRTIMER_NORESTART;
	/*
	 * disable local interrupt to prevent uncore_pmu_event_start/stop
	 * to interrupt the update process
	 */
	local_irq_save(flags);

	/*
	 * handle boxes with an active event list as opposed to active
	 * counters
	 */
	list_for_each_entry(event, &box->active_list, active_entry) {
		uncore_perf_event_update(box, event);
	}

	for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
		uncore_perf_event_update(box, box->events[bit]);

	local_irq_restore(flags);

	hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
	return HRTIMER_RESTART;
}

void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration),
		      HRTIMER_MODE_REL_PINNED);
}

void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_cancel(&box->hrtimer);
}

static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	box->hrtimer.function = uncore_pmu_hrtimer;
}

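/* Allocate a box for @type on NUMA node @node and initialize its defaults. */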
static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
						 int node)
{
	int i, size, numshared = type->num_shared_regs;
	struct intel_uncore_box *box;

	size = sizeof(*box) + numshared * sizeof(struct intel_uncore_extra_reg);

	box = kzalloc_node(size, GFP_KERNEL, node);
	if (!box)
		return NULL;

	for (i = 0; i < numshared; i++)
		raw_spin_lock_init(&box->shared_regs[i].lock);

	uncore_pmu_init_hrtimer(box);
	box->cpu = -1;
	box->dieid = -1;

	/* set default hrtimer timeout */
	box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;

	INIT_LIST_HEAD(&box->active_list);

	return box;
}

/*
 * Using uncore_pmu_event_init pmu event_init callback
 * as a detection point for uncore events.
 */
static int uncore_pmu_event_init(struct perf_event *event);

static bool is_box_event(struct intel_uncore_box *box, struct perf_event *event)
{
	return &box->pmu->pmu == event->pmu;
}

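/*
 * Collect the schedulable events of @leader's group into box->event_list.
 * Returns the new number of collected events, or -EINVAL if the box is full.
 */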
static int
uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
		      bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = box->pmu->type->num_counters;
	if (box->pmu->type->fixed_ctl)
		max_count++;

	if (box->n_events >= max_count)
		return -EINVAL;

	n = box->n_events;

	if (is_box_event(box, leader)) {
		box->event_list[n] = leader;
		n++;
	}

	if (!dogrp)
		return n;

	for_each_sibling_event(event, leader) {
		if (!is_box_event(box, event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -EINVAL;

		box->event_list[n] = event;
		n++;
	}
	return n;
}

static struct event_constraint *
uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct event_constraint *c;

	if (type->ops->get_constraint) {
		c = type->ops->get_constraint(box, event);
		if (c)
			return c;
	}

	if (event->attr.config == UNCORE_FIXED_EVENT)
		return &uncore_constraint_fixed;

	if (type->constraints) {
		for_each_event_constraint(c, type->constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &type->unconstrainted;
}

static void uncore_put_event_constraint(struct intel_uncore_box *box,
					struct perf_event *event)
{
	if (box->pmu->type->ops->put_constraint)
		box->pmu->type->ops->put_constraint(box, event);
}

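/*
 * Assign counters to the collected events: first try to reuse the previous
 * assignment, then fall back to the generic constraint solver.
 */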
static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
{
	unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	struct event_constraint *c;
	int i, wmin, wmax, ret = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);

	for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
		c = uncore_get_event_constraint(box, box->event_list[i]);
		box->event_constraint[i] = c;
		wmin = min(wmin, c->weight);
		wmax = max(wmax, c->weight);
	}

	/* fastpath, try to reuse previous register */
	for (i = 0; i < n; i++) {
		hwc = &box->event_list[i]->hw;
		c = box->event_constraint[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}
	/* slow path */
	if (i != n)
		ret = perf_assign_events(box->event_constraint, n,
					 wmin, wmax, n, assign);

	if (!assign || ret) {
		for (i = 0; i < n; i++)
			uncore_put_event_constraint(box, box->event_list[i]);
	}
	return ret ? -EINVAL : 0;
}

void uncore_pmu_event_start(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int idx = event->hw.idx;

	if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
		return;

	/*
	 * Free running counter is read-only and always active.
	 * Use the current counter value as start point.
	 * There is no overflow interrupt for free running counter.
	 * Use hrtimer to periodically poll the counter to avoid overflow.
	 */
	if (uncore_pmc_freerunning(event->hw.idx)) {
		list_add_tail(&event->active_entry, &box->active_list);
		local64_set(&event->hw.prev_count,
			    uncore_read_counter(box, event));
		if (box->n_active++ == 0)
			uncore_pmu_start_hrtimer(box);
		return;
	}

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	event->hw.state = 0;
	box->events[idx] = event;
	box->n_active++;
	__set_bit(idx, box->active_mask);

	local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
	uncore_enable_event(box, event);

	if (box->n_active == 1)
		uncore_pmu_start_hrtimer(box);
}

void uncore_pmu_event_stop(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	/* Cannot disable free running counter which is read-only */
	if (uncore_pmc_freerunning(hwc->idx)) {
		list_del(&event->active_entry);
		if (--box->n_active == 0)
			uncore_pmu_cancel_hrtimer(box);
		uncore_perf_event_update(box, event);
		return;
	}

	if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
		uncore_disable_event(box, event);
		box->n_active--;
		box->events[hwc->idx] = NULL;
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;

		if (box->n_active == 0)
			uncore_pmu_cancel_hrtimer(box);
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		uncore_perf_event_update(box, event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

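/* perf ->add callback: collect the event into the box and (re)assign counters. */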
int uncore_pmu_event_add(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;
	int assign[UNCORE_PMC_IDX_MAX];
	int i, n, ret;

	if (!box)
		return -ENODEV;

	/*
	 * The free running counter is assigned in event_init().
	 * The free running counter event and free running counter
	 * are 1:1 mapped. It doesn't need to be tracked in event_list.
	 */
	if (uncore_pmc_freerunning(hwc->idx)) {
		if (flags & PERF_EF_START)
			uncore_pmu_event_start(event, 0);
		return 0;
	}

	ret = n = uncore_collect_events(box, event, false);
	if (ret < 0)
		return ret;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	ret = uncore_assign_events(box, assign, n);
	if (ret)
		return ret;

	/* save events moving to new counters */
	for (i = 0; i < box->n_events; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx == assign[i] &&
		    hwc->last_tag == box->tags[assign[i]])
			continue;
		/*
		 * Ensure we don't accidentally enable a stopped
		 * counter simply because we rescheduled.
		 */
		if (hwc->state & PERF_HES_STOPPED)
			hwc->state |= PERF_HES_ARCH;

		uncore_pmu_event_stop(event, PERF_EF_UPDATE);
	}

	/* reprogram moved events into new counters */
	for (i = 0; i < n; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx != assign[i] ||
		    hwc->last_tag != box->tags[assign[i]])
			uncore_assign_hw_event(box, event, assign[i]);
		else if (i < box->n_events)
			continue;

		if (hwc->state & PERF_HES_ARCH)
			continue;

		uncore_pmu_event_start(event, 0);
	}
	box->n_events = n;

	return 0;
}

void uncore_pmu_event_del(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int i;

	uncore_pmu_event_stop(event, PERF_EF_UPDATE);

	/*
	 * The event for free running counter is not tracked by event_list.
	 * It doesn't need to force event->hw.idx = -1 to reassign the counter.
	 * Because the event and the free running counter are 1:1 mapped.
	 */
	if (uncore_pmc_freerunning(event->hw.idx))
		return;

	for (i = 0; i < box->n_events; i++) {
		if (event == box->event_list[i]) {
			uncore_put_event_constraint(box, event);

			for (++i; i < box->n_events; i++)
				box->event_list[i - 1] = box->event_list[i];

			--box->n_events;
			break;
		}
	}

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
}

void uncore_pmu_event_read(struct perf_event *event)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	uncore_perf_event_update(box, event);
}

/*
 * validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 */
static int uncore_validate_group(struct intel_uncore_pmu *pmu,
				 struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct intel_uncore_box *fake_box;
	int ret = -EINVAL, n;

	/* The free running counter is always active. */
	if (uncore_pmc_freerunning(event->hw.idx))
		return 0;

	fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
	if (!fake_box)
		return -ENOMEM;

	fake_box->pmu = pmu;
	/*
	 * the event is not yet connected with its
	 * siblings therefore we must first collect
	 * existing siblings, then add the new event
	 * before we can simulate the scheduling
	 */
	n = uncore_collect_events(fake_box, leader, true);
	if (n < 0)
		goto out;

	fake_box->n_events = n;
	n = uncore_collect_events(fake_box, event, false);
	if (n < 0)
		goto out;

	fake_box->n_events = n;

	ret = uncore_assign_events(fake_box, NULL, n);
out:
	kfree(fake_box);
	return ret;
}

static int uncore_pmu_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	int ret;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;
	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;
	event->cpu = box->cpu;
	event->pmu_private = box;

	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;

	if (event->attr.config == UNCORE_FIXED_EVENT) {
		/* no fixed counter */
		if (!pmu->type->fixed_ctl)
			return -EINVAL;
		/*
		 * if there is only one fixed counter, only the first pmu
		 * can access the fixed counter
		 */
		if (pmu->type->single_fixed && pmu->pmu_idx > 0)
			return -EINVAL;

		/* fixed counters have event field hardcoded to zero */
		hwc->config = 0ULL;
	} else if (is_freerunning_event(event)) {
		hwc->config = event->attr.config;
		if (!check_valid_freerunning_event(box, event))
			return -EINVAL;
		event->hw.idx = UNCORE_PMC_IDX_FREERUNNING;
		/*
		 * The free running counter event and free running counter
		 * are always 1:1 mapped.
		 * The free running counter is always active.
		 * Assign the free running counter here.
		 */
		event->hw.event_base = uncore_freerunning_counter(box, event);
	} else {
		hwc->config = event->attr.config &
			      (pmu->type->event_mask | ((u64)pmu->type->event_mask_ext << 32));
		if (pmu->type->ops->hw_config) {
			ret = pmu->type->ops->hw_config(box, event);
			if (ret)
				return ret;
		}
	}

	if (event->group_leader != event)
		ret = uncore_validate_group(pmu, event);
	else
		ret = 0;

	return ret;
}

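/*
 * PMU-wide enable/disable callbacks: un-freeze and freeze all counters of the
 * box at once, so that the events of a group start and stop together instead
 * of one by one as each counter is programmed.
 */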
static void uncore_pmu_enable(struct pmu *pmu)
{
	struct intel_uncore_pmu *uncore_pmu;
	struct intel_uncore_box *box;

	uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);
	if (!uncore_pmu)
		return;

	box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
	if (!box)
		return;

	if (uncore_pmu->type->ops->enable_box)
		uncore_pmu->type->ops->enable_box(box);
}

static void uncore_pmu_disable(struct pmu *pmu)
{
	struct intel_uncore_pmu *uncore_pmu;
	struct intel_uncore_box *box;

	uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);
	if (!uncore_pmu)
		return;

	box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
	if (!box)
		return;

	if (uncore_pmu->type->ops->disable_box)
		uncore_pmu->type->ops->disable_box(box);
}

static ssize_t uncore_get_attr_cpumask(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &uncore_cpu_mask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);

static struct attribute *uncore_pmu_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group uncore_pmu_attr_group = {
	.attrs = uncore_pmu_attrs,
};

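/*
 * Build the PMU name. Types enumerated from the self-describing discovery
 * tables have no block name and fall back to the uncore_type_%u[_%d] scheme.
 */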
static void uncore_get_pmu_name(struct intel_uncore_pmu *pmu)
{
	struct intel_uncore_type *type = pmu->type;

	/*
	 * No uncore block name in discovery table.
	 * Use uncore_type_&typeid_&boxid as name.
	 */
	if (!type->name) {
		if (type->num_boxes == 1)
			sprintf(pmu->name, "uncore_type_%u", type->type_id);
		else {
			sprintf(pmu->name, "uncore_type_%u_%d",
				type->type_id, type->box_ids[pmu->pmu_idx]);
		}
		return;
	}

	if (type->num_boxes == 1) {
		if (strlen(type->name) > 0)
			sprintf(pmu->name, "uncore_%s", type->name);
		else
			sprintf(pmu->name, "uncore");
	} else
		sprintf(pmu->name, "uncore_%s_%d", type->name, pmu->pmu_idx);
}

static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
{
	int ret;

	if (!pmu->type->pmu) {
		pmu->pmu = (struct pmu) {
			.attr_groups = pmu->type->attr_groups,
			.task_ctx_nr = perf_invalid_context,
			.pmu_enable = uncore_pmu_enable,
			.pmu_disable = uncore_pmu_disable,
			.event_init = uncore_pmu_event_init,
			.add = uncore_pmu_event_add,
			.del = uncore_pmu_event_del,
			.start = uncore_pmu_event_start,
			.stop = uncore_pmu_event_stop,
			.read = uncore_pmu_event_read,
			.module = THIS_MODULE,
			.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
			.attr_update = pmu->type->attr_update,
		};
	} else {
		pmu->pmu = *pmu->type->pmu;
		pmu->pmu.attr_groups = pmu->type->attr_groups;
		pmu->pmu.attr_update = pmu->type->attr_update;
	}

	uncore_get_pmu_name(pmu);

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	if (!ret)
		pmu->registered = true;
	return ret;
}

static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
{
	if (!pmu->registered)
		return;
	perf_pmu_unregister(&pmu->pmu);
	pmu->registered = false;
}

static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
{
	int die;

	for (die = 0; die < uncore_max_dies(); die++)
		kfree(pmu->boxes[die]);
	kfree(pmu->boxes);
}

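/* Unregister all PMUs of @type and free everything the type allocated. */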
static void uncore_type_exit(struct intel_uncore_type *type)
{
	struct intel_uncore_pmu *pmu = type->pmus;
	int i;

	if (type->cleanup_mapping)
		type->cleanup_mapping(type);

	if (pmu) {
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			uncore_pmu_unregister(pmu);
			uncore_free_boxes(pmu);
		}
		kfree(type->pmus);
		type->pmus = NULL;
	}
	if (type->box_ids) {
		kfree(type->box_ids);
		type->box_ids = NULL;
	}
	kfree(type->events_group);
	type->events_group = NULL;
}

static void uncore_types_exit(struct intel_uncore_type **types)
{
	for (; *types; types++)
		uncore_type_exit(*types);
}

static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
{
	struct intel_uncore_pmu *pmus;
	size_t size;
	int i, j;
	pmus = kcalloc(type->num_boxes, sizeof(*pmus), GFP_KERNEL);
	if (!pmus)
		return -ENOMEM;

	size = uncore_max_dies() * sizeof(struct intel_uncore_box *);

	for (i = 0; i < type->num_boxes; i++) {
		pmus[i].func_id = setid ? i : -1;
		pmus[i].pmu_idx = i;
		pmus[i].type    = type;
		pmus[i].boxes   = kzalloc(size, GFP_KERNEL);
		if (!pmus[i].boxes)
			goto err;
	}

	type->pmus = pmus;
	type->unconstrainted = (struct event_constraint)
		__EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
				0, type->num_counters, 0, 0);

	if (type->event_descs) {
		struct {
			struct attribute_group group;
			struct attribute *attrs[];
		} *attr_group;

		for (i = 0; type->event_descs[i].attr.attr.name; i++);

		attr_group = kzalloc(struct_size(attr_group, attrs, i + 1),
								GFP_KERNEL);
		if (!attr_group)
			goto err;

		attr_group->group.name = "events";
		attr_group->group.attrs = attr_group->attrs;

		for (j = 0; j < i; j++)
			attr_group->attrs[j] = &type->event_descs[j].attr.attr;

		type->events_group = &attr_group->group;
	}

	type->pmu_group = &uncore_pmu_attr_group;

	if (type->set_mapping)
		type->set_mapping(type);

	return 0;

err:
	for (i = 0; i < type->num_boxes; i++)
		kfree(pmus[i].boxes);
	kfree(pmus);

	return -ENOMEM;
}

static int __init
uncore_types_init(struct intel_uncore_type **types, bool setid)
{
	int ret;

	for (; *types; types++) {
		ret = uncore_type_init(*types, setid);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * Get the die information of a PCI device.
 * @pdev: The PCI device.
 * @die: The die id which the device maps to.
 */
static int uncore_pci_get_dev_die_info(struct pci_dev *pdev, int *die)
{
	*die = uncore_pcibus_to_dieid(pdev->bus);
	if (*die < 0)
		return -EINVAL;

	return 0;
}

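/*
 * Find the PMU of a PCI device from the parsed discovery tables, by matching
 * the device's domain/bus/devfn against each box's control address on every
 * die.
 */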
static struct intel_uncore_pmu *
uncore_pci_find_dev_pmu_from_types(struct pci_dev *pdev)
{
	struct intel_uncore_type **types = uncore_pci_uncores;
	struct intel_uncore_type *type;
	u64 box_ctl;
	int i, die;

	for (; *types; types++) {
		type = *types;
		for (die = 0; die < __uncore_max_dies; die++) {
			for (i = 0; i < type->num_boxes; i++) {
				if (!type->box_ctls[die])
					continue;
				box_ctl = type->box_ctls[die] + type->pci_offsets[i];
				if (pdev->devfn == UNCORE_DISCOVERY_PCI_DEVFN(box_ctl) &&
				    pdev->bus->number == UNCORE_DISCOVERY_PCI_BUS(box_ctl) &&
				    pci_domain_nr(pdev->bus) == UNCORE_DISCOVERY_PCI_DOMAIN(box_ctl))
					return &type->pmus[i];
			}
		}
	}

	return NULL;
}

/*
 * Find the PMU of a PCI device.
 * @pdev: The PCI device.
 * @ids: The ID table of the available PCI devices with a PMU.
 *       If NULL, search the whole uncore_pci_uncores.
 */
static struct intel_uncore_pmu *
uncore_pci_find_dev_pmu(struct pci_dev *pdev, const struct pci_device_id *ids)
{
	struct intel_uncore_pmu *pmu = NULL;
	struct intel_uncore_type *type;
	kernel_ulong_t data;
	unsigned int devfn;

	if (!ids)
		return uncore_pci_find_dev_pmu_from_types(pdev);

	while (ids && ids->vendor) {
		if ((ids->vendor == pdev->vendor) &&
		    (ids->device == pdev->device)) {
			data = ids->driver_data;
			devfn = PCI_DEVFN(UNCORE_PCI_DEV_DEV(data),
					  UNCORE_PCI_DEV_FUNC(data));
			if (devfn == pdev->devfn) {
				type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(data)];
				pmu = &type->pmus[UNCORE_PCI_DEV_IDX(data)];
				break;
			}
		}
		ids++;
	}
	return pmu;
}

/*
 * Register the PMU for a PCI device
 * @pdev: The PCI device.
 * @type: The corresponding PMU type of the device.
 * @pmu: The corresponding PMU of the device.
 * @die: The die id which the device maps to.
 */
static int uncore_pci_pmu_register(struct pci_dev *pdev,
				   struct intel_uncore_type *type,
				   struct intel_uncore_pmu *pmu,
				   int die)
{
	struct intel_uncore_box *box;
	int ret;

	if (WARN_ON_ONCE(pmu->boxes[die] != NULL))
		return -EINVAL;

	box = uncore_alloc_box(type, NUMA_NO_NODE);
	if (!box)
		return -ENOMEM;

	if (pmu->func_id < 0)
		pmu->func_id = pdev->devfn;
	else
		WARN_ON_ONCE(pmu->func_id != pdev->devfn);

	atomic_inc(&box->refcnt);
	box->dieid = die;
	box->pci_dev = pdev;
	box->pmu = pmu;
	uncore_box_init(box);

	pmu->boxes[die] = box;
	if (atomic_inc_return(&pmu->activeboxes) > 1)
		return 0;

	/* First active box registers the pmu */
	ret = uncore_pmu_register(pmu);
	if (ret) {
		pmu->boxes[die] = NULL;
		uncore_box_exit(box);
		kfree(box);
	}
	return ret;
}

/*
 * add a pci uncore device
 */
static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu = NULL;
	int die, ret;

	ret = uncore_pci_get_dev_die_info(pdev, &die);
	if (ret)
		return ret;

	if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
		int idx = UNCORE_PCI_DEV_IDX(id->driver_data);

		uncore_extra_pci_dev[die].dev[idx] = pdev;
		pci_set_drvdata(pdev, NULL);
		return 0;
	}

	type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];

	/*
	 * Some platforms, e.g. Knights Landing, use a common PCI device ID
	 * for multiple instances of an uncore PMU device type. We should check
	 * PCI slot and func to indicate the uncore box.
	 */
	if (id->driver_data & ~0xffff) {
		struct pci_driver *pci_drv = pdev->driver;

		pmu = uncore_pci_find_dev_pmu(pdev, pci_drv->id_table);
		if (pmu == NULL)
			return -ENODEV;
	} else {
		/*
		 * for performance monitoring unit with multiple boxes,
		 * each box has a different function id.
		 */
		pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
	}

	ret = uncore_pci_pmu_register(pdev, type, pmu, die);

	pci_set_drvdata(pdev, pmu->boxes[die]);

	return ret;
}

/*
 * Unregister the PMU of a PCI device
 * @pmu: The corresponding PMU is unregistered.
 * @die: The die id which the device maps to.
 */
static void uncore_pci_pmu_unregister(struct intel_uncore_pmu *pmu, int die)
{
	struct intel_uncore_box *box = pmu->boxes[die];

	pmu->boxes[die] = NULL;
	if (atomic_dec_return(&pmu->activeboxes) == 0)
		uncore_pmu_unregister(pmu);
	uncore_box_exit(box);
	kfree(box);
}

static void uncore_pci_remove(struct pci_dev *pdev)
{
	struct intel_uncore_box *box;
	struct intel_uncore_pmu *pmu;
	int i, die;

	if (uncore_pci_get_dev_die_info(pdev, &die))
		return;

	box = pci_get_drvdata(pdev);
	if (!box) {
		for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
			if (uncore_extra_pci_dev[die].dev[i] == pdev) {
				uncore_extra_pci_dev[die].dev[i] = NULL;
				break;
			}
		}
		WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
		return;
	}

	pmu = box->pmu;

	pci_set_drvdata(pdev, NULL);

	uncore_pci_pmu_unregister(pmu, die);
}

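/*
 * PCI bus notifier shared by the discovery-based PMUs and the PCI sub
 * driver: when a device listed in @ids is about to be deleted from the
 * bus, unregister the PMU that was registered for it.
 */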
static int uncore_bus_notify(struct notifier_block *nb,
			     unsigned long action, void *data,
			     const struct pci_device_id *ids)
{
	struct device *dev = data;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_uncore_pmu *pmu;
	int die;

	/* Unregister the PMU when the device is going to be deleted. */
	if (action != BUS_NOTIFY_DEL_DEVICE)
		return NOTIFY_DONE;

	pmu = uncore_pci_find_dev_pmu(pdev, ids);
	if (!pmu)
		return NOTIFY_DONE;

	if (uncore_pci_get_dev_die_info(pdev, &die))
		return NOTIFY_DONE;

	uncore_pci_pmu_unregister(pmu, die);

	return NOTIFY_OK;
}

static int uncore_pci_sub_bus_notify(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	return uncore_bus_notify(nb, action, data,
				 uncore_pci_sub_driver->id_table);
}

static struct notifier_block uncore_pci_sub_notifier = {
	.notifier_call = uncore_pci_sub_bus_notify,
};

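/*
 * Register PMUs for the devices listed in the sub driver's ID table. The sub
 * driver does not own these devices; it looks them up with pci_get_device()
 * and relies on the bus notifier above to unregister a PMU when its device
 * goes away.
 */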
static void uncore_pci_sub_driver_init(void)
{
	const struct pci_device_id *ids = uncore_pci_sub_driver->id_table;
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct pci_dev *pci_sub_dev;
	bool notify = false;
	unsigned int devfn;
	int die;

	while (ids && ids->vendor) {
		pci_sub_dev = NULL;
		type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(ids->driver_data)];
		/*
		 * Search the available device, and register the
		 * corresponding PMU.
		 */
		while ((pci_sub_dev = pci_get_device(PCI_VENDOR_ID_INTEL,
						     ids->device, pci_sub_dev))) {
			devfn = PCI_DEVFN(UNCORE_PCI_DEV_DEV(ids->driver_data),
					  UNCORE_PCI_DEV_FUNC(ids->driver_data));
			if (devfn != pci_sub_dev->devfn)
				continue;

			pmu = &type->pmus[UNCORE_PCI_DEV_IDX(ids->driver_data)];
			if (!pmu)
				continue;

			if (uncore_pci_get_dev_die_info(pci_sub_dev, &die))
				continue;

			if (!uncore_pci_pmu_register(pci_sub_dev, type, pmu,
						     die))
				notify = true;
		}
		ids++;
	}

	if (notify && bus_register_notifier(&pci_bus_type, &uncore_pci_sub_notifier))
		notify = false;

	if (!notify)
		uncore_pci_sub_driver = NULL;
}

static int uncore_pci_bus_notify(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	return uncore_bus_notify(nb, action, data, NULL);
}

static struct notifier_block uncore_pci_notifier = {
	.notifier_call = uncore_pci_bus_notify,
};

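/*
 * Register a PMU for every box that can be reached through its
 * discovery-table box control address; used when no PCI driver is
 * provided. Device removal is then handled via the bus notifier.
 */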
static void uncore_pci_pmus_register(void)
{
	struct intel_uncore_type **types = uncore_pci_uncores;
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct pci_dev *pdev;
	u64 box_ctl;
	int i, die;

	for (; *types; types++) {
		type = *types;
		for (die = 0; die < __uncore_max_dies; die++) {
			for (i = 0; i < type->num_boxes; i++) {
				if (!type->box_ctls[die])
					continue;
				box_ctl = type->box_ctls[die] + type->pci_offsets[i];
				pdev = pci_get_domain_bus_and_slot(UNCORE_DISCOVERY_PCI_DOMAIN(box_ctl),
								   UNCORE_DISCOVERY_PCI_BUS(box_ctl),
								   UNCORE_DISCOVERY_PCI_DEVFN(box_ctl));
				if (!pdev)
					continue;
				pmu = &type->pmus[i];

				uncore_pci_pmu_register(pdev, type, pmu, die);
			}
		}
	}

	bus_register_notifier(&pci_bus_type, &uncore_pci_notifier);
}

static int __init uncore_pci_init(void)
{
	size_t size;
	int ret;

	size = uncore_max_dies() * sizeof(struct pci_extra_dev);
	uncore_extra_pci_dev = kzalloc(size, GFP_KERNEL);
	if (!uncore_extra_pci_dev) {
		ret = -ENOMEM;
		goto err;
	}

	ret = uncore_types_init(uncore_pci_uncores, false);
	if (ret)
		goto errtype;

	if (uncore_pci_driver) {
		uncore_pci_driver->probe = uncore_pci_probe;
		uncore_pci_driver->remove = uncore_pci_remove;

		ret = pci_register_driver(uncore_pci_driver);
		if (ret)
			goto errtype;
	} else
		uncore_pci_pmus_register();

	if (uncore_pci_sub_driver)
		uncore_pci_sub_driver_init();

	pcidrv_registered = true;
	return 0;

errtype:
	uncore_types_exit(uncore_pci_uncores);
	kfree(uncore_extra_pci_dev);
	uncore_extra_pci_dev = NULL;
	uncore_free_pcibus_map();
err:
	uncore_pci_uncores = empty_uncore;
	return ret;
}

static void uncore_pci_exit(void)
{
	if (pcidrv_registered) {
		pcidrv_registered = false;
		if (uncore_pci_sub_driver)
			bus_unregister_notifier(&pci_bus_type, &uncore_pci_sub_notifier);
		if (uncore_pci_driver)
			pci_unregister_driver(uncore_pci_driver);
		else
			bus_unregister_notifier(&pci_bus_type, &uncore_pci_notifier);
		uncore_types_exit(uncore_pci_uncores);
		kfree(uncore_extra_pci_dev);
		uncore_free_pcibus_map();
	}
}

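/*
 * Move uncore event collection for every box of a type from old_cpu to
 * new_cpu on the same die. old_cpu < 0 means the boxes had no owner yet;
 * new_cpu < 0 just detaches them without migrating events.
 */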
static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
				   int new_cpu)
{
	struct intel_uncore_pmu *pmu = type->pmus;
	struct intel_uncore_box *box;
	int i, die;

	die = topology_logical_die_id(old_cpu < 0 ? new_cpu : old_cpu);
	for (i = 0; i < type->num_boxes; i++, pmu++) {
		box = pmu->boxes[die];
		if (!box)
			continue;

		if (old_cpu < 0) {
			WARN_ON_ONCE(box->cpu != -1);
			box->cpu = new_cpu;
			continue;
		}

		WARN_ON_ONCE(box->cpu != old_cpu);
		box->cpu = -1;
		if (new_cpu < 0)
			continue;

		uncore_pmu_cancel_hrtimer(box);
		perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu);
		box->cpu = new_cpu;
	}
}

static void uncore_change_context(struct intel_uncore_type **uncores,
				  int old_cpu, int new_cpu)
{
	for (; *uncores; uncores++)
		uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
}

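/*
 * Drop one reference on every box of the given types for this die id and
 * tear a box down when its last reference goes away.
 */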
static void uncore_box_unref(struct intel_uncore_type **types, int id)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i;

	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			box = pmu->boxes[id];
			if (box && atomic_dec_return(&box->refcnt) == 0)
				uncore_box_exit(box);
		}
	}
}

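/*
 * Offline path: if this CPU was collecting uncore events, hand collection
 * over to another CPU on the same die (if any), then drop the die's box
 * references.
 */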
static int uncore_event_cpu_offline(unsigned int cpu)
{
	int die, target;

	/* Check if exiting cpu is used for collecting uncore events */
	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
		goto unref;
	/* Find a new cpu to collect uncore events */
	target = cpumask_any_but(topology_die_cpumask(cpu), cpu);

	/* Migrate uncore events to the new target */
	if (target < nr_cpu_ids)
		cpumask_set_cpu(target, &uncore_cpu_mask);
	else
		target = -1;

	uncore_change_context(uncore_msr_uncores, cpu, target);
	uncore_change_context(uncore_mmio_uncores, cpu, target);
	uncore_change_context(uncore_pci_uncores, cpu, target);

unref:
	/* Clear the references */
	die = topology_logical_die_id(cpu);
	uncore_box_unref(uncore_msr_uncores, die);
	uncore_box_unref(uncore_mmio_uncores, die);
	return 0;
}

static int allocate_boxes(struct intel_uncore_type **types,
			  unsigned int die, unsigned int cpu)
{
	struct intel_uncore_box *box, *tmp;
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	LIST_HEAD(allocated);
	int i;

	/* Try to allocate all required boxes */
	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			if (pmu->boxes[die])
				continue;
			box = uncore_alloc_box(type, cpu_to_node(cpu));
			if (!box)
				goto cleanup;
			box->pmu = pmu;
			box->dieid = die;
			list_add(&box->active_list, &allocated);
		}
	}
	/* Install them in the pmus */
	list_for_each_entry_safe(box, tmp, &allocated, active_list) {
		list_del_init(&box->active_list);
		box->pmu->boxes[die] = box;
	}
	return 0;

cleanup:
	list_for_each_entry_safe(box, tmp, &allocated, active_list) {
		list_del_init(&box->active_list);
		kfree(box);
	}
	return -ENOMEM;
}

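/*
 * Take a reference on every box of the given types for this die id,
 * allocating the boxes first if needed; the first reference initializes
 * the box.
 */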
static int uncore_box_ref(struct intel_uncore_type **types,
			  int id, unsigned int cpu)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, ret;

	ret = allocate_boxes(types, id, cpu);
	if (ret)
		return ret;

	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			box = pmu->boxes[id];
			if (box && atomic_inc_return(&box->refcnt) == 1)
				uncore_box_init(box);
		}
	}
	return 0;
}

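/*
 * Online path: grab box references for this CPU's die and, if no other CPU
 * on the die collects uncore events yet, make this CPU the collector.
 */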
static int uncore_event_cpu_online(unsigned int cpu)
{
	int die, target, msr_ret, mmio_ret;

	die = topology_logical_die_id(cpu);
	msr_ret = uncore_box_ref(uncore_msr_uncores, die, cpu);
	mmio_ret = uncore_box_ref(uncore_mmio_uncores, die, cpu);
	if (msr_ret && mmio_ret)
		return -ENOMEM;

	/*
	 * Check if there is an online cpu in the package
	 * which collects uncore events already.
	 */
	target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu));
	if (target < nr_cpu_ids)
		return 0;

	cpumask_set_cpu(cpu, &uncore_cpu_mask);

	if (!msr_ret)
		uncore_change_context(uncore_msr_uncores, -1, cpu);
	if (!mmio_ret)
		uncore_change_context(uncore_mmio_uncores, -1, cpu);
	uncore_change_context(uncore_pci_uncores, -1, cpu);
	return 0;
}

static int __init type_pmu_register(struct intel_uncore_type *type)
{
	int i, ret;

	for (i = 0; i < type->num_boxes; i++) {
		ret = uncore_pmu_register(&type->pmus[i]);
		if (ret)
			return ret;
	}
	return 0;
}

static int __init uncore_msr_pmus_register(void)
{
	struct intel_uncore_type **types = uncore_msr_uncores;
	int ret;

	for (; *types; types++) {
		ret = type_pmu_register(*types);
		if (ret)
			return ret;
	}
	return 0;
}

static int __init uncore_cpu_init(void)
{
	int ret;

	ret = uncore_types_init(uncore_msr_uncores, true);
	if (ret)
		goto err;

	ret = uncore_msr_pmus_register();
	if (ret)
		goto err;
	return 0;
err:
	uncore_types_exit(uncore_msr_uncores);
	uncore_msr_uncores = empty_uncore;
	return ret;
}

static int __init uncore_mmio_init(void)
{
	struct intel_uncore_type **types = uncore_mmio_uncores;
	int ret;

	ret = uncore_types_init(types, true);
	if (ret)
		goto err;

	for (; *types; types++) {
		ret = type_pmu_register(*types);
		if (ret)
			goto err;
	}
	return 0;
err:
	uncore_types_exit(uncore_mmio_uncores);
	uncore_mmio_uncores = empty_uncore;
	return ret;
}

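/*
 * Per-platform uncore init callbacks; each platform supplies hooks for its
 * MSR-based, PCI-based and/or MMIO-based uncore units.
 */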
struct intel_uncore_init_fun {
	void	(*cpu_init)(void);
	int	(*pci_init)(void);
	void	(*mmio_init)(void);
};

static const struct intel_uncore_init_fun nhm_uncore_init __initconst = {
	.cpu_init = nhm_uncore_cpu_init,
};

static const struct intel_uncore_init_fun snb_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = snb_uncore_pci_init,
};

static const struct intel_uncore_init_fun ivb_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = ivb_uncore_pci_init,
};

static const struct intel_uncore_init_fun hsw_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = hsw_uncore_pci_init,
};

static const struct intel_uncore_init_fun bdw_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = bdw_uncore_pci_init,
};

static const struct intel_uncore_init_fun snbep_uncore_init __initconst = {
	.cpu_init = snbep_uncore_cpu_init,
	.pci_init = snbep_uncore_pci_init,
};

static const struct intel_uncore_init_fun nhmex_uncore_init __initconst = {
	.cpu_init = nhmex_uncore_cpu_init,
};

static const struct intel_uncore_init_fun ivbep_uncore_init __initconst = {
	.cpu_init = ivbep_uncore_cpu_init,
	.pci_init = ivbep_uncore_pci_init,
};

static const struct intel_uncore_init_fun hswep_uncore_init __initconst = {
	.cpu_init = hswep_uncore_cpu_init,
	.pci_init = hswep_uncore_pci_init,
};

static const struct intel_uncore_init_fun bdx_uncore_init __initconst = {
	.cpu_init = bdx_uncore_cpu_init,
	.pci_init = bdx_uncore_pci_init,
};

static const struct intel_uncore_init_fun knl_uncore_init __initconst = {
	.cpu_init = knl_uncore_cpu_init,
	.pci_init = knl_uncore_pci_init,
};

static const struct intel_uncore_init_fun skl_uncore_init __initconst = {
	.cpu_init = skl_uncore_cpu_init,
	.pci_init = skl_uncore_pci_init,
};

static const struct intel_uncore_init_fun skx_uncore_init __initconst = {
	.cpu_init = skx_uncore_cpu_init,
	.pci_init = skx_uncore_pci_init,
};

static const struct intel_uncore_init_fun icl_uncore_init __initconst = {
	.cpu_init = icl_uncore_cpu_init,
	.pci_init = skl_uncore_pci_init,
};

static const struct intel_uncore_init_fun tgl_uncore_init __initconst = {
	.cpu_init = tgl_uncore_cpu_init,
	.mmio_init = tgl_uncore_mmio_init,
};

static const struct intel_uncore_init_fun tgl_l_uncore_init __initconst = {
	.cpu_init = tgl_uncore_cpu_init,
	.mmio_init = tgl_l_uncore_mmio_init,
};

static const struct intel_uncore_init_fun rkl_uncore_init __initconst = {
	.cpu_init = tgl_uncore_cpu_init,
	.pci_init = skl_uncore_pci_init,
};

static const struct intel_uncore_init_fun adl_uncore_init __initconst = {
	.cpu_init = adl_uncore_cpu_init,
	.mmio_init = tgl_uncore_mmio_init,
};

static const struct intel_uncore_init_fun icx_uncore_init __initconst = {
	.cpu_init = icx_uncore_cpu_init,
	.pci_init = icx_uncore_pci_init,
	.mmio_init = icx_uncore_mmio_init,
};

static const struct intel_uncore_init_fun snr_uncore_init __initconst = {
	.cpu_init = snr_uncore_cpu_init,
	.pci_init = snr_uncore_pci_init,
	.mmio_init = snr_uncore_mmio_init,
};

static const struct intel_uncore_init_fun generic_uncore_init __initconst = {
	.cpu_init = intel_uncore_generic_uncore_cpu_init,
	.pci_init = intel_uncore_generic_uncore_pci_init,
	.mmio_init = intel_uncore_generic_uncore_mmio_init,
};

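Both tables above fill in the same small per-platform dispatch structure.
Its shape can be inferred from the initializers and from how
intel_uncore_init() below calls the hooks; a sketch of the assumed
definition (the real one lives earlier in this file):

/*
 * Assumed shape, inferred from usage: each hook is optional, and only the
 * PCI hook returns a status that the caller checks before probing PCI
 * uncore devices.
 */
struct intel_uncore_init_fun {
	void	(*cpu_init)(void);	/* set up MSR-based uncore PMUs  */
	int	(*pci_init)(void);	/* set up PCI-based uncore PMUs  */
	void	(*mmio_init)(void);	/* set up MMIO-based uncore PMUs */
};
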
perf/x86/intel/uncore: Add Ice Lake server uncore support

The uncore subsystem in Ice Lake server is similar to the previous server.
There are some differences in config register encoding and PCI device
IDs. The uncore PMON units in Ice Lake server include Ubox, Chabox, IIO,
IRP, M2PCIE, PCU, M2M, PCIE3 and IMC.
- For CHA, the filter 1 register has been removed. The filter 0 register
  can be used by any CHA event to filter by Thread/Core-ID. To do so,
  the control register's tid_en bit must be set to 1.
- For IIO, there are some changes to event constraints. The MSR address
  and MSR offsets among counters are also changed.
- For IRP, the MSR address and MSR offsets among counters are changed.
- For M2PCIE, the counters are accessed by MSR now. Add the new MSR
  address and MSR offsets, and change the event constraints.
- To determine the number of CHAs, CAPID6 (low) and CAPID7 (high) have
  to be read now.
- For M2M, update the PCICFG address and device ID.
- For UPI, update the PCICFG address, device ID and counter address.
- For M3UPI, update the PCICFG address, device ID, counter address and
  event constraints.
- For IMC, update the formula used to calculate the MMIO BAR address,
  which is MMIO_BASE + a specific MEM_BAR offset.

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lkml.kernel.org/r/1585842411-150452-1-git-send-email-kan.liang@linux.intel.com

static const struct x86_cpu_id intel_uncore_match[] __initconst = {
	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP, &nhm_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM, &nhm_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(WESTMERE, &nhm_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EP, &nhm_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE, &snb_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE, &ivb_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(HASWELL, &hsw_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_L, &hsw_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_G, &hsw_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL, &bdw_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G, &bdw_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X, &snbep_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EX, &nhmex_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EX, &nhmex_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &ivbep_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &hswep_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &bdx_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &bdx_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &knl_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &knl_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &skl_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &skl_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &skx_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &skl_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &skl_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &skl_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &skl_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &icl_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_NNPI, &icl_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, &icl_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &icx_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &icx_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &tgl_l_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &tgl_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, &rkl_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &adl_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &adl_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &snr_uncore_init),
	{},
};
MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match);

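The table is consumed in two ways: x86_match_cpu() picks the entry matching
the boot CPU at init time, and MODULE_DEVICE_TABLE(x86cpu, ...) exports the
same list so userspace module loading can bind the driver to matching CPU
models. A small, self-contained sketch of the pattern, with hypothetical
names and a string instead of an init-function pointer in driver_data:

#include <linux/init.h>
#include <linux/printk.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>

/* Hypothetical table: driver_data can carry any pointer-sized payload. */
static const struct x86_cpu_id demo_match[] __initconst = {
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, "skx"),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, "icx"),
	{},
};

static int __init demo_init(void)
{
	const struct x86_cpu_id *id = x86_match_cpu(demo_match);

	if (!id)
		return -ENODEV;	/* boot CPU is not one of the listed models */

	pr_info("demo: matched %s\n", (const char *)id->driver_data);
	return 0;
}
/* A real module would wire this up with module_init(demo_init). */

intel_uncore_init() below does exactly this lookup, except that driver_data
points at one of the intel_uncore_init_fun bundles defined above.
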
static int __init intel_uncore_init(void)
{
	const struct x86_cpu_id *id;
	struct intel_uncore_init_fun *uncore_init;
	int pret = 0, cret = 0, mret = 0, ret;

	/* Uncore PMUs are generally not exposed to guests; bail out early. */
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return -ENODEV;

	__uncore_max_dies =
		topology_max_packages() * topology_max_die_per_package();

	/*
	 * Prefer the hardcoded per-model support. If the CPU model is not
	 * listed, fall back to the discovery-table based generic support,
	 * unless discovery has been disabled.
	 */
	id = x86_match_cpu(intel_uncore_match);
	if (!id) {
		if (!uncore_no_discover && intel_uncore_has_discovery_tables())
			uncore_init = (struct intel_uncore_init_fun *)&generic_uncore_init;
		else
			return -ENODEV;
	} else
		uncore_init = (struct intel_uncore_init_fun *)id->driver_data;

	if (uncore_init->pci_init) {
		pret = uncore_init->pci_init();
		if (!pret)
			pret = uncore_pci_init();
	}

	if (uncore_init->cpu_init) {
		uncore_init->cpu_init();
		cret = uncore_cpu_init();
	}

	if (uncore_init->mmio_init) {
		uncore_init->mmio_init();
		mret = uncore_mmio_init();
	}

	/* Give up only if MSR, PCI and MMIO support all failed to initialize. */
	if (cret && pret && mret) {
		ret = -ENODEV;
		goto free_discovery;
	}

	/* Install hotplug callbacks to setup the targets for each package */
	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
				"perf/x86/intel/uncore:online",
				uncore_event_cpu_online,
				uncore_event_cpu_offline);
	if (ret)
		goto err;
	return 0;

err:
	uncore_types_exit(uncore_msr_uncores);
	uncore_types_exit(uncore_mmio_uncores);
	uncore_pci_exit();
free_discovery:
	intel_uncore_clear_discovery_tables();
	return ret;
}
module_init(intel_uncore_init);

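intel_uncore_init() above relies on cpuhp_setup_state() so that each package
keeps a designated event-reader CPU across hotplug. The same registration
pattern, shown with hypothetical callbacks and a dynamically allocated state
instead of the fixed CPUHP_AP_PERF_X86_UNCORE_ONLINE slot:

#include <linux/cpuhotplug.h>
#include <linux/printk.h>

/*
 * Hypothetical callbacks: the "online" callback runs for every CPU as it
 * comes up (and once per already-online CPU at registration time); the
 * "offline" callback runs before a CPU is taken down.
 */
static int demo_cpu_online(unsigned int cpu)
{
	pr_info("demo: CPU %u online\n", cpu);
	return 0;
}

static int demo_cpu_offline(unsigned int cpu)
{
	pr_info("demo: CPU %u going offline\n", cpu);
	return 0;
}

static int demo_register_hotplug(void)
{
	/* CPUHP_AP_ONLINE_DYN allocates a state slot; returns >= 0 on success. */
	int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:online",
				    demo_cpu_online, demo_cpu_offline);

	return ret < 0 ? ret : 0;
}
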
static void __exit intel_uncore_exit(void)
{
	cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
	uncore_types_exit(uncore_msr_uncores);
	uncore_types_exit(uncore_mmio_uncores);
	uncore_pci_exit();
	intel_uncore_clear_discovery_tables();
}
module_exit(intel_uncore_exit);