#include "uncore.h"

static struct intel_uncore_type *empty_uncore[] = { NULL, };
struct intel_uncore_type **uncore_msr_uncores = empty_uncore;
struct intel_uncore_type **uncore_pci_uncores = empty_uncore;

static bool pcidrv_registered;
struct pci_driver *uncore_pci_driver;
/* pci bus to socket mapping */
DEFINE_RAW_SPINLOCK(pci2phy_map_lock);
struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head);
struct pci_extra_dev *uncore_extra_pci_dev;
static int max_packages;

/* mask of cpus that collect uncore events */
static cpumask_t uncore_cpu_mask;

/* constraint for the fixed counter */
static struct event_constraint uncore_constraint_fixed =
	EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
struct event_constraint uncore_constraint_empty =
	EVENT_CONSTRAINT(0, 0, 0);

static int uncore_pcibus_to_physid(struct pci_bus *bus)
{
	struct pci2phy_map *map;
	int phys_id = -1;

	raw_spin_lock(&pci2phy_map_lock);
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == pci_domain_nr(bus)) {
			phys_id = map->pbus_to_physid[bus->number];
			break;
		}
	}
	raw_spin_unlock(&pci2phy_map_lock);

	return phys_id;
}

static void uncore_free_pcibus_map(void)
{
	struct pci2phy_map *map, *tmp;

	list_for_each_entry_safe(map, tmp, &pci2phy_map_head, list) {
		list_del(&map->list);
		kfree(map);
	}
}

struct pci2phy_map *__find_pci2phy_map(int segment)
{
	struct pci2phy_map *map, *alloc = NULL;
	int i;

	lockdep_assert_held(&pci2phy_map_lock);

lookup:
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == segment)
			goto end;
	}

	if (!alloc) {
		raw_spin_unlock(&pci2phy_map_lock);
		alloc = kmalloc(sizeof(struct pci2phy_map), GFP_KERNEL);
		raw_spin_lock(&pci2phy_map_lock);

		if (!alloc)
			return NULL;

		goto lookup;
	}

	map = alloc;
	alloc = NULL;
	map->segment = segment;
	for (i = 0; i < 256; i++)
		map->pbus_to_physid[i] = -1;
	list_add_tail(&map->list, &pci2phy_map_head);

end:
	kfree(alloc);
	return map;
}

ssize_t uncore_event_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	struct uncore_event_desc *event =
		container_of(attr, struct uncore_event_desc, attr);
	return sprintf(buf, "%s", event->config);
}
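
/*
 * Uncore boxes are tracked per logical package; return the box of this
 * pmu that serves the package containing @cpu.
 */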
struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
{
	return pmu->boxes[topology_logical_package_id(cpu)];
}

u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 count;

	rdmsrl(event->hw.event_base, count);

	return count;
}

/*
 * generic get constraint function for shared match/mask registers.
 */
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	unsigned long flags;
	bool ok = false;

	/*
	 * reg->alloc can be set due to existing state, so for fake box we
	 * need to ignore this, otherwise we might fail to allocate proper
	 * fake state for this extra reg constraint.
	 */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;

	er = &box->shared_regs[reg1->idx];
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!atomic_read(&er->ref) ||
	    (er->config1 == reg1->config && er->config2 == reg2->config)) {
		atomic_inc(&er->ref);
		er->config1 = reg1->config;
		er->config2 = reg2->config;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (ok) {
		if (!uncore_box_is_fake(box))
			reg1->alloc = 1;
		return NULL;
	}

	return &uncore_constraint_empty;
}

void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;

	/*
	 * Only put constraint if extra reg was actually allocated. Also
	 * takes care of events which do not use an extra shared reg.
	 *
	 * Also, if this is a fake box we shouldn't touch any event state
	 * (reg->alloc) and we don't care about leaving inconsistent box
	 * state either since it will be thrown out.
	 */
	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	er = &box->shared_regs[reg1->idx];
	atomic_dec(&er->ref);
	reg1->alloc = 0;
}

u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	u64 config;

	er = &box->shared_regs[idx];

	raw_spin_lock_irqsave(&er->lock, flags);
	config = er->config;
	raw_spin_unlock_irqrestore(&er->lock, flags);

	return config;
}
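
/*
 * Program the event's base registers for the assigned counter index: the
 * fixed counter uses the box's fixed control/counter pair, all other
 * indices use the per-counter event control and counter registers.
 */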
static void uncore_assign_hw_event(struct intel_uncore_box *box,
				   struct perf_event *event, int idx)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = idx;
	hwc->last_tag = ++box->tags[idx];

	if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
		hwc->event_base = uncore_fixed_ctr(box);
		hwc->config_base = uncore_fixed_ctl(box);
		return;
	}

	hwc->config_base = uncore_event_ctl(box, hwc->idx);
	hwc->event_base = uncore_perf_ctr(box, hwc->idx);
}
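
/*
 * Read the current counter value and accumulate the counter-width
 * corrected delta into event->count. The read is retried if the hrtimer
 * callback updated hw.prev_count concurrently.
 */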
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 prev_count, new_count, delta;
	int shift;

	if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
		shift = 64 - uncore_fixed_ctr_bits(box);
	else
		shift = 64 - uncore_perf_ctr_bits(box);

	/* the hrtimer might modify the previous event value */
again:
	prev_count = local64_read(&event->hw.prev_count);
	new_count = uncore_read_counter(box, event);
	if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
		goto again;

	delta = (new_count << shift) - (prev_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
}

/*
 * The overflow interrupt is unavailable for SandyBridge-EP, is broken
 * for SandyBridge. So we use hrtimer to periodically poll the counter
 * to avoid overflow.
 */
static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
{
	struct intel_uncore_box *box;
	struct perf_event *event;
	unsigned long flags;
	int bit;

	box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
	if (!box->n_active || box->cpu != smp_processor_id())
		return HRTIMER_NORESTART;
	/*
	 * disable local interrupts to prevent uncore_pmu_event_start/stop
	 * from interrupting the update process
	 */
	local_irq_save(flags);

	/*
	 * handle boxes with an active event list as opposed to active
	 * counters
	 */
	list_for_each_entry(event, &box->active_list, active_entry) {
		uncore_perf_event_update(box, event);
	}

	for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
		uncore_perf_event_update(box, box->events[bit]);

	local_irq_restore(flags);

	hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
	return HRTIMER_RESTART;
}

void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration),
		      HRTIMER_MODE_REL_PINNED);
}

void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_cancel(&box->hrtimer);
}

static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	box->hrtimer.function = uncore_pmu_hrtimer;
}
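
/*
 * Allocate a box together with its trailing array of shared extra
 * registers on the requested NUMA node and set up its defaults.
 */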
static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
						 int node)
{
	int i, size, numshared = type->num_shared_regs;
	struct intel_uncore_box *box;

	size = sizeof(*box) + numshared * sizeof(struct intel_uncore_extra_reg);

	box = kzalloc_node(size, GFP_KERNEL, node);
	if (!box)
		return NULL;

	for (i = 0; i < numshared; i++)
		raw_spin_lock_init(&box->shared_regs[i].lock);

	uncore_pmu_init_hrtimer(box);
	box->cpu = -1;
	box->pci_phys_id = -1;
	box->pkgid = -1;

	/* set default hrtimer timeout */
	box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;

	INIT_LIST_HEAD(&box->active_list);

	return box;
}

/*
 * Using uncore_pmu_event_init pmu event_init callback
 * as a detection point for uncore events.
 */
static int uncore_pmu_event_init(struct perf_event *event);

static bool is_uncore_event(struct perf_event *event)
{
	return event->pmu->event_init == uncore_pmu_event_init;
}
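
/*
 * Collect the group leader and, when @dogrp is set, its uncore siblings
 * into box->event_list. The list is bounded by the number of generic
 * counters plus the fixed counter, if the box has one.
 */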
static int
uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
		      bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = box->pmu->type->num_counters;
	if (box->pmu->type->fixed_ctl)
		max_count++;

	if (box->n_events >= max_count)
		return -EINVAL;

	n = box->n_events;

	if (is_uncore_event(leader)) {
		box->event_list[n] = leader;
		n++;
	}

	if (!dogrp)
		return n;

	list_for_each_entry(event, &leader->sibling_list, group_entry) {
		if (!is_uncore_event(event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -EINVAL;

		box->event_list[n] = event;
		n++;
	}
	return n;
}

static struct event_constraint *
uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct event_constraint *c;

	if (type->ops->get_constraint) {
		c = type->ops->get_constraint(box, event);
		if (c)
			return c;
	}

	if (event->attr.config == UNCORE_FIXED_EVENT)
		return &uncore_constraint_fixed;

	if (type->constraints) {
		for_each_event_constraint(c, type->constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &type->unconstrainted;
}

static void uncore_put_event_constraint(struct intel_uncore_box *box,
					struct perf_event *event)
{
	if (box->pmu->type->ops->put_constraint)
		box->pmu->type->ops->put_constraint(box, event);
}
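
/*
 * Schedule the collected events onto counters: the fast path keeps events
 * on the counter they already occupy when their constraint still allows
 * it, otherwise perf_assign_events() computes a full assignment. On
 * failure, or when only validating (@assign == NULL), all constraints
 * taken above are released again.
 */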
static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
{
	unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	struct event_constraint *c;
	int i, wmin, wmax, ret = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);

	for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
		c = uncore_get_event_constraint(box, box->event_list[i]);
		box->event_constraint[i] = c;
		wmin = min(wmin, c->weight);
		wmax = max(wmax, c->weight);
	}

	/* fastpath, try to reuse previous register */
	for (i = 0; i < n; i++) {
		hwc = &box->event_list[i]->hw;
		c = box->event_constraint[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}
	/* slow path */
	if (i != n)
		ret = perf_assign_events(box->event_constraint, n,
					 wmin, wmax, n, assign);

	if (!assign || ret) {
		for (i = 0; i < n; i++)
			uncore_put_event_constraint(box, box->event_list[i]);
	}
	return ret ? -EINVAL : 0;
}
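
/*
 * Start counting on the event's assigned counter. The first active event
 * also enables the box and starts the hrtimer that polls the counters.
 */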
static void uncore_pmu_event_start(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int idx = event->hw.idx;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
		return;

	event->hw.state = 0;
	box->events[idx] = event;
	box->n_active++;
	__set_bit(idx, box->active_mask);

	local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
	uncore_enable_event(box, event);

	if (box->n_active == 1) {
		uncore_enable_box(box);
		uncore_pmu_start_hrtimer(box);
	}
}

static void uncore_pmu_event_stop(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
		uncore_disable_event(box, event);
		box->n_active--;
		box->events[hwc->idx] = NULL;
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;

		if (box->n_active == 0) {
			uncore_disable_box(box);
			uncore_pmu_cancel_hrtimer(box);
		}
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		uncore_perf_event_update(box, event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}
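
/*
 * Add the event to the box, reschedule all collected events onto
 * counters, stop the ones that have to move, and (re)start everything
 * that is not explicitly kept stopped.
 */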
static int uncore_pmu_event_add(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;
	int assign[UNCORE_PMC_IDX_MAX];
	int i, n, ret;

	if (!box)
		return -ENODEV;

	ret = n = uncore_collect_events(box, event, false);
	if (ret < 0)
		return ret;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	ret = uncore_assign_events(box, assign, n);
	if (ret)
		return ret;

	/* save events moving to new counters */
	for (i = 0; i < box->n_events; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx == assign[i] &&
		    hwc->last_tag == box->tags[assign[i]])
			continue;
		/*
		 * Ensure we don't accidentally enable a stopped
		 * counter simply because we rescheduled.
		 */
		if (hwc->state & PERF_HES_STOPPED)
			hwc->state |= PERF_HES_ARCH;

		uncore_pmu_event_stop(event, PERF_EF_UPDATE);
	}

	/* reprogram moved events into new counters */
	for (i = 0; i < n; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx != assign[i] ||
		    hwc->last_tag != box->tags[assign[i]])
			uncore_assign_hw_event(box, event, assign[i]);
		else if (i < box->n_events)
			continue;

		if (hwc->state & PERF_HES_ARCH)
			continue;

		uncore_pmu_event_start(event, 0);
	}
	box->n_events = n;

	return 0;
}

static void uncore_pmu_event_del(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int i;

	uncore_pmu_event_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < box->n_events; i++) {
		if (event == box->event_list[i]) {
			uncore_put_event_constraint(box, event);

			for (++i; i < box->n_events; i++)
				box->event_list[i - 1] = box->event_list[i];

			--box->n_events;
			break;
		}
	}

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
}

void uncore_pmu_event_read(struct perf_event *event)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	uncore_perf_event_update(box, event);
}

/*
 * validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 */
static int uncore_validate_group(struct intel_uncore_pmu *pmu,
				 struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct intel_uncore_box *fake_box;
	int ret = -EINVAL, n;

	fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
	if (!fake_box)
		return -ENOMEM;

	fake_box->pmu = pmu;
	/*
	 * the event is not yet connected with its
	 * siblings therefore we must first collect
	 * existing siblings, then add the new event
	 * before we can simulate the scheduling
	 */
	n = uncore_collect_events(fake_box, leader, true);
	if (n < 0)
		goto out;

	fake_box->n_events = n;
	n = uncore_collect_events(fake_box, event, false);
	if (n < 0)
		goto out;

	fake_box->n_events = n;

	ret = uncore_assign_events(fake_box, NULL, n);
out:
	kfree(fake_box);
	return ret;
}
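
/*
 * Validate and set up a new uncore event: reject unsupported attributes
 * (sampling, exclude bits), bind the event to the CPU that collects
 * events for its package, and let the type's hw_config callback check
 * the event encoding.
 */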
static int uncore_pmu_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	int ret;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/*
	 * Uncore PMUs measure at all privilege levels all the time,
	 * so it doesn't make sense to specify any exclude bits.
	 */
	if (event->attr.exclude_user || event->attr.exclude_kernel ||
	    event->attr.exclude_hv || event->attr.exclude_idle)
		return -EINVAL;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;
	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;
	event->cpu = box->cpu;
	event->pmu_private = box;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;

	if (event->attr.config == UNCORE_FIXED_EVENT) {
		/* no fixed counter */
		if (!pmu->type->fixed_ctl)
			return -EINVAL;
		/*
		 * if there is only one fixed counter, only the first pmu
		 * can access the fixed counter
		 */
		if (pmu->type->single_fixed && pmu->pmu_idx > 0)
			return -EINVAL;

		/* fixed counters have event field hardcoded to zero */
		hwc->config = 0ULL;
	} else {
		hwc->config = event->attr.config & pmu->type->event_mask;
		if (pmu->type->ops->hw_config) {
			ret = pmu->type->ops->hw_config(box, event);
			if (ret)
				return ret;
		}
	}

	if (event->group_leader != event)
		ret = uncore_validate_group(pmu, event);
	else
		ret = 0;

	return ret;
}

static ssize_t uncore_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &uncore_cpu_mask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);

static struct attribute *uncore_pmu_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group uncore_pmu_attr_group = {
	.attrs = uncore_pmu_attrs,
};
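
/*
 * Register one perf PMU for this box index. Types that supply their own
 * struct pmu template are copied, everything else gets the generic uncore
 * callbacks; the PMU name is derived from the type name and, for
 * multi-box types, the box index.
 */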
static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
{
	int ret;

	if (!pmu->type->pmu) {
		pmu->pmu = (struct pmu) {
			.attr_groups = pmu->type->attr_groups,
			.task_ctx_nr = perf_invalid_context,
			.event_init = uncore_pmu_event_init,
			.add = uncore_pmu_event_add,
			.del = uncore_pmu_event_del,
			.start = uncore_pmu_event_start,
			.stop = uncore_pmu_event_stop,
			.read = uncore_pmu_event_read,
		};
	} else {
		pmu->pmu = *pmu->type->pmu;
		pmu->pmu.attr_groups = pmu->type->attr_groups;
	}

	if (pmu->type->num_boxes == 1) {
		if (strlen(pmu->type->name) > 0)
			sprintf(pmu->name, "uncore_%s", pmu->type->name);
		else
			sprintf(pmu->name, "uncore");
	} else {
		sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
			pmu->pmu_idx);
	}

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	if (!ret)
		pmu->registered = true;
	return ret;
}

static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
{
	if (!pmu->registered)
		return;
	perf_pmu_unregister(&pmu->pmu);
	pmu->registered = false;
}

static void __init __uncore_exit_boxes(struct intel_uncore_type *type, int cpu)
{
	struct intel_uncore_pmu *pmu = type->pmus;
	struct intel_uncore_box *box;
	int i, pkg;

	if (pmu) {
		pkg = topology_physical_package_id(cpu);
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			box = pmu->boxes[pkg];
			if (box)
				uncore_box_exit(box);
		}
	}
}

static void __init uncore_exit_boxes(void *dummy)
{
	struct intel_uncore_type **types;

	for (types = uncore_msr_uncores; *types; types++)
		__uncore_exit_boxes(*types++, smp_processor_id());
}

static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
{
	int pkg;

	for (pkg = 0; pkg < max_packages; pkg++)
		kfree(pmu->boxes[pkg]);
	kfree(pmu->boxes);
}

static void __init uncore_type_exit(struct intel_uncore_type *type)
{
	struct intel_uncore_pmu *pmu = type->pmus;
	int i;

	if (pmu) {
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			uncore_pmu_unregister(pmu);
			uncore_free_boxes(pmu);
		}
		kfree(type->pmus);
		type->pmus = NULL;
	}
	kfree(type->events_group);
	type->events_group = NULL;
}

static void __init uncore_types_exit(struct intel_uncore_type **types)
{
	for (; *types; types++)
		uncore_type_exit(*types);
}
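
/*
 * Allocate one intel_uncore_pmu per box of this type, each with a
 * per-package array of box pointers, set up the unconstrained event
 * constraint, and, when the type provides event descriptions, build the
 * "events" sysfs attribute group from them.
 */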
static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
{
	struct intel_uncore_pmu *pmus;
	struct attribute_group *attr_group;
	struct attribute **attrs;
	size_t size;
	int i, j;

	pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
	if (!pmus)
		return -ENOMEM;

	size = max_packages * sizeof(struct intel_uncore_box *);

	for (i = 0; i < type->num_boxes; i++) {
		pmus[i].func_id = setid ? i : -1;
		pmus[i].pmu_idx = i;
		pmus[i].type = type;
		pmus[i].boxes = kzalloc(size, GFP_KERNEL);
		if (!pmus[i].boxes)
			return -ENOMEM;
	}

	type->pmus = pmus;
	type->unconstrainted = (struct event_constraint)
		__EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
				   0, type->num_counters, 0, 0);

	if (type->event_descs) {
		for (i = 0; type->event_descs[i].attr.attr.name; i++);

		attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
				     sizeof(*attr_group), GFP_KERNEL);
		if (!attr_group)
			return -ENOMEM;

		attrs = (struct attribute **)(attr_group + 1);
		attr_group->name = "events";
		attr_group->attrs = attrs;

		for (j = 0; j < i; j++)
			attrs[j] = &type->event_descs[j].attr.attr;

		type->events_group = attr_group;
	}

	type->pmu_group = &uncore_pmu_attr_group;
	return 0;
}

static int __init
uncore_types_init(struct intel_uncore_type **types, bool setid)
{
	int ret;

	for (; *types; types++) {
		ret = uncore_type_init(*types, setid);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * add a pci uncore device
 */
static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int phys_id, pkg, ret;

	phys_id = uncore_pcibus_to_physid(pdev->bus);
	if (phys_id < 0)
		return -ENODEV;

	pkg = topology_phys_to_logical_pkg(phys_id);
	if (WARN_ON_ONCE(pkg < 0))
		return -EINVAL;

	if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
		int idx = UNCORE_PCI_DEV_IDX(id->driver_data);

		uncore_extra_pci_dev[pkg].dev[idx] = pdev;
		pci_set_drvdata(pdev, NULL);
		return 0;
	}

	type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
	/*
	 * for performance monitoring unit with multiple boxes,
	 * each box has a different function id.
	 */
	pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
	/*
	 * Knights Landing uses a common PCI device ID for multiple instances
	 * of an uncore PMU device type. There is only one entry per device
	 * type in the knl_uncore_pci_ids table in spite of multiple devices
	 * present for some device types. Hence PCI device idx would be 0 for
	 * all devices. So increment pmu pointer to point to an unused array
	 * element.
	 */
	if (boot_cpu_data.x86_model == 87) {
		while (pmu->func_id >= 0)
			pmu++;
	}

	if (WARN_ON_ONCE(pmu->boxes[pkg] != NULL))
		return -EINVAL;

	box = uncore_alloc_box(type, NUMA_NO_NODE);
	if (!box)
		return -ENOMEM;

	if (pmu->func_id < 0)
		pmu->func_id = pdev->devfn;
	else
		WARN_ON_ONCE(pmu->func_id != pdev->devfn);

	atomic_inc(&box->refcnt);
	box->pci_phys_id = phys_id;
	box->pkgid = pkg;
	box->pci_dev = pdev;
	box->pmu = pmu;
	uncore_box_init(box);
	pci_set_drvdata(pdev, box);

	pmu->boxes[pkg] = box;
	if (atomic_inc_return(&pmu->activeboxes) > 1)
		return 0;

	/* First active box registers the pmu */
	ret = uncore_pmu_register(pmu);
	if (ret) {
		pci_set_drvdata(pdev, NULL);
		pmu->boxes[pkg] = NULL;
		uncore_box_exit(box);
		kfree(box);
	}
	return ret;
}
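
/*
 * Tear down a pci uncore device: extra (non-PMU) devices are only removed
 * from the per-package lookup table, real boxes are detached from their
 * pmu and the pmu itself is unregistered once its last active box is gone.
 */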
static void uncore_pci_remove(struct pci_dev *pdev)
{
	struct intel_uncore_box *box = pci_get_drvdata(pdev);
	struct intel_uncore_pmu *pmu;
	int i, phys_id, pkg;

	phys_id = uncore_pcibus_to_physid(pdev->bus);
	pkg = topology_phys_to_logical_pkg(phys_id);

	box = pci_get_drvdata(pdev);
	if (!box) {
		for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
			if (uncore_extra_pci_dev[pkg].dev[i] == pdev) {
				uncore_extra_pci_dev[pkg].dev[i] = NULL;
				break;
			}
		}
		WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
		return;
	}

	pmu = box->pmu;
	if (WARN_ON_ONCE(phys_id != box->pci_phys_id))
		return;

	pci_set_drvdata(pdev, NULL);
	pmu->boxes[pkg] = NULL;
	if (atomic_dec_return(&pmu->activeboxes) == 0)
		uncore_pmu_unregister(pmu);
	uncore_box_exit(box);
	kfree(box);
}

static int __init uncore_pci_init(void)
{
	size_t size;
	int ret;

	switch (boot_cpu_data.x86_model) {
	case 45: /* Sandy Bridge-EP */
		ret = snbep_uncore_pci_init();
		break;
	case 62: /* Ivy Bridge-EP */
		ret = ivbep_uncore_pci_init();
		break;
	case 63: /* Haswell-EP */
		ret = hswep_uncore_pci_init();
		break;
	case 79: /* BDX-EP */
	case 86: /* BDX-DE */
		ret = bdx_uncore_pci_init();
		break;
	case 42: /* Sandy Bridge */
		ret = snb_uncore_pci_init();
		break;
	case 58: /* Ivy Bridge */
		ret = ivb_uncore_pci_init();
		break;
	case 60: /* Haswell */
	case 69: /* Haswell Celeron */
		ret = hsw_uncore_pci_init();
		break;
	case 61: /* Broadwell */
		ret = bdw_uncore_pci_init();
		break;
	case 87: /* Knights Landing */
		ret = knl_uncore_pci_init();
		break;
	case 94: /* SkyLake */
		ret = skl_uncore_pci_init();
		break;
	default:
		return -ENODEV;
	}

	if (ret)
		return ret;

	size = max_packages * sizeof(struct pci_extra_dev);
	uncore_extra_pci_dev = kzalloc(size, GFP_KERNEL);
	if (!uncore_extra_pci_dev) {
		ret = -ENOMEM;
		goto err;
	}

	ret = uncore_types_init(uncore_pci_uncores, false);
	if (ret)
		goto errtype;

	uncore_pci_driver->probe = uncore_pci_probe;
	uncore_pci_driver->remove = uncore_pci_remove;

	ret = pci_register_driver(uncore_pci_driver);
	if (ret)
		goto errtype;

	pcidrv_registered = true;
	return 0;

errtype:
	uncore_types_exit(uncore_pci_uncores);
	kfree(uncore_extra_pci_dev);
	uncore_extra_pci_dev = NULL;
	uncore_free_pcibus_map();
err:
	uncore_pci_uncores = empty_uncore;
	return ret;
}

static void __init uncore_pci_exit(void)
{
	if (pcidrv_registered) {
		pcidrv_registered = false;
		pci_unregister_driver(uncore_pci_driver);
		uncore_types_exit(uncore_pci_uncores);
		kfree(uncore_extra_pci_dev);
		uncore_free_pcibus_map();
	}
}
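
/*
 * Drop the CPU's reference on every box of its package and shut a box
 * down when its last reference goes away.
 */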
static void uncore_cpu_dying(int cpu)
{
	struct intel_uncore_type *type, **types = uncore_msr_uncores;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, pkg;

	pkg = topology_logical_package_id(cpu);
	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			box = pmu->boxes[pkg];
			if (box && atomic_dec_return(&box->refcnt) == 0)
				uncore_box_exit(box);
		}
	}
}

static void uncore_cpu_starting(int cpu, bool init)
{
	struct intel_uncore_type *type, **types = uncore_msr_uncores;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, pkg, ncpus = 1;

	if (init) {
		/*
		 * On init we get the number of online cpus in the package
		 * and set refcount for all of them.
		 */
		ncpus = cpumask_weight(topology_core_cpumask(cpu));
	}

	pkg = topology_logical_package_id(cpu);
	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			box = pmu->boxes[pkg];
			if (!box)
				continue;
			/* The first cpu on a package activates the box */
			if (atomic_add_return(ncpus, &box->refcnt) == ncpus)
				uncore_box_init(box);
		}
	}
}

static int uncore_cpu_prepare(int cpu)
{
	struct intel_uncore_type *type, **types = uncore_msr_uncores;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, pkg;

	pkg = topology_logical_package_id(cpu);
	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			if (pmu->boxes[pkg])
				continue;
			/* First cpu of a package allocates the box */
			box = uncore_alloc_box(type, cpu_to_node(cpu));
			if (!box)
				return -ENOMEM;
			box->pmu = pmu;
			box->pkgid = pkg;
			pmu->boxes[pkg] = box;
		}
	}
	return 0;
}
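
/*
 * Move the uncore context of every box of @type in the affected package
 * from @old_cpu to @new_cpu (either may be -1 when a package gains or
 * loses its designated event-collecting CPU).
 */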
static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
				   int new_cpu)
{
	struct intel_uncore_pmu *pmu = type->pmus;
	struct intel_uncore_box *box;
	int i, pkg;

	pkg = topology_logical_package_id(old_cpu < 0 ? new_cpu : old_cpu);
	for (i = 0; i < type->num_boxes; i++, pmu++) {
		box = pmu->boxes[pkg];
		if (!box)
			continue;

		if (old_cpu < 0) {
			WARN_ON_ONCE(box->cpu != -1);
			box->cpu = new_cpu;
			continue;
		}

		WARN_ON_ONCE(box->cpu != old_cpu);
		box->cpu = -1;
		if (new_cpu < 0)
			continue;

		uncore_pmu_cancel_hrtimer(box);
		perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu);
		box->cpu = new_cpu;
	}
}

static void uncore_change_context(struct intel_uncore_type **uncores,
				  int old_cpu, int new_cpu)
{
	for (; *uncores; uncores++)
		uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
}

static void uncore_event_exit_cpu(int cpu)
{
	int target;

	/* Check if exiting cpu is used for collecting uncore events */
	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
		return;

	/* Find a new cpu to collect uncore events */
	target = cpumask_any_but(topology_core_cpumask(cpu), cpu);

	/* Migrate uncore events to the new target */
	if (target < nr_cpu_ids)
		cpumask_set_cpu(target, &uncore_cpu_mask);
	else
		target = -1;

	uncore_change_context(uncore_msr_uncores, cpu, target);
	uncore_change_context(uncore_pci_uncores, cpu, target);
}

static void uncore_event_init_cpu(int cpu)
{
	int target;

	/*
	 * Check if there is an online cpu in the package
	 * which collects uncore events already.
	 */
	target = cpumask_any_and(&uncore_cpu_mask, topology_core_cpumask(cpu));
	if (target < nr_cpu_ids)
		return;

	cpumask_set_cpu(cpu, &uncore_cpu_mask);

	uncore_change_context(uncore_msr_uncores, -1, cpu);
	uncore_change_context(uncore_pci_uncores, -1, cpu);
}
static int uncore_cpu_notifier(struct notifier_block *self,
|
|
|
|
unsigned long action, void *hcpu)
|
2012-06-15 14:31:34 +08:00
|
|
|
{
|
|
|
|
unsigned int cpu = (long)hcpu;
|
|
|
|
|
|
|
|
switch (action & ~CPU_TASKS_FROZEN) {
|
|
|
|
case CPU_UP_PREPARE:
|
2016-02-23 06:19:16 +08:00
|
|
|
return notifier_from_errno(uncore_cpu_prepare(cpu));
|
|
|
|
|
2012-06-15 14:31:34 +08:00
|
|
|
case CPU_STARTING:
|
2016-02-23 06:19:16 +08:00
|
|
|
uncore_cpu_starting(cpu, false);
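/* fall through: a newly started CPU may also have to become its package's event collector */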
|
|
|
|
case CPU_DOWN_FAILED:
|
|
|
|
uncore_event_init_cpu(cpu);
|
2012-06-15 14:31:34 +08:00
|
|
|
break;
|
2016-02-23 06:19:16 +08:00
|
|
|
|
2012-06-15 14:31:34 +08:00
|
|
|
case CPU_UP_CANCELED:
|
|
|
|
case CPU_DYING:
|
|
|
|
uncore_cpu_dying(cpu);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case CPU_DOWN_PREPARE:
|
|
|
|
uncore_event_exit_cpu(cpu);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return NOTIFY_OK;
|
|
|
|
}
|
|
|
|
|
2013-06-19 06:23:59 +08:00
|
|
|
static struct notifier_block uncore_cpu_nb = {
|
2012-07-05 14:32:17 +08:00
|
|
|
.notifier_call = uncore_cpu_notifier,
|
2012-06-15 14:31:34 +08:00
|
|
|
/*
|
|
|
|
* to migrate uncore events, our notifier should be executed
|
|
|
|
* before perf core's notifier.
|
|
|
|
*/
|
2012-07-05 14:32:17 +08:00
|
|
|
.priority = CPU_PRI_PERF + 1,
|
2012-06-15 14:31:34 +08:00
|
|
|
};
|
|
|
|
|
2016-02-23 06:19:09 +08:00
|
|
|
static int __init type_pmu_register(struct intel_uncore_type *type)
|
2012-06-15 14:31:34 +08:00
|
|
|
{
|
2016-02-23 06:19:09 +08:00
|
|
|
int i, ret;
|
|
|
|
|
|
|
|
for (i = 0; i < type->num_boxes; i++) {
|
|
|
|
ret = uncore_pmu_register(&type->pmus[i]);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
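/* Walk the NULL-terminated uncore_msr_uncores array, registering one perf PMU per box of each type */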
static int __init uncore_msr_pmus_register(void)
|
|
|
|
{
|
|
|
|
struct intel_uncore_type **types = uncore_msr_uncores;
|
|
|
|
int ret;
|
|
|
|
|
2016-02-23 06:19:12 +08:00
|
|
|
for (; *types; types++) {
|
|
|
|
ret = type_pmu_register(*types);
|
2016-02-23 06:19:09 +08:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
return 0;
|
2012-06-15 14:31:34 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int __init uncore_cpu_init(void)
|
|
|
|
{
|
2014-07-30 15:22:15 +08:00
|
|
|
int ret;
|
2012-06-15 14:31:34 +08:00
|
|
|
|
|
|
|
switch (boot_cpu_data.x86_model) {
|
2012-06-15 14:31:35 +08:00
|
|
|
case 26: /* Nehalem */
|
|
|
|
case 30:
|
|
|
|
case 37: /* Westmere */
|
|
|
|
case 44:
|
2014-07-30 15:22:13 +08:00
|
|
|
nhm_uncore_cpu_init();
|
2012-06-15 14:31:35 +08:00
|
|
|
break;
|
|
|
|
case 42: /* Sandy Bridge */
|
2013-04-30 03:52:27 +08:00
|
|
|
case 58: /* Ivy Bridge */
|
2015-06-15 13:57:41 +08:00
|
|
|
case 60: /* Haswell */
|
|
|
|
case 69: /* Haswell */
|
|
|
|
case 70: /* Haswell */
|
|
|
|
case 61: /* Broadwell */
|
|
|
|
case 71: /* Broadwell */
|
2014-07-30 15:22:13 +08:00
|
|
|
snb_uncore_cpu_init();
|
2012-06-15 14:31:35 +08:00
|
|
|
break;
|
2013-04-30 03:49:28 +08:00
|
|
|
case 45: /* Sandy Bridge-EP */
|
2014-07-30 15:22:14 +08:00
|
|
|
snbep_uncore_cpu_init();
|
2012-06-15 14:31:37 +08:00
|
|
|
break;
|
2012-08-06 13:11:22 +08:00
|
|
|
case 46: /* Nehalem-EX */
|
|
|
|
case 47: /* Westmere-EX aka. Xeon E7 */
|
2014-07-30 15:22:15 +08:00
|
|
|
nhmex_uncore_cpu_init();
|
2012-07-05 14:32:17 +08:00
|
|
|
break;
|
2014-08-12 15:15:25 +08:00
|
|
|
case 62: /* Ivy Bridge-EP */
|
|
|
|
ivbep_uncore_cpu_init();
|
2013-04-16 19:51:07 +08:00
|
|
|
break;
|
2014-09-05 07:08:26 +08:00
|
|
|
case 63: /* Haswell-EP */
|
|
|
|
hswep_uncore_cpu_init();
|
|
|
|
break;
|
2015-12-04 05:00:11 +08:00
|
|
|
case 79: /* BDX-EP */
|
2015-07-02 20:12:52 +08:00
|
|
|
case 86: /* BDX-DE */
|
|
|
|
bdx_uncore_cpu_init();
|
|
|
|
break;
|
perf/x86/intel/uncore: Add Knights Landing uncore PMU support
Knights Landing uncore performance monitoring (perfmon) is derived from
Haswell-EP uncore perfmon with several differences. One notable difference
is in PCI device IDs. Knights Landing uses a common PCI device ID for
multiple instances of an uncore PMU device type. In Haswell-EP, each
instance of a PMU device type has a unique device ID.
Knights Landing uncore components that have performance monitoring units
are UBOX, CHA, EDC, MC, M2PCIe, IRP and PCU. Perfmon registers in EDC, MC,
IRP, and M2PCIe reside in the PCIe configuration space. Perfmon registers
in UBOX, CHA and PCU are accessed via the MSR interface.
For more details, please refer to the public document:
https://software.intel.com/sites/default/files/managed/15/8d/IntelXeonPhi%E2%84%A2x200ProcessorPerformanceMonitoringReferenceManual_Volume1_Registers_v0%206.pdf
Signed-off-by: Harish Chegondi <harish.chegondi@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andi Kleen <andi.kleen@intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Harish Chegondi <harish.chegondi@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Kan Liang <kan.liang@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Lukasz Anaczkowski <lukasz.anaczkowski@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Link: http://lkml.kernel.org/r/8ac513981264c3eb10343a3f523f19cc5a2d12fe.1449470704.git.harish.chegondi@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2015-12-08 06:32:32 +08:00
|
|
|
case 87: /* Knights Landing */
|
|
|
|
knl_uncore_cpu_init();
|
|
|
|
break;
|
2012-06-15 14:31:34 +08:00
|
|
|
default:
|
2016-02-23 06:19:17 +08:00
|
|
|
return -ENODEV;
|
2012-06-15 14:31:34 +08:00
|
|
|
}
|
|
|
|
|
2016-02-23 06:19:16 +08:00
|
|
|
ret = uncore_types_init(uncore_msr_uncores, true);
|
2016-02-23 06:19:09 +08:00
|
|
|
if (ret)
|
|
|
|
goto err;
|
|
|
|
|
|
|
|
ret = uncore_msr_pmus_register();
|
2012-06-15 14:31:34 +08:00
|
|
|
if (ret)
|
2016-02-23 06:19:09 +08:00
|
|
|
goto err;
|
2012-06-15 14:31:34 +08:00
|
|
|
return 0;
|
2016-02-23 06:19:09 +08:00
|
|
|
err:
|
|
|
|
uncore_types_exit(uncore_msr_uncores);
|
|
|
|
uncore_msr_uncores = empty_uncore;
|
|
|
|
return ret;
|
2012-06-15 14:31:34 +08:00
|
|
|
}
|
|
|
|
|
2016-02-23 06:19:09 +08:00
|
|
|
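/* Runs on the target CPU itself, via smp_call_function_single() from uncore_cpumask_init() */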
static void __init uncore_cpu_setup(void *dummy)
|
2012-06-15 14:31:34 +08:00
|
|
|
{
|
2016-02-23 06:19:16 +08:00
|
|
|
uncore_cpu_starting(smp_processor_id(), true);
|
2012-06-15 14:31:34 +08:00
|
|
|
}
|
|
|
|
|
2016-02-23 06:19:16 +08:00
|
|
|
/* Be lazy: a static __initdata bitmap saves allocating the few bytes the normal case would need */
|
|
|
|
static __initdata DECLARE_BITMAP(packages, MAX_LOCAL_APIC);
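/* One bit per logical package id; set once that package's first online CPU has been prepared */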
|
|
|
|
|
2016-02-23 06:19:17 +08:00
|
|
|
static int __init uncore_cpumask_init(bool msr)
|
2014-02-11 23:20:07 +08:00
|
|
|
{
|
2016-02-23 06:19:16 +08:00
|
|
|
unsigned int cpu;
|
2014-02-11 23:20:07 +08:00
|
|
|
|
|
|
|
for_each_online_cpu(cpu) {
|
2016-02-23 06:19:16 +08:00
|
|
|
unsigned int pkg = topology_logical_package_id(cpu);
|
|
|
|
int ret;
|
2014-02-11 23:20:07 +08:00
|
|
|
|
2016-02-23 06:19:16 +08:00
|
|
|
if (test_and_set_bit(pkg, packages))
|
2014-02-11 23:20:07 +08:00
|
|
|
continue;
|
2016-02-23 06:19:16 +08:00
|
|
|
/*
|
2016-02-23 06:19:17 +08:00
|
|
|
* The first online cpu of each package allocates and takes
|
|
|
|
* the refcounts for all other online cpus in that package.
|
|
|
|
* If msrs are not enabled no allocation is required.
|
2016-02-23 06:19:16 +08:00
|
|
|
*/
|
2016-02-23 06:19:17 +08:00
|
|
|
if (msr) {
|
|
|
|
ret = uncore_cpu_prepare(cpu);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
}
|
2014-02-11 23:20:07 +08:00
|
|
|
uncore_event_init_cpu(cpu);
|
2016-02-23 06:19:16 +08:00
|
|
|
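/* uncore_cpu_setup() has to run on the CPU itself, hence the cross-call */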
smp_call_function_single(cpu, uncore_cpu_setup, NULL, 1);
|
2014-02-11 23:20:07 +08:00
|
|
|
}
|
Merge tag 'cpu-hotplug-3.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull CPU hotplug notifiers registration fixes from Rafael Wysocki:
"The purpose of this single series of commits from Srivatsa S Bhat
(with a small piece from Gautham R Shenoy) touching multiple
subsystems that use CPU hotplug notifiers is to provide a way to
register them that will not lead to deadlocks with CPU online/offline
operations as described in the changelog of commit 93ae4f978ca7f ("CPU
hotplug: Provide lockless versions of callback registration
functions").
The first three commits in the series introduce the API and document
it and the rest simply goes through the users of CPU hotplug notifiers
and converts them to using the new method"
* tag 'cpu-hotplug-3.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (52 commits)
net/iucv/iucv.c: Fix CPU hotplug callback registration
net/core/flow.c: Fix CPU hotplug callback registration
mm, zswap: Fix CPU hotplug callback registration
mm, vmstat: Fix CPU hotplug callback registration
profile: Fix CPU hotplug callback registration
trace, ring-buffer: Fix CPU hotplug callback registration
xen, balloon: Fix CPU hotplug callback registration
hwmon, via-cputemp: Fix CPU hotplug callback registration
hwmon, coretemp: Fix CPU hotplug callback registration
thermal, x86-pkg-temp: Fix CPU hotplug callback registration
octeon, watchdog: Fix CPU hotplug callback registration
oprofile, nmi-timer: Fix CPU hotplug callback registration
intel-idle: Fix CPU hotplug callback registration
clocksource, dummy-timer: Fix CPU hotplug callback registration
drivers/base/topology.c: Fix CPU hotplug callback registration
acpi-cpufreq: Fix CPU hotplug callback registration
zsmalloc: Fix CPU hotplug callback registration
scsi, fcoe: Fix CPU hotplug callback registration
scsi, bnx2fc: Fix CPU hotplug callback registration
scsi, bnx2i: Fix CPU hotplug callback registration
...
2014-04-08 05:55:46 +08:00
|
|
|
__register_cpu_notifier(&uncore_cpu_nb);
|
2016-02-23 06:19:16 +08:00
|
|
|
return 0;
|
2014-02-11 23:20:07 +08:00
|
|
|
}
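/*
 * Illustrative sketch only, not part of this driver: the lockless CPU
 * hotplug notifier registration pattern referenced in the merge message
 * above, shown for a hypothetical "foo" subsystem. uncore_cpumask_init()
 * above and intel_uncore_init() below follow the same shape, using the
 * begin/done helpers together with the double-underscore register variant
 * from <linux/cpu.h> of this kernel era.
 */
static int foo_cpu_notifier(struct notifier_block *self,
			    unsigned long action, void *hcpu)
{
	/* react to (action & ~CPU_TASKS_FROZEN) here */
	return NOTIFY_OK;
}

static struct notifier_block foo_cpu_nb = {
	.notifier_call	= foo_cpu_notifier,
};

static int __init foo_init(void)
{
	unsigned int cpu;

	/* Hold off CPU hotplug while walking the already-online CPUs ... */
	cpu_notifier_register_begin();
	for_each_online_cpu(cpu) {
		/* ... and set up per-cpu state for each of them */
	}
	/* __register_cpu_notifier() is the variant legal inside begin/done */
	__register_cpu_notifier(&foo_cpu_nb);
	cpu_notifier_register_done();
	return 0;
}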
|
|
|
|
|
2012-06-15 14:31:34 +08:00
|
|
|
static int __init intel_uncore_init(void)
|
|
|
|
{
|
2016-02-23 06:19:17 +08:00
|
|
|
int pret, cret, ret;
|
2012-06-15 14:31:34 +08:00
|
|
|
|
|
|
|
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
|
|
|
|
return -ENODEV;
|
|
|
|
|
2012-08-21 17:08:37 +08:00
|
|
|
if (cpu_has_hypervisor)
|
|
|
|
return -ENODEV;
|
|
|
|
|
2016-02-23 06:19:16 +08:00
|
|
|
max_packages = topology_max_packages();
|
|
|
|
|
2016-02-23 06:19:17 +08:00
|
|
|
pret = uncore_pci_init();
|
|
|
|
cret = uncore_cpu_init();
|
|
|
|
|
|
|
|
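/* Give up only if both the PCI side and the MSR/CPU side failed to initialize */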
if (cret && pret)
|
|
|
|
return -ENODEV;
|
2016-02-23 06:19:16 +08:00
|
|
|
|
|
|
|
cpu_notifier_register_begin();
|
2016-02-23 06:19:17 +08:00
|
|
|
ret = uncore_cpumask_init(!cret);
|
2016-02-23 06:19:09 +08:00
|
|
|
if (ret)
|
2016-02-23 06:19:16 +08:00
|
|
|
goto err;
|
|
|
|
cpu_notifier_register_done();
|
2012-06-15 14:31:34 +08:00
|
|
|
return 0;
|
2016-02-23 06:19:09 +08:00
|
|
|
|
2016-02-23 06:19:16 +08:00
|
|
|
err:
|
2016-02-23 06:19:17 +08:00
|
|
|
/* Undo box->init_box() */
|
|
|
|
on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1);
|
2016-02-23 06:19:09 +08:00
|
|
|
uncore_types_exit(uncore_msr_uncores);
|
|
|
|
uncore_pci_exit();
|
2016-02-23 06:19:16 +08:00
|
|
|
cpu_notifier_register_done();
|
2012-06-15 14:31:34 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
device_initcall(intel_uncore_init);
|