diff --git a/arch/metag/kernel/perf/perf_event.c b/arch/metag/kernel/perf/perf_event.c
index 5cc4d4dcf3cf..02c08737f6aa 100644
--- a/arch/metag/kernel/perf/perf_event.c
+++ b/arch/metag/kernel/perf/perf_event.c
@@ -567,16 +567,6 @@ static int _hw_perf_event_init(struct perf_event *event)
 	if (mapping == -1)
 		return -EINVAL;
 
-	/*
-	 * Early cores have "limited" counters - they have no overflow
-	 * interrupts - and so are unable to do sampling without extra work
-	 * and timer assistance.
-	 */
-	if (metag_pmu->max_period == 0) {
-		if (hwc->sample_period)
-			return -EINVAL;
-	}
-
 	/*
 	 * Don't assign an index until the event is placed into the hardware.
 	 * -1 signifies that we're still deciding where to put it. On SMP
@@ -866,6 +856,15 @@ static int __init init_hw_perf_events(void)
 	pr_info("enabled with %s PMU driver, %d counters available\n",
 		metag_pmu->name, metag_pmu->max_events);
 
+	/*
+	 * Early cores have "limited" counters - they have no overflow
+	 * interrupts - and so are unable to do sampling without extra work
+	 * and timer assistance.
+	 */
+	if (metag_pmu->max_period == 0) {
+		metag_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+	}
+
 	/* Initialise the active events and reservation mutex */
 	atomic_set(&metag_pmu->active_events, 0);
 	mutex_init(&metag_pmu->reserve_mutex);
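
With the capability flag advertised at PMU registration time, the architecture
code no longer needs its own sample_period check in event init: the generic
perf core refuses sampling events for any PMU that sets
PERF_PMU_CAP_NO_INTERRUPT. A minimal sketch of that core-side check, assuming
it sits in perf_event_alloc() in kernel/events/core.c once the event's PMU has
been resolved (exact placement and error code may differ in a given tree):

	/* Sketch: reject sampling on PMUs that cannot raise an overflow IRQ. */
	if (is_sampling_event(event)) {
		if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) {
			err = -ENOTSUPP;
			goto err_alloc;
		}
	}

Letting the core enforce this means every limited-counter PMU behaves the same
way, and the per-architecture drivers only have to describe their hardware
rather than duplicate the sampling policy.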