Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Ingo Molnar:
 "Tooling fixes, three Intel uncore driver fixes, plus an AUX events fix
  uncovered by the perf fuzzer"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/x86/intel/uncore: Remove PCIe3 unit for SNR
  perf/x86/intel/uncore: Fix missing marker for snr_uncore_imc_freerunning_events
  perf/x86/intel/uncore: Add PCI ID of IMC for Xeon E3 V5 Family
  perf: Correctly handle failed perf_get_aux_event()
  perf hists: Fix variable name's inconsistency in hists__for_each() macro
  perf map: Set kmap->kmaps backpointer for main kernel map chunks
  perf report: Fix incorrectly added dimensions as switch perf data file
  tools lib traceevent: Fix memory leakage in filter_event
commit b07b9e8d63
--- a/arch/x86/events/intel/uncore_snb.c
+++ b/arch/x86/events/intel/uncore_snb.c
@@ -15,6 +15,7 @@
 #define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC	0x1910
 #define PCI_DEVICE_ID_INTEL_SKL_SD_IMC	0x190f
 #define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC	0x191f
+#define PCI_DEVICE_ID_INTEL_SKL_E3_IMC	0x1918
 #define PCI_DEVICE_ID_INTEL_KBL_Y_IMC	0x590c
 #define PCI_DEVICE_ID_INTEL_KBL_U_IMC	0x5904
 #define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC	0x5914
@@ -657,6 +658,10 @@ static const struct pci_device_id skl_uncore_pci_ids[] = {
 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC),
 		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
 	},
+	{ /* IMC */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_E3_IMC),
+		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+	},
 	{ /* IMC */
 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_Y_IMC),
 		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
@@ -826,6 +831,7 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
 	IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver),	/* 6th Gen Core H Quad Core */
 	IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver),	/* 6th Gen Core S Dual Core */
 	IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver),	/* 6th Gen Core S Quad Core */
+	IMC_DEV(SKL_E3_IMC, &skl_uncore_pci_driver),	/* Xeon E3 V5 Gen Core processor */
 	IMC_DEV(KBL_Y_IMC, &skl_uncore_pci_driver),	/* 7th Gen Core Y */
 	IMC_DEV(KBL_U_IMC, &skl_uncore_pci_driver),	/* 7th Gen Core U */
 	IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver),	/* 7th Gen Core U Quad Core */
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -369,11 +369,6 @@
 #define SNR_M2M_PCI_PMON_BOX_CTL	0x438
 #define SNR_M2M_PCI_PMON_UMASK_EXT	0xff
 
-/* SNR PCIE3 */
-#define SNR_PCIE3_PCI_PMON_CTL0		0x508
-#define SNR_PCIE3_PCI_PMON_CTR0		0x4e8
-#define SNR_PCIE3_PCI_PMON_BOX_CTL	0x4e4
-
 /* SNR IMC */
 #define SNR_IMC_MMIO_PMON_FIXED_CTL	0x54
 #define SNR_IMC_MMIO_PMON_FIXED_CTR	0x38
@@ -4328,27 +4323,12 @@ static struct intel_uncore_type snr_uncore_m2m = {
 	.format_group	= &snr_m2m_uncore_format_group,
 };
 
-static struct intel_uncore_type snr_uncore_pcie3 = {
-	.name		= "pcie3",
-	.num_counters	= 4,
-	.num_boxes	= 1,
-	.perf_ctr_bits	= 48,
-	.perf_ctr	= SNR_PCIE3_PCI_PMON_CTR0,
-	.event_ctl	= SNR_PCIE3_PCI_PMON_CTL0,
-	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
-	.box_ctl	= SNR_PCIE3_PCI_PMON_BOX_CTL,
-	.ops		= &ivbep_uncore_pci_ops,
-	.format_group	= &ivbep_uncore_format_group,
-};
-
 enum {
 	SNR_PCI_UNCORE_M2M,
-	SNR_PCI_UNCORE_PCIE3,
 };
 
 static struct intel_uncore_type *snr_pci_uncores[] = {
 	[SNR_PCI_UNCORE_M2M]	= &snr_uncore_m2m,
-	[SNR_PCI_UNCORE_PCIE3]	= &snr_uncore_pcie3,
 	NULL,
 };
 
@@ -4357,10 +4337,6 @@ static const struct pci_device_id snr_uncore_pci_ids[] = {
 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0),
 	},
-	{ /* PCIe3 */
-		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x334a),
-		.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, SNR_PCI_UNCORE_PCIE3, 0),
-	},
 	{ /* end: all zeroes */ }
 };
 
@@ -4536,6 +4512,7 @@ static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
 	INTEL_UNCORE_EVENT_DESC(write,		"event=0xff,umask=0x21"),
 	INTEL_UNCORE_EVENT_DESC(write.scale,	"3.814697266e-6"),
 	INTEL_UNCORE_EVENT_DESC(write.unit,	"MiB"),
+	{ /* end: all zeroes */ },
 };
 
 static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = {
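The hunk above adds the missing terminator to snr_uncore_imc_freerunning_events: such event-descriptor tables are walked until an all-zero entry is reached, so without the marker the walker reads past the end of the array. A standalone sketch of the sentinel-terminated table pattern (hypothetical names, not the kernel's uncore code):

/*
 * Standalone sketch (hypothetical names, not the kernel's uncore code) of
 * why the "end: all zeroes" entry matters: the table walker stops at the
 * first entry with a NULL name, so omitting the sentinel would make it
 * read past the end of the array.
 */
#include <stdio.h>

struct event_desc {
	const char *name;
	const char *config;
};

static const struct event_desc freerunning_events[] = {
	{ "read",  "event=0xff,umask=0x20" },
	{ "write", "event=0xff,umask=0x21" },
	{ /* end: all zeroes */ NULL, NULL },
};

int main(void)
{
	/* Iterate until the sentinel entry is reached. */
	for (const struct event_desc *d = freerunning_events; d->name; d++)
		printf("%s: %s\n", d->name, d->config);
	return 0;
}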
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -11465,8 +11465,10 @@ SYSCALL_DEFINE5(perf_event_open,
 		}
 	}
 
-	if (perf_need_aux_event(event) && !perf_get_aux_event(event, group_leader))
+	if (perf_need_aux_event(event) && !perf_get_aux_event(event, group_leader)) {
+		err = -EINVAL;
 		goto err_locked;
+	}
 
 	/*
 	 * Must be under the same ctx::mutex as perf_install_in_context(),
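The change above makes the syscall report -EINVAL when perf_get_aux_event() refuses the grouping, rather than jumping to the unwind label with whatever value err happened to hold. A minimal userspace sketch of the same set-the-error-then-goto-cleanup idiom (hypothetical names, not kernel code):

/*
 * Minimal sketch (hypothetical names, not kernel code) of goto-based error
 * unwinding: every failure path must set the error code before jumping to
 * the common cleanup label, otherwise the function can unwind yet still
 * return "success".
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int setup_aux(int want_aux)
{
	int err = 0;
	char *buf = malloc(64);

	if (!buf)
		return -ENOMEM;

	if (want_aux) {		/* stand-in for the failing capability check */
		err = -EINVAL;	/* set the code *before* the jump */
		goto err_unwind;
	}

	free(buf);
	return 0;

err_unwind:
	free(buf);		/* shared cleanup for all failure paths */
	return err;
}

int main(void)
{
	printf("ok path:   %d\n", setup_aux(0));
	printf("fail path: %d\n", setup_aux(1));
	return 0;
}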
--- a/tools/lib/traceevent/parse-filter.c
+++ b/tools/lib/traceevent/parse-filter.c
@@ -1228,8 +1228,10 @@ filter_event(struct tep_event_filter *filter, struct tep_event *event,
 	}
 
 	filter_type = add_filter_type(filter, event->id);
-	if (filter_type == NULL)
+	if (filter_type == NULL) {
+		free_arg(arg);
 		return TEP_ERRNO__MEM_ALLOC_FAILED;
+	}
 
 	if (filter_type->filter)
		free_arg(filter_type->filter);
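This is the leak fix named in the pull message: the argument tree held in arg was built before add_filter_type() ran, so the allocation-failure path has to release it before returning. A standalone sketch of the pattern (hypothetical names, not the traceevent code):

/*
 * Standalone sketch (hypothetical names, not the traceevent code) of the
 * leak pattern fixed above: when a later allocation fails, any object
 * built earlier in the function must be released before returning.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *add_filter_type(int fail)
{
	return fail ? NULL : strdup("filter slot");
}

static int filter_event(int fail)
{
	char *arg = strdup("parsed filter expression");	/* built first */
	char *filter_type;

	if (!arg)
		return -1;

	filter_type = add_filter_type(fail);
	if (filter_type == NULL) {
		free(arg);	/* the fix: drop the earlier allocation */
		return -1;	/* stand-in for TEP_ERRNO__MEM_ALLOC_FAILED */
	}

	printf("attached \"%s\" to %s\n", arg, filter_type);
	free(filter_type);
	free(arg);
	return 0;
}

int main(void)
{
	filter_event(0);
	filter_event(1);
	return 0;
}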
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -1076,6 +1076,7 @@ int cmd_report(int argc, const char **argv)
 	struct stat st;
 	bool has_br_stack = false;
 	int branch_mode = -1;
+	int last_key = 0;
 	bool branch_call_mode = false;
 #define CALLCHAIN_DEFAULT_OPT  "graph,0.5,caller,function,percent"
 	static const char report_callchain_help[] = "Display call graph (stack chain/backtrace):\n\n"
|
@ -1450,7 +1451,8 @@ repeat:
|
||||||
sort_order = sort_tmp;
|
sort_order = sort_tmp;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (setup_sorting(session->evlist) < 0) {
|
if ((last_key != K_SWITCH_INPUT_DATA) &&
|
||||||
|
(setup_sorting(session->evlist) < 0)) {
|
||||||
if (sort_order)
|
if (sort_order)
|
||||||
parse_options_usage(report_usage, options, "s", 1);
|
parse_options_usage(report_usage, options, "s", 1);
|
||||||
if (field_order)
|
if (field_order)
|
||||||
|
@ -1530,6 +1532,7 @@ repeat:
|
||||||
ret = __cmd_report(&report);
|
ret = __cmd_report(&report);
|
||||||
if (ret == K_SWITCH_INPUT_DATA) {
|
if (ret == K_SWITCH_INPUT_DATA) {
|
||||||
perf_session__delete(session);
|
perf_session__delete(session);
|
||||||
|
last_key = K_SWITCH_INPUT_DATA;
|
||||||
goto repeat;
|
goto repeat;
|
||||||
} else
|
} else
|
||||||
ret = 0;
|
ret = 0;
|
||||||
|
|
|
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -339,10 +339,10 @@ static inline void perf_hpp__prepend_sort_field(struct perf_hpp_fmt *format)
 	list_for_each_entry_safe(format, tmp, &(_list)->sorts, sort_list)
 
 #define hists__for_each_format(hists, format) \
-	perf_hpp_list__for_each_format((hists)->hpp_list, fmt)
+	perf_hpp_list__for_each_format((hists)->hpp_list, format)
 
 #define hists__for_each_sort_list(hists, format) \
-	perf_hpp_list__for_each_sort_list((hists)->hpp_list, fmt)
+	perf_hpp_list__for_each_sort_list((hists)->hpp_list, format)
 
 extern struct perf_hpp_fmt perf_hpp__format[];
 
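The two macros above expanded to the hard-coded identifier fmt instead of their format parameter, so they only compiled for callers whose loop variable happened to be named fmt. A standalone illustration of that macro pitfall (plain C, not perf code):

/*
 * Standalone illustration (not perf code) of the macro bug fixed above: an
 * iteration macro must expand its own parameter, not a fixed identifier,
 * or it only works for callers whose variable happens to share that name.
 */
#include <stdio.h>

/* Buggy form: the body ignores "it" and hard-codes "fmt". */
#define for_each_value_buggy(arr, it) \
	for (int fmt = 0; fmt < (int)(sizeof(arr) / sizeof((arr)[0])); fmt++)

/* Fixed form: the body uses the parameter the caller actually passed. */
#define for_each_value(arr, it) \
	for (int it = 0; it < (int)(sizeof(arr) / sizeof((arr)[0])); it++)

int main(void)
{
	int values[] = { 10, 20, 30 };

	for_each_value(values, format)
		printf("values[%d] = %d\n", format, values[format]);

	/*
	 * for_each_value_buggy(values, format)
	 *	printf("%d\n", values[format]);
	 * would not compile: the expansion declares "fmt", so "format" is
	 * undeclared inside the loop body.
	 */
	return 0;
}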
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -920,6 +920,9 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map,
 		if (curr_map == NULL)
 			return -1;
 
+		if (curr_dso->kernel)
+			map__kmap(curr_map)->kmaps = kmaps;
+
 		if (adjust_kernel_syms) {
 			curr_map->start = shdr->sh_addr + ref_reloc(kmap);
 			curr_map->end = curr_map->start + shdr->sh_size;