Merge branches 'perf-urgent-for-linus', 'x86-urgent-for-linus' and 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf, x86 and scheduler updates from Ingo Molnar.

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  tracing: Do not enable function event with enable
  perf stat: handle ENXIO error for perf_event_open
  perf: Turn off compiler warnings for flex and bison generated files
  perf stat: Fix case where guest/host monitoring is not supported by kernel
  perf build-id: Fix filename size calculation

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, kvm: KVM paravirt kernels don't check for CPUID being unavailable
  x86: Fix section annotation of acpi_map_cpu2node()
  x86/microcode: Ensure that module is only loaded on supported Intel CPUs

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched: Fix KVM and ia64 boot crash due to sched_groups circular linked list assumption
commit 31ae98359d
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -170,6 +170,9 @@ static inline int kvm_para_available(void)
 	unsigned int eax, ebx, ecx, edx;
 	char signature[13];
 
+	if (boot_cpu_data.cpuid_level < 0)
+		return 0;	/* So we don't blow up on old processors */
+
 	cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx);
 	memcpy(signature + 0, &ebx, 4);
 	memcpy(signature + 4, &ecx, 4);
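The KVM hunk above guards kvm_para_available() against processors that predate the CPUID instruction, for which the kernel records a negative cpuid_level; issuing CPUID there would fault. A minimal userspace sketch of the same guard pattern (fake_cpu_data, para_available and their fields are illustrative stand-ins, not kernel API):

#include <stdio.h>

struct fake_cpu_data {
	int cpuid_level;	/* the kernel stores -1 when CPUID is absent */
};

static int para_available(const struct fake_cpu_data *c)
{
	if (c->cpuid_level < 0)
		return 0;	/* don't execute CPUID on old processors */
	/* ... real code would issue CPUID and compare the signature ... */
	return 1;
}

int main(void)
{
	struct fake_cpu_data old = { .cpuid_level = -1 };
	struct fake_cpu_data new = { .cpuid_level = 13 };
	printf("old: %d, new: %d\n", para_available(&old), para_available(&new));
	return 0;
}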
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -593,7 +593,7 @@ void __init acpi_set_irq_model_ioapic(void)
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
 #include <acpi/processor.h>
 
-static void __cpuinitdata acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
+static void __cpuinit acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 {
 #ifdef CONFIG_ACPI_NUMA
 	int nid;
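The ACPI fix swaps a data-section annotation for the text-section one: __cpuinitdata is meant for variables and __cpuinit for functions, both (as far as I recall from include/linux/init.h of this era) thin wrappers around GCC section attributes, so tagging a function with the data variant puts code in the wrong section. A standalone sketch of the underlying mechanism, using hypothetical MY_INIT_* macros rather than the kernel's real ones:

#include <stdio.h>

#define MY_INIT_TEXT __attribute__((__section__(".myinit.text")))
#define MY_INIT_DATA __attribute__((__section__(".myinit.data")))

static int MY_INIT_DATA probe_count = 1;	/* variable: data section */

static void MY_INIT_TEXT probe_one(void)	/* function: text section */
{
	printf("probe %d\n", probe_count);
}

int main(void)
{
	probe_one();
	return 0;
}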
--- a/arch/x86/kernel/microcode_intel.c
+++ b/arch/x86/kernel/microcode_intel.c
@@ -147,12 +147,6 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
 
 	memset(csig, 0, sizeof(*csig));
 
-	if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
-	    cpu_has(c, X86_FEATURE_IA64)) {
-		pr_err("CPU%d not a capable Intel processor\n", cpu_num);
-		return -1;
-	}
-
 	csig->sig = cpuid_eax(0x00000001);
 
 	if ((c->x86_model >= 5) || (c->x86 > 6)) {
@@ -463,6 +457,14 @@ static struct microcode_ops microcode_intel_ops = {
 
 struct microcode_ops * __init init_intel_microcode(void)
 {
+	struct cpuinfo_x86 *c = &cpu_data(0);
+
+	if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
+	    cpu_has(c, X86_FEATURE_IA64)) {
+		pr_err("Intel CPU family 0x%x not supported\n", c->x86);
+		return NULL;
+	}
+
 	return &microcode_intel_ops;
 }
 
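The two microcode hunks move the vendor/family check from the per-CPU collect_cpu_info() path into init_intel_microcode(), so an unsupported system now rejects the module once, at load time, instead of erroring on every CPU later. A compact userspace model of this gate-at-init pattern (struct ops, init_ops and the rest are invented names):

#include <stddef.h>
#include <stdio.h>

struct ops { void (*apply)(void); };

static void apply_real(void) { puts("applying"); }
static struct ops real_ops = { .apply = apply_real };

static struct ops *init_ops(int vendor_ok)
{
	if (!vendor_ok)
		return NULL;	/* unsupported CPU: refuse the load early */
	return &real_ops;
}

int main(void)
{
	struct ops *o = init_ops(0);
	printf("unsupported system: %s\n", o ? "loaded" : "refused");
	return 0;
}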
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -179,6 +179,7 @@ enum {
 	TRACE_EVENT_FL_RECORDED_CMD_BIT,
 	TRACE_EVENT_FL_CAP_ANY_BIT,
 	TRACE_EVENT_FL_NO_SET_FILTER_BIT,
+	TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
 };
 
 enum {
@@ -187,6 +188,7 @@ enum {
 	TRACE_EVENT_FL_RECORDED_CMD	= (1 << TRACE_EVENT_FL_RECORDED_CMD_BIT),
 	TRACE_EVENT_FL_CAP_ANY		= (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
 	TRACE_EVENT_FL_NO_SET_FILTER	= (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
+	TRACE_EVENT_FL_IGNORE_ENABLE	= (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
 };
 
 struct ftrace_event_call {
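The header change follows the file's existing twin-enum convention: the first enum numbers the bit positions, the second builds the shifted masks from them, so adding a flag means one line in each. A self-contained model of the pattern (the FL_* names here are illustrative, not the kernel's):

#include <stdio.h>

enum { FL_FILTERED_BIT, FL_CAP_ANY_BIT, FL_IGNORE_ENABLE_BIT };
enum {
	FL_FILTERED      = (1 << FL_FILTERED_BIT),
	FL_CAP_ANY       = (1 << FL_CAP_ANY_BIT),
	FL_IGNORE_ENABLE = (1 << FL_IGNORE_ENABLE_BIT),
};

int main(void)
{
	unsigned int flags = FL_IGNORE_ENABLE;
	if (flags & FL_IGNORE_ENABLE)
		puts("event ignores enable requests");
	return 0;
}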
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6382,6 +6382,8 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
 			if (!sg)
 				return -ENOMEM;
 
+			sg->next = sg;
+
 			*per_cpu_ptr(sdd->sg, j) = sg;
 
 			sgp = kzalloc_node(sizeof(struct sched_group_power),
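The one-line scheduler fix matters because, per the commit subject, consumers assume sched_groups form a circular linked list: a freshly allocated lone group must point at itself or a do/while-style walker never terminates (or dereferences garbage). A runnable illustration of that assumption (struct group and walk() are invented names):

#include <stdio.h>

struct group { int id; struct group *next; };

static void walk(struct group *first)
{
	struct group *g = first;
	do {
		printf("group %d\n", g->id);
		g = g->next;
	} while (g != first);	/* unsafe if next is NULL or uninitialized */
}

int main(void)
{
	struct group lone = { .id = 0 };
	lone.next = &lone;	/* the fix: self-link on allocation */
	walk(&lone);
	return 0;
}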
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -294,6 +294,9 @@ static int __ftrace_set_clr_event(const char *match, const char *sub,
 		if (!call->name || !call->class || !call->class->reg)
 			continue;
 
+		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
+			continue;
+
 		if (match &&
 		    strcmp(match, call->name) != 0 &&
 		    strcmp(match, call->class->system) != 0)
@@ -1164,7 +1167,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
 		return -1;
 	}
 
-	if (call->class->reg)
+	if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
 		trace_create_file("enable", 0644, call->dir, call,
 				  enable);
 
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -180,6 +180,7 @@ struct ftrace_event_call __used event_##call = {	\
 	.event.type		= etype,				\
 	.class			= &event_class_ftrace_##call,		\
 	.print_fmt		= print,				\
+	.flags			= TRACE_EVENT_FL_IGNORE_ENABLE,		\
 };									\
 struct ftrace_event_call __used						\
 __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call;
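The three tracing hunks cooperate: ftrace_event.h defines TRACE_EVENT_FL_IGNORE_ENABLE, trace_events.c honors it on both enable paths (wildcard enable skips flagged events, and no per-event "enable" file is created), and trace_export.c sets it on ftrace's internal events, so the function event can no longer be switched on through the event interface. A reduced model of the flag check (all names illustrative):

#include <stdio.h>

#define FL_IGNORE_ENABLE (1 << 0)

struct event { const char *name; unsigned int flags; };

static void set_clr_all(struct event *evs, int n, int on)
{
	for (int i = 0; i < n; i++) {
		if (evs[i].flags & FL_IGNORE_ENABLE)
			continue;	/* not user-togglable */
		printf("%s -> %d\n", evs[i].name, on);
	}
}

int main(void)
{
	struct event evs[] = {
		{ "sched_switch", 0 },
		{ "function",     FL_IGNORE_ENABLE },
	};
	set_clr_all(evs, 2, 1);	/* prints only sched_switch */
	return 0;
}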
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -774,10 +774,10 @@ $(OUTPUT)perf.o perf.spec \
 # over the general rule for .o
 
 $(OUTPUT)util/%-flex.o: $(OUTPUT)util/%-flex.c $(OUTPUT)PERF-CFLAGS
-	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -Iutil/ -Wno-redundant-decls -Wno-switch-default -Wno-unused-function $<
+	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -Iutil/ -w $<
 
 $(OUTPUT)util/%-bison.o: $(OUTPUT)util/%-bison.c $(OUTPUT)PERF-CFLAGS
-	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -Iutil/ -Wno-redundant-decls -Wno-switch-default -Wno-unused-function $<
+	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -Iutil/ -w $<
 
 $(OUTPUT)%.o: %.c $(OUTPUT)PERF-CFLAGS
 	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $<
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -283,6 +283,8 @@ static int create_perf_stat_counter(struct perf_evsel *evsel,
 {
 	struct perf_event_attr *attr = &evsel->attr;
 	struct xyarray *group_fd = NULL;
+	bool exclude_guest_missing = false;
+	int ret;
 
 	if (group && evsel != first)
 		group_fd = first->fd;
@@ -293,16 +295,39 @@ static int create_perf_stat_counter(struct perf_evsel *evsel,
 
 	attr->inherit = !no_inherit;
 
-	if (system_wide)
-		return perf_evsel__open_per_cpu(evsel, evsel_list->cpus,
-						group, group_fd);
+retry:
+	if (exclude_guest_missing)
+		evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
+
+	if (system_wide) {
+		ret = perf_evsel__open_per_cpu(evsel, evsel_list->cpus,
+					       group, group_fd);
+		if (ret)
+			goto check_ret;
+		return 0;
+	}
+
 	if (!target_pid && !target_tid && (!group || evsel == first)) {
 		attr->disabled = 1;
 		attr->enable_on_exec = 1;
 	}
 
-	return perf_evsel__open_per_thread(evsel, evsel_list->threads,
-					   group, group_fd);
+	ret = perf_evsel__open_per_thread(evsel, evsel_list->threads,
+					  group, group_fd);
+	if (!ret)
+		return 0;
+	/* fall through */
+check_ret:
+	if (ret && errno == EINVAL) {
+		if (!exclude_guest_missing &&
+		    (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
+			pr_debug("Old kernel, cannot exclude "
+				 "guest or host samples.\n");
+			exclude_guest_missing = true;
+			goto retry;
+		}
+	}
+	return ret;
 }
 
 /*
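The builtin-stat.c rework retries counter setup when an older kernel rejects the newer exclude_guest/exclude_host attribute bits with EINVAL: clear the bits once, then try again. The skeleton of that retry, reduced to userspace with a stubbed try_open() standing in for the perf_evsel__open_* calls (all names here are stand-ins):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int try_open(bool exclude_guest)
{
	if (exclude_guest) {	/* pretend the kernel predates these bits */
		errno = EINVAL;
		return -1;
	}
	return 42;		/* a fake fd on success */
}

int main(void)
{
	bool exclude_guest = true;
	int fd;
retry:
	fd = try_open(exclude_guest);
	if (fd < 0 && errno == EINVAL && exclude_guest) {
		fprintf(stderr, "old kernel, retrying without exclude bits\n");
		exclude_guest = false;
		goto retry;	/* retry exactly once */
	}
	printf("fd = %d\n", fd);
	return 0;
}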
@@ -463,8 +488,13 @@ static int run_perf_stat(int argc __used, const char **argv)
 
 	list_for_each_entry(counter, &evsel_list->entries, node) {
 		if (create_perf_stat_counter(counter, first) < 0) {
+			/*
+			 * PPC returns ENXIO for HW counters until 2.6.37
+			 * (behavior changed with commit b0a873e).
+			 */
 			if (errno == EINVAL || errno == ENOSYS ||
-			    errno == ENOENT || errno == EOPNOTSUPP) {
+			    errno == ENOENT || errno == EOPNOTSUPP ||
+			    errno == ENXIO) {
 				if (verbose)
 					ui__warning("%s event is not supported by the kernel.\n",
 						    event_name(counter));
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -296,7 +296,7 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
 	if (mkdir_p(filename, 0755))
 		goto out_free;
 
-	snprintf(filename + len, sizeof(filename) - len, "/%s", sbuild_id);
+	snprintf(filename + len, size - len, "/%s", sbuild_id);
 
 	if (access(filename, F_OK)) {
 		if (is_kallsyms) {
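The build-id fix replaces sizeof(filename) with the tracked allocation size: filename is a pointer, so sizeof yields the pointer width rather than the buffer capacity, and subtracting a larger len from it wraps to a huge size_t, turning the snprintf bound into no bound at all. A short demonstration of the pitfall:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t size = 256;
	char *heap = malloc(size);
	char stack[256];

	printf("sizeof(heap)  = %zu\n", sizeof(heap));	/* 8: pointer size */
	printf("sizeof(stack) = %zu\n", sizeof(stack));	/* 256: array size */

	/* With len > sizeof(heap), the old bound underflows to a huge
	 * value, so the snprintf limit was effectively unbounded. */
	size_t len = strlen("/some/long/prefix/already/in/buffer");
	printf("bogus bound   = %zu\n", sizeof(heap) - len);

	free(heap);
	return 0;
}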