Merge branches 'x86-fixes-for-linus' and 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86-32: Make sure we can map all of lowmem if we need to
  x86, vt-d: Handle previous faults after enabling fault handling
  x86: Enable the intr-remap fault handling after local APIC setup
  x86, vt-d: Fix the vt-d fault handling irq migration in the x2apic mode
  x86, vt-d: Quirk for masking vtd spec errors to platform error handling logic
  x86, xsave: Use alloc_bootmem_align() instead of alloc_bootmem()
  bootmem: Add alloc_bootmem_align()
  x86, gcc-4.6: Use gcc -m options when building vdso
  x86: HPET: Chose a paranoid safe value for the ETIME check
  x86: io_apic: Avoid unused variable warning when CONFIG_GENERIC_PENDING_IRQ=n

* 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  perf: Fix off by one in perf_swevent_init()
  perf: Fix duplicate events with multiple-pmu vs software events
  ftrace: Have recordmcount honor endianness in fn_ELF_R_INFO
  scripts/tags.sh: Add magic for trace-events
  tracing: Fix panic when lseek() called on "trace" opened for writing
commit 55ec86f848
@@ -355,7 +355,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
 	if (heap > 0x3fffffffffffUL)
 		error("Destination address too large");
 #else
-	if (heap > ((-__PAGE_OFFSET-(512<<20)-1) & 0x7fffffff))
+	if (heap > ((-__PAGE_OFFSET-(128<<20)-1) & 0x7fffffff))
 		error("Destination address too large");
 #endif
 #ifndef CONFIG_RELOCATABLE
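For scale, the bound in the #else branch can be evaluated directly. A standalone sketch, assuming the default __PAGE_OFFSET of 0xC0000000 (an assumption; the hunk does not fix the memory split):

#include <stdio.h>
#include <stdint.h>

/* Standalone check of the 32-bit heap bound above, assuming the common
 * __PAGE_OFFSET of 0xC0000000 (a 1 GiB lowmem window). */
int main(void)
{
	uint32_t page_offset = 0xC0000000u;
	uint32_t old_bound = (-page_offset - (512u << 20) - 1) & 0x7fffffff;
	uint32_t new_bound = (-page_offset - (128u << 20) - 1) & 0x7fffffff;

	/* prints old: 0x1fffffff (~512 MiB), new: 0x37ffffff (~896 MiB) --
	 * a smaller reserved slack leaves more room for the heap */
	printf("old: %#x\nnew: %#x\n", old_bound, new_bound);
	return 0;
}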
@@ -1389,6 +1389,14 @@ void __cpuinit end_local_APIC_setup(void)
 
 	setup_apic_nmi_watchdog(NULL);
 	apic_pm_activate();
+
+	/*
+	 * Now that local APIC setup is completed for BP, configure the fault
+	 * handling for interrupt remapping.
+	 */
+	if (!smp_processor_id() && intr_remapping_enabled)
+		enable_drhd_fault_handling();
+
 }
 
 #ifdef CONFIG_X86_X2APIC
@@ -2430,13 +2430,12 @@ static void ack_apic_level(struct irq_data *data)
 {
 	struct irq_cfg *cfg = data->chip_data;
 	int i, do_unmask_irq = 0, irq = data->irq;
-	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long v;
 
 	irq_complete_move(cfg);
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	/* If we are moving the irq we need to mask it */
-	if (unlikely(desc->status & IRQ_MOVE_PENDING)) {
+	if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) {
 		do_unmask_irq = 1;
 		mask_ioapic(cfg);
 	}
@@ -3413,6 +3412,7 @@ dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask,
 	msg.data |= MSI_DATA_VECTOR(cfg->vector);
 	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
 	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
+	msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest);
 
 	dmar_msi_write(irq, &msg);
 
@@ -79,13 +79,6 @@ void __init default_setup_apic_routing(void)
 		/* need to update phys_pkg_id */
 		apic->phys_pkg_id = apicid_phys_pkg_id;
 	}
-
-	/*
-	 * Now that apic routing model is selected, configure the
-	 * fault handling for intr remapping.
-	 */
-	if (intr_remapping_enabled)
-		enable_drhd_fault_handling();
 }
 
 /* Same for both flat and physical. */
@@ -60,16 +60,18 @@
 #define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
 #endif
 
+/* Number of possible pages in the lowmem region */
+LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
+
 /* Enough space to fit pagetables for the low memory linear map */
-MAPPING_BEYOND_END = \
-	PAGE_TABLE_SIZE(((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT) << PAGE_SHIFT
+MAPPING_BEYOND_END = PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT
 
 /*
  * Worst-case size of the kernel mapping we need to make:
- * the worst-case size of the kernel itself, plus the extra we need
- * to map for the linear map.
+ * a relocatable kernel can live anywhere in lowmem, so we need to be able
+ * to map all of lowmem.
  */
-KERNEL_PAGES = (KERNEL_IMAGE_SIZE + MAPPING_BEYOND_END)>>PAGE_SHIFT
+KERNEL_PAGES = LOWMEM_PAGES
 
 INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE_asm
 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
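To see what the new worst case costs in brk space, the same arithmetic can be run standalone. A sketch, assuming __PAGE_OFFSET = 0xC0000000 and the non-PAE PAGE_TABLE_SIZE() shown in the hunk's context:

#include <stdio.h>

#define PAGE_SHIFT		12
#define PTRS_PER_PGD		1024
#define PAGE_TABLE_SIZE(pages)	((pages) / PTRS_PER_PGD)

int main(void)
{
	unsigned long long page_offset = 0xC0000000ULL;
	unsigned long long lowmem_pages =
		((1ULL << 32) - page_offset) >> PAGE_SHIFT;

	/* 262144 lowmem pages -> 256 pagetable pages -> a 1 MiB worst-case
	 * brk reservation now that KERNEL_PAGES = LOWMEM_PAGES */
	printf("LOWMEM_PAGES   = %llu\n", lowmem_pages);
	printf("pagetable size = %llu bytes\n",
	       PAGE_TABLE_SIZE(lowmem_pages) << PAGE_SHIFT);
	return 0;
}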
@@ -27,6 +27,9 @@
 #define HPET_DEV_FSB_CAP		0x1000
 #define HPET_DEV_PERI_CAP		0x2000
 
+#define HPET_MIN_CYCLES			128
+#define HPET_MIN_PROG_DELTA		(HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))
+
 #define EVT_TO_HPET_DEV(evt) container_of(evt, struct hpet_dev, evt)
 
 /*
@@ -299,8 +302,9 @@ static void hpet_legacy_clockevent_register(void)
 	/* Calculate the min / max delta */
 	hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF,
 							   &hpet_clockevent);
-	/* 5 usec minimum reprogramming delta. */
-	hpet_clockevent.min_delta_ns = 5000;
+	/* Setup minimum reprogramming delta. */
+	hpet_clockevent.min_delta_ns = clockevent_delta2ns(HPET_MIN_PROG_DELTA,
+							   &hpet_clockevent);
 
 	/*
 	 * Start hpet with the boot cpu mask and make it
@@ -393,22 +397,24 @@ static int hpet_next_event(unsigned long delta,
 	 * the wraparound into account) nor a simple count down event
 	 * mode. Further the write to the comparator register is
 	 * delayed internally up to two HPET clock cycles in certain
-	 * chipsets (ATI, ICH9,10). We worked around that by reading
-	 * back the compare register, but that required another
-	 * workaround for ICH9,10 chips where the first readout after
-	 * write can return the old stale value. We already have a
-	 * minimum delta of 5us enforced, but a NMI or SMI hitting
+	 * chipsets (ATI, ICH9,10). Some newer AMD chipsets have even
+	 * longer delays. We worked around that by reading back the
+	 * compare register, but that required another workaround for
+	 * ICH9,10 chips where the first readout after write can
+	 * return the old stale value. We already had a minimum
+	 * programming delta of 5us enforced, but a NMI or SMI hitting
 	 * between the counter readout and the comparator write can
 	 * move us behind that point easily. Now instead of reading
 	 * the compare register back several times, we make the ETIME
 	 * decision based on the following: Return ETIME if the
-	 * counter value after the write is less than 8 HPET cycles
+	 * counter value after the write is less than HPET_MIN_CYCLES
 	 * away from the event or if the counter is already ahead of
-	 * the event.
+	 * the event. The minimum programming delta for the generic
+	 * clockevents code is set to 1.5 * HPET_MIN_CYCLES.
 	 */
 	res = (s32)(cnt - hpet_readl(HPET_COUNTER));
 
-	return res < 8 ? -ETIME : 0;
+	return res < HPET_MIN_CYCLES ? -ETIME : 0;
 }
 
 static void hpet_legacy_set_mode(enum clock_event_mode mode,
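For a feel of the new numbers, a standalone sketch; the 14.318180 MHz HPET clock assumed here is common on PC chipsets but is not stated in the patch:

#include <stdio.h>

#define HPET_MIN_CYCLES		128
#define HPET_MIN_PROG_DELTA	(HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))

int main(void)
{
	double hz = 14318180.0;			/* assumed HPET frequency */
	double ns_per_cycle = 1e9 / hz;		/* ~69.8 ns per cycle */

	/* ~8.9 us ETIME window and ~13.4 us minimum programming delta:
	 * noticeably more paranoid than the old fixed 5 us / 8 cycles */
	printf("ETIME window: %.1f us\n",
	       HPET_MIN_CYCLES * ns_per_cycle / 1000.0);
	printf("min delta:    %.1f us\n",
	       HPET_MIN_PROG_DELTA * ns_per_cycle / 1000.0);
	return 0;
}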
@@ -394,7 +394,8 @@ static void __init setup_xstate_init(void)
 	 * Setup init_xstate_buf to represent the init state of
 	 * all the features managed by the xsave
 	 */
-	init_xstate_buf = alloc_bootmem(xstate_size);
+	init_xstate_buf = alloc_bootmem_align(xstate_size,
+					      __alignof__(struct xsave_struct));
 	init_xstate_buf->i387.mxcsr = MXCSR_DEFAULT;
 
 	clts();
@@ -25,7 +25,7 @@ targets += vdso.so vdso.so.dbg vdso.lds $(vobjs-y)
 
 export CPPFLAGS_vdso.lds += -P -C
 
-VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -Wl,-soname=linux-vdso.so.1 \
+VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
 			-Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
 
 $(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so
@@ -69,7 +69,7 @@ vdso32.so-$(VDSO32-y) += sysenter
 vdso32-images		= $(vdso32.so-y:%=vdso32-%.so)
 
 CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
-VDSO_LDFLAGS_vdso32.lds = -m elf_i386 -Wl,-soname=linux-gate.so.1
+VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-soname=linux-gate.so.1
 
 # This makes sure the $(obj) subdirectory exists even though vdso32/
 # is not a kbuild sub-make subdirectory.
@@ -1417,6 +1417,11 @@ int __init enable_drhd_fault_handling(void)
 			       (unsigned long long)drhd->reg_base_addr, ret);
 			return -1;
 		}
+
+		/*
+		 * Clear any previous faults.
+		 */
+		dmar_fault(iommu->irq, iommu);
 	}
 
 	return 0;
@@ -2767,6 +2767,29 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
 #endif /*CONFIG_MMC_RICOH_MMC*/
 
+#if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP)
+#define VTUNCERRMSK_REG		0x1ac
+#define VTD_MSK_SPEC_ERRORS	(1 << 31)
+/*
+ * This is a quirk for masking vt-d spec defined errors to platform error
+ * handling logic. With out this, platforms using Intel 7500, 5500 chipsets
+ * (and the derivative chipsets like X58 etc) seem to generate NMI/SMI (based
+ * on the RAS config settings of the platform) when a vt-d fault happens.
+ * The resulting SMI caused the system to hang.
+ *
+ * VT-d spec related errors are already handled by the VT-d OS code, so no
+ * need to report the same error through other channels.
+ */
+static void vtd_mask_spec_errors(struct pci_dev *dev)
+{
+	u32 word;
+
+	pci_read_config_dword(dev, VTUNCERRMSK_REG, &word);
+	pci_write_config_dword(dev, VTUNCERRMSK_REG, word | VTD_MSK_SPEC_ERRORS);
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x342e, vtd_mask_spec_errors);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x3c28, vtd_mask_spec_errors);
+#endif
 
 static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
 			  struct pci_fixup *end)
@@ -105,6 +105,8 @@ extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
 
 #define alloc_bootmem(x) \
 	__alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_align(x, align) \
+	__alloc_bootmem(x, align, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_nopanic(x) \
 	__alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_pages(x) \
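A hypothetical call site for the new macro, mirroring the xsave hunk above; every name in this sketch is invented for illustration:

/* Sketch only: a boot-time buffer with a stricter-than-cacheline
 * alignment requirement. alloc_bootmem() guarantees only SMP_CACHE_BYTES;
 * alloc_bootmem_align() forwards the caller's alignment to
 * __alloc_bootmem() unchanged. */
struct aligned_state {
	u8 bytes[512];
} __attribute__((aligned(64)));

static struct aligned_state *state;

static void __init state_init(void)
{
	state = alloc_bootmem_align(sizeof(*state),
				    __alignof__(struct aligned_state));
}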
@@ -887,6 +887,7 @@ struct perf_cpu_context {
 	int				exclusive;
 	struct list_head		rotation_list;
 	int				jiffies_interval;
+	struct pmu			*active_pmu;
 };
 
 struct perf_output_handle {
@@ -3824,6 +3824,8 @@ static void perf_event_task_event(struct perf_task_event *task_event)
 	rcu_read_lock();
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
 		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+		if (cpuctx->active_pmu != pmu)
+			goto next;
 		perf_event_task_ctx(&cpuctx->ctx, task_event);
 
 		ctx = task_event->task_ctx;
@@ -3959,6 +3961,8 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
 	rcu_read_lock();
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
 		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+		if (cpuctx->active_pmu != pmu)
+			goto next;
 		perf_event_comm_ctx(&cpuctx->ctx, comm_event);
 
 		ctxn = pmu->task_ctx_nr;
@@ -4144,6 +4148,8 @@ got_name:
 	rcu_read_lock();
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
 		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+		if (cpuctx->active_pmu != pmu)
+			goto next;
 		perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
 					vma->vm_flags & VM_EXEC);
 
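The three hunks above insert the same guard into each event-delivery loop: several software pmus share one per-cpu context, so per-context work must run only for the context's designated owner or events are delivered once per sharer. A standalone sketch (not kernel code) of the idea:

#include <stdio.h>

/* Two pmus sharing one context; only the owner delivers. */
struct cpu_ctx { const struct sw_pmu *active_pmu; };
struct sw_pmu { const char *name; struct cpu_ctx *ctx; };

int main(void)
{
	struct cpu_ctx shared;
	struct sw_pmu sw = { "software", &shared };
	struct sw_pmu tp = { "tracepoint", &shared };	/* shares sw's context */
	struct sw_pmu *pmus[] = { &sw, &tp };

	shared.active_pmu = &sw;	/* exactly one owner per context */

	for (int i = 0; i < 2; i++) {
		if (pmus[i]->ctx->active_pmu != pmus[i])
			continue;	/* plays the role of 'goto next' above */
		printf("deliver via %s\n", pmus[i]->name);	/* printed once */
	}
	return 0;
}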
@@ -4713,7 +4719,7 @@ static int perf_swevent_init(struct perf_event *event)
 		break;
 	}
 
-	if (event_id > PERF_COUNT_SW_MAX)
+	if (event_id >= PERF_COUNT_SW_MAX)
 		return -ENOENT;
 
 	if (!event->parent) {
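The _MAX enumerator counts the valid IDs, so the valid range is [0, MAX) and ">=" is the correct check; with ">", id == MAX slips through and indexes one past the end of any MAX-sized table. A standalone illustration:

#include <stdio.h>

enum sw_id { SW_A, SW_B, SW_C, SW_MAX };

static const char *name(int id)
{
	static const char *names[SW_MAX] = { "a", "b", "c" };

	if (id >= SW_MAX)		/* 'id > SW_MAX' would admit SW_MAX */
		return "(rejected)";
	return names[id];
}

int main(void)
{
	printf("%s\n", name(SW_MAX));	/* prints "(rejected)" */
	return 0;
}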
@@ -5145,20 +5151,36 @@ static void *find_pmu_context(int ctxn)
 	return NULL;
 }
 
-static void free_pmu_context(void * __percpu cpu_context)
+static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
 {
-	struct pmu *pmu;
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct perf_cpu_context *cpuctx;
+
+		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
+
+		if (cpuctx->active_pmu == old_pmu)
+			cpuctx->active_pmu = pmu;
+	}
+}
+
+static void free_pmu_context(struct pmu *pmu)
+{
+	struct pmu *i;
 
 	mutex_lock(&pmus_lock);
 	/*
 	 * Like a real lame refcount.
 	 */
-	list_for_each_entry(pmu, &pmus, entry) {
-		if (pmu->pmu_cpu_context == cpu_context)
+	list_for_each_entry(i, &pmus, entry) {
+		if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
+			update_pmu_context(i, pmu);
 			goto out;
+		}
 	}
 
-	free_percpu(cpu_context);
+	free_percpu(pmu->pmu_cpu_context);
 out:
 	mutex_unlock(&pmus_lock);
 }
@@ -5190,6 +5212,7 @@ int perf_pmu_register(struct pmu *pmu)
 		cpuctx->ctx.pmu = pmu;
 		cpuctx->jiffies_interval = 1;
 		INIT_LIST_HEAD(&cpuctx->rotation_list);
+		cpuctx->active_pmu = pmu;
 	}
 
 got_cpu_context:
@@ -5241,7 +5264,7 @@ void perf_pmu_unregister(struct pmu *pmu)
 	synchronize_rcu();
 
 	free_percpu(pmu->pmu_disable_count);
-	free_pmu_context(pmu->pmu_cpu_context);
+	free_pmu_context(pmu);
 }
 
 struct pmu *perf_init_event(struct perf_event *event)
|
||||||
return count;
|
return count;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static loff_t tracing_seek(struct file *file, loff_t offset, int origin)
|
||||||
|
{
|
||||||
|
if (file->f_mode & FMODE_READ)
|
||||||
|
return seq_lseek(file, offset, origin);
|
||||||
|
else
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
static const struct file_operations tracing_fops = {
|
static const struct file_operations tracing_fops = {
|
||||||
.open = tracing_open,
|
.open = tracing_open,
|
||||||
.read = seq_read,
|
.read = seq_read,
|
||||||
.write = tracing_write_stub,
|
.write = tracing_write_stub,
|
||||||
.llseek = seq_lseek,
|
.llseek = tracing_seek,
|
||||||
.release = tracing_release,
|
.release = tracing_release,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
@@ -119,7 +119,7 @@ static uint_t (*Elf_r_sym)(Elf_Rel const *rp) = fn_ELF_R_SYM;
 
 static void fn_ELF_R_INFO(Elf_Rel *const rp, unsigned sym, unsigned type)
 {
-	rp->r_info = ELF_R_INFO(sym, type);
+	rp->r_info = _w(ELF_R_INFO(sym, type));
 }
 static void (*Elf_r_info)(Elf_Rel *const rp, unsigned sym, unsigned type) = fn_ELF_R_INFO;
 
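recordmcount edits object files whose byte order is the target's, not necessarily the host's, so a raw assignment stores r_info in host order; per the commit title, _w() is the endianness-honoring word helper that fixes this. A standalone analogue of an order-independent store:

#include <stdint.h>
#include <stdio.h>

/* Writes v as big-endian bytes regardless of host byte order. */
static void put_be32(uint8_t *p, uint32_t v)
{
	p[0] = v >> 24;		/* most significant byte first */
	p[1] = v >> 16;
	p[2] = v >> 8;
	p[3] = v;
}

int main(void)
{
	uint8_t buf[4];
	uint32_t r_info = (12 << 8) | 1;	/* ELF32_R_INFO(sym=12, type=1) */

	put_be32(buf, r_info);			/* host-order-independent */
	printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
	return 0;
}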
@@ -125,7 +125,9 @@ exuberant()
 	-I DEFINE_TRACE,EXPORT_TRACEPOINT_SYMBOL,EXPORT_TRACEPOINT_SYMBOL_GPL \
 	--extra=+f --c-kinds=-px				\
 	--regex-asm='/^ENTRY\(([^)]*)\).*/\1/'			\
-	--regex-c='/^SYSCALL_DEFINE[[:digit:]]?\(([^,)]*).*/sys_\1/'
+	--regex-c='/^SYSCALL_DEFINE[[:digit:]]?\(([^,)]*).*/sys_\1/' \
+	--regex-c++='/^TRACE_EVENT\(([^,)]*).*/trace_\1/'	\
+	--regex-c++='/^DEFINE_EVENT\(([^,)]*).*/trace_\1/'
 
 	all_kconfigs | xargs $1 -a				\
 		--langdef=kconfig --language-force=kconfig	\