Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Ingo Molnar:
 "Two fixes:

   - Fix 'NMI handler took too long to run' false positives

     [ Genuine NMI overhead speedups will come for v3.13, this commit
       only fixes a measurement bug ]

   - Fix perf ring-buffer missed barrier causing (rare) ring-buffer data
     corruption on ppc64"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/x86: Fix NMI measurements
  perf: Fix perf ring buffer memory ordering
commit 9581b7d268
Author: Linus Torvalds
Date:   2013-11-01 12:54:51 -07:00

4 changed files with 39 additions and 14 deletions

arch/x86/kernel/cpu/perf_event.c

@@ -1276,16 +1276,16 @@ void perf_events_lapic_init(void)
 static int __kprobes
 perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
 {
-	int ret;
 	u64 start_clock;
 	u64 finish_clock;
+	int ret;
 
 	if (!atomic_read(&active_events))
 		return NMI_DONE;
 
-	start_clock = local_clock();
+	start_clock = sched_clock();
 	ret = x86_pmu.handle_irq(regs);
-	finish_clock = local_clock();
+	finish_clock = sched_clock();
 
 	perf_sample_event_took(finish_clock - start_clock);

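The fix above only swaps which clock brackets the handler; the measurement pattern itself is unchanged. As a minimal user-space sketch of that pattern (clock_gettime(CLOCK_MONOTONIC) standing in for sched_clock(), and a hypothetical handler() as the timed work):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t nsec_now(void)		/* stand-in for sched_clock() */
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

static void handler(void)		/* hypothetical work being timed */
{
}

int main(void)
{
	uint64_t start_clock, finish_clock;

	start_clock = nsec_now();
	handler();
	finish_clock = nsec_now();

	/* the kernel hands this delta to perf_sample_event_took() */
	printf("handler took %llu ns\n",
	       (unsigned long long)(finish_clock - start_clock));
	return 0;
}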
arch/x86/kernel/nmi.c

@@ -113,10 +113,10 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
 		u64 before, delta, whole_msecs;
 		int remainder_ns, decimal_msecs, thishandled;
 
-		before = local_clock();
+		before = sched_clock();
 		thishandled = a->handler(type, regs);
 		handled += thishandled;
-		delta = local_clock() - before;
+		delta = sched_clock() - before;
 		trace_nmi_handler(a->handler, (int)delta, thishandled);
 
 		if (delta < nmi_longest_ns)

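The delta measured above feeds the "NMI handler took too long to run" warning, and the whole_msecs/remainder_ns/decimal_msecs variables in this hunk split that nanosecond delta into the printed millisecond form. A worked example of the decomposition (plain division standing in for the kernel's do_div()):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t delta = 3456789;			/* example delta, in ns */
	uint64_t whole_msecs = delta / 1000000;		/* 3 */
	int remainder_ns = delta % 1000000;		/* 456789 */
	int decimal_msecs = remainder_ns / 1000;	/* 456 */

	/* prints "3.456 msecs", the shape the warning uses */
	printf("%llu.%03d msecs\n",
	       (unsigned long long)whole_msecs, decimal_msecs);
	return 0;
}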
include/uapi/linux/perf_event.h

@@ -456,13 +456,15 @@ struct perf_event_mmap_page {
 	/*
 	 * Control data for the mmap() data buffer.
 	 *
-	 * User-space reading the @data_head value should issue an rmb(), on
-	 * SMP capable platforms, after reading this value -- see
-	 * perf_event_wakeup().
+	 * User-space reading the @data_head value should issue an smp_rmb(),
+	 * after reading this value.
 	 *
 	 * When the mapping is PROT_WRITE the @data_tail value should be
-	 * written by userspace to reflect the last read data. In this case
-	 * the kernel will not over-write unread data.
+	 * written by userspace to reflect the last read data, after issueing
+	 * an smp_mb() to separate the data read from the ->data_tail store.
+	 * In this case the kernel will not over-write unread data.
+	 *
+	 * See perf_output_put_handle() for the data ordering.
 	 */
 	__u64	data_head;		/* head in the data section */
 	__u64	data_tail;		/* user-space written tail */

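Spelled out, the consumer protocol this comment documents looks like the sketch below. It assumes a mapped perf_event_mmap_page; the user_rmb()/user_mb() macros are compiler-builtin stand-ins for smp_rmb()/smp_mb(), and drain_events()/consume() are illustrative helpers, not part of the ABI:

#include <linux/perf_event.h>

#define user_rmb() __atomic_thread_fence(__ATOMIC_ACQUIRE)	/* ~smp_rmb() */
#define user_mb()  __atomic_thread_fence(__ATOMIC_SEQ_CST)	/* ~smp_mb() */

static void drain_events(struct perf_event_mmap_page *pg,
			 void (*consume)(__u64 tail, __u64 head))
{
	__u64 head = pg->data_head;	/* READ ->data_head */

	user_rmb();			/* before reading the records */
	consume(pg->data_tail, head);	/* READ $data in [tail, head) */
	user_mb();			/* data reads vs. the tail store */

	pg->data_tail = head;		/* WRITE ->data_tail */
}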
kernel/events/ring_buffer.c

@@ -87,10 +87,31 @@ again:
 		goto out;
 
 	/*
-	 * Publish the known good head. Rely on the full barrier implied
-	 * by atomic_dec_and_test() order the rb->head read and this
-	 * write.
+	 * Since the mmap() consumer (userspace) can run on a different CPU:
+	 *
+	 *   kernel				user
+	 *
+	 *   READ ->data_tail			READ ->data_head
+	 *   smp_mb()	(A)			smp_rmb()	(C)
+	 *   WRITE $data			READ  $data
+	 *   smp_wmb()	(B)			smp_mb()	(D)
+	 *   STORE ->data_head			WRITE ->data_tail
+	 *
+	 * Where A pairs with D, and B pairs with C.
+	 *
+	 * I don't think A needs to be a full barrier because we won't in fact
+	 * write data until we see the store from userspace. So we simply don't
+	 * issue the data WRITE until we observe it. Be conservative for now.
+	 *
+	 * OTOH, D needs to be a full barrier since it separates the data READ
+	 * from the tail WRITE.
+	 *
+	 * For B a WMB is sufficient since it separates two WRITEs, and for C
+	 * an RMB is sufficient since it separates two READs.
+	 *
+	 * See perf_output_begin().
 	 */
+	smp_wmb();
 	rb->user_page->data_head = head;
 
 	/*
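The kernel column of the table above is the producer half of a classic single-producer/single-consumer ring. A reduced C11 sketch (hypothetical byte queue; acquire/release atomics standing in for the smp_*() barriers):

#include <stdatomic.h>
#include <stdint.h>

#define RING_SIZE 4096			/* power of two, like the real rb */

struct ring {
	_Atomic uint64_t data_head;	/* written only by the producer */
	_Atomic uint64_t data_tail;	/* written only by the consumer */
	uint8_t data[RING_SIZE];
};

/* READ ->data_tail; WRITE $data; barrier (B); STORE ->data_head */
static int ring_produce(struct ring *rb, uint8_t byte)
{
	uint64_t head = atomic_load_explicit(&rb->data_head,
					     memory_order_relaxed);
	uint64_t tail = atomic_load_explicit(&rb->data_tail,
					     memory_order_acquire);

	if (head - tail >= RING_SIZE)	/* full: never overwrite unread data */
		return -1;

	rb->data[head & (RING_SIZE - 1)] = byte;	/* WRITE $data */

	/* release store: keeps the data write ordered before the head
	 * publish, the role the smp_wmb() (B) added above plays here */
	atomic_store_explicit(&rb->data_head, head + 1, memory_order_release);
	return 0;
}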
@@ -154,9 +175,11 @@ int perf_output_begin(struct perf_output_handle *handle,
 		 * Userspace could choose to issue a mb() before updating the
 		 * tail pointer. So that all reads will be completed before the
 		 * write is issued.
+		 *
+		 * See perf_output_put_handle().
 		 */
 		tail = ACCESS_ONCE(rb->user_page->data_tail);
-		smp_rmb();
+		smp_mb();
 		offset = head = local_read(&rb->head);
 		head += size;
 		if (unlikely(!perf_output_space(rb, tail, offset, head)))
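ACCESS_ONCE() above forces a single, untorn load of the user-controlled tail through a volatile lvalue, so the compiler cannot cache or refetch it; its definition in include/linux/compiler.h of this era is essentially:

#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

The smp_mb() that replaces smp_rmb() here is barrier (A) from the table in perf_output_put_handle(): it separates the ->data_tail read from the subsequent $data writes and pairs with the user-space smp_mb() (D) issued before the tail update.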