Merge branch 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  perf: Validate cpu early in perf_event_alloc()
  perf: Find_get_context: fix the per-cpu-counter check
  perf: Fix contexted inheritance
This commit is contained in:
Linus Torvalds 2011-01-18 14:29:37 -08:00
commit 335bc70b6b
1 changed file with 12 additions and 11 deletions

View File

@ -2228,14 +2228,11 @@ find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
unsigned long flags; unsigned long flags;
int ctxn, err; int ctxn, err;
if (!task && cpu != -1) { if (!task) {
/* Must be root to operate on a CPU event: */ /* Must be root to operate on a CPU event: */
if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
return ERR_PTR(-EACCES); return ERR_PTR(-EACCES);
if (cpu < 0 || cpu >= nr_cpumask_bits)
return ERR_PTR(-EINVAL);
/* /*
* We could be clever and allow to attach a event to an * We could be clever and allow to attach a event to an
* offline CPU and activate it when the CPU comes up, but * offline CPU and activate it when the CPU comes up, but
@ -5541,6 +5538,11 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
struct hw_perf_event *hwc; struct hw_perf_event *hwc;
long err; long err;
if ((unsigned)cpu >= nr_cpu_ids) {
if (!task || cpu != -1)
return ERR_PTR(-EINVAL);
}
event = kzalloc(sizeof(*event), GFP_KERNEL); event = kzalloc(sizeof(*event), GFP_KERNEL);
if (!event) if (!event)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
@ -5589,7 +5591,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
if (!overflow_handler && parent_event) if (!overflow_handler && parent_event)
overflow_handler = parent_event->overflow_handler; overflow_handler = parent_event->overflow_handler;
event->overflow_handler = overflow_handler; event->overflow_handler = overflow_handler;
if (attr->disabled) if (attr->disabled)
@ -6494,7 +6496,6 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
raw_spin_lock_irqsave(&parent_ctx->lock, flags); raw_spin_lock_irqsave(&parent_ctx->lock, flags);
parent_ctx->rotate_disable = 0; parent_ctx->rotate_disable = 0;
raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
child_ctx = child->perf_event_ctxp[ctxn]; child_ctx = child->perf_event_ctxp[ctxn];
@ -6502,12 +6503,11 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
/* /*
* Mark the child context as a clone of the parent * Mark the child context as a clone of the parent
* context, or of whatever the parent is a clone of. * context, or of whatever the parent is a clone of.
* Note that if the parent is a clone, it could get *
* uncloned at any point, but that doesn't matter * Note that if the parent is a clone, the holding of
* because the list of events and the generation * parent_ctx->lock avoids it from being uncloned.
* count can't have changed since we took the mutex.
*/ */
cloned_ctx = rcu_dereference(parent_ctx->parent_ctx); cloned_ctx = parent_ctx->parent_ctx;
if (cloned_ctx) { if (cloned_ctx) {
child_ctx->parent_ctx = cloned_ctx; child_ctx->parent_ctx = cloned_ctx;
child_ctx->parent_gen = parent_ctx->parent_gen; child_ctx->parent_gen = parent_ctx->parent_gen;
@ -6518,6 +6518,7 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
get_ctx(child_ctx->parent_ctx); get_ctx(child_ctx->parent_ctx);
} }
raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
mutex_unlock(&parent_ctx->mutex); mutex_unlock(&parent_ctx->mutex);
perf_unpin_context(parent_ctx); perf_unpin_context(parent_ctx);