call_function_many: fix list delete vs add race
Peter pointed out there was nothing preventing the list_del_rcu in smp_call_function_interrupt from running before the list_add_rcu in smp_call_function_many.

Fix this by not setting refs until we have gotten the lock for the list. Take advantage of the wmb in list_add_rcu to save an explicit additional one.

I tried to force this race with a udelay before the lock & list_add and by mixing all 64 online cpus with just 3 random cpus in the mask, but was unsuccessful. Still, inspection shows a valid race, and the fix is an extension of the existing protection window in the current code.

Cc: stable@kernel.org (v2.6.32 and later)
Reported-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Milton Miller <miltonm@bga.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
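To make the publish/consume ordering concrete, here is a standalone C11 model of the protocol after the fix. This is illustrative only, not the kernel code: call_data, queued, say, and the pthread scaffolding are stand-ins, and the wmb implied by list_add_rcu is modeled as a release store.

/* Standalone model of the fixed ordering: publish the entry first,
 * then let refs become nonzero. Build with: cc -pthread -o model model.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct call_data {
	void (*func)(void *);
	void *info;
	atomic_int refs;	/* stands in for data->refs */
	atomic_int queued;	/* stands in for being on call_function.queue */
};

static struct call_data data;

static void say(void *info) { puts(info); }

/* Producer: analogous to smp_call_function_many() after this fix. */
static void *producer(void *unused)
{
	data.func = say;
	data.info = "ipi!";
	/* list_add_rcu() implies a wmb; model it as a release store so
	 * func/info are visible before the entry appears queued. */
	atomic_store_explicit(&data.queued, 1, memory_order_release);
	/* Only now may refs be filled out; a consumer that sees refs == 0
	 * must skip the (possibly half-written, unqueued) block. */
	atomic_store_explicit(&data.refs, 1, memory_order_release);
	return NULL;
}

/* Consumer: analogous to the interrupt handler, which waits until it
 * sees refs filled out before touching the entry. */
static void *consumer(void *unused)
{
	while (atomic_load_explicit(&data.refs, memory_order_acquire) == 0)
		;	/* not ready for us yet: skip/spin */
	data.func(data.info);	/* safe: publication ordered before refs */
	atomic_fetch_sub(&data.refs, 1);
	return NULL;
}

int main(void)
{
	pthread_t p, c;
	pthread_create(&c, NULL, consumer, NULL);
	pthread_create(&p, NULL, producer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);
	return 0;
}

With the old ordering (refs stored, after only an smp_wmb, before the entry was queued), a consumer holding a stale pointer to the reused block could pass the refs gate before the add had happened.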
parent ef2b4b95a6
commit e6cd1e07a1

kernel/smp.c | 20 +++++++++++++-------
@@ -491,14 +491,15 @@ void smp_call_function_many(const struct cpumask *mask,
 	cpumask_clear_cpu(this_cpu, data->cpumask);
 
 	/*
-	 * To ensure the interrupt handler gets an complete view
-	 * we order the cpumask and refs writes and order the read
-	 * of them in the interrupt handler.  In addition we may
-	 * only clear our own cpu bit from the mask.
+	 * We reuse the call function data without waiting for any grace
+	 * period after some other cpu removes it from the global queue.
+	 * This means a cpu might find our data block as it is writen.
+	 * The interrupt handler waits until it sees refs filled out
+	 * while its cpu mask bit is set; here we may only clear our
+	 * own cpu mask bit, and must wait to set refs until we are sure
+	 * previous writes are complete and we have obtained the lock to
+	 * add the element to the queue.
 	 */
-	smp_wmb();
-
-	atomic_set(&data->refs, cpumask_weight(data->cpumask));
 
 	raw_spin_lock_irqsave(&call_function.lock, flags);
 	/*
@@ -507,6 +508,11 @@ void smp_call_function_many(const struct cpumask *mask,
 	 * will not miss any other list entries:
 	 */
 	list_add_rcu(&data->csd.list, &call_function.queue);
+	/*
+	 * We rely on the wmb() in list_add_rcu to order the writes
+	 * to func, data, and cpumask before this write to refs.
+	 */
+	atomic_set(&data->refs, cpumask_weight(data->cpumask));
 	raw_spin_unlock_irqrestore(&call_function.lock, flags);
 
 	/*
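The other half of the race is the delete side: as the handler of that era worked, each cpu named in the mask decrements refs after running the function, and the last one to do so takes call_function.lock and does the list_del_rcu. The following standalone C11 sketch (again illustrative names, not kernel code) models that countdown and shows why refs must stay zero until the entry is queued: no consumer can reach the unlink path before the add has happened.

/* Standalone model of the unlink side: the last consumer to drop refs
 * removes the entry, so refs must not be observable as nonzero before
 * the entry is actually on the queue. Build with: cc -pthread unlink.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NCPUS 4			/* consumers named in the mask */

static atomic_int refs;		/* stands in for data->refs */
static atomic_bool queued;	/* stands in for list membership */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; /* call_function.lock */

static void *ipi_handler(void *unused)
{
	/* Gate: skip the entry until refs is filled out. */
	while (atomic_load_explicit(&refs, memory_order_acquire) == 0)
		;
	/* ...the real handler runs the function for this cpu here... */
	if (atomic_fetch_sub(&refs, 1) == 1) {
		/* Last cpu out unlinks, like list_del_rcu under the lock.
		 * With the fixed ordering this always sees queued == true. */
		pthread_mutex_lock(&lock);
		bool was_queued = atomic_exchange(&queued, false);
		pthread_mutex_unlock(&lock);
		printf("unlinked: entry %s queued\n",
		       was_queued ? "was" : "was NOT");
	}
	return NULL;
}

int main(void)
{
	pthread_t t[NCPUS];
	for (int i = 0; i < NCPUS; i++)
		pthread_create(&t[i], NULL, ipi_handler, NULL);

	/* Producer with the fixed ordering: add first, then set refs,
	 * both under the queue lock as in the patched function. */
	pthread_mutex_lock(&lock);
	atomic_store(&queued, true);			/* list_add_rcu(...) */
	atomic_store_explicit(&refs, NCPUS, memory_order_release);
	pthread_mutex_unlock(&lock);

	for (int i = 0; i < NCPUS; i++)
		pthread_join(t[i], NULL);
	return 0;
}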