rcu: Stop disabling CPU hotplug in synchronize_rcu_expedited()
The fact that tasks could be migrated from leaf to root rcu_node structures meant that synchronize_rcu_expedited() had to disable CPU hotplug. However, tasks now stay put, so this commit removes the CPU-hotplug disabling from synchronize_rcu_expedited().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent 13bd64947f
commit 9b68387450
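The hunks below delete the try_get_online_cpus()/put_online_cpus() bracketing from every path through synchronize_rcu_expedited(). As a minimal illustration of why that bracketing forced a matching release onto each early-return path, and of how the function's shape simplifies once it is gone, here is a self-contained userspace sketch. It is only a model: try_block_hotplug(), allow_hotplug(), normal_grace_period(), others_did_our_work(), and the *_sketch() functions are hypothetical stand-ins patterned on try_get_online_cpus(), put_online_cpus(), wait_rcu_gp(call_rcu), the sync_rcu_preempt_exp_count snapshot test, and the expedited path itself; they are not kernel APIs.

#include <stdbool.h>
#include <stdio.h>

static int hotplug_blockers;        /* models the hotplug reference count */
static bool hotplug_in_flight;      /* models an in-progress CPU-hotplug op */

/* Hypothetical stand-in for try_get_online_cpus(). */
static bool try_block_hotplug(void)
{
        if (hotplug_in_flight)
                return false;       /* hotplug already running, caller falls back */
        hotplug_blockers++;
        return true;
}

/* Hypothetical stand-in for put_online_cpus(). */
static void allow_hotplug(void)
{
        hotplug_blockers--;
}

/* Hypothetical stand-in for falling back to a normal grace period. */
static void normal_grace_period(void)
{
        puts("fell back to a normal grace period");
}

/* Hypothetical stand-in for the expedited-count snapshot test. */
static bool others_did_our_work(void)
{
        return false;
}

/* Old shape: hotplug is pinned up front, so every exit must unpin it. */
static void expedited_gp_old_sketch(void)
{
        if (!try_block_hotplug()) {
                normal_grace_period();
                return;
        }
        if (others_did_our_work()) {
                allow_hotplug();    /* easy to miss on a new early-return path */
                return;
        }
        /* ... force readers onto ->blkd_tasks lists, wait for them to drain ... */
        allow_hotplug();            /* and unpin once more on the main path */
}

/* New shape: tasks stay on their leaf rcu_node, so nothing is pinned. */
static void expedited_gp_new_sketch(void)
{
        if (others_did_our_work())
                return;
        /* ... same expedited work, with CPU hotplug free to run concurrently ... */
}

int main(void)
{
        expedited_gp_old_sketch();
        expedited_gp_new_sketch();
        printf("hotplug_blockers = %d (should be 0)\n", hotplug_blockers);
        return 0;
}

Because tasks now stay on their leaf rcu_node structure, nothing in the expedited path depends on holding off hotplug, so the acquire/release pairing discipline disappears along with the calls, which is exactly what the diff removes.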
@@ -727,20 +727,6 @@ void synchronize_rcu_expedited(void)
         snap = READ_ONCE(sync_rcu_preempt_exp_count) + 1;
         smp_mb(); /* Above access cannot bleed into critical section. */
 
-        /*
-         * Block CPU-hotplug operations. This means that any CPU-hotplug
-         * operation that finds an rcu_node structure with tasks in the
-         * process of being boosted will know that all tasks blocking
-         * this expedited grace period will already be in the process of
-         * being boosted. This simplifies the process of moving tasks
-         * from leaf to root rcu_node structures.
-         */
-        if (!try_get_online_cpus()) {
-                /* CPU-hotplug operation in flight, fall back to normal GP. */
-                wait_rcu_gp(call_rcu);
-                return;
-        }
-
         /*
          * Acquire lock, falling back to synchronize_rcu() if too many
          * lock-acquisition failures. Of course, if someone does the
@@ -748,22 +734,17 @@ void synchronize_rcu_expedited(void)
          */
         while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
                 if (ULONG_CMP_LT(snap,
-                     READ_ONCE(sync_rcu_preempt_exp_count))) {
-                        put_online_cpus();
+                     READ_ONCE(sync_rcu_preempt_exp_count)))
                         goto mb_ret; /* Others did our work for us. */
-                }
                 if (trycount++ < 10) {
                         udelay(trycount * num_online_cpus());
                 } else {
-                        put_online_cpus();
                         wait_rcu_gp(call_rcu);
                         return;
                 }
         }
-        if (ULONG_CMP_LT(snap, READ_ONCE(sync_rcu_preempt_exp_count))) {
-                put_online_cpus();
+        if (ULONG_CMP_LT(snap, READ_ONCE(sync_rcu_preempt_exp_count)))
                 goto unlock_mb_ret; /* Others did our work for us. */
-        }
 
         /* force all RCU readers onto ->blkd_tasks lists. */
         synchronize_sched_expedited();
@@ -779,8 +760,6 @@ void synchronize_rcu_expedited(void)
         rcu_for_each_leaf_node(rsp, rnp)
                 sync_rcu_preempt_exp_init2(rsp, rnp);
 
-        put_online_cpus();
-
         /* Wait for snapshotted ->blkd_tasks lists to drain. */
         rnp = rcu_get_root(rsp);
         wait_event(sync_rcu_preempt_exp_wq,