rcu: Refactor rcu_barrier() empty-list handling
This commit saves a few lines by checking first for an empty callback
list.  If the callback list is empty, then that CPU is taken care of,
regardless of its online or nocb state.  Also simplify tracing
accordingly and fold a few lines together.

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
commit 0cabb47af3
parent 82980b1622
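In outline, the refactored per-CPU handling described above reduces to the
following sketch (paraphrased; tracing and IRQ handling are omitted here, and
the exact post-patch code is reproduced after the second kernel/rcu/tree.c
hunk below):

	/*
	 * Sketch only: an empty callback list means rcu_barrier() has
	 * nothing to wait on for this CPU, regardless of its online or
	 * offloaded (nocb) state, so that case is now checked first.
	 */
	if (!rcu_segcblist_n_cbs(&rdp->cblist))
		continue;
	if (cpu_online(cpu))
		/* Online CPU with callbacks: queue the barrier callback via IPI. */
		smp_call_function_single(cpu, rcu_barrier_func, (void *)cpu, 1);
	else
		/* Offline CPU with callbacks (a no-CBs CPU, per "OfflineNoCBQ"):
		   queue the barrier callback locally. */
		rcu_barrier_func((void *)cpu);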
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -794,16 +794,15 @@ TRACE_EVENT_RCU(rcu_torture_read,
  * Tracepoint for rcu_barrier() execution.  The string "s" describes
  * the rcu_barrier phase:
  *	"Begin": rcu_barrier() started.
+ *	"CB": An rcu_barrier_callback() invoked a callback, not the last.
  *	"EarlyExit": rcu_barrier() piggybacked, thus early exit.
  *	"Inc1": rcu_barrier() piggyback check counter incremented.
- *	"OfflineNoCBQ": rcu_barrier() found offline no-CBs CPU with callbacks.
- *	"OnlineQ": rcu_barrier() found online CPU with callbacks.
- *	"OnlineNQ": rcu_barrier() found online CPU, no callbacks.
+ *	"Inc2": rcu_barrier() piggyback check counter incremented.
  *	"IRQ": An rcu_barrier_callback() callback posted on remote CPU.
  *	"IRQNQ": An rcu_barrier_callback() callback found no callbacks.
- *	"CB": An rcu_barrier_callback() invoked a callback, not the last.
  *	"LastCB": An rcu_barrier_callback() invoked the last callback.
- *	"Inc2": rcu_barrier() piggyback check counter incremented.
+ *	"NQ": rcu_barrier() found a CPU with no callbacks.
+ *	"OnlineQ": rcu_barrier() found online CPU with callbacks.
  * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument
  * is the count of remaining callbacks, and "done" is the piggybacking count.
  */
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -4030,8 +4030,7 @@ void rcu_barrier(void)
 
 	/* Did someone else do our work for us? */
 	if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
-		rcu_barrier_trace(TPS("EarlyExit"), -1,
-				  rcu_state.barrier_sequence);
+		rcu_barrier_trace(TPS("EarlyExit"), -1, rcu_state.barrier_sequence);
 		smp_mb(); /* caller's subsequent code after above check. */
 		mutex_unlock(&rcu_state.barrier_mutex);
 		return;
@@ -4059,26 +4058,18 @@ void rcu_barrier(void)
 	 */
 	for_each_possible_cpu(cpu) {
 		rdp = per_cpu_ptr(&rcu_data, cpu);
-		if (cpu_is_offline(cpu) &&
-		    !rcu_rdp_is_offloaded(rdp))
+		if (!rcu_segcblist_n_cbs(&rdp->cblist)) {
+			rcu_barrier_trace(TPS("NQ"), cpu, rcu_state.barrier_sequence);
 			continue;
-		if (rcu_segcblist_n_cbs(&rdp->cblist) && cpu_online(cpu)) {
-			rcu_barrier_trace(TPS("OnlineQ"), cpu,
-					  rcu_state.barrier_sequence);
+		}
+		if (cpu_online(cpu)) {
+			rcu_barrier_trace(TPS("OnlineQ"), cpu, rcu_state.barrier_sequence);
 			smp_call_function_single(cpu, rcu_barrier_func, (void *)cpu, 1);
-		} else if (rcu_segcblist_n_cbs(&rdp->cblist) &&
-			   cpu_is_offline(cpu)) {
-			rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu,
-					  rcu_state.barrier_sequence);
+		} else {
+			rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu, rcu_state.barrier_sequence);
 			local_irq_disable();
 			rcu_barrier_func((void *)cpu);
 			local_irq_enable();
-		} else if (cpu_is_offline(cpu)) {
-			rcu_barrier_trace(TPS("OfflineNoCBNoQ"), cpu,
-					  rcu_state.barrier_sequence);
-		} else {
-			rcu_barrier_trace(TPS("OnlineNQ"), cpu,
-					  rcu_state.barrier_sequence);
 		}
 	}
 	cpus_read_unlock();
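For readability, here is the per-CPU loop as it reads after this commit,
pieced together from the added and context lines of the hunk above (no new
code beyond what the diff shows):

	for_each_possible_cpu(cpu) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		if (!rcu_segcblist_n_cbs(&rdp->cblist)) {
			rcu_barrier_trace(TPS("NQ"), cpu, rcu_state.barrier_sequence);
			continue;
		}
		if (cpu_online(cpu)) {
			rcu_barrier_trace(TPS("OnlineQ"), cpu, rcu_state.barrier_sequence);
			smp_call_function_single(cpu, rcu_barrier_func, (void *)cpu, 1);
		} else {
			rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu, rcu_state.barrier_sequence);
			local_irq_disable();
			rcu_barrier_func((void *)cpu);
			local_irq_enable();
		}
	}
	cpus_read_unlock();

The old four-way if/else-if chain collapses to three cases because the empty
callback list is now handled up front, independent of the CPU's online or
offloaded state, which is why the "OfflineNoCBNoQ" and "OnlineNQ" trace
strings are replaced by the single "NQ" string.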