rcu: Update comments to reflect softirqs vs. kthreads
We now have kthreads only for flavors of RCU that support boosting, so update the now-misleading comments accordingly.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
commit e0f23060ad
parent 72fe701b70
@@ -198,7 +198,7 @@ DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
 };
 #endif /* #ifdef CONFIG_NO_HZ */
 
-static int blimit = 10;		/* Maximum callbacks per softirq. */
+static int blimit = 10;		/* Maximum callbacks per rcu_do_batch. */
 static int qhimark = 10000;	/* If this many pending, ignore blimit. */
 static int qlowmark = 100;	/* Once only this many pending, use blimit. */
 
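These three module parameters describe a simple throttling policy: invoke at most blimit callbacks per rcu_do_batch() pass, ignore that cap while more than qhimark callbacks are pending, and restore it once the backlog falls back below qlowmark. A hedged sketch of that policy follows; the list-walking details and the rdp->blimit/rdp->qlen fields are illustrative assumptions, not lines from this diff.

	/* Sketch only: invoke ready callbacks, honoring the batch limit. */
	count = 0;
	while (list) {
		next = list->next;
		list->func(list);			/* Invoke one callback. */
		list = next;
		if (++count >= rdp->blimit)		/* Stop once the cap is hit. */
			break;
	}
	rdp->qlen -= count;
	/* (Elsewhere, enqueueing past qhimark would raise rdp->blimit so the
	 *  cap is effectively ignored until the queue drains.) */
	if (rdp->qlen < qlowmark)			/* Backlog has drained... */
		rdp->blimit = blimit;			/* ...return to the normal cap. */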
@@ -1261,7 +1261,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 	local_irq_restore(flags);
 
-	/* Re-raise the RCU softirq if there are callbacks remaining. */
+	/* Re-invoke RCU core processing if there are callbacks remaining. */
 	if (cpu_has_callbacks_ready_to_invoke(rdp))
 		invoke_rcu_core();
 }
 
@@ -1269,7 +1269,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 /*
  * Check to see if this CPU is in a non-context-switch quiescent state
  * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
- * Also schedule the RCU softirq handler.
+ * Also schedule RCU core processing.
  *
  * This function must be called with hardirqs disabled.  It is normally
  * invoked from the scheduling-clock interrupt.  If rcu_pending returns
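This comment heads the scheduling-clock path that notes quiescent states and then hands off further work to RCU core processing. A rough, hedged sketch of the flow it describes is below; only rcu_pending() and invoke_rcu_core() appear in this diff, and the quiescent-state reporting shown is simplified for illustration.

void rcu_check_callbacks(int cpu, int user)
{
	if (user)
		rcu_sched_qs(cpu);	/* User mode: quiescent state for rcu_sched. */
	else if (!in_softirq())
		rcu_bh_qs(cpu);		/* Not in softirq: quiescent state for rcu_bh. */
	if (rcu_pending(cpu))
		invoke_rcu_core();	/* Schedule RCU core processing. */
}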
@@ -1448,9 +1448,9 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 #endif /* #else #ifdef CONFIG_SMP */
 
 /*
- * This does the RCU processing work from softirq context for the
- * specified rcu_state and rcu_data structures.  This may be called
- * only from the CPU to whom the rdp belongs.
+ * This does the RCU core processing work for the specified rcu_state
+ * and rcu_data structures.  This may be called only from the CPU to
+ * whom the rdp belongs.
  */
 static void
 __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
@@ -1487,7 +1487,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 }
 
 /*
- * Do softirq processing for the current CPU.
+ * Do RCU core processing for the current CPU.
  */
 static void rcu_process_callbacks(struct softirq_action *unused)
 {
@@ -1503,10 +1503,11 @@ static void rcu_process_callbacks(struct softirq_action *unused)
 }
 
 /*
- * Wake up the current CPU's kthread.  This replaces raise_softirq()
- * in earlier versions of RCU.  Note that because we are running on
- * the current CPU with interrupts disabled, the rcu_cpu_kthread_task
- * cannot disappear out from under us.
+ * Schedule RCU callback invocation.  If the specified type of RCU
+ * does not support RCU priority boosting, just do a direct call,
+ * otherwise wake up the per-CPU kernel kthread.  Note that because we
+ * are running on the current CPU with interrupts disabled, the
+ * rcu_cpu_kthread_task cannot disappear out from under us.
  */
 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 {
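The new comment describes a two-way dispatch: invoke callbacks directly unless the RCU flavor supports priority boosting, in which case wake the per-CPU kthread instead. A minimal sketch of that dispatch follows, under the assumption that the boost capability is recorded in a field such as rsp->boost and that a helper like invoke_rcu_callbacks_kthread() does the wakeup; both names are assumptions here, not taken from this diff.

static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
{
	if (likely(!rsp->boost)) {
		rcu_do_batch(rsp, rdp);		/* No boosting: invoke callbacks directly. */
		return;
	}
	invoke_rcu_callbacks_kthread();		/* Boosting: wake the per-CPU kthread. */
}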
@@ -1478,7 +1478,8 @@ static int rcu_cpu_kthread_should_stop(int cpu)
 
 /*
  * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
- * earlier RCU softirq.
+ * RCU softirq used in flavors and configurations of RCU that do not
+ * support RCU priority boosting.
  */
 static int rcu_cpu_kthread(void *arg)
 {
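For context, the per-CPU kthread referred to here is essentially a loop that sleeps until per-CPU work is flagged and then invokes the ready callbacks. The following is a simplified, hedged sketch: the wait queue, the rcu_cpu_has_work flag, and rcu_kthread_do_work() are illustrative assumptions, and the real thread also handles yielding and priority details omitted here.

static int rcu_cpu_kthread(void *arg)
{
	int cpu = (unsigned long)arg;

	for (;;) {
		wait_event_interruptible(per_cpu(rcu_cpu_wq, cpu),
					 per_cpu(rcu_cpu_has_work, cpu) ||
					 kthread_should_stop());
		if (kthread_should_stop())
			break;
		local_bh_disable();
		per_cpu(rcu_cpu_has_work, cpu) = 0;
		rcu_kthread_do_work();		/* Invoke this CPU's ready callbacks. */
		local_bh_enable();
	}
	return 0;
}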