rcu: Avoid resched_cpu() when rescheduling the current CPU
The resched_cpu() interface is quite handy, but it does acquire the
specified CPU's runqueue lock, which does not come for free. This
commit therefore substitutes the following when directing resched_cpu()
at the current CPU:

	set_tsk_need_resched(current);
	set_preempt_need_resched();

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
parent d3052109c0
commit fced9c8cfe
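For context on the lock the commit message refers to: resched_cpu() must work
for an arbitrary CPU, so it serializes against the target's runqueue. A rough
sketch of its shape in kernels of this era, paraphrased from
kernel/sched/core.c (details vary by version; treat this as illustration, not
part of the patch):

	void resched_cpu(int cpu)
	{
		struct rq *rq = cpu_rq(cpu);	/* target CPU's runqueue */
		unsigned long flags;

		raw_spin_lock_irqsave(&rq->lock, flags); /* the lock this commit avoids */
		if (cpu_online(cpu))
			resched_curr(rq);	/* set need-resched; IPI if remote */
		raw_spin_unlock_irqrestore(&rq->lock, flags);
	}

When the target is the CPU we are already running on, the
set_tsk_need_resched(current) plus set_preempt_need_resched() pair marks the
current task and CPU directly, so the next preempt_enable() or interrupt exit
enters the scheduler without ever touching the runqueue lock.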
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1354,7 +1354,8 @@ static void print_cpu_stall(void)
 	 * progress and it could be we're stuck in kernel space without context
 	 * switches for an entirely unreasonable amount of time.
 	 */
-	resched_cpu(smp_processor_id());
+	set_tsk_need_resched(current);
+	set_preempt_need_resched();
 }
 
 static void check_cpu_stall(struct rcu_data *rdp)
@@ -2675,10 +2676,12 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
 	WARN_ON_ONCE(!rdp->beenonline);
 
 	/* Report any deferred quiescent states if preemption enabled. */
-	if (!(preempt_count() & PREEMPT_MASK))
+	if (!(preempt_count() & PREEMPT_MASK)) {
 		rcu_preempt_deferred_qs(current);
-	else if (rcu_preempt_need_deferred_qs(current))
-		resched_cpu(rdp->cpu); /* Provoke future context switch. */
+	} else if (rcu_preempt_need_deferred_qs(current)) {
+		set_tsk_need_resched(current);
+		set_preempt_need_resched();
+	}
 
 	/* Update RCU state based on any recent quiescent states. */
 	rcu_check_quiescent_state(rdp);
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -672,7 +672,8 @@ static void sync_rcu_exp_handler(void *unused)
 		rcu_report_exp_rdp(rdp);
 	} else {
 		rdp->deferred_qs = true;
-		resched_cpu(rdp->cpu);
+		set_tsk_need_resched(t);
+		set_preempt_need_resched();
 	}
 	return;
 }
@@ -710,15 +711,16 @@ static void sync_rcu_exp_handler(void *unused)
 	 * because we are in an interrupt handler, which will cause that
 	 * function to take an early exit without doing anything.
 	 *
-	 * Otherwise, use resched_cpu() to force a context switch after
-	 * the CPU enables everything.
+	 * Otherwise, force a context switch after the CPU enables everything.
 	 */
 	rdp->deferred_qs = true;
 	if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
-	    WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs()))
+	    WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs())) {
 		rcu_preempt_deferred_qs(t);
-	else
-		resched_cpu(rdp->cpu);
+	} else {
+		set_tsk_need_resched(t);
+		set_preempt_need_resched();
+	}
 }
 
 /* PREEMPT=y, so no PREEMPT=n expedited grace period to clean up after. */
@@ -779,7 +781,8 @@ static void sync_sched_exp_handler(void *unused)
 	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
 	/* Store .exp before .rcu_urgent_qs. */
 	smp_store_release(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs), true);
-	resched_cpu(smp_processor_id());
+	set_tsk_need_resched(current);
+	set_preempt_need_resched();
 }
 
 /* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -791,8 +791,10 @@ static void rcu_flavor_check_callbacks(int user)
 	if (t->rcu_read_lock_nesting > 0 ||
 	    (preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK))) {
 		/* No QS, force context switch if deferred. */
-		if (rcu_preempt_need_deferred_qs(t))
-			resched_cpu(smp_processor_id());
+		if (rcu_preempt_need_deferred_qs(t)) {
+			set_tsk_need_resched(t);
+			set_preempt_need_resched();
+		}
 	} else if (rcu_preempt_need_deferred_qs(t)) {
 		rcu_preempt_deferred_qs(t); /* Report deferred QS. */
 		return;
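A note on why the substituted pattern is two calls rather than one (my summary
of the surrounding kernel interfaces, not part of the patch; the helper name
below is hypothetical):

	/* Hypothetical helper illustrating the pattern used throughout this patch. */
	static inline void resched_current_cpu(void)
	{
		/* Mark the task: set TIF_NEED_RESCHED on the current task. */
		set_tsk_need_resched(current);
		/*
		 * Mark the CPU: on architectures with a per-CPU preempt count
		 * (e.g. x86), fold the need-resched state into preempt_count()
		 * so that the next preempt_enable() or interrupt return
		 * notices it and invokes the scheduler.
		 */
		set_preempt_need_resched();
	}

This shortcut is valid only when the task and CPU being marked are the current
ones, which is exactly the case in each hunk above.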