rcu: Switch urgent quiescent-state requests to rcu_data structure

This commit removes ->rcu_need_heavy_qs and ->rcu_urgent_qs from the
rcu_dynticks structure and updates the code to access them from the
rcu_data structure.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Paul E. McKenney committed 2018-08-03 21:00:38 -07:00
commit 2dba13f0b6 (parent c458a89e96)
4 changed files with 14 additions and 16 deletions
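
The two flags implement a cross-CPU handshake: a requester release-stores the target CPU's rcu_urgent_qs, and that CPU's scheduler-tick path acquire-loads and clears it. Below is a minimal userspace sketch of that handshake with the flags living in a per-CPU rcu_data array, as this commit arranges; the C11 atomics, NR_CPUS value, and the request/check helpers are illustrative stand-ins, not kernel code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

struct rcu_data {                       /* abbreviated: the flags now live here */
        atomic_bool rcu_need_heavy_qs;  /* GP old, need heavy quiescent state. */
        atomic_bool rcu_urgent_qs;      /* GP old, need light quiescent state. */
};

static struct rcu_data rcu_data[NR_CPUS];

/* Requester side: pairs with the acquire load in the tick path below. */
static void request_urgent_qs(int cpu)
{
        atomic_store_explicit(&rcu_data[cpu].rcu_urgent_qs, true,
                              memory_order_release);
}

/* Tick side: load-acquire the flag, and clear it if it was set. */
static bool check_urgent_qs(int cpu)
{
        if (!atomic_load_explicit(&rcu_data[cpu].rcu_urgent_qs,
                                  memory_order_acquire))
                return false;
        atomic_store_explicit(&rcu_data[cpu].rcu_urgent_qs, false,
                              memory_order_relaxed);
        return true;
}

int main(void)
{
        request_urgent_qs(1);
        printf("urgent? %d\n", check_urgent_qs(1));     /* 1: flag was set */
        printf("urgent? %d\n", check_urgent_qs(1));     /* 0: already cleared */
        return 0;
}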

kernel/rcu/tree.c

@@ -362,7 +362,7 @@ static void __maybe_unused rcu_momentary_dyntick_idle(void)
         struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
         int special;
 
-        raw_cpu_write(rcu_dynticks.rcu_need_heavy_qs, false);
+        raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
         special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
         /* It is illegal to call this from idle state. */
         WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
@@ -928,7 +928,7 @@ void rcu_request_urgent_qs_task(struct task_struct *t)
         cpu = task_cpu(t);
         if (!task_curr(t))
                 return; /* This task is not running on that CPU. */
-        smp_store_release(per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, cpu), true);
+        smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
 }
 
 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
@@ -1081,8 +1081,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
          * is set way high.
          */
         jtsq = READ_ONCE(jiffies_to_sched_qs);
-        ruqp = per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, rdp->cpu);
-        rnhqp = &per_cpu(rcu_dynticks.rcu_need_heavy_qs, rdp->cpu);
+        ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu);
+        rnhqp = &per_cpu(rcu_data.rcu_need_heavy_qs, rdp->cpu);
         if (!READ_ONCE(*rnhqp) &&
             (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
              time_after(jiffies, rcu_state.jiffies_resched))) {
@@ -2499,13 +2499,13 @@ void rcu_check_callbacks(int user)
         trace_rcu_utilization(TPS("Start scheduler-tick"));
         raw_cpu_inc(rcu_data.ticks_this_gp);
         /* The load-acquire pairs with the store-release setting to true. */
-        if (smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs))) {
+        if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
                 /* Idle and userspace execution already are quiescent states. */
                 if (!rcu_is_cpu_rrupt_from_idle() && !user) {
                         set_tsk_need_resched(current);
                         set_preempt_need_resched();
                 }
-                __this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
+                __this_cpu_write(rcu_data.rcu_urgent_qs, false);
         }
         rcu_flavor_check_callbacks(user);
         if (rcu_pending())
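
The heavy quiescent state requested through rcu_need_heavy_qs is supplied by rcu_momentary_dyntick_idle() in the first hunk: bumping ->dynticks by 2 * RCU_DYNTICK_CTRL_CTR makes the CPU appear to have passed through a full dyntick-idle period. A compilable sketch of the counter arithmetic, assuming this era's RCU_DYNTICK_CTRL_MASK value of 0x1; the atomic_int and main() are userspace stand-ins, not kernel code.

#include <stdatomic.h>
#include <stdio.h>

#define RCU_DYNTICK_CTRL_MASK 0x1                       /* low-bit flag */
#define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)

static atomic_int dynticks = RCU_DYNTICK_CTRL_CTR;      /* odd count: CPU not idle */

int main(void)
{
        /* Advance by two idle/non-idle transitions at once, as
         * rcu_momentary_dyntick_idle() does.  atomic_fetch_add() returns the
         * old value, so adding the increment back models atomic_add_return(). */
        int special = atomic_fetch_add(&dynticks, 2 * RCU_DYNTICK_CTRL_CTR)
                      + 2 * RCU_DYNTICK_CTRL_CTR;

        /* Models the WARN_ON_ONCE(): an even count means "called from idle". */
        if (!(special & RCU_DYNTICK_CTRL_CTR))
                fprintf(stderr, "illegal: called from idle state\n");
        printf("dynticks advanced to %d\n", atomic_load(&dynticks));
        return 0;
}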

kernel/rcu/tree.h

@@ -41,8 +41,6 @@ struct rcu_dynticks {
         long dynticks_nesting;      /* Track process nesting level. */
         long dynticks_nmi_nesting;  /* Track irq/NMI nesting level. */
         atomic_t dynticks;          /* Even value for idle, else odd. */
-        bool rcu_need_heavy_qs;     /* GP old, need heavy quiescent state. */
-        bool rcu_urgent_qs;         /* GP old need light quiescent state. */
 };
 
 /* Communicate arguments to a workqueue handler. */
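
Only removals appear in this hunk because the flags' destination fields were added to the rcu_data structure elsewhere in this series. An abbreviated sketch of the resulting layout, with atomic_t stubbed so the snippet compiles standalone; both structures carry many more fields in kernel/rcu/tree.h.

#include <stdbool.h>

typedef struct { int counter; } atomic_t;       /* userspace stand-in */

struct rcu_dynticks {                   /* after: only dyntick bookkeeping */
        long dynticks_nesting;          /* Track process nesting level. */
        long dynticks_nmi_nesting;      /* Track irq/NMI nesting level. */
        atomic_t dynticks;              /* Even value for idle, else odd. */
};

struct rcu_data {                       /* abbreviated: the flags' new home */
        bool rcu_need_heavy_qs;         /* GP old, need heavy quiescent state. */
        bool rcu_urgent_qs;             /* GP old, need light quiescent state. */
};

int main(void) { return 0; }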

kernel/rcu/tree_exp.h

@@ -780,7 +780,7 @@ static void sync_sched_exp_handler(void *unused)
         }
         __this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
         /* Store .exp before .rcu_urgent_qs. */
-        smp_store_release(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs), true);
+        smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
         set_tsk_need_resched(current);
         set_preempt_need_resched();
 }
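
The "Store .exp before .rcu_urgent_qs." comment is the point of using smp_store_release() here: any CPU that acquire-loads rcu_urgent_qs and sees it true is guaranteed to also see the earlier plain store to cpu_no_qs.b.exp. A self-contained userspace demonstration of that message-passing guarantee; the pthread scaffolding and variable names are illustrative, not the kernel's.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int exp_flag;                    /* models cpu_no_qs.b.exp, a plain store */
static atomic_bool urgent_qs;           /* models rcu_data.rcu_urgent_qs */

static void *requester(void *unused)
{
        exp_flag = 1;                   /* "Store .exp ..." */
        atomic_store_explicit(&urgent_qs, true,
                              memory_order_release);    /* "... before .rcu_urgent_qs." */
        return NULL;
}

static void *reader(void *unused)
{
        /* Spin until the acquire load observes the flag ... */
        while (!atomic_load_explicit(&urgent_qs, memory_order_acquire))
                ;
        /* ... at which point the earlier plain store must be visible. */
        printf("exp_flag = %d\n", exp_flag);    /* always prints 1 */
        return NULL;
}

int main(void)
{
        pthread_t r, w;
        pthread_create(&r, NULL, reader, NULL);
        pthread_create(&w, NULL, requester, NULL);
        pthread_join(w, NULL);
        pthread_join(r, NULL);
        return 0;
}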

kernel/rcu/tree_plugin.h

@@ -967,17 +967,17 @@ void rcu_all_qs(void)
 {
         unsigned long flags;
 
-        if (!raw_cpu_read(rcu_dynticks.rcu_urgent_qs))
+        if (!raw_cpu_read(rcu_data.rcu_urgent_qs))
                 return;
         preempt_disable();
         /* Load rcu_urgent_qs before other flags. */
-        if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs))) {
+        if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
                 preempt_enable();
                 return;
         }
-        this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
+        this_cpu_write(rcu_data.rcu_urgent_qs, false);
         barrier(); /* Avoid RCU read-side critical sections leaking down. */
-        if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs))) {
+        if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs))) {
                 local_irq_save(flags);
                 rcu_momentary_dyntick_idle();
                 local_irq_restore(flags);
@@ -997,10 +997,10 @@ void rcu_note_context_switch(bool preempt)
         trace_rcu_utilization(TPS("Start context switch"));
         rcu_qs();
         /* Load rcu_urgent_qs before other flags. */
-        if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs)))
+        if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs)))
                 goto out;
-        this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
-        if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs)))
+        this_cpu_write(rcu_data.rcu_urgent_qs, false);
+        if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs)))
                 rcu_momentary_dyntick_idle();
         if (!preempt)
                 rcu_tasks_qs(current);
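
rcu_all_qs() above layers a cheap unordered raw_cpu_read() in front of the ordered smp_load_acquire() so the common no-urgency case costs almost nothing. A userspace sketch of that double-checked fast path, with C11 atomics in place of the kernel's per-CPU primitives and illustrative helper names.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool urgent_qs;           /* models rcu_data.rcu_urgent_qs */
static atomic_bool need_heavy_qs;       /* models rcu_data.rcu_need_heavy_qs */

static void heavy_qs(void)              /* stand-in for rcu_momentary_dyntick_idle() */
{
        printf("heavy quiescent state\n");
}

static void all_qs(void)
{
        /* Cheap unordered read: almost always false, so return early. */
        if (!atomic_load_explicit(&urgent_qs, memory_order_relaxed))
                return;
        /* Load rcu_urgent_qs before other flags (pairs with store-release). */
        if (!atomic_load_explicit(&urgent_qs, memory_order_acquire))
                return;
        atomic_store_explicit(&urgent_qs, false, memory_order_relaxed);
        if (atomic_load_explicit(&need_heavy_qs, memory_order_relaxed))
                heavy_qs();
}

int main(void)
{
        all_qs();                               /* fast path: no output */
        atomic_store(&need_heavy_qs, true);
        atomic_store_explicit(&urgent_qs, true, memory_order_release);
        all_qs();                               /* slow path: heavy QS */
        return 0;
}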