rcu: Move rcu_cpu_started per-CPU variable to rcu_data
When the rcu_cpu_started per-CPU variable was added by commit f64c6013a2 ("rcu/x86: Provide early rcu_cpu_starting() callback"),
there were multiple sets of per-CPU rcu_data structures. Therefore, the
rcu_cpu_started flag was added as a separate per-CPU variable. But now
there is only one set of per-CPU rcu_data structures, so this commit
moves rcu_cpu_started to a new ->cpu_started field in that structure.
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
This commit is contained in:
parent
df9a30fd1f
commit
c0f97f20e5
|
@ -3967,8 +3967,6 @@ int rcutree_offline_cpu(unsigned int cpu)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static DEFINE_PER_CPU(int, rcu_cpu_started);
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Mark the specified CPU as being online so that subsequent grace periods
|
* Mark the specified CPU as being online so that subsequent grace periods
|
||||||
* (both expedited and normal) will wait on it. Note that this means that
|
* (both expedited and normal) will wait on it. Note that this means that
|
||||||
|
@ -3988,12 +3986,11 @@ void rcu_cpu_starting(unsigned int cpu)
|
||||||
struct rcu_node *rnp;
|
struct rcu_node *rnp;
|
||||||
bool newcpu;
|
bool newcpu;
|
||||||
|
|
||||||
if (per_cpu(rcu_cpu_started, cpu))
|
|
||||||
return;
|
|
||||||
|
|
||||||
per_cpu(rcu_cpu_started, cpu) = 1;
|
|
||||||
|
|
||||||
rdp = per_cpu_ptr(&rcu_data, cpu);
|
rdp = per_cpu_ptr(&rcu_data, cpu);
|
||||||
|
if (rdp->cpu_started)
|
||||||
|
return;
|
||||||
|
rdp->cpu_started = true;
|
||||||
|
|
||||||
rnp = rdp->mynode;
|
rnp = rdp->mynode;
|
||||||
mask = rdp->grpmask;
|
mask = rdp->grpmask;
|
||||||
raw_spin_lock_irqsave_rcu_node(rnp, flags);
|
raw_spin_lock_irqsave_rcu_node(rnp, flags);
|
||||||
|
@ -4053,7 +4050,7 @@ void rcu_report_dead(unsigned int cpu)
|
||||||
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
|
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
|
||||||
raw_spin_unlock(&rcu_state.ofl_lock);
|
raw_spin_unlock(&rcu_state.ofl_lock);
|
||||||
|
|
||||||
per_cpu(rcu_cpu_started, cpu) = 0;
|
rdp->cpu_started = false;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
|
@ -156,6 +156,7 @@ struct rcu_data {
|
||||||
bool beenonline; /* CPU online at least once. */
|
bool beenonline; /* CPU online at least once. */
|
||||||
bool gpwrap; /* Possible ->gp_seq wrap. */
|
bool gpwrap; /* Possible ->gp_seq wrap. */
|
||||||
bool exp_deferred_qs; /* This CPU awaiting a deferred QS? */
|
bool exp_deferred_qs; /* This CPU awaiting a deferred QS? */
|
||||||
|
bool cpu_started; /* RCU watching this onlining CPU. */
|
||||||
struct rcu_node *mynode; /* This CPU's leaf of hierarchy */
|
struct rcu_node *mynode; /* This CPU's leaf of hierarchy */
|
||||||
unsigned long grpmask; /* Mask to apply to leaf qsmask. */
|
unsigned long grpmask; /* Mask to apply to leaf qsmask. */
|
||||||
unsigned long ticks_this_gp; /* The number of scheduling-clock */
|
unsigned long ticks_this_gp; /* The number of scheduling-clock */
|
||||||
|
|
Loading…
Reference in New Issue