cpumask: convert RCU implementations
Impact: use new cpumask API.

rcu_ctrlblk contains a cpumask, and it's highly optimized so I don't want
a cpumask_var_t (ie. a pointer) for the CONFIG_CPUMASK_OFFSTACK case.  It
could use a dangling bitmap, and be allocated in __rcu_init to save memory,
but for the moment we use a bitmap.

(Eventually 'struct cpumask' will be undefined for CONFIG_CPUMASK_OFFSTACK,
so we use a bitmap here to show we really mean it).

We remove on-stack cpumasks, using cpumask_var_t for
rcu_torture_shuffle_tasks() and for_each_cpu_and in force_quiescent_state().

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
commit bd232f97b3
parent d036e67b40
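For context before the hunks below, a minimal sketch (not part of the patch) of the API shift being applied: the old cpumask_t helpers operate on full NR_CPUS masks handled by value, while the new cpumask_* helpers take struct cpumask pointers, which is what makes CONFIG_CPUMASK_OFFSTACK possible. Both families existed side by side at this point in the tree.

/* Sketch only, not from this patch: old by-value helpers vs. new
 * pointer-based helpers. */
#include <linux/cpumask.h>

static void old_style(int cpu)
{
	cpumask_t mask = CPU_MASK_NONE;		/* NR_CPUS bits on the stack */

	cpu_set(cpu, mask);
	if (cpu_isset(cpu, mask))
		cpu_clear(cpu, mask);
}

static void new_style(int cpu, struct cpumask *mask)
{
	cpumask_set_cpu(cpu, mask);		/* operate through a pointer */
	if (cpumask_test_cpu(cpu, mask))
		cpumask_clear_cpu(cpu, mask);
}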
@@ -59,8 +59,8 @@ struct rcu_ctrlblk {
 	int	signaled;
 
 	spinlock_t	lock	____cacheline_internodealigned_in_smp;
-	cpumask_t	cpumask; /* CPUs that need to switch in order */
-				 /* for current batch to proceed.     */
+	DECLARE_BITMAP(cpumask, NR_CPUS); /* CPUs that need to switch for */
+					  /* current batch to proceed.    */
 } ____cacheline_internodealigned_in_smp;
 
 /* Is batch a before batch b ? */
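The struct change above keeps the mask storage inline rather than behind a cpumask_var_t pointer. A hedged sketch of the same pattern in isolation (struct my_ctrl is an invented name, not kernel code): DECLARE_BITMAP() reserves NR_CPUS bits inside the structure, and to_cpumask() adapts that raw bitmap to the struct cpumask * the new helpers expect.

/* Illustrative sketch; 'my_ctrl' and 'my_ctrl_cpu_pending' are made up. */
#include <linux/cpumask.h>
#include <linux/spinlock.h>

struct my_ctrl {
	spinlock_t lock;
	DECLARE_BITMAP(cpumask, NR_CPUS);	/* inline storage, no pointer */
};

static bool my_ctrl_cpu_pending(struct my_ctrl *c, int cpu)
{
	/* to_cpumask() views the raw bitmap as a struct cpumask *. */
	return cpumask_test_cpu(cpu, to_cpumask(c->cpumask));
}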
@@ -63,14 +63,14 @@ static struct rcu_ctrlblk rcu_ctrlblk = {
 	.completed = -300,
 	.pending = -300,
 	.lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock),
-	.cpumask = CPU_MASK_NONE,
+	.cpumask = CPU_BITS_NONE,
 };
 static struct rcu_ctrlblk rcu_bh_ctrlblk = {
 	.cur = -300,
 	.completed = -300,
 	.pending = -300,
 	.lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock),
-	.cpumask = CPU_MASK_NONE,
+	.cpumask = CPU_BITS_NONE,
 };
 
 DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
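The initialisers change along with the type: CPU_MASK_NONE is a struct-valued initialiser for cpumask_t, while CPU_BITS_NONE is a brace-enclosed array initialiser suitable for a DECLARE_BITMAP() member. Reusing the invented my_ctrl sketch from above:

/* Illustrative sketch, continuing the made-up 'my_ctrl' example. */
static struct my_ctrl example_ctrl = {
	.lock	 = __SPIN_LOCK_UNLOCKED(&example_ctrl.lock),
	.cpumask = CPU_BITS_NONE,	/* zero-filled array initialiser */
};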
@@ -85,7 +85,6 @@ static void force_quiescent_state(struct rcu_data *rdp,
 			struct rcu_ctrlblk *rcp)
 {
 	int cpu;
-	cpumask_t cpumask;
 	unsigned long flags;
 
 	set_need_resched();
@@ -96,10 +95,10 @@ static void force_quiescent_state(struct rcu_data *rdp,
 		 * Don't send IPI to itself. With irqs disabled,
 		 * rdp->cpu is the current cpu.
 		 *
-		 * cpu_online_map is updated by the _cpu_down()
+		 * cpu_online_mask is updated by the _cpu_down()
 		 * using __stop_machine(). Since we're in irqs disabled
 		 * section, __stop_machine() is not exectuting, hence
-		 * the cpu_online_map is stable.
+		 * the cpu_online_mask is stable.
 		 *
 		 * However, a cpu might have been offlined _just_ before
 		 * we disabled irqs while entering here.
@@ -107,13 +106,14 @@ static void force_quiescent_state(struct rcu_data *rdp,
 		 * notification, leading to the offlined cpu's bit
 		 * being set in the rcp->cpumask.
 		 *
-		 * Hence cpumask = (rcp->cpumask & cpu_online_map) to prevent
+		 * Hence cpumask = (rcp->cpumask & cpu_online_mask) to prevent
 		 * sending smp_reschedule() to an offlined CPU.
 		 */
-		cpus_and(cpumask, rcp->cpumask, cpu_online_map);
-		cpu_clear(rdp->cpu, cpumask);
-		for_each_cpu_mask_nr(cpu, cpumask)
-			smp_send_reschedule(cpu);
+		for_each_cpu_and(cpu,
+				 to_cpumask(rcp->cpumask), cpu_online_mask) {
+			if (cpu != rdp->cpu)
+				smp_send_reschedule(cpu);
+		}
 	}
 	spin_unlock_irqrestore(&rcp->lock, flags);
 }
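The hunk above is the force_quiescent_state() conversion called out in the changelog: instead of copying rcp->cpumask into a stack cpumask_t, masking it with cpu_online_map and walking the copy, for_each_cpu_and() walks the intersection of the two masks directly, so no temporary is needed. A minimal sketch of the two shapes, with send_ipi() as a placeholder standing in for smp_send_reschedule():

/* Sketch only; send_ipi() is a placeholder, not a kernel function. */
#include <linux/cpumask.h>

extern void send_ipi(int cpu);

static void old_shape(const cpumask_t *pending, int self)
{
	cpumask_t tmp;			/* NR_CPUS bits on the stack */
	int cpu;

	cpus_and(tmp, *pending, cpu_online_map);
	cpu_clear(self, tmp);
	for_each_cpu_mask_nr(cpu, tmp)
		send_ipi(cpu);
}

static void new_shape(const struct cpumask *pending, int self)
{
	int cpu;

	/* Walk pending & online without materialising a temporary mask. */
	for_each_cpu_and(cpu, pending, cpu_online_mask) {
		if (cpu != self)
			send_ipi(cpu);
	}
}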
@@ -193,7 +193,7 @@ static void print_other_cpu_stall(struct rcu_ctrlblk *rcp)
 
 	printk(KERN_ERR "INFO: RCU detected CPU stalls:");
 	for_each_possible_cpu(cpu) {
-		if (cpu_isset(cpu, rcp->cpumask))
+		if (cpumask_test_cpu(cpu, to_cpumask(rcp->cpumask)))
 			printk(" %d", cpu);
 	}
 	printk(" (detected by %d, t=%ld jiffies)\n",
@@ -221,7 +221,8 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp)
 	long delta;
 
 	delta = jiffies - rcp->jiffies_stall;
-	if (cpu_isset(smp_processor_id(), rcp->cpumask) && delta >= 0) {
+	if (cpumask_test_cpu(smp_processor_id(), to_cpumask(rcp->cpumask)) &&
+	    delta >= 0) {
 
 		/* We haven't checked in, so go dump stack. */
 		print_cpu_stall(rcp);
@@ -393,7 +394,8 @@ static void rcu_start_batch(struct rcu_ctrlblk *rcp)
 	 * unnecessarily.
 	 */
 	smp_mb();
-	cpus_andnot(rcp->cpumask, cpu_online_map, nohz_cpu_mask);
+	cpumask_andnot(to_cpumask(rcp->cpumask),
+		       cpu_online_mask, &nohz_cpu_mask);
 
 	rcp->signaled = 0;
 }
@@ -406,8 +408,8 @@ static void rcu_start_batch(struct rcu_ctrlblk *rcp)
  */
 static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
 {
-	cpu_clear(cpu, rcp->cpumask);
-	if (cpus_empty(rcp->cpumask)) {
+	cpumask_clear_cpu(cpu, to_cpumask(rcp->cpumask));
+	if (cpumask_empty(to_cpumask(rcp->cpumask))) {
 		/* batch completed ! */
 		rcp->completed = rcp->cur;
 		rcu_start_batch(rcp);
@@ -164,7 +164,8 @@ static char *rcu_try_flip_state_names[] =
 	{ "idle", "waitack", "waitzero", "waitmb" };
 #endif /* #ifdef CONFIG_RCU_TRACE */
 
-static cpumask_t rcu_cpu_online_map __read_mostly = CPU_MASK_NONE;
+static DECLARE_BITMAP(rcu_cpu_online_map, NR_CPUS) __read_mostly
+	= CPU_BITS_NONE;
 
 /*
  * Enum and per-CPU flag to determine when each CPU has seen
@@ -758,7 +759,7 @@ rcu_try_flip_idle(void)
 
 	/* Now ask each CPU for acknowledgement of the flip. */
 
-	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) {
+	for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map)) {
 		per_cpu(rcu_flip_flag, cpu) = rcu_flipped;
 		dyntick_save_progress_counter(cpu);
 	}
@@ -776,7 +777,7 @@ rcu_try_flip_waitack(void)
 	int cpu;
 
 	RCU_TRACE_ME(rcupreempt_trace_try_flip_a1);
-	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
+	for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map))
 		if (rcu_try_flip_waitack_needed(cpu) &&
 		    per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) {
 			RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1);
@@ -808,7 +809,7 @@ rcu_try_flip_waitzero(void)
 	/* Check to see if the sum of the "last" counters is zero. */
 
 	RCU_TRACE_ME(rcupreempt_trace_try_flip_z1);
-	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
+	for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map))
 		sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx];
 	if (sum != 0) {
 		RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1);
@@ -823,7 +824,7 @@ rcu_try_flip_waitzero(void)
 	smp_mb();  /*  ^^^^^^^^^^^^ */
 
 	/* Call for a memory barrier from each CPU. */
-	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) {
+	for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map)) {
 		per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed;
 		dyntick_save_progress_counter(cpu);
 	}
@@ -843,7 +844,7 @@ rcu_try_flip_waitmb(void)
 	int cpu;
 
 	RCU_TRACE_ME(rcupreempt_trace_try_flip_m1);
-	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
+	for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map))
 		if (rcu_try_flip_waitmb_needed(cpu) &&
 		    per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) {
 			RCU_TRACE_ME(rcupreempt_trace_try_flip_me1);
@@ -1032,7 +1033,7 @@ void rcu_offline_cpu(int cpu)
 	RCU_DATA_CPU(cpu)->rcu_flipctr[0] = 0;
 	RCU_DATA_CPU(cpu)->rcu_flipctr[1] = 0;
 
-	cpu_clear(cpu, rcu_cpu_online_map);
+	cpumask_clear_cpu(cpu, to_cpumask(rcu_cpu_online_map));
 
 	spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
 
@@ -1072,7 +1073,7 @@ void __cpuinit rcu_online_cpu(int cpu)
 	struct rcu_data *rdp;
 
 	spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags);
-	cpu_set(cpu, rcu_cpu_online_map);
+	cpumask_set_cpu(cpu, to_cpumask(rcu_cpu_online_map));
 	spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
 
 	/*
@@ -1430,7 +1431,7 @@ void __init __rcu_init(void)
 	 * We don't need protection against CPU-Hotplug here
 	 * since
 	 * a) If a CPU comes online while we are iterating over the
-	 *	cpu_online_map below, we would only end up making a
+	 *	cpu_online_mask below, we would only end up making a
 	 *	duplicate call to rcu_online_cpu() which sets the corresponding
 	 *	CPU's mask in the rcu_cpu_online_map.
 	 *
@@ -868,49 +868,52 @@ static int rcu_idle_cpu;	/* Force all torture tasks off this CPU */
  */
 static void rcu_torture_shuffle_tasks(void)
 {
-	cpumask_t tmp_mask;
+	cpumask_var_t tmp_mask;
 	int i;
 
-	cpus_setall(tmp_mask);
+	if (!alloc_cpumask_var(&tmp_mask, GFP_KERNEL))
+		BUG();
+
+	cpumask_setall(tmp_mask);
 	get_online_cpus();
 
 	/* No point in shuffling if there is only one online CPU (ex: UP) */
-	if (num_online_cpus() == 1) {
-		put_online_cpus();
-		return;
-	}
+	if (num_online_cpus() == 1)
+		goto out;
 
 	if (rcu_idle_cpu != -1)
-		cpu_clear(rcu_idle_cpu, tmp_mask);
+		cpumask_clear_cpu(rcu_idle_cpu, tmp_mask);
 
-	set_cpus_allowed_ptr(current, &tmp_mask);
+	set_cpus_allowed_ptr(current, tmp_mask);
 
 	if (reader_tasks) {
 		for (i = 0; i < nrealreaders; i++)
 			if (reader_tasks[i])
 				set_cpus_allowed_ptr(reader_tasks[i],
-						     &tmp_mask);
+						     tmp_mask);
 	}
 
 	if (fakewriter_tasks) {
 		for (i = 0; i < nfakewriters; i++)
 			if (fakewriter_tasks[i])
 				set_cpus_allowed_ptr(fakewriter_tasks[i],
-						     &tmp_mask);
+						     tmp_mask);
 	}
 
 	if (writer_task)
-		set_cpus_allowed_ptr(writer_task, &tmp_mask);
+		set_cpus_allowed_ptr(writer_task, tmp_mask);
 
 	if (stats_task)
-		set_cpus_allowed_ptr(stats_task, &tmp_mask);
+		set_cpus_allowed_ptr(stats_task, tmp_mask);
 
 	if (rcu_idle_cpu == -1)
 		rcu_idle_cpu = num_online_cpus() - 1;
 	else
 		rcu_idle_cpu--;
 
+out:
 	put_online_cpus();
+	free_cpumask_var(tmp_mask);
 }
 
 /* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
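The rcutorture hunk above is the cpumask_var_t conversion mentioned in the changelog. As a stand-alone, hedged sketch of the pattern (do_work_on() is a placeholder): with CONFIG_CPUMASK_OFFSTACK, cpumask_var_t is a pointer and alloc_cpumask_var() performs a real allocation that can fail; without it, cpumask_var_t is a one-element array, the allocation always succeeds, and free_cpumask_var() is a no-op, so the same code serves both configurations.

/* Sketch only; do_work_on() is a placeholder for real per-mask work. */
#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/errno.h>

extern void do_work_on(const struct cpumask *mask);

static int shuffle_example(int busy_cpu)
{
	cpumask_var_t mask;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;		/* can only fail with CPUMASK_OFFSTACK */

	cpumask_setall(mask);
	if (busy_cpu >= 0)
		cpumask_clear_cpu(busy_cpu, mask);	/* keep this CPU free */

	do_work_on(mask);	/* cpumask_var_t is usable as struct cpumask * */

	free_cpumask_var(mask);
	return 0;
}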