srcu: Add size-state transitioning code
This is just dead code at the moment, and will be used once the state-transition code is activated. Because srcu_barrier() must be aware of the transition before call_srcu() is, the state machine waits for an SRCU grace period before callbacks are queued to the non-CPU-0 queues. This requires that portions of srcu_barrier() be enclosed in an SRCU read-side critical section.

Co-developed-by: Neeraj Upadhyay <quic_neeraju@quicinc.com>
Signed-off-by: Neeraj Upadhyay <quic_neeraju@quicinc.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
commit e2f638365d
parent 2ec303113d
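The diff below leans on an acquire/release handshake: the grace-period path initializes the srcu_node structures and only then publishes the advanced size state with smp_store_release(), so any path that observes the new state via smp_load_acquire() also sees the initialized nodes. As a hedged illustration of that publication pattern only, here is a minimal standalone C11 sketch; it is not kernel code, and the SIZE_* values, node_table, and thread names are illustrative stand-ins rather than the kernel's definitions.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-ins for the SRCU size states used in this patch. */
enum { SIZE_SMALL, SIZE_ALLOC, SIZE_BIG };

static atomic_int size_state = SIZE_ALLOC;	/* a transition has been requested */
static int node_table[4];			/* stands in for the srcu_node tree */

/* Models the grace-period path: build the nodes, then publish the new state. */
static void *state_machine(void *arg)
{
	(void)arg;
	for (int i = 0; i < 4; i++)
		node_table[i] = i + 1;		/* "init_srcu_struct_nodes()" */
	atomic_store_explicit(&size_state, SIZE_BIG,
			      memory_order_release);	/* "smp_store_release()" */
	return NULL;
}

/* Models a reader such as srcu_barrier(): acquire-load the state, then act. */
static void *state_consumer(void *arg)
{
	(void)arg;
	if (atomic_load_explicit(&size_state, memory_order_acquire) == SIZE_BIG) {
		/* The release/acquire pairing guarantees the nodes are visible. */
		for (int i = 0; i < 4; i++)
			printf("node %d initialized to %d\n", i, node_table[i]);
	} else {
		printf("state still small: use the CPU-0 queue only\n");
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, state_machine, NULL);
	pthread_create(&b, NULL, state_consumer, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}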
@@ -562,6 +562,7 @@ static void srcu_gp_end(struct srcu_struct *ssp)
 	unsigned long mask;
 	struct srcu_data *sdp;
 	struct srcu_node *snp;
+	int ss_state;
 
 	/* Prevent more than one additional grace period. */
 	mutex_lock(&ssp->srcu_cb_mutex);
@@ -629,6 +630,15 @@ static void srcu_gp_end(struct srcu_struct *ssp)
 	} else {
 		spin_unlock_irq_rcu_node(ssp);
 	}
+
+	/* Transition to big if needed. */
+	ss_state = smp_load_acquire(&ssp->srcu_size_state);
+	if (ss_state != SRCU_SIZE_SMALL && ss_state != SRCU_SIZE_BIG) {
+		if (ss_state == SRCU_SIZE_ALLOC)
+			init_srcu_struct_nodes(ssp);
+		else
+			smp_store_release(&ssp->srcu_size_state, ss_state + 1);
+	}
 }
 
 /*
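To make the progression concrete, here is a minimal single-threaded model of the block just added to srcu_gp_end(): each grace-period end advances srcu_size_state by at most one step, except at SRCU_SIZE_ALLOC, where the node tree is built first. This is only a sketch: the enum below is a simplified stand-in (the real kernel defines more intermediate states), and it assumes that init_srcu_struct_nodes() publishes the following state itself once the tree is ready, a detail that lives outside this hunk.

#include <stdio.h>

/* Simplified, illustrative state ordering; not the kernel's full enum. */
enum {
	SRCU_SIZE_SMALL,
	SRCU_SIZE_ALLOC,
	SRCU_SIZE_WAIT_BARRIER,
	SRCU_SIZE_BIG,
};

static int srcu_size_state = SRCU_SIZE_ALLOC;	/* a resize has been requested */
static int nodes_initialized;

/* Stand-in for init_srcu_struct_nodes(); assumed to publish the next state. */
static void init_srcu_struct_nodes_model(void)
{
	nodes_initialized = 1;
	srcu_size_state = SRCU_SIZE_WAIT_BARRIER;	/* smp_store_release() in the kernel */
}

/* Mirrors the added block: one state step (at most) per grace period. */
static void gp_end_model(void)
{
	int ss_state = srcu_size_state;			/* smp_load_acquire() in the kernel */

	if (ss_state != SRCU_SIZE_SMALL && ss_state != SRCU_SIZE_BIG) {
		if (ss_state == SRCU_SIZE_ALLOC)
			init_srcu_struct_nodes_model();
		else
			srcu_size_state = ss_state + 1;	/* smp_store_release() in the kernel */
	}
}

int main(void)
{
	for (int gp = 1; srcu_size_state != SRCU_SIZE_BIG; gp++) {
		gp_end_model();
		printf("after GP %d: state %d, nodes %s\n", gp, srcu_size_state,
		       nodes_initialized ? "ready" : "not yet");
	}
	return 0;
}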
@@ -1178,6 +1188,7 @@ static void srcu_barrier_one_cpu(struct srcu_struct *ssp, struct srcu_data *sdp)
 void srcu_barrier(struct srcu_struct *ssp)
 {
 	int cpu;
+	int idx;
 	unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq);
 
 	check_init_srcu_struct(ssp);
@@ -1193,11 +1204,13 @@ void srcu_barrier(struct srcu_struct *ssp)
 	/* Initial count prevents reaching zero until all CBs are posted. */
 	atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);
 
+	idx = srcu_read_lock(ssp);
 	if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
 		srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, 0));
 	else
 		for_each_possible_cpu(cpu)
 			srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, cpu));
+	srcu_read_unlock(ssp, idx);
 
 	/* Remove the initial count, at which point reaching zero can happen. */
 	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
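As a side note on the counting scheme this hunk slots into, the comments above describe the usual "initial count" idiom: srcu_barrier_cpu_cnt starts at 1 so the completion condition cannot trigger while callbacks are still being posted, and the final atomic_dec_and_test() drops that initial reference. The following standalone C11 sketch models only that idiom with hypothetical names; the real code signals completion through a completion structure rather than printf, and its callbacks run asynchronously.

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical stand-in for ssp->srcu_barrier_cpu_cnt. */
static atomic_int barrier_cpu_cnt;

/* Each posted per-CPU callback holds one reference on the count. */
static void post_callback_for_cpu(int cpu)
{
	atomic_fetch_add(&barrier_cpu_cnt, 1);
	printf("posted barrier callback for CPU %d\n", cpu);
}

/* A callback drops its reference when it is invoked. */
static void callback_invoked(void)
{
	if (atomic_fetch_sub(&barrier_cpu_cnt, 1) == 1)	/* "atomic_dec_and_test()" */
		printf("last callback: barrier complete\n");
}

int main(void)
{
	int nr_cpus = 4;

	/* Initial count prevents reaching zero until all CBs are posted. */
	atomic_store(&barrier_cpu_cnt, 1);

	for (int cpu = 0; cpu < nr_cpus; cpu++)
		post_callback_for_cpu(cpu);

	/* Remove the initial count, at which point reaching zero can happen. */
	if (atomic_fetch_sub(&barrier_cpu_cnt, 1) == 1)
		printf("no callbacks were pending: barrier complete\n");

	/* In this single-threaded model, the callbacks "run" afterward. */
	for (int cpu = 0; cpu < nr_cpus; cpu++)
		callback_invoked();

	return 0;
}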