rcu/nocb: Prepare state machine for a new step

Currently SEGCBLIST_SOFTIRQ_ONLY is a bit of an exception among the
segcblist flags because it is an exclusive state that doesn't mix with
the other flags. Remove it in favour of:

_ A flag specifying that rcu_core() needs to perform callbacks execution
  and acceleration

and

_ A flag specifying we want the nocb lock to be held in any needed
  circumstances

This clarifies the code and is more flexible: it allows a state in
which rcu_core() runs with locking while offloading hasn't started yet.
This is a necessary step to prepare for triggering rcu_core() at the
very beginning of the de-offloading process so that rcu_core() won't
dismiss work while being preempted by the de-offloading process, at
least not without a pending subsequent rcu_core() that will quickly
catch up.

Reviewed-by: Valentin Schneider <Valentin.Schneider@arm.com>
Tested-by: Valentin Schneider <valentin.schneider@arm.com>
Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Joel Fernandes <joel@joelfernandes.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Neeraj Upadhyay <neeraju@codeaurora.org>
Cc: Uladzislau Rezki <urezki@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
This commit is contained in:
Frederic Weisbecker 2021-10-19 02:08:07 +02:00 committed by Paul E. McKenney
parent 118e0d4a1b
commit 213d56bf33
5 changed files with 50 additions and 31 deletions

View File

@@ -69,7 +69,7 @@ struct rcu_cblist {
* *
* *
* ---------------------------------------------------------------------------- * ----------------------------------------------------------------------------
* | SEGCBLIST_SOFTIRQ_ONLY | * | SEGCBLIST_RCU_CORE |
* | | * | |
* | Callbacks processed by rcu_core() from softirqs or local | * | Callbacks processed by rcu_core() from softirqs or local |
* | rcuc kthread, without holding nocb_lock. | * | rcuc kthread, without holding nocb_lock. |
@@ -77,7 +77,7 @@ struct rcu_cblist {
* | * |
* v * v
* ---------------------------------------------------------------------------- * ----------------------------------------------------------------------------
* | SEGCBLIST_OFFLOADED | * | SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING | SEGCBLIST_OFFLOADED |
* | | * | |
* | Callbacks processed by rcu_core() from softirqs or local | * | Callbacks processed by rcu_core() from softirqs or local |
* | rcuc kthread, while holding nocb_lock. Waking up CB and GP kthreads, | * | rcuc kthread, while holding nocb_lock. Waking up CB and GP kthreads, |
@@ -89,7 +89,9 @@ struct rcu_cblist {
* | | * | |
* v v * v v
* --------------------------------------- ----------------------------------| * --------------------------------------- ----------------------------------|
* | SEGCBLIST_OFFLOADED | | | SEGCBLIST_OFFLOADED | | * | SEGCBLIST_RCU_CORE | | | SEGCBLIST_RCU_CORE | |
* | SEGCBLIST_LOCKING | | | SEGCBLIST_LOCKING | |
* | SEGCBLIST_OFFLOADED | | | SEGCBLIST_OFFLOADED | |
* | SEGCBLIST_KTHREAD_CB | | SEGCBLIST_KTHREAD_GP | * | SEGCBLIST_KTHREAD_CB | | SEGCBLIST_KTHREAD_GP |
* | | | | * | | | |
* | | | | * | | | |
@@ -104,9 +106,10 @@ struct rcu_cblist {
* | * |
* v * v
* |--------------------------------------------------------------------------| * |--------------------------------------------------------------------------|
* | SEGCBLIST_OFFLOADED | | * | SEGCBLIST_LOCKING | |
* | SEGCBLIST_KTHREAD_CB | | * | SEGCBLIST_OFFLOADED | |
* | SEGCBLIST_KTHREAD_GP | * | SEGCBLIST_KTHREAD_GP | |
* | SEGCBLIST_KTHREAD_CB |
* | | * | |
* | Kthreads handle callbacks holding nocb_lock, local rcu_core() stops | * | Kthreads handle callbacks holding nocb_lock, local rcu_core() stops |
* | handling callbacks. Enable bypass queueing. | * | handling callbacks. Enable bypass queueing. |
@@ -120,7 +123,8 @@ struct rcu_cblist {
* *
* *
* |--------------------------------------------------------------------------| * |--------------------------------------------------------------------------|
* | SEGCBLIST_OFFLOADED | | * | SEGCBLIST_LOCKING | |
* | SEGCBLIST_OFFLOADED | |
* | SEGCBLIST_KTHREAD_CB | | * | SEGCBLIST_KTHREAD_CB | |
* | SEGCBLIST_KTHREAD_GP | * | SEGCBLIST_KTHREAD_GP |
* | | * | |
@@ -130,6 +134,8 @@ struct rcu_cblist {
* | * |
* v * v
* |--------------------------------------------------------------------------| * |--------------------------------------------------------------------------|
* | SEGCBLIST_RCU_CORE | |
* | SEGCBLIST_LOCKING | |
* | SEGCBLIST_KTHREAD_CB | | * | SEGCBLIST_KTHREAD_CB | |
* | SEGCBLIST_KTHREAD_GP | * | SEGCBLIST_KTHREAD_GP |
* | | * | |
@@ -143,7 +149,9 @@ struct rcu_cblist {
* | | * | |
* v v * v v
* ---------------------------------------------------------------------------| * ---------------------------------------------------------------------------|
* | | * | | |
* | SEGCBLIST_RCU_CORE | | SEGCBLIST_RCU_CORE | |
* | SEGCBLIST_LOCKING | | SEGCBLIST_LOCKING | |
* | SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP | * | SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP |
* | | | * | | |
* | GP kthread woke up and | CB kthread woke up and | * | GP kthread woke up and | CB kthread woke up and |
@@ -159,7 +167,7 @@ struct rcu_cblist {
* | * |
* v * v
* ---------------------------------------------------------------------------- * ----------------------------------------------------------------------------
* | 0 | * | SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING |
* | | * | |
* | Callbacks processed by rcu_core() from softirqs or local | * | Callbacks processed by rcu_core() from softirqs or local |
* | rcuc kthread, while holding nocb_lock. Forbid nocb_timer to be armed. | * | rcuc kthread, while holding nocb_lock. Forbid nocb_timer to be armed. |
@@ -168,17 +176,18 @@ struct rcu_cblist {
* | * |
* v * v
* ---------------------------------------------------------------------------- * ----------------------------------------------------------------------------
* | SEGCBLIST_SOFTIRQ_ONLY | * | SEGCBLIST_RCU_CORE |
* | | * | |
* | Callbacks processed by rcu_core() from softirqs or local | * | Callbacks processed by rcu_core() from softirqs or local |
* | rcuc kthread, without holding nocb_lock. | * | rcuc kthread, without holding nocb_lock. |
* ---------------------------------------------------------------------------- * ----------------------------------------------------------------------------
*/ */
#define SEGCBLIST_ENABLED BIT(0) #define SEGCBLIST_ENABLED BIT(0)
#define SEGCBLIST_SOFTIRQ_ONLY BIT(1) #define SEGCBLIST_RCU_CORE BIT(1)
#define SEGCBLIST_KTHREAD_CB BIT(2) #define SEGCBLIST_LOCKING BIT(2)
#define SEGCBLIST_KTHREAD_GP BIT(3) #define SEGCBLIST_KTHREAD_CB BIT(3)
#define SEGCBLIST_OFFLOADED BIT(4) #define SEGCBLIST_KTHREAD_GP BIT(4)
#define SEGCBLIST_OFFLOADED BIT(5)
struct rcu_segcblist { struct rcu_segcblist {
struct rcu_head *head; struct rcu_head *head;

View File

@@ -261,14 +261,14 @@ void rcu_segcblist_disable(struct rcu_segcblist *rsclp)
} }
/* /*
* Mark the specified rcu_segcblist structure as offloaded. * Mark the specified rcu_segcblist structure as offloaded (or not)
*/ */
void rcu_segcblist_offload(struct rcu_segcblist *rsclp, bool offload) void rcu_segcblist_offload(struct rcu_segcblist *rsclp, bool offload)
{ {
if (offload) { if (offload) {
rcu_segcblist_clear_flags(rsclp, SEGCBLIST_SOFTIRQ_ONLY); rcu_segcblist_set_flags(rsclp, SEGCBLIST_LOCKING | SEGCBLIST_OFFLOADED);
rcu_segcblist_set_flags(rsclp, SEGCBLIST_OFFLOADED);
} else { } else {
rcu_segcblist_set_flags(rsclp, SEGCBLIST_RCU_CORE);
rcu_segcblist_clear_flags(rsclp, SEGCBLIST_OFFLOADED); rcu_segcblist_clear_flags(rsclp, SEGCBLIST_OFFLOADED);
} }
} }

View File

@@ -80,11 +80,14 @@ static inline bool rcu_segcblist_is_enabled(struct rcu_segcblist *rsclp)
return rcu_segcblist_test_flags(rsclp, SEGCBLIST_ENABLED); return rcu_segcblist_test_flags(rsclp, SEGCBLIST_ENABLED);
} }
/* Is the specified rcu_segcblist offloaded, or is SEGCBLIST_SOFTIRQ_ONLY set? */ /*
* Is the specified rcu_segcblist NOCB offloaded (or in the middle of the
* [de]offloading process)?
*/
static inline bool rcu_segcblist_is_offloaded(struct rcu_segcblist *rsclp) static inline bool rcu_segcblist_is_offloaded(struct rcu_segcblist *rsclp)
{ {
if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) && if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
!rcu_segcblist_test_flags(rsclp, SEGCBLIST_SOFTIRQ_ONLY)) rcu_segcblist_test_flags(rsclp, SEGCBLIST_LOCKING))
return true; return true;
return false; return false;
@@ -92,9 +95,8 @@ static inline bool rcu_segcblist_is_offloaded(struct rcu_segcblist *rsclp)
static inline bool rcu_segcblist_completely_offloaded(struct rcu_segcblist *rsclp) static inline bool rcu_segcblist_completely_offloaded(struct rcu_segcblist *rsclp)
{ {
int flags = SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP | SEGCBLIST_OFFLOADED; if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
!rcu_segcblist_test_flags(rsclp, SEGCBLIST_RCU_CORE))
if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) && (rsclp->flags & flags) == flags)
return true; return true;
return false; return false;

View File

@@ -79,7 +79,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
.dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE, .dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
.dynticks = ATOMIC_INIT(1), .dynticks = ATOMIC_INIT(1),
#ifdef CONFIG_RCU_NOCB_CPU #ifdef CONFIG_RCU_NOCB_CPU
.cblist.flags = SEGCBLIST_SOFTIRQ_ONLY, .cblist.flags = SEGCBLIST_RCU_CORE,
#endif #endif
}; };
static struct rcu_state rcu_state = { static struct rcu_state rcu_state = {

View File

@@ -1000,12 +1000,12 @@ static long rcu_nocb_rdp_deoffload(void *arg)
*/ */
rcu_nocb_lock_irqsave(rdp, flags); rcu_nocb_lock_irqsave(rdp, flags);
/* /*
* Theoretically we could set SEGCBLIST_SOFTIRQ_ONLY after the nocb * Theoretically we could clear SEGCBLIST_LOCKING after the nocb
* lock is released but how about being paranoid for once? * lock is released but how about being paranoid for once?
*/ */
rcu_segcblist_set_flags(cblist, SEGCBLIST_SOFTIRQ_ONLY); rcu_segcblist_clear_flags(cblist, SEGCBLIST_LOCKING);
/* /*
* With SEGCBLIST_SOFTIRQ_ONLY, we can't use * Without SEGCBLIST_LOCKING, we can't use
* rcu_nocb_unlock_irqrestore() anymore. * rcu_nocb_unlock_irqrestore() anymore.
*/ */
raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags); raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
@@ -1058,14 +1058,14 @@ static long rcu_nocb_rdp_offload(void *arg)
pr_info("Offloading %d\n", rdp->cpu); pr_info("Offloading %d\n", rdp->cpu);
/* /*
* Can't use rcu_nocb_lock_irqsave() while we are in * Can't use rcu_nocb_lock_irqsave() before SEGCBLIST_LOCKING
* SEGCBLIST_SOFTIRQ_ONLY mode. * is set.
*/ */
raw_spin_lock_irqsave(&rdp->nocb_lock, flags); raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
/* /*
* We didn't take the nocb lock while working on the * We didn't take the nocb lock while working on the
* rdp->cblist in SEGCBLIST_SOFTIRQ_ONLY mode. * rdp->cblist with SEGCBLIST_LOCKING cleared (pure softirq/rcuc mode).
* Every modifications that have been done previously on * Every modifications that have been done previously on
* rdp->cblist must be visible remotely by the nocb kthreads * rdp->cblist must be visible remotely by the nocb kthreads
* upon wake up after reading the cblist flags. * upon wake up after reading the cblist flags.
@@ -1084,6 +1084,14 @@ static long rcu_nocb_rdp_offload(void *arg)
rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB) && rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB) &&
rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP)); rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));
/*
* All kthreads are ready to work, we can finally relieve rcu_core() and
* enable nocb bypass.
*/
rcu_nocb_lock_irqsave(rdp, flags);
rcu_segcblist_clear_flags(cblist, SEGCBLIST_RCU_CORE);
rcu_nocb_unlock_irqrestore(rdp, flags);
return ret; return ret;
} }
@@ -1154,8 +1162,8 @@ void __init rcu_init_nohz(void)
if (rcu_segcblist_empty(&rdp->cblist)) if (rcu_segcblist_empty(&rdp->cblist))
rcu_segcblist_init(&rdp->cblist); rcu_segcblist_init(&rdp->cblist);
rcu_segcblist_offload(&rdp->cblist, true); rcu_segcblist_offload(&rdp->cblist, true);
rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_CB); rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP);
rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_GP); rcu_segcblist_clear_flags(&rdp->cblist, SEGCBLIST_RCU_CORE);
} }
rcu_organize_nocb_kthreads(); rcu_organize_nocb_kthreads();
} }