rcu: Add polled expedited grace-period primitives
This commit adds expedited grace-period functionality to RCU's polled
grace-period API, adding start_poll_synchronize_rcu_expedited() and
cond_synchronize_rcu_expedited(), which are similar to the existing
start_poll_synchronize_rcu() and cond_synchronize_rcu() functions,
respectively.

Note that although start_poll_synchronize_rcu_expedited() can be invoked
very early, the resulting expedited grace periods are not guaranteed to
start until after workqueues are fully initialized. On the other hand,
both synchronize_rcu() and synchronize_rcu_expedited() can also be
invoked very early, and the resulting grace periods will be taken into
account as they occur.

[ paulmck: Apply feedback from Neeraj Upadhyay. ]

Link: https://lore.kernel.org/all/20220121142454.1994916-1-bfoster@redhat.com/
Link: https://docs.google.com/document/d/1RNKWW9jQyfjxw2E8dsXVTdvZYh0HnYeSHDKog9jhdN8/edit?usp=sharing
Cc: Brian Foster <bfoster@redhat.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Ian Kent <raven@themaw.net>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
parent e4333cb20f
commit d96c52fe49
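As a rough usage sketch (not part of this patch), the new API pairs a snapshot-and-kick call with a conditional wait: a retire path records a cookie from start_poll_synchronize_rcu_expedited(), and a later free path calls cond_synchronize_rcu_expedited(), which blocks (expedited) only if no full grace period has elapsed since the snapshot. The struct my_obj and the helper names below are invented for illustration; only the RCU calls come from this series.

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_obj {
	unsigned long rcu_cookie;	/* cookie from start_poll_synchronize_rcu_expedited() */
	/* ... payload ... */
};

/* Retire path: snapshot RCU state and request an expedited grace period. */
static void my_obj_retire(struct my_obj *p)
{
	p->rcu_cookie = start_poll_synchronize_rcu_expedited();
}

/* Free path: wait (expedited) only if no full grace period has elapsed yet. */
static void my_obj_free(struct my_obj *p)
{
	cond_synchronize_rcu_expedited(p->rcu_cookie);
	kfree(p);
}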
@@ -23,6 +23,16 @@ static inline void cond_synchronize_rcu(unsigned long oldstate)
 	might_sleep();
 }
 
+static inline unsigned long start_poll_synchronize_rcu_expedited(void)
+{
+	return start_poll_synchronize_rcu();
+}
+
+static inline void cond_synchronize_rcu_expedited(unsigned long oldstate)
+{
+	cond_synchronize_rcu(oldstate);
+}
+
 extern void rcu_barrier(void);
 
 static inline void synchronize_rcu_expedited(void)
@@ -40,6 +40,8 @@ bool rcu_eqs_special_set(int cpu);
 void rcu_momentary_dyntick_idle(void);
 void kfree_rcu_scheduler_running(void);
 bool rcu_gp_might_be_stalled(void);
+unsigned long start_poll_synchronize_rcu_expedited(void);
+void cond_synchronize_rcu_expedited(unsigned long oldstate);
 unsigned long get_state_synchronize_rcu(void);
 unsigned long start_poll_synchronize_rcu(void);
 bool poll_state_synchronize_rcu(unsigned long oldstate);
@@ -4027,20 +4027,20 @@ EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
 /**
  * cond_synchronize_rcu - Conditionally wait for an RCU grace period
  *
- * @oldstate: value from get_state_synchronize_rcu() or start_poll_synchronize_rcu()
+ * @oldstate: value from get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or start_poll_synchronize_rcu_expedited()
  *
  * If a full RCU grace period has elapsed since the earlier call to
  * get_state_synchronize_rcu() or start_poll_synchronize_rcu(), just return.
  * Otherwise, invoke synchronize_rcu() to wait for a full grace period.
  *
- * Yes, this function does not take counter wrap into account. But
- * counter wrap is harmless. If the counter wraps, we have waited for
+ * Yes, this function does not take counter wrap into account.
+ * But counter wrap is harmless. If the counter wraps, we have waited for
  * more than 2 billion grace periods (and way more on a 64-bit system!),
- * so waiting for one additional grace period should be just fine.
+ * so waiting for a couple of additional grace periods should be just fine.
  *
  * This function provides the same memory-ordering guarantees that
  * would be provided by a synchronize_rcu() that was invoked at the call
- * to the function that provided @oldstate, and that returned at the end
+ * to the function that provided @oldstate and that returned at the end
  * of this function.
  */
 void cond_synchronize_rcu(unsigned long oldstate)
@@ -4793,6 +4793,9 @@ static void __init rcu_init_one(void)
 			init_waitqueue_head(&rnp->exp_wq[3]);
 			spin_lock_init(&rnp->exp_lock);
 			mutex_init(&rnp->boost_kthread_mutex);
+			raw_spin_lock_init(&rnp->exp_poll_lock);
+			rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
+			INIT_WORK(&rnp->exp_poll_wq, sync_rcu_do_polled_gp);
 		}
 	}
 
@@ -5018,6 +5021,10 @@ void __init rcu_init(void)
 		qovld_calc = DEFAULT_RCU_QOVLD_MULT * qhimark;
 	else
 		qovld_calc = qovld;
+
+	// Kick-start any polled grace periods that started early.
+	if (!(per_cpu_ptr(&rcu_data, cpu)->mynode->exp_seq_poll_rq & 0x1))
+		(void)start_poll_synchronize_rcu_expedited();
 }
 
 #include "tree_stall.h"
@@ -133,6 +133,10 @@ struct rcu_node {
 	wait_queue_head_t exp_wq[4];
 	struct rcu_exp_work rew;
 	bool exp_need_flush;	/* Need to flush workitem? */
+	raw_spinlock_t exp_poll_lock;
+				/* Lock and data for polled expedited grace periods. */
+	unsigned long exp_seq_poll_rq;
+	struct work_struct exp_poll_wq;
 } ____cacheline_internodealigned_in_smp;
 
 /*
@@ -484,3 +488,6 @@ static void rcu_iw_handler(struct irq_work *iwp);
 static void check_cpu_stall(struct rcu_data *rdp);
 static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
 				     const unsigned long gpssdelay);
+
+/* Forward declarations for tree_exp.h. */
+static void sync_rcu_do_polled_gp(struct work_struct *wp);
@@ -962,3 +962,88 @@ void synchronize_rcu_expedited(void)
 	synchronize_rcu_expedited_destroy_work(&rew);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
+
+/*
+ * Ensure that start_poll_synchronize_rcu_expedited() has the expedited
+ * RCU grace periods that it needs.
+ */
+static void sync_rcu_do_polled_gp(struct work_struct *wp)
+{
+	unsigned long flags;
+	struct rcu_node *rnp = container_of(wp, struct rcu_node, exp_poll_wq);
+	unsigned long s;
+
+	raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
+	s = rnp->exp_seq_poll_rq;
+	rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
+	raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);
+	if (s == RCU_GET_STATE_COMPLETED)
+		return;
+	while (!poll_state_synchronize_rcu(s))
+		synchronize_rcu_expedited();
+	raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
+	s = rnp->exp_seq_poll_rq;
+	if (poll_state_synchronize_rcu(s))
+		rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
+	raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);
+}
+
+/**
+ * start_poll_synchronize_rcu_expedited - Snapshot current RCU state and start expedited grace period
+ *
+ * Returns a cookie to pass to a call to cond_synchronize_rcu(),
+ * cond_synchronize_rcu_expedited(), or poll_state_synchronize_rcu(),
+ * allowing them to determine whether or not any sort of grace period has
+ * elapsed in the meantime. If the needed expedited grace period is not
+ * already slated to start, initiates that grace period.
+ */
+unsigned long start_poll_synchronize_rcu_expedited(void)
+{
+	unsigned long flags;
+	struct rcu_data *rdp;
+	struct rcu_node *rnp;
+	unsigned long s;
+
+	s = get_state_synchronize_rcu();
+	rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
+	rnp = rdp->mynode;
+	if (rcu_init_invoked())
+		raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
+	if (!poll_state_synchronize_rcu(s)) {
+		rnp->exp_seq_poll_rq = s;
+		if (rcu_init_invoked())
+			queue_work(rcu_gp_wq, &rnp->exp_poll_wq);
+	}
+	if (rcu_init_invoked())
+		raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);
+
+	return s;
+}
+EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_expedited);
+
+/**
+ * cond_synchronize_rcu_expedited - Conditionally wait for an expedited RCU grace period
+ *
+ * @oldstate: value from get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or start_poll_synchronize_rcu_expedited()
+ *
+ * If any type of full RCU grace period has elapsed since the earlier
+ * call to get_state_synchronize_rcu(), start_poll_synchronize_rcu(),
+ * or start_poll_synchronize_rcu_expedited(), just return. Otherwise,
+ * invoke synchronize_rcu_expedited() to wait for a full grace period.
+ *
+ * Yes, this function does not take counter wrap into account.
+ * But counter wrap is harmless. If the counter wraps, we have waited for
+ * more than 2 billion grace periods (and way more on a 64-bit system!),
+ * so waiting for a couple of additional grace periods should be just fine.
+ *
+ * This function provides the same memory-ordering guarantees that
+ * would be provided by a synchronize_rcu() that was invoked at the call
+ * to the function that provided @oldstate and that returned at the end
+ * of this function.
+ */
+void cond_synchronize_rcu_expedited(unsigned long oldstate)
+{
+	if (!poll_state_synchronize_rcu(oldstate))
+		synchronize_rcu_expedited();
+}
+EXPORT_SYMBOL_GPL(cond_synchronize_rcu_expedited);
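Continuing the hypothetical my_obj sketch above (again, not part of this patch), the same cookie can also be checked without blocking via poll_state_synchronize_rcu(), as the start_poll_synchronize_rcu_expedited() kerneldoc in the hunk above notes. A minimal non-blocking reclaim helper, with the function name invented for illustration:

/* Non-blocking variant: free only if a full grace period has already elapsed. */
static bool my_obj_try_free(struct my_obj *p)
{
	if (!poll_state_synchronize_rcu(p->rcu_cookie))
		return false;	/* not yet safe; caller should retry later */
	kfree(p);
	return true;
}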