rcu/nocb: Allow lockless use of rcu_segcblist_empty()
Currently, rcu_segcblist_empty() assumes that the callback list is not
being changed by other CPUs, but upcoming changes will require it to
operate locklessly. This commit therefore adds the needed READ_ONCE()
call, along with the WRITE_ONCE() calls when updating the callback
list's ->head field.

Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
commit e6060b41c9
parent 76c6927c3e
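The pattern behind the diff is that every store to ->head that can race
with a lockless reader becomes a WRITE_ONCE(), and the reader's test
becomes a READ_ONCE(), so the compiler can neither tear, fuse, nor cache
either access. A minimal sketch of that pairing, using simplified
volatile-based stand-ins for the kernel macros (the demo_cblist type and
demo_* helpers are hypothetical, not kernel code):

	#include <stdbool.h>

	/*
	 * Simplified stand-ins for the kernel's READ_ONCE()/WRITE_ONCE()
	 * macros, implemented here as volatile accesses for illustration;
	 * the real kernel versions are more elaborate.
	 */
	#define READ_ONCE(x)     (*(const volatile typeof(x) *)&(x))
	#define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&(x) = (v))

	struct rcu_head;		/* Opaque here; used via pointer only. */

	struct demo_cblist {		/* Hypothetical stand-in list. */
		struct rcu_head *head;	/* NULL means the list is empty. */
	};

	/* Lockless reader: may run concurrently with demo_publish_head(). */
	static inline bool demo_cblist_empty(struct demo_cblist *lp)
	{
		/* READ_ONCE() forces a single, untorn load of ->head. */
		return !READ_ONCE(lp->head);
	}

	/* Updater: each racing store to ->head pairs with WRITE_ONCE(). */
	static inline void demo_publish_head(struct demo_cblist *lp,
					     struct rcu_head *rhp)
	{
		WRITE_ONCE(lp->head, rhp);
	}

Note that the diff below leaves stores into the private rcu_cblist
(*rclp->tail = rsclp->head) as plain assignments: only rsclp->head is
read locklessly, so only its updates need WRITE_ONCE().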
--- a/kernel/rcu/rcu_segcblist.c
+++ b/kernel/rcu/rcu_segcblist.c
@@ -213,7 +213,7 @@ void rcu_segcblist_extract_done_cbs(struct rcu_segcblist *rsclp,
 	if (!rcu_segcblist_ready_cbs(rsclp))
 		return; /* Nothing to do. */
 	*rclp->tail = rsclp->head;
-	rsclp->head = *rsclp->tails[RCU_DONE_TAIL];
+	WRITE_ONCE(rsclp->head, *rsclp->tails[RCU_DONE_TAIL]);
 	WRITE_ONCE(*rsclp->tails[RCU_DONE_TAIL], NULL);
 	rclp->tail = rsclp->tails[RCU_DONE_TAIL];
 	for (i = RCU_CBLIST_NSEGS - 1; i >= RCU_DONE_TAIL; i--)
@@ -268,7 +268,7 @@ void rcu_segcblist_insert_done_cbs(struct rcu_segcblist *rsclp,
 	if (!rclp->head)
 		return; /* No callbacks to move. */
 	*rclp->tail = rsclp->head;
-	rsclp->head = rclp->head;
+	WRITE_ONCE(rsclp->head, rclp->head);
 	for (i = RCU_DONE_TAIL; i < RCU_CBLIST_NSEGS; i++)
 		if (&rsclp->head == rsclp->tails[i])
 			WRITE_ONCE(rsclp->tails[i], rclp->tail);
--- a/kernel/rcu/rcu_segcblist.h
+++ b/kernel/rcu/rcu_segcblist.h
@@ -36,7 +36,7 @@ struct rcu_head *rcu_cblist_dequeue(struct rcu_cblist *rclp);
  */
 static inline bool rcu_segcblist_empty(struct rcu_segcblist *rsclp)
 {
-	return !rsclp->head;
+	return !READ_ONCE(rsclp->head);
 }
 
 /* Return number of callbacks in segmented callback list. */