rcu: Repurpose no-CBs event tracing to future-GP events

Dyntick-idle CPUs need to be able to pre-announce their need for grace
periods.  This can be done using something similar to the mechanism used
by no-CB CPUs to announce their need for grace periods.  This commit
moves in this direction by renaming the no-CBs grace-period event tracing
to suit the new future-grace-period needs.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
commit bd9f0686fc (parent b92db6cb7e)
Author:    Paul E. McKenney
Date:      2012-12-29 21:51:20 -08:00
Committer: Paul E. McKenney

2 files changed, 40 insertions(+), 38 deletions(-)


@@ -72,10 +72,10 @@ TRACE_EVENT(rcu_grace_period,
 );
 
 /*
- * Tracepoint for no-callbacks grace-period events. The caller should
- * pull the data from the rcu_node structure, other than rcuname, which
- * comes from the rcu_state structure, and event, which is one of the
- * following:
+ * Tracepoint for future grace-period events, including those for no-callbacks
+ * CPUs. The caller should pull the data from the rcu_node structure,
+ * other than rcuname, which comes from the rcu_state structure, and event,
+ * which is one of the following:
  *
  * "Startleaf": Request a nocb grace period based on leaf-node data.
  * "Startedleaf": Leaf-node start proved sufficient.
@@ -87,7 +87,7 @@ TRACE_EVENT(rcu_grace_period,
  * "Cleanup": Clean up rcu_node structure after previous GP.
  * "CleanupMore": Clean up, and another no-CB GP is needed.
  */
-TRACE_EVENT(rcu_nocb_grace_period,
+TRACE_EVENT(rcu_future_grace_period,
 
         TP_PROTO(char *rcuname, unsigned long gpnum, unsigned long completed,
                  unsigned long c, u8 level, int grplo, int grphi,
@@ -653,9 +653,9 @@ TRACE_EVENT(rcu_barrier,
 #define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
 #define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \
                                     qsmask) do { } while (0)
-#define trace_rcu_nocb_grace_period(rcuname, gpnum, completed, c, \
-                                    level, grplo, grphi, event) \
-                                    do { } while (0)
+#define trace_rcu_future_grace_period(rcuname, gpnum, completed, c, \
+                                      level, grplo, grphi, event) \
+                                      do { } while (0)
 #define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0)
 #define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
 #define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, \
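Note on the trailing stub definitions above: when RCU event tracing is compiled out, each trace_rcu_*() call must still parse as a single complete statement, which is why the stubs expand to do { } while (0) rather than to nothing. The stand-alone program below is a minimal sketch of that idiom only; the name trace_event_stub() is hypothetical, not the kernel's.

#include <stdio.h>

/* Hypothetical stand-in for a compiled-out trace stub: like the kernel's
 * trace_rcu_*() stubs above, it must behave as one complete statement. */
#define trace_event_stub(gpnum) do { } while (0)

int main(void)
{
        int tracing_enabled = 0;

        /* The do { } while (0) form consumes the trailing semicolon and is
         * a single statement even as the lone body of an if/else; a plain
         * empty expansion would leave a bare ';' body and typically draws
         * empty-body compiler warnings. */
        if (tracing_enabled)
                trace_event_stub(42);
        else
                printf("tracing compiled out; the stub is a no-op\n");
        return 0;
}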


@@ -2034,9 +2034,9 @@ static int rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
         wake_up_all(&rnp->nocb_gp_wq[c & 0x1]);
         rnp->n_nocb_gp_requests[c & 0x1] = 0;
         needmore = rnp->n_nocb_gp_requests[(c + 1) & 0x1];
-        trace_rcu_nocb_grace_period(rsp->name, rnp->gpnum, rnp->completed,
-                                    c, rnp->level, rnp->grplo, rnp->grphi,
-                                    needmore ? "CleanupMore" : "Cleanup");
+        trace_rcu_future_grace_period(rsp->name, rnp->gpnum, rnp->completed,
+                                      c, rnp->level, rnp->grplo, rnp->grphi,
+                                      needmore ? "CleanupMore" : "Cleanup");
         return needmore;
 }
@@ -2183,9 +2183,9 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
         /* Count our request for a grace period. */
         rnp->n_nocb_gp_requests[c & 0x1]++;
-        trace_rcu_nocb_grace_period(rdp->rsp->name, rnp->gpnum, rnp->completed,
-                                    c, rnp->level, rnp->grplo, rnp->grphi,
-                                    "Startleaf");
+        trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
+                                      rnp->completed, c, rnp->level,
+                                      rnp->grplo, rnp->grphi, "Startleaf");
         if (rnp->gpnum != rnp->completed) {
@@ -2194,10 +2194,10 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
                  * is in progress, so we are done. When this grace
                  * period ends, our request will be acted upon.
                  */
-                trace_rcu_nocb_grace_period(rdp->rsp->name,
-                                            rnp->gpnum, rnp->completed, c,
-                                            rnp->level, rnp->grplo, rnp->grphi,
-                                            "Startedleaf");
+                trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
+                                              rnp->completed, c, rnp->level,
+                                              rnp->grplo, rnp->grphi,
+                                              "Startedleaf");
                 raw_spin_unlock_irqrestore(&rnp->lock, flags);
         } else {
@@ -2209,11 +2209,12 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
                 if (rnp != rnp_root)
                         raw_spin_lock(&rnp_root->lock); /* irqs disabled. */
                 if (rnp_root->gpnum != rnp_root->completed) {
-                        trace_rcu_nocb_grace_period(rdp->rsp->name,
-                                                    rnp->gpnum, rnp->completed,
-                                                    c, rnp->level,
-                                                    rnp->grplo, rnp->grphi,
-                                                    "Startedleafroot");
+                        trace_rcu_future_grace_period(rdp->rsp->name,
+                                                      rnp->gpnum,
+                                                      rnp->completed,
+                                                      c, rnp->level,
+                                                      rnp->grplo, rnp->grphi,
+                                                      "Startedleafroot");
                         raw_spin_unlock(&rnp_root->lock); /* irqs disabled. */
                 } else {
@@ -2229,11 +2230,12 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
                         c = rnp_root->completed + 1;
                         rnp->n_nocb_gp_requests[c & 0x1]++;
                         rnp_root->n_nocb_gp_requests[c & 0x1]++;
-                        trace_rcu_nocb_grace_period(rdp->rsp->name,
-                                                    rnp->gpnum, rnp->completed,
-                                                    c, rnp->level,
-                                                    rnp->grplo, rnp->grphi,
-                                                    "Startedroot");
+                        trace_rcu_future_grace_period(rdp->rsp->name,
+                                                      rnp->gpnum,
+                                                      rnp->completed,
+                                                      c, rnp->level,
+                                                      rnp->grplo, rnp->grphi,
+                                                      "Startedroot");
                         local_save_flags(flags1);
                         rcu_start_gp(rdp->rsp, flags1); /* Rlses ->lock. */
                 }
@@ -2249,9 +2251,9 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
          * Wait for the grace period. Do so interruptibly to avoid messing
          * up the load average.
          */
-        trace_rcu_nocb_grace_period(rdp->rsp->name, rnp->gpnum, rnp->completed,
-                                    c, rnp->level, rnp->grplo, rnp->grphi,
-                                    "StartWait");
+        trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
+                                      rnp->completed, c, rnp->level,
+                                      rnp->grplo, rnp->grphi, "StartWait");
         for (;;) {
                 wait_event_interruptible(
                         rnp->nocb_gp_wq[c & 0x1],
@@ -2259,14 +2261,14 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
                 if (likely(d))
                         break;
                 flush_signals(current);
-                trace_rcu_nocb_grace_period(rdp->rsp->name,
-                                            rnp->gpnum, rnp->completed, c,
-                                            rnp->level, rnp->grplo, rnp->grphi,
-                                            "ResumeWait");
+                trace_rcu_future_grace_period(rdp->rsp->name,
+                                              rnp->gpnum, rnp->completed, c,
+                                              rnp->level, rnp->grplo,
+                                              rnp->grphi, "ResumeWait");
         }
-        trace_rcu_nocb_grace_period(rdp->rsp->name, rnp->gpnum, rnp->completed,
-                                    c, rnp->level, rnp->grplo, rnp->grphi,
-                                    "EndWait");
+        trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
+                                      rnp->completed, c, rnp->level,
+                                      rnp->grplo, rnp->grphi, "EndWait");
         smp_mb(); /* Ensure that CB invocation happens after GP end. */
 }
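For readers new to this code, the bookkeeping touched above amounts to: a request for a future grace period c is counted in a two-entry array indexed by c's low-order bit, and the cleanup for grace period c clears its own slot and reports "CleanupMore" if the other slot already holds requests for a later grace period. The stand-alone program below is a toy model of that pattern, not kernel code; names such as toy_rcu_node and the grace-period numbers in main() are invented for illustration.

#include <stdio.h>

/* Toy model (not kernel code) of the two-slot future-GP request counter
 * seen above as rnp->n_nocb_gp_requests[c & 0x1]. */
struct toy_rcu_node {
        unsigned long completed;        /* last completed grace period */
        int n_gp_requests[2];           /* requests, indexed by GP parity */
};

/* A waiter records its need for future grace period "c" ("Startleaf"). */
static void request_future_gp(struct toy_rcu_node *rnp, unsigned long c)
{
        rnp->n_gp_requests[c & 0x1]++;
        printf("Startleaf: need GP %lu\n", c);
}

/* End of grace period "c": clear its slot, then check whether the other
 * slot already holds requests for a later GP ("CleanupMore"). */
static int cleanup_gp(struct toy_rcu_node *rnp, unsigned long c)
{
        int needmore;

        rnp->n_gp_requests[c & 0x1] = 0;
        needmore = rnp->n_gp_requests[(c + 1) & 0x1];
        rnp->completed = c;
        printf("%s: GP %lu done\n", needmore ? "CleanupMore" : "Cleanup", c);
        return needmore;
}

int main(void)
{
        struct toy_rcu_node rnp = { .completed = 4 };

        /* Pretend GP 5 is already under way, so the request targets GP 6
         * (the "Startedleaf" situation in rcu_nocb_wait_gp() above). */
        request_future_gp(&rnp, 6);

        cleanup_gp(&rnp, 5);    /* "CleanupMore": GP 6 is still wanted */
        cleanup_gp(&rnp, 6);    /* "Cleanup": the request is satisfied */
        return 0;
}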