rcu: Eliminate a few RCU_BOOST #ifdefs in favor of IS_ENABLED()

This commit removes a few RCU_BOOST #ifdefs, replacing them with
IS_ENABLED()-protected return statements.  This relies on the
optimizer to remove any resulting dead code.  There are several other
RCU_BOOST #ifdefs; however, these rely on per-CPU variables that
are available only under RCU_BOOST.  These might be converted later
if the simplification proves to outweigh the increase in memory footprint.
One hoped-for advantage is more easily locating compiler errors in
obscure combinations of Kconfig parameters.
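
For reference, a minimal sketch of the transformation this commit applies
(hypothetical CONFIG_FOO and do_foo_work(), not taken from this patch):

	#ifdef CONFIG_FOO		/* old form: preprocessor hides the body */
		do_foo_work();
	#endif

becomes:

	if (IS_ENABLED(CONFIG_FOO))	/* new form: body is always compiled, */
		do_foo_work();		/* then discarded as dead code if =n */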

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: <linux-rt-users@vger.kernel.org>
Paul E. McKenney 2015-03-03 14:49:26 -08:00
parent e63c887cfe
commit 727b705baf
2 changed files with 20 additions and 17 deletions

--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -170,7 +170,6 @@ struct rcu_node {
 				/*  if there is no such task.  If there */
 				/*  is no current expedited grace period, */
 				/*  then there can cannot be any such task. */
-#ifdef CONFIG_RCU_BOOST
 	struct list_head *boost_tasks;
 				/* Pointer to first task that needs to be */
 				/*  priority boosted, or NULL if no priority */
@@ -208,7 +207,6 @@ struct rcu_node {
 	unsigned long n_balk_nos;
 				/* Refused to boost: not sure why, though. */
 				/*  This can happen due to race conditions. */
-#endif /* #ifdef CONFIG_RCU_BOOST */
 #ifdef CONFIG_RCU_NOCB_CPU
 	wait_queue_head_t nocb_gp_wq[2];
 				/* Place for rcu_nocb_kthread() to wait GP. */

--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h

@@ -43,7 +43,17 @@ DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
 DEFINE_PER_CPU(char, rcu_cpu_has_work);
 
-#endif /* #ifdef CONFIG_RCU_BOOST */
+#else /* #ifdef CONFIG_RCU_BOOST */
+
+/*
+ * Some architectures do not define rt_mutexes, but if !CONFIG_RCU_BOOST,
+ * all uses are in dead code.  Provide a definition to keep the compiler
+ * happy, but add WARN_ON_ONCE() to complain if used in the wrong place.
+ * This probably needs to be excluded from -rt builds.
+ */
+#define rt_mutex_owner(a) ({ WARN_ON_ONCE(1); NULL; })
+
+#endif /* #else #ifdef CONFIG_RCU_BOOST */
 
 #ifdef CONFIG_RCU_NOCB_CPU
 static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
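
(A sketch of the stub pattern added above, using a hypothetical CONFIG_FOO
helper rather than rt_mutex_owner(): the real declaration exists only when
the option is set; otherwise the stub keeps IS_ENABLED()-guarded callers
compilable, and WARN_ON_ONCE() complains if such a call ever survives
dead-code elimination.)

	#ifdef CONFIG_FOO
	struct task_struct *foo_owner(struct foo *f);	/* real helper */
	#else
	#define foo_owner(f) ({ WARN_ON_ONCE(1); NULL; })
	#endif
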
@@ -180,10 +190,9 @@ static void rcu_preempt_note_context_switch(void)
 		if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
 			list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
 			rnp->gp_tasks = &t->rcu_node_entry;
-#ifdef CONFIG_RCU_BOOST
-			if (rnp->boost_tasks != NULL)
+			if (IS_ENABLED(CONFIG_RCU_BOOST) &&
+			    rnp->boost_tasks != NULL)
 				rnp->boost_tasks = rnp->gp_tasks;
-#endif /* #ifdef CONFIG_RCU_BOOST */
 		} else {
 			list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
 			if (rnp->qsmask & rdp->grpmask)
@@ -263,9 +272,7 @@ void rcu_read_unlock_special(struct task_struct *t)
 	bool empty_exp_now;
 	unsigned long flags;
 	struct list_head *np;
-#ifdef CONFIG_RCU_BOOST
 	bool drop_boost_mutex = false;
-#endif /* #ifdef CONFIG_RCU_BOOST */
 	struct rcu_node *rnp;
 	union rcu_special special;
@@ -331,12 +338,12 @@ void rcu_read_unlock_special(struct task_struct *t)
 			rnp->gp_tasks = np;
 		if (&t->rcu_node_entry == rnp->exp_tasks)
 			rnp->exp_tasks = np;
-#ifdef CONFIG_RCU_BOOST
-		if (&t->rcu_node_entry == rnp->boost_tasks)
-			rnp->boost_tasks = np;
-		/* Snapshot ->boost_mtx ownership with rcu_node lock held. */
-		drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
-#endif /* #ifdef CONFIG_RCU_BOOST */
+		if (IS_ENABLED(CONFIG_RCU_BOOST)) {
+			if (&t->rcu_node_entry == rnp->boost_tasks)
+				rnp->boost_tasks = np;
+			/* Snapshot ->boost_mtx ownership w/rnp->lock held. */
+			drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
+		}
 
 		/*
 		 * If this was the last task on the current list, and if
@@ -358,11 +365,9 @@ void rcu_read_unlock_special(struct task_struct *t)
 			raw_spin_unlock_irqrestore(&rnp->lock, flags);
 		}
 
-#ifdef CONFIG_RCU_BOOST
 		/* Unboost if we were boosted. */
-		if (drop_boost_mutex)
+		if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
 			rt_mutex_unlock(&rnp->boost_mtx);
-#endif /* #ifdef CONFIG_RCU_BOOST */
 
 		/*
 		 * If this was the last task on the expedited lists,