rcu: Avoid signed integer overflow in rcu_preempt_deferred_qs()

Subtracting INT_MIN from a non-negative value is unconditional signed
integer overflow, which the C standard declares to be undefined
behavior.  Kernel build arguments notwithstanding, it would be good to
future-proof the code, so this commit substitutes INT_MAX for INT_MIN
in order to avoid that undefined behavior.
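
To see the hazard concretely, here is a standalone illustration (not
code from the patch): ->rcu_read_lock_nesting is non-negative outside
the unlock path, and subtracting INT_MIN from any non-negative int
yields a mathematical result greater than INT_MAX, so the subtraction
overflows every time it executes.  Subtracting INT_MAX instead keeps
the intermediate value representable:

	#include <limits.h>
	#include <stdio.h>

	int main(void)
	{
		int nesting = 3;	/* plausible read-side nesting depth */

		/*
		 * nesting -= INT_MIN would compute 3 - (-2147483648),
		 * i.e. INT_MAX + 4, which does not fit in an int:
		 * signed overflow, hence undefined behavior.
		 *
		 * Biasing by INT_MAX stays in range for any nesting >= 0:
		 */
		nesting -= INT_MAX;	/* 3 - 2147483647 == INT_MIN + 4 */
		printf("biased:   %d\n", nesting);
		nesting += INT_MAX;	/* restores the original value, 3 */
		printf("restored: %d\n", nesting);
		return 0;
	}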

While in the neighborhood, this commit also creates some meaningful names
for INT_MAX and friends in order to improve readability, as suggested
by Joel Fernandes.
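
As a rough sketch of the envelope those names describe (definitions
copied from the diff below; numeric values assume a 32-bit int): an
unbiased counter should lie in [0, RCU_NEST_PMAX], and a biased one
should sit at or below RCU_NEST_NMAX; the patch's WARN_ON_ONCE() calls
flag values outside this envelope:

	#include <assert.h>
	#include <limits.h>

	/* Names and values as in the diff below (32-bit int). */
	#define RCU_NEST_BIAS INT_MAX		/* 2147483647 */
	#define RCU_NEST_NMAX (-INT_MAX / 2)	/* -1073741823 */
	#define RCU_NEST_PMAX (INT_MAX / 2)	/* 1073741823 */

	int main(void)
	{
		int nesting = 2;	/* plausible unbiased depth */

		/* Unbiased: must lie in [0, RCU_NEST_PMAX]. */
		assert(nesting >= 0 && nesting <= RCU_NEST_PMAX);

		/* Biased: sits near -RCU_NEST_BIAS, at or below
		 * RCU_NEST_NMAX; a negative value above RCU_NEST_NMAX
		 * would indicate corruption. */
		nesting -= RCU_NEST_BIAS;
		assert(nesting < 0 && nesting <= RCU_NEST_NMAX);
		return 0;
	}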

Reported-by: Ran Rozenstein <ranro@mellanox.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>

@@ -397,6 +397,11 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
 	return rnp->gp_tasks != NULL;
 }
 
+/* Bias and limit values for ->rcu_read_lock_nesting. */
+#define RCU_NEST_BIAS INT_MAX
+#define RCU_NEST_NMAX (-INT_MAX / 2)
+#define RCU_NEST_PMAX (INT_MAX / 2)
+
 /*
  * Preemptible RCU implementation for rcu_read_lock().
  * Just increment ->rcu_read_lock_nesting, shared state will be updated
@@ -405,6 +410,8 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
 void __rcu_read_lock(void)
 {
 	current->rcu_read_lock_nesting++;
+	if (IS_ENABLED(CONFIG_PROVE_LOCKING))
+		WARN_ON_ONCE(current->rcu_read_lock_nesting > RCU_NEST_PMAX);
 	barrier();  /* critical section after entry code. */
 }
 EXPORT_SYMBOL_GPL(__rcu_read_lock);
@@ -424,20 +431,18 @@ void __rcu_read_unlock(void)
 		--t->rcu_read_lock_nesting;
 	} else {
 		barrier();  /* critical section before exit code. */
-		t->rcu_read_lock_nesting = INT_MIN;
+		t->rcu_read_lock_nesting = -RCU_NEST_BIAS;
 		barrier();  /* assign before ->rcu_read_unlock_special load */
 		if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
 			rcu_read_unlock_special(t);
 		barrier();  /* ->rcu_read_unlock_special load before assign */
 		t->rcu_read_lock_nesting = 0;
 	}
-#ifdef CONFIG_PROVE_LOCKING
-	{
-		int rrln = READ_ONCE(t->rcu_read_lock_nesting);
+	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
+		int rrln = t->rcu_read_lock_nesting;
 
-		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
+		WARN_ON_ONCE(rrln < 0 && rrln > RCU_NEST_NMAX);
 	}
-#endif /* #ifdef CONFIG_PROVE_LOCKING */
 }
 EXPORT_SYMBOL_GPL(__rcu_read_unlock);
@@ -617,11 +622,11 @@ static void rcu_preempt_deferred_qs(struct task_struct *t)
 	if (!rcu_preempt_need_deferred_qs(t))
 		return;
 	if (couldrecurse)
-		t->rcu_read_lock_nesting -= INT_MIN;
+		t->rcu_read_lock_nesting -= RCU_NEST_BIAS;
 	local_irq_save(flags);
 	rcu_preempt_deferred_qs_irqrestore(t, flags);
 	if (couldrecurse)
-		t->rcu_read_lock_nesting += INT_MIN;
+		t->rcu_read_lock_nesting += RCU_NEST_BIAS;
 }
 
 /*