rcu: Complain if blocking in preemptible RCU read-side critical section
Although preemptible RCU allows its read-side critical sections to be preempted, general blocking is forbidden. The reason for this is that excessive preemption times can be handled by CONFIG_RCU_BOOST=y, but a voluntarily blocked task doesn't care how high you boost its priority. Because preemptible RCU is a global mechanism, one ill-behaved reader hurts everyone. Hence the prohibition against general blocking in RCU-preempt read-side critical sections. Preemption yes, blocking no.

This commit enforces this prohibition.

There is a special exception for the -rt patchset (which they kindly volunteered to implement): It is OK to block (as opposed to merely being preempted) within an RCU-preempt read-side critical section, but only if the blocking is subject to priority inheritance. This exception permits CONFIG_RCU_BOOST=y to get -rt RCU readers out of trouble.

Why doesn't this exception also apply to mainline's rt_mutex? Because of the possibility that someone does general blocking while holding an rt_mutex. Yes, the priority boosting will affect the rt_mutex, but it won't help with the task doing general blocking while holding that rt_mutex.

Reported-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
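To make the prohibition concrete, here is a minimal sketch (not part of this patch; the struct, the gp pointer, and the msleep() delay are illustrative) of the kind of reader the new check complains about on a CONFIG_PREEMPT_RCU=y kernel. Being preempted inside the critical section remains fine; voluntarily blocking inside it now draws a warning at the resulting context switch.

	/* Illustrative only: a reader that now draws a complaint. */
	struct foo {
		int val;
	};
	static struct foo __rcu *gp;	/* hypothetical RCU-protected pointer */

	static void bad_reader(void)
	{
		struct foo *p;

		rcu_read_lock();
		p = rcu_dereference(gp);
		if (p)
			pr_info("val=%d\n", p->val);
		msleep(10);		/* voluntary blocking inside the reader:
					 * forbidden, warned about when the task
					 * blocks */
		rcu_read_unlock();	/* merely being preempted before here
					 * would have been fine */
	}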
parent 881ec9d209
commit 5b72f9643b
@@ -478,7 +478,7 @@ void rcu_note_context_switch(bool preempt)
 	barrier(); /* Avoid RCU read-side critical sections leaking down. */
 	trace_rcu_utilization(TPS("Start context switch"));
 	rcu_sched_qs();
-	rcu_preempt_note_context_switch();
+	rcu_preempt_note_context_switch(preempt);
 	/* Load rcu_urgent_qs before other flags. */
 	if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs)))
 		goto out;
@@ -477,7 +477,7 @@ DECLARE_PER_CPU(char, rcu_cpu_has_work);
 
 /* Forward declarations for rcutree_plugin.h */
 static void rcu_bootup_announce(void);
-static void rcu_preempt_note_context_switch(void);
+static void rcu_preempt_note_context_switch(bool preempt);
 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
 static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
@@ -286,12 +286,13 @@ static void rcu_preempt_qs(void)
  *
  * Caller must disable interrupts.
  */
-static void rcu_preempt_note_context_switch(void)
+static void rcu_preempt_note_context_switch(bool preempt)
 {
 	struct task_struct *t = current;
 	struct rcu_data *rdp;
 	struct rcu_node *rnp;
 
+	WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0);
 	if (t->rcu_read_lock_nesting > 0 &&
 	    !t->rcu_read_unlock_special.b.blocked) {
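The added check warns only when the context switch is voluntary (preempt == false) while the task is still inside a read-side critical section (rcu_read_lock_nesting > 0); an involuntary preemption of a reader stays silent. A standalone sketch of that truth table, as plain userspace C with a hypothetical should_warn() helper rather than kernel code:

	#include <stdbool.h>
	#include <stdio.h>

	/* Mirrors WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0). */
	static bool should_warn(bool preempt, int rcu_read_lock_nesting)
	{
		return !preempt && rcu_read_lock_nesting > 0;
	}

	int main(void)
	{
		printf("%d\n", should_warn(true, 1));	/* 0: preempted reader, allowed */
		printf("%d\n", should_warn(false, 1));	/* 1: reader blocked voluntarily */
		printf("%d\n", should_warn(false, 0));	/* 0: blocking outside a reader is fine */
		return 0;
	}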
@@ -738,7 +739,7 @@ static void __init rcu_bootup_announce(void)
  * Because preemptible RCU does not exist, we never have to check for
  * CPUs being in quiescent states.
  */
-static void rcu_preempt_note_context_switch(void)
+static void rcu_preempt_note_context_switch(bool preempt)
 {
 }