srcu: Check for consistent per-CPU per-srcu_struct NMI safety
This commit adds runtime checks to verify that a given srcu_struct uses consistent NMI-safe (or not) read-side primitives on a per-CPU basis.

Link: https://lore.kernel.org/all/20220910221947.171557773@linutronix.de/
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: John Ogness <john.ogness@linutronix.de>
Cc: Petr Mladek <pmladek@suse.com>
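For orientation, a minimal sketch (editorial, not part of the patch) of the misuse this check reports: under CONFIG_PROVE_RCU=y, the first SRCU reader on a CPU records its flavor, and a later reader of the other flavor on that CPU triggers WARN_ONCE(). The srcu_struct name and reader functions below are hypothetical.

	DEFINE_STATIC_SRCU(my_srcu);	/* Hypothetical srcu_struct. */

	static void task_context_reader(void)
	{
		int idx = srcu_read_lock(&my_srcu);	/* Plain flavor claimed first. */
		/* ... read-side critical section ... */
		srcu_read_unlock(&my_srcu, idx);
	}

	static void nmi_handler_reader(void)
	{
		/* Other flavor on the same srcu_struct: once both flavors have
		 * run on the same CPU, srcu_check_nmi_safety() fires WARN_ONCE().
		 * Use one flavor consistently instead. */
		int idx = srcu_read_lock_nmisafe(&my_srcu);
		/* ... NMI-safe read-side critical section ... */
		srcu_read_unlock_nmisafe(&my_srcu, idx);
	}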
commit 27120e7d2c
parent 2e83b879fb
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -65,14 +65,14 @@ unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp);
 bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie);
 
 #ifdef CONFIG_NEED_SRCU_NMI_SAFE
-int __srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp);
-void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) __releases(ssp);
+int __srcu_read_lock_nmisafe(struct srcu_struct *ssp, bool chknmisafe) __acquires(ssp);
+void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx, bool chknmisafe) __releases(ssp);
 #else
-static inline int __srcu_read_lock_nmisafe(struct srcu_struct *ssp)
+static inline int __srcu_read_lock_nmisafe(struct srcu_struct *ssp, bool chknmisafe)
 {
 	return __srcu_read_lock(ssp);
 }
-static inline void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
+static inline void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx, bool chknmisafe)
 {
 	__srcu_read_unlock(ssp, idx);
 }
@@ -192,7 +192,7 @@ static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp)
 	int retval;
 
 	if (IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE))
-		retval = __srcu_read_lock_nmisafe(ssp);
+		retval = __srcu_read_lock_nmisafe(ssp, true);
 	else
 		retval = __srcu_read_lock(ssp);
 	rcu_lock_acquire(&(ssp)->dep_map);
@@ -237,7 +237,7 @@ static inline void srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
 	WARN_ON_ONCE(idx & ~0x1);
 	rcu_lock_release(&(ssp)->dep_map);
 	if (IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE))
-		__srcu_read_unlock_nmisafe(ssp, idx);
+		__srcu_read_unlock_nmisafe(ssp, idx, true);
 	else
 		__srcu_read_unlock(ssp, idx);
 }
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -25,6 +25,7 @@ struct srcu_data {
 	/* Read-side state. */
 	atomic_long_t srcu_lock_count[2];	/* Locks per CPU. */
 	atomic_long_t srcu_unlock_count[2];	/* Unlocks per CPU. */
+	int srcu_nmi_safety;			/* NMI-safe srcu_struct structure? */
 
 	/* Update-side state. */
 	spinlock_t __private lock ____cacheline_internodealigned_in_smp;
@@ -42,6 +43,10 @@ struct srcu_data {
 	struct srcu_struct *ssp;
 };
 
+#define SRCU_NMI_UNKNOWN	0x0
+#define SRCU_NMI_NMI_UNSAFE	0x1
+#define SRCU_NMI_NMI_SAFE	0x2
+
 /*
  * Node in SRCU combining tree, similar in function to rcu_data.
  */
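An editorial sketch of how these three values act as a per-CPU state machine, based on srcu_check_nmi_safety() added below:

	/* Per-CPU srcu_nmi_safety state machine (editorial sketch):
	 *
	 *	SRCU_NMI_UNKNOWN (0x0):    initial state; the first reader claims a flavor
	 *	SRCU_NMI_NMI_UNSAFE (0x1): claimed by plain __srcu_read_lock()/unlock()
	 *	SRCU_NMI_NMI_SAFE (0x2):   claimed by the _nmisafe() variants
	 *
	 * Once a flavor is claimed, a reader of the other flavor on the same
	 * CPU triggers WARN_ONCE() in srcu_check_nmi_safety().
	 */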
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -626,6 +626,26 @@ void cleanup_srcu_struct(struct srcu_struct *ssp)
 }
 EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
 
+/*
+ * Check for consistent NMI safety.
+ */
+static void srcu_check_nmi_safety(struct srcu_struct *ssp, bool nmi_safe)
+{
+	int nmi_safe_mask = 1 << nmi_safe;
+	int old_nmi_safe_mask;
+	struct srcu_data *sdp;
+
+	if (!IS_ENABLED(CONFIG_PROVE_RCU))
+		return;
+	sdp = raw_cpu_ptr(ssp->sda);
+	old_nmi_safe_mask = READ_ONCE(sdp->srcu_nmi_safety);
+	if (!old_nmi_safe_mask) {
+		WRITE_ONCE(sdp->srcu_nmi_safety, nmi_safe_mask);
+		return;
+	}
+	WARN_ONCE(old_nmi_safe_mask != nmi_safe_mask, "CPU %d old state %d new state %d\n", sdp->cpu, old_nmi_safe_mask, nmi_safe_mask);
+}
+
 /*
  * Counts the new reader in the appropriate per-CPU element of the
  * srcu_struct.
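A worked reading of the WARN_ONCE() diagnostic above (editorial): the states are the bit masks computed by 1 << nmi_safe, matching the srcutree.h #defines.

	/* Example: "CPU 3 old state 1 new state 2" means CPU 3's first reader
	 * used the plain primitives (1 << false == 0x1 == SRCU_NMI_NMI_UNSAFE)
	 * and a later reader on that CPU used the NMI-safe primitives
	 * (1 << true == 0x2 == SRCU_NMI_NMI_SAFE): an inconsistent mix.
	 */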
@@ -638,6 +658,7 @@ int __srcu_read_lock(struct srcu_struct *ssp)
 	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
 	this_cpu_inc(ssp->sda->srcu_lock_count[idx].counter);
 	smp_mb(); /* B */  /* Avoid leaking the critical section. */
+	srcu_check_nmi_safety(ssp, false);
 	return idx;
 }
 EXPORT_SYMBOL_GPL(__srcu_read_lock);
@@ -651,6 +672,7 @@ void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
 {
 	smp_mb(); /* C */  /* Avoid leaking the critical section. */
 	this_cpu_inc(ssp->sda->srcu_unlock_count[idx].counter);
+	srcu_check_nmi_safety(ssp, false);
 }
 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
 
@@ -661,7 +683,7 @@ EXPORT_SYMBOL_GPL(__srcu_read_unlock);
  * srcu_struct, but in an NMI-safe manner using RMW atomics.
  * Returns an index that must be passed to the matching srcu_read_unlock().
  */
-int __srcu_read_lock_nmisafe(struct srcu_struct *ssp)
+int __srcu_read_lock_nmisafe(struct srcu_struct *ssp, bool chknmisafe)
 {
 	int idx;
 	struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);
@@ -669,6 +691,8 @@ int __srcu_read_lock_nmisafe(struct srcu_struct *ssp)
 	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
 	atomic_long_inc(&sdp->srcu_lock_count[idx]);
 	smp_mb__after_atomic(); /* B */  /* Avoid leaking the critical section. */
+	if (chknmisafe)
+		srcu_check_nmi_safety(ssp, true);
 	return idx;
 }
 EXPORT_SYMBOL_GPL(__srcu_read_lock_nmisafe);
@@ -678,12 +702,14 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock_nmisafe);
  * element of the srcu_struct.  Note that this may well be a different
  * CPU than that which was incremented by the corresponding srcu_read_lock().
  */
-void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
+void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx, bool chknmisafe)
 {
 	struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);
 
 	smp_mb__before_atomic(); /* C */  /* Avoid leaking the critical section. */
 	atomic_long_inc(&sdp->srcu_unlock_count[idx]);
+	if (chknmisafe)
+		srcu_check_nmi_safety(ssp, true);
 }
 EXPORT_SYMBOL_GPL(__srcu_read_unlock_nmisafe);
 
@@ -1125,7 +1151,7 @@ static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
 	int ss_state;
 
 	check_init_srcu_struct(ssp);
-	idx = __srcu_read_lock_nmisafe(ssp);
+	idx = __srcu_read_lock_nmisafe(ssp, false);
 	ss_state = smp_load_acquire(&ssp->srcu_size_state);
 	if (ss_state < SRCU_SIZE_WAIT_CALL)
 		sdp = per_cpu_ptr(ssp->sda, 0);
@@ -1158,7 +1184,7 @@ static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
 		srcu_funnel_gp_start(ssp, sdp, s, do_norm);
 	else if (needexp)
 		srcu_funnel_exp_start(ssp, sdp_mynode, s);
-	__srcu_read_unlock_nmisafe(ssp, idx);
+	__srcu_read_unlock_nmisafe(ssp, idx, false);
 	return s;
 }
 
@@ -1462,13 +1488,13 @@ void srcu_barrier(struct srcu_struct *ssp)
 	/* Initial count prevents reaching zero until all CBs are posted. */
 	atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);
 
-	idx = __srcu_read_lock_nmisafe(ssp);
+	idx = __srcu_read_lock_nmisafe(ssp, false);
 	if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
 		srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, 0));
 	else
 		for_each_possible_cpu(cpu)
 			srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, cpu));
-	__srcu_read_unlock_nmisafe(ssp, idx);
+	__srcu_read_unlock_nmisafe(ssp, idx, false);
 
 	/* Remove the initial count, at which point reaching zero can happen. */
 	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
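Finally, a reader's summary (editorial) of which callers pass which chknmisafe value in this patch:

	/*
	 *	srcu_read_lock_nmisafe()/srcu_read_unlock_nmisafe()	-> true
	 *		User-facing entry points record and verify per-CPU state.
	 *	srcu_gp_start_if_needed() and srcu_barrier()		-> false
	 *		Update-side internals always use the atomic primitives,
	 *		whichever flavor the user chose, so counting them would
	 *		yield false positives.
	 */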