locktorture: Support rtmutex torturing
Real-time mutexes are one of the few general locking primitives that we
do not have in locktorture. Address this -- a few considerations:

o To spice things up, enable competing thread(s) to become rt, such
  that we can stress different prio-boosting paths in the rtmutex code.
  Introduce a ->task_boost callback, used only by the rtmutex torturer.
  Tasks will boost/deboost around every 50k (arbitrarily) lock/unlock
  operations.

o Hold times are similar to what we have for other locks: only
  occasionally having longer hold times (per ~200k ops). So we roughly
  do two full rt boost+deboost cycles with short hold times.

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
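As a usage note, the new type is selected like any other locktorture
type, via the existing torture_type module parameter. A minimal
invocation might look like the following (the writer count is just an
example; by default locktorture spawns 2*ncpus writers):

    modprobe locktorture torture_type=rtmutex_lock nwriters_stress=8

or on the kernel command line, as the new LOCK05.boot fragment at the
end of this patch does:

    locktorture.torture_type=rtmutex_lock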
--- a/Documentation/locking/locktorture.txt
+++ b/Documentation/locking/locktorture.txt
@@ -52,6 +52,9 @@ torture_type	  Type of lock to torture. By default, only spinlocks will
 
		     o "mutex_lock": mutex_lock() and mutex_unlock() pairs.
 
+		     o "rtmutex_lock": rtmutex_lock() and rtmutex_unlock()
+				       pairs. Kernel must have CONFIG_RT_MUTEXES=y.
+
		     o "rwsem_lock": read/write down() and up() semaphore pairs.
 
 torture_runnable	  Start locktorture at boot time in the case where the
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -17,12 +17,14 @@
  *
  * Copyright (C) IBM Corporation, 2014
  *
- * Author: Paul E. McKenney <paulmck@us.ibm.com>
+ * Authors: Paul E. McKenney <paulmck@us.ibm.com>
+ *          Davidlohr Bueso <dave@stgolabs.net>
  *	Based on kernel/rcu/torture.c.
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/kthread.h>
+#include <linux/sched/rt.h>
 #include <linux/spinlock.h>
 #include <linux/rwlock.h>
 #include <linux/mutex.h>
@@ -91,11 +93,13 @@ struct lock_torture_ops {
	void (*init)(void);
	int (*writelock)(void);
	void (*write_delay)(struct torture_random_state *trsp);
+	void (*task_boost)(struct torture_random_state *trsp);
	void (*writeunlock)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *trsp);
	void (*readunlock)(void);
-	unsigned long flags;
+
+	unsigned long flags; /* for irq spinlocks */
	const char *name;
 };
 
@@ -139,9 +143,15 @@ static void torture_lock_busted_write_unlock(void)
	/* BUGGY, do not use in real life!!! */
 }
 
+static void torture_boost_dummy(struct torture_random_state *trsp)
+{
+	/* Only rtmutexes care about priority */
+}
+
 static struct lock_torture_ops lock_busted_ops = {
	.writelock	= torture_lock_busted_write_lock,
	.write_delay	= torture_lock_busted_write_delay,
+	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_lock_busted_write_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
@@ -185,6 +195,7 @@ static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock)
 static struct lock_torture_ops spin_lock_ops = {
	.writelock	= torture_spin_lock_write_lock,
	.write_delay	= torture_spin_lock_write_delay,
+	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_spin_lock_write_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
@@ -211,6 +222,7 @@ __releases(torture_spinlock)
 static struct lock_torture_ops spin_lock_irq_ops = {
	.writelock	= torture_spin_lock_write_lock_irq,
	.write_delay	= torture_spin_lock_write_delay,
+	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_lock_spin_write_unlock_irq,
	.readlock       = NULL,
	.read_delay     = NULL,
@@ -275,6 +287,7 @@ static void torture_rwlock_read_unlock(void) __releases(torture_rwlock)
 static struct lock_torture_ops rw_lock_ops = {
	.writelock	= torture_rwlock_write_lock,
	.write_delay	= torture_rwlock_write_delay,
+	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_rwlock_write_unlock,
	.readlock       = torture_rwlock_read_lock,
	.read_delay     = torture_rwlock_read_delay,
@@ -315,6 +328,7 @@ __releases(torture_rwlock)
 static struct lock_torture_ops rw_lock_irq_ops = {
	.writelock	= torture_rwlock_write_lock_irq,
	.write_delay	= torture_rwlock_write_delay,
+	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_rwlock_write_unlock_irq,
	.readlock       = torture_rwlock_read_lock_irq,
	.read_delay     = torture_rwlock_read_delay,
@@ -354,6 +368,7 @@ static void torture_mutex_unlock(void) __releases(torture_mutex)
 static struct lock_torture_ops mutex_lock_ops = {
	.writelock	= torture_mutex_lock,
	.write_delay	= torture_mutex_delay,
+	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_mutex_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
@@ -361,6 +376,90 @@ static struct lock_torture_ops mutex_lock_ops = {
	.name		= "mutex_lock"
 };
 
+#ifdef CONFIG_RT_MUTEXES
+static DEFINE_RT_MUTEX(torture_rtmutex);
+
+static int torture_rtmutex_lock(void) __acquires(torture_rtmutex)
+{
+	rt_mutex_lock(&torture_rtmutex);
+	return 0;
+}
+
+static void torture_rtmutex_boost(struct torture_random_state *trsp)
+{
+	int policy;
+	struct sched_param param;
+	const unsigned int factor = 50000; /* yes, quite arbitrary */
+
+	if (!rt_task(current)) {
+		/*
+		 * Boost priority once every ~50k operations. When the
+		 * task tries to take the lock, the rtmutex will account
+		 * for the new priority, and do any corresponding pi-dance.
+		 */
+		if (!(torture_random(trsp) %
+		      (cxt.nrealwriters_stress * factor))) {
+			policy = SCHED_FIFO;
+			param.sched_priority = MAX_RT_PRIO - 1;
+		} else /* common case, do nothing */
+			return;
+	} else {
+		/*
+		 * The task will remain boosted for another ~500k operations,
+		 * then restored back to its original prio, and so forth.
+		 *
+		 * When @trsp is nil, we want to force-reset the task for
+		 * stopping the kthread.
+		 */
+		if (!trsp || !(torture_random(trsp) %
+			       (cxt.nrealwriters_stress * factor * 2))) {
+			policy = SCHED_NORMAL;
+			param.sched_priority = 0;
+		} else /* common case, do nothing */
+			return;
+	}
+
+	sched_setscheduler_nocheck(current, policy, &param);
+}
+
+static void torture_rtmutex_delay(struct torture_random_state *trsp)
+{
+	const unsigned long shortdelay_us = 2;
+	const unsigned long longdelay_ms = 100;
+
+	/*
+	 * We want a short delay mostly to emulate likely code, and
+	 * we want a long delay occasionally to force massive contention.
+	 */
+	if (!(torture_random(trsp) %
+	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
+		mdelay(longdelay_ms);
+	if (!(torture_random(trsp) %
+	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
+		udelay(shortdelay_us);
+#ifdef CONFIG_PREEMPT
+	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
+		preempt_schedule(); /* Allow test to be preempted. */
+#endif
+}
+
+static void torture_rtmutex_unlock(void) __releases(torture_rtmutex)
+{
+	rt_mutex_unlock(&torture_rtmutex);
+}
+
+static struct lock_torture_ops rtmutex_lock_ops = {
+	.writelock	= torture_rtmutex_lock,
+	.write_delay	= torture_rtmutex_delay,
+	.task_boost     = torture_rtmutex_boost,
+	.writeunlock	= torture_rtmutex_unlock,
+	.readlock       = NULL,
+	.read_delay     = NULL,
+	.readunlock     = NULL,
+	.name		= "rtmutex_lock"
+};
+#endif
+
 static DECLARE_RWSEM(torture_rwsem);
 static int torture_rwsem_down_write(void) __acquires(torture_rwsem)
 {
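For intuition on the boost frequency in torture_rtmutex_boost() above:
torture_random() is roughly uniform, so !(torture_random(trsp) % N)
fires with probability about 1/N per call. With N =
nrealwriters_stress * factor, any given writer boosts about once per
nrealwriters * 50k of its own operations, which works out to roughly
one boost per ~50k lock operations system-wide; the deboost check uses
factor * 2, so boosted tasks stay rt about twice as long. A standalone
userspace sketch of the idiom (plain C for illustration, not kernel
code; the writer count is an assumed value):

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		const unsigned long nrealwriters = 4;	/* assumed */
		const unsigned long factor = 50000;	/* as in the patch */
		const unsigned long n = nrealwriters * factor;
		unsigned long ops, boosts = 0;

		srandom(42);
		for (ops = 0; ops < 10000000UL; ops++)
			if (!(random() % n))	/* ~1/n chance per op */
				boosts++;

		/* expect ops/n, i.e. one boost per ~200k per-writer ops */
		printf("boosts: %lu (expected ~%lu)\n", boosts, ops / n);
		return 0;
	}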
@@ -419,6 +518,7 @@ static void torture_rwsem_up_read(void) __releases(torture_rwsem)
 static struct lock_torture_ops rwsem_lock_ops = {
	.writelock	= torture_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
+	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_rwsem_up_write,
	.readlock       = torture_rwsem_down_read,
	.read_delay     = torture_rwsem_read_delay,
@@ -442,6 +542,7 @@ static int lock_torture_writer(void *arg)
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);
 
+		cxt.cur_ops->task_boost(&rand);
		cxt.cur_ops->writelock();
		if (WARN_ON_ONCE(lock_is_write_held))
			lwsp->n_lock_fail++;
@@ -456,6 +557,8 @@ static int lock_torture_writer(void *arg)
 
		stutter_wait("lock_torture_writer");
	} while (!torture_must_stop());
+
+	cxt.cur_ops->task_boost(NULL); /* reset prio */
	torture_kthread_stopping("lock_torture_writer");
	return 0;
 }
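Note that the ->task_boost() hook is invoked unconditionally in the
writer loop above: lock types with no notion of priority install
torture_boost_dummy rather than leaving the pointer NULL, which keeps
the hot path free of NULL checks. The same pattern in miniature
(illustrative C, not taken from the patch):

	struct ops {
		void (*task_boost)(void);
	};

	static void boost_dummy(void)
	{
		/* no-op for lock types that ignore priority */
	}

	static struct ops spin_ops = {
		.task_boost = boost_dummy,	/* never NULL */
	};

	/* callers can then do ops->task_boost() without a check */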
@@ -642,6 +745,9 @@ static int __init lock_torture_init(void)
		&spin_lock_ops, &spin_lock_irq_ops,
		&rw_lock_ops, &rw_lock_irq_ops,
		&mutex_lock_ops,
+#ifdef CONFIG_RT_MUTEXES
+		&rtmutex_lock_ops,
+#endif
		&rwsem_lock_ops,
	};
 
@@ -676,6 +782,10 @@ static int __init lock_torture_init(void)
	if (strncmp(torture_type, "mutex", 5) == 0)
		cxt.debug_lock = true;
 #endif
+#ifdef CONFIG_DEBUG_RT_MUTEXES
+	if (strncmp(torture_type, "rtmutex", 7) == 0)
+		cxt.debug_lock = true;
+#endif
 #ifdef CONFIG_DEBUG_SPINLOCK
	if ((strncmp(torture_type, "spin", 4) == 0) ||
	    (strncmp(torture_type, "rw_lock", 7) == 0))
--- a/tools/testing/selftests/rcutorture/configs/lock/CFLIST
+++ b/tools/testing/selftests/rcutorture/configs/lock/CFLIST
@@ -1,4 +1,5 @@
 LOCK01
 LOCK02
 LOCK03
 LOCK04
+LOCK05
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/lock/LOCK05
@@ -0,0 +1,6 @@
+CONFIG_SMP=y
+CONFIG_NR_CPUS=4
+CONFIG_HOTPLUG_CPU=y
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/lock/LOCK05.boot
@@ -0,0 +1 @@
+locktorture.torture_type=rtmutex_lock
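With the LOCK05 files in place, the new scenario can be driven through
the usual rcutorture scripting; a typical invocation (adjust the
duration to taste) might be:

	tools/testing/selftests/rcutorture/bin/kvm.sh --torture lock \
		--configs LOCK05 --duration 10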