sched: Provide raw_spin_rq_*lock*() helpers
In preparation for playing games with rq->lock, add some rq_lock wrappers. Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Tested-by: Don Hiatt <dhiatt@digitalocean.com> Tested-by: Hongyu Ning <hongyu.ning@linux.intel.com> Tested-by: Vincent Guittot <vincent.guittot@linaro.org> Link: https://lkml.kernel.org/r/20210422123308.075967879@infradead.org
This commit is contained in:
parent
9099a14708
commit
39d371b7c0
|
@ -184,6 +184,21 @@ int sysctl_sched_rt_runtime = 950000;
|
|||
*
|
||||
*/
|
||||
|
||||
void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
|
||||
{
|
||||
raw_spin_lock_nested(rq_lockp(rq), subclass);
|
||||
}
|
||||
|
||||
bool raw_spin_rq_trylock(struct rq *rq)
|
||||
{
|
||||
return raw_spin_trylock(rq_lockp(rq));
|
||||
}
|
||||
|
||||
void raw_spin_rq_unlock(struct rq *rq)
|
||||
{
|
||||
raw_spin_unlock(rq_lockp(rq));
|
||||
}
|
||||
|
||||
/*
|
||||
* __task_rq_lock - lock the rq @p resides on.
|
||||
*/
|
||||
|
|
|
@ -1113,6 +1113,56 @@ static inline bool is_migration_disabled(struct task_struct *p)
|
|||
#endif
|
||||
}
|
||||
|
||||
/*
 * rq_lockp - return the raw_spinlock_t that guards @rq.
 *
 * Single point of indirection for the rq lock; all raw_spin_rq_*()
 * helpers go through here so the backing lock can be changed in one
 * place.
 */
static inline raw_spinlock_t *rq_lockp(struct rq *rq)
{
	return &rq->lock;
}
|
||||
|
||||
/*
 * Assert (under lockdep) that the current context holds @rq's lock,
 * resolved through rq_lockp().
 */
static inline void lockdep_assert_rq_held(struct rq *rq)
{
	lockdep_assert_held(rq_lockp(rq));
}
|
||||
|
||||
/* Out-of-line rq lock primitives; defined in kernel/sched/core.c. */
extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass);
extern bool raw_spin_rq_trylock(struct rq *rq);
extern void raw_spin_rq_unlock(struct rq *rq);
|
||||
|
||||
/*
 * Acquire @rq's lock; convenience wrapper for the common (subclass 0)
 * case of raw_spin_rq_lock_nested().
 */
static inline void raw_spin_rq_lock(struct rq *rq)
{
	raw_spin_rq_lock_nested(rq, 0);
}
|
||||
|
||||
/*
 * Disable local interrupts, then acquire @rq's lock.
 *
 * Interrupts must be disabled *before* taking the lock so the lock is
 * never held with interrupts enabled.
 */
static inline void raw_spin_rq_lock_irq(struct rq *rq)
{
	local_irq_disable();
	raw_spin_rq_lock(rq);
}
|
||||
|
||||
/*
 * Release @rq's lock, then re-enable local interrupts; the inverse of
 * raw_spin_rq_lock_irq(), in the reverse order.
 */
static inline void raw_spin_rq_unlock_irq(struct rq *rq)
{
	raw_spin_rq_unlock(rq);
	local_irq_enable();
}
|
||||
|
||||
/*
 * Save the local interrupt state, disable interrupts, and acquire @rq's
 * lock. Returns the saved flags for a later
 * raw_spin_rq_unlock_irqrestore().
 *
 * Returns the flags by value (instead of through an lvalue macro
 * argument, as local_irq_save() does); the raw_spin_rq_lock_irqsave()
 * macro restores the conventional calling form.
 */
static inline unsigned long _raw_spin_rq_lock_irqsave(struct rq *rq)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	raw_spin_rq_lock(rq);

	return irqflags;
}
|
||||
|
||||
/*
 * Release @rq's lock and restore the interrupt state previously saved
 * by raw_spin_rq_lock_irqsave(); unlock strictly before restoring.
 */
static inline void raw_spin_rq_unlock_irqrestore(struct rq *rq, unsigned long flags)
{
	raw_spin_rq_unlock(rq);
	local_irq_restore(flags);
}
|
||||
|
||||
/*
 * Macro wrapper so callers can write the conventional
 * raw_spin_rq_lock_irqsave(rq, flags) form with @flags as an lvalue,
 * while the work happens in the type-checked inline helper.
 */
#define raw_spin_rq_lock_irqsave(rq, flags)	\
do {						\
	flags = _raw_spin_rq_lock_irqsave(rq);	\
} while (0)
|
||||
|
||||
#ifdef CONFIG_SCHED_SMT
|
||||
extern void __update_idle_core(struct rq *rq);
|
||||
|
||||
|
|
Loading…
Reference in New Issue