sched: Add needbreak for rwlocks
Contention awareness while holding a spin lock is essential for reducing
latency when long-running kernel operations can hold that lock. Add the
same contention detection interface for read/write spin locks.

CC: Ingo Molnar <mingo@redhat.com>
CC: Will Deacon <will@kernel.org>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Davidlohr Bueso <dbueso@suse.de>
Acked-by: Waiman Long <longman@redhat.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Ben Gardon <bgardon@google.com>
Message-Id: <20210202185734.1680553-8-bgardon@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 26128cb6c7
commit a09a689a53
@@ -1912,6 +1912,23 @@ static inline int spin_needbreak(spinlock_t *lock)
 #endif
 }
 
+/*
+ * Check if a rwlock is contended.
+ * Returns non-zero if there is another task waiting on the rwlock.
+ * Returns zero if the lock is not contended or the system / underlying
+ * rwlock implementation does not support contention detection.
+ * Technically does not depend on CONFIG_PREEMPTION, but a general need
+ * for low latency.
+ */
+static inline int rwlock_needbreak(rwlock_t *lock)
+{
+#ifdef CONFIG_PREEMPTION
+	return rwlock_is_contended(lock);
+#else
+	return 0;
+#endif
+}
+
 static __always_inline bool need_resched(void)
 {
 	return unlikely(tif_need_resched());
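As a usage sketch (not part of this patch; struct example_dev, its state_lock
field and scan_one_chunk() are hypothetical names used only for illustration),
a long-running kernel operation holding a read lock could poll the new
rwlock_needbreak() helper and briefly drop the lock whenever another task is
waiting, bounding the waiter's latency:

#include <linux/sched.h>
#include <linux/spinlock.h>

struct example_dev {
	rwlock_t state_lock;
	/* ... state walked by the scan ... */
};

/* Hypothetical helper: processes one chunk, returns true while work remains. */
bool scan_one_chunk(struct example_dev *dev);

static void long_running_scan(struct example_dev *dev)
{
	read_lock(&dev->state_lock);

	while (scan_one_chunk(dev)) {
		/*
		 * Another task is queued on the rwlock; drop the lock
		 * briefly (and allow rescheduling) so the waiter can
		 * make progress.
		 */
		if (rwlock_needbreak(&dev->state_lock)) {
			read_unlock(&dev->state_lock);
			cond_resched();
			read_lock(&dev->state_lock);
		}
	}

	read_unlock(&dev->state_lock);
}

This mirrors the existing spin_needbreak() pattern for spinlocks; when
CONFIG_PREEMPTION is not set, rwlock_needbreak() returns 0 and the yield
path is effectively compiled out.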