sched/wait: Add swq_has_sleeper()
Which is the equivalent of what we have in regular waitqueues. I'm not crazy about the name, but this also helps us get both apis closer -- which iirc comes originally from the -net folks.

We also duplicate the comments for the lockless swait_active(), from wait.h. Future users will make use of this interface.

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 8cd641e3c7
parent 3a8b0677fc
@@ -79,9 +79,63 @@ extern void __init_swait_queue_head(struct swait_queue_head *q, const char *name
 	DECLARE_SWAIT_QUEUE_HEAD(name)
 #endif
 
-static inline int swait_active(struct swait_queue_head *q)
+/**
+ * swait_active -- locklessly test for waiters on the queue
+ * @wq: the waitqueue to test for waiters
+ *
+ * returns true if the wait list is not empty
+ *
+ * NOTE: this function is lockless and requires care, incorrect usage _will_
+ * lead to sporadic and non-obvious failure.
+ *
+ * NOTE2: this function has the same above implications as regular waitqueues.
+ *
+ * Use either while holding swait_queue_head::lock or when used for wakeups
+ * with an extra smp_mb() like:
+ *
+ *      CPU0 - waker                    CPU1 - waiter
+ *
+ *                                      for (;;) {
+ *      @cond = true;                     prepare_to_swait(&wq_head, &wait, state);
+ *      smp_mb();                         // smp_mb() from set_current_state()
+ *      if (swait_active(wq_head))        if (@cond)
+ *        wake_up(wq_head);                 break;
+ *                                        schedule();
+ *                                      }
+ *                                      finish_swait(&wq_head, &wait);
+ *
+ * Because without the explicit smp_mb() it's possible for the
+ * swait_active() load to get hoisted over the @cond store such that we'll
+ * observe an empty wait list while the waiter might not observe @cond.
+ * This, in turn, can trigger missing wakeups.
+ *
+ * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
+ * which (when the lock is uncontended) are of roughly equal cost.
+ */
+static inline int swait_active(struct swait_queue_head *wq)
 {
-	return !list_empty(&q->task_list);
+	return !list_empty(&wq->task_list);
+}
+
+/**
+ * swq_has_sleeper - check if there are any waiting processes
+ * @wq: the waitqueue to test for waiters
+ *
+ * Returns true if @wq has waiting processes
+ *
+ * Please refer to the comment for swait_active.
+ */
+static inline bool swq_has_sleeper(struct swait_queue_head *wq)
+{
+	/*
+	 * We need to be sure we are in sync with the list_add()
+	 * modifications to the wait queue (task_list).
+	 *
+	 * This memory barrier should be paired with one on the
+	 * waiting side.
+	 */
+	smp_mb();
+	return swait_active(wq);
 }
 
 extern void swake_up(struct swait_queue_head *q);
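
For illustration, a minimal sketch of how a caller might use the new helper, mirroring the pattern documented in the swait_active() comment above. The names my_swq, my_cond, my_waker and my_waiter are hypothetical and not part of this patch; DECLARE_SWAITQUEUE(), prepare_to_swait(), finish_swait() and swake_up() are used as they exist in swait.h at the time of this commit.

#include <linux/swait.h>
#include <linux/sched.h>

/* Hypothetical example state, not part of the patch. */
static DECLARE_SWAIT_QUEUE_HEAD(my_swq);
static bool my_cond;

/*
 * Waker side: publish the condition first, then only take the wakeup
 * path if somebody is actually sleeping.  swq_has_sleeper() issues the
 * smp_mb() that pairs with the barrier implied by set_current_state()
 * inside prepare_to_swait() on the waiter side, so the store to
 * my_cond cannot be reordered past the lockless check of the wait list.
 */
static void my_waker(void)
{
	WRITE_ONCE(my_cond, true);
	if (swq_has_sleeper(&my_swq))
		swake_up(&my_swq);
}

/* Waiter side: the usual prepare/check/schedule loop. */
static void my_waiter(void)
{
	DECLARE_SWAITQUEUE(wait);

	for (;;) {
		prepare_to_swait(&my_swq, &wait, TASK_UNINTERRUPTIBLE);
		if (READ_ONCE(my_cond))
			break;
		schedule();
	}
	finish_swait(&my_swq, &wait);
}

Compared with calling swake_up() unconditionally, guarding it with swq_has_sleeper() avoids taking the queue lock when nobody is waiting, at the cost of an smp_mb() -- the trade-off the comment block describes.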