block: extend queue_flag bitops
Add test_and_clear and test_and_set.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent e180f59493
commit e48ec69005
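The conversion below is mechanical: each open-coded test_bit() on q->queue_flags followed by a separate __set_bit(), queue_flag_set() or queue_flag_clear() collapses into a single queue_flag_test_and_set() or queue_flag_test_and_clear() call. As a minimal sketch of the resulting calling convention -- reconstructed from the call sites in the diff, not taken verbatim from the commit; both helpers assume the caller holds q->queue_lock:

        spin_lock_irq(q->queue_lock);

        /* Returns the old flag state: 0 means this call set it, so we arm the timer. */
        if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q))
                mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);

        /* Returns 1 only if the flag was set and this call cleared it. */
        if (queue_flag_test_and_clear(QUEUE_FLAG_PLUGGED, q))
                del_timer(&q->unplug_timer);

        spin_unlock_irq(q->queue_lock);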
diff --git a/block/blk-core.c b/block/blk-core.c
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -205,8 +205,7 @@ void blk_plug_device(struct request_queue *q)
 	if (blk_queue_stopped(q))
 		return;
 
-	if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
-		__set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags);
+	if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) {
 		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
 		blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
 	}
@@ -221,10 +220,9 @@ int blk_remove_plug(struct request_queue *q)
 {
 	WARN_ON(!irqs_disabled());
 
-	if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
+	if (!queue_flag_test_and_clear(QUEUE_FLAG_PLUGGED, q))
 		return 0;
 
-	queue_flag_clear(QUEUE_FLAG_PLUGGED, q);
 	del_timer(&q->unplug_timer);
 	return 1;
 }
@@ -328,8 +326,7 @@ void blk_start_queue(struct request_queue *q)
 	 * one level of recursion is ok and is much faster than kicking
 	 * the unplug handling
 	 */
-	if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
-		queue_flag_set(QUEUE_FLAG_REENTER, q);
+	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
 		q->request_fn(q);
 		queue_flag_clear(QUEUE_FLAG_REENTER, q);
 	} else {
@@ -394,8 +391,7 @@ void __blk_run_queue(struct request_queue *q)
 	 * handling reinvoke the handler shortly if we already got there.
 	 */
 	if (!elv_queue_empty(q)) {
-		if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
-			queue_flag_set(QUEUE_FLAG_REENTER, q);
+		if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
 			q->request_fn(q);
 			queue_flag_clear(QUEUE_FLAG_REENTER, q);
 		} else {
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -428,6 +428,32 @@ static inline void queue_flag_set_unlocked(unsigned int flag,
 	__set_bit(flag, &q->queue_flags);
 }
 
+static inline int queue_flag_test_and_clear(unsigned int flag,
+					    struct request_queue *q)
+{
+	WARN_ON_ONCE(!queue_is_locked(q));
+
+	if (test_bit(flag, &q->queue_flags)) {
+		__clear_bit(flag, &q->queue_flags);
+		return 1;
+	}
+
+	return 0;
+}
+
+static inline int queue_flag_test_and_set(unsigned int flag,
+					  struct request_queue *q)
+{
+	WARN_ON_ONCE(!queue_is_locked(q));
+
+	if (!test_bit(flag, &q->queue_flags)) {
+		__set_bit(flag, &q->queue_flags);
+		return 0;
+	}
+
+	return 1;
+}
+
 static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
 {
 	WARN_ON_ONCE(!queue_is_locked(q));
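Note that both helpers use the non-atomic __set_bit()/__clear_bit() variants: every caller must already hold q->queue_lock, which the WARN_ON_ONCE(!queue_is_locked(q)) check asserts, so the lock serializes all queue_flags updates and the costlier atomic test_and_set_bit()/test_and_clear_bit() operations would buy nothing here. The return values follow the atomic bitops convention of reporting the previous flag state, which is what lets the blk-core.c call sites above drop their separate queue_flag_set()/queue_flag_clear() calls.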