block: merge blk_invoke_request_fn() into __blk_run_queue()
__blk_run_queue() wraps blk_invoke_request_fn() such that it additionally removes the plug and bails out early if the queue is empty. Both extra operations have their own pending mechanisms and don't cause any harm correctness-wise when they are performed superfluously.

The only user of blk_invoke_request_fn() is blk_start_queue(), so there isn't much reason to keep both functions around. Merge blk_invoke_request_fn() into __blk_run_queue() and make blk_start_queue() use __blk_run_queue() instead.

[ Impact: merge two subtly different internal functions ]

Signed-off-by: Tejun Heo <tj@kernel.org>
parent db2dbb12dc
commit a538cd03be
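For context, here is a minimal usage sketch (not part of this patch) of the calling convention the merge preserves: a driver restarting a stopped queue must hold the queue lock with interrupts disabled, and after this change blk_start_queue() ends in __blk_run_queue(). The my_dev structure and my_dev_resume() are hypothetical names for illustration.

/*
 * Hypothetical driver fragment -- my_dev and my_dev_resume() are
 * illustrative, not from this patch.
 */
#include <linux/blkdev.h>
#include <linux/spinlock.h>

struct my_dev {
        struct request_queue *queue;
};

static void my_dev_resume(struct my_dev *dev)
{
        unsigned long flags;

        /* blk_start_queue() requires the queue lock held, irqs off */
        spin_lock_irqsave(dev->queue->queue_lock, flags);
        blk_start_queue(dev->queue);    /* now kicks __blk_run_queue() */
        spin_unlock_irqrestore(dev->queue->queue_lock, flags);
}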
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -333,24 +333,6 @@ void blk_unplug(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_unplug);
 
-static void blk_invoke_request_fn(struct request_queue *q)
-{
-        if (unlikely(blk_queue_stopped(q)))
-                return;
-
-        /*
-         * one level of recursion is ok and is much faster than kicking
-         * the unplug handling
-         */
-        if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
-                q->request_fn(q);
-                queue_flag_clear(QUEUE_FLAG_REENTER, q);
-        } else {
-                queue_flag_set(QUEUE_FLAG_PLUGGED, q);
-                kblockd_schedule_work(q, &q->unplug_work);
-        }
-}
-
 /**
  * blk_start_queue - restart a previously stopped queue
  * @q: The &struct request_queue in question
@@ -365,7 +347,7 @@ void blk_start_queue(struct request_queue *q)
         WARN_ON(!irqs_disabled());
 
         queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-        blk_invoke_request_fn(q);
+        __blk_run_queue(q);
 }
 EXPORT_SYMBOL(blk_start_queue);
 
@@ -425,12 +407,23 @@ void __blk_run_queue(struct request_queue *q)
 {
         blk_remove_plug(q);
 
+        if (unlikely(blk_queue_stopped(q)))
+                return;
+
+        if (elv_queue_empty(q))
+                return;
+
         /*
          * Only recurse once to avoid overrunning the stack, let the unplug
          * handling reinvoke the handler shortly if we already got there.
          */
-        if (!elv_queue_empty(q))
-                blk_invoke_request_fn(q);
+        if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+                q->request_fn(q);
+                queue_flag_clear(QUEUE_FLAG_REENTER, q);
+        } else {
+                queue_flag_set(QUEUE_FLAG_PLUGGED, q);
+                kblockd_schedule_work(q, &q->unplug_work);
+        }
 }
 EXPORT_SYMBOL(__blk_run_queue);
 
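The core technique the merged function carries over is the one-level recursion guard: the first caller sets the REENTER flag and invokes the handler directly, while a caller that finds the flag already set defers the work to kblockd instead of recursing deeper. Below is a self-contained userspace C sketch of that pattern; run_handler() and defer_work() are made-up stand-ins for q->request_fn() and kblockd_schedule_work(), and a plain bool replaces the flag bit the kernel manipulates under the queue lock.

#include <stdbool.h>
#include <stdio.h>

static bool reenter;    /* plays the role of QUEUE_FLAG_REENTER */

static void run_queue(int depth);

/* stand-in for q->request_fn(); re-kicks the queue once, as a
 * request handler might via the unplug path */
static void run_handler(int depth)
{
        printf("handler at depth %d\n", depth);
        if (depth == 0)
                run_queue(depth + 1);
}

/* stand-in for kblockd_schedule_work() */
static void defer_work(void)
{
        printf("deferred to worker instead of recursing\n");
}

static void run_queue(int depth)
{
        if (!reenter) {         /* test-and-set: allow one level only */
                reenter = true;
                run_handler(depth);
                reenter = false;
        } else {
                defer_work();
        }
}

int main(void)
{
        run_queue(0);
        return 0;
}

Running this prints "handler at depth 0" followed by "deferred to worker instead of recursing": the nested call never reaches the handler a second time, which is exactly how __blk_run_queue() bounds stack usage while still guaranteeing the queue gets run shortly afterwards.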