blk-mq: handle the single queue case in blk_mq_hctx_next_cpu
Don't duplicate the code that handles the not-CPU-bound case in the
callers; do it inside blk_mq_hctx_next_cpu instead.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 5fabcb4c33
commit b657d7e632
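The change is behavior-preserving in the single-queue case because queueing delayed work without a CPU argument already targets WORK_CPU_UNBOUND. For reference, roughly how the workqueue wrapper looked in kernels of this era (abridged from include/linux/workqueue.h; treat the exact signature as an approximation):

	/* queue_delayed_work() is a thin wrapper that targets no particular
	 * CPU, so kblockd_schedule_delayed_work(&work, 0) and
	 * kblockd_schedule_delayed_work_on(WORK_CPU_UNBOUND, &work, 0)
	 * queue the work the same way. */
	static inline bool queue_delayed_work(struct workqueue_struct *wq,
					      struct delayed_work *dwork,
					      unsigned long delay)
	{
		return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
	}

Returning WORK_CPU_UNBOUND from blk_mq_hctx_next_cpu() when nr_hw_queues == 1 therefore lets both call sites in the diff below collapse to a single kblockd_schedule_delayed_work_on() call.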
block/blk-mq.c
@@ -788,10 +788,11 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
  */
 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
 {
-	int cpu = hctx->next_cpu;
+	if (hctx->queue->nr_hw_queues == 1)
+		return WORK_CPU_UNBOUND;
 
 	if (--hctx->next_cpu_batch <= 0) {
-		int next_cpu;
+		int cpu = hctx->next_cpu, next_cpu;
 
 		next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
 		if (next_cpu >= nr_cpu_ids)
@@ -799,9 +800,11 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
 
 		hctx->next_cpu = next_cpu;
 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
+
+		return cpu;
 	}
 
-	return cpu;
+	return hctx->next_cpu;
 }
 
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
@@ -820,14 +823,8 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 		put_cpu();
 	}
 
-	if (hctx->queue->nr_hw_queues == 1)
-		kblockd_schedule_delayed_work(&hctx->run_work, 0);
-	else {
-		unsigned int cpu;
-
-		cpu = blk_mq_hctx_next_cpu(hctx);
-		kblockd_schedule_delayed_work_on(cpu, &hctx->run_work, 0);
-	}
+	kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
+			&hctx->run_work, 0);
 }
 
 void blk_mq_run_queues(struct request_queue *q, bool async)
@@ -919,16 +916,8 @@ static void blk_mq_delay_work_fn(struct work_struct *work)
 
 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
 {
-	unsigned long tmo = msecs_to_jiffies(msecs);
-
-	if (hctx->queue->nr_hw_queues == 1)
-		kblockd_schedule_delayed_work(&hctx->delay_work, tmo);
-	else {
-		unsigned int cpu;
-
-		cpu = blk_mq_hctx_next_cpu(hctx);
-		kblockd_schedule_delayed_work_on(cpu, &hctx->delay_work, tmo);
-	}
+	kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
+			&hctx->delay_work, msecs_to_jiffies(msecs));
 }
 EXPORT_SYMBOL(blk_mq_delay_queue);
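For multi-queue devices the consolidated helper hands out hctx->next_cpu for BLK_MQ_CPU_WORK_BATCH consecutive runs, then advances round-robin through hctx->cpumask. A minimal userspace sketch of that selection logic (not kernel code: the cpumask is simplified to a plain bitmask, WORK_CPU_UNBOUND to -1, and the batch size shrunk to 3 so the rotation is visible):

	#include <stdio.h>

	#define WORK_CPU_UNBOUND      (-1)  /* stand-in; the kernel uses a special CPU index */
	#define BLK_MQ_CPU_WORK_BATCH 3     /* the kernel default is 8; 3 keeps the demo short */

	struct fake_hctx {
		int nr_hw_queues;
		int next_cpu;
		int next_cpu_batch;
		unsigned long cpumask;      /* bit n set => CPU n is allowed */
	};

	/* next set bit strictly after 'cpu', or nr_cpus if there is none */
	static int mask_next(int cpu, unsigned long mask, int nr_cpus)
	{
		for (int c = cpu + 1; c < nr_cpus; c++)
			if (mask & (1UL << c))
				return c;
		return nr_cpus;
	}

	/* mirrors the post-patch blk_mq_hctx_next_cpu() */
	static int next_cpu(struct fake_hctx *h, int nr_cpus)
	{
		if (h->nr_hw_queues == 1)
			return WORK_CPU_UNBOUND;

		if (--h->next_cpu_batch <= 0) {
			int cpu = h->next_cpu, next;

			next = mask_next(h->next_cpu, h->cpumask, nr_cpus);
			if (next >= nr_cpus)            /* wrap around to the first CPU */
				next = mask_next(-1, h->cpumask, nr_cpus);

			h->next_cpu = next;
			h->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
			return cpu;                     /* last run of the old batch */
		}

		return h->next_cpu;
	}

	int main(void)
	{
		struct fake_hctx h = {
			.nr_hw_queues = 2,
			.next_cpu = 0,
			.next_cpu_batch = BLK_MQ_CPU_WORK_BATCH,
			.cpumask = 0xb,                 /* CPUs 0, 1 and 3 */
		};

		for (int i = 0; i < 10; i++)
			printf("run %2d -> cpu %d\n", i, next_cpu(&h, 4));
		return 0;
	}

Built and run, this prints cpu 0 three times, then cpu 1 three times, then cpu 3 three times, then wraps back to cpu 0: batches of BLK_MQ_CPU_WORK_BATCH runs per CPU before rotating through the mask.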