blk_mq: call preempt_disable/enable in blk_mq_run_hw_queue, and only if needed

preempt_disable/enable currently surrounds every call to blk_mq_run_hw_queue
except the one in blk-flush.c.  That call is always asynchronous, so it
never consults smp_processor_id() and needs no protection against migration.

We can do the same for all the other callers by moving the
preempt_disable/enable pair into blk_mq_run_hw_queue itself and skipping it
when async is true: only the synchronous path tests smp_processor_id()
against hctx->cpumask.  This avoids peppering blk-mq.c with
preemption-disabled regions.
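
For reference, the resulting control flow in blk_mq_run_hw_queue is roughly
the following (a condensed sketch assembled from the first hunk below; the
comments are added here and are not part of the patch, and the multi-queue
scheduling branch is elided):

	void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
	{
		if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
			return;

		if (!async) {
			/*
			 * smp_processor_id() is only stable while the task
			 * cannot migrate, so pin it just for the check.
			 */
			preempt_disable();
			if (cpumask_test_cpu(smp_processor_id(), hctx->cpumask)) {
				/* Already on a CPU this hctx maps to: run inline. */
				__blk_mq_run_hw_queue(hctx);
				preempt_enable();
				return;
			}
			preempt_enable();
		}

		/* Asynchronous (or wrong-CPU) path: punt to kblockd instead. */
		if (hctx->queue->nr_hw_queues == 1)
			kblockd_schedule_delayed_work(&hctx->run_work, 0);
		else {
			/* ... pick a CPU from hctx->cpumask and schedule there ... */
		}
	}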

Cc: Jens Axboe <axboe@kernel.dk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Reported-by: Clark Williams <williams@redhat.com>
Tested-by: Clark Williams <williams@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Author:    Paolo Bonzini <pbonzini@redhat.com>
Date:      2014-11-07 23:03:59 +01:00
Committer: Jens Axboe <axboe@fb.com>
Parent:    9c6ac78eb3
Commit:    398205b839

1 changed file with 12 additions and 9 deletions

@@ -801,9 +801,18 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
 		return;
 
-	if (!async && cpumask_test_cpu(smp_processor_id(), hctx->cpumask))
-		__blk_mq_run_hw_queue(hctx);
-	else if (hctx->queue->nr_hw_queues == 1)
+	if (!async) {
+		preempt_disable();
+		if (cpumask_test_cpu(smp_processor_id(), hctx->cpumask)) {
+			__blk_mq_run_hw_queue(hctx);
+			preempt_enable();
+			return;
+		}
+
+		preempt_enable();
+	}
+
+	if (hctx->queue->nr_hw_queues == 1)
 		kblockd_schedule_delayed_work(&hctx->run_work, 0);
 	else {
 		unsigned int cpu;
@@ -824,9 +833,7 @@ void blk_mq_run_queues(struct request_queue *q, bool async)
 		    test_bit(BLK_MQ_S_STOPPED, &hctx->state))
 			continue;
 
-		preempt_disable();
 		blk_mq_run_hw_queue(hctx, async);
-		preempt_enable();
 	}
 }
 EXPORT_SYMBOL(blk_mq_run_queues);
@@ -853,9 +860,7 @@ void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
 {
 	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
 
-	preempt_disable();
 	blk_mq_run_hw_queue(hctx, false);
-	preempt_enable();
 }
 EXPORT_SYMBOL(blk_mq_start_hw_queue);
@@ -880,9 +885,7 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
 			continue;
 
 		clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
-		preempt_disable();
 		blk_mq_run_hw_queue(hctx, async);
-		preempt_enable();
 	}
 }
 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
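
A closing note on the idiom: calling smp_processor_id() from preemptible
context is exactly what CONFIG_DEBUG_PREEMPT flags ("BUG: using
smp_processor_id() in preemptible"), because the task may migrate right
after the id is read, making the cpumask test stale.  An equivalent way to
write the pinned check is get_cpu()/put_cpu(), which bundle
preempt_disable/enable with the id read; a minimal illustration, not part
of this patch:

	int cpu = get_cpu();	/* preempt_disable() + smp_processor_id() */
	if (cpumask_test_cpu(cpu, hctx->cpumask))
		__blk_mq_run_hw_queue(hctx);
	put_cpu();		/* preempt_enable() */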