// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to executing requests prepared by drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/sched/sysctl.h>

#include "blk.h"
#include "blk-mq-sched.h"

/**
 * blk_end_sync_rq - executes a completion event on a request
 * @rq: request to complete
 * @error: end I/O status of the request
 */
static void blk_end_sync_rq(struct request *rq, blk_status_t error)
{
	struct completion *waiting = rq->end_io_data;

	rq->end_io_data = (void *)(uintptr_t)error;

	/*
	 * complete last: if this is a stacked request, the process (and thus
	 * the rq pointer) could become invalid right after this complete()
	 */
	complete(waiting);
}

/**
 * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
 * @bd_disk: matching gendisk
 * @rq: request to insert
 * @at_head: insert request at head or tail of queue
 * @done: I/O completion handler
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution. Don't wait for completion.
 *
 * Note:
 *    This function will invoke @done directly if the queue is dead.
 */
void blk_execute_rq_nowait(struct gendisk *bd_disk, struct request *rq,
			   int at_head, rq_end_io_fn *done)
{
	WARN_ON(irqs_disabled());
	WARN_ON(!blk_rq_is_passthrough(rq));

	rq->rq_disk = bd_disk;
	rq->end_io = done;

	blk_account_io_start(rq);

	/*
	 * don't check the dying flag for MQ because the request won't
	 * be reused after the dying flag is set
	 */
	blk_mq_sched_insert_request(rq, at_head, true, false);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
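
/*
 * Example usage (an illustrative sketch, not part of the original file):
 * a driver issuing an asynchronous passthrough request might do roughly
 * the following, where q, disk, my_data and my_end_io() are assumed to be
 * provided by the driver (my_end_io being of type rq_end_io_fn):
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	rq->end_io_data = my_data;
 *	blk_execute_rq_nowait(disk, rq, 0, my_end_io);
 *
 * my_end_io() is then called from completion context and typically frees
 * the request with blk_mq_free_request() once it is done with it.
 */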
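
/*
 * A request can only be completed by polling if it was mapped to a poll
 * hardware queue and still has a bio to poll on.
 */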
static bool blk_rq_is_poll(struct request *rq)
{
	if (!rq->mq_hctx)
		return false;
	if (rq->mq_hctx->type != HCTX_TYPE_POLL)
		return false;
	if (WARN_ON_ONCE(!rq->bio))
		return false;
	return true;
}
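
/*
 * Busy-poll for the request's completion: repeatedly poll the request's
 * bio, giving up the CPU between attempts, until blk_end_sync_rq() has
 * fired the completion.
 */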
static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
{
	do {
		bio_poll(rq->bio, NULL, 0);
		cond_resched();
	} while (!completion_done(wait));
}
/**
 * blk_execute_rq - insert a request into queue for execution
 * @bd_disk: matching gendisk
 * @rq: request to insert
 * @at_head: insert request at head or tail of queue
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution and wait for completion.
 * Return: The blk_status_t result provided to blk_mq_end_request().
 */
blk_status_t blk_execute_rq(struct gendisk *bd_disk, struct request *rq, int at_head)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long hang_check;

	rq->end_io_data = &wait;
	blk_execute_rq_nowait(bd_disk, rq, at_head, blk_end_sync_rq);

	/* Prevent hang_check timer from firing at us during very long I/O */
	hang_check = sysctl_hung_task_timeout_secs;

	if (blk_rq_is_poll(rq))
		blk_rq_poll_completion(rq, &wait);
	else if (hang_check)
		while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2)));
	else
		wait_for_completion_io(&wait);

	return (blk_status_t)(uintptr_t)rq->end_io_data;
}
EXPORT_SYMBOL(blk_execute_rq);
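
/*
 * Example usage (an illustrative sketch, not part of the original file):
 * a synchronous passthrough request could be issued roughly as follows,
 * where q and disk are assumed to be the caller's request_queue and
 * gendisk:
 *
 *	struct request *rq;
 *	blk_status_t status;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	... driver-private command setup goes here ...
 *	status = blk_execute_rq(disk, rq, 0);
 *	blk_mq_free_request(rq);
 *	return blk_status_to_errno(status);
 */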