dm: allow immutable request-based targets to use blk-mq pdu
This will allow DM multipath to use a portion of the blk-mq pdu space
for target data (e.g. struct dm_mpath_io).

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 30187e1d48
commit 591ddcfc4b
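For orientation, here is a stand-alone sketch of the per-request layout this commit arranges: blk-mq allocates cmd_size bytes of per-request data (the "pdu"), DM's struct dm_rq_target_io sits at the front of it, and the immutable target's per-io data follows immediately behind. The struct names and fields below are illustrative stand-ins, not the kernel definitions.

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for struct dm_rq_target_io and struct dm_mpath_io. */
struct fake_tio { int error; void *info_ptr; };
struct fake_mpio { int pg_init_count; };

int main(void)
{
	/* mirrors: cmd_size = sizeof(tio) + immutable_tgt->per_io_data_size */
	size_t cmd_size = sizeof(struct fake_tio) + sizeof(struct fake_mpio);
	struct fake_tio *tio = calloc(1, cmd_size);

	/* mirrors dm_mq_init_request(): target data sits right after the tio */
	tio->info_ptr = tio + 1;

	printf("pdu is %zu bytes; target data starts at offset %zu\n",
	       cmd_size, (size_t)((char *)tio->info_ptr - (char *)tio));
	free(tio);
	return 0;
}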
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1304,7 +1304,7 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
 	dm_set_md_type(md, dm_table_get_type(t));
 
 	/* setup md->queue to reflect md's type (may block) */
-	r = dm_setup_md_queue(md);
+	r = dm_setup_md_queue(md, t);
 	if (r) {
 		DMWARN("unable to set up device queue for new table.");
 		goto err_unlock_md_type;
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -224,7 +224,8 @@ struct mapped_device {
 
 	/* for blk-mq request-based DM support */
 	struct blk_mq_tag_set *tag_set;
-	bool use_blk_mq;
+	bool use_blk_mq:1;
+	bool init_tio_pdu:1;
 };
 
 #ifdef CONFIG_DM_MQ_DEFAULT
@@ -243,6 +244,7 @@ bool dm_use_blk_mq(struct mapped_device *md)
 {
 	return md->use_blk_mq;
 }
+EXPORT_SYMBOL_GPL(dm_use_blk_mq);
 
 /*
  * For mempools pre-allocation at the table loading time.
@@ -1889,7 +1891,13 @@ static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
 	tio->clone = NULL;
 	tio->orig = rq;
 	tio->error = 0;
-	memset(&tio->info, 0, sizeof(tio->info));
+	/*
+	 * Avoid initializing info for blk-mq; it passes
+	 * target-specific data through info.ptr
+	 * (see: dm_mq_init_request)
+	 */
+	if (!md->init_tio_pdu)
+		memset(&tio->info, 0, sizeof(tio->info));
 	if (md->kworker_task)
 		init_kthread_work(&tio->work, map_tio_request);
 }
@@ -2313,6 +2321,7 @@ static struct mapped_device *alloc_dev(int minor)
 		goto bad_io_barrier;
 
 	md->use_blk_mq = use_blk_mq;
+	md->init_tio_pdu = false;
 	md->type = DM_TYPE_NONE;
 	mutex_init(&md->suspend_lock);
 	mutex_init(&md->type_lock);
@@ -2653,6 +2662,11 @@ static int dm_mq_init_request(void *data, struct request *rq,
 	 */
 	tio->md = md;
 
+	if (md->init_tio_pdu) {
+		/* target-specific per-io data is immediately after the tio */
+		tio->info.ptr = tio + 1;
+	}
+
 	return 0;
 }
 
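On the consuming side, a target reaches this space through the map_info it is handed. A minimal sketch, modeled on what dm-mpath does with struct dm_mpath_io; struct my_io and get_my_io() are hypothetical names, not part of this commit:

#include <linux/device-mapper.h>

/* Hypothetical per-io struct; dm-mpath's counterpart is struct dm_mpath_io. */
struct my_io {
	unsigned pg_init_count;
};

/*
 * dm_mq_init_request() stashed tio + 1 in info->ptr, so the target
 * reaches its pdu slice without any per-request allocation.
 */
static struct my_io *get_my_io(union map_info *info)
{
	return info->ptr;
}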
@@ -2704,7 +2718,8 @@ static struct blk_mq_ops dm_mq_ops = {
 	.init_request = dm_mq_init_request,
 };
 
-static int dm_mq_init_request_queue(struct mapped_device *md)
+static int dm_mq_init_request_queue(struct mapped_device *md,
+				    struct dm_target *immutable_tgt)
 {
 	struct request_queue *q;
 	int err;
@@ -2726,6 +2741,11 @@ static int dm_mq_init_request_queue(struct mapped_device *md)
 	md->tag_set->driver_data = md;
 
 	md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
+	if (immutable_tgt && immutable_tgt->per_io_data_size) {
+		/* any target-specific per-io data is immediately after the tio */
+		md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
+		md->init_tio_pdu = true;
+	}
 
 	err = blk_mq_alloc_tag_set(md->tag_set);
 	if (err)
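A target opts in by declaring its per-io size at construction time; when it is also the table's immutable request-based target, that space is now carved out of the blk-mq pdu rather than a mempool. A hedged sketch of the constructor side (my_target_ctr and struct my_io are stand-ins; dm-mpath sets per_io_data_size to sizeof(struct dm_mpath_io)):

static int my_target_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	/*
	 * Declare the per-io space this target needs; for an immutable
	 * request-based target it is folded into cmd_size above.
	 */
	ti->per_io_data_size = sizeof(struct my_io);

	/* ... remainder of the constructor elided ... */
	return 0;
}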
@@ -2763,7 +2783,7 @@ static unsigned filter_md_type(unsigned type, struct mapped_device *md)
 /*
  * Setup the DM device's queue based on md's type
  */
-int dm_setup_md_queue(struct mapped_device *md)
+int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
 {
 	int r;
 	unsigned md_type = filter_md_type(dm_get_md_type(md), md);
@@ -2777,7 +2797,7 @@ int dm_setup_md_queue(struct mapped_device *md)
 		}
 		break;
 	case DM_TYPE_MQ_REQUEST_BASED:
-		r = dm_mq_init_request_queue(md);
+		r = dm_mq_init_request_queue(md, dm_table_get_immutable_target(t));
 		if (r) {
 			DMERR("Cannot initialize queue for request-based dm-mq mapped device");
 			return r;
@@ -3505,8 +3525,7 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t
 		if (!pool_size)
 			pool_size = dm_get_reserved_rq_based_ios();
 		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
-		/* per_io_data_size is not used. */
-		WARN_ON(per_io_data_size != 0);
+		/* per_io_data_size is used for blk-mq pdu at queue allocation */
 		break;
 	default:
 		BUG();
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -86,7 +86,7 @@ void dm_set_md_type(struct mapped_device *md, unsigned type);
 unsigned dm_get_md_type(struct mapped_device *md);
 struct target_type *dm_get_immutable_target_type(struct mapped_device *md);
 
-int dm_setup_md_queue(struct mapped_device *md);
+int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);
 
 /*
  * To check the return value from dm_table_find_target().