dm: do not initialise full request queue when bio based
Change bio-based mapped devices no longer to have a fully initialized
request_queue (request_fn, elevator, etc).  This means bio-based DM
devices no longer register elevator sysfs attributes ('iosched/' tree
or 'scheduler' other than "none").

In contrast, a request-based DM device will continue to have a full
request_queue and will register elevator sysfs attributes.  Therefore
a user can determine a DM device's type by checking if elevator sysfs
attributes exist.

First allocate a minimalist request_queue structure for a DM device
(needed for both bio and request-based DM).

Initialization of a full request_queue is deferred until it is known
that the DM device is request-based, at the end of the table load
sequence.

Factor DM device's request_queue initialization:
- common to both request-based and bio-based into dm_init_md_queue().
- specific to request-based into dm_init_request_based_queue().

The md->type_lock mutex is used to protect md->queue, in addition to
md->type, during table_load().

A DM device's first table_load will establish the immutable md->type.
But md->queue initialization, based on md->type, may fail at that time
(because blk_init_allocated_queue cannot allocate memory).  Therefore
any subsequent table_load must (re)try dm_setup_md_queue independently
of establishing md->type.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Acked-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
parent a5664dad7e
commit 4a0b4ddf26
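As the message notes, the presence of elevator sysfs attributes now distinguishes the two device types from userspace. A minimal sketch of that check, assuming a device named dm-0 and the usual /sys/block/<dev>/queue/iosched layout (both assumptions are illustrative, not part of this commit):

	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* A request-based DM device registers an 'iosched/' tree
		 * under queue/; after this change a bio-based one does not. */
		if (access("/sys/block/dm-0/queue/iosched", F_OK) == 0)
			printf("dm-0 is request-based\n");
		else
			printf("dm-0 is bio-based\n");
		return 0;
	}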
drivers/md/dm-ioctl.c
@@ -1189,7 +1189,7 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
 		goto out;
 	}
 
-	/* Protect md->type against concurrent table loads. */
+	/* Protect md->type and md->queue against concurrent table loads. */
 	dm_lock_md_type(md);
 	if (dm_get_md_type(md) == DM_TYPE_NONE)
 		/* Initial table load: acquire type of table. */
@@ -1201,6 +1201,15 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
 		r = -EINVAL;
 		goto out;
 	}
+
+	/* setup md->queue to reflect md's type (may block) */
+	r = dm_setup_md_queue(md);
+	if (r) {
+		DMWARN("unable to set up device queue for new table.");
+		dm_table_destroy(t);
+		dm_unlock_md_type(md);
+		goto out;
+	}
 	dm_unlock_md_type(md);
 
 	/* stage inactive table */
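Note that the setup call above is retried on every table load, not just the first: if queue initialization fails once, a later load must be able to attempt it again. A toy model of that retry property, using simplified stand-in types rather than the kernel structures (toy_queue and toy_full_init are illustrative names):

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-in for a request_queue: "elevator" marks full initialization. */
	struct toy_queue { bool elevator; };

	static bool toy_full_init(struct toy_queue *q, bool alloc_ok)
	{
		if (q->elevator)   /* already fully initialized: retry is a no-op */
			return true;
		if (!alloc_ok)     /* models blk_init_allocated_queue() failing */
			return false;
		q->elevator = true;
		return true;
	}

	int main(void)
	{
		struct toy_queue q = { .elevator = false };

		/* First table load: full init fails for lack of memory... */
		printf("load 1: %s\n", toy_full_init(&q, false) ? "ok" : "failed");
		/* ...so a later table load retries, and this time succeeds. */
		printf("load 2: %s\n", toy_full_init(&q, true) ? "ok" : "failed");
		return 0;
	}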
drivers/md/dm.c
@@ -126,7 +126,7 @@ struct mapped_device {
 
 	struct request_queue *queue;
 	unsigned type;
-	/* Protect type against concurrent access. */
+	/* Protect queue and type against concurrent access. */
 	struct mutex type_lock;
 
 	struct gendisk *disk;
@@ -1856,6 +1856,28 @@ static const struct block_device_operations dm_blk_dops;
 static void dm_wq_work(struct work_struct *work);
 static void dm_rq_barrier_work(struct work_struct *work);
 
+static void dm_init_md_queue(struct mapped_device *md)
+{
+	/*
+	 * Request-based dm devices cannot be stacked on top of bio-based dm
+	 * devices.  The type of this dm device has not been decided yet.
+	 * The type is decided at the first table loading time.
+	 * To prevent problematic device stacking, clear the queue flag
+	 * for request stacking support until then.
+	 *
+	 * This queue is new, so no concurrency on the queue_flags.
+	 */
+	queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
+
+	md->queue->queuedata = md;
+	md->queue->backing_dev_info.congested_fn = dm_any_congested;
+	md->queue->backing_dev_info.congested_data = md;
+	blk_queue_make_request(md->queue, dm_request);
+	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
+	md->queue->unplug_fn = dm_unplug_all;
+	blk_queue_merge_bvec(md->queue, dm_merge_bvec);
+}
+
 /*
  * Allocate and initialise a blank device with a given minor.
  */
@@ -1895,33 +1917,11 @@ static struct mapped_device *alloc_dev(int minor)
 	INIT_LIST_HEAD(&md->uevent_list);
 	spin_lock_init(&md->uevent_lock);
 
-	md->queue = blk_init_queue(dm_request_fn, NULL);
+	md->queue = blk_alloc_queue(GFP_KERNEL);
 	if (!md->queue)
 		goto bad_queue;
 
-	/*
-	 * Request-based dm devices cannot be stacked on top of bio-based dm
-	 * devices.  The type of this dm device has not been decided yet,
-	 * although we initialized the queue using blk_init_queue().
-	 * The type is decided at the first table loading time.
-	 * To prevent problematic device stacking, clear the queue flag
-	 * for request stacking support until then.
-	 *
-	 * This queue is new, so no concurrency on the queue_flags.
-	 */
-	queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
-	md->saved_make_request_fn = md->queue->make_request_fn;
-	md->queue->queuedata = md;
-	md->queue->backing_dev_info.congested_fn = dm_any_congested;
-	md->queue->backing_dev_info.congested_data = md;
-	blk_queue_make_request(md->queue, dm_request);
-	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
-	md->queue->unplug_fn = dm_unplug_all;
-	blk_queue_merge_bvec(md->queue, dm_merge_bvec);
-	blk_queue_softirq_done(md->queue, dm_softirq_done);
-	blk_queue_prep_rq(md->queue, dm_prep_fn);
-	blk_queue_lld_busy(md->queue, dm_lld_busy);
-	blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH);
+	dm_init_md_queue(md);
 
 	md->disk = alloc_disk(1);
 	if (!md->disk)
@@ -2160,6 +2160,48 @@ unsigned dm_get_md_type(struct mapped_device *md)
 	return md->type;
 }
 
+/*
+ * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
+ */
+static int dm_init_request_based_queue(struct mapped_device *md)
+{
+	struct request_queue *q = NULL;
+
+	if (md->queue->elevator)
+		return 1;
+
+	/* Fully initialize the queue */
+	q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
+	if (!q)
+		return 0;
+
+	md->queue = q;
+	md->saved_make_request_fn = md->queue->make_request_fn;
+	dm_init_md_queue(md);
+	blk_queue_softirq_done(md->queue, dm_softirq_done);
+	blk_queue_prep_rq(md->queue, dm_prep_fn);
+	blk_queue_lld_busy(md->queue, dm_lld_busy);
+	blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH);
+
+	elv_register_queue(md->queue);
+
+	return 1;
+}
+
+/*
+ * Setup the DM device's queue based on md's type
+ */
+int dm_setup_md_queue(struct mapped_device *md)
+{
+	if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) &&
+	    !dm_init_request_based_queue(md)) {
+		DMWARN("Cannot initialize queue for request-based mapped device");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static struct mapped_device *dm_find_md(dev_t dev)
 {
 	struct mapped_device *md;
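For context, dm_init_request_based_queue() completes the half of queue setup that alloc_dev() now skips: in this era of the block layer, blk_init_queue() amounts to blk_alloc_queue() followed by blk_init_allocated_queue(). A sketch of that relationship, where rfn and lock stand for a caller-supplied request_fn and spinlock (placeholders, not names from the patch):

	struct request_queue *q;

	/* One step: allocate and fully initialize (old alloc_dev() behaviour). */
	q = blk_init_queue(rfn, lock);

	/* Two steps: allocate a minimal queue now, defer full initialization
	 * until the DM device is known to be request-based (new behaviour). */
	q = blk_alloc_queue(GFP_KERNEL);
	if (q)
		q = blk_init_allocated_queue(q, rfn, lock);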
drivers/md/dm.h
@@ -71,6 +71,8 @@ void dm_unlock_md_type(struct mapped_device *md);
 void dm_set_md_type(struct mapped_device *md, unsigned type);
 unsigned dm_get_md_type(struct mapped_device *md);
 
+int dm_setup_md_queue(struct mapped_device *md);
+
 /*
  * To check the return value from dm_table_find_target().
  */