dm table: set flush capability based on underlying devices
DM has always advertised both REQ_FLUSH and REQ_FUA flush capabilities regardless of whether or not a given DM device's underlying devices also advertised a need for them.

Block's flush-merge changes from 2.6.39 have proven to be more costly for DM devices: performance regressions have been reported even when DM's underlying devices do not advertise that they have a write cache.

Fix the performance regressions by configuring a DM device's flushing capabilities based on the capabilities of its underlying devices.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
commit ed8b752bcc
parent 772ae5f54d
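Background for the diff below: device_flush_capable() tests q->flush_flags, the per-queue record of what an underlying driver advertised via blk_queue_flush(). A minimal sketch of that advertisement, assuming a hypothetical driver (example_setup_queue is illustrative, not part of this patch):

#include <linux/blkdev.h>

/*
 * Hypothetical driver setup: declare a volatile write cache that
 * honours both cache flushes (REQ_FLUSH) and forced unit access
 * (REQ_FUA).  blk_queue_flush() records these bits in
 * q->flush_flags, which is exactly what device_flush_capable()
 * in the patch reads back when DM stacks on top of this device.
 */
static void example_setup_queue(struct request_queue *q)
{
	blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
}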
drivers/md/dm-table.c
@@ -1248,9 +1248,45 @@ static void dm_table_set_integrity(struct dm_table *t)
 			       blk_get_integrity(template_disk));
 }
 
+static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
+				sector_t start, sector_t len, void *data)
+{
+	unsigned flush = (*(unsigned *)data);
+	struct request_queue *q = bdev_get_queue(dev->bdev);
+
+	return q && (q->flush_flags & flush);
+}
+
+static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
+{
+	struct dm_target *ti;
+	unsigned i = 0;
+
+	/*
+	 * Require at least one underlying device to support flushes.
+	 * t->devices includes internal dm devices such as mirror logs
+	 * so we need to use iterate_devices here, which targets
+	 * supporting flushes must provide.
+	 */
+	while (i < dm_table_get_num_targets(t)) {
+		ti = dm_table_get_target(t, i++);
+
+		if (!ti->num_flush_requests)
+			continue;
+
+		if (ti->type->iterate_devices &&
+		    ti->type->iterate_devices(ti, device_flush_capable, &flush))
+			return 1;
+	}
+
+	return 0;
+}
+
 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 			       struct queue_limits *limits)
 {
+	unsigned flush = 0;
+
 	/*
 	 * Copy table's limits to the DM device's request_queue
 	 */
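A note on the helper above: dm_table_supports_flush() deliberately goes through each target's iterate_devices hook rather than walking t->devices directly, because the latter also contains internal DM devices such as mirror logs. A minimal sketch of that hook for a hypothetical single-device target (struct and names are illustrative, patterned on dm-linear):

/* Illustrative per-target context: one underlying device plus an
 * offset into it (hypothetical, mirroring the dm-linear layout). */
struct example_target_ctx {
	struct dm_dev *dev;
	sector_t start;
};

/*
 * The target reports each underlying device to the callback; for
 * flush stacking the callback is device_flush_capable(), so a
 * nonzero return means that device's queue has the requested
 * flush flag set.
 */
static int example_iterate_devices(struct dm_target *ti,
				   iterate_devices_callout_fn fn, void *data)
{
	struct example_target_ctx *tc = ti->private;

	return fn(ti, tc->dev, tc->start, ti->len, data);
}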
@@ -1261,6 +1297,13 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	else
 		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
 
+	if (dm_table_supports_flush(t, REQ_FLUSH)) {
+		flush |= REQ_FLUSH;
+		if (dm_table_supports_flush(t, REQ_FUA))
+			flush |= REQ_FUA;
+	}
+	blk_queue_flush(q, flush);
+
 	dm_table_set_integrity(t);
 
 	/*
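The nesting in this hunk encodes an ordering rule: REQ_FUA is only advertised together with REQ_FLUSH, matching the block layer's own constraint (blk_queue_flush() of this era drops REQ_FUA when it is passed without REQ_FLUSH). The possible outcomes for the DM queue, summarised:

/*
 * Summary of what the hunk above can set (illustrative):
 *
 *   no underlying device flush-capable  -> blk_queue_flush(q, 0)
 *   REQ_FLUSH supported, REQ_FUA not    -> blk_queue_flush(q, REQ_FLUSH)
 *   REQ_FLUSH and REQ_FUA supported     -> blk_queue_flush(q, REQ_FLUSH | REQ_FUA)
 */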
drivers/md/dm.c
@@ -1808,7 +1808,6 @@ static void dm_init_md_queue(struct mapped_device *md)
 	blk_queue_make_request(md->queue, dm_request);
 	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
 	blk_queue_merge_bvec(md->queue, dm_merge_bvec);
-	blk_queue_flush(md->queue, REQ_FLUSH | REQ_FUA);
 }
 
 /*
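The dm.c hunk drops the unconditional REQ_FLUSH | REQ_FUA advertisement from queue initialisation; after this patch the flags are computed whenever a table is bound to the device. A rough sketch of the resulting flow (a summary assuming this era's dm.c call chain, not a verbatim listing):

/*
 * Sketch of the call flow after this patch (assumed, not verbatim):
 *
 *   table load / resume
 *     -> __bind(md, t, &limits)
 *        -> dm_table_set_restrictions(t, md->queue, &limits)
 *           -> dm_table_supports_flush(t, REQ_FLUSH) / (t, REQ_FUA)
 *           -> blk_queue_flush(md->queue, flush)
 */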