dm: introduce num_flush_requests
Introduce num_flush_requests, which a target sets to specify how many flush instructions (empty barriers) it wants to receive. These are sent by __clone_and_map_empty_barrier with map_info->flush_request running from 0 to (num_flush_requests - 1). Old targets without flush support will not receive any flush requests. Signed-off-by: Mikulas Patocka <mpatocka@redhat.com> Signed-off-by: Alasdair G Kergon <agk@redhat.com>
This commit is contained in:
parent
27eaa14975
commit
f9ab94cee3
|
@ -750,6 +750,40 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
|
||||||
return clone;
|
return clone;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void __flush_target(struct clone_info *ci, struct dm_target *ti,
|
||||||
|
unsigned flush_nr)
|
||||||
|
{
|
||||||
|
struct dm_target_io *tio = alloc_tio(ci->md);
|
||||||
|
struct bio *clone;
|
||||||
|
|
||||||
|
tio->io = ci->io;
|
||||||
|
tio->ti = ti;
|
||||||
|
|
||||||
|
memset(&tio->info, 0, sizeof(tio->info));
|
||||||
|
tio->info.flush_request = flush_nr;
|
||||||
|
|
||||||
|
clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
|
||||||
|
__bio_clone(clone, ci->bio);
|
||||||
|
clone->bi_destructor = dm_bio_destructor;
|
||||||
|
|
||||||
|
__map_bio(ti, clone, tio);
|
||||||
|
}
|
||||||
|
|
||||||
|
static int __clone_and_map_empty_barrier(struct clone_info *ci)
|
||||||
|
{
|
||||||
|
unsigned target_nr = 0, flush_nr;
|
||||||
|
struct dm_target *ti;
|
||||||
|
|
||||||
|
while ((ti = dm_table_get_target(ci->map, target_nr++)))
|
||||||
|
for (flush_nr = 0; flush_nr < ti->num_flush_requests;
|
||||||
|
flush_nr++)
|
||||||
|
__flush_target(ci, ti, flush_nr);
|
||||||
|
|
||||||
|
ci->sector_count = 0;
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
static int __clone_and_map(struct clone_info *ci)
|
static int __clone_and_map(struct clone_info *ci)
|
||||||
{
|
{
|
||||||
struct bio *clone, *bio = ci->bio;
|
struct bio *clone, *bio = ci->bio;
|
||||||
|
@ -757,6 +791,9 @@ static int __clone_and_map(struct clone_info *ci)
|
||||||
sector_t len = 0, max;
|
sector_t len = 0, max;
|
||||||
struct dm_target_io *tio;
|
struct dm_target_io *tio;
|
||||||
|
|
||||||
|
if (unlikely(bio_empty_barrier(bio)))
|
||||||
|
return __clone_and_map_empty_barrier(ci);
|
||||||
|
|
||||||
ti = dm_table_find_target(ci->map, ci->sector);
|
ti = dm_table_find_target(ci->map, ci->sector);
|
||||||
if (!dm_target_is_valid(ti))
|
if (!dm_target_is_valid(ti))
|
||||||
return -EIO;
|
return -EIO;
|
||||||
|
@ -877,6 +914,8 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
|
||||||
ci.io->md = md;
|
ci.io->md = md;
|
||||||
ci.sector = bio->bi_sector;
|
ci.sector = bio->bi_sector;
|
||||||
ci.sector_count = bio_sectors(bio);
|
ci.sector_count = bio_sectors(bio);
|
||||||
|
if (unlikely(bio_empty_barrier(bio)))
|
||||||
|
ci.sector_count = 1;
|
||||||
ci.idx = bio->bi_idx;
|
ci.idx = bio->bi_idx;
|
||||||
|
|
||||||
start_io_acct(ci.io);
|
start_io_acct(ci.io);
|
||||||
|
|
|
@ -21,6 +21,7 @@ typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
|
||||||
union map_info {
|
union map_info {
|
||||||
void *ptr;
|
void *ptr;
|
||||||
unsigned long long ll;
|
unsigned long long ll;
|
||||||
|
unsigned flush_request;
|
||||||
};
|
};
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -167,6 +168,16 @@ struct dm_target {
|
||||||
/* Always a power of 2 */
|
/* Always a power of 2 */
|
||||||
sector_t split_io;
|
sector_t split_io;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* A number of zero-length barrier requests that will be submitted
|
||||||
|
* to the target for the purpose of flushing cache.
|
||||||
|
*
|
||||||
|
* The request number will be placed in union map_info->flush_request.
|
||||||
|
* It is a responsibility of the target driver to remap these requests
|
||||||
|
* to the real underlying devices.
|
||||||
|
*/
|
||||||
|
unsigned num_flush_requests;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* These are automatically filled in by
|
* These are automatically filled in by
|
||||||
* dm_table_get_device.
|
* dm_table_get_device.
|
||||||
|
|
Loading…
Reference in New Issue