Merge tag 'for-5.19/dm-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fixes from Mike Snitzer:

 - Fix DM core's bioset initialization so that the blk integrity pool is
   properly set up. Remove the now-unused bioset_init_from_src.

 - Fix DM zoned hang from a locking imbalance due to a needless check in
   clone_endio().

* tag 'for-5.19/dm-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm: fix zoned locking imbalance due to needless check in clone_endio
  block: remove bioset_init_from_src
  dm: fix bio_set allocation
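Note: the removed bioset_init_from_src() helper (first hunk below) did nothing
more than translate an existing bio_set's state into bioset_init() flags. Any
caller that still needs that behavior can open-code the translation; a minimal
kernel-context sketch mirroring the removed body, with bs/src as placeholder
bio_sets:

	int flags = 0;

	if (src->bvec_pool.min_nr)		/* src pre-allocated biovecs */
		flags |= BIOSET_NEED_BVECS;
	if (src->rescue_workqueue)		/* src used a rescuer workqueue */
		flags |= BIOSET_NEED_RESCUER;
	if (src->cache)				/* src used the per-cpu bio cache */
		flags |= BIOSET_PERCPU_CACHE;

	ret = bioset_init(bs, src->bio_pool.min_nr, src->front_pad, flags);

This translation is also the bug the pull message describes: nothing here
carries over a bio integrity mempool, so a bio_set initialized from a source
silently lost blk integrity support.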
commit 90add6d418

diff --git a/block/bio.c b/block/bio.c
@@ -1747,26 +1747,6 @@ bad:
 }
 EXPORT_SYMBOL(bioset_init);
 
-/*
- * Initialize and setup a new bio_set, based on the settings from
- * another bio_set.
- */
-int bioset_init_from_src(struct bio_set *bs, struct bio_set *src)
-{
-	int flags;
-
-	flags = 0;
-	if (src->bvec_pool.min_nr)
-		flags |= BIOSET_NEED_BVECS;
-	if (src->rescue_workqueue)
-		flags |= BIOSET_NEED_RESCUER;
-	if (src->cache)
-		flags |= BIOSET_PERCPU_CACHE;
-
-	return bioset_init(bs, src->bio_pool.min_nr, src->front_pad, flags);
-}
-EXPORT_SYMBOL(bioset_init_from_src);
-
 static int __init init_bio(void)
 {
 	int i;
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
@@ -33,6 +33,14 @@ struct dm_kobject_holder {
  * access their members!
  */
 
+/*
+ * For mempools pre-allocation at the table loading time.
+ */
+struct dm_md_mempools {
+	struct bio_set bs;
+	struct bio_set io_bs;
+};
+
 struct mapped_device {
 	struct mutex suspend_lock;
 
@@ -110,8 +118,7 @@ struct mapped_device {
 	/*
 	 * io objects are allocated from here.
 	 */
-	struct bio_set io_bs;
-	struct bio_set bs;
+	struct dm_md_mempools *mempools;
 
 	/* kobject and completion */
 	struct dm_kobject_holder kobj_holder;
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
@@ -319,7 +319,7 @@ static int setup_clone(struct request *clone, struct request *rq,
 {
 	int r;
 
-	r = blk_rq_prep_clone(clone, rq, &tio->md->bs, gfp_mask,
+	r = blk_rq_prep_clone(clone, rq, &tio->md->mempools->bs, gfp_mask,
 			      dm_rq_bio_constructor, tio);
 	if (r)
 		return r;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
@@ -1038,17 +1038,6 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
 	return 0;
 }
 
-void dm_table_free_md_mempools(struct dm_table *t)
-{
-	dm_free_md_mempools(t->mempools);
-	t->mempools = NULL;
-}
-
-struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
-{
-	return t->mempools;
-}
-
 static int setup_indexes(struct dm_table *t)
 {
 	int i;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
@@ -136,14 +136,6 @@ static int get_swap_bios(void)
 	return latch;
 }
 
-/*
- * For mempools pre-allocation at the table loading time.
- */
-struct dm_md_mempools {
-	struct bio_set bs;
-	struct bio_set io_bs;
-};
-
 struct table_device {
 	struct list_head list;
 	refcount_t count;
@@ -581,7 +573,7 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
 	struct dm_target_io *tio;
 	struct bio *clone;
 
-	clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->io_bs);
+	clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->mempools->io_bs);
 	/* Set default bdev, but target must bio_set_dev() before issuing IO */
 	clone->bi_bdev = md->disk->part0;
 
@@ -628,7 +620,8 @@ static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
 	} else {
 		struct mapped_device *md = ci->io->md;
 
-		clone = bio_alloc_clone(NULL, ci->bio, gfp_mask, &md->bs);
+		clone = bio_alloc_clone(NULL, ci->bio, gfp_mask,
+					&md->mempools->bs);
 		if (!clone)
 			return NULL;
 		/* Set default bdev, but target must bio_set_dev() before issuing IO */
@@ -1023,22 +1016,18 @@ static void clone_endio(struct bio *bio)
 	struct dm_io *io = tio->io;
 	struct mapped_device *md = io->md;
 
-	if (likely(bio->bi_bdev != md->disk->part0)) {
-		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-
-		if (unlikely(error == BLK_STS_TARGET)) {
-			if (bio_op(bio) == REQ_OP_DISCARD &&
-			    !bdev_max_discard_sectors(bio->bi_bdev))
-				disable_discard(md);
-			else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
-				 !q->limits.max_write_zeroes_sectors)
-				disable_write_zeroes(md);
-		}
+	if (unlikely(error == BLK_STS_TARGET)) {
+		if (bio_op(bio) == REQ_OP_DISCARD &&
+		    !bdev_max_discard_sectors(bio->bi_bdev))
+			disable_discard(md);
+		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
+			 !bdev_write_zeroes_sectors(bio->bi_bdev))
+			disable_write_zeroes(md);
+	}
 
-		if (static_branch_unlikely(&zoned_enabled) &&
-		    unlikely(blk_queue_is_zoned(q)))
-			dm_zone_endio(io, bio);
-	}
+	if (static_branch_unlikely(&zoned_enabled) &&
+	    unlikely(blk_queue_is_zoned(bdev_get_queue(bio->bi_bdev))))
+		dm_zone_endio(io, bio);
 
 	if (endio) {
 		int r = endio(ti, bio, &error);
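Why the removed check caused a hang: on zoned devices the submission path
(dm_zone_map_bio()) takes a per-zone write lock that dm_zone_endio() releases
on completion. Gating the completion handling on bio->bi_bdev !=
md->disk->part0 meant a bio completed with the default bdev never reached
dm_zone_endio(), leaving its zone locked. A distilled sketch of the imbalance,
with hypothetical zone_lock()/zone_unlock() helpers standing in for the
kernel's dm-zone internals:

	/* submission side: always locks the zone */
	zone_lock(zone);
	submit_bio_noacct(clone);

	/* broken completion: unlock hidden behind an unrelated filter */
	if (bio->bi_bdev != default_bdev)
		zone_unlock(zone);	/* skipped for some bios: zone stays locked */

	/* fixed completion: pair the lock unconditionally */
	zone_unlock(zone);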
@@ -1876,8 +1865,7 @@ static void cleanup_mapped_device(struct mapped_device *md)
 {
 	if (md->wq)
 		destroy_workqueue(md->wq);
-	bioset_exit(&md->bs);
-	bioset_exit(&md->io_bs);
+	dm_free_md_mempools(md->mempools);
 
 	if (md->dax_dev) {
 		dax_remove_host(md->disk);
@@ -2049,48 +2037,6 @@ static void free_dev(struct mapped_device *md)
 	kvfree(md);
 }
 
-static int __bind_mempools(struct mapped_device *md, struct dm_table *t)
-{
-	struct dm_md_mempools *p = dm_table_get_md_mempools(t);
-	int ret = 0;
-
-	if (dm_table_bio_based(t)) {
-		/*
-		 * The md may already have mempools that need changing.
-		 * If so, reload bioset because front_pad may have changed
-		 * because a different table was loaded.
-		 */
-		bioset_exit(&md->bs);
-		bioset_exit(&md->io_bs);
-
-	} else if (bioset_initialized(&md->bs)) {
-		/*
-		 * There's no need to reload with request-based dm
-		 * because the size of front_pad doesn't change.
-		 * Note for future: If you are to reload bioset,
-		 * prep-ed requests in the queue may refer
-		 * to bio from the old bioset, so you must walk
-		 * through the queue to unprep.
-		 */
-		goto out;
-	}
-
-	BUG_ON(!p ||
-	       bioset_initialized(&md->bs) ||
-	       bioset_initialized(&md->io_bs));
-
-	ret = bioset_init_from_src(&md->bs, &p->bs);
-	if (ret)
-		goto out;
-	ret = bioset_init_from_src(&md->io_bs, &p->io_bs);
-	if (ret)
-		bioset_exit(&md->bs);
-out:
-	/* mempool bind completed, no longer need any mempools in the table */
-	dm_table_free_md_mempools(t);
-	return ret;
-}
-
 /*
  * Bind a table to the device.
  */
@@ -2144,12 +2090,28 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
 		 * immutable singletons - used to optimize dm_mq_queue_rq.
 		 */
 		md->immutable_target = dm_table_get_immutable_target(t);
-	}
 
-	ret = __bind_mempools(md, t);
-	if (ret) {
-		old_map = ERR_PTR(ret);
-		goto out;
+		/*
+		 * There is no need to reload with request-based dm because the
+		 * size of front_pad doesn't change.
+		 *
+		 * Note for future: If you are to reload bioset, prep-ed
+		 * requests in the queue may refer to bio from the old bioset,
+		 * so you must walk through the queue to unprep.
+		 */
+		if (!md->mempools) {
+			md->mempools = t->mempools;
+			t->mempools = NULL;
+		}
+	} else {
+		/*
+		 * The md may already have mempools that need changing.
+		 * If so, reload bioset because front_pad may have changed
+		 * because a different table was loaded.
+		 */
+		dm_free_md_mempools(md->mempools);
+		md->mempools = t->mempools;
+		t->mempools = NULL;
 	}
 
 	ret = dm_table_set_restrictions(t, md->queue, limits);
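Taken together, the dm.c changes replace per-device bioset re-initialization
with a wholesale ownership hand-off: the pools allocated at table load are
either adopted by the mapped_device or stay with the table and are freed with
it, so each set always has exactly one owner. A compilable userspace sketch of
the same idiom, using simplified stand-in types rather than the kernel structs:

	#include <stdlib.h>

	struct mempools { int dummy; };			/* stand-in for dm_md_mempools */
	struct table    { struct mempools *mempools; };	/* stand-in for dm_table */
	struct device   { struct mempools *mempools; };	/* stand-in for mapped_device */

	/* Bind: the device takes the table's pools; NULLing t->mempools
	 * ensures table teardown cannot free what the device now owns. */
	static void bind_mempools(struct device *md, struct table *t)
	{
		if (md->mempools)		/* replace an older set */
			free(md->mempools);
		md->mempools = t->mempools;
		t->mempools = NULL;
	}

	/* Teardown: each side frees only what it still owns. */
	static void free_table(struct table *t)    { free(t->mempools); }
	static void free_device(struct device *md) { free(md->mempools); }

Since free(NULL) is a no-op, a double free is structurally impossible under
this scheme, which is the property the BUG_ON()-guarded bioset_init_from_src()
dance above tried to approximate.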
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
@@ -71,8 +71,6 @@ struct dm_target *dm_table_get_immutable_target(struct dm_table *t);
 struct dm_target *dm_table_get_wildcard_target(struct dm_table *t);
 bool dm_table_bio_based(struct dm_table *t);
 bool dm_table_request_based(struct dm_table *t);
-void dm_table_free_md_mempools(struct dm_table *t);
-struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
 
 void dm_lock_md_type(struct mapped_device *md);
 void dm_unlock_md_type(struct mapped_device *md);
diff --git a/include/linux/bio.h b/include/linux/bio.h
@@ -403,7 +403,6 @@ enum {
 extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
 extern void bioset_exit(struct bio_set *);
 extern int biovec_init_pool(mempool_t *pool, int pool_entries);
-extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);
 
 struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
 			     unsigned int opf, gfp_t gfp_mask,