dm zoned: fix various dmz_get_mblock() issues

dmz_fetch_mblock() called from dmz_get_mblock() has a race since the
allocation of the new metadata block descriptor and its insertion in
the cache rbtree with the READING state is not atomic. Two different
contexts requesting the same block may each end up adding its own
descriptor, leaving two different descriptors for the same block in the cache.

Another problem for this function is that the BIO for processing the
block read is allocated after the metadata block descriptor is inserted
in the cache rbtree. If the BIO allocation fails, the metadata block
descriptor is freed without first being removed from the rbtree.

Fix the first problem by re-checking that the requested block is not
already in the cache right before inserting the newly allocated
descriptor, atomically under the mblk_lock spinlock. Fix the second
problem by simply allocating the BIO before inserting the new block in
the cache.

Finally, since dmz_fetch_mblock() also increments a block reference
counter, rename the function to dmz_get_mblock_slow(). To be symmetric
and clear, also rename dmz_lookup_mblock() to dmz_get_mblock_fast() and
increment the block reference counter directly in that function rather
than in dmz_get_mblock().

Fixes: 3b1a94c88b ("dm zoned: drive-managed zoned block device target")
Cc: stable@vger.kernel.org
Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
This commit is contained in:
Damien Le Moal 2018-10-17 18:05:08 +09:00 committed by Mike Snitzer
parent 33c2865f8d
commit 3d4e738311
1 changed file with 42 additions and 24 deletions

View File

@@ -339,10 +339,11 @@ static void dmz_insert_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
} }
/* /*
* Lookup a metadata block in the rbtree. * Lookup a metadata block in the rbtree. If the block is found, increment
* its reference count.
*/ */
static struct dmz_mblock *dmz_lookup_mblock(struct dmz_metadata *zmd, static struct dmz_mblock *dmz_get_mblock_fast(struct dmz_metadata *zmd,
sector_t mblk_no) sector_t mblk_no)
{ {
struct rb_root *root = &zmd->mblk_rbtree; struct rb_root *root = &zmd->mblk_rbtree;
struct rb_node *node = root->rb_node; struct rb_node *node = root->rb_node;
@@ -350,8 +351,17 @@ static struct dmz_mblock *dmz_lookup_mblock(struct dmz_metadata *zmd,
while (node) { while (node) {
mblk = container_of(node, struct dmz_mblock, node); mblk = container_of(node, struct dmz_mblock, node);
if (mblk->no == mblk_no) if (mblk->no == mblk_no) {
/*
* If this is the first reference to the block,
* remove it from the LRU list.
*/
mblk->ref++;
if (mblk->ref == 1 &&
!test_bit(DMZ_META_DIRTY, &mblk->state))
list_del_init(&mblk->link);
return mblk; return mblk;
}
node = (mblk->no < mblk_no) ? node->rb_left : node->rb_right; node = (mblk->no < mblk_no) ? node->rb_left : node->rb_right;
} }
@@ -382,32 +392,47 @@ static void dmz_mblock_bio_end_io(struct bio *bio)
} }
/* /*
* Read a metadata block from disk. * Read an uncached metadata block from disk and add it to the cache.
*/ */
static struct dmz_mblock *dmz_fetch_mblock(struct dmz_metadata *zmd, static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
sector_t mblk_no) sector_t mblk_no)
{ {
struct dmz_mblock *mblk; struct dmz_mblock *mblk, *m;
sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no; sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
struct bio *bio; struct bio *bio;
/* Get block and insert it */ /* Get a new block and a BIO to read it */
mblk = dmz_alloc_mblock(zmd, mblk_no); mblk = dmz_alloc_mblock(zmd, mblk_no);
if (!mblk) if (!mblk)
return NULL; return NULL;
spin_lock(&zmd->mblk_lock);
mblk->ref++;
set_bit(DMZ_META_READING, &mblk->state);
dmz_insert_mblock(zmd, mblk);
spin_unlock(&zmd->mblk_lock);
bio = bio_alloc(GFP_NOIO, 1); bio = bio_alloc(GFP_NOIO, 1);
if (!bio) { if (!bio) {
dmz_free_mblock(zmd, mblk); dmz_free_mblock(zmd, mblk);
return NULL; return NULL;
} }
spin_lock(&zmd->mblk_lock);
/*
* Make sure that another context did not start reading
* the block already.
*/
m = dmz_get_mblock_fast(zmd, mblk_no);
if (m) {
spin_unlock(&zmd->mblk_lock);
dmz_free_mblock(zmd, mblk);
bio_put(bio);
return m;
}
mblk->ref++;
set_bit(DMZ_META_READING, &mblk->state);
dmz_insert_mblock(zmd, mblk);
spin_unlock(&zmd->mblk_lock);
/* Submit read BIO */
bio->bi_iter.bi_sector = dmz_blk2sect(block); bio->bi_iter.bi_sector = dmz_blk2sect(block);
bio_set_dev(bio, zmd->dev->bdev); bio_set_dev(bio, zmd->dev->bdev);
bio->bi_private = mblk; bio->bi_private = mblk;
@@ -509,19 +534,12 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
/* Check rbtree */ /* Check rbtree */
spin_lock(&zmd->mblk_lock); spin_lock(&zmd->mblk_lock);
mblk = dmz_lookup_mblock(zmd, mblk_no); mblk = dmz_get_mblock_fast(zmd, mblk_no);
if (mblk) {
/* Cache hit: remove block from LRU list */
mblk->ref++;
if (mblk->ref == 1 &&
!test_bit(DMZ_META_DIRTY, &mblk->state))
list_del_init(&mblk->link);
}
spin_unlock(&zmd->mblk_lock); spin_unlock(&zmd->mblk_lock);
if (!mblk) { if (!mblk) {
/* Cache miss: read the block from disk */ /* Cache miss: read the block from disk */
mblk = dmz_fetch_mblock(zmd, mblk_no); mblk = dmz_get_mblock_slow(zmd, mblk_no);
if (!mblk) if (!mblk)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
} }