dm zoned: fix metadata block ref counting
Since the ref field of struct dmz_mblock is always used with the
spinlock of struct dmz_metadata locked, there is no need to use an
atomic_t type. Change the type of the ref field to an unsigned
integer.
Fixes: 3b1a94c88b ("dm zoned: drive-managed zoned block device target")
Cc: stable@vger.kernel.org
Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
This commit is contained in:
parent
d857ad75ed
commit
33c2865f8d
|
@ -99,7 +99,7 @@ struct dmz_mblock {
|
|||
struct rb_node node;
|
||||
struct list_head link;
|
||||
sector_t no;
|
||||
atomic_t ref;
|
||||
unsigned int ref;
|
||||
unsigned long state;
|
||||
struct page *page;
|
||||
void *data;
|
||||
|
@ -296,7 +296,7 @@ static struct dmz_mblock *dmz_alloc_mblock(struct dmz_metadata *zmd,
|
|||
|
||||
RB_CLEAR_NODE(&mblk->node);
|
||||
INIT_LIST_HEAD(&mblk->link);
|
||||
atomic_set(&mblk->ref, 0);
|
||||
mblk->ref = 0;
|
||||
mblk->state = 0;
|
||||
mblk->no = mblk_no;
|
||||
mblk->data = page_address(mblk->page);
|
||||
|
@ -397,7 +397,7 @@ static struct dmz_mblock *dmz_fetch_mblock(struct dmz_metadata *zmd,
|
|||
return NULL;
|
||||
|
||||
spin_lock(&zmd->mblk_lock);
|
||||
atomic_inc(&mblk->ref);
|
||||
mblk->ref++;
|
||||
set_bit(DMZ_META_READING, &mblk->state);
|
||||
dmz_insert_mblock(zmd, mblk);
|
||||
spin_unlock(&zmd->mblk_lock);
|
||||
|
@ -484,7 +484,8 @@ static void dmz_release_mblock(struct dmz_metadata *zmd,
|
|||
|
||||
spin_lock(&zmd->mblk_lock);
|
||||
|
||||
if (atomic_dec_and_test(&mblk->ref)) {
|
||||
mblk->ref--;
|
||||
if (mblk->ref == 0) {
|
||||
if (test_bit(DMZ_META_ERROR, &mblk->state)) {
|
||||
rb_erase(&mblk->node, &zmd->mblk_rbtree);
|
||||
dmz_free_mblock(zmd, mblk);
|
||||
|
@ -511,7 +512,8 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
|
|||
mblk = dmz_lookup_mblock(zmd, mblk_no);
|
||||
if (mblk) {
|
||||
/* Cache hit: remove block from LRU list */
|
||||
if (atomic_inc_return(&mblk->ref) == 1 &&
|
||||
mblk->ref++;
|
||||
if (mblk->ref == 1 &&
|
||||
!test_bit(DMZ_META_DIRTY, &mblk->state))
|
||||
list_del_init(&mblk->link);
|
||||
}
|
||||
|
@ -753,7 +755,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
|
|||
|
||||
spin_lock(&zmd->mblk_lock);
|
||||
clear_bit(DMZ_META_DIRTY, &mblk->state);
|
||||
if (atomic_read(&mblk->ref) == 0)
|
||||
if (mblk->ref == 0)
|
||||
list_add_tail(&mblk->link, &zmd->mblk_lru_list);
|
||||
spin_unlock(&zmd->mblk_lock);
|
||||
}
|
||||
|
@ -2308,7 +2310,7 @@ static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
|
|||
mblk = list_first_entry(&zmd->mblk_dirty_list,
|
||||
struct dmz_mblock, link);
|
||||
dmz_dev_warn(zmd->dev, "mblock %llu still in dirty list (ref %u)",
|
||||
(u64)mblk->no, atomic_read(&mblk->ref));
|
||||
(u64)mblk->no, mblk->ref);
|
||||
list_del_init(&mblk->link);
|
||||
rb_erase(&mblk->node, &zmd->mblk_rbtree);
|
||||
dmz_free_mblock(zmd, mblk);
|
||||
|
@ -2326,8 +2328,8 @@ static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
|
|||
root = &zmd->mblk_rbtree;
|
||||
rbtree_postorder_for_each_entry_safe(mblk, next, root, node) {
|
||||
dmz_dev_warn(zmd->dev, "mblock %llu ref %u still in rbtree",
|
||||
(u64)mblk->no, atomic_read(&mblk->ref));
|
||||
atomic_set(&mblk->ref, 0);
|
||||
(u64)mblk->no, mblk->ref);
|
||||
mblk->ref = 0;
|
||||
dmz_free_mblock(zmd, mblk);
|
||||
}
|
||||
|
||||
|
|
Loading…
Reference in New Issue