scsi: target: Remove se_dev_entry.ua_count
se_dev_entry.ua_count is only used to check whether or not
se_dev_entry.ua_list is empty. Use list_empty_careful() instead.
Checking whether or not ua_list is empty without holding the lock that
protects that list is fine, because the code that dequeues from that
list checks again, while holding the lock, whether the list is empty.

Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Reviewed-by: Mike Christie <mchristi@redhat.com>
Cc: Mike Christie <mchristi@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
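For illustration, the lockless-check pattern this change relies on looks
roughly as follows. This is a minimal sketch assembled from the code
touched below, not a verbatim excerpt; example_drain_ua_list() is a
hypothetical name, while deve, ua, ua_p and se_ua_cache are the
identifiers the driver actually uses:

    /* Sketch only: a consumer that drains deve->ua_list, using the
     * lock-free emptiness test as a fast path.
     */
    static void example_drain_ua_list(struct se_dev_entry *deve)
    {
            struct se_ua *ua, *ua_p;

            /* Fast path: peek without taking ua_lock. A stale answer
             * is harmless because the locked loop below is
             * authoritative; list_empty_careful() avoids a false
             * "empty" while another CPU is mid-update on the list.
             */
            if (list_empty_careful(&deve->ua_list))
                    return;

            spin_lock(&deve->ua_lock);
            list_for_each_entry_safe(ua, ua_p, &deve->ua_list,
                                     ua_nacl_list) {
                    /* An empty list simply yields zero iterations. */
                    list_del(&ua->ua_nacl_list);
                    kmem_cache_free(se_ua_cache, ua);
            }
            spin_unlock(&deve->ua_lock);
    }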
parent 325c1e8b24
commit e936a38ac9
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -336,7 +336,6 @@ int core_enable_device_list_for_node(
 		return -ENOMEM;
 	}
 
-	atomic_set(&new->ua_count, 0);
 	spin_lock_init(&new->ua_lock);
 	INIT_LIST_HEAD(&new->ua_list);
 	INIT_LIST_HEAD(&new->lun_link);
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -55,7 +55,7 @@ target_scsi3_ua_check(struct se_cmd *cmd)
 		rcu_read_unlock();
 		return 0;
 	}
-	if (!atomic_read(&deve->ua_count)) {
+	if (list_empty_careful(&deve->ua_list)) {
 		rcu_read_unlock();
 		return 0;
 	}
@@ -154,7 +154,6 @@ int core_scsi3_ua_allocate(
 				&deve->ua_list);
 		spin_unlock(&deve->ua_lock);
 
-		atomic_inc_mb(&deve->ua_count);
 		return 0;
 	}
 	list_add_tail(&ua->ua_nacl_list, &deve->ua_list);
@@ -164,7 +163,6 @@ int core_scsi3_ua_allocate(
 		" 0x%02x, ASCQ: 0x%02x\n", deve->mapped_lun,
 		asc, ascq);
 
-	atomic_inc_mb(&deve->ua_count);
 	return 0;
 }
 
@@ -196,8 +194,6 @@ void core_scsi3_ua_release_all(
 	list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
 		list_del(&ua->ua_nacl_list);
 		kmem_cache_free(se_ua_cache, ua);
-
-		atomic_dec_mb(&deve->ua_count);
 	}
 	spin_unlock(&deve->ua_lock);
 }
@@ -263,8 +259,6 @@ bool core_scsi3_ua_for_check_condition(struct se_cmd *cmd, u8 *key, u8 *asc,
 		}
 		list_del(&ua->ua_nacl_list);
 		kmem_cache_free(se_ua_cache, ua);
-
-		atomic_dec_mb(&deve->ua_count);
 	}
 	spin_unlock(&deve->ua_lock);
 	rcu_read_unlock();
@@ -304,7 +298,7 @@ int core_scsi3_ua_clear_for_request_sense(
 		rcu_read_unlock();
 		return -EINVAL;
 	}
-	if (!atomic_read(&deve->ua_count)) {
+	if (list_empty_careful(&deve->ua_list)) {
 		rcu_read_unlock();
 		return -EPERM;
 	}
@@ -327,8 +321,6 @@ int core_scsi3_ua_clear_for_request_sense(
 		}
 		list_del(&ua->ua_nacl_list);
 		kmem_cache_free(se_ua_cache, ua);
-
-		atomic_dec_mb(&deve->ua_count);
 	}
 	spin_unlock(&deve->ua_lock);
 	rcu_read_unlock();
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -638,7 +638,6 @@ struct se_dev_entry {
 	atomic_long_t		total_cmds;
 	atomic_long_t		read_bytes;
 	atomic_long_t		write_bytes;
-	atomic_t		ua_count;
 	/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
 	struct kref		pr_kref;
 	struct completion	pr_comp;
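For reference, list_empty_careful() (from include/linux/list.h) tests
both the head's next and prev pointers, so it is intended not to report
the list as empty while another CPU is in the middle of a deletion and
re-initialization. Approximately, paraphrased rather than quoted from
the kernel header of this era:

    static inline int list_empty_careful(const struct list_head *head)
    {
            struct list_head *next = head->next;

            /* Empty only if both links still point back at the head;
             * a half-completed list update fails one of the tests.
             */
            return (next == head) && (next == head->prev);
    }

Plain list_empty() reads only head->next, so it could report "empty"
mid-update; the ua_list check tolerates staleness but still benefits
from the stronger test.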