scsi: ufs: add reference counting for scsi block requests
Currently we call scsi_block_requests()/scsi_unblock_requests() whenever we want to block/unblock SCSI requests, but since there is no reference counting, nesting these calls can leave us in an undesired state. Consider the following call sequence:

1. func1() calls scsi_block_requests() but calls func2() before calling scsi_unblock_requests()
2. func2() calls scsi_block_requests()
3. func2() calls scsi_unblock_requests()
4. func1() calls scsi_unblock_requests()

With no reference counting, SCSI requests end up unblocked after step 3 instead of only after step 4. Although no failures have been observed from this so far, it could cause failures in the future. Fix this by adding reference counting around the block/unblock calls.

Signed-off-by: Subhash Jadavani <subhashj@codeaurora.org>
Signed-off-by: Can Guo <cang@codeaurora.org>
Signed-off-by: Asutosh Das <asutoshd@codeaurora.org>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
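
To make the intended behaviour concrete, here is a minimal userspace sketch of the same reference-counting idea, not the kernel implementation itself: it uses C11 stdatomic in place of the kernel's atomic_t, stub block/unblock functions, and the func1()/func2() names from the scenario above purely for illustration.

#include <stdatomic.h>
#include <stdio.h>

/* Stand-ins for scsi_block_requests()/scsi_unblock_requests(). */
static void block_requests(void)   { printf("requests blocked\n"); }
static void unblock_requests(void) { printf("requests unblocked\n"); }

/* Reference counter shared by the wrappers below. */
static atomic_int block_reqs_cnt;

static void refcounted_block(void)
{
        /* Only the first blocker actually blocks (count 0 -> 1). */
        if (atomic_fetch_add(&block_reqs_cnt, 1) == 0)
                block_requests();
}

static void refcounted_unblock(void)
{
        /* Only the last unblocker actually unblocks (count 1 -> 0). */
        if (atomic_fetch_sub(&block_reqs_cnt, 1) == 1)
                unblock_requests();
}

static void func2(void)
{
        refcounted_block();    /* step 2: count 1 -> 2, no-op */
        refcounted_unblock();  /* step 3: count 2 -> 1, requests stay blocked */
}

static void func1(void)
{
        refcounted_block();    /* step 1: count 0 -> 1, blocks */
        func2();
        refcounted_unblock();  /* step 4: count 1 -> 0, unblocks here, not at step 3 */
}

int main(void)
{
        func1();
        return 0;
}

Running the sketch prints "requests blocked" once at step 1 and "requests unblocked" once at step 4. The patch below implements the same first-blocker/last-unblocker check with atomic_inc_return() == 1 and atomic_dec_and_test().
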
commit 38135535dc
parent b334456ec2

drivers/scsi/ufs/ufshcd.c

@@ -264,6 +264,18 @@ static inline void ufshcd_disable_irq(struct ufs_hba *hba)
 	}
 }
 
+static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
+{
+	if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
+		scsi_unblock_requests(hba->host);
+}
+
+static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
+{
+	if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
+		scsi_block_requests(hba->host);
+}
+
 /* replace non-printable or non-ASCII characters with spaces */
 static inline void ufshcd_remove_non_printable(char *val)
 {

@@ -1074,12 +1086,12 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
 	 * make sure that there are no outstanding requests when
 	 * clock scaling is in progress
 	 */
-	scsi_block_requests(hba->host);
+	ufshcd_scsi_block_requests(hba);
 	down_write(&hba->clk_scaling_lock);
 	if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
 		ret = -EBUSY;
 		up_write(&hba->clk_scaling_lock);
-		scsi_unblock_requests(hba->host);
+		ufshcd_scsi_unblock_requests(hba);
 	}
 
 	return ret;

@@ -1088,7 +1100,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
 static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
 {
 	up_write(&hba->clk_scaling_lock);
-	scsi_unblock_requests(hba->host);
+	ufshcd_scsi_unblock_requests(hba);
 }
 
 /**

@@ -1460,7 +1472,7 @@ static void ufshcd_ungate_work(struct work_struct *work)
 		hba->clk_gating.is_suspended = false;
 	}
 unblock_reqs:
-	scsi_unblock_requests(hba->host);
+	ufshcd_scsi_unblock_requests(hba);
 }
 
 /**

@@ -1516,7 +1528,7 @@ start:
 		 * work and to enable clocks.
 		 */
 	case CLKS_OFF:
-		scsi_block_requests(hba->host);
+		ufshcd_scsi_block_requests(hba);
 		hba->clk_gating.state = REQ_CLKS_ON;
 		trace_ufshcd_clk_gating(dev_name(hba->dev),
 					hba->clk_gating.state);

@@ -5298,7 +5310,7 @@ skip_err_handling:
 
 out:
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
-	scsi_unblock_requests(hba->host);
+	ufshcd_scsi_unblock_requests(hba);
 	ufshcd_release(hba);
 	pm_runtime_put_sync(hba->dev);
 }

@@ -5400,7 +5412,7 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
 		/* handle fatal errors only when link is functional */
 		if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
 			/* block commands from scsi mid-layer */
-			scsi_block_requests(hba->host);
+			ufshcd_scsi_block_requests(hba);
 
 			hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
 

@@ -8032,7 +8044,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 
 	/* Hold auto suspend until async scan completes */
 	pm_runtime_get_sync(dev);
-
+	atomic_set(&hba->scsi_block_reqs_cnt, 0);
 	/*
 	 * We are assuming that device wasn't put in sleep/power-down
 	 * state exclusively during the boot stage before kernel.

drivers/scsi/ufs/ufshcd.h

@@ -499,6 +499,7 @@ struct ufs_stats
  * @urgent_bkops_lvl: keeps track of urgent bkops level for device
  * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
  *  device is known or not.
+ * @scsi_block_reqs_cnt: reference counting for scsi block requests
  */
 struct ufs_hba {
 	void __iomem *mmio_base;

@@ -699,6 +700,7 @@ struct ufs_hba {
 
 	struct rw_semaphore clk_scaling_lock;
 	struct ufs_desc_size desc_size;
+	atomic_t scsi_block_reqs_cnt;
 };
 
 /* Returns true if clocks can be gated. Otherwise false */