scsi: cxlflash: Combine the send queue locks
Currently there are separate spin locks for the two supported I/O queueing models. This makes it difficult to serialize with paths outside the enqueue path. As a design simplification and to support serialization with enqueue operations, move to only a single lock that is used for enqueueing regardless of the queueing model.

Signed-off-by: Uma Krishnan <ukrishn@linux.vnet.ibm.com>
Acked-by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
This commit is contained in:
parent
d31b779115
commit
66ea9bcc39
|
@@ -193,7 +193,7 @@ struct hwq {
|
|||
u32 index; /* Index of this hwq */
|
||||
|
||||
atomic_t hsq_credits;
|
||||
spinlock_t hsq_slock;
|
||||
spinlock_t hsq_slock; /* Hardware send queue lock */
|
||||
struct sisl_ioarcb *hsq_start;
|
||||
struct sisl_ioarcb *hsq_end;
|
||||
struct sisl_ioarcb *hsq_curr;
|
||||
|
@@ -204,7 +204,6 @@ struct hwq {
|
|||
bool toggle;
|
||||
|
||||
s64 room;
|
||||
spinlock_t rrin_slock; /* Lock to rrin queuing and cmd_room updates */
|
||||
|
||||
struct irq_poll irqpoll;
|
||||
} __aligned(cache_line_size());
|
||||
|
|
|
@@ -261,7 +261,7 @@ static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
|
|||
* To avoid the performance penalty of MMIO, spread the update of
|
||||
* 'room' over multiple commands.
|
||||
*/
|
||||
spin_lock_irqsave(&hwq->rrin_slock, lock_flags);
|
||||
spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
|
||||
if (--hwq->room < 0) {
|
||||
room = readq_be(&hwq->host_map->cmd_room);
|
||||
if (room <= 0) {
|
||||
|
@@ -277,7 +277,7 @@ static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
|
|||
|
||||
writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin);
|
||||
out:
|
||||
spin_unlock_irqrestore(&hwq->rrin_slock, lock_flags);
|
||||
spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
|
||||
dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n", __func__,
|
||||
cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
|
||||
return rc;
|
||||
|
@@ -1722,7 +1722,10 @@ static int start_afu(struct cxlflash_cfg *cfg)
|
|||
hwq->hrrq_end = &hwq->rrq_entry[NUM_RRQ_ENTRY - 1];
|
||||
hwq->hrrq_curr = hwq->hrrq_start;
|
||||
hwq->toggle = 1;
|
||||
|
||||
/* Initialize spin locks */
|
||||
spin_lock_init(&hwq->hrrq_slock);
|
||||
spin_lock_init(&hwq->hsq_slock);
|
||||
|
||||
/* Initialize SQ */
|
||||
if (afu_is_sq_cmd_mode(afu)) {
|
||||
|
@@ -1731,7 +1734,6 @@ static int start_afu(struct cxlflash_cfg *cfg)
|
|||
hwq->hsq_end = &hwq->sq[NUM_SQ_ENTRY - 1];
|
||||
hwq->hsq_curr = hwq->hsq_start;
|
||||
|
||||
spin_lock_init(&hwq->hsq_slock);
|
||||
atomic_set(&hwq->hsq_credits, NUM_SQ_ENTRY - 1);
|
||||
}
|
||||
|
||||
|
@@ -1984,7 +1986,6 @@ static int init_afu(struct cxlflash_cfg *cfg)
|
|||
for (i = 0; i < afu->num_hwqs; i++) {
|
||||
hwq = get_hwq(afu, i);
|
||||
|
||||
spin_lock_init(&hwq->rrin_slock);
|
||||
hwq->room = readq_be(&hwq->host_map->cmd_room);
|
||||
}
|
||||
|
||||
|
|
Loading…
Reference in New Issue