scsi: lpfc: Add per io channel NVME IO statistics

When debugging various issues, per-IO-channel IO statistics were useful
for understanding what was happening. However, many of the stats were kept
on a per-port basis rather than a per-IO-channel basis.

Move the statistics to a per-IO-channel basis.

Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
This commit is contained in:
James Smart 2018-04-09 14:24:23 -07:00 committed by Martin K. Petersen
parent f91bc594ba
commit 66a210ffb8
6 changed files with 174 additions and 92 deletions

View File

@ -920,12 +920,6 @@ struct lpfc_hba {
atomic_t fc4ScsiOutputRequests; atomic_t fc4ScsiOutputRequests;
atomic_t fc4ScsiControlRequests; atomic_t fc4ScsiControlRequests;
atomic_t fc4ScsiIoCmpls; atomic_t fc4ScsiIoCmpls;
atomic_t fc4NvmeInputRequests;
atomic_t fc4NvmeOutputRequests;
atomic_t fc4NvmeControlRequests;
atomic_t fc4NvmeIoCmpls;
atomic_t fc4NvmeLsRequests;
atomic_t fc4NvmeLsCmpls;
uint64_t bg_guard_err_cnt; uint64_t bg_guard_err_cnt;
uint64_t bg_apptag_err_cnt; uint64_t bg_apptag_err_cnt;

View File

@ -151,8 +151,11 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
struct lpfc_nvme_lport *lport; struct lpfc_nvme_lport *lport;
struct lpfc_nodelist *ndlp; struct lpfc_nodelist *ndlp;
struct nvme_fc_remote_port *nrport; struct nvme_fc_remote_port *nrport;
uint64_t data1, data2, data3, tot; struct lpfc_nvme_ctrl_stat *cstat;
uint64_t data1, data2, data3;
uint64_t totin, totout, tot;
char *statep; char *statep;
int i;
int len = 0; int len = 0;
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) { if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
@ -364,11 +367,14 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
} }
spin_unlock_irq(shost->host_lock); spin_unlock_irq(shost->host_lock);
if (!lport)
return len;
len += snprintf(buf + len, PAGE_SIZE - len, "\nNVME Statistics\n"); len += snprintf(buf + len, PAGE_SIZE - len, "\nNVME Statistics\n");
len += snprintf(buf+len, PAGE_SIZE-len, len += snprintf(buf+len, PAGE_SIZE-len,
"LS: Xmt %010x Cmpl %010x Abort %08x\n", "LS: Xmt %010x Cmpl %010x Abort %08x\n",
atomic_read(&phba->fc4NvmeLsRequests), atomic_read(&lport->fc4NvmeLsRequests),
atomic_read(&phba->fc4NvmeLsCmpls), atomic_read(&lport->fc4NvmeLsCmpls),
atomic_read(&lport->xmt_ls_abort)); atomic_read(&lport->xmt_ls_abort));
len += snprintf(buf + len, PAGE_SIZE - len, len += snprintf(buf + len, PAGE_SIZE - len,
@ -377,27 +383,31 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
atomic_read(&lport->cmpl_ls_xb), atomic_read(&lport->cmpl_ls_xb),
atomic_read(&lport->cmpl_ls_err)); atomic_read(&lport->cmpl_ls_err));
tot = atomic_read(&phba->fc4NvmeIoCmpls); totin = 0;
data1 = atomic_read(&phba->fc4NvmeInputRequests); totout = 0;
data2 = atomic_read(&phba->fc4NvmeOutputRequests); for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
data3 = atomic_read(&phba->fc4NvmeControlRequests); cstat = &lport->cstat[i];
tot = atomic_read(&cstat->fc4NvmeIoCmpls);
totin += tot;
data1 = atomic_read(&cstat->fc4NvmeInputRequests);
data2 = atomic_read(&cstat->fc4NvmeOutputRequests);
data3 = atomic_read(&cstat->fc4NvmeControlRequests);
totout += (data1 + data2 + data3);
}
len += snprintf(buf+len, PAGE_SIZE-len, len += snprintf(buf+len, PAGE_SIZE-len,
"FCP: Rd %016llx Wr %016llx IO %016llx\n", "Total FCP Cmpl %016llx Issue %016llx "
data1, data2, data3); "OutIO %016llx\n",
totin, totout, totout - totin);
len += snprintf(buf+len, PAGE_SIZE-len, len += snprintf(buf+len, PAGE_SIZE-len,
" noxri %08x nondlp %08x qdepth %08x " " abort %08x noxri %08x nondlp %08x qdepth %08x "
"wqerr %08x\n", "wqerr %08x\n",
atomic_read(&lport->xmt_fcp_abort),
atomic_read(&lport->xmt_fcp_noxri), atomic_read(&lport->xmt_fcp_noxri),
atomic_read(&lport->xmt_fcp_bad_ndlp), atomic_read(&lport->xmt_fcp_bad_ndlp),
atomic_read(&lport->xmt_fcp_qdepth), atomic_read(&lport->xmt_fcp_qdepth),
atomic_read(&lport->xmt_fcp_wqerr)); atomic_read(&lport->xmt_fcp_wqerr));
len += snprintf(buf + len, PAGE_SIZE - len,
" Cmpl %016llx Outstanding %016llx Abort %08x\n",
tot, ((data1 + data2 + data3) - tot),
atomic_read(&lport->xmt_fcp_abort));
len += snprintf(buf + len, PAGE_SIZE - len, len += snprintf(buf + len, PAGE_SIZE - len,
"FCP CMPL: xb %08x Err %08x\n", "FCP CMPL: xb %08x Err %08x\n",
atomic_read(&lport->cmpl_fcp_xb), atomic_read(&lport->cmpl_fcp_xb),

View File

@ -767,10 +767,12 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
struct lpfc_nvmet_tgtport *tgtp; struct lpfc_nvmet_tgtport *tgtp;
struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp; struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
struct nvme_fc_local_port *localport; struct nvme_fc_local_port *localport;
struct lpfc_nvme_ctrl_stat *cstat;
struct lpfc_nvme_lport *lport; struct lpfc_nvme_lport *lport;
uint64_t tot, data1, data2, data3; uint64_t data1, data2, data3;
uint64_t tot, totin, totout;
int cnt, i, maxch;
int len = 0; int len = 0;
int cnt;
if (phba->nvmet_support) { if (phba->nvmet_support) {
if (!phba->targetport) if (!phba->targetport)
@ -896,27 +898,6 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
return len; return len;
len += snprintf(buf + len, size - len,
"\nNVME Lport Statistics\n");
len += snprintf(buf + len, size - len,
"LS: Xmt %016x Cmpl %016x\n",
atomic_read(&phba->fc4NvmeLsRequests),
atomic_read(&phba->fc4NvmeLsCmpls));
tot = atomic_read(&phba->fc4NvmeIoCmpls);
data1 = atomic_read(&phba->fc4NvmeInputRequests);
data2 = atomic_read(&phba->fc4NvmeOutputRequests);
data3 = atomic_read(&phba->fc4NvmeControlRequests);
len += snprintf(buf + len, size - len,
"FCP: Rd %016llx Wr %016llx IO %016llx\n",
data1, data2, data3);
len += snprintf(buf + len, size - len,
" Cmpl %016llx Outstanding %016llx\n",
tot, (data1 + data2 + data3) - tot);
localport = vport->localport; localport = vport->localport;
if (!localport) if (!localport)
return len; return len;
@ -924,6 +905,46 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
if (!lport) if (!lport)
return len; return len;
len += snprintf(buf + len, size - len,
"\nNVME Lport Statistics\n");
len += snprintf(buf + len, size - len,
"LS: Xmt %016x Cmpl %016x\n",
atomic_read(&lport->fc4NvmeLsRequests),
atomic_read(&lport->fc4NvmeLsCmpls));
if (phba->cfg_nvme_io_channel < 32)
maxch = phba->cfg_nvme_io_channel;
else
maxch = 32;
totin = 0;
totout = 0;
for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
cstat = &lport->cstat[i];
tot = atomic_read(&cstat->fc4NvmeIoCmpls);
totin += tot;
data1 = atomic_read(&cstat->fc4NvmeInputRequests);
data2 = atomic_read(&cstat->fc4NvmeOutputRequests);
data3 = atomic_read(&cstat->fc4NvmeControlRequests);
totout += (data1 + data2 + data3);
/* Limit to 32, debugfs display buffer limitation */
if (i >= 32)
continue;
len += snprintf(buf + len, PAGE_SIZE - len,
"FCP (%d): Rd %016llx Wr %016llx "
"IO %016llx ",
i, data1, data2, data3);
len += snprintf(buf + len, PAGE_SIZE - len,
"Cmpl %016llx OutIO %016llx\n",
tot, ((data1 + data2 + data3) - tot));
}
len += snprintf(buf + len, PAGE_SIZE - len,
"Total FCP Cmpl %016llx Issue %016llx "
"OutIO %016llx\n",
totin, totout, totout - totin);
len += snprintf(buf + len, size - len, len += snprintf(buf + len, size - len,
"LS Xmt Err: Abrt %08x Err %08x " "LS Xmt Err: Abrt %08x Err %08x "
"Cmpl Err: xb %08x Err %08x\n", "Cmpl Err: xb %08x Err %08x\n",

View File

@ -1266,6 +1266,9 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
uint64_t tot, data1, data2, data3; uint64_t tot, data1, data2, data3;
struct lpfc_nvmet_tgtport *tgtp; struct lpfc_nvmet_tgtport *tgtp;
struct lpfc_register reg_data; struct lpfc_register reg_data;
struct nvme_fc_local_port *localport;
struct lpfc_nvme_lport *lport;
struct lpfc_nvme_ctrl_stat *cstat;
void __iomem *eqdreg = phba->sli4_hba.u.if_type2.EQDregaddr; void __iomem *eqdreg = phba->sli4_hba.u.if_type2.EQDregaddr;
vports = lpfc_create_vport_work_array(phba); vports = lpfc_create_vport_work_array(phba);
@ -1299,14 +1302,25 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
tot += atomic_read(&tgtp->xmt_fcp_release); tot += atomic_read(&tgtp->xmt_fcp_release);
tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot; tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
} else { } else {
tot = atomic_read(&phba->fc4NvmeIoCmpls); localport = phba->pport->localport;
data1 = atomic_read( if (!localport || !localport->private)
&phba->fc4NvmeInputRequests); goto skip_eqdelay;
data2 = atomic_read( lport = (struct lpfc_nvme_lport *)
&phba->fc4NvmeOutputRequests); localport->private;
data3 = atomic_read( tot = 0;
&phba->fc4NvmeControlRequests); for (i = 0;
tot = (data1 + data2 + data3) - tot; i < phba->cfg_nvme_io_channel; i++) {
cstat = &lport->cstat[i];
data1 = atomic_read(
&cstat->fc4NvmeInputRequests);
data2 = atomic_read(
&cstat->fc4NvmeOutputRequests);
data3 = atomic_read(
&cstat->fc4NvmeControlRequests);
tot += (data1 + data2 + data3);
tot -= atomic_read(
&cstat->fc4NvmeIoCmpls);
}
} }
} }
@ -6895,12 +6909,6 @@ lpfc_create_shost(struct lpfc_hba *phba)
atomic_set(&phba->fc4ScsiOutputRequests, 0); atomic_set(&phba->fc4ScsiOutputRequests, 0);
atomic_set(&phba->fc4ScsiControlRequests, 0); atomic_set(&phba->fc4ScsiControlRequests, 0);
atomic_set(&phba->fc4ScsiIoCmpls, 0); atomic_set(&phba->fc4ScsiIoCmpls, 0);
atomic_set(&phba->fc4NvmeInputRequests, 0);
atomic_set(&phba->fc4NvmeOutputRequests, 0);
atomic_set(&phba->fc4NvmeControlRequests, 0);
atomic_set(&phba->fc4NvmeIoCmpls, 0);
atomic_set(&phba->fc4NvmeLsRequests, 0);
atomic_set(&phba->fc4NvmeLsCmpls, 0);
vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
if (!vport) if (!vport)
return -ENODEV; return -ENODEV;

View File

@ -357,15 +357,17 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
struct lpfc_dmabuf *buf_ptr; struct lpfc_dmabuf *buf_ptr;
struct lpfc_nodelist *ndlp; struct lpfc_nodelist *ndlp;
atomic_inc(&vport->phba->fc4NvmeLsCmpls); lport = (struct lpfc_nvme_lport *)vport->localport->private;
pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2; pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK; status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
if (status) {
lport = (struct lpfc_nvme_lport *)vport->localport->private; if (lport) {
if (bf_get(lpfc_wcqe_c_xb, wcqe)) atomic_inc(&lport->fc4NvmeLsCmpls);
atomic_inc(&lport->cmpl_ls_xb); if (status) {
atomic_inc(&lport->cmpl_ls_err); if (bf_get(lpfc_wcqe_c_xb, wcqe))
atomic_inc(&lport->cmpl_ls_xb);
atomic_inc(&lport->cmpl_ls_err);
}
} }
ndlp = (struct lpfc_nodelist *)cmdwqe->context1; ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
@ -570,6 +572,9 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
lport = (struct lpfc_nvme_lport *)pnvme_lport->private; lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
rport = (struct lpfc_nvme_rport *)pnvme_rport->private; rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
if (unlikely(!lport) || unlikely(!rport))
return -EINVAL;
vport = lport->vport; vport = lport->vport;
if (vport->load_flag & FC_UNLOADING) if (vport->load_flag & FC_UNLOADING)
@ -639,7 +644,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma, pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
&pnvme_lsreq->rspdma); &pnvme_lsreq->rspdma);
atomic_inc(&vport->phba->fc4NvmeLsRequests); atomic_inc(&lport->fc4NvmeLsRequests);
/* Hardcode the wait to 30 seconds. Connections are failing otherwise. /* Hardcode the wait to 30 seconds. Connections are failing otherwise.
* This code allows it all to work. * This code allows it all to work.
@ -690,6 +695,8 @@ lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_iocbq *wqe, *next_wqe; struct lpfc_iocbq *wqe, *next_wqe;
lport = (struct lpfc_nvme_lport *)pnvme_lport->private; lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
if (unlikely(!lport))
return;
vport = lport->vport; vport = lport->vport;
phba = vport->phba; phba = vport->phba;
@ -949,8 +956,9 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
struct lpfc_nodelist *ndlp; struct lpfc_nodelist *ndlp;
struct lpfc_nvme_fcpreq_priv *freqpriv; struct lpfc_nvme_fcpreq_priv *freqpriv;
struct lpfc_nvme_lport *lport; struct lpfc_nvme_lport *lport;
struct lpfc_nvme_ctrl_stat *cstat;
unsigned long flags; unsigned long flags;
uint32_t code, status; uint32_t code, status, idx;
uint16_t cid, sqhd, data; uint16_t cid, sqhd, data;
uint32_t *ptr; uint32_t *ptr;
@ -961,16 +969,20 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
wcqe); wcqe);
return; return;
} }
atomic_inc(&phba->fc4NvmeIoCmpls);
nCmd = lpfc_ncmd->nvmeCmd; nCmd = lpfc_ncmd->nvmeCmd;
rport = lpfc_ncmd->nrport; rport = lpfc_ncmd->nrport;
status = bf_get(lpfc_wcqe_c_status, wcqe); status = bf_get(lpfc_wcqe_c_status, wcqe);
if (status) {
lport = (struct lpfc_nvme_lport *)vport->localport->private; lport = (struct lpfc_nvme_lport *)vport->localport->private;
if (bf_get(lpfc_wcqe_c_xb, wcqe)) if (lport) {
atomic_inc(&lport->cmpl_fcp_xb); idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
atomic_inc(&lport->cmpl_fcp_err); cstat = &lport->cstat[idx];
atomic_inc(&cstat->fc4NvmeIoCmpls);
if (status) {
if (bf_get(lpfc_wcqe_c_xb, wcqe))
atomic_inc(&lport->cmpl_fcp_xb);
atomic_inc(&lport->cmpl_fcp_err);
}
} }
lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n", lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n",
@ -1163,7 +1175,8 @@ out_err:
static int static int
lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport, lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
struct lpfc_nvme_buf *lpfc_ncmd, struct lpfc_nvme_buf *lpfc_ncmd,
struct lpfc_nodelist *pnode) struct lpfc_nodelist *pnode,
struct lpfc_nvme_ctrl_stat *cstat)
{ {
struct lpfc_hba *phba = vport->phba; struct lpfc_hba *phba = vport->phba;
struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd; struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
@ -1201,7 +1214,7 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
} else { } else {
wqe->fcp_iwrite.initial_xfer_len = 0; wqe->fcp_iwrite.initial_xfer_len = 0;
} }
atomic_inc(&phba->fc4NvmeOutputRequests); atomic_inc(&cstat->fc4NvmeOutputRequests);
} else { } else {
/* From the iread template, initialize words 7 - 11 */ /* From the iread template, initialize words 7 - 11 */
memcpy(&wqe->words[7], memcpy(&wqe->words[7],
@ -1214,13 +1227,13 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
/* Word 5 */ /* Word 5 */
wqe->fcp_iread.rsrvd5 = 0; wqe->fcp_iread.rsrvd5 = 0;
atomic_inc(&phba->fc4NvmeInputRequests); atomic_inc(&cstat->fc4NvmeInputRequests);
} }
} else { } else {
/* From the icmnd template, initialize words 4 - 11 */ /* From the icmnd template, initialize words 4 - 11 */
memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4], memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
sizeof(uint32_t) * 8); sizeof(uint32_t) * 8);
atomic_inc(&phba->fc4NvmeControlRequests); atomic_inc(&cstat->fc4NvmeControlRequests);
} }
/* /*
* Finish initializing those WQE fields that are independent * Finish initializing those WQE fields that are independent
@ -1400,7 +1413,9 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
{ {
int ret = 0; int ret = 0;
int expedite = 0; int expedite = 0;
int idx;
struct lpfc_nvme_lport *lport; struct lpfc_nvme_lport *lport;
struct lpfc_nvme_ctrl_stat *cstat;
struct lpfc_vport *vport; struct lpfc_vport *vport;
struct lpfc_hba *phba; struct lpfc_hba *phba;
struct lpfc_nodelist *ndlp; struct lpfc_nodelist *ndlp;
@ -1543,15 +1558,6 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
lpfc_ncmd->ndlp = ndlp; lpfc_ncmd->ndlp = ndlp;
lpfc_ncmd->start_time = jiffies; lpfc_ncmd->start_time = jiffies;
lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp);
ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
if (ret) {
ret = -ENOMEM;
goto out_free_nvme_buf;
}
atomic_inc(&ndlp->cmd_pending);
/* /*
* Issue the IO on the WQ indicated by index in the hw_queue_handle. * Issue the IO on the WQ indicated by index in the hw_queue_handle.
* This identfier was create in our hardware queue create callback * This identfier was create in our hardware queue create callback
@ -1560,7 +1566,18 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
* index to use and that they have affinitized a CPU to this hardware * index to use and that they have affinitized a CPU to this hardware
* queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ. * queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ.
*/ */
lpfc_ncmd->cur_iocbq.hba_wqidx = lpfc_queue_info->index; idx = lpfc_queue_info->index;
lpfc_ncmd->cur_iocbq.hba_wqidx = idx;
cstat = &lport->cstat[idx];
lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp, cstat);
ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
if (ret) {
ret = -ENOMEM;
goto out_free_nvme_buf;
}
atomic_inc(&ndlp->cmd_pending);
lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n", lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n",
lpfc_ncmd->cur_iocbq.sli4_xritag, lpfc_ncmd->cur_iocbq.sli4_xritag,
@ -1605,11 +1622,11 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
out_free_nvme_buf: out_free_nvme_buf:
if (lpfc_ncmd->nvmeCmd->sg_cnt) { if (lpfc_ncmd->nvmeCmd->sg_cnt) {
if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE) if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE)
atomic_dec(&phba->fc4NvmeOutputRequests); atomic_dec(&cstat->fc4NvmeOutputRequests);
else else
atomic_dec(&phba->fc4NvmeInputRequests); atomic_dec(&cstat->fc4NvmeInputRequests);
} else } else
atomic_dec(&phba->fc4NvmeControlRequests); atomic_dec(&cstat->fc4NvmeControlRequests);
lpfc_release_nvme_buf(phba, lpfc_ncmd); lpfc_release_nvme_buf(phba, lpfc_ncmd);
out_fail: out_fail:
return ret; return ret;
@ -2390,7 +2407,8 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
struct nvme_fc_port_info nfcp_info; struct nvme_fc_port_info nfcp_info;
struct nvme_fc_local_port *localport; struct nvme_fc_local_port *localport;
struct lpfc_nvme_lport *lport; struct lpfc_nvme_lport *lport;
int len; struct lpfc_nvme_ctrl_stat *cstat;
int len, i;
/* Initialize this localport instance. The vport wwn usage ensures /* Initialize this localport instance. The vport wwn usage ensures
* that NPIV is accounted for. * that NPIV is accounted for.
@ -2414,6 +2432,11 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1; lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel; lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel;
cstat = kmalloc((sizeof(struct lpfc_nvme_ctrl_stat) *
phba->cfg_nvme_io_channel), GFP_KERNEL);
if (!cstat)
return -ENOMEM;
/* localport is allocated from the stack, but the registration /* localport is allocated from the stack, but the registration
* call allocates heap memory as well as the private area. * call allocates heap memory as well as the private area.
*/ */
@ -2436,6 +2459,7 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
lport = (struct lpfc_nvme_lport *)localport->private; lport = (struct lpfc_nvme_lport *)localport->private;
vport->localport = localport; vport->localport = localport;
lport->vport = vport; lport->vport = vport;
lport->cstat = cstat;
vport->nvmei_support = 1; vport->nvmei_support = 1;
atomic_set(&lport->xmt_fcp_noxri, 0); atomic_set(&lport->xmt_fcp_noxri, 0);
@ -2449,6 +2473,16 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
atomic_set(&lport->cmpl_fcp_err, 0); atomic_set(&lport->cmpl_fcp_err, 0);
atomic_set(&lport->cmpl_ls_xb, 0); atomic_set(&lport->cmpl_ls_xb, 0);
atomic_set(&lport->cmpl_ls_err, 0); atomic_set(&lport->cmpl_ls_err, 0);
atomic_set(&lport->fc4NvmeLsRequests, 0);
atomic_set(&lport->fc4NvmeLsCmpls, 0);
for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
cstat = &lport->cstat[i];
atomic_set(&cstat->fc4NvmeInputRequests, 0);
atomic_set(&cstat->fc4NvmeOutputRequests, 0);
atomic_set(&cstat->fc4NvmeControlRequests, 0);
atomic_set(&cstat->fc4NvmeIoCmpls, 0);
}
/* Don't post more new bufs if repost already recovered /* Don't post more new bufs if repost already recovered
* the nvme sgls. * the nvme sgls.
@ -2458,6 +2492,8 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
phba->sli4_hba.nvme_xri_max); phba->sli4_hba.nvme_xri_max);
vport->phba->total_nvme_bufs += len; vport->phba->total_nvme_bufs += len;
} }
} else {
kfree(cstat);
} }
return ret; return ret;
@ -2520,6 +2556,7 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
#if (IS_ENABLED(CONFIG_NVME_FC)) #if (IS_ENABLED(CONFIG_NVME_FC))
struct nvme_fc_local_port *localport; struct nvme_fc_local_port *localport;
struct lpfc_nvme_lport *lport; struct lpfc_nvme_lport *lport;
struct lpfc_nvme_ctrl_stat *cstat;
int ret; int ret;
if (vport->nvmei_support == 0) if (vport->nvmei_support == 0)
@ -2528,6 +2565,7 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
localport = vport->localport; localport = vport->localport;
vport->localport = NULL; vport->localport = NULL;
lport = (struct lpfc_nvme_lport *)localport->private; lport = (struct lpfc_nvme_lport *)localport->private;
cstat = lport->cstat;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
"6011 Destroying NVME localport %p\n", "6011 Destroying NVME localport %p\n",
@ -2543,6 +2581,7 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
* indefinitely or succeeds * indefinitely or succeeds
*/ */
lpfc_nvme_lport_unreg_wait(vport, lport); lpfc_nvme_lport_unreg_wait(vport, lport);
kfree(cstat);
/* Regardless of the unregister upcall response, clear /* Regardless of the unregister upcall response, clear
* nvmei_support. All rports are unregistered and the * nvmei_support. All rports are unregistered and the

View File

@ -36,11 +36,21 @@ struct lpfc_nvme_qhandle {
uint32_t cpu_id; /* current cpu id at time of create */ uint32_t cpu_id; /* current cpu id at time of create */
}; };
struct lpfc_nvme_ctrl_stat {
atomic_t fc4NvmeInputRequests;
atomic_t fc4NvmeOutputRequests;
atomic_t fc4NvmeControlRequests;
atomic_t fc4NvmeIoCmpls;
};
/* Declare nvme-based local and remote port definitions. */ /* Declare nvme-based local and remote port definitions. */
struct lpfc_nvme_lport { struct lpfc_nvme_lport {
struct lpfc_vport *vport; struct lpfc_vport *vport;
struct completion lport_unreg_done; struct completion lport_unreg_done;
/* Add stats counters here */ /* Add stats counters here */
struct lpfc_nvme_ctrl_stat *cstat;
atomic_t fc4NvmeLsRequests;
atomic_t fc4NvmeLsCmpls;
atomic_t xmt_fcp_noxri; atomic_t xmt_fcp_noxri;
atomic_t xmt_fcp_bad_ndlp; atomic_t xmt_fcp_bad_ndlp;
atomic_t xmt_fcp_qdepth; atomic_t xmt_fcp_qdepth;