xprtrdma: Don't defer MR recovery if ro_map fails
Deferred MR recovery does a DMA-unmapping of the MW. However, ro_map invokes rpcrdma_defer_mr_recovery in some error cases where the MW has not even been DMA-mapped yet.

Avoid a DMA-unmapping error by replacing rpcrdma_defer_mr_recovery with rpcrdma_put_mw in those cases.

Also note that if ib_dma_map_sg is asked to map 0 nents, it will return 0. So the extra "if (i == 0)" check is no longer needed.

Fixes: 42fe28f607 ("xprtrdma: Do not leak an MW during a DMA ...")
Fixes: 505bbe64dd ("xprtrdma: Refactor MR recovery work queues")
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent 8d75483a23
commit 1f541895da
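To make the control-flow change easier to follow before reading the diffs, here is a minimal, self-contained user-space sketch of the corrected error flow. Every name prefixed stub_ is a hypothetical stand-in for the kernel API (ib_dma_map_sg, rpcrdma_put_mw, and so on); only the ordering of map, register, and cleanup mirrors the patch below.

	/* Hedged sketch, not kernel code: models why the unmapped-failure
	 * path must put the MW back instead of deferring recovery (which
	 * would DMA-unmap an MW that was never mapped).
	 */
	#include <stdio.h>

	struct mw {
		int mw_nents;	/* entries successfully DMA-mapped */
		int mapped;	/* nonzero once DMA mapping succeeded */
	};

	/* Stub for ib_dma_map_sg(): asked to map 0 nents it returns 0,
	 * which is why the separate "if (i == 0)" guard could be dropped. */
	static int stub_dma_map_sg(struct mw *mw, int nents)
	{
		mw->mw_nents = nents;	/* pretend every entry maps */
		mw->mapped = (nents > 0);
		return nents;
	}

	static void stub_dma_unmap_sg(struct mw *mw)
	{
		mw->mapped = 0;
	}

	/* Stub for rpcrdma_put_mw(): return the MW to the free list
	 * without touching its DMA state. */
	static void stub_put_mw(struct mw *mw)
	{
		(void)mw;
	}

	static int ro_map_sketch(struct mw *mw, int nsegs, int register_ok)
	{
		/* Map first; a zero return covers both "nothing to map"
		 * and a genuine mapping failure. */
		if (!stub_dma_map_sg(mw, nsegs))
			goto out_dmamap_err;

		if (!register_ok)	/* models MR registration failing */
			goto out_maperr;
		return mw->mw_nents;

	out_dmamap_err:
		/* The MW was never DMA-mapped: put it back, do NOT unmap.
		 * (The old code deferred recovery here, which unmapped.) */
		stub_put_mw(mw);
		return -5;	/* stands in for -EIO */

	out_maperr:
		/* The MW is mapped: unmap it explicitly, then put it back. */
		stub_dma_unmap_sg(mw);
		stub_put_mw(mw);
		return -5;	/* stands in for -EIO */
	}

	int main(void)
	{
		struct mw mw = { 0, 0 };

		printf("zero nents -> %d\n", ro_map_sketch(&mw, 0, 1));
		printf("map ok     -> %d\n", ro_map_sketch(&mw, 4, 1));
		printf("reg fails  -> %d\n", ro_map_sketch(&mw, 4, 0));
		return 0;
	}

Note that in the frwr case the registration-failure path (out_mapmr_err) still defers recovery, as the diff below shows: there the MR has already been DMA-mapped, so the deferred worker's unmap is correct. The actual kernel changes follow.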
--- a/net/sunrpc/xprtrdma/fmr_ops.c
+++ b/net/sunrpc/xprtrdma/fmr_ops.c
@@ -213,13 +213,11 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
 			break;
 	}
-	mw->mw_nents = i;
 	mw->mw_dir = rpcrdma_data_dir(writing);
-	if (i == 0)
-		goto out_dmamap_err;
 
-	if (!ib_dma_map_sg(r_xprt->rx_ia.ri_device,
-			   mw->mw_sg, mw->mw_nents, mw->mw_dir))
+	mw->mw_nents = ib_dma_map_sg(r_xprt->rx_ia.ri_device,
+				     mw->mw_sg, i, mw->mw_dir);
+	if (!mw->mw_nents)
 		goto out_dmamap_err;
 
 	for (i = 0, dma_pages = mw->fmr.fm_physaddrs; i < mw->mw_nents; i++)
@@ -237,16 +235,18 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 	return mw->mw_nents;
 
 out_dmamap_err:
-	pr_err("rpcrdma: failed to dma map sg %p sg_nents %u\n",
-	       mw->mw_sg, mw->mw_nents);
-	rpcrdma_defer_mr_recovery(mw);
+	pr_err("rpcrdma: failed to DMA map sg %p sg_nents %d\n",
+	       mw->mw_sg, i);
+	rpcrdma_put_mw(r_xprt, mw);
 	return -EIO;
 
 out_maperr:
 	pr_err("rpcrdma: ib_map_phys_fmr %u@0x%llx+%i (%d) status %i\n",
 	       len, (unsigned long long)dma_pages[0],
 	       pageoff, mw->mw_nents, rc);
-	rpcrdma_defer_mr_recovery(mw);
+	ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
+			mw->mw_sg, mw->mw_nents, mw->mw_dir);
+	rpcrdma_put_mw(r_xprt, mw);
 	return -EIO;
 }
 
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -355,7 +355,7 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 	struct ib_mr *mr;
 	struct ib_reg_wr *reg_wr;
 	struct ib_send_wr *bad_wr;
-	int rc, i, n, dma_nents;
+	int rc, i, n;
 	u8 key;
 
 	mw = NULL;
@@ -391,14 +391,10 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
 			break;
 	}
-	mw->mw_nents = i;
 	mw->mw_dir = rpcrdma_data_dir(writing);
-	if (i == 0)
-		goto out_dmamap_err;
 
-	dma_nents = ib_dma_map_sg(ia->ri_device,
-				  mw->mw_sg, mw->mw_nents, mw->mw_dir);
-	if (!dma_nents)
+	mw->mw_nents = ib_dma_map_sg(ia->ri_device, mw->mw_sg, i, mw->mw_dir);
+	if (!mw->mw_nents)
 		goto out_dmamap_err;
 
 	n = ib_map_mr_sg(mr, mw->mw_sg, mw->mw_nents, NULL, PAGE_SIZE);
@@ -436,13 +432,14 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 	return mw->mw_nents;
 
 out_dmamap_err:
-	pr_err("rpcrdma: failed to dma map sg %p sg_nents %u\n",
-	       mw->mw_sg, mw->mw_nents);
-	rpcrdma_defer_mr_recovery(mw);
+	pr_err("rpcrdma: failed to DMA map sg %p sg_nents %d\n",
+	       mw->mw_sg, i);
+	frmr->fr_state = FRMR_IS_INVALID;
+	rpcrdma_put_mw(r_xprt, mw);
 	return -EIO;
 
 out_mapmr_err:
-	pr_err("rpcrdma: failed to map mr %p (%u/%u)\n",
+	pr_err("rpcrdma: failed to map mr %p (%d/%d)\n",
 	       frmr->fr_mr, n, mw->mw_nents);
 	rpcrdma_defer_mr_recovery(mw);
 	return -EIO;