xprtrdma: Remove imul instructions from chunk list encoders
Re-arrange the pointer arithmetic in the chunk list encoders to
eliminate several more integer multiplication instructions during
Transport Header encoding.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
commit 6748b0caf8
parent 28d9d56f4c
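The rationale, in brief: while ro_map reports only a count of consumed segments, the encoders in rpc_rdma.c must advance their cursor with "seg += n", and scaling n by sizeof(struct rpcrdma_mr_seg) is where the compiler can emit an imul. Returning the updated segment pointer from ro_map moves that advance into the mapping function, which already steps through the array element by element. The standalone sketch below is hypothetical (the "segment" type and the walk/consume helpers are invented, not code from this commit) and just contrasts the two calling styles:

/* Hypothetical sketch, not code from this commit: the "segment" type and the
 * walk/consume helpers are invented to illustrate why handing back an updated
 * pointer, rather than an element count, removes an integer multiply from the
 * caller's encoding loop.  Assume the element size is not a power of two.
 */
#include <stdio.h>

struct segment {
	const char *base;
	const char *cookie;
	unsigned int len;
	unsigned int offset;	/* element size assumed not a power of two */
};

/* Count-returning style: the helper walks the array but reports only how
 * many elements it consumed.  The caller then re-scales that count with
 * "seg += n", which the compiler lowers to n * sizeof(struct segment) --
 * typically a multiply when the size is not a power of two.
 */
static int consume_count(struct segment *seg, int nsegs)
{
	int i;

	for (i = 0; i < nsegs && i < 2; i++, seg++)
		;		/* pretend we registered up to two segments */
	return i;
}

static void walk_count(struct segment *seg, int nsegs)
{
	while (nsegs) {
		int n = consume_count(seg, nsegs);

		seg += n;	/* pointer scaling: the multiply hides here */
		nsegs -= n;
	}
}

/* Pointer-returning style: the helper already ends up positioned on the
 * next unconsumed element, so it returns that pointer and the caller never
 * scales a count.
 */
static struct segment *consume_ptr(struct segment *seg, int nsegs, int *consumed)
{
	int i;

	for (i = 0; i < nsegs && i < 2; i++, seg++)
		;
	*consumed = i;
	return seg;		/* advanced pointer falls out of the loop for free */
}

static void walk_ptr(struct segment *seg, int nsegs)
{
	while (nsegs) {
		int consumed;

		seg = consume_ptr(seg, nsegs, &consumed);
		nsegs -= consumed;	/* plain subtraction, no multiply */
	}
}

int main(void)
{
	struct segment segs[8] = { { 0 } };

	walk_count(segs, 8);
	walk_ptr(segs, 8);
	printf("both walks completed\n");
	return 0;
}
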
--- a/net/sunrpc/xprtrdma/fmr_ops.c
+++ b/net/sunrpc/xprtrdma/fmr_ops.c
@@ -177,7 +177,7 @@ fmr_op_maxpages(struct rpcrdma_xprt *r_xprt)
 /* Use the ib_map_phys_fmr() verb to register a memory region
  * for remote access via RDMA READ or RDMA WRITE.
  */
-static int
+static struct rpcrdma_mr_seg *
 fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 	   int nsegs, bool writing, struct rpcrdma_mw **out)
 {
@@ -188,7 +188,7 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 
 	mw = rpcrdma_get_mw(r_xprt);
 	if (!mw)
-		return -ENOBUFS;
+		return ERR_PTR(-ENOBUFS);
 
 	pageoff = offset_in_page(seg1->mr_offset);
 	seg1->mr_offset -= pageoff;	/* start of page */
@@ -232,13 +232,13 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 	mw->mw_offset = dma_pages[0] + pageoff;
 
 	*out = mw;
-	return mw->mw_nents;
+	return seg;
 
 out_dmamap_err:
 	pr_err("rpcrdma: failed to DMA map sg %p sg_nents %d\n",
 	       mw->mw_sg, i);
 	rpcrdma_put_mw(r_xprt, mw);
-	return -EIO;
+	return ERR_PTR(-EIO);
 
 out_maperr:
 	pr_err("rpcrdma: ib_map_phys_fmr %u@0x%llx+%i (%d) status %i\n",
@@ -247,7 +247,7 @@ out_maperr:
 	ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
 			mw->mw_sg, mw->mw_nents, mw->mw_dir);
 	rpcrdma_put_mw(r_xprt, mw);
-	return -EIO;
+	return ERR_PTR(-EIO);
 }
 
 /* Invalidate all memory regions that were registered for "req".

--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -344,7 +344,7 @@ frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
 /* Post a REG_MR Work Request to register a memory region
  * for remote access via RDMA READ or RDMA WRITE.
  */
-static int
+static struct rpcrdma_mr_seg *
 frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 	    int nsegs, bool writing, struct rpcrdma_mw **out)
 {
@@ -364,7 +364,7 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 			rpcrdma_defer_mr_recovery(mw);
 		mw = rpcrdma_get_mw(r_xprt);
 		if (!mw)
-			return -ENOBUFS;
+			return ERR_PTR(-ENOBUFS);
 	} while (mw->frmr.fr_state != FRMR_IS_INVALID);
 	frmr = &mw->frmr;
 	frmr->fr_state = FRMR_IS_VALID;
@@ -429,25 +429,25 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 	mw->mw_offset = mr->iova;
 
 	*out = mw;
-	return mw->mw_nents;
+	return seg;
 
 out_dmamap_err:
 	pr_err("rpcrdma: failed to DMA map sg %p sg_nents %d\n",
 	       mw->mw_sg, i);
 	frmr->fr_state = FRMR_IS_INVALID;
 	rpcrdma_put_mw(r_xprt, mw);
-	return -EIO;
+	return ERR_PTR(-EIO);
 
 out_mapmr_err:
 	pr_err("rpcrdma: failed to map mr %p (%d/%d)\n",
 	       frmr->fr_mr, n, mw->mw_nents);
 	rpcrdma_defer_mr_recovery(mw);
-	return -EIO;
+	return ERR_PTR(-EIO);
 
 out_senderr:
 	pr_err("rpcrdma: FRMR registration ib_post_send returned %i\n", rc);
 	rpcrdma_defer_mr_recovery(mw);
-	return -ENOTCONN;
+	return ERR_PTR(-ENOTCONN);
 }
 
 /* Invalidate all memory regions that were registered for "req".

--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -349,7 +349,7 @@ rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
 	struct rpcrdma_mr_seg *seg;
 	struct rpcrdma_mw *mw;
 	unsigned int pos;
-	int n, nsegs;
+	int nsegs;
 
 	pos = rqst->rq_snd_buf.head[0].iov_len;
 	if (rtype == rpcrdma_areadch)
@@ -361,10 +361,10 @@ rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
 		return nsegs;
 
 	do {
-		n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
-						 false, &mw);
-		if (n < 0)
-			return n;
+		seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
+						   false, &mw);
+		if (IS_ERR(seg))
+			return PTR_ERR(seg);
 		rpcrdma_push_mw(mw, &req->rl_registered);
 
 		if (encode_read_segment(xdr, mw, pos) < 0)
@@ -373,11 +373,10 @@ rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
 		dprintk("RPC: %5u %s: pos %u %u@0x%016llx:0x%08x (%s)\n",
 			rqst->rq_task->tk_pid, __func__, pos,
 			mw->mw_length, (unsigned long long)mw->mw_offset,
-			mw->mw_handle, n < nsegs ? "more" : "last");
+			mw->mw_handle, mw->mw_nents < nsegs ? "more" : "last");
 
 		r_xprt->rx_stats.read_chunk_count++;
-		seg += n;
-		nsegs -= n;
+		nsegs -= mw->mw_nents;
 	} while (nsegs);
 
 	return 0;
@@ -405,7 +404,7 @@ rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
 	struct xdr_stream *xdr = &req->rl_stream;
 	struct rpcrdma_mr_seg *seg;
 	struct rpcrdma_mw *mw;
-	int n, nsegs, nchunks;
+	int nsegs, nchunks;
 	__be32 *segcount;
 
 	seg = req->rl_segments;
@@ -424,10 +423,10 @@ rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
 
 	nchunks = 0;
 	do {
-		n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
-						 true, &mw);
-		if (n < 0)
-			return n;
+		seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
+						   true, &mw);
+		if (IS_ERR(seg))
+			return PTR_ERR(seg);
 		rpcrdma_push_mw(mw, &req->rl_registered);
 
 		if (encode_rdma_segment(xdr, mw) < 0)
@@ -436,13 +435,12 @@ rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
 		dprintk("RPC: %5u %s: %u@0x016%llx:0x%08x (%s)\n",
 			rqst->rq_task->tk_pid, __func__,
 			mw->mw_length, (unsigned long long)mw->mw_offset,
-			mw->mw_handle, n < nsegs ? "more" : "last");
+			mw->mw_handle, mw->mw_nents < nsegs ? "more" : "last");
 
 		r_xprt->rx_stats.write_chunk_count++;
 		r_xprt->rx_stats.total_rdma_request += seg->mr_len;
 		nchunks++;
-		seg += n;
-		nsegs -= n;
+		nsegs -= mw->mw_nents;
 	} while (nsegs);
 
 	/* Update count of segments in this Write chunk */
@@ -470,7 +468,7 @@ rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
 	struct xdr_stream *xdr = &req->rl_stream;
 	struct rpcrdma_mr_seg *seg;
 	struct rpcrdma_mw *mw;
-	int n, nsegs, nchunks;
+	int nsegs, nchunks;
 	__be32 *segcount;
 
 	seg = req->rl_segments;
@@ -487,10 +485,10 @@ rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
 
 	nchunks = 0;
 	do {
-		n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
-						 true, &mw);
-		if (n < 0)
-			return n;
+		seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
+						   true, &mw);
+		if (IS_ERR(seg))
+			return PTR_ERR(seg);
 		rpcrdma_push_mw(mw, &req->rl_registered);
 
 		if (encode_rdma_segment(xdr, mw) < 0)
@@ -499,13 +497,12 @@ rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
 		dprintk("RPC: %5u %s: %u@0x%016llx:0x%08x (%s)\n",
 			rqst->rq_task->tk_pid, __func__,
 			mw->mw_length, (unsigned long long)mw->mw_offset,
-			mw->mw_handle, n < nsegs ? "more" : "last");
+			mw->mw_handle, mw->mw_nents < nsegs ? "more" : "last");
 
 		r_xprt->rx_stats.reply_chunk_count++;
 		r_xprt->rx_stats.total_rdma_request += seg->mr_len;
 		nchunks++;
-		seg += n;
-		nsegs -= n;
+		nsegs -= mw->mw_nents;
 	} while (nsegs);
 
 	/* Update count of segments in the Reply chunk */

--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -466,7 +466,8 @@ struct rpcrdma_stats {
  */
 struct rpcrdma_xprt;
 struct rpcrdma_memreg_ops {
-	int		(*ro_map)(struct rpcrdma_xprt *,
+	struct rpcrdma_mr_seg *
+			(*ro_map)(struct rpcrdma_xprt *,
 				  struct rpcrdma_mr_seg *, int, bool,
 				  struct rpcrdma_mw **);
 	void		(*ro_unmap_sync)(struct rpcrdma_xprt *,