SUNRPC: Revert 579900670a ("svcrdma: Remove unused sc_pages field")

Pre-requisite for releasing pages in the send completion handler.
Reverted by hand: patch -R would not apply cleanly.

Reviewed-by: Jeff Layton <jlayton@kernel.org>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
This commit is contained in:
Chuck Lever 2023-06-12 10:10:07 -04:00
parent 6be7afcd92
commit a944209c11
2 changed files with 27 additions and 1 deletion

View File

@@ -158,8 +158,9 @@ struct svc_rdma_send_ctxt {
	struct xdr_buf sc_hdrbuf;
	struct xdr_stream sc_stream;
	void *sc_xprt_buf;
	int sc_page_count;
	int sc_cur_sge_no;
	struct page *sc_pages[RPCSVC_MAXPAGES];
	struct ib_sge sc_sges[];
};

View File

@@ -213,6 +213,7 @@ out:
	ctxt->sc_send_wr.num_sge = 0;
	ctxt->sc_cur_sge_no = 0;
	ctxt->sc_page_count = 0;
	return ctxt;

out_empty:
@@ -227,6 +228,8 @@ out_empty:
 * svc_rdma_send_ctxt_put - Return send_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 *
 * Pages left in sc_pages are DMA unmapped and released.
 */
void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_send_ctxt *ctxt)
@@ -234,6 +237,9 @@ void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
	struct ib_device *device = rdma->sc_cm_id->device;
	unsigned int i;

	for (i = 0; i < ctxt->sc_page_count; ++i)
		put_page(ctxt->sc_pages[i]);

	/* The first SGE contains the transport header, which
	 * remains mapped until @ctxt is destroyed.
	 */
@@ -798,6 +804,25 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
				     svc_rdma_xb_dma_map, &args);
}
/* The svc_rqst and all resources it owns are released as soon as
 * svc_rdma_sendto returns. Transfer pages under I/O to the ctxt
 * so they are released by the Send completion handler.
 */
static inline void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
					  struct svc_rdma_send_ctxt *ctxt)
{
	/* Count of response pages between rq_respages and rq_next_page. */
	int i, pages = rqstp->rq_next_page - rqstp->rq_respages;

	/* NOTE(review): sc_pages is filled starting at index 0, so the
	 * "+=" accumulation is meaningful only if sc_page_count is 0 on
	 * entry; a second call on the same ctxt would overwrite the
	 * earlier entries. Confirm callers reset sc_page_count when the
	 * ctxt is (re)initialized.
	 */
	ctxt->sc_page_count += pages;
	for (i = 0; i < pages; i++) {
		/* Move each page reference into the ctxt and clear the
		 * rq_respages slot, transferring ownership to the ctxt.
		 */
		ctxt->sc_pages[i] = rqstp->rq_respages[i];
		rqstp->rq_respages[i] = NULL;
	}

	/* Prevent svc_xprt_release from releasing pages in rq_pages */
	rqstp->rq_next_page = rqstp->rq_respages;
}
/* Prepare the portion of the RPC Reply that will be transmitted
 * via RDMA Send. The RPC-over-RDMA transport header is prepared
 * in sc_sges[0], and the RPC xdr_buf is prepared in following sges.