svcrdma: Poll CQs in "workqueue" mode
svcrdma calls svc_xprt_put() in its completion handlers, which currently run in IRQ context. However, svc_xprt_put() is meant to be invoked in process context: once the last transport reference is gone, it directly calls a transport release function that expects to run in process context.

Change the CQ polling modes to IB_POLL_WORKQUEUE so that svcrdma invokes svc_xprt_put() only in process context. As an added benefit, bottom-half-disabled spin locking can be eliminated from the I/O paths.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
commit 81fa3275f9
parent a3ab867fa6
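The substance of the change is the final argument to ib_alloc_cq(), which selects how completions are dispatched: under IB_POLL_WORKQUEUE, each CQE's ->done callback is invoked from a kworker rather than from softirq. A minimal sketch of the pattern this enables (the handler name is illustrative, not code from the patch):

/* Illustrative only: with IB_POLL_WORKQUEUE, this ->done callback
 * runs from a kworker thread, i.e. in process context, so dropping
 * what may be the last transport reference is safe here. */
static void example_wc_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;

	/* ... complete the I/O described by wc ... */

	svc_xprt_put(&rdma->sc_xprt);	/* may invoke the release path */
}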
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c

@@ -606,12 +606,12 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
 
 	dprintk("svcrdma: rqstp=%p\n", rqstp);
 
-	spin_lock_bh(&rdma_xprt->sc_rq_dto_lock);
+	spin_lock(&rdma_xprt->sc_rq_dto_lock);
 	if (!list_empty(&rdma_xprt->sc_read_complete_q)) {
 		ctxt = list_first_entry(&rdma_xprt->sc_read_complete_q,
 					struct svc_rdma_op_ctxt, list);
 		list_del(&ctxt->list);
-		spin_unlock_bh(&rdma_xprt->sc_rq_dto_lock);
+		spin_unlock(&rdma_xprt->sc_rq_dto_lock);
 		rdma_read_complete(rqstp, ctxt);
 		goto complete;
 	} else if (!list_empty(&rdma_xprt->sc_rq_dto_q)) {
@@ -623,7 +623,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
 		clear_bit(XPT_DATA, &xprt->xpt_flags);
 		ctxt = NULL;
 	}
-	spin_unlock_bh(&rdma_xprt->sc_rq_dto_lock);
+	spin_unlock(&rdma_xprt->sc_rq_dto_lock);
 	if (!ctxt) {
 		/* This is the EAGAIN path. The svc_recv routine will
 		 * return -EAGAIN, the nfsd thread will go to call into
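The lock conversions above follow directly from the CQ mode change: while receive completions arrived in softirq context, an nfsd thread taking sc_rq_dto_lock had to disable bottom halves to avoid deadlocking against a completion on the same CPU. With completions now dispatched from a workqueue, every contender runs in process context, so plain spin_lock() suffices, as in this hypothetical dequeue helper:

/* Hypothetical helper illustrating the new locking rule: all
 * contenders for sc_rq_dto_lock (nfsd threads and the CQ workqueue)
 * now run in process context, so _bh protection is unnecessary. */
static struct svc_rdma_op_ctxt *
example_dequeue_dto(struct svcxprt_rdma *rdma_xprt)
{
	struct svc_rdma_op_ctxt *ctxt = NULL;

	spin_lock(&rdma_xprt->sc_rq_dto_lock);	/* was spin_lock_bh() */
	if (!list_empty(&rdma_xprt->sc_rq_dto_q)) {
		ctxt = list_first_entry(&rdma_xprt->sc_rq_dto_q,
					struct svc_rdma_op_ctxt, list);
		list_del(&ctxt->list);
	}
	spin_unlock(&rdma_xprt->sc_rq_dto_lock);
	return ctxt;
}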
net/sunrpc/xprtrdma/svc_rdma_transport.c

@@ -188,7 +188,7 @@ struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
 {
 	struct svc_rdma_op_ctxt *ctxt = NULL;
 
-	spin_lock_bh(&xprt->sc_ctxt_lock);
+	spin_lock(&xprt->sc_ctxt_lock);
 	xprt->sc_ctxt_used++;
 	if (list_empty(&xprt->sc_ctxts))
 		goto out_empty;
@@ -196,7 +196,7 @@ struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
 	ctxt = list_first_entry(&xprt->sc_ctxts,
 				struct svc_rdma_op_ctxt, list);
 	list_del(&ctxt->list);
-	spin_unlock_bh(&xprt->sc_ctxt_lock);
+	spin_unlock(&xprt->sc_ctxt_lock);
 
 out:
 	ctxt->count = 0;
@@ -208,15 +208,15 @@ out_empty:
 	/* Either pre-allocation missed the mark, or send
 	 * queue accounting is broken.
 	 */
-	spin_unlock_bh(&xprt->sc_ctxt_lock);
+	spin_unlock(&xprt->sc_ctxt_lock);
 
 	ctxt = alloc_ctxt(xprt, GFP_NOIO);
 	if (ctxt)
 		goto out;
 
-	spin_lock_bh(&xprt->sc_ctxt_lock);
+	spin_lock(&xprt->sc_ctxt_lock);
 	xprt->sc_ctxt_used--;
-	spin_unlock_bh(&xprt->sc_ctxt_lock);
+	spin_unlock(&xprt->sc_ctxt_lock);
 	WARN_ONCE(1, "svcrdma: empty RDMA ctxt list?\n");
 	return NULL;
 }
@@ -253,10 +253,10 @@ void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
 	for (i = 0; i < ctxt->count; i++)
 		put_page(ctxt->pages[i]);
 
-	spin_lock_bh(&xprt->sc_ctxt_lock);
+	spin_lock(&xprt->sc_ctxt_lock);
 	xprt->sc_ctxt_used--;
 	list_add(&ctxt->list, &xprt->sc_ctxts);
-	spin_unlock_bh(&xprt->sc_ctxt_lock);
+	spin_unlock(&xprt->sc_ctxt_lock);
 }
 
 static void svc_rdma_destroy_ctxts(struct svcxprt_rdma *xprt)
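A side note on the out_empty path above: sc_ctxt_lock is dropped around alloc_ctxt() because the allocation may sleep, which is forbidden under any spinlock flavor, and GFP_NOIO keeps memory reclaim from recursing back into the I/O path. A sketch of that slow path with explanatory comments (the function name is hypothetical):

/* Hypothetical sketch of the slow path above: drop the lock to
 * allocate, and undo the optimistic sc_ctxt_used bump on failure. */
static struct svc_rdma_op_ctxt *example_out_empty(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_op_ctxt *ctxt;

	spin_unlock(&xprt->sc_ctxt_lock);	/* cannot sleep under a spinlock */
	ctxt = alloc_ctxt(xprt, GFP_NOIO);	/* NOIO: reclaim must not re-enter I/O */
	if (ctxt)
		return ctxt;

	spin_lock(&xprt->sc_ctxt_lock);
	xprt->sc_ctxt_used--;			/* undo the earlier increment */
	spin_unlock(&xprt->sc_ctxt_lock);
	return NULL;
}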
@@ -921,14 +921,14 @@ struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *rdma)
 {
 	struct svc_rdma_fastreg_mr *frmr = NULL;
 
-	spin_lock_bh(&rdma->sc_frmr_q_lock);
+	spin_lock(&rdma->sc_frmr_q_lock);
 	if (!list_empty(&rdma->sc_frmr_q)) {
 		frmr = list_entry(rdma->sc_frmr_q.next,
 				  struct svc_rdma_fastreg_mr, frmr_list);
 		list_del_init(&frmr->frmr_list);
 		frmr->sg_nents = 0;
 	}
-	spin_unlock_bh(&rdma->sc_frmr_q_lock);
+	spin_unlock(&rdma->sc_frmr_q_lock);
 	if (frmr)
 		return frmr;
 
@@ -941,10 +941,10 @@ void svc_rdma_put_frmr(struct svcxprt_rdma *rdma,
 	if (frmr) {
 		ib_dma_unmap_sg(rdma->sc_cm_id->device,
 				frmr->sg, frmr->sg_nents, frmr->direction);
-		spin_lock_bh(&rdma->sc_frmr_q_lock);
+		spin_lock(&rdma->sc_frmr_q_lock);
 		WARN_ON_ONCE(!list_empty(&frmr->frmr_list));
 		list_add(&frmr->frmr_list, &rdma->sc_frmr_q);
-		spin_unlock_bh(&rdma->sc_frmr_q_lock);
+		spin_unlock(&rdma->sc_frmr_q_lock);
 	}
 }
 
@@ -1026,13 +1026,13 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 		goto errout;
 	}
 	newxprt->sc_sq_cq = ib_alloc_cq(dev, newxprt, newxprt->sc_sq_depth,
-					0, IB_POLL_SOFTIRQ);
+					0, IB_POLL_WORKQUEUE);
 	if (IS_ERR(newxprt->sc_sq_cq)) {
 		dprintk("svcrdma: error creating SQ CQ for connect request\n");
 		goto errout;
 	}
 	newxprt->sc_rq_cq = ib_alloc_cq(dev, newxprt, newxprt->sc_rq_depth,
-					0, IB_POLL_SOFTIRQ);
+					0, IB_POLL_WORKQUEUE);
 	if (IS_ERR(newxprt->sc_rq_cq)) {
 		dprintk("svcrdma: error creating RQ CQ for connect request\n");
 		goto errout;
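For reference, these are the completion-delivery modes ib_alloc_cq() accepted in this kernel era (a summary for readers, not part of the patch):

/* From enum ib_poll_context in <rdma/ib_verbs.h> (circa v4.10):
 *   IB_POLL_DIRECT    - no scheduling; the caller polls the CQ itself
 *   IB_POLL_SOFTIRQ   - completions dispatched via IRQ poll (softirq)
 *   IB_POLL_WORKQUEUE - completions dispatched from a workqueue, so
 *                       handlers run in process context and may safely
 *                       call svc_xprt_put(), as this patch relies on.
 */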