IB: new common API for draining queues
Add provider-specific drain_sq/drain_rq functions for providers that need special drain logic.

Add static functions __ib_drain_sq() and __ib_drain_rq(), which post no-op WRs to the SQ or RQ and block until their completions have been processed. This ensures that the application's completions for work requests posted prior to the drain work request have all been processed.

Add API functions ib_drain_sq(), ib_drain_rq(), and ib_drain_qp().

For the drain logic to work, the caller must:

- ensure there is room in the CQ(s) and QP for the drain work request and completion;

- allocate the CQ using ib_alloc_cq(), with a CQ poll context other than IB_POLL_DIRECT;

- ensure that no other context is posting WRs concurrently. Otherwise the drain is not guaranteed.

Reviewed-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent fc77dbd34c
commit 765d67748b
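For context, a minimal usage sketch (not part of this commit) of how a kernel ULP might call the new API when tearing down a connection. The helper name my_ulp_teardown_qp is hypothetical; the sketch assumes the QP's CQs were allocated with ib_alloc_cq() using a poll context other than IB_POLL_DIRECT, and that no other context is still posting WRs.

#include <rdma/ib_verbs.h>

/* Hypothetical ULP helper: flush and reap all outstanding work requests
 * before the QP is destroyed, so no completion handler can run against
 * freed resources afterwards.
 */
static void my_ulp_teardown_qp(struct ib_qp *qp)
{
        /* Moves the QP to the error state (generic path) or invokes the
         * provider-specific drain hooks, posts drain WRs on the SQ and RQ,
         * and blocks until their completions have been processed.
         */
        ib_drain_qp(qp);

        ib_destroy_qp(qp);
}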
drivers/infiniband/core/verbs.c
@@ -1657,3 +1657,167 @@ next_page:
         return i;
 }
 EXPORT_SYMBOL(ib_sg_to_pages);
+
+struct ib_drain_cqe {
+        struct ib_cqe        cqe;
+        struct completion done;
+};
+
+static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+        struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
+                                                cqe);
+
+        complete(&cqe->done);
+}
+
+/*
+ * Post a WR and block until its completion is reaped for the SQ.
+ */
+static void __ib_drain_sq(struct ib_qp *qp)
+{
+        struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+        struct ib_drain_cqe sdrain;
+        struct ib_send_wr swr = {}, *bad_swr;
+        int ret;
+
+        if (qp->send_cq->poll_ctx == IB_POLL_DIRECT) {
+                WARN_ONCE(qp->send_cq->poll_ctx == IB_POLL_DIRECT,
+                          "IB_POLL_DIRECT poll_ctx not supported for drain\n");
+                return;
+        }
+
+        swr.wr_cqe = &sdrain.cqe;
+        sdrain.cqe.done = ib_drain_qp_done;
+        init_completion(&sdrain.done);
+
+        ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+        if (ret) {
+                WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
+                return;
+        }
+
+        ret = ib_post_send(qp, &swr, &bad_swr);
+        if (ret) {
+                WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
+                return;
+        }
+
+        wait_for_completion(&sdrain.done);
+}
+
+/*
+ * Post a WR and block until its completion is reaped for the RQ.
+ */
+static void __ib_drain_rq(struct ib_qp *qp)
+{
+        struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+        struct ib_drain_cqe rdrain;
+        struct ib_recv_wr rwr = {}, *bad_rwr;
+        int ret;
+
+        if (qp->recv_cq->poll_ctx == IB_POLL_DIRECT) {
+                WARN_ONCE(qp->recv_cq->poll_ctx == IB_POLL_DIRECT,
+                          "IB_POLL_DIRECT poll_ctx not supported for drain\n");
+                return;
+        }
+
+        rwr.wr_cqe = &rdrain.cqe;
+        rdrain.cqe.done = ib_drain_qp_done;
+        init_completion(&rdrain.done);
+
+        ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+        if (ret) {
+                WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
+                return;
+        }
+
+        ret = ib_post_recv(qp, &rwr, &bad_rwr);
+        if (ret) {
+                WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
+                return;
+        }
+
+        wait_for_completion(&rdrain.done);
+}
+
+/**
+ * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
+ *                 application.
+ * @qp:            queue pair to drain
+ *
+ * If the device has a provider-specific drain function, then
+ * call that.  Otherwise call the generic drain function
+ * __ib_drain_sq().
+ *
+ * The caller must:
+ *
+ * ensure there is room in the CQ and SQ for the drain work request and
+ * completion.
+ *
+ * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be
+ * IB_POLL_DIRECT.
+ *
+ * ensure that there are no other contexts that are posting WRs concurrently.
+ * Otherwise the drain is not guaranteed.
+ */
+void ib_drain_sq(struct ib_qp *qp)
+{
+        if (qp->device->drain_sq)
+                qp->device->drain_sq(qp);
+        else
+                __ib_drain_sq(qp);
+}
+EXPORT_SYMBOL(ib_drain_sq);
+
+/**
+ * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
+ *                 application.
+ * @qp:            queue pair to drain
+ *
+ * If the device has a provider-specific drain function, then
+ * call that.  Otherwise call the generic drain function
+ * __ib_drain_rq().
+ *
+ * The caller must:
+ *
+ * ensure there is room in the CQ and RQ for the drain work request and
+ * completion.
+ *
+ * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be
+ * IB_POLL_DIRECT.
+ *
+ * ensure that there are no other contexts that are posting WRs concurrently.
+ * Otherwise the drain is not guaranteed.
+ */
+void ib_drain_rq(struct ib_qp *qp)
+{
+        if (qp->device->drain_rq)
+                qp->device->drain_rq(qp);
+        else
+                __ib_drain_rq(qp);
+}
+EXPORT_SYMBOL(ib_drain_rq);
+
+/**
+ * ib_drain_qp() - Block until all CQEs have been consumed by the
+ *                 application on both the RQ and SQ.
+ * @qp:            queue pair to drain
+ *
+ * The caller must:
+ *
+ * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
+ * and completions.
+ *
+ * allocate the CQs using ib_alloc_cq() and the CQ poll context cannot be
+ * IB_POLL_DIRECT.
+ *
+ * ensure that there are no other contexts that are posting WRs concurrently.
+ * Otherwise the drain is not guaranteed.
+ */
+void ib_drain_qp(struct ib_qp *qp)
+{
+        ib_drain_sq(qp);
+        ib_drain_rq(qp);
+}
+EXPORT_SYMBOL(ib_drain_qp);
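The kernel-doc requirements above have a direct counterpart in how the consumer sets up its CQs. A minimal, hypothetical sketch of a conforming allocation (the helper name and the one-extra-CQE sizing policy are illustrative, not mandated by this commit):

#include <rdma/ib_verbs.h>

/* Hypothetical helper: allocate a CQ that the drain logic can work with. */
static struct ib_cq *my_alloc_drain_capable_cq(struct ib_device *dev, int nr_cqe)
{
        /* Reserve room for the drain completion and use a poll context
         * other than IB_POLL_DIRECT (softirq polling here).
         */
        return ib_alloc_cq(dev, NULL, nr_cqe + 1, 0, IB_POLL_SOFTIRQ);
}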
include/rdma/ib_verbs.h
@@ -1846,6 +1846,8 @@ struct ib_device {
         int                        (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
                                                       struct ib_mr_status *mr_status);
         void                       (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
+        void                       (*drain_rq)(struct ib_qp *qp);
+        void                       (*drain_sq)(struct ib_qp *qp);
 
         struct ib_dma_mapping_ops   *dma_ops;
@@ -3094,4 +3096,7 @@ int ib_sg_to_pages(struct ib_mr *mr,
                    int sg_nents,
                    int (*set_page)(struct ib_mr *, u64));
 
+void ib_drain_rq(struct ib_qp *qp);
+void ib_drain_sq(struct ib_qp *qp);
+void ib_drain_qp(struct ib_qp *qp);
 #endif /* IB_VERBS_H */
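For providers that need special drain logic, the two new ib_device hooks are assigned when the provider sets up its struct ib_device. A hypothetical sketch (my_drain_sq, my_drain_rq, and my_setup_device are invented names, not from this commit):

#include <rdma/ib_verbs.h>

/* Provider-specific drain implementations; when these hooks are non-NULL,
 * ib_drain_sq()/ib_drain_rq() call them instead of the generic
 * __ib_drain_sq()/__ib_drain_rq() paths.
 */
static void my_drain_sq(struct ib_qp *qp)
{
        /* provider-specific flush of the send queue */
}

static void my_drain_rq(struct ib_qp *qp)
{
        /* provider-specific flush of the receive queue */
}

static void my_setup_device(struct ib_device *ibdev)
{
        ibdev->drain_sq = my_drain_sq;
        ibdev->drain_rq = my_drain_rq;
}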