IB/hfi1, IB/rdmavt: Allow for extending of QP's s_ack_queue

The OPFN protocol uses the COMPARE_SWAP request to exchange data between the requester and the responder and therefore needs to be stored in the QP's s_ack_queue when the request is received on the responder side. However, because the user does not know anything about the OPFN protocol, this extra entry in the queue cannot be advertised to the user. This patch adds an extra entry in a QP's s_ack_queue.

Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Mitko Haralanov <mitko.haralanov@intel.com>
Signed-off-by: Kaike Wan <kaike.wan@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
This commit is contained in:
parent
f01b4d5a43
commit
ddf922c31f
|
@ -122,7 +122,8 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
|
|||
* response has been sent instead of only being
|
||||
* constructed.
|
||||
*/
|
||||
if (++qp->s_tail_ack_queue > HFI1_MAX_RDMA_ATOMIC)
|
||||
if (++qp->s_tail_ack_queue >
|
||||
rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
|
||||
qp->s_tail_ack_queue = 0;
|
||||
/* FALLTHROUGH */
|
||||
case OP(SEND_ONLY):
|
||||
|
@ -1818,7 +1819,7 @@ static noinline int rc_rcv_error(struct ib_other_headers *ohdr, void *data,
|
|||
if (i)
|
||||
prev = i - 1;
|
||||
else
|
||||
prev = HFI1_MAX_RDMA_ATOMIC;
|
||||
prev = rvt_size_atomic(ib_to_rvt(qp->ibqp.device));
|
||||
if (prev == qp->r_head_ack_queue) {
|
||||
e = NULL;
|
||||
break;
|
||||
|
@ -1942,7 +1943,7 @@ static inline void update_ack_queue(struct rvt_qp *qp, unsigned n)
|
|||
unsigned next;
|
||||
|
||||
next = n + 1;
|
||||
if (next > HFI1_MAX_RDMA_ATOMIC)
|
||||
if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
|
||||
next = 0;
|
||||
qp->s_tail_ack_queue = next;
|
||||
qp->s_ack_state = OP(ACKNOWLEDGE);
|
||||
|
@ -2298,8 +2299,8 @@ send_last:
|
|||
if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
|
||||
goto nack_inv;
|
||||
next = qp->r_head_ack_queue + 1;
|
||||
/* s_ack_queue is size HFI1_MAX_RDMA_ATOMIC+1 so use > not >= */
|
||||
if (next > HFI1_MAX_RDMA_ATOMIC)
|
||||
/* s_ack_queue is size rvt_size_atomic()+1 so use > not >= */
|
||||
if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
|
||||
next = 0;
|
||||
spin_lock_irqsave(&qp->s_lock, flags);
|
||||
if (unlikely(next == qp->s_tail_ack_queue)) {
|
||||
|
@ -2373,7 +2374,7 @@ send_last:
|
|||
if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
|
||||
goto nack_inv;
|
||||
next = qp->r_head_ack_queue + 1;
|
||||
if (next > HFI1_MAX_RDMA_ATOMIC)
|
||||
if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
|
||||
next = 0;
|
||||
spin_lock_irqsave(&qp->s_lock, flags);
|
||||
if (unlikely(next == qp->s_tail_ack_queue)) {
|
||||
|
|
|
@ -1735,6 +1735,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
|
|||
dd->verbs_dev.rdi.dparms.sge_copy_mode = sge_copy_mode;
|
||||
dd->verbs_dev.rdi.dparms.wss_threshold = wss_threshold;
|
||||
dd->verbs_dev.rdi.dparms.wss_clean_period = wss_clean_period;
|
||||
dd->verbs_dev.rdi.dparms.extra_rdma_atomic = 1;
|
||||
|
||||
/* post send table */
|
||||
dd->verbs_dev.rdi.post_parms = hfi1_post_parms;
|
||||
|
|
|
@ -182,6 +182,7 @@ struct rvt_driver_params {
|
|||
u32 max_mad_size;
|
||||
u8 qos_shift;
|
||||
u8 max_rdma_atomic;
|
||||
u8 extra_rdma_atomic;
|
||||
u8 reserved_operations;
|
||||
};
|
||||
|
||||
|
@ -519,7 +520,14 @@ static inline unsigned rvt_get_npkeys(struct rvt_dev_info *rdi)
|
|||
*/
|
||||
static inline unsigned int rvt_max_atomic(struct rvt_dev_info *rdi)
|
||||
{
|
||||
return rdi->dparms.max_rdma_atomic + 1;
|
||||
return rdi->dparms.max_rdma_atomic +
|
||||
rdi->dparms.extra_rdma_atomic + 1;
|
||||
}
|
||||
|
||||
static inline unsigned int rvt_size_atomic(struct rvt_dev_info *rdi)
|
||||
{
|
||||
return rdi->dparms.max_rdma_atomic +
|
||||
rdi->dparms.extra_rdma_atomic;
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
Loading…
Reference in New Issue