IB/hfi1, IB/rdmavt: Move r_adefered to r_lock cache line

This field is causing excessive cache line bouncing.

There are spare bytes in the r_lock cache line, so the best approach
is to make r_adefered an rvt QP field and remove it from the hfi1 priv struct.

Signed-off-by: Sebastian Sanchez <sebastian.sanchez@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
This commit is contained in:
Mike Marciniszyn 2017-05-04 05:14:04 -07:00 committed by Doug Ledford
parent 02d1008bcf
commit 688f21c0be
4 changed files with 6 additions and 13 deletions

View File

@@ -731,9 +731,7 @@ void quiesce_qp(struct rvt_qp *qp)
 void notify_qp_reset(struct rvt_qp *qp)
 {
-	struct hfi1_qp_priv *priv = qp->priv;
-
-	priv->r_adefered = 0;
+	qp->r_adefered = 0;
 	clear_ahg(qp);
 }

View File

@@ -727,10 +727,9 @@ void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp,
 	struct ib_header hdr;
 	struct ib_other_headers *ohdr;
 	unsigned long flags;
-	struct hfi1_qp_priv *priv = qp->priv;

 	/* clear the defer count */
-	priv->r_adefered = 0;
+	qp->r_adefered = 0;

 	/* Don't send ACK or NAK if a RDMA read or atomic is pending. */
 	if (qp->s_flags & RVT_S_RESP_PENDING)
@@ -1604,9 +1603,7 @@ static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd,
 static inline void rc_cancel_ack(struct rvt_qp *qp)
 {
-	struct hfi1_qp_priv *priv = qp->priv;
-
-	priv->r_adefered = 0;
+	qp->r_adefered = 0;
 	if (list_empty(&qp->rspwait))
 		return;
 	list_del_init(&qp->rspwait);
@@ -2314,13 +2311,11 @@ send_last:
 	qp->r_nak_state = 0;
 	/* Send an ACK if requested or required. */
 	if (psn & IB_BTH_REQ_ACK) {
-		struct hfi1_qp_priv *priv = qp->priv;
-
 		if (packet->numpkt == 0) {
 			rc_cancel_ack(qp);
 			goto send_ack;
 		}
-		if (priv->r_adefered >= HFI1_PSN_CREDIT) {
+		if (qp->r_adefered >= HFI1_PSN_CREDIT) {
 			rc_cancel_ack(qp);
 			goto send_ack;
 		}
@@ -2328,7 +2323,7 @@ send_last:
 			rc_cancel_ack(qp);
 			goto send_ack;
 		}
-		priv->r_adefered++;
+		qp->r_adefered++;
 		rc_defered_ack(rcd, qp);
 	}
 	return;

View File

@@ -125,7 +125,6 @@ struct hfi1_qp_priv {
 	struct sdma_engine *s_sde; /* current sde */
 	struct send_context *s_sendcontext; /* current sendcontext */
 	u8 s_sc; /* SC[0..4] for next packet */
-	u8 r_adefered; /* number of acks defered */
 	struct iowait s_iowait;
 	struct rvt_qp *owner;
 };

View File

@@ -324,6 +324,7 @@ struct rvt_qp {
 	u8 r_state; /* opcode of last packet received */
 	u8 r_flags;
 	u8 r_head_ack_queue; /* index into s_ack_queue[] */
+	u8 r_adefered; /* defered ack count */
 	struct list_head rspwait; /* link for waiting to respond */