/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_smi.h>
#include <rdma/ib_verbs.h>

#include "qib.h"
#include "qib_mad.h"

/**
 * qib_ud_loopback - handle send on loopback QPs
 * @sqp: the sending QP
 * @swqe: the send work request
 *
 * This is called from qib_make_ud_req() to forward a WQE addressed
 * to the same HCA.
 * Note that the receive interrupt handler may be calling qib_ud_rcv()
 * while this is being called.
 */
static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
{
	struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = ppd->dd;
	struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
	struct rvt_qp *qp;
	struct rdma_ah_attr *ah_attr;
	unsigned long flags;
	struct rvt_sge_state ssge;
	struct rvt_sge *sge;
	struct ib_wc wc;
	u32 length;
	enum ib_qp_type sqptype, dqptype;

	rcu_read_lock();
	qp = rvt_lookup_qpn(rdi, &ibp->rvp, swqe->ud_wr.remote_qpn);
	if (!qp) {
		ibp->rvp.n_pkt_drops++;
		goto drop;
	}

	sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?
			IB_QPT_UD : sqp->ibqp.qp_type;
	dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
			IB_QPT_UD : qp->ibqp.qp_type;

	if (dqptype != sqptype ||
	    !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
		ibp->rvp.n_pkt_drops++;
		goto drop;
	}

	ah_attr = &ibah_to_rvtah(swqe->ud_wr.ah)->attr;
	ppd = ppd_from_ibp(ibp);

	if (qp->ibqp.qp_num > 1) {
		u16 pkey1;
		u16 pkey2;
		u16 lid;

		pkey1 = qib_get_pkey(ibp, sqp->s_pkey_index);
		pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
		if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
			lid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
					  ((1 << ppd->lmc) - 1));
			qib_bad_pkey(ibp, pkey1,
				     rdma_ah_get_sl(ah_attr),
				     sqp->ibqp.qp_num, qp->ibqp.qp_num,
				     cpu_to_be16(lid),
				     cpu_to_be16(rdma_ah_get_dlid(ah_attr)));
			goto drop;
		}
	}

	/*
	 * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	if (qp->ibqp.qp_num) {
		u32 qkey;

		qkey = (int)swqe->ud_wr.remote_qkey < 0 ?
			sqp->qkey : swqe->ud_wr.remote_qkey;
		if (unlikely(qkey != qp->qkey))
			goto drop;
	}

	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	length = swqe->length;
	memset(&wc, 0, sizeof(wc));
	wc.byte_len = length + sizeof(struct ib_grh);

	if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = swqe->wr.ex.imm_data;
	}

	spin_lock_irqsave(&qp->r_lock, flags);

	/*
	 * Get the next work request entry to find where to put the data.
	 */
	if (qp->r_flags & RVT_R_REUSE_SGE)
		qp->r_flags &= ~RVT_R_REUSE_SGE;
	else {
		int ret;

		ret = qib_get_rwqe(qp, 0);
		if (ret < 0) {
			rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			goto bail_unlock;
		}
		if (!ret) {
			if (qp->ibqp.qp_num == 0)
				ibp->rvp.n_vl15_dropped++;
			goto bail_unlock;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		qp->r_flags |= RVT_R_REUSE_SGE;
		ibp->rvp.n_pkt_drops++;
		goto bail_unlock;
	}

	if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
		struct ib_grh grh;
		const struct ib_global_route *grd = rdma_ah_read_grh(ah_attr);

		qib_make_grh(ibp, &grh, grd, 0, 0);
		qib_copy_sge(&qp->r_sge, &grh,
			     sizeof(grh), 1);
		wc.wc_flags |= IB_WC_GRH;
	} else
		rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
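	/*
	 * Copy the payload from the sender's SGE list into the posted
	 * receive buffer, advancing both scatter/gather states as we go.
	 */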
	ssge.sg_list = swqe->sg_list + 1;
	ssge.sge = *swqe->sg_list;
	ssge.num_sge = swqe->wr.num_sge;
	sge = &ssge.sge;
	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		qib_copy_sge(&qp->r_sge, sge->vaddr, len, 1);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ssge.num_sge)
				*sge = *ssge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}
	rvt_put_ss(&qp->r_sge);
	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		goto bail_unlock;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	wc.src_qp = sqp->ibqp.qp_num;
	wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
		swqe->ud_wr.pkey_index : 0;
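	/* Report the sender's LID, including its path bits, as the packet's source LID. */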
	wc.slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
				((1 << ppd->lmc) - 1));
	wc.sl = rdma_ah_get_sl(ah_attr);
	wc.dlid_path_bits = rdma_ah_get_dlid(ah_attr) & ((1 << ppd->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
		     swqe->wr.send_flags & IB_SEND_SOLICITED);
	ibp->rvp.n_loop_pkts++;
bail_unlock:
	spin_unlock_irqrestore(&qp->r_lock, flags);
drop:
	rcu_read_unlock();
}

/**
 * qib_make_ud_req - construct a UD request packet
 * @qp: the QP
 *
 * Assumes the s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
{
	struct qib_qp_priv *priv = qp->priv;
	struct ib_other_headers *ohdr;
	struct rdma_ah_attr *ah_attr;
	struct qib_pportdata *ppd;
	struct qib_ibport *ibp;
	struct rvt_swqe *wqe;
	u32 nwords;
	u32 extra_bytes;
	u32 bth0;
	u16 lrh0;
	u16 lid;
	int ret = 0;
	int next_cur;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		if (qp->s_last == READ_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (atomic_read(&priv->s_dma_busy)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
		goto done;
	}

	/* see post_one_send() */
	if (qp->s_cur == READ_ONCE(qp->s_head))
		goto bail;

	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
	next_cur = qp->s_cur + 1;
	if (next_cur >= qp->s_size)
		next_cur = 0;

	/* Construct the header. */
	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr;
	if (rdma_ah_get_dlid(ah_attr) >= be16_to_cpu(IB_MULTICAST_LID_BASE)) {
		if (rdma_ah_get_dlid(ah_attr) !=
				be16_to_cpu(IB_LID_PERMISSIVE))
			this_cpu_inc(ibp->pmastats->n_multicast_xmit);
		else
			this_cpu_inc(ibp->pmastats->n_unicast_xmit);
	} else {
		this_cpu_inc(ibp->pmastats->n_unicast_xmit);
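		/* Mask off the LMC path bits to compare against this port's base LID. */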
		lid = rdma_ah_get_dlid(ah_attr) & ~((1 << ppd->lmc) - 1);
		if (unlikely(lid == ppd->lid)) {
			unsigned long tflags = *flags;
			/*
			 * If DMAs are in progress, we can't generate
			 * a completion for the loopback packet since
			 * it would be out of order.
			 * XXX Instead of waiting, we could queue a
			 * zero length descriptor so we get a callback.
			 */
			if (atomic_read(&priv->s_dma_busy)) {
				qp->s_flags |= RVT_S_WAIT_DMA;
				goto bail;
			}
			qp->s_cur = next_cur;
			spin_unlock_irqrestore(&qp->s_lock, tflags);
			qib_ud_loopback(qp, wqe);
			spin_lock_irqsave(&qp->s_lock, tflags);
			*flags = tflags;
			qib_send_complete(qp, wqe, IB_WC_SUCCESS);
			goto done;
		}
	}

	qp->s_cur = next_cur;
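	/* Round the payload up to a multiple of 4 bytes; extra_bytes is the pad count. */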
	extra_bytes = -wqe->length & 3;
	nwords = (wqe->length + extra_bytes) >> 2;

	/* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
	qp->s_hdrwords = 7;
	qp->s_cur_size = wqe->length;
	qp->s_cur_sge = &qp->s_sge;
	qp->s_srate = rdma_ah_get_static_rate(ah_attr);
	qp->s_wqe = wqe;
	qp->s_sge.sge = wqe->sg_list[0];
	qp->s_sge.sg_list = wqe->sg_list + 1;
	qp->s_sge.num_sge = wqe->wr.num_sge;
	qp->s_sge.total_len = wqe->length;

	if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
		/* Header size in 32-bit words. */
		qp->s_hdrwords += qib_make_grh(ibp, &priv->s_hdr->u.l.grh,
					       rdma_ah_read_grh(ah_attr),
					       qp->s_hdrwords, nwords);
		lrh0 = QIB_LRH_GRH;
		ohdr = &priv->s_hdr->u.l.oth;
		/*
		 * Don't worry about sending to locally attached multicast
		 * QPs; the spec leaves that behavior unspecified.
		 */
	} else {
		/* Header size in 32-bit words. */
		lrh0 = QIB_LRH_BTH;
		ohdr = &priv->s_hdr->u.oth;
	}
	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		qp->s_hdrwords++;
		ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
		bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
	} else
		bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
	lrh0 |= rdma_ah_get_sl(ah_attr) << 4;
	if (qp->ibqp.qp_type == IB_QPT_SMI)
		lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
	else
		lrh0 |= ibp->sl_to_vl[rdma_ah_get_sl(ah_attr)] << 12;
	priv->s_hdr->lrh[0] = cpu_to_be16(lrh0);
	priv->s_hdr->lrh[1] =
		cpu_to_be16(rdma_ah_get_dlid(ah_attr)); /* DEST LID */
	priv->s_hdr->lrh[2] =
		cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
	lid = ppd->lid;
	if (lid) {
		lid |= rdma_ah_get_path_bits(ah_attr) &
			((1 << ppd->lmc) - 1);
		priv->s_hdr->lrh[3] = cpu_to_be16(lid);
	} else
		priv->s_hdr->lrh[3] = IB_LID_PERMISSIVE;
	if (wqe->wr.send_flags & IB_SEND_SOLICITED)
		bth0 |= IB_BTH_SOLICITED;
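	/* The pad count occupies bits 21:20 of the first BTH word. */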
	bth0 |= extra_bytes << 20;
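	/*
	 * QP0 (SMI) always uses the default partition key; GSI takes its
	 * pkey index from the WR, other UD QPs from the QP itself.
	 */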
	bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? QIB_DEFAULT_P_KEY :
		qib_get_pkey(ibp, qp->ibqp.qp_type == IB_QPT_GSI ?
			     wqe->ud_wr.pkey_index : qp->s_pkey_index);
	ohdr->bth[0] = cpu_to_be32(bth0);
	/*
	 * Use the multicast QP if the destination LID is a multicast LID.
	 */
	ohdr->bth[1] = rdma_ah_get_dlid(ah_attr) >=
			be16_to_cpu(IB_MULTICAST_LID_BASE) &&
		rdma_ah_get_dlid(ah_attr) != be16_to_cpu(IB_LID_PERMISSIVE) ?
		cpu_to_be32(QIB_MULTICAST_QPN) :
		cpu_to_be32(wqe->ud_wr.remote_qpn);
	ohdr->bth[2] = cpu_to_be32(wqe->psn & QIB_PSN_MASK);
	/*
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->ud_wr.remote_qkey < 0 ?
					 qp->qkey : wqe->ud_wr.remote_qkey);
	ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);

done:
	return 1;
bail:
	qp->s_flags &= ~RVT_S_BUSY;
	return ret;
}

static unsigned qib_lookup_pkey(struct qib_ibport *ibp, u16 pkey)
{
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = ppd->dd;
	unsigned ctxt = ppd->hw_pidx;
	unsigned i;

	pkey &= 0x7fff;	/* remove limited/full membership bit */

	for (i = 0; i < ARRAY_SIZE(dd->rcd[ctxt]->pkeys); ++i)
		if ((dd->rcd[ctxt]->pkeys[i] & 0x7fff) == pkey)
			return i;

	/*
	 * Should not get here, this means hardware failed to validate pkeys.
	 * Punt and return index 0.
	 */
	return 0;
}

/**
 * qib_ud_rcv - receive an incoming UD packet
 * @ibp: the port the packet came in on
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from qib_qp_rcv() to process an incoming UD packet
 * for the given QP.
 * Called at interrupt level.
 */
void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
{
	struct ib_other_headers *ohdr;
	int opcode;
	u32 hdrsize;
	u32 pad;
	struct ib_wc wc;
	u32 qkey;
	u32 src_qp;
	u16 dlid;

	/* Check for GRH */
	if (!has_grh) {
		ohdr = &hdr->u.oth;
		hdrsize = 8 + 12 + 8; /* LRH + BTH + DETH */
	} else {
		ohdr = &hdr->u.l.oth;
		hdrsize = 8 + 40 + 12 + 8; /* LRH + GRH + BTH + DETH */
	}
	qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
	src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & RVT_QPN_MASK;

	/*
	 * Get the number of bytes the message was padded by
	 * and drop incomplete packets.
	 */
	pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
	if (unlikely(tlen < (hdrsize + pad + 4)))
		goto drop;

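	/* Strip the header, pad bytes, and 4-byte ICRC to get the payload length. */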
	tlen -= hdrsize + pad + 4;

	/*
	 * Check that the permissive LID is only used on QP0
	 * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
	 */
	if (qp->ibqp.qp_num) {
		if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
			     hdr->lrh[3] == IB_LID_PERMISSIVE))
			goto drop;
		if (qp->ibqp.qp_num > 1) {
			u16 pkey1, pkey2;

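			/* The P_Key is the low 16 bits of the first BTH word; the u16 assignment keeps just that field. */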
			pkey1 = be32_to_cpu(ohdr->bth[0]);
			pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
			if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
				qib_bad_pkey(ibp,
					     pkey1,
					     (be16_to_cpu(hdr->lrh[0]) >> 4) &
						0xF,
					     src_qp, qp->ibqp.qp_num,
					     hdr->lrh[3], hdr->lrh[1]);
				return;
			}
		}
		if (unlikely(qkey != qp->qkey))
			return;

		/* Drop invalid MAD packets (see 13.5.3.1). */
		if (unlikely(qp->ibqp.qp_num == 1 &&
			     (tlen != 256 ||
			      (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))
			goto drop;
	} else {
		struct ib_smp *smp;

		/* Drop invalid MAD packets (see 13.5.3.1). */
		if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)
			goto drop;
		smp = (struct ib_smp *) data;
		if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
		     hdr->lrh[3] == IB_LID_PERMISSIVE) &&
		    smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
			goto drop;
	}

	/*
	 * The opcode is in the low byte when it's in network order
	 * (top byte when in host order).
	 */
	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	if (qp->ibqp.qp_num > 1 &&
	    opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
		wc.ex.imm_data = ohdr->u.ud.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		tlen -= sizeof(u32);
	} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
		wc.ex.imm_data = 0;
		wc.wc_flags = 0;
	} else
		goto drop;

	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	wc.byte_len = tlen + sizeof(struct ib_grh);

	/*
	 * Get the next work request entry to find where to put the data.
	 */
	if (qp->r_flags & RVT_R_REUSE_SGE)
		qp->r_flags &= ~RVT_R_REUSE_SGE;
	else {
		int ret;

		ret = qib_get_rwqe(qp, 0);
		if (ret < 0) {
			rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			return;
		}
		if (!ret) {
			if (qp->ibqp.qp_num == 0)
				ibp->rvp.n_vl15_dropped++;
			return;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		qp->r_flags |= RVT_R_REUSE_SGE;
		goto drop;
	}
	if (has_grh) {
		qib_copy_sge(&qp->r_sge, &hdr->u.l.grh,
			     sizeof(struct ib_grh), 1);
		wc.wc_flags |= IB_WC_GRH;
	} else
		rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
	qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1);
	rvt_put_ss(&qp->r_sge);
	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		return;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.vendor_err = 0;
	wc.qp = &qp->ibqp;
	wc.src_qp = src_qp;
	wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
		qib_lookup_pkey(ibp, be32_to_cpu(ohdr->bth[0])) : 0;
	wc.slid = be16_to_cpu(hdr->lrh[3]);
	wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
	dlid = be16_to_cpu(hdr->lrh[1]);
	/*
	 * Save the LMC lower bits if the destination LID is a unicast LID.
	 */
	wc.dlid_path_bits = dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE) ? 0 :
		dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
		     ib_bth_is_solicited(ohdr));
	return;

drop:
	ibp->rvp.n_pkt_drops++;
}