2010-05-24 12:44:54 +08:00
|
|
|
/*
|
2013-06-16 05:06:58 +08:00
|
|
|
* Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
|
2012-07-17 01:11:06 +08:00
|
|
|
* Copyright (c) 2006 - 2012 QLogic Corporation. * All rights reserved.
|
2010-05-24 12:44:54 +08:00
|
|
|
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
|
|
|
|
*
|
|
|
|
* This software is available to you under a choice of one of two
|
|
|
|
* licenses. You may choose to be licensed under the terms of the GNU
|
|
|
|
* General Public License (GPL) Version 2, available from the file
|
|
|
|
* COPYING in the main directory of this source tree, or the
|
|
|
|
* OpenIB.org BSD license below:
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or
|
|
|
|
* without modification, are permitted provided that the following
|
|
|
|
* conditions are met:
|
|
|
|
*
|
|
|
|
* - Redistributions of source code must retain the above
|
|
|
|
* copyright notice, this list of conditions and the following
|
|
|
|
* disclaimer.
|
|
|
|
*
|
|
|
|
* - Redistributions in binary form must reproduce the above
|
|
|
|
* copyright notice, this list of conditions and the following
|
|
|
|
* disclaimer in the documentation and/or other materials
|
|
|
|
* provided with the distribution.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
|
|
|
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
|
|
|
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
|
|
|
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
|
|
|
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
|
|
|
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
|
|
|
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
|
|
* SOFTWARE.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/err.h>
|
|
|
|
#include <linux/vmalloc.h>
|
2016-01-23 04:45:02 +08:00
|
|
|
#include <rdma/rdma_vt.h>
|
2013-06-16 05:07:14 +08:00
|
|
|
#ifdef CONFIG_DEBUG_FS
|
|
|
|
#include <linux/seq_file.h>
|
|
|
|
#endif
|
2010-05-24 12:44:54 +08:00
|
|
|
|
|
|
|
#include "qib.h"
|
|
|
|
|
2016-01-23 04:56:27 +08:00
|
|
|
/*
|
|
|
|
* mask field which was present in now deleted qib_qpn_table
|
|
|
|
* is not present in rvt_qpn_table. Defining the same field
|
|
|
|
* as qpt_mask here instead of adding the mask field to
|
|
|
|
* rvt_qpn_table.
|
|
|
|
*/
|
2016-01-23 04:56:52 +08:00
|
|
|
u16 qpt_mask;
|
2010-05-24 12:44:54 +08:00
|
|
|
|
2016-01-23 04:56:27 +08:00
|
|
|
static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
|
|
|
|
struct rvt_qpn_map *map, unsigned off)
|
2010-05-24 12:44:54 +08:00
|
|
|
{
|
2016-01-23 04:56:27 +08:00
|
|
|
return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
|
2010-05-24 12:44:54 +08:00
|
|
|
}
|
|
|
|
|
2016-01-23 04:56:27 +08:00
|
|
|
static inline unsigned find_next_offset(struct rvt_qpn_table *qpt,
|
|
|
|
struct rvt_qpn_map *map, unsigned off,
|
2011-01-11 09:42:21 +08:00
|
|
|
unsigned n)
|
2010-05-24 12:44:54 +08:00
|
|
|
{
|
2016-01-23 04:56:27 +08:00
|
|
|
if (qpt_mask) {
|
2010-05-24 12:44:54 +08:00
|
|
|
off++;
|
2016-01-23 04:56:27 +08:00
|
|
|
if (((off & qpt_mask) >> 1) >= n)
|
|
|
|
off = (off | qpt_mask) + 2;
|
|
|
|
} else {
|
|
|
|
off = find_next_zero_bit(map->page, RVT_BITS_PER_PAGE, off);
|
|
|
|
}
|
2010-05-24 12:44:54 +08:00
|
|
|
return off;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Convert the AETH credit code into the number of credits.
|
|
|
|
*/
|
|
|
|
static u32 credit_table[31] = {
|
|
|
|
0, /* 0 */
|
|
|
|
1, /* 1 */
|
|
|
|
2, /* 2 */
|
|
|
|
3, /* 3 */
|
|
|
|
4, /* 4 */
|
|
|
|
6, /* 5 */
|
|
|
|
8, /* 6 */
|
|
|
|
12, /* 7 */
|
|
|
|
16, /* 8 */
|
|
|
|
24, /* 9 */
|
|
|
|
32, /* A */
|
|
|
|
48, /* B */
|
|
|
|
64, /* C */
|
|
|
|
96, /* D */
|
|
|
|
128, /* E */
|
|
|
|
192, /* F */
|
|
|
|
256, /* 10 */
|
|
|
|
384, /* 11 */
|
|
|
|
512, /* 12 */
|
|
|
|
768, /* 13 */
|
|
|
|
1024, /* 14 */
|
|
|
|
1536, /* 15 */
|
|
|
|
2048, /* 16 */
|
|
|
|
3072, /* 17 */
|
|
|
|
4096, /* 18 */
|
|
|
|
6144, /* 19 */
|
|
|
|
8192, /* 1A */
|
|
|
|
12288, /* 1B */
|
|
|
|
16384, /* 1C */
|
|
|
|
24576, /* 1D */
|
|
|
|
32768 /* 1E */
|
|
|
|
};
|
|
|
|
|
2016-01-23 04:56:27 +08:00
|
|
|
static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map,
|
2016-01-12 01:57:25 +08:00
|
|
|
gfp_t gfp)
|
2010-05-24 12:44:54 +08:00
|
|
|
{
|
2016-01-12 01:57:25 +08:00
|
|
|
unsigned long page = get_zeroed_page(gfp);
|
2010-05-24 12:44:54 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Free the page if someone raced with us installing it.
|
|
|
|
*/
|
|
|
|
|
|
|
|
spin_lock(&qpt->lock);
|
|
|
|
if (map->page)
|
|
|
|
free_page(page);
|
|
|
|
else
|
|
|
|
map->page = (void *)page;
|
|
|
|
spin_unlock(&qpt->lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Allocate the next available QPN or
|
|
|
|
* zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
|
|
|
|
*/
|
2016-01-23 04:56:52 +08:00
|
|
|
int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
|
|
|
|
enum ib_qp_type type, u8 port, gfp_t gfp)
|
2010-05-24 12:44:54 +08:00
|
|
|
{
|
|
|
|
u32 i, offset, max_scan, qpn;
|
2016-01-23 04:56:27 +08:00
|
|
|
struct rvt_qpn_map *map;
|
2010-05-24 12:44:54 +08:00
|
|
|
u32 ret;
|
2016-01-23 04:56:52 +08:00
|
|
|
struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
|
|
|
|
struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
|
|
|
|
verbs_dev);
|
2010-05-24 12:44:54 +08:00
|
|
|
|
|
|
|
if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
|
|
|
|
unsigned n;
|
|
|
|
|
|
|
|
ret = type == IB_QPT_GSI;
|
|
|
|
n = 1 << (ret + 2 * (port - 1));
|
|
|
|
spin_lock(&qpt->lock);
|
|
|
|
if (qpt->flags & n)
|
|
|
|
ret = -EINVAL;
|
|
|
|
else
|
|
|
|
qpt->flags |= n;
|
|
|
|
spin_unlock(&qpt->lock);
|
|
|
|
goto bail;
|
|
|
|
}
|
|
|
|
|
2011-01-11 09:42:22 +08:00
|
|
|
qpn = qpt->last + 2;
|
2016-01-23 04:56:27 +08:00
|
|
|
if (qpn >= RVT_QPN_MAX)
|
2010-05-24 12:44:54 +08:00
|
|
|
qpn = 2;
|
2016-01-23 04:56:27 +08:00
|
|
|
if (qpt_mask && ((qpn & qpt_mask) >> 1) >= dd->n_krcv_queues)
|
|
|
|
qpn = (qpn | qpt_mask) + 2;
|
|
|
|
offset = qpn & RVT_BITS_PER_PAGE_MASK;
|
|
|
|
map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
|
2010-05-24 12:44:54 +08:00
|
|
|
max_scan = qpt->nmaps - !offset;
|
|
|
|
for (i = 0;;) {
|
|
|
|
if (unlikely(!map->page)) {
|
2016-01-12 01:57:25 +08:00
|
|
|
get_map_page(qpt, map, gfp);
|
2010-05-24 12:44:54 +08:00
|
|
|
if (unlikely(!map->page))
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
do {
|
|
|
|
if (!test_and_set_bit(offset, map->page)) {
|
|
|
|
qpt->last = qpn;
|
|
|
|
ret = qpn;
|
|
|
|
goto bail;
|
|
|
|
}
|
2011-01-11 09:42:21 +08:00
|
|
|
offset = find_next_offset(qpt, map, offset,
|
|
|
|
dd->n_krcv_queues);
|
2010-05-24 12:44:54 +08:00
|
|
|
qpn = mk_qpn(qpt, map, offset);
|
|
|
|
/*
|
|
|
|
* This test differs from alloc_pidmap().
|
|
|
|
* If find_next_offset() does find a zero
|
|
|
|
* bit, we don't need to check for QPN
|
|
|
|
* wrapping around past our starting QPN.
|
|
|
|
* We just need to be sure we don't loop
|
|
|
|
* forever.
|
|
|
|
*/
|
2016-01-23 04:56:27 +08:00
|
|
|
} while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
|
2010-05-24 12:44:54 +08:00
|
|
|
/*
|
|
|
|
* In order to keep the number of pages allocated to a
|
|
|
|
* minimum, we scan the all existing pages before increasing
|
|
|
|
* the size of the bitmap table.
|
|
|
|
*/
|
|
|
|
if (++i > max_scan) {
|
2016-01-23 04:56:27 +08:00
|
|
|
if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
|
2010-05-24 12:44:54 +08:00
|
|
|
break;
|
|
|
|
map = &qpt->map[qpt->nmaps++];
|
2011-01-11 09:42:21 +08:00
|
|
|
offset = 0;
|
2010-05-24 12:44:54 +08:00
|
|
|
} else if (map < &qpt->map[qpt->nmaps]) {
|
|
|
|
++map;
|
2011-01-11 09:42:21 +08:00
|
|
|
offset = 0;
|
2010-05-24 12:44:54 +08:00
|
|
|
} else {
|
|
|
|
map = &qpt->map[0];
|
2011-01-11 09:42:21 +08:00
|
|
|
offset = 2;
|
2010-05-24 12:44:54 +08:00
|
|
|
}
|
|
|
|
qpn = mk_qpn(qpt, map, offset);
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = -ENOMEM;
|
|
|
|
|
|
|
|
bail:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2016-01-23 04:56:27 +08:00
|
|
|
static void free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
|
2010-05-24 12:44:54 +08:00
|
|
|
{
|
2016-01-23 04:56:27 +08:00
|
|
|
struct rvt_qpn_map *map;
|
2010-05-24 12:44:54 +08:00
|
|
|
|
2016-01-23 04:56:27 +08:00
|
|
|
map = qpt->map + qpn / RVT_BITS_PER_PAGE;
|
2010-05-24 12:44:54 +08:00
|
|
|
if (map->page)
|
2016-01-23 04:56:27 +08:00
|
|
|
clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
|
2010-05-24 12:44:54 +08:00
|
|
|
}
|
|
|
|
|
2011-09-24 01:16:44 +08:00
|
|
|
static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
|
|
|
|
{
|
2016-02-04 06:20:19 +08:00
|
|
|
return hash_32(qpn, dev->rdi.qp_dev->qp_table_bits);
|
2011-09-24 01:16:44 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-05-24 12:44:54 +08:00
|
|
|
/*
|
|
|
|
* Put the QP into the hash table.
|
|
|
|
* The hash table holds a reference to the QP.
|
|
|
|
*/
|
2016-01-23 04:45:59 +08:00
|
|
|
static void insert_qp(struct qib_ibdev *dev, struct rvt_qp *qp)
|
2010-05-24 12:44:54 +08:00
|
|
|
{
|
|
|
|
struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
|
|
|
|
unsigned long flags;
|
2011-09-24 01:16:44 +08:00
|
|
|
unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
|
2010-05-24 12:44:54 +08:00
|
|
|
|
2011-09-24 01:16:44 +08:00
|
|
|
atomic_inc(&qp->refcount);
|
2016-01-23 04:56:27 +08:00
|
|
|
spin_lock_irqsave(&dev->rdi.qp_dev->qpt_lock, flags);
|
2010-05-24 12:44:54 +08:00
|
|
|
|
|
|
|
if (qp->ibqp.qp_num == 0)
|
2016-01-23 04:56:02 +08:00
|
|
|
rcu_assign_pointer(ibp->rvp.qp[0], qp);
|
2010-05-24 12:44:54 +08:00
|
|
|
else if (qp->ibqp.qp_num == 1)
|
2016-01-23 04:56:02 +08:00
|
|
|
rcu_assign_pointer(ibp->rvp.qp[1], qp);
|
2010-05-24 12:44:54 +08:00
|
|
|
else {
|
2016-01-23 04:56:27 +08:00
|
|
|
qp->next = dev->rdi.qp_dev->qp_table[n];
|
|
|
|
rcu_assign_pointer(dev->rdi.qp_dev->qp_table[n], qp);
|
2010-05-24 12:44:54 +08:00
|
|
|
}
|
|
|
|
|
2016-01-23 04:56:27 +08:00
|
|
|
spin_unlock_irqrestore(&dev->rdi.qp_dev->qpt_lock, flags);
|
2010-05-24 12:44:54 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Remove the QP from the table so it can't be found asynchronously by
|
|
|
|
* the receive interrupt routine.
|
|
|
|
*/
|
2016-01-23 04:45:59 +08:00
|
|
|
static void remove_qp(struct qib_ibdev *dev, struct rvt_qp *qp)
|
2010-05-24 12:44:54 +08:00
|
|
|
{
|
|
|
|
struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
|
2011-09-24 01:16:44 +08:00
|
|
|
unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
|
2010-05-24 12:44:54 +08:00
|
|
|
unsigned long flags;
|
2013-06-16 05:06:58 +08:00
|
|
|
int removed = 1;
|
2016-01-23 04:56:27 +08:00
|
|
|
spinlock_t *qpt_lock_ptr; /* Pointer to make checkpatch happy */
|
2010-05-24 12:44:54 +08:00
|
|
|
|
2016-01-23 04:56:27 +08:00
|
|
|
spin_lock_irqsave(&dev->rdi.qp_dev->qpt_lock, flags);
|
2010-05-24 12:44:54 +08:00
|
|
|
|
2016-01-23 04:56:27 +08:00
|
|
|
qpt_lock_ptr = &dev->rdi.qp_dev->qpt_lock;
|
2016-01-23 04:56:02 +08:00
|
|
|
if (rcu_dereference_protected(ibp->rvp.qp[0],
|
2016-01-23 04:56:27 +08:00
|
|
|
lockdep_is_held(qpt_lock_ptr)) == qp) {
|
2016-01-23 04:56:02 +08:00
|
|
|
RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
|
|
|
|
} else if (rcu_dereference_protected(ibp->rvp.qp[1],
|
2016-01-23 04:56:27 +08:00
|
|
|
lockdep_is_held(&dev->rdi.qp_dev->qpt_lock)) == qp) {
|
2016-01-23 04:56:02 +08:00
|
|
|
RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
|
2011-09-24 01:16:44 +08:00
|
|
|
} else {
|
2016-01-23 04:45:59 +08:00
|
|
|
struct rvt_qp *q;
|
|
|
|
struct rvt_qp __rcu **qpp;
|
2011-09-24 01:16:44 +08:00
|
|
|
|
2013-06-16 05:06:58 +08:00
|
|
|
removed = 0;
|
2016-01-23 04:56:27 +08:00
|
|
|
qpp = &dev->rdi.qp_dev->qp_table[n];
|
2013-01-25 02:59:34 +08:00
|
|
|
for (; (q = rcu_dereference_protected(*qpp,
|
2016-01-23 04:56:27 +08:00
|
|
|
lockdep_is_held(qpt_lock_ptr))) != NULL;
|
2013-01-25 02:59:34 +08:00
|
|
|
qpp = &q->next)
|
2010-05-24 12:44:54 +08:00
|
|
|
if (q == qp) {
|
2015-01-16 23:19:53 +08:00
|
|
|
RCU_INIT_POINTER(*qpp,
|
2013-02-08 04:47:51 +08:00
|
|
|
rcu_dereference_protected(qp->next,
|
2016-01-23 04:56:27 +08:00
|
|
|
lockdep_is_held(qpt_lock_ptr)));
|
2013-06-16 05:06:58 +08:00
|
|
|
removed = 1;
|
2010-05-24 12:44:54 +08:00
|
|
|
break;
|
|
|
|
}
|
2011-09-24 01:16:44 +08:00
|
|
|
}
|
2010-05-24 12:44:54 +08:00
|
|
|
|
2016-01-23 04:56:27 +08:00
|
|
|
spin_unlock_irqrestore(&dev->rdi.qp_dev->qpt_lock, flags);
|
2013-06-16 05:06:58 +08:00
|
|
|
if (removed) {
|
|
|
|
synchronize_rcu();
|
2016-02-04 06:20:19 +08:00
|
|
|
if (atomic_dec_and_test(&qp->refcount))
|
|
|
|
wake_up(&qp->wait);
|
2013-06-16 05:06:58 +08:00
|
|
|
}
|
2010-05-24 12:44:54 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* qib_free_all_qps - check for QPs still in use
|
|
|
|
*/
|
2016-01-23 04:56:52 +08:00
|
|
|
unsigned qib_free_all_qps(struct rvt_dev_info *rdi)
|
2010-05-24 12:44:54 +08:00
|
|
|
{
|
2016-01-23 04:56:52 +08:00
|
|
|
struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
|
|
|
|
struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
|
|
|
|
verbs_dev);
|
2010-05-24 12:44:54 +08:00
|
|
|
unsigned n, qp_inuse = 0;
|
|
|
|
|
|
|
|
for (n = 0; n < dd->num_pports; n++) {
|
|
|
|
struct qib_ibport *ibp = &dd->pport[n].ibport_data;
|
|
|
|
|
2011-09-24 01:16:44 +08:00
|
|
|
rcu_read_lock();
|
2016-01-23 04:56:02 +08:00
|
|
|
if (rcu_dereference(ibp->rvp.qp[0]))
|
2010-05-24 12:44:54 +08:00
|
|
|
qp_inuse++;
|
2016-01-23 04:56:02 +08:00
|
|
|
if (rcu_dereference(ibp->rvp.qp[1]))
|
2010-05-24 12:44:54 +08:00
|
|
|
qp_inuse++;
|
2011-09-24 01:16:44 +08:00
|
|
|
rcu_read_unlock();
|
2010-05-24 12:44:54 +08:00
|
|
|
}
|
|
|
|
return qp_inuse;
|
|
|
|
}
|
|
|
|
|
2016-01-23 04:56:52 +08:00
|
|
|
void notify_qp_reset(struct rvt_qp *qp)
|
2010-05-24 12:44:54 +08:00
|
|
|
{
|
2016-01-23 04:45:11 +08:00
|
|
|
struct qib_qp_priv *priv = qp->priv;
|
2016-01-23 04:56:52 +08:00
|
|
|
|
2016-01-23 04:45:11 +08:00
|
|
|
atomic_set(&priv->s_dma_busy, 0);
|
2010-05-24 12:44:54 +08:00
|
|
|
}
|
|
|
|
|
2016-01-23 04:45:59 +08:00
|
|
|
static void clear_mr_refs(struct rvt_qp *qp, int clr_sends)
|
2010-05-24 12:44:54 +08:00
|
|
|
{
|
|
|
|
unsigned n;
|
|
|
|
|
2016-01-23 04:56:46 +08:00
|
|
|
if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
|
2012-06-28 06:33:12 +08:00
|
|
|
qib_put_ss(&qp->s_rdma_read_sge);
|
2010-05-24 12:44:54 +08:00
|
|
|
|
2012-06-28 06:33:12 +08:00
|
|
|
qib_put_ss(&qp->r_sge);
|
2010-05-24 12:44:54 +08:00
|
|
|
|
|
|
|
if (clr_sends) {
|
|
|
|
while (qp->s_last != qp->s_head) {
|
2016-01-23 05:07:42 +08:00
|
|
|
struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
|
2010-05-24 12:44:54 +08:00
|
|
|
unsigned i;
|
|
|
|
|
|
|
|
for (i = 0; i < wqe->wr.num_sge; i++) {
|
2016-01-23 04:45:59 +08:00
|
|
|
struct rvt_sge *sge = &wqe->sg_list[i];
|
2010-05-24 12:44:54 +08:00
|
|
|
|
2016-01-23 04:45:59 +08:00
|
|
|
rvt_put_mr(sge->mr);
|
2010-05-24 12:44:54 +08:00
|
|
|
}
|
|
|
|
if (qp->ibqp.qp_type == IB_QPT_UD ||
|
|
|
|
qp->ibqp.qp_type == IB_QPT_SMI ||
|
|
|
|
qp->ibqp.qp_type == IB_QPT_GSI)
|
2016-01-23 04:46:07 +08:00
|
|
|
atomic_dec(
|
|
|
|
&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
|
2010-05-24 12:44:54 +08:00
|
|
|
if (++qp->s_last >= qp->s_size)
|
|
|
|
qp->s_last = 0;
|
|
|
|
}
|
|
|
|
if (qp->s_rdma_mr) {
|
2016-01-23 04:45:59 +08:00
|
|
|
rvt_put_mr(qp->s_rdma_mr);
|
2010-05-24 12:44:54 +08:00
|
|
|
qp->s_rdma_mr = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (qp->ibqp.qp_type != IB_QPT_RC)
|
|
|
|
return;
|
|
|
|
|
|
|
|
for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
|
2016-01-23 04:45:59 +08:00
|
|
|
struct rvt_ack_entry *e = &qp->s_ack_queue[n];
|
2010-05-24 12:44:54 +08:00
|
|
|
|
|
|
|
if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
|
|
|
|
e->rdma_sge.mr) {
|
2016-01-23 04:45:59 +08:00
|
|
|
rvt_put_mr(e->rdma_sge.mr);
|
2010-05-24 12:44:54 +08:00
|
|
|
e->rdma_sge.mr = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* qib_error_qp - put a QP into the error state
|
|
|
|
* @qp: the QP to put into the error state
|
|
|
|
* @err: the receive completion error to signal if a RWQE is active
|
|
|
|
*
|
|
|
|
* Flushes both send and receive work queues.
|
|
|
|
* Returns true if last WQE event should be generated.
|
2010-08-03 06:39:30 +08:00
|
|
|
* The QP r_lock and s_lock should be held and interrupts disabled.
|
2010-05-24 12:44:54 +08:00
|
|
|
* If we are already in error state, just return.
|
|
|
|
*/
|
2016-01-23 04:45:59 +08:00
|
|
|
int qib_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
|
2010-05-24 12:44:54 +08:00
|
|
|
{
|
2016-01-23 04:45:11 +08:00
|
|
|
struct qib_qp_priv *priv = qp->priv;
|
2010-05-24 12:44:54 +08:00
|
|
|
struct qib_ibdev *dev = to_idev(qp->ibqp.device);
|
|
|
|
struct ib_wc wc;
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
|
|
|
|
goto bail;
|
|
|
|
|
|
|
|
qp->state = IB_QPS_ERR;
|
|
|
|
|
2016-01-23 04:56:46 +08:00
|
|
|
if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
|
|
|
|
qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
|
2010-05-24 12:44:54 +08:00
|
|
|
del_timer(&qp->s_timer);
|
|
|
|
}
|
2011-01-11 09:42:20 +08:00
|
|
|
|
2016-01-23 04:56:46 +08:00
|
|
|
if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
|
|
|
|
qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;
|
2011-01-11 09:42:20 +08:00
|
|
|
|
2016-01-23 04:56:14 +08:00
|
|
|
spin_lock(&dev->rdi.pending_lock);
|
2016-01-23 04:56:46 +08:00
|
|
|
if (!list_empty(&priv->iowait) && !(qp->s_flags & RVT_S_BUSY)) {
|
|
|
|
qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
|
2016-01-23 04:45:11 +08:00
|
|
|
list_del_init(&priv->iowait);
|
2010-05-24 12:44:54 +08:00
|
|
|
}
|
2016-01-23 04:56:14 +08:00
|
|
|
spin_unlock(&dev->rdi.pending_lock);
|
2010-05-24 12:44:54 +08:00
|
|
|
|
2016-01-23 04:56:46 +08:00
|
|
|
if (!(qp->s_flags & RVT_S_BUSY)) {
|
2010-05-24 12:44:54 +08:00
|
|
|
qp->s_hdrwords = 0;
|
|
|
|
if (qp->s_rdma_mr) {
|
2016-01-23 04:45:59 +08:00
|
|
|
rvt_put_mr(qp->s_rdma_mr);
|
2010-05-24 12:44:54 +08:00
|
|
|
qp->s_rdma_mr = NULL;
|
|
|
|
}
|
2016-01-23 04:45:11 +08:00
|
|
|
if (priv->s_tx) {
|
|
|
|
qib_put_txreq(priv->s_tx);
|
|
|
|
priv->s_tx = NULL;
|
2010-05-24 12:44:54 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Schedule the sending tasklet to drain the send work queue. */
|
|
|
|
if (qp->s_last != qp->s_head)
|
|
|
|
qib_schedule_send(qp);
|
|
|
|
|
|
|
|
clear_mr_refs(qp, 0);
|
|
|
|
|
|
|
|
memset(&wc, 0, sizeof(wc));
|
|
|
|
wc.qp = &qp->ibqp;
|
|
|
|
wc.opcode = IB_WC_RECV;
|
|
|
|
|
2016-01-23 04:56:46 +08:00
|
|
|
if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
|
2010-05-24 12:44:54 +08:00
|
|
|
wc.wr_id = qp->r_wr_id;
|
|
|
|
wc.status = err;
|
2016-01-23 05:07:36 +08:00
|
|
|
rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
|
2010-05-24 12:44:54 +08:00
|
|
|
}
|
|
|
|
wc.status = IB_WC_WR_FLUSH_ERR;
|
|
|
|
|
|
|
|
if (qp->r_rq.wq) {
|
2016-01-23 04:45:59 +08:00
|
|
|
struct rvt_rwq *wq;
|
2010-05-24 12:44:54 +08:00
|
|
|
u32 head;
|
|
|
|
u32 tail;
|
|
|
|
|
|
|
|
spin_lock(&qp->r_rq.lock);
|
|
|
|
|
|
|
|
/* sanity check pointers before trusting them */
|
|
|
|
wq = qp->r_rq.wq;
|
|
|
|
head = wq->head;
|
|
|
|
if (head >= qp->r_rq.size)
|
|
|
|
head = 0;
|
|
|
|
tail = wq->tail;
|
|
|
|
if (tail >= qp->r_rq.size)
|
|
|
|
tail = 0;
|
|
|
|
while (tail != head) {
|
|
|
|
wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
|
|
|
|
if (++tail >= qp->r_rq.size)
|
|
|
|
tail = 0;
|
2016-01-23 05:07:36 +08:00
|
|
|
rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
|
2010-05-24 12:44:54 +08:00
|
|
|
}
|
|
|
|
wq->tail = tail;
|
|
|
|
|
|
|
|
spin_unlock(&qp->r_rq.lock);
|
|
|
|
} else if (qp->ibqp.event_handler)
|
|
|
|
ret = 1;
|
|
|
|
|
|
|
|
bail:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* qib_modify_qp - modify the attributes of a queue pair
|
|
|
|
* @ibqp: the queue pair who's attributes we're modifying
|
|
|
|
* @attr: the new attributes
|
|
|
|
* @attr_mask: the mask of attributes to modify
|
|
|
|
* @udata: user data for libibverbs.so
|
|
|
|
*
|
|
|
|
* Returns 0 on success, otherwise returns an errno.
|
|
|
|
*/
|
|
|
|
int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
|
|
|
|
int attr_mask, struct ib_udata *udata)
|
|
|
|
{
|
|
|
|
struct qib_ibdev *dev = to_idev(ibqp->device);
|
2016-01-23 05:07:42 +08:00
|
|
|
struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
|
2016-01-23 04:45:11 +08:00
|
|
|
struct qib_qp_priv *priv = qp->priv;
|
2010-05-24 12:44:54 +08:00
|
|
|
enum ib_qp_state cur_state, new_state;
|
|
|
|
struct ib_event ev;
|
|
|
|
int lastwqe = 0;
|
|
|
|
int mig = 0;
|
|
|
|
int ret;
|
|
|
|
u32 pmtu = 0; /* for gcc warning only */
|
|
|
|
|
|
|
|
spin_lock_irq(&qp->r_lock);
|
|
|
|
spin_lock(&qp->s_lock);
|
|
|
|
|
|
|
|
cur_state = attr_mask & IB_QP_CUR_STATE ?
|
|
|
|
attr->cur_qp_state : qp->state;
|
|
|
|
new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
|
|
|
|
|
|
|
|
if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
|
IB/core: Ethernet L2 attributes in verbs/cm structures
This patch add the support for Ethernet L2 attributes in the
verbs/cm/cma structures.
When dealing with L2 Ethernet, we should use smac, dmac, vlan ID and priority
in a similar manner that the IB L2 (and the L4 PKEY) attributes are used.
Thus, those attributes were added to the following structures:
* ib_ah_attr - added dmac
* ib_qp_attr - added smac and vlan_id, (sl remains vlan priority)
* ib_wc - added smac, vlan_id
* ib_sa_path_rec - added smac, dmac, vlan_id
* cm_av - added smac and vlan_id
For the path record structure, extra care was taken to avoid the new
fields when packing it into wire format, so we don't break the IB CM
and SA wire protocol.
On the active side, the CM fills. its internal structures from the
path provided by the ULP. We add there taking the ETH L2 attributes
and placing them into the CM Address Handle (struct cm_av).
On the passive side, the CM fills its internal structures from the WC
associated with the REQ message. We add there taking the ETH L2
attributes from the WC.
When the HW driver provides the required ETH L2 attributes in the WC,
they set the IB_WC_WITH_SMAC and IB_WC_WITH_VLAN flags. The IB core
code checks for the presence of these flags, and in their absence does
address resolution from the ib_init_ah_from_wc() helper function.
ib_modify_qp_is_ok is also updated to consider the link layer. Some
parameters are mandatory for Ethernet link layer, while they are
irrelevant for IB. Vendor drivers are modified to support the new
function signature.
Signed-off-by: Matan Barak <matanb@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
2013-12-13 00:03:11 +08:00
|
|
|
attr_mask, IB_LINK_LAYER_UNSPECIFIED))
|
2010-05-24 12:44:54 +08:00
|
|
|
goto inval;
|
|
|
|
|
|
|
|
if (attr_mask & IB_QP_AV) {
|
2016-01-23 04:44:53 +08:00
|
|
|
if (attr->ah_attr.dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
|
2010-05-24 12:44:54 +08:00
|
|
|
goto inval;
|
2016-01-23 04:46:07 +08:00
|
|
|
if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
|
2010-05-24 12:44:54 +08:00
|
|
|
goto inval;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (attr_mask & IB_QP_ALT_PATH) {
|
2016-01-23 04:44:53 +08:00
|
|
|
if (attr->alt_ah_attr.dlid >=
|
|
|
|
be16_to_cpu(IB_MULTICAST_LID_BASE))
|
2010-05-24 12:44:54 +08:00
|
|
|
goto inval;
|
2016-01-23 04:46:07 +08:00
|
|
|
if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
|
2010-05-24 12:44:54 +08:00
|
|
|
goto inval;
|
|
|
|
if (attr->alt_pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
|
|
|
|
goto inval;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (attr_mask & IB_QP_PKEY_INDEX)
|
|
|
|
if (attr->pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
|
|
|
|
goto inval;
|
|
|
|
|
|
|
|
if (attr_mask & IB_QP_MIN_RNR_TIMER)
|
|
|
|
if (attr->min_rnr_timer > 31)
|
|
|
|
goto inval;
|
|
|
|
|
|
|
|
if (attr_mask & IB_QP_PORT)
|
|
|
|
if (qp->ibqp.qp_type == IB_QPT_SMI ||
|
|
|
|
qp->ibqp.qp_type == IB_QPT_GSI ||
|
|
|
|
attr->port_num == 0 ||
|
|
|
|
attr->port_num > ibqp->device->phys_port_cnt)
|
|
|
|
goto inval;
|
|
|
|
|
|
|
|
if (attr_mask & IB_QP_DEST_QPN)
|
|
|
|
if (attr->dest_qp_num > QIB_QPN_MASK)
|
|
|
|
goto inval;
|
|
|
|
|
|
|
|
if (attr_mask & IB_QP_RETRY_CNT)
|
|
|
|
if (attr->retry_cnt > 7)
|
|
|
|
goto inval;
|
|
|
|
|
|
|
|
if (attr_mask & IB_QP_RNR_RETRY)
|
|
|
|
if (attr->rnr_retry > 7)
|
|
|
|
goto inval;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Don't allow invalid path_mtu values. OK to set greater
|
|
|
|
* than the active mtu (or even the max_cap, if we have tuned
|
|
|
|
* that to a small mtu. We'll set qp->path_mtu
|
|
|
|
* to the lesser of requested attribute mtu and active,
|
|
|
|
* for packetizing messages.
|
|
|
|
* Note that the QP port has to be set in INIT and MTU in RTR.
|
|
|
|
*/
|
|
|
|
if (attr_mask & IB_QP_PATH_MTU) {
|
|
|
|
struct qib_devdata *dd = dd_from_dev(dev);
|
|
|
|
int mtu, pidx = qp->port_num - 1;
|
|
|
|
|
|
|
|
mtu = ib_mtu_enum_to_int(attr->path_mtu);
|
|
|
|
if (mtu == -1)
|
|
|
|
goto inval;
|
|
|
|
if (mtu > dd->pport[pidx].ibmtu) {
|
|
|
|
switch (dd->pport[pidx].ibmtu) {
|
|
|
|
case 4096:
|
|
|
|
pmtu = IB_MTU_4096;
|
|
|
|
break;
|
|
|
|
case 2048:
|
|
|
|
pmtu = IB_MTU_2048;
|
|
|
|
break;
|
|
|
|
case 1024:
|
|
|
|
pmtu = IB_MTU_1024;
|
|
|
|
break;
|
|
|
|
case 512:
|
|
|
|
pmtu = IB_MTU_512;
|
|
|
|
break;
|
|
|
|
case 256:
|
|
|
|
pmtu = IB_MTU_256;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
pmtu = IB_MTU_2048;
|
|
|
|
}
|
|
|
|
} else
|
|
|
|
pmtu = attr->path_mtu;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (attr_mask & IB_QP_PATH_MIG_STATE) {
|
|
|
|
if (attr->path_mig_state == IB_MIG_REARM) {
|
|
|
|
if (qp->s_mig_state == IB_MIG_ARMED)
|
|
|
|
goto inval;
|
|
|
|
if (new_state != IB_QPS_RTS)
|
|
|
|
goto inval;
|
|
|
|
} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
|
|
|
|
if (qp->s_mig_state == IB_MIG_REARM)
|
|
|
|
goto inval;
|
|
|
|
if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
|
|
|
|
goto inval;
|
|
|
|
if (qp->s_mig_state == IB_MIG_ARMED)
|
|
|
|
mig = 1;
|
|
|
|
} else
|
|
|
|
goto inval;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
|
|
|
|
if (attr->max_dest_rd_atomic > QIB_MAX_RDMA_ATOMIC)
|
|
|
|
goto inval;
|
|
|
|
|
|
|
|
switch (new_state) {
|
|
|
|
case IB_QPS_RESET:
|
|
|
|
if (qp->state != IB_QPS_RESET) {
|
|
|
|
qp->state = IB_QPS_RESET;
|
2016-01-23 04:56:14 +08:00
|
|
|
spin_lock(&dev->rdi.pending_lock);
|
2016-01-23 04:45:11 +08:00
|
|
|
if (!list_empty(&priv->iowait))
|
|
|
|
list_del_init(&priv->iowait);
|
2016-01-23 04:56:14 +08:00
|
|
|
spin_unlock(&dev->rdi.pending_lock);
|
2016-01-23 04:56:46 +08:00
|
|
|
qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
|
2010-05-24 12:44:54 +08:00
|
|
|
spin_unlock(&qp->s_lock);
|
|
|
|
spin_unlock_irq(&qp->r_lock);
|
|
|
|
/* Stop the sending work queue and retry timer */
|
2016-01-23 04:45:11 +08:00
|
|
|
cancel_work_sync(&priv->s_work);
|
2010-05-24 12:44:54 +08:00
|
|
|
del_timer_sync(&qp->s_timer);
|
2016-01-23 04:45:11 +08:00
|
|
|
wait_event(priv->wait_dma,
|
|
|
|
!atomic_read(&priv->s_dma_busy));
|
|
|
|
if (priv->s_tx) {
|
|
|
|
qib_put_txreq(priv->s_tx);
|
|
|
|
priv->s_tx = NULL;
|
2010-05-24 12:44:54 +08:00
|
|
|
}
|
|
|
|
remove_qp(dev, qp);
|
|
|
|
wait_event(qp->wait, !atomic_read(&qp->refcount));
|
|
|
|
spin_lock_irq(&qp->r_lock);
|
|
|
|
spin_lock(&qp->s_lock);
|
|
|
|
clear_mr_refs(qp, 1);
|
2016-01-23 04:56:52 +08:00
|
|
|
rvt_reset_qp(&dev->rdi, qp, ibqp->qp_type);
|
2010-05-24 12:44:54 +08:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
case IB_QPS_RTR:
|
|
|
|
/* Allow event to retrigger if QP set to RTR more than once */
|
2016-01-23 04:56:46 +08:00
|
|
|
qp->r_flags &= ~RVT_R_COMM_EST;
|
2010-05-24 12:44:54 +08:00
|
|
|
qp->state = new_state;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case IB_QPS_SQD:
|
|
|
|
qp->s_draining = qp->s_last != qp->s_cur;
|
|
|
|
qp->state = new_state;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case IB_QPS_SQE:
|
|
|
|
if (qp->ibqp.qp_type == IB_QPT_RC)
|
|
|
|
goto inval;
|
|
|
|
qp->state = new_state;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case IB_QPS_ERR:
|
|
|
|
lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
qp->state = new_state;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (attr_mask & IB_QP_PKEY_INDEX)
|
|
|
|
qp->s_pkey_index = attr->pkey_index;
|
|
|
|
|
|
|
|
if (attr_mask & IB_QP_PORT)
|
|
|
|
qp->port_num = attr->port_num;
|
|
|
|
|
|
|
|
if (attr_mask & IB_QP_DEST_QPN)
|
|
|
|
qp->remote_qpn = attr->dest_qp_num;
|
|
|
|
|
|
|
|
if (attr_mask & IB_QP_SQ_PSN) {
|
|
|
|
qp->s_next_psn = attr->sq_psn & QIB_PSN_MASK;
|
|
|
|
qp->s_psn = qp->s_next_psn;
|
|
|
|
qp->s_sending_psn = qp->s_next_psn;
|
|
|
|
qp->s_last_psn = qp->s_next_psn - 1;
|
|
|
|
qp->s_sending_hpsn = qp->s_last_psn;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (attr_mask & IB_QP_RQ_PSN)
|
|
|
|
qp->r_psn = attr->rq_psn & QIB_PSN_MASK;
|
|
|
|
|
|
|
|
if (attr_mask & IB_QP_ACCESS_FLAGS)
|
|
|
|
qp->qp_access_flags = attr->qp_access_flags;
|
|
|
|
|
|
|
|
if (attr_mask & IB_QP_AV) {
|
|
|
|
qp->remote_ah_attr = attr->ah_attr;
|
|
|
|
qp->s_srate = attr->ah_attr.static_rate;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (attr_mask & IB_QP_ALT_PATH) {
|
|
|
|
qp->alt_ah_attr = attr->alt_ah_attr;
|
|
|
|
qp->s_alt_pkey_index = attr->alt_pkey_index;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (attr_mask & IB_QP_PATH_MIG_STATE) {
|
|
|
|
qp->s_mig_state = attr->path_mig_state;
|
|
|
|
if (mig) {
|
|
|
|
qp->remote_ah_attr = qp->alt_ah_attr;
|
|
|
|
qp->port_num = qp->alt_ah_attr.port_num;
|
|
|
|
qp->s_pkey_index = qp->s_alt_pkey_index;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-09-24 01:16:34 +08:00
|
|
|
if (attr_mask & IB_QP_PATH_MTU) {
|
2010-05-24 12:44:54 +08:00
|
|
|
qp->path_mtu = pmtu;
|
2011-09-24 01:16:34 +08:00
|
|
|
qp->pmtu = ib_mtu_enum_to_int(pmtu);
|
|
|
|
}
|
2010-05-24 12:44:54 +08:00
|
|
|
|
|
|
|
if (attr_mask & IB_QP_RETRY_CNT) {
|
|
|
|
qp->s_retry_cnt = attr->retry_cnt;
|
|
|
|
qp->s_retry = attr->retry_cnt;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (attr_mask & IB_QP_RNR_RETRY) {
|
|
|
|
qp->s_rnr_retry_cnt = attr->rnr_retry;
|
|
|
|
qp->s_rnr_retry = attr->rnr_retry;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (attr_mask & IB_QP_MIN_RNR_TIMER)
|
|
|
|
qp->r_min_rnr_timer = attr->min_rnr_timer;
|
|
|
|
|
2011-09-24 01:16:49 +08:00
|
|
|
if (attr_mask & IB_QP_TIMEOUT) {
|
2010-05-24 12:44:54 +08:00
|
|
|
qp->timeout = attr->timeout;
|
2011-09-24 01:16:49 +08:00
|
|
|
qp->timeout_jiffies =
|
|
|
|
usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
|
|
|
|
1000UL);
|
|
|
|
}
|
2010-05-24 12:44:54 +08:00
|
|
|
|
|
|
|
if (attr_mask & IB_QP_QKEY)
|
|
|
|
qp->qkey = attr->qkey;
|
|
|
|
|
|
|
|
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
|
|
|
|
qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
|
|
|
|
|
|
|
|
if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
|
|
|
|
qp->s_max_rd_atomic = attr->max_rd_atomic;
|
|
|
|
|
|
|
|
spin_unlock(&qp->s_lock);
|
|
|
|
spin_unlock_irq(&qp->r_lock);
|
|
|
|
|
|
|
|
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
|
|
|
|
insert_qp(dev, qp);
|
|
|
|
|
|
|
|
if (lastwqe) {
|
|
|
|
ev.device = qp->ibqp.device;
|
|
|
|
ev.element.qp = &qp->ibqp;
|
|
|
|
ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
|
|
|
|
qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
|
|
|
|
}
|
|
|
|
if (mig) {
|
|
|
|
ev.device = qp->ibqp.device;
|
|
|
|
ev.element.qp = &qp->ibqp;
|
|
|
|
ev.event = IB_EVENT_PATH_MIG;
|
|
|
|
qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
|
|
|
|
}
|
|
|
|
ret = 0;
|
|
|
|
goto bail;
|
|
|
|
|
|
|
|
inval:
|
|
|
|
spin_unlock(&qp->s_lock);
|
|
|
|
spin_unlock_irq(&qp->r_lock);
|
|
|
|
ret = -EINVAL;
|
|
|
|
|
|
|
|
bail:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* qib_compute_aeth - compute the AETH (syndrome + MSN)
|
|
|
|
* @qp: the queue pair to compute the AETH for
|
|
|
|
*
|
|
|
|
* Returns the AETH.
|
|
|
|
*/
|
2016-01-23 04:45:59 +08:00
|
|
|
__be32 qib_compute_aeth(struct rvt_qp *qp)
|
2010-05-24 12:44:54 +08:00
|
|
|
{
|
|
|
|
u32 aeth = qp->r_msn & QIB_MSN_MASK;
|
|
|
|
|
|
|
|
if (qp->ibqp.srq) {
|
|
|
|
/*
|
|
|
|
* Shared receive queues don't generate credits.
|
|
|
|
* Set the credit field to the invalid value.
|
|
|
|
*/
|
|
|
|
aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;
|
|
|
|
} else {
|
|
|
|
u32 min, max, x;
|
|
|
|
u32 credits;
|
2016-01-23 04:45:59 +08:00
|
|
|
struct rvt_rwq *wq = qp->r_rq.wq;
|
2010-05-24 12:44:54 +08:00
|
|
|
u32 head;
|
|
|
|
u32 tail;
|
|
|
|
|
|
|
|
/* sanity check pointers before trusting them */
|
|
|
|
head = wq->head;
|
|
|
|
if (head >= qp->r_rq.size)
|
|
|
|
head = 0;
|
|
|
|
tail = wq->tail;
|
|
|
|
if (tail >= qp->r_rq.size)
|
|
|
|
tail = 0;
|
|
|
|
/*
|
|
|
|
* Compute the number of credits available (RWQEs).
|
|
|
|
* XXX Not holding the r_rq.lock here so there is a small
|
|
|
|
* chance that the pair of reads are not atomic.
|
|
|
|
*/
|
|
|
|
credits = head - tail;
|
|
|
|
if ((int)credits < 0)
|
|
|
|
credits += qp->r_rq.size;
|
|
|
|
/*
|
|
|
|
* Binary search the credit table to find the code to
|
|
|
|
* use.
|
|
|
|
*/
|
|
|
|
min = 0;
|
|
|
|
max = 31;
|
|
|
|
for (;;) {
|
|
|
|
x = (min + max) / 2;
|
|
|
|
if (credit_table[x] == credits)
|
|
|
|
break;
|
|
|
|
if (credit_table[x] > credits)
|
|
|
|
max = x;
|
|
|
|
else if (min == x)
|
|
|
|
break;
|
|
|
|
else
|
|
|
|
min = x;
|
|
|
|
}
|
|
|
|
aeth |= x << QIB_AETH_CREDIT_SHIFT;
|
|
|
|
}
|
|
|
|
return cpu_to_be32(aeth);
|
|
|
|
}
|
|
|
|
|
2016-01-23 04:56:52 +08:00
|
|
|
/*
 * Allocate the qib-private per-QP state (header buffer, DMA waitqueue,
 * send work item, iowait link) for rdmavt.  Returns the new private
 * struct, or ERR_PTR(-ENOMEM) on allocation failure.
 */
void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp)
{
	struct qib_qp_priv *qpp = kzalloc(sizeof(*qpp), gfp);

	if (!qpp)
		goto bail;
	qpp->owner = qp;

	qpp->s_hdr = kzalloc(sizeof(*qpp->s_hdr), gfp);
	if (!qpp->s_hdr)
		goto bail_qpp;

	init_waitqueue_head(&qpp->wait_dma);
	INIT_WORK(&qpp->s_work, _qib_do_send);
	INIT_LIST_HEAD(&qpp->iowait);

	return qpp;

bail_qpp:
	kfree(qpp);
bail:
	return ERR_PTR(-ENOMEM);
}
|
2010-05-24 12:44:54 +08:00
|
|
|
|
2016-01-23 04:56:52 +08:00
|
|
|
void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
|
|
|
|
{
|
|
|
|
struct qib_qp_priv *priv = qp->priv;
|
2010-05-24 12:44:54 +08:00
|
|
|
|
2016-01-23 04:45:11 +08:00
|
|
|
kfree(priv->s_hdr);
|
|
|
|
kfree(priv);
|
2010-05-24 12:44:54 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * qib_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int qib_destroy_qp(struct ib_qp *ibqp)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct qib_ibdev *dev = to_idev(ibqp->device);
	struct qib_qp_priv *priv = qp->priv;

	/* Make sure HW and driver activity is stopped. */
	spin_lock_irq(&qp->s_lock);
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;
		/* pending_lock nests inside s_lock; take off any wait list */
		spin_lock(&dev->rdi.pending_lock);
		if (!list_empty(&priv->iowait))
			list_del_init(&priv->iowait);
		spin_unlock(&dev->rdi.pending_lock);
		/* stop the retry timer and any further wait states */
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
		spin_unlock_irq(&qp->s_lock);
		/* these waits must happen without s_lock held */
		cancel_work_sync(&priv->s_work);
		del_timer_sync(&qp->s_timer);
		/* wait for any in-flight SDMA descriptors to complete */
		wait_event(priv->wait_dma, !atomic_read(&priv->s_dma_busy));
		if (priv->s_tx) {
			qib_put_txreq(priv->s_tx);
			priv->s_tx = NULL;
		}
		/* unhash the QP, then wait for all references to drop */
		remove_qp(dev, qp);
		wait_event(qp->wait, !atomic_read(&qp->refcount));
		clear_mr_refs(qp, 1);
	} else
		spin_unlock_irq(&qp->s_lock);

	/* all user's cleaned up, mark it available */
	free_qpn(&dev->rdi.qp_dev->qpn_table, qp->ibqp.qp_num);
	spin_lock(&dev->n_qps_lock);
	dev->n_qps_allocated--;
	spin_unlock(&dev->n_qps_lock);

	/* r_rq.wq is either user-mmapable (ref-counted) or vmalloc'ed */
	if (qp->ip)
		kref_put(&qp->ip->ref, rvt_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	vfree(qp->s_wq);
	/*
	 * NOTE(review): this frees the private state directly, duplicating
	 * qp_priv_free(); presumably rdmavt does not also invoke that
	 * callback on this path — confirm to rule out a double free.
	 */
	kfree(priv->s_hdr);
	kfree(priv);
	kfree(qp);
	return 0;
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* qib_get_credit - flush the send work queue of a QP
|
|
|
|
* @qp: the qp who's send work queue to flush
|
|
|
|
* @aeth: the Acknowledge Extended Transport Header
|
|
|
|
*
|
|
|
|
* The QP s_lock should be held.
|
|
|
|
*/
|
2016-01-23 04:45:59 +08:00
|
|
|
void qib_get_credit(struct rvt_qp *qp, u32 aeth)
|
2010-05-24 12:44:54 +08:00
|
|
|
{
|
|
|
|
u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the credit is invalid, we can send
|
|
|
|
* as many packets as we like. Otherwise, we have to
|
|
|
|
* honor the credit field.
|
|
|
|
*/
|
|
|
|
if (credit == QIB_AETH_CREDIT_INVAL) {
|
2016-01-23 04:56:46 +08:00
|
|
|
if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
|
|
|
|
qp->s_flags |= RVT_S_UNLIMITED_CREDIT;
|
|
|
|
if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
|
|
|
|
qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
|
2010-05-24 12:44:54 +08:00
|
|
|
qib_schedule_send(qp);
|
|
|
|
}
|
|
|
|
}
|
2016-01-23 04:56:46 +08:00
|
|
|
} else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
|
2010-05-24 12:44:54 +08:00
|
|
|
/* Compute new LSN (i.e., MSN + credit) */
|
|
|
|
credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
|
|
|
|
if (qib_cmp24(credit, qp->s_lsn) > 0) {
|
|
|
|
qp->s_lsn = credit;
|
2016-01-23 04:56:46 +08:00
|
|
|
if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
|
|
|
|
qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
|
2010-05-24 12:44:54 +08:00
|
|
|
qib_schedule_send(qp);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2013-06-16 05:07:14 +08:00
|
|
|
|
|
|
|
#ifdef CONFIG_DEBUG_FS
|
|
|
|
|
|
|
|
/* Cursor state for walking every QP in the device's hash table (debugfs). */
struct qib_qp_iter {
	struct qib_ibdev *dev;	/* device whose QP table is being walked */
	struct rvt_qp *qp;	/* current QP, or NULL before the first step */
	int n;			/* current hash-table bucket index */
};
|
|
|
|
|
|
|
|
struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev)
|
|
|
|
{
|
|
|
|
struct qib_qp_iter *iter;
|
|
|
|
|
|
|
|
iter = kzalloc(sizeof(*iter), GFP_KERNEL);
|
|
|
|
if (!iter)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
iter->dev = dev;
|
|
|
|
if (qib_qp_iter_next(iter)) {
|
|
|
|
kfree(iter);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return iter;
|
|
|
|
}
|
|
|
|
|
|
|
|
int qib_qp_iter_next(struct qib_qp_iter *iter)
|
|
|
|
{
|
|
|
|
struct qib_ibdev *dev = iter->dev;
|
|
|
|
int n = iter->n;
|
|
|
|
int ret = 1;
|
2016-01-23 04:45:59 +08:00
|
|
|
struct rvt_qp *pqp = iter->qp;
|
|
|
|
struct rvt_qp *qp;
|
2013-06-16 05:07:14 +08:00
|
|
|
|
2016-01-23 04:56:27 +08:00
|
|
|
for (; n < dev->rdi.qp_dev->qp_table_size; n++) {
|
2013-06-16 05:07:14 +08:00
|
|
|
if (pqp)
|
|
|
|
qp = rcu_dereference(pqp->next);
|
|
|
|
else
|
2016-01-23 04:56:27 +08:00
|
|
|
qp = rcu_dereference(dev->rdi.qp_dev->qp_table[n]);
|
2013-06-16 05:07:14 +08:00
|
|
|
pqp = qp;
|
|
|
|
if (qp) {
|
|
|
|
iter->qp = qp;
|
|
|
|
iter->n = n;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Human-readable QP type names for debugfs output, indexed by
 * qp->ibqp.qp_type (presumably matching the low ib_qp_type enum
 * values — verify against <rdma/ib_verbs.h>).
 */
static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};
|
|
|
|
|
|
|
|
/*
 * Emit one debugfs line describing the iterator's current QP: state,
 * flags, PSN bookkeeping, send-queue indices, and the remote QPN/LID.
 * Reads QP fields without locking — this is best-effort diagnostic
 * output, so a torn snapshot is acceptable.
 */
void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter)
{
	struct rvt_swqe *wqe;
	struct rvt_qp *qp = iter->qp;
	struct qib_qp_priv *priv = qp->priv;

	/* report the opcode of the oldest not-yet-completed send WQE */
	wqe = rvt_get_swqe_ptr(qp, qp->s_last);
	seq_printf(s,
		   "N %d QP%u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x\n",
		   iter->n,
		   qp->ibqp.qp_num,
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe->wr.opcode,
		   qp->s_hdrwords,
		   qp->s_flags,
		   atomic_read(&priv->s_dma_busy),
		   !list_empty(&priv->iowait),
		   qp->timeout,
		   wqe->ssn,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->remote_qpn,
		   qp->remote_ah_attr.dlid);
}
|
|
|
|
|
|
|
|
#endif
|