Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  RDMA/cma: Replace global lock in rdma_destroy_id() with id-specific one
  IB/cm: Cancel pending LAP message when exiting IB_CM_ESTABLISH state
  IB/cm: Bump reference count on cm_id before invoking callback
  RDMA/cma: Fix crash in request handlers
  IB/ipath: Don't reset disabled devices
  IB/qib: Fix M_Key field in SubnGet and SubnGetResp MADs
  IB/qib: Set default LE2 value for active cables to 0
  RDMA/cxgb4: Debugfs dump_qp() updates
  RDMA/cxgb4: Dispatch FATAL event on EEH errors
  RDMA/cxgb4: Use ULP_MODE_TCPDDP
  RDMA/cxgb4: Enable on-chip SQ support by default
  RDMA/cxgb4: Do CIDX_INC updates every 1/16 CQ depth CQE reaps
  RDMA/cxgb4: Remove db_drop_task
  RDMA/cxgb4: Turn on delayed ACK
  IB/qib: Return correct MAD when setting link width to 255

@@ -1988,6 +1988,10 @@ int ib_send_cm_dreq(struct ib_cm_id *cm_id,
         goto out;
     }
 
+    if (cm_id->lap_state == IB_CM_LAP_SENT ||
+        cm_id->lap_state == IB_CM_MRA_LAP_RCVD)
+        ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+
     ret = cm_alloc_msg(cm_id_priv, &msg);
     if (ret) {
         cm_enter_timewait(cm_id_priv);
@@ -2129,6 +2133,10 @@ static int cm_dreq_handler(struct cm_work *work)
         ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
         break;
     case IB_CM_ESTABLISHED:
+        if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
+            cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
+            ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+        break;
     case IB_CM_MRA_REP_RCVD:
         break;
     case IB_CM_TIMEWAIT:
@@ -2349,9 +2357,18 @@ static int cm_rej_handler(struct cm_work *work)
         /* fall through */
     case IB_CM_REP_RCVD:
     case IB_CM_MRA_REP_SENT:
-    case IB_CM_ESTABLISHED:
         cm_enter_timewait(cm_id_priv);
         break;
+    case IB_CM_ESTABLISHED:
+        if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
+            cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
+            if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
+                ib_cancel_mad(cm_id_priv->av.port->mad_agent,
+                              cm_id_priv->msg);
+            cm_enter_timewait(cm_id_priv);
+            break;
+        }
+        /* fall through */
     default:
         spin_unlock_irq(&cm_id_priv->lock);
         ret = -EINVAL;
@@ -2989,6 +3006,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
         goto out; /* No match. */
     }
     atomic_inc(&cur_cm_id_priv->refcount);
+    atomic_inc(&cm_id_priv->refcount);
     spin_unlock_irq(&cm.lock);
 
     cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;

@@ -308,11 +308,13 @@ static inline void release_mc(struct kref *kref)
     kfree(mc);
 }
 
-static void cma_detach_from_dev(struct rdma_id_private *id_priv)
+static void cma_release_dev(struct rdma_id_private *id_priv)
 {
+    mutex_lock(&lock);
     list_del(&id_priv->list);
     cma_deref_dev(id_priv->cma_dev);
     id_priv->cma_dev = NULL;
+    mutex_unlock(&lock);
 }
 
 static int cma_set_qkey(struct rdma_id_private *id_priv)
@@ -373,6 +375,7 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv)
     enum rdma_link_layer dev_ll = dev_addr->dev_type == ARPHRD_INFINIBAND ?
         IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
 
+    mutex_lock(&lock);
     iboe_addr_get_sgid(dev_addr, &iboe_gid);
     memcpy(&gid, dev_addr->src_dev_addr +
            rdma_addr_gid_offset(dev_addr), sizeof gid);
@@ -398,6 +401,7 @@ out:
     if (!ret)
         cma_attach_to_dev(id_priv, cma_dev);
 
+    mutex_unlock(&lock);
     return ret;
 }
 
@@ -904,9 +908,14 @@ void rdma_destroy_id(struct rdma_cm_id *id)
     state = cma_exch(id_priv, CMA_DESTROYING);
     cma_cancel_operation(id_priv, state);
 
-    mutex_lock(&lock);
+    /*
+     * Wait for any active callback to finish.  New callbacks will find
+     * the id_priv state set to destroying and abort.
+     */
+    mutex_lock(&id_priv->handler_mutex);
+    mutex_unlock(&id_priv->handler_mutex);
+
     if (id_priv->cma_dev) {
-        mutex_unlock(&lock);
         switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
         case RDMA_TRANSPORT_IB:
             if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
@@ -920,10 +929,8 @@ void rdma_destroy_id(struct rdma_cm_id *id)
             break;
         }
         cma_leave_mc_groups(id_priv);
-        mutex_lock(&lock);
-        cma_detach_from_dev(id_priv);
+        cma_release_dev(id_priv);
     }
-    mutex_unlock(&lock);
 
     cma_release_port(id_priv);
     cma_deref_id(id_priv);
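
The back-to-back lock/unlock of id_priv->handler_mutex in the rdma_destroy_id() hunks above is the synchronization point behind "Replace global lock in rdma_destroy_id() with id-specific one": a callback that is already running holds the handler mutex, so once destroy can take and release it, no handler is still inside the id, and later callbacks bail out when they see the DESTROYING state. A rough userspace sketch of that idea follows; it uses pthreads, runs single-threaded for brevity, and all names are invented rather than rdma_cm types.

    /* Illustrative model of the destroy-vs-callback handshake; not kernel code.
     * Compile with: cc -pthread demo.c */
    #include <pthread.h>
    #include <stdio.h>

    struct fake_id {
            pthread_mutex_t handler_mutex;
            int destroying;
    };

    static void fake_event_handler(struct fake_id *id)
    {
            pthread_mutex_lock(&id->handler_mutex);
            if (!id->destroying)
                    printf("handler ran\n"); /* callbacks started later see the flag and abort */
            pthread_mutex_unlock(&id->handler_mutex);
    }

    static void fake_destroy(struct fake_id *id)
    {
            id->destroying = 1;
            /* Wait for any active handler to finish; new ones will abort. */
            pthread_mutex_lock(&id->handler_mutex);
            pthread_mutex_unlock(&id->handler_mutex);
            printf("no handler can still be running; safe to tear down\n");
    }

    int main(void)
    {
            struct fake_id id = { .destroying = 0 };

            pthread_mutex_init(&id.handler_mutex, NULL);
            fake_event_handler(&id);
            fake_destroy(&id);
            pthread_mutex_destroy(&id.handler_mutex);
            return 0;
    }
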
@@ -1200,9 +1207,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
     }
 
     mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
-    mutex_lock(&lock);
     ret = cma_acquire_dev(conn_id);
-    mutex_unlock(&lock);
     if (ret)
         goto release_conn_id;
 
@@ -1210,6 +1215,11 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
     cm_id->context = conn_id;
     cm_id->cm_handler = cma_ib_handler;
 
+    /*
+     * Protect against the user destroying conn_id from another thread
+     * until we're done accessing it.
+     */
+    atomic_inc(&conn_id->refcount);
     ret = conn_id->id.event_handler(&conn_id->id, &event);
     if (!ret) {
         /*
@@ -1222,8 +1232,10 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
         ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
         mutex_unlock(&lock);
         mutex_unlock(&conn_id->handler_mutex);
+        cma_deref_id(conn_id);
         goto out;
     }
+    cma_deref_id(conn_id);
 
     /* Destroy the CM ID by returning a non-zero value. */
     conn_id->cm_id.ib = NULL;
@@ -1394,9 +1406,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
         goto out;
     }
 
-    mutex_lock(&lock);
     ret = cma_acquire_dev(conn_id);
-    mutex_unlock(&lock);
     if (ret) {
         mutex_unlock(&conn_id->handler_mutex);
         rdma_destroy_id(new_cm_id);
@@ -1425,17 +1435,25 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
     event.param.conn.private_data_len = iw_event->private_data_len;
     event.param.conn.initiator_depth = attr.max_qp_init_rd_atom;
     event.param.conn.responder_resources = attr.max_qp_rd_atom;
+
+    /*
+     * Protect against the user destroying conn_id from another thread
+     * until we're done accessing it.
+     */
+    atomic_inc(&conn_id->refcount);
     ret = conn_id->id.event_handler(&conn_id->id, &event);
     if (ret) {
         /* User wants to destroy the CM ID */
         conn_id->cm_id.iw = NULL;
         cma_exch(conn_id, CMA_DESTROYING);
         mutex_unlock(&conn_id->handler_mutex);
+        cma_deref_id(conn_id);
         rdma_destroy_id(&conn_id->id);
         goto out;
     }
 
     mutex_unlock(&conn_id->handler_mutex);
+    cma_deref_id(conn_id);
 
 out:
     if (dev)
@@ -1951,20 +1969,11 @@ static void addr_handler(int status, struct sockaddr *src_addr,
 
     memset(&event, 0, sizeof event);
     mutex_lock(&id_priv->handler_mutex);
-
-    /*
-     * Grab mutex to block rdma_destroy_id() from removing the device while
-     * we're trying to acquire it.
-     */
-    mutex_lock(&lock);
-    if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) {
-        mutex_unlock(&lock);
+    if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED))
         goto out;
-    }
 
     if (!status && !id_priv->cma_dev)
         status = cma_acquire_dev(id_priv);
-    mutex_unlock(&lock);
 
     if (status) {
         if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
@@ -2265,9 +2274,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
         if (ret)
             goto err1;
 
-        mutex_lock(&lock);
         ret = cma_acquire_dev(id_priv);
-        mutex_unlock(&lock);
         if (ret)
             goto err1;
     }
@@ -2279,11 +2286,8 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
 
     return 0;
 err2:
-    if (id_priv->cma_dev) {
-        mutex_lock(&lock);
-        cma_detach_from_dev(id_priv);
-        mutex_unlock(&lock);
-    }
+    if (id_priv->cma_dev)
+        cma_release_dev(id_priv);
 err1:
     cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE);
     return ret;

@@ -61,9 +61,9 @@ static char *states[] = {
     NULL,
 };
 
-static int dack_mode;
+static int dack_mode = 1;
 module_param(dack_mode, int, 0644);
-MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=0)");
+MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");
 
 int c4iw_max_read_depth = 8;
 module_param(c4iw_max_read_depth, int, 0644);
@@ -482,6 +482,7 @@ static int send_connect(struct c4iw_ep *ep)
            TX_CHAN(ep->tx_chan) |
            SMAC_SEL(ep->smac_idx) |
            DSCP(ep->tos) |
+           ULP_MODE(ULP_MODE_TCPDDP) |
            RCV_BUFSIZ(rcv_win>>10);
     opt2 = RX_CHANNEL(0) |
            RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
@@ -1274,6 +1275,7 @@ static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
            TX_CHAN(ep->tx_chan) |
            SMAC_SEL(ep->smac_idx) |
            DSCP(ep->tos) |
+           ULP_MODE(ULP_MODE_TCPDDP) |
            RCV_BUFSIZ(rcv_win>>10);
     opt2 = RX_CHANNEL(0) |
            RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);

@@ -87,17 +87,22 @@ static int dump_qp(int id, void *p, void *data)
         return 1;
 
     if (qp->ep)
-        cc = snprintf(qpd->buf + qpd->pos, space, "qp id %u state %u "
+        cc = snprintf(qpd->buf + qpd->pos, space,
+                      "qp sq id %u rq id %u state %u onchip %u "
                       "ep tid %u state %u %pI4:%u->%pI4:%u\n",
-                      qp->wq.sq.qid, (int)qp->attr.state,
+                      qp->wq.sq.qid, qp->wq.rq.qid, (int)qp->attr.state,
+                      qp->wq.sq.flags & T4_SQ_ONCHIP,
                       qp->ep->hwtid, (int)qp->ep->com.state,
                       &qp->ep->com.local_addr.sin_addr.s_addr,
                       ntohs(qp->ep->com.local_addr.sin_port),
                       &qp->ep->com.remote_addr.sin_addr.s_addr,
                       ntohs(qp->ep->com.remote_addr.sin_port));
     else
-        cc = snprintf(qpd->buf + qpd->pos, space, "qp id %u state %u\n",
-                      qp->wq.sq.qid, (int)qp->attr.state);
+        cc = snprintf(qpd->buf + qpd->pos, space,
+                      "qp sq id %u rq id %u state %u onchip %u\n",
+                      qp->wq.sq.qid, qp->wq.rq.qid,
+                      (int)qp->attr.state,
+                      qp->wq.sq.flags & T4_SQ_ONCHIP);
     if (cc < space)
         qpd->pos += cc;
     return 0;
@@ -368,7 +373,6 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev)
 static void c4iw_remove(struct c4iw_dev *dev)
 {
     PDBG("%s c4iw_dev %p\n", __func__, dev);
-    cancel_delayed_work_sync(&dev->db_drop_task);
     list_del(&dev->entry);
     if (dev->registered)
         c4iw_unregister_device(dev);
@@ -523,8 +527,16 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
     case CXGB4_STATE_START_RECOVERY:
         printk(KERN_INFO MOD "%s: Fatal Error\n",
                pci_name(dev->rdev.lldi.pdev));
-        if (dev->registered)
+        dev->rdev.flags |= T4_FATAL_ERROR;
+        if (dev->registered) {
+            struct ib_event event;
+
+            memset(&event, 0, sizeof event);
+            event.event = IB_EVENT_DEVICE_FATAL;
+            event.device = &dev->ibdev;
+            ib_dispatch_event(&event);
             c4iw_unregister_device(dev);
+        }
         break;
     case CXGB4_STATE_DETACH:
         printk(KERN_INFO MOD "%s: Detach\n",

@@ -176,7 +176,6 @@ struct c4iw_dev {
     struct idr mmidr;
     spinlock_t lock;
     struct list_head entry;
-    struct delayed_work db_drop_task;
     struct dentry *debugfs_root;
     u8 registered;
 };

@@ -31,9 +31,9 @@
  */
 #include "iw_cxgb4.h"
 
-static int ocqp_support;
+static int ocqp_support = 1;
 module_param(ocqp_support, int, 0644);
-MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=0)");
+MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");
 
 static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
 {

@@ -507,8 +507,14 @@ static inline void t4_swcq_consume(struct t4_cq *cq)
 static inline void t4_hwcq_consume(struct t4_cq *cq)
 {
     cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts;
-    if (++cq->cidx_inc == cq->size)
+    if (++cq->cidx_inc == (cq->size >> 4)) {
+        u32 val;
+
+        val = SEINTARM(0) | CIDXINC(cq->cidx_inc) | TIMERREG(7) |
+              INGRESSQID(cq->cqid);
+        writel(val, cq->gts);
         cq->cidx_inc = 0;
+    }
     if (++cq->cidx == cq->size) {
         cq->cidx = 0;
         cq->gen ^= 1;

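The t4_hwcq_consume() hunk above is the "Do CIDX_INC updates every 1/16 CQ depth CQE reaps" change: instead of letting cidx_inc run up to a full queue depth, the driver now reports the consumed-entry count to the hardware once the counter reaches size/16, so the update cost stays at one register write per batch. A tiny stand-alone model of that batching arithmetic follows; the names are invented and a printf stands in for the doorbell write.

    /* Batched consumer-index updates: report only every depth/16 reaps.
     * Purely illustrative; not the cxgb4 code. */
    #include <stdio.h>

    struct demo_cq {
            unsigned int size;      /* number of CQ entries */
            unsigned int cidx;      /* software consumer index */
            unsigned int cidx_inc;  /* consumed entries not yet reported */
    };

    static void demo_hwcq_consume(struct demo_cq *cq)
    {
            if (++cq->cidx_inc == (cq->size >> 4)) {
                    /* In the driver this is the CIDX_INC doorbell write. */
                    printf("report %u consumed entries to hw\n", cq->cidx_inc);
                    cq->cidx_inc = 0;
            }
            if (++cq->cidx == cq->size)
                    cq->cidx = 0;
    }

    int main(void)
    {
            struct demo_cq cq = { .size = 64 };

            for (int i = 0; i < 256; i++)
                    demo_hwcq_consume(&cq); /* prints every 64/16 = 4 reaps */
            return 0;
    }
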
@@ -557,6 +557,7 @@ static ssize_t store_reset(struct device *dev,
         dev_info(dev,"Unit %d is disabled, can't reset\n",
                  dd->ipath_unit);
         ret = -EINVAL;
+        goto bail;
     }
     ret = ipath_reset_device(dd->ipath_unit);
 bail:

@@ -5582,9 +5582,16 @@ static void qsfp_7322_event(struct work_struct *work)
      * even on failure to read cable information.  We don't
      * get here for QME, so IS_QME check not needed here.
      */
-    le2 = (!ret && qd->cache.atten[1] >= qib_long_atten &&
-           !ppd->dd->cspec->r1 && QSFP_IS_CU(qd->cache.tech)) ?
-        LE2_5m : LE2_DEFAULT;
+    if (!ret && !ppd->dd->cspec->r1) {
+        if (QSFP_IS_ACTIVE_FAR(qd->cache.tech))
+            le2 = LE2_QME;
+        else if (qd->cache.atten[1] >= qib_long_atten &&
+                 QSFP_IS_CU(qd->cache.tech))
+            le2 = LE2_5m;
+        else
+            le2 = LE2_DEFAULT;
+    } else
+        le2 = LE2_DEFAULT;
     ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
     init_txdds_table(ppd, 0);
 }

@@ -464,8 +464,9 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
     memset(smp->data, 0, sizeof(smp->data));
 
     /* Only return the mkey if the protection field allows it. */
-    if (smp->method == IB_MGMT_METHOD_SET || ibp->mkey == smp->mkey ||
-        ibp->mkeyprot == 0)
+    if (!(smp->method == IB_MGMT_METHOD_GET &&
+          ibp->mkey != smp->mkey &&
+          ibp->mkeyprot == 1))
         pip->mkey = ibp->mkey;
     pip->gid_prefix = ibp->gid_prefix;
     lid = ppd->lid;
@@ -705,7 +706,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
     lwe = pip->link_width_enabled;
     if (lwe) {
         if (lwe == 0xFF)
-            lwe = ppd->link_width_supported;
+            set_link_width_enabled(ppd, ppd->link_width_supported);
         else if (lwe >= 16 || (lwe & ~ppd->link_width_supported))
             smp->status |= IB_SMP_INVALID_FIELD;
         else if (lwe != ppd->link_width_enabled)
@@ -720,7 +721,8 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
      * speeds.
      */
     if (lse == 15)
-        lse = ppd->link_speed_supported;
+        set_link_speed_enabled(ppd,
+                               ppd->link_speed_supported);
     else if (lse >= 8 || (lse & ~ppd->link_speed_supported))
         smp->status |= IB_SMP_INVALID_FIELD;
     else if (lse != ppd->link_speed_enabled)
@@ -849,7 +851,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
     if (clientrereg)
         pip->clientrereg_resv_subnetto |= 0x80;
 
-    goto done;
+    goto get_only;
 
 err:
     smp->status |= IB_SMP_INVALID_FIELD;

@@ -79,6 +79,8 @@
 extern const char *const qib_qsfp_devtech[16];
 /* Active Equalization includes fiber, copper full EQ, and copper near Eq */
 #define QSFP_IS_ACTIVE(tech) ((0xA2FF >> ((tech) >> 4)) & 1)
+/* Active Equalization includes fiber, copper full EQ, and copper far Eq */
+#define QSFP_IS_ACTIVE_FAR(tech) ((0x32FF >> ((tech) >> 4)) & 1)
 /* Attenuation should be valid for copper other than full/near Eq */
 #define QSFP_HAS_ATTEN(tech) ((0x4D00 >> ((tech) >> 4)) & 1)
 /* Length is only valid if technology is "copper" */

@@ -123,6 +123,7 @@ enum {
     ULP_MODE_NONE = 0,
     ULP_MODE_ISCSI = 2,
     ULP_MODE_RDMA = 4,
+    ULP_MODE_TCPDDP = 5,
     ULP_MODE_FCOE = 6,
 };
 