IB/Verbs: Reform cm related part in IB-core cma/ucm
Use the raw management helpers to rework the CM-related parts of the IB core cma/ucm code.

A few of the checks are about the device's CM type rather than a particular port's capability; for those, passing port 1 directly works for now, but it cannot support devices that mix CM types across ports in the future.

Signed-off-by: Michael Wang <yun.wang@profitbricks.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Tested-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Sean Hefty <sean.hefty@intel.com>
Reviewed-by: Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
Tested-by: Doug Ledford <dledford@redhat.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 55045b2577
commit 21655afc62
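For orientation before the diff: rdma_ib_or_iboe(), rdma_protocol_iwarp() and rdma_protocol_ib() are the per-port "raw management helpers" the message refers to; their real definitions are introduced elsewhere in this series, not in this commit. Below is a minimal sketch, assuming they are thin per-port wrappers over the pre-existing node-type and link-layer queries; this is an illustration only, not the series' actual code.

#include <rdma/ib_verbs.h>

/*
 * Sketch only (assumed implementation, not from this commit): the real
 * helpers are meant to be genuinely per-port, which is the whole point
 * of the conversion below.
 */

/* true for both native IB ports and IBoE/RoCE (Ethernet) ports */
static inline bool rdma_ib_or_iboe(struct ib_device *device, u8 port_num)
{
        return rdma_node_get_transport(device->node_type) ==
               RDMA_TRANSPORT_IB;
}

/* true only for ports whose link layer is InfiniBand */
static inline bool rdma_protocol_ib(struct ib_device *device, u8 port_num)
{
        return rdma_ib_or_iboe(device, port_num) &&
               rdma_port_get_link_layer(device, port_num) ==
               IB_LINK_LAYER_INFINIBAND;
}

/* true for iWARP devices; today all of their ports speak iWARP */
static inline bool rdma_protocol_iwarp(struct ib_device *device, u8 port_num)
{
        return rdma_node_get_transport(device->node_type) ==
               RDMA_TRANSPORT_IWARP;
}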
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -735,8 +735,7 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
         int ret = 0;

         id_priv = container_of(id, struct rdma_id_private, id);
-        switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
-        case RDMA_TRANSPORT_IB:
+        if (rdma_ib_or_iboe(id->device, id->port_num)) {
                 if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
                         ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
                 else
@@ -745,19 +744,15 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,

                 if (qp_attr->qp_state == IB_QPS_RTR)
                         qp_attr->rq_psn = id_priv->seq_num;
-                break;
-        case RDMA_TRANSPORT_IWARP:
+        } else if (rdma_protocol_iwarp(id->device, id->port_num)) {
                 if (!id_priv->cm_id.iw) {
                         qp_attr->qp_access_flags = 0;
                         *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
                 } else
                         ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
                                                  qp_attr_mask);
-                break;
-        default:
+        } else
                 ret = -ENOSYS;
-                break;
-        }

         return ret;
 }
@@ -1044,17 +1039,12 @@ void rdma_destroy_id(struct rdma_cm_id *id)
         mutex_unlock(&id_priv->handler_mutex);

         if (id_priv->cma_dev) {
-                switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
-                case RDMA_TRANSPORT_IB:
+                if (rdma_ib_or_iboe(id_priv->id.device, 1)) {
                         if (id_priv->cm_id.ib)
                                 ib_destroy_cm_id(id_priv->cm_id.ib);
-                        break;
-                case RDMA_TRANSPORT_IWARP:
+                } else if (rdma_protocol_iwarp(id_priv->id.device, 1)) {
                         if (id_priv->cm_id.iw)
                                 iw_destroy_cm_id(id_priv->cm_id.iw);
-                        break;
-                default:
-                        break;
                 }
                 cma_leave_mc_groups(id_priv);
                 cma_release_dev(id_priv);
@@ -1633,7 +1623,7 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
         int ret;

         if (cma_family(id_priv) == AF_IB &&
-            rdma_node_get_transport(cma_dev->device->node_type) != RDMA_TRANSPORT_IB)
+            !rdma_ib_or_iboe(cma_dev->device, 1))
                 return;

         id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps,
@@ -2035,7 +2025,7 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv)
         mutex_lock(&lock);
         list_for_each_entry(cur_dev, &dev_list, list) {
                 if (cma_family(id_priv) == AF_IB &&
-                    rdma_node_get_transport(cur_dev->device->node_type) != RDMA_TRANSPORT_IB)
+                    !rdma_ib_or_iboe(cur_dev->device, 1))
                         continue;

                 if (!cma_dev)
@@ -2067,7 +2057,7 @@ port_found:
                 goto out;

         id_priv->id.route.addr.dev_addr.dev_type =
-                (rdma_port_get_link_layer(cma_dev->device, p) == IB_LINK_LAYER_INFINIBAND) ?
+                (rdma_protocol_ib(cma_dev->device, p)) ?
                 ARPHRD_INFINIBAND : ARPHRD_ETHER;

         rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
@@ -2544,18 +2534,15 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)

         id_priv->backlog = backlog;
         if (id->device) {
-                switch (rdma_node_get_transport(id->device->node_type)) {
-                case RDMA_TRANSPORT_IB:
+                if (rdma_ib_or_iboe(id->device, 1)) {
                         ret = cma_ib_listen(id_priv);
                         if (ret)
                                 goto err;
-                        break;
-                case RDMA_TRANSPORT_IWARP:
+                } else if (rdma_protocol_iwarp(id->device, 1)) {
                         ret = cma_iw_listen(id_priv, backlog);
                         if (ret)
                                 goto err;
-                        break;
-                default:
+                } else {
                         ret = -ENOSYS;
                         goto err;
                 }
@@ -2891,20 +2878,15 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
                 id_priv->srq = conn_param->srq;
         }

-        switch (rdma_node_get_transport(id->device->node_type)) {
-        case RDMA_TRANSPORT_IB:
+        if (rdma_ib_or_iboe(id->device, id->port_num)) {
                 if (id->qp_type == IB_QPT_UD)
                         ret = cma_resolve_ib_udp(id_priv, conn_param);
                 else
                         ret = cma_connect_ib(id_priv, conn_param);
-                break;
-        case RDMA_TRANSPORT_IWARP:
+        } else if (rdma_protocol_iwarp(id->device, id->port_num))
                 ret = cma_connect_iw(id_priv, conn_param);
-                break;
-        default:
+        else
                 ret = -ENOSYS;
-                break;
-        }
         if (ret)
                 goto err;

@@ -3007,8 +2989,7 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
                 id_priv->srq = conn_param->srq;
         }

-        switch (rdma_node_get_transport(id->device->node_type)) {
-        case RDMA_TRANSPORT_IB:
+        if (rdma_ib_or_iboe(id->device, id->port_num)) {
                 if (id->qp_type == IB_QPT_UD) {
                         if (conn_param)
                                 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
@@ -3024,14 +3005,10 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
                         else
                                 ret = cma_rep_recv(id_priv);
                 }
-                break;
-        case RDMA_TRANSPORT_IWARP:
+        } else if (rdma_protocol_iwarp(id->device, id->port_num))
                 ret = cma_accept_iw(id_priv, conn_param);
-                break;
-        default:
+        else
                 ret = -ENOSYS;
-                break;
-        }

         if (ret)
                 goto reject;
@@ -3075,8 +3052,7 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
         if (!id_priv->cm_id.ib)
                 return -EINVAL;

-        switch (rdma_node_get_transport(id->device->node_type)) {
-        case RDMA_TRANSPORT_IB:
+        if (rdma_ib_or_iboe(id->device, id->port_num)) {
                 if (id->qp_type == IB_QPT_UD)
                         ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
                                                 private_data, private_data_len);
@@ -3084,15 +3060,12 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
                         ret = ib_send_cm_rej(id_priv->cm_id.ib,
                                              IB_CM_REJ_CONSUMER_DEFINED, NULL,
                                              0, private_data, private_data_len);
-                break;
-        case RDMA_TRANSPORT_IWARP:
+        } else if (rdma_protocol_iwarp(id->device, id->port_num)) {
                 ret = iw_cm_reject(id_priv->cm_id.iw,
                                    private_data, private_data_len);
-                break;
-        default:
+        } else
                 ret = -ENOSYS;
-                break;
-        }
+
         return ret;
 }
 EXPORT_SYMBOL(rdma_reject);
@@ -3106,22 +3079,18 @@ int rdma_disconnect(struct rdma_cm_id *id)
         if (!id_priv->cm_id.ib)
                 return -EINVAL;

-        switch (rdma_node_get_transport(id->device->node_type)) {
-        case RDMA_TRANSPORT_IB:
+        if (rdma_ib_or_iboe(id->device, id->port_num)) {
                 ret = cma_modify_qp_err(id_priv);
                 if (ret)
                         goto out;
                 /* Initiate or respond to a disconnect. */
                 if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
                         ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
-                break;
-        case RDMA_TRANSPORT_IWARP:
+        } else if (rdma_protocol_iwarp(id->device, id->port_num)) {
                 ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
-                break;
-        default:
+        } else
                 ret = -EINVAL;
-                break;
-        }
+
 out:
         return ret;
 }
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -1253,8 +1253,7 @@ static void ib_ucm_add_one(struct ib_device *device)
         dev_t base;
         struct ib_ucm_device *ucm_dev;

-        if (!device->alloc_ucontext ||
-            rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
+        if (!device->alloc_ucontext || !rdma_ib_or_iboe(device, 1))
                 return;

         ucm_dev = kzalloc(sizeof *ucm_dev, GFP_KERNEL);
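The point in the commit message about passing port 1 directly is the reason the helpers are kept per-port. As a purely illustrative sketch, the call sites that hard-code port 1 above (rdma_destroy_id(), rdma_listen(), ib_ucm_add_one()) could later be generalized to scan every port; the helper below is hypothetical and not part of this commit or series.

#include <rdma/ib_verbs.h>

/*
 * Hypothetical example: ask whether any port of the device offers IB/IBoE
 * CM service, which is what a device mixing CM types across its ports
 * would need instead of a hard-coded check on port 1.
 */
static bool any_port_ib_or_iboe(struct ib_device *device)
{
        unsigned int port;

        /* HCA ports are numbered from 1 */
        for (port = 1; port <= device->phys_port_cnt; port++)
                if (rdma_ib_or_iboe(device, port))
                        return true;

        return false;
}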