RDMA/cma: Remove padding arrays by using struct sockaddr_storage

There are a few places where the RDMA CM code handles IPv6 by doing

	struct sockaddr		addr;
	u8			pad[sizeof(struct sockaddr_in6) -
				    sizeof(struct sockaddr)];

This is fragile and ugly; handle this in a better way with just

	struct sockaddr_storage	addr;

[ Also roll in patch from Aleksey Senin <alekseys@voltaire.com> to
  switch to struct sockaddr_storage and get rid of padding arrays in
  struct rdma_addr. ]

Signed-off-by: Roland Dreier <rolandd@cisco.com>
commit 3f44675439
parent 6e86841d05
Author: Roland Dreier
Date:   2008-08-04 11:02:14 -07:00

3 changed files with 26 additions and 33 deletions
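For reference, a minimal userspace sketch of the pattern the change adopts (not part of this commit; struct addr_holder, set_any_addr() and addr_size() are illustrative names only): struct sockaddr_storage is defined to be large enough and suitably aligned for any address family, so a single field replaces the old sockaddr-plus-padding pair, and generic code keeps taking struct sockaddr * via a cast at the call site, just as the updated CM code does.

	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <arpa/inet.h>
	#include <string.h>
	#include <stdio.h>

	/* Illustrative holder mirroring the new rdma_addr layout: one
	 * sockaddr_storage field instead of sockaddr + manual IPv6 pad. */
	struct addr_holder {
		struct sockaddr_storage addr;
	};

	/* sockaddr_storage is guaranteed big and aligned enough for both
	 * sockaddr_in and sockaddr_in6, so either can be stored in place. */
	static void set_any_addr(struct addr_holder *h, int family)
	{
		memset(&h->addr, 0, sizeof(h->addr));
		if (family == AF_INET) {
			struct sockaddr_in *sin = (struct sockaddr_in *) &h->addr;
			sin->sin_family = AF_INET;
			sin->sin_addr.s_addr = htonl(INADDR_ANY);
		} else {
			struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) &h->addr;
			sin6->sin6_family = AF_INET6;
			sin6->sin6_addr = in6addr_any;
		}
	}

	/* Generic helpers take struct sockaddr *, so callers cast, as the
	 * updated call sites in this patch do. */
	static size_t addr_size(const struct sockaddr *sa)
	{
		return sa->sa_family == AF_INET ?
			sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6);
	}

	int main(void)
	{
		struct addr_holder h;

		set_any_addr(&h, AF_INET6);
		printf("family %d, %zu bytes used of %zu\n",
		       ((struct sockaddr *) &h.addr)->sa_family,
		       addr_size((struct sockaddr *) &h.addr),
		       sizeof(h.addr));
		return 0;
	}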

drivers/infiniband/core/cma.c

@@ -155,9 +155,7 @@ struct cma_multicast {
 	} multicast;
 	struct list_head	list;
 	void			*context;
-	struct sockaddr		addr;
-	u8			pad[sizeof(struct sockaddr_in6) -
-				    sizeof(struct sockaddr)];
+	struct sockaddr_storage	addr;
 };
 
 struct cma_work {
@@ -786,8 +784,8 @@ static void cma_cancel_operation(struct rdma_id_private *id_priv,
 		cma_cancel_route(id_priv);
 		break;
 	case CMA_LISTEN:
-		if (cma_any_addr(&id_priv->id.route.addr.src_addr) &&
-		    !id_priv->cma_dev)
+		if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)
+		    && !id_priv->cma_dev)
 			cma_cancel_listens(id_priv);
 		break;
 	default:
@@ -1026,7 +1024,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
 		rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;
 
 	ib_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
-	ret = rdma_translate_ip(&id->route.addr.src_addr,
+	ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
 				&id->route.addr.dev_addr);
 	if (ret)
 		goto destroy_id;
@@ -1064,7 +1062,7 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
 	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
 			  ip_ver, port, src, dst);
-	ret = rdma_translate_ip(&id->route.addr.src_addr,
+	ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
 				&id->route.addr.dev_addr);
 	if (ret)
 		goto err;
@@ -1377,7 +1375,7 @@ static int cma_ib_listen(struct rdma_id_private *id_priv)
 	if (IS_ERR(id_priv->cm_id.ib))
 		return PTR_ERR(id_priv->cm_id.ib);
 
-	addr = &id_priv->id.route.addr.src_addr;
+	addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
 	svc_id = cma_get_service_id(id_priv->id.ps, addr);
 	if (cma_any_addr(addr))
 		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
@@ -1443,7 +1441,7 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
 	dev_id_priv->state = CMA_ADDR_BOUND;
 	memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
-	       ip_addr_size(&id_priv->id.route.addr.src_addr));
+	       ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr));
 
 	cma_attach_to_dev(dev_id_priv, cma_dev);
 	list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
@@ -1563,13 +1561,14 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
 	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(&addr->dev_addr));
 	path_rec.numb_path = 1;
 	path_rec.reversible = 1;
-	path_rec.service_id = cma_get_service_id(id_priv->id.ps, &addr->dst_addr);
+	path_rec.service_id = cma_get_service_id(id_priv->id.ps,
+						 (struct sockaddr *) &addr->dst_addr);
 
 	comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
 		    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
 		    IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;
 
-	if (addr->src_addr.sa_family == AF_INET) {
+	if (addr->src_addr.ss_family == AF_INET) {
 		path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
 		comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
 	} else {
@@ -1848,7 +1847,7 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
 	ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
 	ib_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
 
-	if (cma_zero_addr(&id_priv->id.route.addr.src_addr)) {
+	if (cma_zero_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)) {
 		src_in = (struct sockaddr_in *)&id_priv->id.route.addr.src_addr;
 		dst_in = (struct sockaddr_in *)&id_priv->id.route.addr.dst_addr;
 		src_in->sin_family = dst_in->sin_family;
@@ -1897,7 +1896,7 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
 	if (cma_any_addr(dst_addr))
 		ret = cma_resolve_loopback(id_priv);
 	else
-		ret = rdma_resolve_ip(&addr_client, &id->route.addr.src_addr,
+		ret = rdma_resolve_ip(&addr_client, (struct sockaddr *) &id->route.addr.src_addr,
 				      dst_addr, &id->route.addr.dev_addr,
 				      timeout_ms, addr_handler, id_priv);
 	if (ret)
@@ -2021,11 +2020,11 @@ static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
 	 * We don't support binding to any address if anyone is bound to
 	 * a specific address on the same port.
 	 */
-	if (cma_any_addr(&id_priv->id.route.addr.src_addr))
+	if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr))
 		return -EADDRNOTAVAIL;
 
 	hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
-		if (cma_any_addr(&cur_id->id.route.addr.src_addr))
+		if (cma_any_addr((struct sockaddr *) &cur_id->id.route.addr.src_addr))
 			return -EADDRNOTAVAIL;
 
 		cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
@@ -2060,7 +2059,7 @@ static int cma_get_port(struct rdma_id_private *id_priv)
 	}
 
 	mutex_lock(&lock);
-	if (cma_any_port(&id_priv->id.route.addr.src_addr))
+	if (cma_any_port((struct sockaddr *) &id_priv->id.route.addr.src_addr))
 		ret = cma_alloc_any_port(ps, id_priv);
 	else
 		ret = cma_use_port(ps, id_priv);
@@ -2232,7 +2231,7 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
 	req.path = route->path_rec;
 	req.service_id = cma_get_service_id(id_priv->id.ps,
-					    &route->addr.dst_addr);
+					    (struct sockaddr *) &route->addr.dst_addr);
 	req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
 	req.max_cm_retries = CMA_MAX_CM_RETRIES;
@@ -2283,7 +2282,7 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
 		req.alternate_path = &route->path_rec[1];
 
 	req.service_id = cma_get_service_id(id_priv->id.ps,
-					    &route->addr.dst_addr);
+					    (struct sockaddr *) &route->addr.dst_addr);
 	req.qp_num = id_priv->qp_num;
 	req.qp_type = IB_QPT_RC;
 	req.starting_psn = id_priv->seq_num;
@@ -2667,7 +2666,7 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
 	if (ret)
 		return ret;
 
-	cma_set_mgid(id_priv, &mc->addr, &rec.mgid);
+	cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
 	if (id_priv->id.ps == RDMA_PS_UDP)
 		rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
 	ib_addr_get_sgid(dev_addr, &rec.port_gid);

drivers/infiniband/core/ucma.c

@@ -81,9 +81,7 @@ struct ucma_multicast {
 	u64			uid;
 	struct list_head	list;
-	struct sockaddr		addr;
-	u8			pad[sizeof(struct sockaddr_in6) -
-				    sizeof(struct sockaddr)];
+	struct sockaddr_storage	addr;
 };
 
 struct ucma_event {
@@ -603,11 +601,11 @@ static ssize_t ucma_query_route(struct ucma_file *file,
 		return PTR_ERR(ctx);
 
 	memset(&resp, 0, sizeof resp);
-	addr = &ctx->cm_id->route.addr.src_addr;
+	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
 	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
 				     sizeof(struct sockaddr_in) :
 				     sizeof(struct sockaddr_in6));
-	addr = &ctx->cm_id->route.addr.dst_addr;
+	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
 	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
 				     sizeof(struct sockaddr_in) :
 				     sizeof(struct sockaddr_in6));
@@ -913,7 +911,7 @@ static ssize_t ucma_join_multicast(struct ucma_file *file,
 	mc->uid = cmd.uid;
 	memcpy(&mc->addr, &cmd.addr, sizeof cmd.addr);
-	ret = rdma_join_multicast(ctx->cm_id, &mc->addr, mc);
+	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr, mc);
 	if (ret)
 		goto err2;
@@ -929,7 +927,7 @@ static ssize_t ucma_join_multicast(struct ucma_file *file,
 	return 0;
 
 err3:
-	rdma_leave_multicast(ctx->cm_id, &mc->addr);
+	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
 	ucma_cleanup_mc_events(mc);
 err2:
 	mutex_lock(&mut);
@@ -975,7 +973,7 @@ static ssize_t ucma_leave_multicast(struct ucma_file *file,
 		goto out;
 	}
 
-	rdma_leave_multicast(mc->ctx->cm_id, &mc->addr);
+	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
 	mutex_lock(&mc->ctx->file->mut);
 	ucma_cleanup_mc_events(mc);
 	list_del(&mc->list);

include/rdma/rdma_cm.h

@@ -71,12 +71,8 @@ enum rdma_port_space {
 };
 
 struct rdma_addr {
-	struct sockaddr src_addr;
-	u8 src_pad[sizeof(struct sockaddr_in6) -
-		   sizeof(struct sockaddr)];
-	struct sockaddr dst_addr;
-	u8 dst_pad[sizeof(struct sockaddr_in6) -
-		   sizeof(struct sockaddr)];
+	struct sockaddr_storage src_addr;
+	struct sockaddr_storage dst_addr;
 	struct rdma_dev_addr dev_addr;
 };
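To make the "fragile and ugly" point from the commit message concrete, here is a small standalone comparison (not part of the commit; struct old_padded simply mirrors the layout removed above): the hand-maintained pad array only fixes the size of the field, whereas struct sockaddr_storage also carries an alignment guarantee for any address type and needs no sizeof arithmetic to keep in sync.

	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <stdio.h>
	#include <stdalign.h>

	/* The layout removed by this patch: a sockaddr followed by a
	 * hand-sized pad to leave room for a sockaddr_in6 behind it. */
	struct old_padded {
		struct sockaddr addr;
		unsigned char pad[sizeof(struct sockaddr_in6) -
				  sizeof(struct sockaddr)];
	};

	int main(void)
	{
		/* sockaddr_storage gives at least as much room and has the
		 * alignment guarantee the padded pair only gets by accident. */
		printf("old padded pair:  %zu bytes, align %zu\n",
		       sizeof(struct old_padded), alignof(struct old_padded));
		printf("sockaddr_storage: %zu bytes, align %zu\n",
		       sizeof(struct sockaddr_storage),
		       alignof(struct sockaddr_storage));
		return 0;
	}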