RDMA/cma: Split apart the multiple uses of the same list heads

Two list heads in the rdma_id_private are being used for multiple
purposes, to save a few bytes of memory. Give the different purposes
different names and union the memory that is clearly exclusive.

list splits into device_item and listen_any_item. device_item is threaded
onto the cma_device's id_list and listen_any_item goes onto the
listen_any_list. IDs doing a wildcard (listen-any) listen are never bound
to a device.

listen_list splits into listen_item and listen_list. listen_list is on the
parent listen-any rdma_id_private and listen_item is on the child listen
that is bound to a specific cma_dev.

Which name should be used in which case depends on the state and other
factors of the rdma_id_private. Remap all the confusing references to make
sense with the new names, so at least there is some hope of matching the
necessary preconditions with each access.
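
As a rough illustration of why the exclusive uses can share storage, here
is a minimal sketch (the names below are invented for this sketch, not
taken from the patch). It also shows the list_del_init() idiom the patch
switches to: list_del_init() leaves the head self-pointing (empty) rather
than poisoned, so later list_empty() checks or repeated unlinks on the
shared storage remain well defined.

    /*
     * Sketch only, not part of the patch: an ID is either bound to a
     * cma_dev (device_item in use) or is a wildcard listener
     * (listen_any_item in use), never both, so one list_head's storage
     * can back both names.
     */
    #include <linux/list.h>

    struct sketch_id {
            union {
                    struct list_head device_item;     /* on a device id list */
                    struct list_head listen_any_item; /* on listen_any_list */
            };
    };

    static void sketch_unlink(struct sketch_id *id, bool bound_to_dev)
    {
            if (bound_to_dev)
                    list_del_init(&id->device_item);
            else
                    list_del_init(&id->listen_any_item);
            /* Either branch re-initializes the same underlying list_head. */
    }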

Link: https://lore.kernel.org/r/0-v1-a5ead4a0c19d+c3a-cma_list_head_jgg@nvidia.com
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
commit 99cfddb8a8
parent c78d218fc5
Jason Gunthorpe, 2021-09-15 13:25:19 -03:00
2 changed files with 27 additions and 18 deletions

drivers/infiniband/core/cma.c

@@ -453,7 +453,7 @@ static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
         id_priv->id.device = cma_dev->device;
         id_priv->id.route.addr.dev_addr.transport =
                 rdma_node_get_transport(cma_dev->device->node_type);
-        list_add_tail(&id_priv->list, &cma_dev->id_list);
+        list_add_tail(&id_priv->device_item, &cma_dev->id_list);
         trace_cm_id_attach(id_priv, cma_dev->device);
 }
@@ -470,7 +470,7 @@ static void cma_attach_to_dev(struct rdma_id_private *id_priv,
 static void cma_release_dev(struct rdma_id_private *id_priv)
 {
         mutex_lock(&lock);
-        list_del(&id_priv->list);
+        list_del_init(&id_priv->device_item);
         cma_dev_put(id_priv->cma_dev);
         id_priv->cma_dev = NULL;
         id_priv->id.device = NULL;
@@ -854,6 +854,7 @@ __rdma_create_id(struct net *net, rdma_cm_event_handler event_handler,
         init_completion(&id_priv->comp);
         refcount_set(&id_priv->refcount, 1);
         mutex_init(&id_priv->handler_mutex);
+        INIT_LIST_HEAD(&id_priv->device_item);
         INIT_LIST_HEAD(&id_priv->listen_list);
         INIT_LIST_HEAD(&id_priv->mc_list);
         get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
@@ -1647,7 +1648,7 @@ static struct rdma_id_private *cma_find_listener(
                 return id_priv;
         list_for_each_entry(id_priv_dev,
                             &id_priv->listen_list,
-                            listen_list) {
+                            listen_item) {
                 if (id_priv_dev->id.device == cm_id->device &&
                     cma_match_net_dev(&id_priv_dev->id,
                                       net_dev, req))
@@ -1756,14 +1757,15 @@ static void _cma_cancel_listens(struct rdma_id_private *id_priv)
          * Remove from listen_any_list to prevent added devices from spawning
          * additional listen requests.
          */
-        list_del(&id_priv->list);
+        list_del_init(&id_priv->listen_any_item);
         while (!list_empty(&id_priv->listen_list)) {
-                dev_id_priv = list_entry(id_priv->listen_list.next,
-                                         struct rdma_id_private, listen_list);
+                dev_id_priv =
+                        list_first_entry(&id_priv->listen_list,
+                                         struct rdma_id_private, listen_item);
                 /* sync with device removal to avoid duplicate destruction */
-                list_del_init(&dev_id_priv->list);
-                list_del(&dev_id_priv->listen_list);
+                list_del_init(&dev_id_priv->device_item);
+                list_del_init(&dev_id_priv->listen_item);
                 mutex_unlock(&lock);
                 rdma_destroy_id(&dev_id_priv->id);
@@ -2564,7 +2566,7 @@ static int cma_listen_on_dev(struct rdma_id_private *id_priv,
         ret = rdma_listen(&dev_id_priv->id, id_priv->backlog);
         if (ret)
                 goto err_listen;
-        list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
+        list_add_tail(&dev_id_priv->listen_item, &id_priv->listen_list);
         return 0;
 err_listen:
         /* Caller must destroy this after releasing lock */
@@ -2580,13 +2582,13 @@ static int cma_listen_on_all(struct rdma_id_private *id_priv)
         int ret;
         mutex_lock(&lock);
-        list_add_tail(&id_priv->list, &listen_any_list);
+        list_add_tail(&id_priv->listen_any_item, &listen_any_list);
         list_for_each_entry(cma_dev, &dev_list, list) {
                 ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy);
                 if (ret) {
                         /* Prevent racing with cma_process_remove() */
                         if (to_destroy)
-                                list_del_init(&to_destroy->list);
+                                list_del_init(&to_destroy->device_item);
                         goto err_listen;
                 }
         }
@@ -4895,7 +4897,7 @@ static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
         mutex_lock(&lock);
         list_for_each_entry(cma_dev, &dev_list, list)
-                list_for_each_entry(id_priv, &cma_dev->id_list, list) {
+                list_for_each_entry(id_priv, &cma_dev->id_list, device_item) {
                         ret = cma_netdev_change(ndev, id_priv);
                         if (ret)
                                 goto out;
@@ -4955,10 +4957,10 @@ static void cma_process_remove(struct cma_device *cma_dev)
         mutex_lock(&lock);
         while (!list_empty(&cma_dev->id_list)) {
                 struct rdma_id_private *id_priv = list_first_entry(
-                        &cma_dev->id_list, struct rdma_id_private, list);
+                        &cma_dev->id_list, struct rdma_id_private, device_item);
-                list_del(&id_priv->listen_list);
-                list_del_init(&id_priv->list);
+                list_del_init(&id_priv->listen_item);
+                list_del_init(&id_priv->device_item);
                 cma_id_get(id_priv);
                 mutex_unlock(&lock);
@@ -5035,7 +5037,7 @@ static int cma_add_one(struct ib_device *device)
         mutex_lock(&lock);
         list_add_tail(&cma_dev->list, &dev_list);
-        list_for_each_entry(id_priv, &listen_any_list, list) {
+        list_for_each_entry(id_priv, &listen_any_list, listen_any_item) {
                 ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy);
                 if (ret)
                         goto free_listen;

drivers/infiniband/core/cma_priv.h

@@ -55,8 +55,15 @@ struct rdma_id_private {
         struct rdma_bind_list   *bind_list;
         struct hlist_node       node;
-        struct list_head        list; /* listen_any_list or cma_device.list */
-        struct list_head        listen_list; /* per device listens */
+        union {
+                struct list_head device_item; /* On cma_device->id_list */
+                struct list_head listen_any_item; /* On listen_any_list */
+        };
+        union {
+                /* On rdma_id_private->listen_list */
+                struct list_head listen_item;
+                struct list_head listen_list;
+        };
         struct cma_device       *cma_dev;
         struct list_head        mc_list;
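
For context, a hypothetical traversal helper (not in the patch; it assumes
the caller holds the cma "lock" mutex, as the patched code does) showing
how the new member names read at a call site, mirroring what
cma_find_listener() does above:

    /*
     * Hypothetical example, not part of the patch: walk the per-device
     * child listens hanging off a wildcard (listen-any) ID using the
     * new listen_item member name.
     */
    static void sketch_walk_child_listens(struct rdma_id_private *listen_any)
    {
            struct rdma_id_private *child;

            list_for_each_entry(child, &listen_any->listen_list, listen_item)
                    pr_debug("child listen on %s\n",
                             dev_name(&child->id.device->dev));
    }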