
RDMA/cma: Consolidate the destruction of a cma_multicast in one place
author Jason Gunthorpe <jgg@nvidia.com>
Wed, 2 Sep 2020 08:11:21 +0000 (11:11 +0300)
committer Jason Gunthorpe <jgg@nvidia.com>
Thu, 17 Sep 2020 12:09:24 +0000 (09:09 -0300)
Two places were open coding this destruction sequence; consolidate them into a
single destroy_mc() helper, and also pull in cma_leave_roce_mc_group(), which
was called only once.

Link: https://lore.kernel.org/r/20200902081122.745412-8-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
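
For context outside the kernel tree, the following is a minimal, hypothetical userspace sketch of the pattern this patch applies: both teardown paths (cma_leave_mc_groups() and rdma_leave_multicast()) now call one destroy helper that branches on the transport, rather than open coding the IB and RoCE cleanup twice. The names transport_t, mc_entry and destroy_entry are illustrative only and do not exist in cma.c; the printf() calls stand in for the real ib_sa_free_multicast()/kref_put() work.

/* Hypothetical userspace sketch of the consolidation; not kernel code. */
#include <stdio.h>
#include <stdlib.h>

typedef enum { TRANSPORT_IB, TRANSPORT_ROCE } transport_t;

struct mc_entry {
        transport_t transport;
        /* per-transport multicast state would live here */
};

/*
 * Single helper that knows how to tear down an entry for either transport,
 * mirroring what destroy_mc() does in the patch.
 */
static void destroy_entry(struct mc_entry *mc)
{
        if (mc->transport == TRANSPORT_IB) {
                /* IB path: free the SA multicast join, then free the entry. */
                printf("IB: ib_sa_free_multicast() + kfree()\n");
                free(mc);
                return;
        }
        /* RoCE path: send the IGMP leave, then drop the last reference. */
        printf("RoCE: cma_igmp_send(leave) + kref_put()\n");
        free(mc);
}

int main(void)
{
        struct mc_entry *a = malloc(sizeof(*a));
        struct mc_entry *b = malloc(sizeof(*b));

        if (!a || !b)
                return 1;
        a->transport = TRANSPORT_IB;
        b->transport = TRANSPORT_ROCE;

        /* Both call sites now use the same helper, as in the patch. */
        destroy_entry(a);
        destroy_entry(b);
        return 0;
}

The design point mirrored here is that the IB path frees the entry directly, while the RoCE path hands the final free to a reference drop (kref_put() with release_mc in the real code).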
drivers/infiniband/core/cma.c

index 79d14fb..906717c 100644
@@ -1775,19 +1775,30 @@ static void cma_release_port(struct rdma_id_private *id_priv)
        mutex_unlock(&lock);
 }
 
-static void cma_leave_roce_mc_group(struct rdma_id_private *id_priv,
-                                   struct cma_multicast *mc)
+static void destroy_mc(struct rdma_id_private *id_priv,
+                      struct cma_multicast *mc)
 {
-       struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
-       struct net_device *ndev = NULL;
+       if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num)) {
+               ib_sa_free_multicast(mc->multicast.ib);
+               kfree(mc);
+               return;
+       }
 
-       if (dev_addr->bound_dev_if)
-               ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
-       if (ndev) {
-               cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid, false);
-               dev_put(ndev);
+       if (rdma_protocol_roce(id_priv->id.device,
+                                     id_priv->id.port_num)) {
+               struct rdma_dev_addr *dev_addr =
+                       &id_priv->id.route.addr.dev_addr;
+               struct net_device *ndev = NULL;
+
+               if (dev_addr->bound_dev_if)
+                       ndev = dev_get_by_index(dev_addr->net,
+                                               dev_addr->bound_dev_if);
+               if (ndev) {
+                       cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid, false);
+                       dev_put(ndev);
+               }
+               kref_put(&mc->mcref, release_mc);
        }
-       kref_put(&mc->mcref, release_mc);
 }
 
 static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
@@ -1795,16 +1806,10 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
        struct cma_multicast *mc;
 
        while (!list_empty(&id_priv->mc_list)) {
-               mc = container_of(id_priv->mc_list.next,
-                                 struct cma_multicast, list);
+               mc = list_first_entry(&id_priv->mc_list, struct cma_multicast,
+                                     list);
                list_del(&mc->list);
-               if (rdma_cap_ib_mcast(id_priv->cma_dev->device,
-                                     id_priv->id.port_num)) {
-                       ib_sa_free_multicast(mc->multicast.ib);
-                       kfree(mc);
-               } else {
-                       cma_leave_roce_mc_group(id_priv, mc);
-               }
+               destroy_mc(id_priv, mc);
        }
 }
 
@@ -4641,20 +4646,14 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
        id_priv = container_of(id, struct rdma_id_private, id);
        spin_lock_irq(&id_priv->lock);
        list_for_each_entry(mc, &id_priv->mc_list, list) {
-               if (!memcmp(&mc->addr, addr, rdma_addr_size(addr))) {
-                       list_del(&mc->list);
-                       spin_unlock_irq(&id_priv->lock);
-
-                       BUG_ON(id_priv->cma_dev->device != id->device);
+               if (memcmp(&mc->addr, addr, rdma_addr_size(addr)) != 0)
+                       continue;
+               list_del(&mc->list);
+               spin_unlock_irq(&id_priv->lock);
 
-                       if (rdma_cap_ib_mcast(id->device, id->port_num)) {
-                               ib_sa_free_multicast(mc->multicast.ib);
-                               kfree(mc);
-                       } else if (rdma_protocol_roce(id->device, id->port_num)) {
-                               cma_leave_roce_mc_group(id_priv, mc);
-                       }
-                       return;
-               }
+               WARN_ON(id_priv->cma_dev->device != id->device);
+               destroy_mc(id_priv, mc);
+               return;
        }
        spin_unlock_irq(&id_priv->lock);
 }