2 * Bridge multicast support.
4 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option)
13 #include <linux/err.h>
14 #include <linux/export.h>
15 #include <linux/if_ether.h>
16 #include <linux/igmp.h>
17 #include <linux/jhash.h>
18 #include <linux/kernel.h>
19 #include <linux/log2.h>
20 #include <linux/netdevice.h>
21 #include <linux/netfilter_bridge.h>
22 #include <linux/random.h>
23 #include <linux/rculist.h>
24 #include <linux/skbuff.h>
25 #include <linux/slab.h>
26 #include <linux/timer.h>
27 #include <linux/inetdevice.h>
28 #include <linux/mroute.h>
30 #include <net/switchdev.h>
31 #if IS_ENABLED(CONFIG_IPV6)
34 #include <net/ip6_checksum.h>
35 #include <net/addrconf.h>
38 #include "br_private.h"
/* Rhashtable parameters for the bridge multicast database (MDB):
 * entries are net_bridge_mdb_entry, keyed by the embedded struct br_ip
 * address, with automatic table shrinking enabled.
 */
40 static const struct rhashtable_params br_mdb_rht_params = {
41 .head_offset = offsetof(struct net_bridge_mdb_entry, rhnode),
42 .key_offset = offsetof(struct net_bridge_mdb_entry, addr),
43 .key_len = sizeof(struct br_ip),
44 .automatic_shrinking = true,
48 static void br_multicast_start_querier(struct net_bridge *br,
49 struct bridge_mcast_own_query *query);
50 static void br_multicast_add_router(struct net_bridge *br,
51 struct net_bridge_port *port);
52 static void br_ip4_multicast_leave_group(struct net_bridge *br,
53 struct net_bridge_port *port,
56 const unsigned char *src);
58 static void __del_port_router(struct net_bridge_port *p);
59 #if IS_ENABLED(CONFIG_IPV6)
60 static void br_ip6_multicast_leave_group(struct net_bridge *br,
61 struct net_bridge_port *port,
62 const struct in6_addr *group,
63 __u16 vid, const unsigned char *src);
/* Compare two multicast group addresses: different protocols never match;
 * otherwise compare the per-family address (IPv4 dword or full IPv6).
 */
66 static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
68 if (a->proto != b->proto)
74 return a->u.ip4 == b->u.ip4;
75 #if IS_ENABLED(CONFIG_IPV6)
76 case htons(ETH_P_IPV6):
77 return ipv6_addr_equal(&a->u.ip6, &b->u.ip6);
/* Lookup an MDB entry by group address on the fast path.
 * NOTE(review): the _rcu suffix and rhashtable_lookup() imply the caller
 * must hold the RCU read lock — confirm against callers.
 */
83 static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
86 return rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
/* Lookup an MDB entry by group address; must be called with
 * br->multicast_lock held (enforced via lockdep below).
 */
89 struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br,
92 struct net_bridge_mdb_entry *ent;
94 lockdep_assert_held_once(&br->multicast_lock);
97 ent = rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
/* Convenience wrapper: build a zeroed br_ip key for an IPv4 group/vid
 * and look it up via br_mdb_ip_get() (so multicast_lock must be held).
 */
103 static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br,
104 __be32 dst, __u16 vid)
108 memset(&br_dst, 0, sizeof(br_dst));
110 br_dst.proto = htons(ETH_P_IP);
113 return br_mdb_ip_get(br, &br_dst);
116 #if IS_ENABLED(CONFIG_IPV6)
/* IPv6 counterpart of br_mdb_ip4_get(): zeroed key, ETH_P_IPV6 proto,
 * lookup under multicast_lock via br_mdb_ip_get().
 */
117 static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
118 const struct in6_addr *dst,
123 memset(&br_dst, 0, sizeof(br_dst));
125 br_dst.proto = htons(ETH_P_IPV6);
128 return br_mdb_ip_get(br, &br_dst);
/* Fast-path MDB lookup for a forwarded skb: bail out if snooping is
 * disabled or the skb was already classified as IGMP/MLD control
 * traffic, then key the lookup off the packet's destination address.
 */
132 struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
133 struct sk_buff *skb, u16 vid)
137 if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
140 if (BR_INPUT_SKB_CB(skb)->igmp)
143 memset(&ip, 0, sizeof(ip));
144 ip.proto = skb->protocol;
147 switch (skb->protocol) {
148 case htons(ETH_P_IP):
149 ip.u.ip4 = ip_hdr(skb)->daddr;
151 #if IS_ENABLED(CONFIG_IPV6)
152 case htons(ETH_P_IPV6):
153 ip.u.ip6 = ipv6_hdr(skb)->daddr;
/* RCU lookup — this path runs without multicast_lock. */
160 return br_mdb_ip_get_rcu(br, &ip);
/* Timer callback: a group's membership timer fired. Under multicast_lock,
 * clear host_joined, notify userspace (RTM_DELMDB) and unlink the entry
 * from both the rhashtable and the mdb_list. A still-pending timer or a
 * stopped device aborts the teardown.
 */
163 static void br_multicast_group_expired(struct timer_list *t)
165 struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer);
166 struct net_bridge *br = mp->br;
168 spin_lock(&br->multicast_lock);
169 if (!netif_running(br->dev) || timer_pending(&mp->timer))
172 mp->host_joined = false;
173 br_mdb_notify(br->dev, NULL, &mp->addr, RTM_DELMDB, 0);
178 rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
180 hlist_del_rcu(&mp->mdb_node);
185 spin_unlock(&br->multicast_lock);
/* Remove a port group entry from its MDB entry's port list: unlink it
 * (RCU-safe), stop its timer and send an RTM_DELMDB notification. If no
 * ports and no host join remain, arm the group timer to expire at once
 * so br_multicast_group_expired() reclaims the entry.
 * Caller context: walks mp->ports with mlock_dereference(), so
 * br->multicast_lock must be held.
 */
188 static void br_multicast_del_pg(struct net_bridge *br,
189 struct net_bridge_port_group *pg)
191 struct net_bridge_mdb_entry *mp;
192 struct net_bridge_port_group *p;
193 struct net_bridge_port_group __rcu **pp;
195 mp = br_mdb_ip_get(br, &pg->addr);
199 for (pp = &mp->ports;
200 (p = mlock_dereference(*pp, br)) != NULL;
205 rcu_assign_pointer(*pp, p->next);
206 hlist_del_init(&p->mglist);
207 del_timer(&p->timer);
208 br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB,
212 if (!mp->ports && !mp->host_joined &&
213 netif_running(br->dev))
214 mod_timer(&mp->timer, jiffies);
/* Timer callback: a port's group membership timed out. Skip teardown for
 * permanent entries, already-unhashed entries, re-armed timers or a
 * stopped device; otherwise delete the port group under multicast_lock.
 */
222 static void br_multicast_port_group_expired(struct timer_list *t)
224 struct net_bridge_port_group *pg = from_timer(pg, t, timer);
225 struct net_bridge *br = pg->port->br;
227 spin_lock(&br->multicast_lock);
228 if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
229 hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
232 br_multicast_del_pg(br, pg);
235 spin_unlock(&br->multicast_lock);
/* Build an IGMP query skb for @group (0 = general query): Ethernet header
 * to the all-hosts multicast MAC, IPv4 header with the Router Alert
 * option, then an IGMPv2 (struct igmphdr) or IGMPv3 (struct igmpv3_query)
 * header depending on br->multicast_igmp_version. *igmp_type is set for
 * the caller's statistics. Returns the skb, or presumably NULL on
 * allocation failure (failure path not visible here).
 */
238 static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
242 struct igmpv3_query *ihv3;
243 size_t igmp_hdr_size;
/* Header size depends on the configured IGMP version. */
249 igmp_hdr_size = sizeof(*ih);
250 if (br->multicast_igmp_version == 3)
251 igmp_hdr_size = sizeof(*ihv3);
252 skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) +
257 skb->protocol = htons(ETH_P_IP);
259 skb_reset_mac_header(skb);
262 ether_addr_copy(eth->h_source, br->dev->dev_addr);
265 eth->h_dest[2] = 0x5e;
269 eth->h_proto = htons(ETH_P_IP);
270 skb_put(skb, sizeof(*eth));
272 skb_set_network_header(skb, skb->len);
/* +4 accounts for the Router Alert IP option appended below. */
278 iph->tot_len = htons(sizeof(*iph) + igmp_hdr_size + 4);
280 iph->frag_off = htons(IP_DF);
282 iph->protocol = IPPROTO_IGMP;
283 iph->saddr = br_opt_get(br, BROPT_MULTICAST_QUERY_USE_IFADDR) ?
284 inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0;
285 iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
286 ((u8 *)&iph[1])[0] = IPOPT_RA;
287 ((u8 *)&iph[1])[1] = 4;
288 ((u8 *)&iph[1])[2] = 0;
289 ((u8 *)&iph[1])[3] = 0;
293 skb_set_transport_header(skb, skb->len);
294 *igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;
296 switch (br->multicast_igmp_version) {
299 ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
/* Group-specific queries use the (shorter) last-member interval. */
300 ih->code = (group ? br->multicast_last_member_interval :
301 br->multicast_query_response_interval) /
302 (HZ / IGMP_TIMER_SCALE);
305 ih->csum = ip_compute_csum((void *)ih, sizeof(*ih));
308 ihv3 = igmpv3_query_hdr(skb);
309 ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
310 ihv3->code = (group ? br->multicast_last_member_interval :
311 br->multicast_query_response_interval) /
312 (HZ / IGMP_TIMER_SCALE);
314 ihv3->qqic = br->multicast_query_interval / HZ;
320 ihv3->csum = ip_compute_csum((void *)ihv3, sizeof(*ihv3));
324 skb_put(skb, igmp_hdr_size);
325 __skb_pull(skb, sizeof(*eth));
331 #if IS_ENABLED(CONFIG_IPV6)
/* IPv6 counterpart of br_ip4_multicast_alloc_query(): build an MLD query
 * skb for @grp (unspecified address = general query). Layout: Ethernet,
 * IPv6 header + 8-byte Hop-by-Hop extension carrying the Router Alert
 * option, then MLDv1 (struct mld_msg) or MLDv2 (struct mld2_query)
 * depending on br->multicast_mld_version. Also records whether the
 * bridge has a usable IPv6 source address in BROPT_HAS_IPV6_ADDR.
 */
332 static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
333 const struct in6_addr *grp,
336 struct mld2_query *mld2q;
337 unsigned long interval;
338 struct ipv6hdr *ip6h;
339 struct mld_msg *mldq;
345 mld_hdr_size = sizeof(*mldq);
346 if (br->multicast_mld_version == 2)
347 mld_hdr_size = sizeof(*mld2q);
348 skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) +
353 skb->protocol = htons(ETH_P_IPV6);
355 /* Ethernet header */
356 skb_reset_mac_header(skb);
359 ether_addr_copy(eth->h_source, br->dev->dev_addr);
360 eth->h_proto = htons(ETH_P_IPV6);
361 skb_put(skb, sizeof(*eth));
363 /* IPv6 header + HbH option */
364 skb_set_network_header(skb, skb->len);
365 ip6h = ipv6_hdr(skb);
/* Version 6, traffic class/flow label 0. */
367 *(__force __be32 *)ip6h = htonl(0x60000000);
368 ip6h->payload_len = htons(8 + mld_hdr_size);
369 ip6h->nexthdr = IPPROTO_HOPOPTS;
/* Destination: ff02::1, the all-nodes link-local multicast group. */
371 ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
372 if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
375 br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, false);
379 br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);
380 ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
382 hopopt = (u8 *)(ip6h + 1);
383 hopopt[0] = IPPROTO_ICMPV6; /* next hdr */
384 hopopt[1] = 0; /* length of HbH */
385 hopopt[2] = IPV6_TLV_ROUTERALERT; /* Router Alert */
386 hopopt[3] = 2; /* Length of RA Option */
387 hopopt[4] = 0; /* Type = 0x0000 (MLD) */
389 hopopt[6] = IPV6_TLV_PAD1; /* Pad1 */
390 hopopt[7] = IPV6_TLV_PAD1; /* Pad1 */
392 skb_put(skb, sizeof(*ip6h) + 8);
/* MLD header: max-response delay depends on query type (general vs
 * group-specific, see interval selection below).
 */
395 skb_set_transport_header(skb, skb->len);
396 interval = ipv6_addr_any(grp) ?
397 br->multicast_query_response_interval :
398 br->multicast_last_member_interval;
399 *igmp_type = ICMPV6_MGM_QUERY;
400 switch (br->multicast_mld_version) {
402 mldq = (struct mld_msg *)icmp6_hdr(skb);
403 mldq->mld_type = ICMPV6_MGM_QUERY;
406 mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
407 mldq->mld_reserved = 0;
408 mldq->mld_mca = *grp;
409 mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
410 sizeof(*mldq), IPPROTO_ICMPV6,
416 mld2q = (struct mld2_query *)icmp6_hdr(skb);
417 mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
418 mld2q->mld2q_type = ICMPV6_MGM_QUERY;
419 mld2q->mld2q_code = 0;
420 mld2q->mld2q_cksum = 0;
421 mld2q->mld2q_resv1 = 0;
422 mld2q->mld2q_resv2 = 0;
423 mld2q->mld2q_suppress = 0;
424 mld2q->mld2q_qrv = 2;
425 mld2q->mld2q_nsrcs = 0;
426 mld2q->mld2q_qqic = br->multicast_query_interval / HZ;
427 mld2q->mld2q_mca = *grp;
428 mld2q->mld2q_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
436 skb_put(skb, mld_hdr_size);
438 __skb_pull(skb, sizeof(*eth));
/* Protocol-family dispatcher: build an IGMP or MLD query skb for @addr
 * by delegating to the IPv4/IPv6 specific allocators above.
 */
445 static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
449 switch (addr->proto) {
450 case htons(ETH_P_IP):
451 return br_ip4_multicast_alloc_query(br, addr->u.ip4, igmp_type);
452 #if IS_ENABLED(CONFIG_IPV6)
453 case htons(ETH_P_IPV6):
454 return br_ip6_multicast_alloc_query(br, &addr->u.ip6,
/* Find or create the MDB entry for @group. If the hash table is already
 * at hash_max, snooping is disabled entirely and -E2BIG returned. A new
 * entry gets its expiry timer initialised, is inserted into the
 * rhashtable (duplicate-safe via lookup_insert) and linked on mdb_list.
 * Returns the entry or an ERR_PTR(); caller holds multicast_lock
 * (allocation is GFP_ATOMIC).
 */
461 struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
464 struct net_bridge_mdb_entry *mp;
467 mp = br_mdb_ip_get(br, group);
471 if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
472 br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
473 return ERR_PTR(-E2BIG);
476 mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
478 return ERR_PTR(-ENOMEM);
482 timer_setup(&mp->timer, br_multicast_group_expired, 0);
483 err = rhashtable_lookup_insert_fast(&br->mdb_hash_tbl, &mp->rhnode,
489 hlist_add_head_rcu(&mp->mdb_node, &br->mdb_list);
/* Allocate and initialise a port group entry, link it into the port's
 * mglist and chain it before @next (RCU pointer). When @src is given the
 * unicast destination MAC is recorded; otherwise eth_addr is set to
 * all-ones (broadcast sentinel, see br_port_group_equal()).
 * GFP_ATOMIC: called under multicast_lock.
 */
495 struct net_bridge_port_group *br_multicast_new_port_group(
496 struct net_bridge_port *port,
498 struct net_bridge_port_group __rcu *next,
500 const unsigned char *src)
502 struct net_bridge_port_group *p;
504 p = kzalloc(sizeof(*p), GFP_ATOMIC);
511 rcu_assign_pointer(p->next, next);
512 hlist_add_head(&p->mglist, &port->mglist);
513 timer_setup(&p->timer, br_multicast_port_group_expired, 0);
516 memcpy(p->eth_addr, src, ETH_ALEN);
518 memset(p->eth_addr, 0xff, ETH_ALEN);
/* Match a port group entry against (@port, @src): ports must match; the
 * source MAC is only compared when the port does unicast-to-multicast
 * conversion (BR_MULTICAST_TO_UNICAST), otherwise it is ignored.
 */
523 static bool br_port_group_equal(struct net_bridge_port_group *p,
524 struct net_bridge_port *port,
525 const unsigned char *src)
530 if (!(port->flags & BR_MULTICAST_TO_UNICAST))
533 return ether_addr_equal(src, p->eth_addr);
/* Core join handler for a membership report. With @port == NULL the
 * bridge device itself joins (host_joined + RTM_NEWMDB and the group
 * timer is refreshed). Otherwise the port list, kept sorted by pointer
 * value, is scanned: an existing matching entry just gets its timer
 * refreshed; a missing one is created, announced via RTM_NEWMDB and
 * armed with multicast_membership_interval.
 */
536 static int br_multicast_add_group(struct net_bridge *br,
537 struct net_bridge_port *port,
539 const unsigned char *src)
541 struct net_bridge_port_group __rcu **pp;
542 struct net_bridge_port_group *p;
543 struct net_bridge_mdb_entry *mp;
544 unsigned long now = jiffies;
547 spin_lock(&br->multicast_lock);
548 if (!netif_running(br->dev) ||
549 (port && port->state == BR_STATE_DISABLED))
552 mp = br_multicast_new_group(br, group);
558 if (!mp->host_joined) {
559 mp->host_joined = true;
560 br_mdb_notify(br->dev, NULL, &mp->addr, RTM_NEWMDB, 0);
562 mod_timer(&mp->timer, now + br->multicast_membership_interval);
/* Walk the sorted port list looking for an existing entry or the
 * insertion point.
 */
566 for (pp = &mp->ports;
567 (p = mlock_dereference(*pp, br)) != NULL;
569 if (br_port_group_equal(p, port, src))
571 if ((unsigned long)p->port < (unsigned long)port)
575 p = br_multicast_new_port_group(port, group, *pp, 0, src);
578 rcu_assign_pointer(*pp, p);
579 br_mdb_notify(br->dev, port, group, RTM_NEWMDB, 0);
582 mod_timer(&p->timer, now + br->multicast_membership_interval);
587 spin_unlock(&br->multicast_lock);
/* IPv4 join wrapper: ignore link-local groups (224.0.0.x), build a
 * br_ip key and delegate to br_multicast_add_group().
 */
591 static int br_ip4_multicast_add_group(struct net_bridge *br,
592 struct net_bridge_port *port,
595 const unsigned char *src)
597 struct br_ip br_group;
599 if (ipv4_is_local_multicast(group))
602 br_group.u.ip4 = group;
603 br_group.proto = htons(ETH_P_IP);
606 return br_multicast_add_group(br, port, &br_group, src);
609 #if IS_ENABLED(CONFIG_IPV6)
/* IPv6 join wrapper: ignore the link-local all-nodes group (ff02::1),
 * build a zeroed br_ip key and delegate to br_multicast_add_group().
 */
610 static int br_ip6_multicast_add_group(struct net_bridge *br,
611 struct net_bridge_port *port,
612 const struct in6_addr *group,
614 const unsigned char *src)
616 struct br_ip br_group;
618 if (ipv6_addr_is_ll_all_nodes(group))
621 memset(&br_group, 0, sizeof(br_group));
622 br_group.u.ip6 = *group;
623 br_group.proto = htons(ETH_P_IPV6);
626 return br_multicast_add_group(br, port, &br_group, src);
/* Timer callback: a port's learned-router timeout fired. Statically
 * configured (disabled/permanent) router state and re-armed timers are
 * left alone; otherwise the port is removed from the router list.
 */
630 static void br_multicast_router_expired(struct timer_list *t)
632 struct net_bridge_port *port =
633 from_timer(port, t, multicast_router_timer);
634 struct net_bridge *br = port->br;
636 spin_lock(&br->multicast_lock);
637 if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
638 port->multicast_router == MDB_RTR_TYPE_PERM ||
639 timer_pending(&port->multicast_router_timer))
642 __del_port_router(port);
644 spin_unlock(&br->multicast_lock);
/* Push the bridge-level mrouter state to switchdev-capable hardware
 * (deferred attr set on the bridge device).
 */
647 static void br_mc_router_state_change(struct net_bridge *p,
650 struct switchdev_attr attr = {
652 .id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
653 .flags = SWITCHDEV_F_DEFER,
654 .u.mrouter = is_mc_router,
657 switchdev_port_attr_set(p->dev, &attr);
/* Timer callback: the bridge's own learned-router timeout fired.
 * Mirrors br_multicast_router_expired() but for the bridge device:
 * skip static configurations and re-armed timers, then tell switchdev
 * the bridge is no longer a multicast router.
 */
660 static void br_multicast_local_router_expired(struct timer_list *t)
662 struct net_bridge *br = from_timer(br, t, multicast_router_timer);
664 spin_lock(&br->multicast_lock);
665 if (br->multicast_router == MDB_RTR_TYPE_DISABLED ||
666 br->multicast_router == MDB_RTR_TYPE_PERM ||
667 timer_pending(&br->multicast_router_timer))
670 br_mc_router_state_change(br, false);
672 spin_unlock(&br->multicast_lock);
/* The other-querier-present timer fired: no foreign querier is active
 * any more, so (re)start our own querier for the given address family,
 * unless the device is down or snooping is disabled.
 */
675 static void br_multicast_querier_expired(struct net_bridge *br,
676 struct bridge_mcast_own_query *query)
678 spin_lock(&br->multicast_lock);
679 if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
682 br_multicast_start_querier(br, query);
685 spin_unlock(&br->multicast_lock);
/* IPv4 other-querier timer thunk. */
688 static void br_ip4_multicast_querier_expired(struct timer_list *t)
690 struct net_bridge *br = from_timer(br, t, ip4_other_query.timer);
692 br_multicast_querier_expired(br, &br->ip4_own_query);
695 #if IS_ENABLED(CONFIG_IPV6)
/* IPv6 other-querier timer thunk. */
696 static void br_ip6_multicast_querier_expired(struct timer_list *t)
698 struct net_bridge *br = from_timer(br, t, ip6_other_query.timer);
700 br_multicast_querier_expired(br, &br->ip6_own_query);
/* Record the source address of a query we sent ourselves as the current
 * querier address for the matching address family.
 */
704 static void br_multicast_select_own_querier(struct net_bridge *br,
708 if (ip->proto == htons(ETH_P_IP))
709 br->ip4_querier.addr.u.ip4 = ip_hdr(skb)->saddr;
710 #if IS_ENABLED(CONFIG_IPV6)
712 br->ip6_querier.addr.u.ip6 = ipv6_hdr(skb)->saddr;
/* Build and transmit one query for @ip. With a @port the skb is sent out
 * that port through the NF_BR_LOCAL_OUT netfilter hook; without one the
 * query originates from the bridge itself and our querier address is
 * recorded. Query traffic is counted in both cases.
 */
716 static void __br_multicast_send_query(struct net_bridge *br,
717 struct net_bridge_port *port,
723 skb = br_multicast_alloc_query(br, ip, &igmp_type);
728 skb->dev = port->dev;
729 br_multicast_count(br, port, skb, igmp_type,
731 NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
732 dev_net(port->dev), NULL, skb, NULL, skb->dev,
733 br_dev_queue_push_xmit);
735 br_multicast_select_own_querier(br, ip, skb);
736 br_multicast_count(br, port, skb, igmp_type,
/* Periodic querier work: figure out which family @own_query belongs to,
 * suppress our query while another querier is present (other_query timer
 * pending), otherwise send a general query and re-arm own_query — using
 * the startup interval until startup_query_count queries have been sent.
 */
742 static void br_multicast_send_query(struct net_bridge *br,
743 struct net_bridge_port *port,
744 struct bridge_mcast_own_query *own_query)
746 struct bridge_mcast_other_query *other_query = NULL;
747 struct br_ip br_group;
750 if (!netif_running(br->dev) ||
751 !br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
752 !br_opt_get(br, BROPT_MULTICAST_QUERIER))
/* Zero group address = general query. */
755 memset(&br_group.u, 0, sizeof(br_group.u));
757 if (port ? (own_query == &port->ip4_own_query) :
758 (own_query == &br->ip4_own_query)) {
759 other_query = &br->ip4_other_query;
760 br_group.proto = htons(ETH_P_IP);
761 #if IS_ENABLED(CONFIG_IPV6)
763 other_query = &br->ip6_other_query;
764 br_group.proto = htons(ETH_P_IPV6);
768 if (!other_query || timer_pending(&other_query->timer))
771 __br_multicast_send_query(br, port, &br_group);
774 time += own_query->startup_sent < br->multicast_startup_query_count ?
775 br->multicast_startup_query_interval :
776 br->multicast_query_interval;
777 mod_timer(&own_query->timer, time);
/* Per-port own-query timer body: skip ports that cannot forward
 * (disabled/blocking), account a startup query if still in the startup
 * phase, then send the next query.
 */
781 br_multicast_port_query_expired(struct net_bridge_port *port,
782 struct bridge_mcast_own_query *query)
784 struct net_bridge *br = port->br;
786 spin_lock(&br->multicast_lock);
787 if (port->state == BR_STATE_DISABLED ||
788 port->state == BR_STATE_BLOCKING)
791 if (query->startup_sent < br->multicast_startup_query_count)
792 query->startup_sent++;
794 br_multicast_send_query(port->br, port, query);
797 spin_unlock(&br->multicast_lock);
/* IPv4 per-port own-query timer thunk. */
800 static void br_ip4_multicast_port_query_expired(struct timer_list *t)
802 struct net_bridge_port *port = from_timer(port, t, ip4_own_query.timer);
804 br_multicast_port_query_expired(port, &port->ip4_own_query);
807 #if IS_ENABLED(CONFIG_IPV6)
/* IPv6 per-port own-query timer thunk. */
808 static void br_ip6_multicast_port_query_expired(struct timer_list *t)
810 struct net_bridge_port *port = from_timer(port, t, ip6_own_query.timer);
812 br_multicast_port_query_expired(port, &port->ip6_own_query);
/* Propagate the snooping-enabled state to switchdev hardware. Note the
 * inversion: @value is "multicast enabled", the attr is "mc_disabled".
 */
816 static void br_mc_disabled_update(struct net_device *dev, bool value)
818 struct switchdev_attr attr = {
820 .id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
821 .flags = SWITCHDEV_F_DEFER,
822 .u.mc_disabled = !value,
825 switchdev_port_attr_set(dev, &attr);
/* Initialise per-port multicast state when a port joins the bridge:
 * default router mode, router/own-query timers, switchdev sync of the
 * current snooping state, and per-cpu statistics. Returns -ENOMEM-style
 * failure if the stats allocation fails (error path not fully visible).
 */
828 int br_multicast_add_port(struct net_bridge_port *port)
830 port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
832 timer_setup(&port->multicast_router_timer,
833 br_multicast_router_expired, 0);
834 timer_setup(&port->ip4_own_query.timer,
835 br_ip4_multicast_port_query_expired, 0);
836 #if IS_ENABLED(CONFIG_IPV6)
837 timer_setup(&port->ip6_own_query.timer,
838 br_ip6_multicast_port_query_expired, 0);
840 br_mc_disabled_update(port->dev,
841 br_opt_get(port->br, BROPT_MULTICAST_ENABLED));
843 port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
844 if (!port->mcast_stats)
/* Tear down per-port multicast state when a port leaves the bridge:
 * drop all remaining port groups under the lock, then synchronously stop
 * the router timer and free the per-cpu stats.
 */
850 void br_multicast_del_port(struct net_bridge_port *port)
852 struct net_bridge *br = port->br;
853 struct net_bridge_port_group *pg;
854 struct hlist_node *n;
856 /* Take care of the remaining groups, only perm ones should be left */
857 spin_lock_bh(&br->multicast_lock);
858 hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
859 br_multicast_del_pg(br, pg);
860 spin_unlock_bh(&br->multicast_lock);
861 del_timer_sync(&port->multicast_router_timer);
862 free_percpu(port->mcast_stats);
/* Restart an own-query cycle: reset the startup counter and, if the
 * timer can be (re)claimed, fire it immediately.
 */
865 static void br_multicast_enable(struct bridge_mcast_own_query *query)
867 query->startup_sent = 0;
869 if (try_to_del_timer_sync(&query->timer) >= 0 ||
870 del_timer(&query->timer))
871 mod_timer(&query->timer, jiffies);
/* Enable multicast on a port (lock held by caller): kick both address
 * families' own-query cycles and, for permanently-configured router
 * ports not yet on the router list, add them to it.
 */
874 static void __br_multicast_enable_port(struct net_bridge_port *port)
876 struct net_bridge *br = port->br;
878 if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) || !netif_running(br->dev))
881 br_multicast_enable(&port->ip4_own_query);
882 #if IS_ENABLED(CONFIG_IPV6)
883 br_multicast_enable(&port->ip6_own_query);
885 if (port->multicast_router == MDB_RTR_TYPE_PERM &&
886 hlist_unhashed(&port->rlist))
887 br_multicast_add_router(br, port);
/* Public wrapper: enable multicast on @port under multicast_lock. */
890 void br_multicast_enable_port(struct net_bridge_port *port)
892 struct net_bridge *br = port->br;
894 spin_lock(&br->multicast_lock);
895 __br_multicast_enable_port(port);
896 spin_unlock(&br->multicast_lock);
/* Disable multicast on a port: delete all non-permanent port groups,
 * remove the port from the router list and stop its router and
 * own-query timers, all under multicast_lock.
 */
899 void br_multicast_disable_port(struct net_bridge_port *port)
901 struct net_bridge *br = port->br;
902 struct net_bridge_port_group *pg;
903 struct hlist_node *n;
905 spin_lock(&br->multicast_lock);
906 hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
907 if (!(pg->flags & MDB_PG_FLAGS_PERMANENT))
908 br_multicast_del_pg(br, pg);
910 __del_port_router(port);
912 del_timer(&port->multicast_router_timer);
913 del_timer(&port->ip4_own_query.timer);
914 #if IS_ENABLED(CONFIG_IPV6)
915 del_timer(&port->ip6_own_query.timer);
917 spin_unlock(&br->multicast_lock);
/* Parse an IGMPv3 membership report: iterate its group records, bounds-
 * checking each (header plus 4 bytes per source) with pskb_may_pull().
 * Each record is collapsed to IGMPv2 semantics: TO_INCLUDE/IS_INCLUDE
 * with zero sources is a leave, everything else a join.
 */
920 static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
921 struct net_bridge_port *port,
925 const unsigned char *src;
926 struct igmpv3_report *ih;
927 struct igmpv3_grec *grec;
935 ih = igmpv3_report_hdr(skb);
936 num = ntohs(ih->ngrec);
937 len = skb_transport_offset(skb) + sizeof(*ih);
939 for (i = 0; i < num; i++) {
940 len += sizeof(*grec);
941 if (!pskb_may_pull(skb, len))
944 grec = (void *)(skb->data + len - sizeof(*grec));
945 group = grec->grec_mca;
946 type = grec->grec_type;
/* Skip over this record's source address list (4 bytes each). */
948 len += ntohs(grec->grec_nsrcs) * 4;
949 if (!pskb_may_pull(skb, len))
952 /* We treat this as an IGMPv2 report for now. */
954 case IGMPV3_MODE_IS_INCLUDE:
955 case IGMPV3_MODE_IS_EXCLUDE:
956 case IGMPV3_CHANGE_TO_INCLUDE:
957 case IGMPV3_CHANGE_TO_EXCLUDE:
958 case IGMPV3_ALLOW_NEW_SOURCES:
959 case IGMPV3_BLOCK_OLD_SOURCES:
966 src = eth_hdr(skb)->h_source;
967 if ((type == IGMPV3_CHANGE_TO_INCLUDE ||
968 type == IGMPV3_MODE_IS_INCLUDE) &&
969 ntohs(grec->grec_nsrcs) == 0) {
970 br_ip4_multicast_leave_group(br, port, group, vid, src);
972 err = br_ip4_multicast_add_group(br, port, group, vid,
982 #if IS_ENABLED(CONFIG_IPV6)
/* Parse an MLDv2 report, the IPv6 analogue of the IGMPv3 parser above:
 * read nsrcs safely via skb_header_pointer(), bounds-check each record
 * (header plus a full in6_addr per source), then collapse to MLDv1
 * semantics — TO_INCLUDE/IS_INCLUDE with zero sources leaves, the other
 * record types join.
 */
983 static int br_ip6_multicast_mld2_report(struct net_bridge *br,
984 struct net_bridge_port *port,
988 const unsigned char *src;
989 struct icmp6hdr *icmp6h;
990 struct mld2_grec *grec;
996 if (!pskb_may_pull(skb, sizeof(*icmp6h)))
999 icmp6h = icmp6_hdr(skb);
1000 num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
1001 len = skb_transport_offset(skb) + sizeof(*icmp6h);
1003 for (i = 0; i < num; i++) {
1004 __be16 *nsrcs, _nsrcs;
1006 nsrcs = skb_header_pointer(skb,
1007 len + offsetof(struct mld2_grec,
1009 sizeof(_nsrcs), &_nsrcs);
1013 if (!pskb_may_pull(skb,
1014 len + sizeof(*grec) +
1015 sizeof(struct in6_addr) * ntohs(*nsrcs)))
1018 grec = (struct mld2_grec *)(skb->data + len);
1019 len += sizeof(*grec) +
1020 sizeof(struct in6_addr) * ntohs(*nsrcs);
1022 /* We treat these as MLDv1 reports for now. */
1023 switch (grec->grec_type) {
1024 case MLD2_MODE_IS_INCLUDE:
1025 case MLD2_MODE_IS_EXCLUDE:
1026 case MLD2_CHANGE_TO_INCLUDE:
1027 case MLD2_CHANGE_TO_EXCLUDE:
1028 case MLD2_ALLOW_NEW_SOURCES:
1029 case MLD2_BLOCK_OLD_SOURCES:
1036 src = eth_hdr(skb)->h_source;
1037 if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
1038 grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
1039 ntohs(*nsrcs) == 0) {
1040 br_ip6_multicast_leave_group(br, port, &grec->grec_mca,
1043 err = br_ip6_multicast_add_group(br, port,
1044 &grec->grec_mca, vid,
/* Querier election for IPv4: a foreign querier wins only if its source
 * address is numerically lower than the currently recorded one (and a
 * querier timer is actually running). On a win, record the new address
 * and the port it was seen on (RCU pointer, writer holds multicast_lock).
 */
1055 static bool br_ip4_multicast_select_querier(struct net_bridge *br,
1056 struct net_bridge_port *port,
1059 if (!timer_pending(&br->ip4_own_query.timer) &&
1060 !timer_pending(&br->ip4_other_query.timer))
1063 if (!br->ip4_querier.addr.u.ip4)
1066 if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.u.ip4))
1072 br->ip4_querier.addr.u.ip4 = saddr;
1074 /* update protected by general multicast_lock by caller */
1075 rcu_assign_pointer(br->ip4_querier.port, port);
1080 #if IS_ENABLED(CONFIG_IPV6)
/* IPv6 querier election, mirroring the IPv4 logic: lower source address
 * wins; record address and source port on a win.
 */
1081 static bool br_ip6_multicast_select_querier(struct net_bridge *br,
1082 struct net_bridge_port *port,
1083 struct in6_addr *saddr)
1085 if (!timer_pending(&br->ip6_own_query.timer) &&
1086 !timer_pending(&br->ip6_other_query.timer))
1089 if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.u.ip6) <= 0)
1095 br->ip6_querier.addr.u.ip6 = *saddr;
1097 /* update protected by general multicast_lock by caller */
1098 rcu_assign_pointer(br->ip6_querier.port, port);
/* Family dispatcher for querier election on a received query's source
 * address.
 */
1104 static bool br_multicast_select_querier(struct net_bridge *br,
1105 struct net_bridge_port *port,
1106 struct br_ip *saddr)
1108 switch (saddr->proto) {
1109 case htons(ETH_P_IP):
1110 return br_ip4_multicast_select_querier(br, port, saddr->u.ip4);
1111 #if IS_ENABLED(CONFIG_IPV6)
1112 case htons(ETH_P_IPV6):
1113 return br_ip6_multicast_select_querier(br, port, &saddr->u.ip6);
/* A foreign query was accepted: note the max-delay deadline (only if the
 * other-querier timer was idle) and (re)arm the other-querier-present
 * timer for a full querier interval.
 */
1121 br_multicast_update_query_timer(struct net_bridge *br,
1122 struct bridge_mcast_other_query *query,
1123 unsigned long max_delay)
1125 if (!timer_pending(&query->timer))
1126 query->delay_time = jiffies + max_delay;
1128 mod_timer(&query->timer, jiffies + br->multicast_querier_interval);
/* Push a per-port mrouter state change to switchdev hardware (deferred
 * attr set on the port's device).
 */
1131 static void br_port_mc_router_state_change(struct net_bridge_port *p,
1134 struct switchdev_attr attr = {
1136 .id = SWITCHDEV_ATTR_ID_PORT_MROUTER,
1137 .flags = SWITCHDEV_F_DEFER,
1138 .u.mrouter = is_mc_router,
1141 switchdev_port_attr_set(p->dev, &attr);
/* Add port to router_list.
 * The list is maintained ordered by pointer value and is protected by
 * br->multicast_lock and RCU. No-op if the port is already listed;
 * otherwise insert at the sorted position (or list head), notify
 * userspace (RTM_NEWMDB router notification) and switchdev.
 */
1149 static void br_multicast_add_router(struct net_bridge *br,
1150 struct net_bridge_port *port)
1152 struct net_bridge_port *p;
1153 struct hlist_node *slot = NULL;
1155 if (!hlist_unhashed(&port->rlist))
1158 hlist_for_each_entry(p, &br->router_list, rlist) {
1159 if ((unsigned long) port >= (unsigned long) p)
1165 hlist_add_behind_rcu(&port->rlist, slot);
1167 hlist_add_head_rcu(&port->rlist, &br->router_list);
1168 br_rtr_notify(br->dev, port, RTM_NEWMDB);
1169 br_port_mc_router_state_change(port, true);
/* A router was detected. With @port == NULL (bridge-local detection),
 * refresh the bridge router timer, telling switchdev on the idle->active
 * transition. For a port, skip statically configured modes
 * (disabled/permanent), add it to the router list and refresh its timer.
 */
1172 static void br_multicast_mark_router(struct net_bridge *br,
1173 struct net_bridge_port *port)
1175 unsigned long now = jiffies;
1178 if (br->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
1179 if (!timer_pending(&br->multicast_router_timer))
1180 br_mc_router_state_change(br, true);
1181 mod_timer(&br->multicast_router_timer,
1182 now + br->multicast_querier_interval);
1187 if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
1188 port->multicast_router == MDB_RTR_TYPE_PERM)
1191 br_multicast_add_router(br, port);
1193 mod_timer(&port->multicast_router_timer,
1194 now + br->multicast_querier_interval);
/* Common handling for a received query: run querier election on @saddr;
 * if the sender wins, refresh the other-querier timer and — except for
 * IGMP queries sourced from 0.0.0.0 (RFC 4541 2.1.1) — mark the arrival
 * port as a router port.
 */
1197 static void br_multicast_query_received(struct net_bridge *br,
1198 struct net_bridge_port *port,
1199 struct bridge_mcast_other_query *query,
1200 struct br_ip *saddr,
1201 unsigned long max_delay)
1203 if (!br_multicast_select_querier(br, port, saddr))
1206 br_multicast_update_query_timer(br, query, max_delay);
1208 /* Based on RFC4541, section 2.1.1 IGMP Forwarding Rules,
1209 * the arrival port for IGMP Queries where the source address
1210 * is 0.0.0.0 should not be added to router port list.
1212 if ((saddr->proto == htons(ETH_P_IP) && saddr->u.ip4) ||
1213 saddr->proto == htons(ETH_P_IPV6))
1214 br_multicast_mark_router(br, port);
/* Handle a received IGMP query. Determine max response delay from the
 * header length (IGMPv2 code field vs. IGMPv3 mantissa/exponent MRC),
 * feed the source into querier election/router marking, and for
 * group-specific queries lower the corresponding group and port timers
 * so membership expires unless refreshed by a report.
 */
1217 static void br_ip4_multicast_query(struct net_bridge *br,
1218 struct net_bridge_port *port,
1219 struct sk_buff *skb,
1222 const struct iphdr *iph = ip_hdr(skb);
1223 struct igmphdr *ih = igmp_hdr(skb);
1224 struct net_bridge_mdb_entry *mp;
1225 struct igmpv3_query *ih3;
1226 struct net_bridge_port_group *p;
1227 struct net_bridge_port_group __rcu **pp;
1229 unsigned long max_delay;
1230 unsigned long now = jiffies;
1231 unsigned int offset = skb_transport_offset(skb);
1234 spin_lock(&br->multicast_lock);
1235 if (!netif_running(br->dev) ||
1236 (port && port->state == BR_STATE_DISABLED))
/* Exactly sizeof(igmphdr) bytes: IGMPv1/v2 query. */
1241 if (skb->len == offset + sizeof(*ih)) {
1242 max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);
/* NOTE(review): presumably the IGMPv1 fallback (code == 0) —
 * confirm against the full source.
 */
1245 max_delay = 10 * HZ;
1248 } else if (skb->len >= offset + sizeof(*ih3)) {
1249 ih3 = igmpv3_query_hdr(skb);
1253 max_delay = ih3->code ?
1254 IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
1260 saddr.proto = htons(ETH_P_IP);
1261 saddr.u.ip4 = iph->saddr;
1263 br_multicast_query_received(br, port, &br->ip4_other_query,
/* Group-specific query: shorten the matching group/port timers. */
1268 mp = br_mdb_ip4_get(br, group, vid);
1272 max_delay *= br->multicast_last_member_count;
1274 if (mp->host_joined &&
1275 (timer_pending(&mp->timer) ?
1276 time_after(mp->timer.expires, now + max_delay) :
1277 try_to_del_timer_sync(&mp->timer) >= 0))
1278 mod_timer(&mp->timer, now + max_delay);
1280 for (pp = &mp->ports;
1281 (p = mlock_dereference(*pp, br)) != NULL;
1283 if (timer_pending(&p->timer) ?
1284 time_after(p->timer.expires, now + max_delay) :
1285 try_to_del_timer_sync(&p->timer) >= 0)
1286 mod_timer(&p->timer, now + max_delay);
1290 spin_unlock(&br->multicast_lock);
1293 #if IS_ENABLED(CONFIG_IPV6)
/* Handle a received MLD query, the IPv6 analogue of
 * br_ip4_multicast_query(): pick max response delay from the MLDv1
 * mld_maxdelay or MLDv2 MRC field, feed general queries into querier
 * election/router marking, and for group-specific queries lower the
 * matching group and port timers.
 */
1294 static int br_ip6_multicast_query(struct net_bridge *br,
1295 struct net_bridge_port *port,
1296 struct sk_buff *skb,
1299 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
1300 struct mld_msg *mld;
1301 struct net_bridge_mdb_entry *mp;
1302 struct mld2_query *mld2q;
1303 struct net_bridge_port_group *p;
1304 struct net_bridge_port_group __rcu **pp;
1306 unsigned long max_delay;
1307 unsigned long now = jiffies;
1308 unsigned int offset = skb_transport_offset(skb);
1309 const struct in6_addr *group = NULL;
1310 bool is_general_query;
1313 spin_lock(&br->multicast_lock);
1314 if (!netif_running(br->dev) ||
1315 (port && port->state == BR_STATE_DISABLED))
/* MLDv1 query: exactly sizeof(mld_msg) bytes of ICMPv6 payload. */
1318 if (skb->len == offset + sizeof(*mld)) {
1319 if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
1323 mld = (struct mld_msg *) icmp6_hdr(skb);
1324 max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
1326 group = &mld->mld_mca;
1328 if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
1332 mld2q = (struct mld2_query *)icmp6_hdr(skb);
1333 if (!mld2q->mld2q_nsrcs)
1334 group = &mld2q->mld2q_mca;
1336 max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
1339 is_general_query = group && ipv6_addr_any(group);
1341 if (is_general_query) {
1342 saddr.proto = htons(ETH_P_IPV6);
1343 saddr.u.ip6 = ip6h->saddr;
1345 br_multicast_query_received(br, port, &br->ip6_other_query,
1348 } else if (!group) {
/* Group-specific query: shorten the matching group/port timers. */
1352 mp = br_mdb_ip6_get(br, group, vid);
1356 max_delay *= br->multicast_last_member_count;
1357 if (mp->host_joined &&
1358 (timer_pending(&mp->timer) ?
1359 time_after(mp->timer.expires, now + max_delay) :
1360 try_to_del_timer_sync(&mp->timer) >= 0))
1361 mod_timer(&mp->timer, now + max_delay);
1363 for (pp = &mp->ports;
1364 (p = mlock_dereference(*pp, br)) != NULL;
1366 if (timer_pending(&p->timer) ?
1367 time_after(p->timer.expires, now + max_delay) :
1368 try_to_del_timer_sync(&p->timer) >= 0)
1369 mod_timer(&p->timer, now + max_delay);
1373 spin_unlock(&br->multicast_lock);
/* Core leave handler. Fast-leave ports (BR_MULTICAST_FAST_LEAVE) drop
 * the matching port group immediately and may arm the group entry for
 * instant expiry. Otherwise, while no foreign querier is active, either
 * send our own group-specific query (querier enabled) and shorten the
 * matching entries' timers to last_member_count * last_member_interval,
 * or just shorten the group/port timers so membership ages out unless a
 * report refreshes it.
 */
1379 br_multicast_leave_group(struct net_bridge *br,
1380 struct net_bridge_port *port,
1381 struct br_ip *group,
1382 struct bridge_mcast_other_query *other_query,
1383 struct bridge_mcast_own_query *own_query,
1384 const unsigned char *src)
1386 struct net_bridge_mdb_entry *mp;
1387 struct net_bridge_port_group *p;
1391 spin_lock(&br->multicast_lock);
1392 if (!netif_running(br->dev) ||
1393 (port && port->state == BR_STATE_DISABLED))
1396 mp = br_mdb_ip_get(br, group);
/* Fast leave: remove the port group without waiting for queries. */
1400 if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
1401 struct net_bridge_port_group __rcu **pp;
1403 for (pp = &mp->ports;
1404 (p = mlock_dereference(*pp, br)) != NULL;
1406 if (!br_port_group_equal(p, port, src))
1409 rcu_assign_pointer(*pp, p->next);
1410 hlist_del_init(&p->mglist);
1411 del_timer(&p->timer);
1413 br_mdb_notify(br->dev, port, group, RTM_DELMDB,
1416 if (!mp->ports && !mp->host_joined &&
1417 netif_running(br->dev))
1418 mod_timer(&mp->timer, jiffies);
/* Defer to a foreign querier if one is active. */
1423 if (timer_pending(&other_query->timer))
1426 if (br_opt_get(br, BROPT_MULTICAST_QUERIER)) {
1427 __br_multicast_send_query(br, port, &mp->addr);
1429 time = jiffies + br->multicast_last_member_count *
1430 br->multicast_last_member_interval;
1432 mod_timer(&own_query->timer, time);
1434 for (p = mlock_dereference(mp->ports, br);
1436 p = mlock_dereference(p->next, br)) {
1437 if (!br_port_group_equal(p, port, src))
1440 if (!hlist_unhashed(&p->mglist) &&
1441 (timer_pending(&p->timer) ?
1442 time_after(p->timer.expires, time) :
1443 try_to_del_timer_sync(&p->timer) >= 0)) {
1444 mod_timer(&p->timer, time);
1452 time = now + br->multicast_last_member_count *
1453 br->multicast_last_member_interval;
/* Only ever lower timers here — never extend membership. */
1456 if (mp->host_joined &&
1457 (timer_pending(&mp->timer) ?
1458 time_after(mp->timer.expires, time) :
1459 try_to_del_timer_sync(&mp->timer) >= 0)) {
1460 mod_timer(&mp->timer, time);
1466 for (p = mlock_dereference(mp->ports, br);
1468 p = mlock_dereference(p->next, br)) {
1469 if (p->port != port)
1472 if (!hlist_unhashed(&p->mglist) &&
1473 (timer_pending(&p->timer) ?
1474 time_after(p->timer.expires, time) :
1475 try_to_del_timer_sync(&p->timer) >= 0)) {
1476 mod_timer(&p->timer, time);
1482 spin_unlock(&br->multicast_lock);
/* Handle an IGMP leave for an IPv4 @group arriving on @port (NULL when
 * the bridge device itself is the member).  Builds a struct br_ip key
 * and defers to the protocol-independent br_multicast_leave_group().
 */
1485 static void br_ip4_multicast_leave_group(struct net_bridge *br,
1486 struct net_bridge_port *port,
1489 const unsigned char *src)
1491 struct br_ip br_group;
1492 struct bridge_mcast_own_query *own_query;
/* leaves for link-local 224.0.0.x groups are not processed */
1494 if (ipv4_is_local_multicast(group))
/* per-port own-query state when a port is given, else bridge-wide */
1497 own_query = port ? &port->ip4_own_query : &br->ip4_own_query;
1499 br_group.u.ip4 = group;
1500 br_group.proto = htons(ETH_P_IP);
1503 br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query,
1507 #if IS_ENABLED(CONFIG_IPV6)
/* IPv6 counterpart of br_ip4_multicast_leave_group(): handle an MLD
 * done/leave for @group arriving on @port (NULL for the bridge itself).
 */
1508 static void br_ip6_multicast_leave_group(struct net_bridge *br,
1509 struct net_bridge_port *port,
1510 const struct in6_addr *group,
1512 const unsigned char *src)
1514 struct br_ip br_group;
1515 struct bridge_mcast_own_query *own_query;
/* leaves for the link-local all-nodes group (ff02::1) are ignored */
1517 if (ipv6_addr_is_ll_all_nodes(group))
/* per-port own-query state when a port is given, else bridge-wide */
1520 own_query = port ? &port->ip6_own_query : &br->ip6_own_query;
1522 br_group.u.ip6 = *group;
1523 br_group.proto = htons(ETH_P_IPV6);
1526 br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query,
/* Bump the per-CPU IGMP or MLD parse-error counter, selected by the
 * packet's ethertype.  No-op unless multicast stats are enabled.
 */
1531 static void br_multicast_err_count(const struct net_bridge *br,
1532 const struct net_bridge_port *p,
1535 struct bridge_mcast_stats __percpu *stats;
1536 struct bridge_mcast_stats *pstats;
1538 if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
/* per-port counters when a port context exists, else bridge-level */
1542 stats = p->mcast_stats;
1544 stats = br->mcast_stats;
1545 if (WARN_ON(!stats))
1548 pstats = this_cpu_ptr(stats);
/* writer side of the u64_stats seqcount protecting the counters */
1550 u64_stats_update_begin(&pstats->syncp);
1552 case htons(ETH_P_IP):
1553 pstats->mstats.igmp_parse_errors++;
1555 #if IS_ENABLED(CONFIG_IPV6)
1556 case htons(ETH_P_IPV6):
1557 pstats->mstats.mld_parse_errors++;
1561 u64_stats_update_end(&pstats->syncp);
/* Inspect a PIM packet: a valid PIMv2 HELLO marks @port as a multicast
 * router port.
 */
1564 static void br_multicast_pim(struct net_bridge *br,
1565 struct net_bridge_port *port,
1566 const struct sk_buff *skb)
1568 unsigned int offset = skb_transport_offset(skb);
1569 struct pimhdr *pimhdr, _pimhdr;
/* copy the header out in case the skb data is non-linear */
1571 pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
1572 if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
1573 pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
1576 br_multicast_mark_router(br, port);
/* Validate and act on an IGMP packet received on @port: reports add
 * group membership, queries refresh querier state, leaves age groups
 * out.  Malformed packets only bump the parse-error counters.
 */
1579 static int br_multicast_ipv4_rcv(struct net_bridge *br,
1580 struct net_bridge_port *port,
1581 struct sk_buff *skb,
1584 struct sk_buff *skb_trimmed = NULL;
1585 const unsigned char *src;
/* IGMP sanity check; may hand back a trimmed copy in skb_trimmed */
1589 err = ip_mc_check_igmp(skb, &skb_trimmed);
1591 if (err == -ENOMSG) {
/* not an IGMP message: restrict non-link-local traffic to router
 * ports; a PIM packet to the all-PIM-routers group may still mark
 * the port as a router port
 */
1592 if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
1593 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
1594 } else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
1595 if (ip_hdr(skb)->protocol == IPPROTO_PIM)
1596 br_multicast_pim(br, port, skb);
1599 } else if (err < 0) {
1600 br_multicast_err_count(br, port, skb->protocol);
1605 src = eth_hdr(skb)->h_source;
/* remember the IGMP type for the stats accounting below */
1606 BR_INPUT_SKB_CB(skb)->igmp = ih->type;
1609 case IGMP_HOST_MEMBERSHIP_REPORT:
1610 case IGMPV2_HOST_MEMBERSHIP_REPORT:
/* membership reports are forwarded to router ports only */
1611 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
1612 err = br_ip4_multicast_add_group(br, port, ih->group, vid, src);
1614 case IGMPV3_HOST_MEMBERSHIP_REPORT:
1615 err = br_ip4_multicast_igmp3_report(br, port, skb_trimmed, vid);
1617 case IGMP_HOST_MEMBERSHIP_QUERY:
1618 br_ip4_multicast_query(br, port, skb_trimmed, vid);
1620 case IGMP_HOST_LEAVE_MESSAGE:
1621 br_ip4_multicast_leave_group(br, port, ih->group, vid, src);
/* free the trimmed copy if one was allocated above */
1625 if (skb_trimmed && skb_trimmed != skb)
1626 kfree_skb(skb_trimmed);
1628 br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
1634 #if IS_ENABLED(CONFIG_IPV6)
/* IPv6 counterpart of br_multicast_ipv4_rcv(): validate and act on an
 * MLD message.  Malformed packets only bump the parse-error counters.
 */
1635 static int br_multicast_ipv6_rcv(struct net_bridge *br,
1636 struct net_bridge_port *port,
1637 struct sk_buff *skb,
1640 struct sk_buff *skb_trimmed = NULL;
1641 const unsigned char *src;
1642 struct mld_msg *mld;
/* MLD sanity check; may hand back a trimmed copy in skb_trimmed */
1645 err = ipv6_mc_check_mld(skb, &skb_trimmed);
1647 if (err == -ENOMSG) {
/* not MLD: restrict to router ports unless sent to ff02::1 */
1648 if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
1649 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
1651 } else if (err < 0) {
1652 br_multicast_err_count(br, port, skb->protocol);
1656 mld = (struct mld_msg *)skb_transport_header(skb);
/* remember the MLD type for the stats accounting below */
1657 BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;
1659 switch (mld->mld_type) {
1660 case ICMPV6_MGM_REPORT:
1661 src = eth_hdr(skb)->h_source;
/* membership reports are forwarded to router ports only */
1662 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
1663 err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid,
1666 case ICMPV6_MLD2_REPORT:
1667 err = br_ip6_multicast_mld2_report(br, port, skb_trimmed, vid);
1669 case ICMPV6_MGM_QUERY:
1670 err = br_ip6_multicast_query(br, port, skb_trimmed, vid);
1672 case ICMPV6_MGM_REDUCTION:
1673 src = eth_hdr(skb)->h_source;
1674 br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid, src);
/* free the trimmed copy if one was allocated above */
1678 if (skb_trimmed && skb_trimmed != skb)
1679 kfree_skb(skb_trimmed);
1681 br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
/* Entry point from the bridge input path: reset the per-skb snooping
 * control block and dispatch IGMP/MLD processing by ethertype.  Does
 * nothing when multicast snooping is disabled.
 */
1688 int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
1689 struct sk_buff *skb, u16 vid)
1693 BR_INPUT_SKB_CB(skb)->igmp = 0;
1694 BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
1696 if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
1699 switch (skb->protocol) {
1700 case htons(ETH_P_IP):
1701 ret = br_multicast_ipv4_rcv(br, port, skb, vid);
1703 #if IS_ENABLED(CONFIG_IPV6)
1704 case htons(ETH_P_IPV6):
1705 ret = br_multicast_ipv6_rcv(br, port, skb, vid);
/* Own-query timer body (shared by the IPv4 and IPv6 callbacks): count
 * a startup query while still in the startup phase, forget the cached
 * querier port and send a fresh general query.
 */
1713 static void br_multicast_query_expired(struct net_bridge *br,
1714 struct bridge_mcast_own_query *query,
1715 struct bridge_mcast_querier *querier)
1717 spin_lock(&br->multicast_lock);
1718 if (query->startup_sent < br->multicast_startup_query_count)
1719 query->startup_sent++;
/* we are sending our own query, so no remote querier port applies */
1721 RCU_INIT_POINTER(querier->port, NULL);
1722 br_multicast_send_query(br, NULL, query);
1723 spin_unlock(&br->multicast_lock);
/* Timer callback for the bridge's IPv4 own-query timer. */
1726 static void br_ip4_multicast_query_expired(struct timer_list *t)
1728 struct net_bridge *br = from_timer(br, t, ip4_own_query.timer);
1730 br_multicast_query_expired(br, &br->ip4_own_query, &br->ip4_querier);
1733 #if IS_ENABLED(CONFIG_IPV6)
/* Timer callback for the bridge's IPv6 own-query timer. */
1734 static void br_ip6_multicast_query_expired(struct timer_list *t)
1736 struct net_bridge *br = from_timer(br, t, ip6_own_query.timer);
1738 br_multicast_query_expired(br, &br->ip6_own_query, &br->ip6_querier);
/* Initialize all multicast state of a newly created bridge: default
 * counts and intervals, option flags, the per-family query/querier
 * timers and the mdb list head.
 */
1742 void br_multicast_init(struct net_bridge *br)
1744 br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX;
1746 br->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
1747 br->multicast_last_member_count = 2;
1748 br->multicast_startup_query_count = 2;
/* interval defaults in jiffies; e.g. 125 s between general queries */
1750 br->multicast_last_member_interval = HZ;
1751 br->multicast_query_response_interval = 10 * HZ;
1752 br->multicast_startup_query_interval = 125 * HZ / 4;
1753 br->multicast_query_interval = 125 * HZ;
1754 br->multicast_querier_interval = 255 * HZ;
1755 br->multicast_membership_interval = 260 * HZ;
1757 br->ip4_other_query.delay_time = 0;
1758 br->ip4_querier.port = NULL;
1759 br->multicast_igmp_version = 2;
1760 #if IS_ENABLED(CONFIG_IPV6)
1761 br->multicast_mld_version = 1;
1762 br->ip6_other_query.delay_time = 0;
1763 br->ip6_querier.port = NULL;
/* snooping is on by default; assume an IPv6 address until told not */
1765 br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true);
1766 br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);
1768 spin_lock_init(&br->multicast_lock);
1769 timer_setup(&br->multicast_router_timer,
1770 br_multicast_local_router_expired, 0);
1771 timer_setup(&br->ip4_other_query.timer,
1772 br_ip4_multicast_querier_expired, 0);
1773 timer_setup(&br->ip4_own_query.timer,
1774 br_ip4_multicast_query_expired, 0);
1775 #if IS_ENABLED(CONFIG_IPV6)
1776 timer_setup(&br->ip6_other_query.timer,
1777 br_ip6_multicast_querier_expired, 0);
1778 timer_setup(&br->ip6_own_query.timer,
1779 br_ip6_multicast_query_expired, 0);
1781 INIT_HLIST_HEAD(&br->mdb_list);
/* Restart one own-query cycle: reset the startup counter and, when
 * snooping is enabled, fire the query timer immediately.
 */
1784 static void __br_multicast_open(struct net_bridge *br,
1785 struct bridge_mcast_own_query *query)
1787 query->startup_sent = 0;
1789 if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
1792 mod_timer(&query->timer, jiffies);
/* Called when the bridge device comes up: start the IGMP (and, with
 * IPv6, the MLD) startup query cycles.
 */
1795 void br_multicast_open(struct net_bridge *br)
1797 __br_multicast_open(br, &br->ip4_own_query);
1798 #if IS_ENABLED(CONFIG_IPV6)
1799 __br_multicast_open(br, &br->ip6_own_query);
/* Called when the bridge device goes down: synchronously stop every
 * bridge-level multicast timer.
 */
1803 void br_multicast_stop(struct net_bridge *br)
1805 del_timer_sync(&br->multicast_router_timer);
1806 del_timer_sync(&br->ip4_other_query.timer);
1807 del_timer_sync(&br->ip4_own_query.timer);
1808 #if IS_ENABLED(CONFIG_IPV6)
1809 del_timer_sync(&br->ip6_other_query.timer);
1810 del_timer_sync(&br->ip6_own_query.timer);
/* Tear down the multicast database when the bridge device is deleted:
 * stop each group entry's timer and unlink it from both the rhashtable
 * and the mdb list.
 */
1814 void br_multicast_dev_del(struct net_bridge *br)
1816 struct net_bridge_mdb_entry *mp;
1817 struct hlist_node *tmp;
1819 spin_lock_bh(&br->multicast_lock);
/* _safe variant: entries are removed while walking */
1820 hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node) {
1821 del_timer(&mp->timer);
1822 rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
1824 hlist_del_rcu(&mp->mdb_node);
1827 spin_unlock_bh(&br->multicast_lock);
/* Set the bridge-level multicast router mode (sysfs/netlink control).
 * DISABLED and PERM fix the state and stop the router timer;
 * TEMP_QUERY reverts to learning router presence from queries.
 */
1832 int br_multicast_set_router(struct net_bridge *br, unsigned long val)
1836 spin_lock_bh(&br->multicast_lock);
1839 case MDB_RTR_TYPE_DISABLED:
1840 case MDB_RTR_TYPE_PERM:
/* propagate the now-fixed router state (on only for PERM) */
1841 br_mc_router_state_change(br, val == MDB_RTR_TYPE_PERM);
1842 del_timer(&br->multicast_router_timer);
1843 br->multicast_router = val;
1846 case MDB_RTR_TYPE_TEMP_QUERY:
1847 if (br->multicast_router != MDB_RTR_TYPE_TEMP_QUERY)
1848 br_mc_router_state_change(br, false);
1849 br->multicast_router = val;
1854 spin_unlock_bh(&br->multicast_lock);
/* Remove @p from the router-port list (no-op if not on it), notify
 * listeners, and demote a TEMP router to TEMP_QUERY so a stale timer
 * cannot re-add the port.
 */
1859 static void __del_port_router(struct net_bridge_port *p)
1861 if (hlist_unhashed(&p->rlist))
1863 hlist_del_init_rcu(&p->rlist);
1864 br_rtr_notify(p->br->dev, p, RTM_DELMDB);
1865 br_port_mc_router_state_change(p, false);
1867 /* don't allow timer refresh */
1868 if (p->multicast_router == MDB_RTR_TYPE_TEMP)
1869 p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
/* Set the per-port multicast router mode.  Re-selecting TEMP refreshes
 * its expiry timer; otherwise each mode adds/removes the port from the
 * router list and manages the per-port router timer accordingly.
 */
1872 int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
1874 struct net_bridge *br = p->br;
1875 unsigned long now = jiffies;
1878 spin_lock(&br->multicast_lock);
1879 if (p->multicast_router == val) {
1880 /* Refresh the temp router port timer */
1881 if (p->multicast_router == MDB_RTR_TYPE_TEMP)
1882 mod_timer(&p->multicast_router_timer,
1883 now + br->multicast_querier_interval);
1888 case MDB_RTR_TYPE_DISABLED:
1889 p->multicast_router = MDB_RTR_TYPE_DISABLED;
1890 __del_port_router(p);
1891 del_timer(&p->multicast_router_timer);
1893 case MDB_RTR_TYPE_TEMP_QUERY:
1894 p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
1895 __del_port_router(p);
1897 case MDB_RTR_TYPE_PERM:
/* permanent router port: always listed, no expiry timer */
1898 p->multicast_router = MDB_RTR_TYPE_PERM;
1899 del_timer(&p->multicast_router_timer);
1900 br_multicast_add_router(br, p);
1902 case MDB_RTR_TYPE_TEMP:
1903 p->multicast_router = MDB_RTR_TYPE_TEMP;
1904 br_multicast_mark_router(br, p);
1911 spin_unlock(&br->multicast_lock);
/* (Re)start own queries for one address family and re-enable per-port
 * query state on every active port.
 */
1916 static void br_multicast_start_querier(struct net_bridge *br,
1917 struct bridge_mcast_own_query *query)
1919 struct net_bridge_port *port;
1921 __br_multicast_open(br, query);
1923 list_for_each_entry(port, &br->port_list, list) {
/* skip ports that do not forward traffic */
1924 if (port->state == BR_STATE_DISABLED ||
1925 port->state == BR_STATE_BLOCKING)
/* pick the per-port query matching the family we were given */
1928 if (query == &br->ip4_own_query)
1929 br_multicast_enable(&port->ip4_own_query);
1930 #if IS_ENABLED(CONFIG_IPV6)
1932 br_multicast_enable(&port->ip6_own_query);
/* Enable or disable multicast snooping on the bridge.  When enabling
 * on a running device, restart the queriers and re-enable every port.
 */
1937 int br_multicast_toggle(struct net_bridge *br, unsigned long val)
1939 struct net_bridge_port *port;
1941 spin_lock_bh(&br->multicast_lock);
/* nothing to do if the requested state matches the current one */
1942 if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
1945 br_mc_disabled_update(br->dev, val);
1946 br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
1947 if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
1950 if (!netif_running(br->dev))
1953 br_multicast_open(br);
1954 list_for_each_entry(port, &br->port_list, list)
1955 __br_multicast_enable_port(port);
1958 spin_unlock_bh(&br->multicast_lock);
/* Exported helper: is multicast snooping enabled on this bridge? */
1963 bool br_multicast_enabled(const struct net_device *dev)
1965 struct net_bridge *br = netdev_priv(dev);
1967 return !!br_opt_get(br, BROPT_MULTICAST_ENABLED);
1969 EXPORT_SYMBOL_GPL(br_multicast_enabled);
/* Exported helper: report the bridge's multicast-router status, taken
 * under the multicast lock for a consistent answer.
 */
1971 bool br_multicast_router(const struct net_device *dev)
1973 struct net_bridge *br = netdev_priv(dev);
1976 spin_lock_bh(&br->multicast_lock);
1977 is_router = br_multicast_is_router(br);
1978 spin_unlock_bh(&br->multicast_lock);
1981 EXPORT_SYMBOL_GPL(br_multicast_router);
/* Enable/disable acting as IGMP/MLD querier.  When enabling, seed the
 * other-querier delay time and start own queries for both families.
 */
1983 int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
1985 unsigned long max_delay;
1989 spin_lock_bh(&br->multicast_lock);
/* no change requested */
1990 if (br_opt_get(br, BROPT_MULTICAST_QUERIER) == val)
1993 br_opt_toggle(br, BROPT_MULTICAST_QUERIER, !!val);
1997 max_delay = br->multicast_query_response_interval;
/* give a foreign querier one response interval to show itself */
1999 if (!timer_pending(&br->ip4_other_query.timer))
2000 br->ip4_other_query.delay_time = jiffies + max_delay;
2002 br_multicast_start_querier(br, &br->ip4_own_query);
2004 #if IS_ENABLED(CONFIG_IPV6)
2005 if (!timer_pending(&br->ip6_other_query.timer))
2006 br->ip6_other_query.delay_time = jiffies + max_delay;
2008 br_multicast_start_querier(br, &br->ip6_own_query);
2012 spin_unlock_bh(&br->multicast_lock);
/* Configure which IGMP version the bridge uses for its queries. */
2017 int br_multicast_set_igmp_version(struct net_bridge *br, unsigned long val)
2019 /* Currently we support only version 2 and 3 */
2028 spin_lock_bh(&br->multicast_lock);
2029 br->multicast_igmp_version = val;
2030 spin_unlock_bh(&br->multicast_lock);
2035 #if IS_ENABLED(CONFIG_IPV6)
/* Configure which MLD version the bridge uses for its queries. */
2036 int br_multicast_set_mld_version(struct net_bridge *br, unsigned long val)
2038 /* Currently we support version 1 and 2 */
2047 spin_lock_bh(&br->multicast_lock);
2048 br->multicast_mld_version = val;
2049 spin_unlock_bh(&br->multicast_lock);
2056 * br_multicast_list_adjacent - Returns snooped multicast addresses
2057 * @dev: The bridge port adjacent to which to retrieve addresses
2058 * @br_ip_list: The list to store found, snooped multicast IP addresses in
2060 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
2061 * snooping feature on all bridge ports of dev's bridge device, excluding
2062 * the addresses from dev itself.
2064 * Returns the number of items added to br_ip_list.
2067 * - br_ip_list needs to be initialized by caller
2068 * - br_ip_list might contain duplicates in the end
2069 * (needs to be taken care of by caller)
2070 * - br_ip_list needs to be freed by caller
2072 int br_multicast_list_adjacent(struct net_device *dev,
2073 struct list_head *br_ip_list)
2075 struct net_bridge *br;
2076 struct net_bridge_port *port;
2077 struct net_bridge_port_group *group;
2078 struct br_ip_list *entry;
2082 if (!br_ip_list || !br_port_exists(dev))
2085 port = br_port_get_rcu(dev);
2086 if (!port || !port->br)
/* walk every other port's snooped group list */
2091 list_for_each_entry_rcu(port, &br->port_list, list) {
2092 if (!port->dev || port->dev == dev)
2095 hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
/* GFP_ATOMIC: the RCU list walk must not sleep */
2096 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
2100 entry->addr = group->addr;
2101 list_add(&entry->list, br_ip_list);
2110 EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);
2113 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
2114 * @dev: The bridge port providing the bridge on which to check for a querier
2115 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
2117 * Checks whether the given interface has a bridge on top and if so returns
2118 * true if a valid querier exists anywhere on the bridged link layer.
2119 * Otherwise returns false.
2121 bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
2123 struct net_bridge *br;
2124 struct net_bridge_port *port;
2129 if (!br_port_exists(dev))
2132 port = br_port_get_rcu(dev);
2133 if (!port || !port->br)
/* build a minimal ethernet header so the generic querier check can
 * key on the requested protocol family
 */
2138 memset(&eth, 0, sizeof(eth));
2139 eth.h_proto = htons(proto);
2141 ret = br_multicast_querier_exists(br, &eth);
2147 EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);
2150 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
2151 * @dev: The bridge port adjacent to which to check for a querier
2152 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
2154 * Checks whether the given interface has a bridge on top and if so returns
2155 * true if a selected querier is behind one of the other ports of this
2156 * bridge. Otherwise returns false.
2158 bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
2160 struct net_bridge *br;
2161 struct net_bridge_port *port;
2165 if (!br_port_exists(dev))
2168 port = br_port_get_rcu(dev);
2169 if (!port || !port->br)
/* a foreign querier counts only if its timer is still running and it
 * is not reached through the port we were asked about
 */
2176 if (!timer_pending(&br->ip4_other_query.timer) ||
2177 rcu_dereference(br->ip4_querier.port) == port)
2180 #if IS_ENABLED(CONFIG_IPV6)
2182 if (!timer_pending(&br->ip6_other_query.timer) ||
2183 rcu_dereference(br->ip6_querier.port) == port)
2196 EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);
/* Fold one IGMP/MLD packet into the per-CPU statistics block, keyed by
 * packet @type and @dir (RX/TX).
 */
2198 static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
2199 const struct sk_buff *skb, u8 type, u8 dir)
2201 struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
2202 __be16 proto = skb->protocol;
2205 u64_stats_update_begin(&pstats->syncp);
2207 case htons(ETH_P_IP):
/* transport payload length, used to tell query versions apart */
2208 t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
2210 case IGMP_HOST_MEMBERSHIP_REPORT:
2211 pstats->mstats.igmp_v1reports[dir]++;
2213 case IGMPV2_HOST_MEMBERSHIP_REPORT:
2214 pstats->mstats.igmp_v2reports[dir]++;
2216 case IGMPV3_HOST_MEMBERSHIP_REPORT:
2217 pstats->mstats.igmp_v3reports[dir]++;
2219 case IGMP_HOST_MEMBERSHIP_QUERY:
/* IGMPv3 queries are longer than a plain struct igmphdr */
2220 if (t_len != sizeof(struct igmphdr)) {
2221 pstats->mstats.igmp_v3queries[dir]++;
2223 unsigned int offset = skb_transport_offset(skb);
2224 struct igmphdr *ih, _ihdr;
2226 ih = skb_header_pointer(skb, offset,
2227 sizeof(_ihdr), &_ihdr);
/* same length: distinguish IGMPv1 from IGMPv2 query */
2231 pstats->mstats.igmp_v1queries[dir]++;
2233 pstats->mstats.igmp_v2queries[dir]++;
2236 case IGMP_HOST_LEAVE_MESSAGE:
2237 pstats->mstats.igmp_leaves[dir]++;
2241 #if IS_ENABLED(CONFIG_IPV6)
2242 case htons(ETH_P_IPV6):
2243 t_len = ntohs(ipv6_hdr(skb)->payload_len) +
2244 sizeof(struct ipv6hdr);
2245 t_len -= skb_network_header_len(skb);
2247 case ICMPV6_MGM_REPORT:
2248 pstats->mstats.mld_v1reports[dir]++;
2250 case ICMPV6_MLD2_REPORT:
2251 pstats->mstats.mld_v2reports[dir]++;
2253 case ICMPV6_MGM_QUERY:
/* MLDv2 queries are longer than a plain struct mld_msg */
2254 if (t_len != sizeof(struct mld_msg))
2255 pstats->mstats.mld_v2queries[dir]++;
2257 pstats->mstats.mld_v1queries[dir]++;
2259 case ICMPV6_MGM_REDUCTION:
2260 pstats->mstats.mld_leaves[dir]++;
2264 #endif /* CONFIG_IPV6 */
2266 u64_stats_update_end(&pstats->syncp);
/* Account one snooped IGMP/MLD packet; a @type of 0 (unrecognized
 * packet) or disabled stats make this a no-op.
 */
2269 void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
2270 const struct sk_buff *skb, u8 type, u8 dir)
2272 struct bridge_mcast_stats __percpu *stats;
2274 /* if multicast_disabled is true then igmp type can't be set */
2275 if (!type || !br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
/* per-port counters when a port context exists, else bridge-level */
2279 stats = p->mcast_stats;
2281 stats = br->mcast_stats;
2282 if (WARN_ON(!stats))
2285 br_mcast_stats_add(stats, skb, type, dir);
/* Allocate the bridge-level per-CPU multicast statistics block. */
2288 int br_multicast_init_stats(struct net_bridge *br)
2290 br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
2291 if (!br->mcast_stats)
/* Free the bridge-level per-CPU multicast statistics block. */
2297 void br_multicast_uninit_stats(struct net_bridge *br)
2299 free_percpu(br->mcast_stats);
/* Accumulate one RX/TX counter pair from @src into @dst. */
2302 static void mcast_stats_add_dir(u64 *dst, u64 *src)
2304 dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
2305 dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
/* Aggregate the per-CPU multicast counters (port-level when @p is
 * given, else bridge-level) into @dest, taking a consistent snapshot
 * of each CPU via the u64_stats seqcount retry loop.
 */
2308 void br_multicast_get_stats(const struct net_bridge *br,
2309 const struct net_bridge_port *p,
2310 struct br_mcast_stats *dest)
2312 struct bridge_mcast_stats __percpu *stats;
2313 struct br_mcast_stats tdst;
2316 memset(dest, 0, sizeof(*dest));
/* per-port counters when a port context exists, else bridge-level */
2318 stats = p->mcast_stats;
2320 stats = br->mcast_stats;
2321 if (WARN_ON(!stats))
2324 memset(&tdst, 0, sizeof(tdst));
2325 for_each_possible_cpu(i) {
2326 struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
2327 struct br_mcast_stats temp;
/* retry until this CPU's counters were read without a concurrent
 * writer update
 */
2331 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
2332 memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
2333 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
2335 mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
2336 mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
2337 mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
2338 mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
2339 mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
2340 mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
2341 mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
2342 tdst.igmp_parse_errors += temp.igmp_parse_errors;
2344 mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
2345 mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
2346 mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
2347 mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
2348 mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
2349 tdst.mld_parse_errors += temp.mld_parse_errors;
2351 memcpy(dest, &tdst, sizeof(*dest));
/* Initialize the bridge's mdb rhashtable (keyed by struct br_ip). */
2354 int br_mdb_hash_init(struct net_bridge *br)
2356 return rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params);
/* Destroy the bridge's mdb rhashtable. */
2359 void br_mdb_hash_fini(struct net_bridge *br)
2361 rhashtable_destroy(&br->mdb_hash_tbl);