/*
 *	Linux IPv6 multicast routing support for BSD pim6sd
 *	Based on net/ipv4/ipmr.c.
 *
 *	(c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
 *		LSIIT Laboratory, Strasbourg, France
 *	(c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
 *	Copyright (C)2007,2008 USAGI/WIDE Project
 *		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <net/ip6_route.h>
#include <linux/mroute6.h>
#include <linux/pim.h>
#include <net/addrconf.h>
#include <linux/netfilter_ipv6.h>
#include <linux/export.h>
#include <net/ip6_checksum.h>
#include <linux/netconf.h>
struct mr6_table {
	struct list_head	list;
	struct sock		*mroute6_sk;
	struct timer_list	ipmr_expire_timer;
	struct list_head	mfc6_unres_queue;
	struct list_head	mfc6_cache_array[MFC6_LINES];
	struct mif_device	vif6_table[MAXMIFS];
	atomic_t		cache_resolve_queue_len;
	bool			mroute_do_assert;
#ifdef CONFIG_IPV6_PIMSM_V2
	int			mroute_reg_vif_num;
#endif
};
struct ip6mr_rule {
	struct fib_rule		common;
};

struct ip6mr_result {
	struct mr6_table	*mrt;
};
/* Big lock, protecting vif table, mrt cache and mroute socket state.
 * Note that changes are serialized via rtnl_lock.
 */
static DEFINE_RWLOCK(mrt_lock);
/* Multicast router control variables */

#define MIF_EXISTS(_mrt, _idx) ((_mrt)->vif6_table[_idx].dev != NULL)

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);
/* We return to Alan's original scheme. The hash table of resolved
 * entries is changed only in process context and is protected by the
 * weak lock mrt_lock. The queue of unresolved entries is protected by
 * the strong spinlock mfc_unres_lock.
 *
 * This way the data path is entirely free of exclusive locks.
 */
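/* An illustrative sketch (not a complete locking contract) of the
 * resulting pattern: readers on the forwarding path take the weak
 * reader lock, while control-path writers update under the strong
 * writer lock, serialized by rtnl_lock():
 *
 *	read_lock(&mrt_lock);			// data path lookup
 *	c = ip6mr_cache_find(mrt, &src, &grp);
 *	read_unlock(&mrt_lock);
 *
 *	write_lock_bh(&mrt_lock);		// control path, under RTNL
 *	list_add(&c->list, &mrt->mfc6_cache_array[line]);
 *	write_unlock_bh(&mrt_lock);
 */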
static struct kmem_cache *mrt_cachep __read_mostly;
static struct mr6_table *ip6mr_new_table(struct net *net, u32 id);
static void ip6mr_free_table(struct mr6_table *mrt);

static void ip6_mr_forward(struct net *net, struct mr6_table *mrt,
			   struct sk_buff *skb, struct mfc6_cache *cache);
static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert);
static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
			       struct mfc6_cache *c, struct rtmsg *rtm);
static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
			      int cmd);
static void mrt6msg_netlink_event(struct mr6_table *mrt, struct sk_buff *pkt);
static int ip6mr_rtm_dumproute(struct sk_buff *skb,
			       struct netlink_callback *cb);
static void mroute_clean_tables(struct mr6_table *mrt, bool all);
static void ipmr_expire_process(struct timer_list *t);
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
#define ip6mr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)
static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
{
	struct mr6_table *mrt;

	ip6mr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}
static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr6_table **mrt)
{
	int err;
	struct ip6mr_result res;
	struct fib_lookup_arg arg = {
		.result = &res,
		.flags = FIB_LOOKUP_NOREF,
	};

	err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
			       flowi6_to_flowi(flp6), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}
static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
			     int flags, struct fib_lookup_arg *arg)
{
	struct ip6mr_result *res = arg->result;
	struct mr6_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	mrt = ip6mr_get_table(rule->fr_net, rule->table);
	if (!mrt)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}
static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
{
	return 1;
}

static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};

static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
				struct fib_rule_hdr *frh, struct nlattr **tb)
{
	return 0;
}

static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			      struct nlattr **tb)
{
	return 1;
}

static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			   struct fib_rule_hdr *frh)
{
	frh->dst_len = 128;
	frh->src_len = 128;
	frh->tos     = 0;
	return 0;
}
static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = {
	.family		= RTNL_FAMILY_IP6MR,
	.rule_size	= sizeof(struct ip6mr_rule),
	.addr_size	= sizeof(struct in6_addr),
	.action		= ip6mr_rule_action,
	.match		= ip6mr_rule_match,
	.configure	= ip6mr_rule_configure,
	.compare	= ip6mr_rule_compare,
	.fill		= ip6mr_rule_fill,
	.nlgroup	= RTNLGRP_IPV6_RULE,
	.policy		= ip6mr_rule_policy,
	.owner		= THIS_MODULE,
};
static int __net_init ip6mr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr6_table *mrt;
	int err;

	ops = fib_rules_register(&ip6mr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv6.mr6_tables);

	mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
	if (!mrt) {
		err = -ENOMEM;
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
	if (err < 0)
		goto err2;

	net->ipv6.mr6_rules_ops = ops;
	return 0;

err2:
	ip6mr_free_table(mrt);
err1:
	fib_rules_unregister(ops);
	return err;
}
static void __net_exit ip6mr_rules_exit(struct net *net)
{
	struct mr6_table *mrt, *next;

	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
		list_del(&mrt->list);
		ip6mr_free_table(mrt);
	}
	fib_rules_unregister(net->ipv6.mr6_rules_ops);
}
#else
#define ip6mr_for_each_table(mrt, net) \
	for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)

static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
{
	return net->ipv6.mrt6;
}

static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr6_table **mrt)
{
	*mrt = net->ipv6.mrt6;
	return 0;
}

static int __net_init ip6mr_rules_init(struct net *net)
{
	net->ipv6.mrt6 = ip6mr_new_table(net, RT6_TABLE_DFLT);
	return net->ipv6.mrt6 ? 0 : -ENOMEM;
}

static void __net_exit ip6mr_rules_exit(struct net *net)
{
	ip6mr_free_table(net->ipv6.mrt6);
	net->ipv6.mrt6 = NULL;
}
#endif
static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
{
	struct mr6_table *mrt;
	unsigned int i;

	mrt = ip6mr_get_table(net, id);
	if (mrt)
		return mrt;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (!mrt)
		return NULL;
	mrt->id = id;
	write_pnet(&mrt->net, net);

	/* Forwarding cache */
	for (i = 0; i < MFC6_LINES; i++)
		INIT_LIST_HEAD(&mrt->mfc6_cache_array[i]);

	INIT_LIST_HEAD(&mrt->mfc6_unres_queue);

	timer_setup(&mrt->ipmr_expire_timer, ipmr_expire_process, 0);

#ifdef CONFIG_IPV6_PIMSM_V2
	mrt->mroute_reg_vif_num = -1;
#endif
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
#endif
	return mrt;
}
static void ip6mr_free_table(struct mr6_table *mrt)
{
	del_timer_sync(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt, true);
	kfree(mrt);
}
#ifdef CONFIG_PROC_FS

struct ipmr_mfc_iter {
	struct seq_net_private p;
	struct mr6_table *mrt;
	struct list_head *cache;
	int ct;
};

static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net,
					   struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mr6_table *mrt = it->mrt;
	struct mfc6_cache *mfc;

	read_lock(&mrt_lock);
	for (it->ct = 0; it->ct < MFC6_LINES; it->ct++) {
		it->cache = &mrt->mfc6_cache_array[it->ct];
		list_for_each_entry(mfc, it->cache, list)
			if (pos-- == 0)
				return mfc;
	}
	read_unlock(&mrt_lock);

	spin_lock_bh(&mfc_unres_lock);
	it->cache = &mrt->mfc6_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}
/* The /proc interfaces to multicast routing:
 * /proc/ip6_mr_cache and /proc/ip6_mr_vif
 */

struct ipmr_vif_iter {
	struct seq_net_private p;
	struct mr6_table *mrt;
	int ct;
};

static struct mif_device *ip6mr_vif_seq_idx(struct net *net,
					    struct ipmr_vif_iter *iter,
					    loff_t pos)
{
	struct mr6_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!MIF_EXISTS(mrt, iter->ct))
			continue;
		if (pos-- == 0)
			return &mrt->vif6_table[iter->ct];
	}
	return NULL;
}
static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;

	read_lock(&mrt_lock);
	return *pos ? ip6mr_vif_seq_idx(net, seq->private, *pos - 1)
		    : SEQ_START_TOKEN;
}
static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt = iter->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ip6mr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!MIF_EXISTS(mrt, iter->ct))
			continue;
		return &mrt->vif6_table[iter->ct];
	}
	return NULL;
}

static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&mrt_lock);
}
static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct mr6_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface BytesIn PktsIn BytesOut PktsOut Flags\n");
	} else {
		const struct mif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2td %-10s %8ld %7ld %8ld %7ld %05X\n",
			   vif - mrt->vif6_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags);
	}
	return 0;
}
static const struct seq_operations ip6mr_vif_seq_ops = {
	.start = ip6mr_vif_seq_start,
	.next  = ip6mr_vif_seq_next,
	.stop  = ip6mr_vif_seq_stop,
	.show  = ip6mr_vif_seq_show,
};

static int ip6mr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ip6mr_vif_seq_ops,
			    sizeof(struct ipmr_vif_iter));
}

static const struct file_operations ip6mr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ip6mr_vif_open,
	.release = seq_release_net,
};
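/* Illustrative only (all counter values made up): with a single mif
 * registered on eth0, reading /proc/ip6_mr_vif yields output shaped
 * roughly like
 *
 *	Interface BytesIn PktsIn BytesOut PktsOut Flags
 *	 0 eth0       1200     10     2400      20 00000
 */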
static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	it->mrt = mrt;
	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
		    : SEQ_START_TOKEN;
}
static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mfc6_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt = it->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(net, seq->private, 0);

	if (mfc->list.next != it->cache)
		return list_entry(mfc->list.next, struct mfc6_cache, list);

	if (it->cache == &mrt->mfc6_unres_queue)
		goto end_of_list;

	BUG_ON(it->cache != &mrt->mfc6_cache_array[it->ct]);

	while (++it->ct < MFC6_LINES) {
		it->cache = &mrt->mfc6_cache_array[it->ct];
		if (list_empty(it->cache))
			continue;
		return list_first_entry(it->cache, struct mfc6_cache, list);
	}

	/* exhausted cache_array, show unresolved */
	read_unlock(&mrt_lock);
	it->cache = &mrt->mfc6_unres_queue;

	spin_lock_bh(&mfc_unres_lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mfc6_cache, list);

end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;
	return NULL;
}
static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct mr6_table *mrt = it->mrt;

	if (it->cache == &mrt->mfc6_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == &mrt->mfc6_cache_array[it->ct])
		read_unlock(&mrt_lock);
}
static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group Origin Iif Pkts Bytes Wrong Oifs\n");
	} else {
		const struct mfc6_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;
		struct mr6_table *mrt = it->mrt;

		seq_printf(seq, "%pI6 %pI6 %-3hd",
			   &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
			   mfc->mf6c_parent);

		if (it->cache != &mrt->mfc6_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->mfc_un.res.pkt,
				   mfc->mfc_un.res.bytes,
				   mfc->mfc_un.res.wrong_if);
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (MIF_EXISTS(mrt, n) &&
				    mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq, " %2d:%-3d",
						   n, mfc->mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
	}
	return 0;
}
static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
			    sizeof(struct ipmr_mfc_iter));
}

static const struct file_operations ip6mr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ipmr_mfc_open,
	.release = seq_release_net,
};
#endif
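/* Illustrative only (addresses and counters made up): a resolved entry
 * in /proc/ip6_mr_cache comes out roughly as
 *
 *	Group Origin Iif Pkts Bytes Wrong Oifs
 *	ff3e::1 2001:db8::1 1         10     1200        0  2:1  3:1
 *
 * where each "mif:ttl" pair lists an outgoing interface and its TTL
 * threshold.
 */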
#ifdef CONFIG_IPV6_PIMSM_V2

static int pim6_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct ipv6hdr *encap;
	struct net_device *reg_dev = NULL;
	struct net *net = dev_net(skb->dev);
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};
	int reg_vif_num;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | PIM_TYPE_REGISTER) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			     sizeof(*pim), IPPROTO_PIM,
			     csum_partial((void *)pim, sizeof(*pim), 0)) &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	/* check if the inner packet is destined to a multicast group */
	encap = (struct ipv6hdr *)(skb_transport_header(skb) +
				   sizeof(*pim));
	if (!ipv6_addr_is_multicast(&encap->daddr) ||
	    encap->payload_len == 0 ||
	    ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
		goto drop;

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		goto drop;
	reg_vif_num = mrt->mroute_reg_vif_num;

	read_lock(&mrt_lock);
	if (reg_vif_num >= 0)
		reg_dev = mrt->vif6_table[reg_vif_num].dev;
	if (reg_dev)
		dev_hold(reg_dev);
	read_unlock(&mrt_lock);

	if (!reg_dev)
		goto drop;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IPV6);
	skb->ip_summed = CHECKSUM_NONE;

	skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));

	netif_rx(skb);

	dev_put(reg_dev);
	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
static const struct inet6_protocol pim6_protocol = {
	.handler	= pim6_rcv,
};

/* Service routines creating virtual interfaces: PIMREG */
static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_oif	= dev->ifindex,
		.flowi6_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi6_mark	= skb->mark,
	};
	int err;

	err = ip6mr_fib_lookup(net, &fl6, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
static int reg_vif_get_iflink(const struct net_device *dev)
{
	return 0;
}

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
	.ndo_get_iflink = reg_vif_get_iflink,
};

static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= 1500 - sizeof(struct ipv6hdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->needs_free_netdev	= true;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}
static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
{
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT6_TABLE_DFLT)
		sprintf(name, "pim6reg");
	else
		sprintf(name, "pim6reg%u", mrt->id);

	dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}

	if (dev_open(dev))
		goto failure;

	dev_hold(dev);
	return dev;

failure:
	unregister_netdevice(dev);
	return NULL;
}
static int mif6_delete(struct mr6_table *mrt, int vifi, int notify,
		       struct list_head *head)
{
	struct mif_device *v;
	struct net_device *dev;
	struct inet6_dev *in6_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif6_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;
#endif

	if (vifi + 1 == mrt->maxvif) {
		int tmp;
		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (MIF_EXISTS(mrt, tmp))
				break;
		}
		mrt->maxvif = tmp + 1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in6_dev = __in6_dev_get(dev);
	if (in6_dev) {
		in6_dev->cnf.mc_forwarding--;
		inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
					     NETCONFA_MC_FORWARDING,
					     dev->ifindex, &in6_dev->cnf);
	}

	if ((v->flags & MIFF_REGISTER) && !notify)
		unregister_netdevice_queue(dev, head);

	dev_put(dev);
	return 0;
}
static inline void ip6mr_cache_free(struct mfc6_cache *c)
{
	kmem_cache_free(mrt_cachep, c);
}

/* Destroy an unresolved cache entry, killing queued skbs
 * and reporting the error to netlink readers.
 */
static void ip6mr_destroy_unres(struct mr6_table *mrt, struct mfc6_cache *c)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;

	atomic_dec(&mrt->cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct ipv6hdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			((struct nlmsgerr *)nlmsg_data(nlh))->error = -ETIMEDOUT;
			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else
			kfree_skb(skb);
	}

	ip6mr_cache_free(c);
}
/* Timer process for the unresolved queue. */

static void ipmr_do_expire_process(struct mr6_table *mrt)
{
	unsigned long now = jiffies;
	unsigned long expires = 10 * HZ;
	struct mfc6_cache *c, *next;

	list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			/* not yet... */
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			continue;
		}

		list_del(&c->list);
		mr6_netlink_event(mrt, c, RTM_DELROUTE);
		ip6mr_destroy_unres(mrt, c);
	}

	if (!list_empty(&mrt->mfc6_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
}
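/* Worked example of the timing: an entry queued at time t gets
 * expires = t + 10*HZ (see ip6mr_cache_alloc_unres() below). If user
 * space never resolves it, this timer fires within those ten seconds,
 * the entry is announced dead via RTM_DELROUTE and destroyed, and any
 * skbs still queued on it are answered with -ETIMEDOUT netlink errors
 * by ip6mr_destroy_unres().
 */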
static void ipmr_expire_process(struct timer_list *t)
{
	struct mr6_table *mrt = from_timer(mrt, t, ipmr_expire_timer);

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
		return;
	}

	if (!list_empty(&mrt->mfc6_unres_queue))
		ipmr_do_expire_process(mrt);

	spin_unlock(&mfc_unres_lock);
}
/* Fill the oifs list. Called under write-locked mrt_lock. */

static void ip6mr_update_thresholds(struct mr6_table *mrt,
				    struct mfc6_cache *cache,
				    unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXMIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXMIFS);

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (MIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
	cache->mfc_un.res.lastuse = jiffies;
}
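/* Worked example (hypothetical input): with maxvif = 4 and
 * ttls = {0, 2, 255, 3}, only mifs 1 and 3 qualify (non-zero and
 * below 255), so res.ttls becomes {255, 2, 255, 3}, minvif = 1 and
 * maxvif = 4. The forwarding loop in ip6_mr_forward() then scans
 * exactly the half-open range [minvif, maxvif), top down.
 */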
static int mif6_add(struct net *net, struct mr6_table *mrt,
		    struct mif6ctl *vifc, int mrtsock)
{
	int vifi = vifc->mif6c_mifi;
	struct mif_device *v = &mrt->vif6_table[vifi];
	struct net_device *dev;
	struct inet6_dev *in6_dev;
	int err;

	/* Is vif busy ? */
	if (MIF_EXISTS(mrt, vifi))
		return -EADDRINUSE;

	switch (vifc->mif6c_flags) {
#ifdef CONFIG_IPV6_PIMSM_V2
	case MIFF_REGISTER:
		/*
		 * Special-purpose VIF used by PIM:
		 * all the packets will be sent to the daemon.
		 */
		if (mrt->mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ip6mr_reg_vif(net, mrt);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
#endif
	case 0:
		dev = dev_get_by_index(net, vifc->mif6c_pifi);
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in6_dev = __in6_dev_get(dev);
	if (in6_dev) {
		in6_dev->cnf.mc_forwarding++;
		inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
					     NETCONFA_MC_FORWARDING,
					     dev->ifindex, &in6_dev->cnf);
	}

	/* Fill in the VIF structures */
	v->rate_limit = vifc->vifc_rate_limit;
	v->flags = vifc->mif6c_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->link = dev->ifindex;
	if (v->flags & MIFF_REGISTER)
		v->link = dev_get_iflink(dev);

	/* Finish the update by writing the critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (v->flags & MIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
#endif
	if (vifi + 1 > mrt->maxvif)
		mrt->maxvif = vifi + 1;
	write_unlock_bh(&mrt_lock);
	return 0;
}
static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt,
					   const struct in6_addr *origin,
					   const struct in6_addr *mcastgrp)
{
	int line = MFC6_HASH(mcastgrp, origin);
	struct mfc6_cache *c;

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
		if (ipv6_addr_equal(&c->mf6c_origin, origin) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp))
			return c;
	}
	return NULL;
}
/* Look for a (*,*,oif) entry */
static struct mfc6_cache *ip6mr_cache_find_any_parent(struct mr6_table *mrt,
						      mifi_t mifi)
{
	int line = MFC6_HASH(&in6addr_any, &in6addr_any);
	struct mfc6_cache *c;

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list)
		if (ipv6_addr_any(&c->mf6c_origin) &&
		    ipv6_addr_any(&c->mf6c_mcastgrp) &&
		    (c->mfc_un.res.ttls[mifi] < 255))
			return c;

	return NULL;
}

/* Look for a (*,G) entry */
static struct mfc6_cache *ip6mr_cache_find_any(struct mr6_table *mrt,
					       struct in6_addr *mcastgrp,
					       mifi_t mifi)
{
	int line = MFC6_HASH(mcastgrp, &in6addr_any);
	struct mfc6_cache *c, *proxy;

	if (ipv6_addr_any(mcastgrp))
		goto skip;

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list)
		if (ipv6_addr_any(&c->mf6c_origin) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp)) {
			if (c->mfc_un.res.ttls[mifi] < 255)
				return c;

			/* It's ok if the mifi is part of the static tree */
			proxy = ip6mr_cache_find_any_parent(mrt,
							    c->mf6c_parent);
			if (proxy && proxy->mfc_un.res.ttls[mifi] < 255)
				return c;
		}

skip:
	return ip6mr_cache_find_any_parent(mrt, mifi);
}
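/* Lookup order on the input path (see ip6_mr_input() below): a packet
 * is first matched as an exact (S,G) entry via ip6mr_cache_find();
 * failing that, as a (*,G) entry here; failing that, as a (*,*) entry
 * via the parent lookup above. For example (made-up addresses), a
 * packet from 2001:db8::1 to ff3e::1 with no exact entry can still be
 * forwarded by a (*,G) entry for ff3e::1 or, as a last resort, by a
 * (*,*) wildcard entry.
 */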
/* Allocate a multicast cache entry */
static struct mfc6_cache *ip6mr_cache_alloc(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
	if (!c)
		return NULL;
	c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
	c->mfc_un.res.minvif = MAXMIFS;
	return c;
}

static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
	if (!c)
		return NULL;
	skb_queue_head_init(&c->mfc_un.unres.unresolved);
	c->mfc_un.unres.expires = jiffies + 10 * HZ;
	return c;
}
/* A cache entry has gone from queued into a resolved state. */
static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt,
				struct mfc6_cache *uc, struct mfc6_cache *c)
{
	struct sk_buff *skb;

	/* Play the pending entries through our router */

	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct ipv6hdr));

			if (__ip6mr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				((struct nlmsgerr *)nlmsg_data(nlh))->error = -EMSGSIZE;
			}
			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else
			ip6_mr_forward(net, mrt, skb, c);
	}
}
/* Bounce a cache query up to pim6sd and netlink.
 *
 * Called under mrt_lock.
 */
static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert)
{
	struct sk_buff *skb;
	struct mrt6msg *msg;
	int ret;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
					   + sizeof(*msg));
	else
#endif
		skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	/* Internal messages do not require checksums */
	skb->ip_summed = CHECKSUM_UNNECESSARY;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		 * Duplicate the old header, fix the length etc.,
		 * and all of this only to mangle msg->im6_msgtype and
		 * to set msg->im6_mbz to "mbz" :-)
		 */
		skb_push(skb, -skb_network_offset(pkt));

		skb_push(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);
		msg->im6_mbz = 0;
		msg->im6_msgtype = MRT6MSG_WHOLEPKT;
		msg->im6_mif = mrt->mroute_reg_vif_num;
		msg->im6_pad = 0;
		msg->im6_src = ipv6_hdr(pkt)->saddr;
		msg->im6_dst = ipv6_hdr(pkt)->daddr;

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
#endif
	{
		/* Copy the IP header */
		skb_put(skb, sizeof(struct ipv6hdr));
		skb_reset_network_header(skb);
		skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));

		/* Add our header */
		skb_put(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);

		msg->im6_mbz = 0;
		msg->im6_msgtype = assert;
		msg->im6_mif = mifi;
		msg->im6_pad = 0;
		msg->im6_src = ipv6_hdr(pkt)->saddr;
		msg->im6_dst = ipv6_hdr(pkt)->daddr;

		skb_dst_set(skb, dst_clone(skb_dst(pkt)));
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (!mrt->mroute6_sk) {
		kfree_skb(skb);
		return -EINVAL;
	}

	mrt6msg_netlink_event(mrt, skb);

	/* Deliver to user-space multicast routing algorithms */
	ret = sock_queue_rcv_skb(mrt->mroute6_sk, skb);
	if (ret < 0) {
		net_warn_ratelimited("mroute6: pending queue full, dropping entries\n");
		kfree_skb(skb);
	}

	return ret;
}
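/* Resulting message layouts, both handed to mrt->mroute6_sk and
 * mirrored to netlink via mrt6msg_netlink_event():
 *
 *   MRT6MSG_WHOLEPKT:  [ mrt6msg | original packet from its network
 *			  header onward ]
 *   other asserts:	[ copy of the original IPv6 header | mrt6msg ]
 *
 * im6_msgtype tells the daemon why the packet was bounced (e.g.
 * MRT6MSG_NOCACHE for an unresolved route, MRT6MSG_WRONGMIF for a
 * wrong-interface assert), im6_mif names the mif, and im6_src/im6_dst
 * identify the flow it should resolve.
 */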
/* Queue a packet for resolution. It gets a locked cache entry! */
static int
ip6mr_cache_unresolved(struct mr6_table *mrt, mifi_t mifi, struct sk_buff *skb)
{
	bool found = false;
	int err;
	struct mfc6_cache *c;

	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(c, &mrt->mfc6_unres_queue, list) {
		if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
			found = true;
			break;
		}
	}

	if (!found) {
		/* Create a new entry if allowable */
		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
		    (c = ip6mr_cache_alloc_unres()) == NULL) {
			spin_unlock_bh(&mfc_unres_lock);
			kfree_skb(skb);
			return -ENOBUFS;
		}

		/* Fill in the new cache entry */
		c->mf6c_parent = -1;
		c->mf6c_origin = ipv6_hdr(skb)->saddr;
		c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;

		/* Reflect the first query at pim6sd */
		err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
		if (err < 0) {
			/* If the report failed, throw the cache entry away. */
			spin_unlock_bh(&mfc_unres_lock);
			ip6mr_cache_free(c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&mrt->cache_resolve_queue_len);
		list_add(&c->list, &mrt->mfc6_unres_queue);
		mr6_netlink_event(mrt, c, RTM_NEWROUTE);

		ipmr_do_expire_process(mrt);
	}

	/* See if we can append the packet */
	if (c->mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}
/* MFC6 cache manipulation by user space */
static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc,
			    int parent)
{
	int line;
	struct mfc6_cache *c, *next;

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[line], list) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp,
				    &mfc->mf6cc_mcastgrp.sin6_addr) &&
		    (parent == -1 || parent == c->mf6c_parent)) {
			write_lock_bh(&mrt_lock);
			list_del(&c->list);
			write_unlock_bh(&mrt_lock);

			mr6_netlink_event(mrt, c, RTM_DELROUTE);
			ip6mr_cache_free(c);
			return 0;
		}
	}
	return -ENOENT;
}
static int ip6mr_device_event(struct notifier_block *this,
			      unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct mr6_table *mrt;
	struct mif_device *v;
	int ct;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	ip6mr_for_each_table(mrt, net) {
		v = &mrt->vif6_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
			if (v->dev == dev)
				mif6_delete(mrt, ct, 1, NULL);
		}
	}

	return NOTIFY_DONE;
}

static struct notifier_block ip6_mr_notifier = {
	.notifier_call = ip6mr_device_event
};
/* Setup for IPv6 multicast routing */
static int __net_init ip6mr_net_init(struct net *net)
{
	int err;

	err = ip6mr_rules_init(net);
	if (err < 0)
		goto fail;

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_create("ip6_mr_vif", 0, net->proc_net, &ip6mr_vif_fops))
		goto proc_vif_fail;
	if (!proc_create("ip6_mr_cache", 0, net->proc_net, &ip6mr_mfc_fops))
		goto proc_cache_fail;
#endif

	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	remove_proc_entry("ip6_mr_vif", net->proc_net);
proc_vif_fail:
	ip6mr_rules_exit(net);
#endif
fail:
	return err;
}

static void __net_exit ip6mr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ip6_mr_cache", net->proc_net);
	remove_proc_entry("ip6_mr_vif", net->proc_net);
#endif
	ip6mr_rules_exit(net);
}

static struct pernet_operations ip6mr_net_ops = {
	.init = ip6mr_net_init,
	.exit = ip6mr_net_exit,
};
int __init ip6_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip6_mrt_cache",
				       sizeof(struct mfc6_cache),
				       0, SLAB_HWCACHE_ALIGN,
				       NULL);
	if (!mrt_cachep)
		return -ENOMEM;

	err = register_pernet_subsys(&ip6mr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip6_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
		pr_err("%s: can't add PIM protocol\n", __func__);
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	rtnl_register(RTNL_FAMILY_IP6MR, RTM_GETROUTE, NULL,
		      ip6mr_rtm_dumproute, 0);
	return 0;
#ifdef CONFIG_IPV6_PIMSM_V2
add_proto_fail:
	unregister_netdevice_notifier(&ip6_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ip6mr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}
void ip6_mr_cleanup(void)
{
	rtnl_unregister(RTNL_FAMILY_IP6MR, RTM_GETROUTE);
#ifdef CONFIG_IPV6_PIMSM_V2
	inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
#endif
	unregister_netdevice_notifier(&ip6_mr_notifier);
	unregister_pernet_subsys(&ip6mr_net_ops);
	kmem_cache_destroy(mrt_cachep);
}
static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
			 struct mf6cctl *mfc, int mrtsock, int parent)
{
	bool found = false;
	int line;
	struct mfc6_cache *uc, *c;
	unsigned char ttls[MAXMIFS];
	int i;

	if (mfc->mf6cc_parent >= MAXMIFS)
		return -ENFILE;

	memset(ttls, 255, MAXMIFS);
	for (i = 0; i < MAXMIFS; i++) {
		if (IF_ISSET(i, &mfc->mf6cc_ifset))
			ttls[i] = 1;
	}

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp,
				    &mfc->mf6cc_mcastgrp.sin6_addr) &&
		    (parent == -1 || parent == mfc->mf6cc_parent)) {
			found = true;
			break;
		}
	}

	if (found) {
		write_lock_bh(&mrt_lock);
		c->mf6c_parent = mfc->mf6cc_parent;
		ip6mr_update_thresholds(mrt, c, ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		mr6_netlink_event(mrt, c, RTM_NEWROUTE);
		return 0;
	}

	if (!ipv6_addr_any(&mfc->mf6cc_mcastgrp.sin6_addr) &&
	    !ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
		return -EINVAL;

	c = ip6mr_cache_alloc();
	if (!c)
		return -ENOMEM;

	c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
	c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
	c->mf6c_parent = mfc->mf6cc_parent;
	ip6mr_update_thresholds(mrt, c, ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	write_lock_bh(&mrt_lock);
	list_add(&c->list, &mrt->mfc6_cache_array[line]);
	write_unlock_bh(&mrt_lock);

	/* Check to see if we resolved a queued list. If so we
	 * need to send on the frames and tidy up.
	 */
	found = false;
	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(uc, &mrt->mfc6_unres_queue, list) {
		if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
		    ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
			list_del(&uc->list);
			atomic_dec(&mrt->cache_resolve_queue_len);
			found = true;
			break;
		}
	}
	if (list_empty(&mrt->mfc6_unres_queue))
		del_timer(&mrt->ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);

	if (found) {
		ip6mr_cache_resolve(net, mrt, uc, c);
		ip6mr_cache_free(uc);
	}
	mr6_netlink_event(mrt, c, RTM_NEWROUTE);
	return 0;
}
/* Close the multicast socket, and clear the vif tables etc. */
static void mroute_clean_tables(struct mr6_table *mrt, bool all)
{
	int i;
	LIST_HEAD(list);
	struct mfc6_cache *c, *next;

	/* Shut down all active vif entries */
	for (i = 0; i < mrt->maxvif; i++) {
		if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
			continue;
		mif6_delete(mrt, i, 0, &list);
	}
	unregister_netdevice_many(&list);

	/* Wipe the cache */
	for (i = 0; i < MFC6_LINES; i++) {
		list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
			if (!all && (c->mfc_flags & MFC_STATIC))
				continue;
			write_lock_bh(&mrt_lock);
			list_del(&c->list);
			write_unlock_bh(&mrt_lock);

			mr6_netlink_event(mrt, c, RTM_DELROUTE);
			ip6mr_cache_free(c);
		}
	}

	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
			list_del(&c->list);
			mr6_netlink_event(mrt, c, RTM_DELROUTE);
			ip6mr_destroy_unres(mrt, c);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}
static int ip6mr_sk_init(struct mr6_table *mrt, struct sock *sk)
{
	int err = 0;
	struct net *net = sock_net(sk);

	rtnl_lock();
	write_lock_bh(&mrt_lock);
	if (likely(mrt->mroute6_sk == NULL)) {
		mrt->mroute6_sk = sk;
		net->ipv6.devconf_all->mc_forwarding++;
	} else {
		err = -EADDRINUSE;
	}
	write_unlock_bh(&mrt_lock);

	if (!err)
		inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
					     NETCONFA_MC_FORWARDING,
					     NETCONFA_IFINDEX_ALL,
					     net->ipv6.devconf_all);
	rtnl_unlock();

	return err;
}
int ip6mr_sk_done(struct sock *sk)
{
	int err = -EACCES;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
		return err;

	rtnl_lock();
	ip6mr_for_each_table(mrt, net) {
		if (sk == mrt->mroute6_sk) {
			write_lock_bh(&mrt_lock);
			mrt->mroute6_sk = NULL;
			net->ipv6.devconf_all->mc_forwarding--;
			write_unlock_bh(&mrt_lock);
			inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
						     NETCONFA_MC_FORWARDING,
						     NETCONFA_IFINDEX_ALL,
						     net->ipv6.devconf_all);

			mroute_clean_tables(mrt, false);
			err = 0;
			break;
		}
	}
	rtnl_unlock();

	return err;
}
struct sock *mroute6_socket(struct net *net, struct sk_buff *skb)
{
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi6_oif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		return NULL;

	return mrt->mroute6_sk;
}
/* Socket options and virtual interface manipulation. The whole
 * virtual interface system is a complete heap, but unfortunately
 * that's how BSD mrouted happens to think. Maybe one day with a proper
 * MOSPF/PIM router set up we can clean this up.
 * (A user-space usage sketch follows this function.)
 */

int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
			  unsigned int optlen)
{
	int ret, parent = 0;
	struct mif6ctl vif;
	struct mf6cctl mfc;
	mifi_t mifi;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
		return -EOPNOTSUPP;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	if (optname != MRT6_INIT) {
		if (sk != mrt->mroute6_sk && !ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EACCES;
	}

	switch (optname) {
	case MRT6_INIT:
		if (optlen < sizeof(int))
			return -EINVAL;
		return ip6mr_sk_init(mrt, sk);

	case MRT6_DONE:
		return ip6mr_sk_done(sk);

	case MRT6_ADD_MIF:
		if (optlen < sizeof(vif))
			return -EINVAL;
		if (copy_from_user(&vif, optval, sizeof(vif)))
			return -EFAULT;
		if (vif.mif6c_mifi >= MAXMIFS)
			return -ENFILE;
		rtnl_lock();
		ret = mif6_add(net, mrt, &vif, sk == mrt->mroute6_sk);
		rtnl_unlock();
		return ret;

	case MRT6_DEL_MIF:
		if (optlen < sizeof(mifi_t))
			return -EINVAL;
		if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
			return -EFAULT;
		rtnl_lock();
		ret = mif6_delete(mrt, mifi, 0, NULL);
		rtnl_unlock();
		return ret;

	/* Manipulate the forwarding caches. These live
	 * in a sort of kernel/user symbiosis.
	 */
	case MRT6_ADD_MFC:
	case MRT6_DEL_MFC:
		parent = -1;
		/* fall through */
	case MRT6_ADD_MFC_PROXY:
	case MRT6_DEL_MFC_PROXY:
		if (optlen < sizeof(mfc))
			return -EINVAL;
		if (copy_from_user(&mfc, optval, sizeof(mfc)))
			return -EFAULT;
		if (parent == 0)
			parent = mfc.mf6cc_parent;
		rtnl_lock();
		if (optname == MRT6_DEL_MFC || optname == MRT6_DEL_MFC_PROXY)
			ret = ip6mr_mfc_delete(mrt, &mfc, parent);
		else
			ret = ip6mr_mfc_add(net, mrt, &mfc,
					    sk == mrt->mroute6_sk, parent);
		rtnl_unlock();
		return ret;

	/* Control PIM assert (activating PIM also activates assert) */
	case MRT6_ASSERT:
	{
		int v;

		if (optlen != sizeof(v))
			return -EINVAL;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		mrt->mroute_do_assert = v;
		return 0;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
	{
		int v;

		if (optlen != sizeof(v))
			return -EINVAL;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		v = !!v;
		ret = 0;
		if (v != mrt->mroute_do_pim) {
			mrt->mroute_do_pim = v;
			mrt->mroute_do_assert = v;
		}
		return ret;
	}
#endif

#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	case MRT6_TABLE:
	{
		u32 v;

		if (optlen != sizeof(u32))
			return -EINVAL;
		if (get_user(v, (u32 __user *)optval))
			return -EFAULT;
		/* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */
		if (v != RT_TABLE_DEFAULT && v >= 100000000)
			return -EINVAL;
		if (sk == mrt->mroute6_sk)
			return -EBUSY;

		ret = 0;
		if (!ip6mr_new_table(net, v))
			ret = -ENOMEM;
		else
			raw6_sk(sk)->ip6mr_table = v;
		return ret;
	}
#endif
	/* Spurious command, or MRT6_VERSION which you cannot set. */
	default:
		return -ENOPROTOOPT;
	}
}
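/* A minimal user-space sketch (illustrative only; error handling is
 * omitted, and the interface name "eth0", mif number 0 and the src/grp
 * sockaddr_in6 values are assumptions) of the MRT6_INIT / MRT6_ADD_MIF
 * / MRT6_ADD_MFC dance a pim6sd-like daemon performs against this
 * function:
 *
 *	int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *	int on = 1;
 *	setsockopt(fd, IPPROTO_IPV6, MRT6_INIT, &on, sizeof(on));
 *
 *	struct mif6ctl mif = { 0 };
 *	mif.mif6c_mifi = 0;
 *	mif.mif6c_pifi = if_nametoindex("eth0");
 *	setsockopt(fd, IPPROTO_IPV6, MRT6_ADD_MIF, &mif, sizeof(mif));
 *
 *	struct mf6cctl mfc = { 0 };
 *	mfc.mf6cc_origin = src;		// struct sockaddr_in6, the (S)
 *	mfc.mf6cc_mcastgrp = grp;	// struct sockaddr_in6, the (G)
 *	mfc.mf6cc_parent = 0;		// incoming mif
 *	IF_SET(0, &mfc.mf6cc_ifset);	// outgoing mif set
 *	setsockopt(fd, IPPROTO_IPV6, MRT6_ADD_MFC, &mfc, sizeof(mfc));
 *
 * In real use the outgoing mif set would normally name mifs other than
 * the parent.
 */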
/* Getsockopt support for the multicast routing system. */
int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
			  int __user *optlen)
{
	int olr;
	int val;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
		return -EOPNOTSUPP;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	switch (optname) {
#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
		val = mrt->mroute_do_pim;
		break;
#endif
	case MRT6_ASSERT:
		val = mrt->mroute_do_assert;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (get_user(olr, optlen))
		return -EFAULT;

	olr = min_t(int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;

	if (put_user(olr, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}
/* The IP multicast ioctl support routines.
 * (A user-space usage sketch follows this function.)
 */
int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req6 sr;
	struct sioc_mif_req6 vr;
	struct mif_device *vif;
	struct mfc6_cache *c;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif6_table[vr.mifi];
		if (MIF_EXISTS(mrt, vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		read_lock(&mrt_lock);
		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
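/* A minimal user-space sketch (illustrative only; "fd" is the daemon's
 * raw ICMPv6 socket and mif 0 is assumed to exist) of reading per-mif
 * counters through this ioctl:
 *
 *	struct sioc_mif_req6 vr = { .mifi = 0 };
 *	if (ioctl(fd, SIOCGETMIFCNT_IN6, &vr) == 0)
 *		printf("mif0: in %lu pkts, out %lu pkts\n",
 *		       vr.icount, vr.ocount);
 */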
#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req6 {
	struct sockaddr_in6 src;
	struct sockaddr_in6 grp;
	compat_ulong_t pktcnt;
	compat_ulong_t bytecnt;
	compat_ulong_t wrong_if;
};

struct compat_sioc_mif_req6 {
	mifi_t mifi;
	compat_ulong_t icount;
	compat_ulong_t ocount;
	compat_ulong_t ibytes;
	compat_ulong_t obytes;
};
int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	struct compat_sioc_sg_req6 sr;
	struct compat_sioc_mif_req6 vr;
	struct mif_device *vif;
	struct mfc6_cache *c;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif6_table[vr.mifi];
		if (MIF_EXISTS(mrt, vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		read_lock(&mrt_lock);
		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#endif
static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			IPSTATS_MIB_OUTFORWDATAGRAMS);
	__IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
			IPSTATS_MIB_OUTOCTETS, skb->len);
	return dst_output(net, sk, skb);
}
/* Processing handlers for ip6mr_forward */

static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
			  struct sk_buff *skb, struct mfc6_cache *c, int vifi)
{
	struct ipv6hdr *ipv6h;
	struct mif_device *vif = &mrt->vif6_table[vifi];
	struct net_device *dev;
	struct dst_entry *dst;
	struct flowi6 fl6;

	if (!vif->dev)
		goto out_free;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vif->flags & MIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
		goto out_free;
	}
#endif

	ipv6h = ipv6_hdr(skb);

	fl6 = (struct flowi6) {
		.flowi6_oif = vif->link,
		.daddr = ipv6h->daddr,
	};

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst->error) {
		dst_release(dst);
		goto out_free;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	/* RFC 1584 teaches that a DVMRP/PIM router must deliver packets
	 * locally not only before forwarding, but also after forwarding on
	 * all output interfaces. Clearly, if the mrouter runs a
	 * multicasting program, it should receive packets regardless of
	 * which interface the program has joined on.
	 * If we fail to do this, the program will have to join on all
	 * interfaces. On the other hand, a multihoming host (or a router,
	 * but not an mrouter) cannot join on more than one interface;
	 * that would result in receiving multiple packets.
	 */
	dev = vif->dev;
	skb->dev = dev;
	vif->pkt_out++;
	vif->bytes_out += skb->len;

	/* We are about to write */
	/* XXX: extension headers? */
	if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
		goto out_free;

	ipv6h = ipv6_hdr(skb);
	ipv6h->hop_limit--;

	IP6CB(skb)->flags |= IP6SKB_FORWARDED;

	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
		       net, NULL, skb, skb->dev, dev,
		       ip6mr_forward2_finish);

out_free:
	kfree_skb(skb);
	return 0;
}
static int ip6mr_find_vif(struct mr6_table *mrt, struct net_device *dev)
{
	int ct;

	for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
		if (mrt->vif6_table[ct].dev == dev)
			break;
	}
	return ct;
}
static void ip6_mr_forward(struct net *net, struct mr6_table *mrt,
			   struct sk_buff *skb, struct mfc6_cache *cache)
{
	int psend = -1;
	int vif, ct;
	int true_vifi = ip6mr_find_vif(mrt, skb->dev);

	vif = cache->mf6c_parent;
	cache->mfc_un.res.pkt++;
	cache->mfc_un.res.bytes += skb->len;
	cache->mfc_un.res.lastuse = jiffies;

	if (ipv6_addr_any(&cache->mf6c_origin) && true_vifi >= 0) {
		struct mfc6_cache *cache_proxy;

		/* For a (*,G) entry, we only check that the incoming
		 * interface is part of the static tree.
		 */
		cache_proxy = ip6mr_cache_find_any_parent(mrt, vif);
		if (cache_proxy &&
		    cache_proxy->mfc_un.res.ttls[true_vifi] < 255)
			goto forward;
	}

	/* Wrong interface: drop packet and (maybe) send PIM assert. */
	if (mrt->vif6_table[vif].dev != skb->dev) {
		cache->mfc_un.res.wrong_if++;

		if (true_vifi >= 0 && mrt->mroute_do_assert &&
		    /* pimsm uses asserts when switching from RPT to SPT,
		     * so we cannot check that the packet arrived on an oif.
		     * It is bad, but otherwise we would need to move a
		     * pretty large chunk of pimd into the kernel. Ough... --ANK
		     */
		    (mrt->mroute_do_pim ||
		     cache->mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
			cache->mfc_un.res.last_assert = jiffies;
			ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
		}
		goto dont_forward;
	}

forward:
	mrt->vif6_table[vif].pkt_in++;
	mrt->vif6_table[vif].bytes_in += skb->len;

	/* Forward the frame */
	if (ipv6_addr_any(&cache->mf6c_origin) &&
	    ipv6_addr_any(&cache->mf6c_mcastgrp)) {
		if (true_vifi >= 0 &&
		    true_vifi != cache->mf6c_parent &&
		    ipv6_hdr(skb)->hop_limit >
				cache->mfc_un.res.ttls[cache->mf6c_parent]) {
			/* It's a (*,*) entry and the packet is not coming
			 * from the upstream: forward the packet to the
			 * upstream only.
			 */
			psend = cache->mf6c_parent;
			goto last_forward;
		}
		goto dont_forward;
	}
	for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) {
		/* For a (*,G) entry, don't forward to the incoming interface */
		if ((!ipv6_addr_any(&cache->mf6c_origin) || ct != true_vifi) &&
		    ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) {
			if (psend != -1) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					ip6mr_forward2(net, mrt, skb2, cache, psend);
			}
			psend = ct;
		}
	}
last_forward:
	if (psend != -1) {
		ip6mr_forward2(net, mrt, skb, cache, psend);
		return;
	}

dont_forward:
	kfree_skb(skb);
}
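/* Note the "psend" pattern above: each matching oif except the last
 * gets a clone of the skb, and the final ip6mr_forward2() call consumes
 * the original. Exactly one skb reference is used per transmission and
 * nothing is copied more often than necessary.
 */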
/* Multicast packets for forwarding arrive here */

int ip6_mr_input(struct sk_buff *skb)
{
	struct mfc6_cache *cache;
	struct net *net = dev_net(skb->dev);
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};
	int err;

	err = ip6mr_fib_lookup(net, &fl6, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(mrt,
				 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
	if (!cache) {
		int vif = ip6mr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ip6mr_cache_find_any(mrt,
						     &ipv6_hdr(skb)->daddr,
						     vif);
	}

	/* No usable cache entry */
	if (!cache) {
		int vif;

		vif = ip6mr_find_vif(mrt, skb->dev);
		if (vif >= 0) {
			int err = ip6mr_cache_unresolved(mrt, vif, skb);
			read_unlock(&mrt_lock);

			return err;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	ip6_mr_forward(net, mrt, skb, cache);

	read_unlock(&mrt_lock);
	return 0;
}
static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
			       struct mfc6_cache *c, struct rtmsg *rtm)
{
	struct rta_mfc_stats mfcs;
	struct nlattr *mp_attr;
	struct rtnexthop *nhp;
	unsigned long lastuse;
	int ct;

	/* If cache is unresolved, don't try to parse IIF and OIF */
	if (c->mf6c_parent >= MAXMIFS) {
		rtm->rtm_flags |= RTNH_F_UNRESOLVED;
		return -ENOENT;
	}

	if (MIF_EXISTS(mrt, c->mf6c_parent) &&
	    nla_put_u32(skb, RTA_IIF, mrt->vif6_table[c->mf6c_parent].dev->ifindex) < 0)
		return -EMSGSIZE;
	mp_attr = nla_nest_start(skb, RTA_MULTIPATH);
	if (!mp_attr)
		return -EMSGSIZE;

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (MIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
			nhp = nla_reserve_nohdr(skb, sizeof(*nhp));
			if (!nhp) {
				nla_nest_cancel(skb, mp_attr);
				return -EMSGSIZE;
			}

			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			nhp->rtnh_ifindex = mrt->vif6_table[ct].dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}

	nla_nest_end(skb, mp_attr);

	lastuse = READ_ONCE(c->mfc_un.res.lastuse);
	lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;

	mfcs.mfcs_packets = c->mfc_un.res.pkt;
	mfcs.mfcs_bytes = c->mfc_un.res.bytes;
	mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
	if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
	    nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
			      RTA_PAD))
		return -EMSGSIZE;

	rtm->rtm_type = RTN_MULTICAST;
	return 1;
}
int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
		    u32 portid)
{
	int err;
	struct mr6_table *mrt;
	struct mfc6_cache *cache;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
	if (!cache && skb->dev) {
		int vif = ip6mr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ip6mr_cache_find_any(mrt, &rt->rt6i_dst.addr,
						     vif);
	}

	if (!cache) {
		struct sk_buff *skb2;
		struct ipv6hdr *iph;
		struct net_device *dev;
		int vif;

		dev = skb->dev;
		if (!dev || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
			read_unlock(&mrt_lock);
			return -ENODEV;
		}

		/* really correct? */
		skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			return -ENOMEM;
		}

		NETLINK_CB(skb2).portid = portid;
		skb_reset_transport_header(skb2);

		skb_put(skb2, sizeof(struct ipv6hdr));
		skb_reset_network_header(skb2);

		iph = ipv6_hdr(skb2);
		iph->version = 0;
		iph->flow_lbl[0] = 0;
		iph->flow_lbl[1] = 0;
		iph->flow_lbl[2] = 0;
		iph->payload_len = 0;
		iph->nexthdr = IPPROTO_NONE;
		iph->saddr = rt->rt6i_src.addr;
		iph->daddr = rt->rt6i_dst.addr;

		err = ip6mr_cache_unresolved(mrt, vif, skb2);
		read_unlock(&mrt_lock);

		return err;
	}

	if (rtm->rtm_flags & RTM_F_NOTIFY)
		cache->mfc_flags |= MFC_NOTIFY;

	err = __ip6mr_fill_mroute(mrt, skb, cache, rtm);
	read_unlock(&mrt_lock);
	return err;
}
static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
			     u32 portid, u32 seq, struct mfc6_cache *c, int cmd,
			     int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;
	int err;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
	if (!nlh)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family	  = RTNL_FAMILY_IP6MR;
	rtm->rtm_dst_len  = 128;
	rtm->rtm_src_len  = 128;
	rtm->rtm_table	  = mrt->id;
	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
		goto nla_put_failure;
	rtm->rtm_type = RTN_MULTICAST;
	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
	if (c->mfc_flags & MFC_STATIC)
		rtm->rtm_protocol = RTPROT_STATIC;
	else
		rtm->rtm_protocol = RTPROT_MROUTED;

	if (nla_put_in6_addr(skb, RTA_SRC, &c->mf6c_origin) ||
	    nla_put_in6_addr(skb, RTA_DST, &c->mf6c_mcastgrp))
		goto nla_put_failure;
	err = __ip6mr_fill_mroute(mrt, skb, c, rtm);
	/* do not break the dump if cache is unresolved */
	if (err < 0 && err != -ENOENT)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static int mr6_msgsize(bool unresolved, int maxvif)
{
	size_t len =
		NLMSG_ALIGN(sizeof(struct rtmsg))
		+ nla_total_size(4)	/* RTA_TABLE */
		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_SRC */
		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_DST */
		;

	if (!unresolved)
		len = len
		      + nla_total_size(4)	/* RTA_IIF */
		      + nla_total_size(0)	/* RTA_MULTIPATH */
		      + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
						/* RTA_MFC_STATS */
		      + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
		;

	return len;
}
static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
			      int cmd)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(mr6_msgsize(mfc->mf6c_parent >= MAXMIFS, mrt->maxvif),
			GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
	if (err < 0)
		goto errout;

	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE, NULL, GFP_ATOMIC);
	return;

errout:
	kfree_skb(skb);
	rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE, err);
}
static size_t mrt6msg_netlink_msgsize(size_t payloadlen)
{
	size_t len =
		NLMSG_ALIGN(sizeof(struct rtgenmsg))
		+ nla_total_size(1)	/* IP6MRA_CREPORT_MSGTYPE */
		+ nla_total_size(4)	/* IP6MRA_CREPORT_MIF_ID */
					/* IP6MRA_CREPORT_SRC_ADDR */
		+ nla_total_size(sizeof(struct in6_addr))
					/* IP6MRA_CREPORT_DST_ADDR */
		+ nla_total_size(sizeof(struct in6_addr))
					/* IP6MRA_CREPORT_PKT */
		+ nla_total_size(payloadlen)
		;

	return len;
}
static void mrt6msg_netlink_event(struct mr6_table *mrt, struct sk_buff *pkt)
{
	struct net *net = read_pnet(&mrt->net);
	struct nlmsghdr *nlh;
	struct rtgenmsg *rtgenm;
	struct mrt6msg *msg;
	struct sk_buff *skb;
	struct nlattr *nla;
	int payloadlen;

	payloadlen = pkt->len - sizeof(struct mrt6msg);
	msg = (struct mrt6msg *)skb_transport_header(pkt);

	skb = nlmsg_new(mrt6msg_netlink_msgsize(payloadlen), GFP_ATOMIC);
	if (!skb)
		goto errout;

	nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT,
			sizeof(struct rtgenmsg), 0);
	if (!nlh)
		goto errout;
	rtgenm = nlmsg_data(nlh);
	rtgenm->rtgen_family = RTNL_FAMILY_IP6MR;
	if (nla_put_u8(skb, IP6MRA_CREPORT_MSGTYPE, msg->im6_msgtype) ||
	    nla_put_u32(skb, IP6MRA_CREPORT_MIF_ID, msg->im6_mif) ||
	    nla_put_in6_addr(skb, IP6MRA_CREPORT_SRC_ADDR,
			     &msg->im6_src) ||
	    nla_put_in6_addr(skb, IP6MRA_CREPORT_DST_ADDR,
			     &msg->im6_dst))
		goto nla_put_failure;

	nla = nla_reserve(skb, IP6MRA_CREPORT_PKT, payloadlen);
	if (!nla || skb_copy_bits(pkt, sizeof(struct mrt6msg),
				  nla_data(nla), payloadlen))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE_R, NULL, GFP_ATOMIC);
	return;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
errout:
	kfree_skb(skb);
	rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE_R, -ENOBUFS);
}
static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct mr6_table *mrt;
	struct mfc6_cache *mfc;
	unsigned int t = 0, s_t;
	unsigned int h = 0, s_h;
	unsigned int e = 0, s_e;

	s_t = cb->args[0];
	s_h = cb->args[1];
	s_e = cb->args[2];

	read_lock(&mrt_lock);
	ip6mr_for_each_table(mrt, net) {
		for (h = s_h; h < MFC6_LINES; h++) {
			list_for_each_entry(mfc, &mrt->mfc6_cache_array[h], list) {
				if (ip6mr_fill_mroute(mrt, skb,
						      NETLINK_CB(cb->skb).portid,
						      cb->nlh->nlmsg_seq,
						      mfc, RTM_NEWROUTE,
						      NLM_F_MULTI) < 0)
					goto done;
			}
		}
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry(mfc, &mrt->mfc6_unres_queue, list) {
			if (ip6mr_fill_mroute(mrt, skb,
					      NETLINK_CB(cb->skb).portid,
					      cb->nlh->nlmsg_seq,
					      mfc, RTM_NEWROUTE,
					      NLM_F_MULTI) < 0) {
				spin_unlock_bh(&mfc_unres_lock);
				goto done;
			}
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
done:
	read_unlock(&mrt_lock);

	cb->args[2] = e;
	cb->args[1] = h;
	cb->args[0] = t;

	return skb->len;
}