2 * NET3 IP device support routines.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
9 * Derived from the IP parts of dev.c 1.0.19
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
15 * Alan Cox, <gw4pts@gw4pts.ampr.org>
16 * Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
19 * Alexey Kuznetsov: pa_* fields are replaced with ifaddr
21 * Cyrus Durgin: updated for kmod
22 * Matthias Andree: in devinet_ioctl, compare label and
23 * address (4.4BSD alias style support),
24 * fall back to comparing just the label
29 #include <asm/uaccess.h>
30 #include <linux/bitops.h>
31 #include <linux/capability.h>
32 #include <linux/module.h>
33 #include <linux/types.h>
34 #include <linux/kernel.h>
35 #include <linux/string.h>
37 #include <linux/socket.h>
38 #include <linux/sockios.h>
40 #include <linux/errno.h>
41 #include <linux/interrupt.h>
42 #include <linux/if_addr.h>
43 #include <linux/if_ether.h>
44 #include <linux/inet.h>
45 #include <linux/netdevice.h>
46 #include <linux/etherdevice.h>
47 #include <linux/skbuff.h>
48 #include <linux/init.h>
49 #include <linux/notifier.h>
50 #include <linux/inetdevice.h>
51 #include <linux/igmp.h>
52 #include <linux/slab.h>
53 #include <linux/hash.h>
55 #include <linux/sysctl.h>
57 #include <linux/kmod.h>
58 #include <linux/netconf.h>
62 #include <net/route.h>
63 #include <net/ip_fib.h>
64 #include <net/rtnetlink.h>
65 #include <net/net_namespace.h>
66 #include <net/addrconf.h>
68 #include "fib_lookup.h"
/* ifa_flags bits that only make sense for IPv6 addresses; they are
 * stripped from IPv4 addresses before insertion (see __inet_insert_ifa).
 */
70 #define IPV6ONLY_FLAGS \
71 (IFA_F_NODAD | IFA_F_OPTIMISTIC | IFA_F_DADFAILED | \
72 IFA_F_HOMEADDRESS | IFA_F_TENTATIVE | \
73 IFA_F_MANAGETEMPADDR | IFA_F_STABLE_PRIVACY)
/* Template devconf for newly created in_devices (per-interface settings).
 * NOTE(review): the initializer's enclosing member designator and closing
 * braces appear to be missing from this excerpt — confirm against the
 * original file.
 */
75 static struct ipv4_devconf ipv4_devconf = {
77 [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
78 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
79 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
80 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
81 [IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
82 [IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] = 1000 /*ms*/,
/* Namespace-wide "default" devconf; differs from ipv4_devconf by also
 * enabling ACCEPT_SOURCE_ROUTE. Copied into each new in_device by
 * inetdev_init() via net->ipv4.devconf_dflt.
 */
86 static struct ipv4_devconf ipv4_devconf_dflt = {
88 [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
89 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
90 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
91 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
92 [IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE - 1] = 1,
93 [IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
94 [IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] = 1000 /*ms*/,
/* Accessor for the per-netns default devconf value of @attr. */
98 #define IPV4_DEVCONF_DFLT(net, attr) \
99 IPV4_DEVCONF((*net->ipv4.devconf_dflt), attr)
/* Netlink attribute validation policy for RTM_NEWADDR / RTM_DELADDR
 * (used by nlmsg_parse in inet_rtm_deladdr and rtm_to_ifaddr).
 */
101 static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = {
102 [IFA_LOCAL] = { .type = NLA_U32 },
103 [IFA_ADDRESS] = { .type = NLA_U32 },
104 [IFA_BROADCAST] = { .type = NLA_U32 },
105 [IFA_LABEL] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
106 [IFA_CACHEINFO] = { .len = sizeof(struct ifa_cacheinfo) },
107 [IFA_FLAGS] = { .type = NLA_U32 },
/* Global hash table of all configured IPv4 addresses (2^8 = 256 buckets),
 * keyed by inet_addr_hash(). Entries are in_ifaddr.hash nodes.
 */
110 #define IN4_ADDR_HSIZE_SHIFT 8
111 #define IN4_ADDR_HSIZE (1U << IN4_ADDR_HSIZE_SHIFT)
113 static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE];
/* Hash an IPv4 address, salted with a per-namespace mix so identical
 * addresses in different netns land in different buckets, into the
 * IN4_ADDR_HSIZE bucket space.
 */
115 static u32 inet_addr_hash(const struct net *net, __be32 addr)
117 u32 val = (__force u32) addr ^ net_hash_mix(net);
119 return hash_32(val, IN4_ADDR_HSIZE_SHIFT);
/* Insert @ifa into the global address hash, keyed by its local address.
 * RCU-safe insertion; readers may traverse concurrently.
 */
122 static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa)
124 u32 hash = inet_addr_hash(net, ifa->ifa_local);
127 hlist_add_head_rcu(&ifa->hash, &inet_addr_lst[hash]);
/* Unlink @ifa from the global address hash (RCU-safe, node re-initialized
 * so a later hlist_unhashed() check is valid).
 */
130 static void inet_hash_remove(struct in_ifaddr *ifa)
133 hlist_del_init_rcu(&ifa->hash);
137 * __ip_dev_find - find the first device with a given source address.
138 * @net: the net namespace
139 * @addr: the source address
140 * @devref: if true, take a reference on the found device
142 * If a caller uses devref=false, it should be protected by RCU, or RTNL
144 struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
146 u32 hash = inet_addr_hash(net, addr);
147 struct net_device *result = NULL;
148 struct in_ifaddr *ifa;
/* Fast path: look the address up in the global hash. */
151 hlist_for_each_entry_rcu(ifa, &inet_addr_lst[hash], hash) {
152 if (ifa->ifa_local == addr) {
153 struct net_device *dev = ifa->ifa_dev->dev;
/* Hash is global across namespaces — skip hits from other netns. */
155 if (!net_eq(dev_net(dev), net))
/* NOTE(review): lines appear missing from this excerpt between the
 * hash walk above and the FIB fallback below — confirm against the
 * original file.
 */
162 struct flowi4 fl4 = { .daddr = addr };
163 struct fib_result res = { 0 };
164 struct fib_table *local;
166 /* Fallback to FIB local table so that communication
167 * over loopback subnets works.
169 local = fib_get_table(net, RT_TABLE_LOCAL);
171 !fib_table_lookup(local, &fl4, &res, FIB_LOOKUP_NOREF) &&
172 res.type == RTN_LOCAL)
173 result = FIB_RES_DEV(res);
175 if (result && devref)
180 EXPORT_SYMBOL(__ip_dev_find);
/* Forward declarations and the notifier chain used to broadcast IPv4
 * address add/remove events (NETDEV_UP / NETDEV_DOWN) to subscribers.
 * NOTE(review): both declarations and what look like stub definitions of
 * devinet_sysctl_register/unregister appear here; in the original these
 * are presumably separated by a CONFIG_SYSCTL #ifdef — confirm.
 */
182 static void rtmsg_ifa(int event, struct in_ifaddr *, struct nlmsghdr *, u32);
184 static BLOCKING_NOTIFIER_HEAD(inetaddr_chain);
185 static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
188 static int devinet_sysctl_register(struct in_device *idev);
189 static void devinet_sysctl_unregister(struct in_device *idev);
191 static int devinet_sysctl_register(struct in_device *idev)
195 static void devinet_sysctl_unregister(struct in_device *idev)
200 /* Locks all the inet devices. */
/* Allocate a zeroed in_ifaddr; may sleep (GFP_KERNEL). Returns NULL on
 * allocation failure.
 */
202 static struct in_ifaddr *inet_alloc_ifa(void)
204 return kzalloc(sizeof(struct in_ifaddr), GFP_KERNEL);
/* RCU callback: drop the in_device reference held by the ifa, then free
 * it (NOTE(review): the kfree line appears missing from this excerpt).
 */
207 static void inet_rcu_free_ifa(struct rcu_head *head)
209 struct in_ifaddr *ifa = container_of(head, struct in_ifaddr, rcu_head);
211 in_dev_put(ifa->ifa_dev);
/* Free @ifa after an RCU grace period, so concurrent RCU readers that
 * still hold a pointer to it remain safe.
 */
215 static void inet_free_ifa(struct in_ifaddr *ifa)
217 call_rcu(&ifa->rcu_head, inet_rcu_free_ifa);
/* Final teardown of an in_device once its refcount has dropped to zero.
 * Warns if addresses or multicast state are still attached (would leak).
 * NOTE(review): the refcount check and the closing kfree/dev_put lines
 * appear missing from this excerpt.
 */
220 void in_dev_finish_destroy(struct in_device *idev)
222 struct net_device *dev = idev->dev;
224 WARN_ON(idev->ifa_list);
225 WARN_ON(idev->mc_list);
226 kfree(rcu_dereference_protected(idev->mc_hash, 1));
227 #ifdef NET_REFCNT_DEBUG
228 pr_debug("%s: %p=%s\n", __func__, idev, dev ? dev->name : "NIL");
232 pr_err("Freeing alive in_device %p\n", idev);
236 EXPORT_SYMBOL(in_dev_finish_destroy);
/* Create and attach the IPv4 in_device for a newly registered netdev:
 * copy the netns default devconf, allocate ARP neighbour parms, register
 * sysctls, init multicast state, then publish via dev->ip_ptr.
 * Returns the new in_device or an ERR_PTR on failure.
 * NOTE(review): error-path labels and several lines are missing from
 * this excerpt.
 */
238 static struct in_device *inetdev_init(struct net_device *dev)
240 struct in_device *in_dev;
245 in_dev = kzalloc(sizeof(*in_dev), GFP_KERNEL);
248 memcpy(&in_dev->cnf, dev_net(dev)->ipv4.devconf_dflt,
249 sizeof(in_dev->cnf));
250 in_dev->cnf.sysctl = NULL;
252 in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl);
253 if (!in_dev->arp_parms)
/* LRO is incompatible with forwarding: turn it off if forwarding is on. */
255 if (IPV4_DEVCONF(in_dev->cnf, FORWARDING))
256 dev_disable_lro(dev);
257 /* Reference in_dev->dev */
259 /* Account for reference dev->ip_ptr (below) */
262 err = devinet_sysctl_register(in_dev);
269 ip_mc_init_dev(in_dev);
270 if (dev->flags & IFF_UP)
273 /* we can receive as soon as ip_ptr is set -- do this last */
274 rcu_assign_pointer(dev->ip_ptr, in_dev);
276 return in_dev ?: ERR_PTR(err);
/* RCU callback scheduled by inetdev_destroy(): drops the in_device
 * reference after the grace period.
 */
283 static void in_dev_rcu_put(struct rcu_head *head)
285 struct in_device *idev = container_of(head, struct in_device, rcu_head);
/* Tear down an in_device when its netdev is unregistered: destroy
 * multicast state, delete every remaining address, unpublish dev->ip_ptr,
 * remove sysctls and ARP parms, and free via RCU.
 */
289 static void inetdev_destroy(struct in_device *in_dev)
291 struct in_ifaddr *ifa;
292 struct net_device *dev;
300 ip_mc_destroy_dev(in_dev);
/* inet_del_ifa() unlinks the head each time, so this drains the list. */
302 while ((ifa = in_dev->ifa_list) != NULL) {
303 inet_del_ifa(in_dev, &in_dev->ifa_list, 0);
307 RCU_INIT_POINTER(dev->ip_ptr, NULL);
309 devinet_sysctl_unregister(in_dev);
310 neigh_parms_release(&arp_tbl, in_dev->arp_parms);
313 call_rcu(&in_dev->rcu_head, in_dev_rcu_put);
/* Return nonzero if @a (and, when nonzero, @b) fall inside the subnet of
 * any primary address on @in_dev; i.e. both endpoints are "on link".
 */
316 int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b)
319 for_primary_ifa(in_dev) {
320 if (inet_ifa_match(a, ifa)) {
321 if (!b || inet_ifa_match(b, ifa)) {
326 } endfor_ifa(in_dev);
/* Delete the address *@ifap from @in_dev. Deleting a primary address
 * also deletes its secondaries, unless promote_secondaries is enabled,
 * in which case the first matching secondary is promoted to primary.
 * Sends RTM_DELADDR/RTM_NEWADDR netlink messages and fires the
 * inetaddr_chain notifier for each affected address.
 * NOTE(review): several lines (promote selection, destroy path, closing
 * braces) are missing from this excerpt.
 */
331 static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
332 int destroy, struct nlmsghdr *nlh, u32 portid)
334 struct in_ifaddr *promote = NULL;
335 struct in_ifaddr *ifa, *ifa1 = *ifap;
336 struct in_ifaddr *last_prim = in_dev->ifa_list;
337 struct in_ifaddr *prev_prom = NULL;
338 int do_promote = IN_DEV_PROMOTE_SECONDARIES(in_dev);
345 /* 1. Deleting primary ifaddr forces deletion all secondaries
346 * unless alias promotion is set
349 if (!(ifa1->ifa_flags & IFA_F_SECONDARY)) {
350 struct in_ifaddr **ifap1 = &ifa1->ifa_next;
352 while ((ifa = *ifap1) != NULL) {
/* Track the last primary with scope <= ifa1's, for promote splicing. */
353 if (!(ifa->ifa_flags & IFA_F_SECONDARY) &&
354 ifa1->ifa_scope <= ifa->ifa_scope)
/* Skip addresses outside ifa1's subnet — they are unaffected. */
357 if (!(ifa->ifa_flags & IFA_F_SECONDARY) ||
358 ifa1->ifa_mask != ifa->ifa_mask ||
359 !inet_ifa_match(ifa1->ifa_address, ifa)) {
360 ifap1 = &ifa->ifa_next;
/* Matching secondary and no promotion: unlink and announce deletion. */
366 inet_hash_remove(ifa);
367 *ifap1 = ifa->ifa_next;
369 rtmsg_ifa(RTM_DELADDR, ifa, nlh, portid);
370 blocking_notifier_call_chain(&inetaddr_chain,
380 /* On promotion all secondaries from subnet are changing
381 * the primary IP, we must remove all their routes silently
382 * and later to add them back with new prefsrc. Do this
383 * while all addresses are on the device list.
385 for (ifa = promote; ifa; ifa = ifa->ifa_next) {
386 if (ifa1->ifa_mask == ifa->ifa_mask &&
387 inet_ifa_match(ifa1->ifa_address, ifa))
388 fib_del_ifaddr(ifa, ifa1);
/* 2. Unlink ifa1 itself from the device list and the global hash. */
394 *ifap = ifa1->ifa_next;
395 inet_hash_remove(ifa1);
397 /* 3. Announce address deletion */
399 /* Send message first, then call notifier.
400 At first sight, FIB update triggered by notifier
401 will refer to already deleted ifaddr, that could confuse
402 netlink listeners. It is not true: look, gated sees
403 that route deleted and if it still thinks that ifaddr
404 is valid, it will try to restore deleted routes... Grr.
405 So that, this order is correct.
407 rtmsg_ifa(RTM_DELADDR, ifa1, nlh, portid);
408 blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1);
/* 4. Perform the promotion: splice promote in after the last primary. */
411 struct in_ifaddr *next_sec = promote->ifa_next;
414 prev_prom->ifa_next = promote->ifa_next;
415 promote->ifa_next = last_prim->ifa_next;
416 last_prim->ifa_next = promote;
419 promote->ifa_flags &= ~IFA_F_SECONDARY;
420 rtmsg_ifa(RTM_NEWADDR, promote, nlh, portid);
421 blocking_notifier_call_chain(&inetaddr_chain,
423 for (ifa = next_sec; ifa; ifa = ifa->ifa_next) {
424 if (ifa1->ifa_mask != ifa->ifa_mask ||
425 !inet_ifa_match(ifa1->ifa_address, ifa))
/* Convenience wrapper around __inet_del_ifa() with no originating
 * netlink message or portid.
 */
435 static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
438 __inet_del_ifa(in_dev, ifap, destroy, NULL, 0);
/* Deferred work that expires/deprecates addresses by lifetime; requeued
 * by check_lifetime() itself and kicked on every address change.
 */
441 static void check_lifetime(struct work_struct *work);
443 static DECLARE_DELAYED_WORK(check_lifetime_work, check_lifetime);
/* Insert @ifa into its device's address list, classifying it as primary
 * or secondary (secondary if another address already covers the same
 * subnet), hash it globally, kick the lifetime checker, and announce via
 * RTM_NEWADDR + the inetaddr notifier chain. Returns 0 or -errno.
 * NOTE(review): duplicate/scope-mismatch error-return lines appear
 * missing from this excerpt.
 */
445 static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
448 struct in_device *in_dev = ifa->ifa_dev;
449 struct in_ifaddr *ifa1, **ifap, **last_primary;
/* An address of 0 is meaningless — reject (error path not shown here). */
453 if (!ifa->ifa_local) {
458 ifa->ifa_flags &= ~IFA_F_SECONDARY;
459 last_primary = &in_dev->ifa_list;
461 /* Don't set IPv6 only flags to IPv4 addresses */
462 ifa->ifa_flags &= ~IPV6ONLY_FLAGS;
/* Walk the list to find the insertion point and detect duplicates. */
464 for (ifap = &in_dev->ifa_list; (ifa1 = *ifap) != NULL;
465 ifap = &ifa1->ifa_next) {
466 if (!(ifa1->ifa_flags & IFA_F_SECONDARY) &&
467 ifa->ifa_scope <= ifa1->ifa_scope)
468 last_primary = &ifa1->ifa_next;
469 if (ifa1->ifa_mask == ifa->ifa_mask &&
470 inet_ifa_match(ifa1->ifa_address, ifa)) {
471 if (ifa1->ifa_local == ifa->ifa_local) {
475 if (ifa1->ifa_scope != ifa->ifa_scope) {
/* Same subnet as an existing address: this one becomes secondary. */
479 ifa->ifa_flags |= IFA_F_SECONDARY;
483 if (!(ifa->ifa_flags & IFA_F_SECONDARY)) {
484 prandom_seed((__force u32) ifa->ifa_local);
488 ifa->ifa_next = *ifap;
491 inet_hash_insert(dev_net(in_dev->dev), ifa);
/* Re-run lifetime management immediately for the new address. */
493 cancel_delayed_work(&check_lifetime_work);
494 queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0);
496 /* Send message first, then call notifier.
497 Notifier will trigger FIB update, so that
498 listeners of netlink will know about new ifaddr */
499 rtmsg_ifa(RTM_NEWADDR, ifa, nlh, portid);
500 blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
/* Convenience wrapper: insert with no originating netlink message. */
505 static int inet_insert_ifa(struct in_ifaddr *ifa)
507 return __inet_insert_ifa(ifa, NULL, 0);
/* Bind @ifa to @dev's in_device (taking it over if not already bound),
 * force host scope for loopback addresses, then insert it.
 * Caller must hold RTNL (__in_dev_get_rtnl).
 */
510 static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa)
512 struct in_device *in_dev = __in_dev_get_rtnl(dev);
520 ipv4_devconf_setall(in_dev);
521 neigh_parms_data_state_setall(in_dev->arp_parms);
522 if (ifa->ifa_dev != in_dev) {
523 WARN_ON(ifa->ifa_dev);
525 ifa->ifa_dev = in_dev;
527 if (ipv4_is_loopback(ifa->ifa_local))
528 ifa->ifa_scope = RT_SCOPE_HOST;
529 return inet_insert_ifa(ifa);
532 /* Caller must hold RCU or RTNL :
533 * We don't take a reference on found in_device
535 struct in_device *inetdev_by_index(struct net *net, int ifindex)
537 struct net_device *dev;
538 struct in_device *in_dev = NULL;
541 dev = dev_get_by_index_rcu(net, ifindex);
543 in_dev = rcu_dereference_rtnl(dev->ip_ptr);
547 EXPORT_SYMBOL(inetdev_by_index);
549 /* Called only from RTNL semaphored context. No locks. */
/* Return the primary address on @in_dev whose subnet (mask + prefix)
 * matches, or NULL (return path not shown in this excerpt).
 */
551 struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
556 for_primary_ifa(in_dev) {
557 if (ifa->ifa_mask == mask && inet_ifa_match(prefix, ifa))
559 } endfor_ifa(in_dev);
/* Join (@join=true) or leave a multicast group for an address flagged
 * IFA_F_MCAUTOJOIN, on the ifa's own interface.
 */
563 static int ip_mc_config(struct sock *sk, bool join, const struct in_ifaddr *ifa)
565 struct ip_mreqn mreq = {
566 .imr_multiaddr.s_addr = ifa->ifa_address,
567 .imr_ifindex = ifa->ifa_dev->dev->ifindex,
575 ret = ip_mc_join_group(sk, &mreq);
577 ret = ip_mc_leave_group(sk, &mreq);
/* RTM_DELADDR handler: parse the netlink request, find the matching
 * address on the indicated device (matching local address, label, and/or
 * prefix as supplied), auto-leave a multicast group if flagged, and
 * delete it. Returns 0 or -errno (-EADDRNOTAVAIL if nothing matched).
 */
583 static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
585 struct net *net = sock_net(skb->sk);
586 struct nlattr *tb[IFA_MAX+1];
587 struct in_device *in_dev;
588 struct ifaddrmsg *ifm;
589 struct in_ifaddr *ifa, **ifap;
594 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy);
598 ifm = nlmsg_data(nlh);
599 in_dev = inetdev_by_index(net, ifm->ifa_index);
/* Walk the device's address list; each supplied attribute narrows the
 * match.
 */
605 for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
606 ifap = &ifa->ifa_next) {
608 ifa->ifa_local != nla_get_in_addr(tb[IFA_LOCAL]))
611 if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label))
614 if (tb[IFA_ADDRESS] &&
615 (ifm->ifa_prefixlen != ifa->ifa_prefixlen ||
616 !inet_ifa_match(nla_get_in_addr(tb[IFA_ADDRESS]), ifa)))
619 if (ipv4_is_multicast(ifa->ifa_address))
620 ip_mc_config(net->ipv4.mc_autojoin_sk, false, ifa);
621 __inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).portid);
625 err = -EADDRNOTAVAIL;
630 #define INFINITY_LIFE_TIME 0xFFFFFFFF
/* Periodic worker: walk every bucket of the global address hash and
 * (a) delete addresses whose valid lifetime has expired, (b) mark
 * addresses past their preferred lifetime IFA_F_DEPRECATED, and
 * (c) compute the earliest next event to reschedule itself.
 * The first (RCU) pass only detects whether changes are needed; the
 * second pass (presumably under RTNL, acquisition not visible in this
 * excerpt) applies them with hlist_for_each_entry_safe.
 */
632 static void check_lifetime(struct work_struct *work)
634 unsigned long now, next, next_sec, next_sched;
635 struct in_ifaddr *ifa;
636 struct hlist_node *n;
640 next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
642 for (i = 0; i < IN4_ADDR_HSIZE; i++) {
643 bool change_needed = false;
/* Pass 1 (read-only, RCU): decide if this bucket needs changes. */
646 hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) {
649 if (ifa->ifa_flags & IFA_F_PERMANENT)
652 /* We try to batch several events at once. */
653 age = (now - ifa->ifa_tstamp +
654 ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
656 if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
657 age >= ifa->ifa_valid_lft) {
658 change_needed = true;
659 } else if (ifa->ifa_preferred_lft ==
660 INFINITY_LIFE_TIME) {
662 } else if (age >= ifa->ifa_preferred_lft) {
663 if (time_before(ifa->ifa_tstamp +
664 ifa->ifa_valid_lft * HZ, next))
665 next = ifa->ifa_tstamp +
666 ifa->ifa_valid_lft * HZ;
668 if (!(ifa->ifa_flags & IFA_F_DEPRECATED))
669 change_needed = true;
670 } else if (time_before(ifa->ifa_tstamp +
671 ifa->ifa_preferred_lft * HZ,
673 next = ifa->ifa_tstamp +
674 ifa->ifa_preferred_lft * HZ;
/* Pass 2 (mutating): apply deletions/deprecations found above. */
681 hlist_for_each_entry_safe(ifa, n, &inet_addr_lst[i], hash) {
684 if (ifa->ifa_flags & IFA_F_PERMANENT)
687 /* We try to batch several events at once. */
688 age = (now - ifa->ifa_tstamp +
689 ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
691 if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
692 age >= ifa->ifa_valid_lft) {
693 struct in_ifaddr **ifap;
/* Find the list slot for this ifa on its device so it can be deleted. */
695 for (ifap = &ifa->ifa_dev->ifa_list;
696 *ifap != NULL; ifap = &(*ifap)->ifa_next) {
698 inet_del_ifa(ifa->ifa_dev,
703 } else if (ifa->ifa_preferred_lft !=
704 INFINITY_LIFE_TIME &&
705 age >= ifa->ifa_preferred_lft &&
706 !(ifa->ifa_flags & IFA_F_DEPRECATED)) {
707 ifa->ifa_flags |= IFA_F_DEPRECATED;
708 rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
714 next_sec = round_jiffies_up(next);
717 /* If rounded timeout is accurate enough, accept it. */
718 if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
719 next_sched = next_sec;
722 /* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
723 if (time_before(next_sched, now + ADDRCONF_TIMER_FUZZ_MAX))
724 next_sched = now + ADDRCONF_TIMER_FUZZ_MAX;
726 queue_delayed_work(system_power_efficient_wq, &check_lifetime_work,
/* Set valid/preferred lifetimes on @ifa, converting to jiffies. An
 * infinite valid lifetime maps to IFA_F_PERMANENT; a finite preferred
 * lifetime of zero marks the address deprecated immediately. Also stamps
 * ifa_tstamp and (first time only) ifa_cstamp.
 */
730 static void set_ifa_lifetime(struct in_ifaddr *ifa, __u32 valid_lft,
733 unsigned long timeout;
735 ifa->ifa_flags &= ~(IFA_F_PERMANENT | IFA_F_DEPRECATED);
737 timeout = addrconf_timeout_fixup(valid_lft, HZ);
738 if (addrconf_finite_timeout(timeout))
739 ifa->ifa_valid_lft = timeout;
741 ifa->ifa_flags |= IFA_F_PERMANENT;
743 timeout = addrconf_timeout_fixup(prefered_lft, HZ);
744 if (addrconf_finite_timeout(timeout)) {
746 ifa->ifa_flags |= IFA_F_DEPRECATED;
747 ifa->ifa_preferred_lft = timeout;
749 ifa->ifa_tstamp = jiffies;
750 if (!ifa->ifa_cstamp)
751 ifa->ifa_cstamp = ifa->ifa_tstamp;
/* Build an in_ifaddr from an RTM_NEWADDR netlink message. Validates the
 * prefix length and required IFA_LOCAL attribute, resolves the target
 * device, and fills all address fields (IFA_ADDRESS defaults to
 * IFA_LOCAL, the label defaults to the device name). Lifetimes from
 * IFA_CACHEINFO are returned through @pvalid_lft/@pprefered_lft.
 * Returns the new ifa or (per the visible error handling pattern) an
 * ERR_PTR — NOTE(review): error-path lines are missing from this excerpt.
 */
754 static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
755 __u32 *pvalid_lft, __u32 *pprefered_lft)
757 struct nlattr *tb[IFA_MAX+1];
758 struct in_ifaddr *ifa;
759 struct ifaddrmsg *ifm;
760 struct net_device *dev;
761 struct in_device *in_dev;
764 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy);
768 ifm = nlmsg_data(nlh);
770 if (ifm->ifa_prefixlen > 32 || !tb[IFA_LOCAL])
773 dev = __dev_get_by_index(net, ifm->ifa_index);
778 in_dev = __in_dev_get_rtnl(dev);
783 ifa = inet_alloc_ifa();
786 * A potential indev allocation can be left alive, it stays
787 * assigned to its device and is destroyed with it.
791 ipv4_devconf_setall(in_dev);
792 neigh_parms_data_state_setall(in_dev->arp_parms);
795 if (!tb[IFA_ADDRESS])
796 tb[IFA_ADDRESS] = tb[IFA_LOCAL];
798 INIT_HLIST_NODE(&ifa->hash);
799 ifa->ifa_prefixlen = ifm->ifa_prefixlen;
800 ifa->ifa_mask = inet_make_mask(ifm->ifa_prefixlen);
801 ifa->ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) :
803 ifa->ifa_scope = ifm->ifa_scope;
804 ifa->ifa_dev = in_dev;
806 ifa->ifa_local = nla_get_in_addr(tb[IFA_LOCAL]);
807 ifa->ifa_address = nla_get_in_addr(tb[IFA_ADDRESS]);
809 if (tb[IFA_BROADCAST])
810 ifa->ifa_broadcast = nla_get_in_addr(tb[IFA_BROADCAST]);
813 nla_strlcpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ);
815 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
817 if (tb[IFA_CACHEINFO]) {
818 struct ifa_cacheinfo *ci;
820 ci = nla_data(tb[IFA_CACHEINFO]);
/* Reject zero valid lifetime or preferred > valid. */
821 if (!ci->ifa_valid || ci->ifa_prefered > ci->ifa_valid) {
825 *pvalid_lft = ci->ifa_valid;
826 *pprefered_lft = ci->ifa_prefered;
/* Return the existing address on @ifa's device with the same mask,
 * subnet, and local address as @ifa (i.e. an exact duplicate), used by
 * inet_rtm_newaddr to decide between create and replace.
 */
837 static struct in_ifaddr *find_matching_ifa(struct in_ifaddr *ifa)
839 struct in_device *in_dev = ifa->ifa_dev;
840 struct in_ifaddr *ifa1, **ifap;
845 for (ifap = &in_dev->ifa_list; (ifa1 = *ifap) != NULL;
846 ifap = &ifa1->ifa_next) {
847 if (ifa1->ifa_mask == ifa->ifa_mask &&
848 inet_ifa_match(ifa1->ifa_address, ifa) &&
849 ifa1->ifa_local == ifa->ifa_local)
/* RTM_NEWADDR handler: build an ifa from the request; if no identical
 * address exists, insert it (auto-joining a multicast group when
 * IFA_F_MCAUTOJOIN is set); if one exists, honour NLM_F_EXCL/
 * NLM_F_REPLACE semantics and update the existing address's lifetimes.
 */
855 static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
857 struct net *net = sock_net(skb->sk);
858 struct in_ifaddr *ifa;
859 struct in_ifaddr *ifa_existing;
860 __u32 valid_lft = INFINITY_LIFE_TIME;
861 __u32 prefered_lft = INFINITY_LIFE_TIME;
865 ifa = rtm_to_ifaddr(net, nlh, &valid_lft, &prefered_lft);
869 ifa_existing = find_matching_ifa(ifa);
871 /* It would be best to check for !NLM_F_CREATE here but
872 * userspace already relies on not having to provide this.
874 set_ifa_lifetime(ifa, valid_lft, prefered_lft);
875 if (ifa->ifa_flags & IFA_F_MCAUTOJOIN) {
876 int ret = ip_mc_config(net->ipv4.mc_autojoin_sk,
884 return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).portid);
/* Existing address: replace only if the caller allows it. */
888 if (nlh->nlmsg_flags & NLM_F_EXCL ||
889 !(nlh->nlmsg_flags & NLM_F_REPLACE))
892 set_ifa_lifetime(ifa, valid_lft, prefered_lft);
893 cancel_delayed_work(&check_lifetime_work);
894 queue_delayed_work(system_power_efficient_wq,
895 &check_lifetime_work, 0);
896 rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid);
902 * Determine a default network mask, based on the IP address.
/* Returns the classful prefix length for @addr (class A/B/C), 0 for a
 * zeronet address, or -1 for anything else (e.g. multicast).
 * NOTE(review): the per-class rc assignments are missing from this
 * excerpt.
 */
905 static int inet_abc_len(__be32 addr)
907 int rc = -1; /* Something else, probably a multicast. */
909 if (ipv4_is_zeronet(addr))
912 __u32 haddr = ntohl(addr);
914 if (IN_CLASSA(haddr))
916 else if (IN_CLASSB(haddr))
918 else if (IN_CLASSC(haddr))
/* Legacy SIOCxIF* ioctl handler for IPv4 interface addresses. Copies the
 * caller's ifreq in, locates the device and (for alias labels like
 * "eth0:1") the matching address — first by label+address, then by label
 * alone — dispatches on the command, and copies the result back for the
 * get-style commands. Set-style commands require CAP_NET_ADMIN.
 * NOTE(review): many lines (locking, error labels, several break/goto
 * statements) are missing from this excerpt.
 */
926 int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
929 struct sockaddr_in sin_orig;
930 struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr;
931 struct in_device *in_dev;
932 struct in_ifaddr **ifap = NULL;
933 struct in_ifaddr *ifa = NULL;
934 struct net_device *dev;
937 int tryaddrmatch = 0;
940 * Fetch the caller's info block into kernel space
943 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
945 ifr.ifr_name[IFNAMSIZ - 1] = 0;
947 /* save original address for comparison */
948 memcpy(&sin_orig, sin, sizeof(*sin));
950 colon = strchr(ifr.ifr_name, ':');
954 dev_load(net, ifr.ifr_name);
957 case SIOCGIFADDR: /* Get interface address */
958 case SIOCGIFBRDADDR: /* Get the broadcast address */
959 case SIOCGIFDSTADDR: /* Get the destination address */
960 case SIOCGIFNETMASK: /* Get the netmask for the interface */
961 /* Note that these ioctls will not sleep,
962 so that we do not impose a lock.
963 One day we will be forced to put shlock here (I mean SMP)
965 tryaddrmatch = (sin_orig.sin_family == AF_INET);
966 memset(sin, 0, sizeof(*sin));
967 sin->sin_family = AF_INET;
972 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
975 case SIOCSIFADDR: /* Set interface address (and family) */
976 case SIOCSIFBRDADDR: /* Set the broadcast address */
977 case SIOCSIFDSTADDR: /* Set the destination address */
978 case SIOCSIFNETMASK: /* Set the netmask for the interface */
980 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
983 if (sin->sin_family != AF_INET)
994 dev = __dev_get_by_name(net, ifr.ifr_name)
1001 in_dev = __in_dev_get_rtnl(dev);
1004 /* Matthias Andree */
1005 /* compare label and address (4.4BSD style) */
1006 /* note: we only do this for a limited set of ioctls
1007 and only if the original address family was AF_INET.
1008 This is checked above. */
1009 for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
1010 ifap = &ifa->ifa_next) {
1011 if (!strcmp(ifr.ifr_name, ifa->ifa_label) &&
1012 sin_orig.sin_addr.s_addr ==
1018 /* we didn't get a match, maybe the application is
1019 4.3BSD-style and passed in junk so we fall back to
1020 comparing just the label */
1022 for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
1023 ifap = &ifa->ifa_next)
1024 if (!strcmp(ifr.ifr_name, ifa->ifa_label))
1029 ret = -EADDRNOTAVAIL;
1030 if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS)
1034 case SIOCGIFADDR: /* Get interface address */
1035 sin->sin_addr.s_addr = ifa->ifa_local;
1038 case SIOCGIFBRDADDR: /* Get the broadcast address */
1039 sin->sin_addr.s_addr = ifa->ifa_broadcast;
1042 case SIOCGIFDSTADDR: /* Get the destination address */
1043 sin->sin_addr.s_addr = ifa->ifa_address;
1046 case SIOCGIFNETMASK: /* Get the netmask for the interface */
1047 sin->sin_addr.s_addr = ifa->ifa_mask;
/* SIOCSIFFLAGS on an alias: bringing it down deletes the alias. */
1052 ret = -EADDRNOTAVAIL;
1056 if (!(ifr.ifr_flags & IFF_UP))
1057 inet_del_ifa(in_dev, ifap, 1);
1060 ret = dev_change_flags(dev, ifr.ifr_flags);
1063 case SIOCSIFADDR: /* Set interface address (and family) */
1065 if (inet_abc_len(sin->sin_addr.s_addr) < 0)
/* No existing address: allocate one and label it. */
1070 ifa = inet_alloc_ifa();
1073 INIT_HLIST_NODE(&ifa->hash);
1075 memcpy(ifa->ifa_label, ifr.ifr_name, IFNAMSIZ);
1077 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
1080 if (ifa->ifa_local == sin->sin_addr.s_addr)
1082 inet_del_ifa(in_dev, ifap, 0);
1083 ifa->ifa_broadcast = 0;
1087 ifa->ifa_address = ifa->ifa_local = sin->sin_addr.s_addr;
/* Derive a classful mask/broadcast unless point-to-point (use /32). */
1089 if (!(dev->flags & IFF_POINTOPOINT)) {
1090 ifa->ifa_prefixlen = inet_abc_len(ifa->ifa_address);
1091 ifa->ifa_mask = inet_make_mask(ifa->ifa_prefixlen);
1092 if ((dev->flags & IFF_BROADCAST) &&
1093 ifa->ifa_prefixlen < 31)
1094 ifa->ifa_broadcast = ifa->ifa_address |
1097 ifa->ifa_prefixlen = 32;
1098 ifa->ifa_mask = inet_make_mask(32);
1100 set_ifa_lifetime(ifa, INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
1101 ret = inet_set_ifa(dev, ifa);
1104 case SIOCSIFBRDADDR: /* Set the broadcast address */
1106 if (ifa->ifa_broadcast != sin->sin_addr.s_addr) {
1107 inet_del_ifa(in_dev, ifap, 0);
1108 ifa->ifa_broadcast = sin->sin_addr.s_addr;
1109 inet_insert_ifa(ifa);
1113 case SIOCSIFDSTADDR: /* Set the destination address */
1115 if (ifa->ifa_address == sin->sin_addr.s_addr)
1118 if (inet_abc_len(sin->sin_addr.s_addr) < 0)
1121 inet_del_ifa(in_dev, ifap, 0);
1122 ifa->ifa_address = sin->sin_addr.s_addr;
1123 inet_insert_ifa(ifa);
1126 case SIOCSIFNETMASK: /* Set the netmask for the interface */
1129 * The mask we set must be legal.
1132 if (bad_mask(sin->sin_addr.s_addr, 0))
1135 if (ifa->ifa_mask != sin->sin_addr.s_addr) {
1136 __be32 old_mask = ifa->ifa_mask;
1137 inet_del_ifa(in_dev, ifap, 0);
1138 ifa->ifa_mask = sin->sin_addr.s_addr;
1139 ifa->ifa_prefixlen = inet_mask_len(ifa->ifa_mask);
1141 /* See if current broadcast address matches
1142 * with current netmask, then recalculate
1143 * the broadcast address. Otherwise it's a
1144 * funny address, so don't touch it since
1145 * the user seems to know what (s)he's doing...
1147 if ((dev->flags & IFF_BROADCAST) &&
1148 (ifa->ifa_prefixlen < 31) &&
1149 (ifa->ifa_broadcast ==
1150 (ifa->ifa_local|~old_mask))) {
1151 ifa->ifa_broadcast = (ifa->ifa_local |
1152 ~sin->sin_addr.s_addr);
1154 inet_insert_ifa(ifa);
1164 ret = copy_to_user(arg, &ifr, sizeof(struct ifreq)) ? -EFAULT : 0;
/* SIOCGIFCONF helper: copy one ifreq per address on @dev into the user
 * buffer @buf (or, when @buf is NULL per the classic gifconf contract,
 * only tally the size needed — hedged: the NULL-buf branch is implied by
 * the bare "done += sizeof(ifr)" at line 1180, confirm against original).
 * Returns bytes produced or -EFAULT.
 */
1168 static int inet_gifconf(struct net_device *dev, char __user *buf, int len)
1170 struct in_device *in_dev = __in_dev_get_rtnl(dev);
1171 struct in_ifaddr *ifa;
1178 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
1180 done += sizeof(ifr);
1183 if (len < (int) sizeof(ifr))
1185 memset(&ifr, 0, sizeof(struct ifreq));
1186 strcpy(ifr.ifr_name, ifa->ifa_label);
1188 (*(struct sockaddr_in *)&ifr.ifr_addr).sin_family = AF_INET;
1189 (*(struct sockaddr_in *)&ifr.ifr_addr).sin_addr.s_addr =
1192 if (copy_to_user(buf, &ifr, sizeof(struct ifreq))) {
1196 buf += sizeof(struct ifreq);
1197 len -= sizeof(struct ifreq);
1198 done += sizeof(struct ifreq);
/* Select a source address on @dev suitable for reaching @dst within
 * @scope: prefer a primary address whose subnet contains @dst; fall back
 * to any in-scope address on the L3 master (VRF) device, then to any
 * in-scope non-link address on any device sharing the same master.
 * Returns the chosen address or 0 (exact fallthrough not fully visible
 * in this excerpt).
 */
1204 __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope)
1207 struct in_device *in_dev;
1208 struct net *net = dev_net(dev);
1212 in_dev = __in_dev_get_rcu(dev);
1216 for_primary_ifa(in_dev) {
1217 if (ifa->ifa_scope > scope)
1219 if (!dst || inet_ifa_match(dst, ifa)) {
1220 addr = ifa->ifa_local;
/* Remember a scope-acceptable address even if not subnet-matching. */
1224 addr = ifa->ifa_local;
1225 } endfor_ifa(in_dev);
1230 master_idx = l3mdev_master_ifindex_rcu(dev);
1232 /* For VRFs, the VRF device takes the place of the loopback device,
1233 * with addresses on it being preferred. Note in such cases the
1234 * loopback device will be among the devices that fail the master_idx
1235 * equality check in the loop below.
1238 (dev = dev_get_by_index_rcu(net, master_idx)) &&
1239 (in_dev = __in_dev_get_rcu(dev))) {
1240 for_primary_ifa(in_dev) {
1241 if (ifa->ifa_scope != RT_SCOPE_LINK &&
1242 ifa->ifa_scope <= scope) {
1243 addr = ifa->ifa_local;
1246 } endfor_ifa(in_dev);
1249 /* Not loopback addresses on loopback should be preferred
1250 in this case. It is important that lo is the first interface
1253 for_each_netdev_rcu(net, dev) {
1254 if (l3mdev_master_ifindex_rcu(dev) != master_idx)
1257 in_dev = __in_dev_get_rcu(dev);
1261 for_primary_ifa(in_dev) {
1262 if (ifa->ifa_scope != RT_SCOPE_LINK &&
1263 ifa->ifa_scope <= scope) {
1264 addr = ifa->ifa_local;
1267 } endfor_ifa(in_dev);
/* Check whether @local (or, if @local is 0, some auto-selected address)
 * exists on @in_dev subject to @dst-subnet and @scope constraints.
 * Returns the confirmed address or 0.
 * NOTE(review): the loop opener and several condition lines are missing
 * from this excerpt — the visible fragments match the upstream
 * two-phase "exact match, then same-subnet fallback" logic.
 */
1275 static __be32 confirm_addr_indev(struct in_device *in_dev, __be32 dst,
1276 __be32 local, int scope)
1283 (local == ifa->ifa_local || !local) &&
1284 ifa->ifa_scope <= scope) {
1285 addr = ifa->ifa_local;
1290 same = (!local || inet_ifa_match(local, ifa)) &&
1291 (!dst || inet_ifa_match(dst, ifa));
1295 /* Is the selected addr into dst subnet? */
1296 if (inet_ifa_match(addr, ifa))
1298 /* No, then can we use new local src? */
1299 if (ifa->ifa_scope <= scope) {
1300 addr = ifa->ifa_local;
1303 /* search for large dst subnet for addr */
1307 } endfor_ifa(in_dev);
1309 return same ? addr : 0;
1313 * Confirm that local IP address exists using wildcards:
1314 * - net: netns to check, cannot be NULL
1315 * - in_dev: only on this interface, NULL=any interface
1316 * - dst: only in the same subnet as dst, 0=any dst
1317 * - local: address, 0=autoselect the local address
1318 * - scope: maximum allowed scope value for the local address
1320 __be32 inet_confirm_addr(struct net *net, struct in_device *in_dev,
1321 __be32 dst, __be32 local, int scope)
1324 struct net_device *dev;
/* Single-interface case: check just that in_device. */
1327 return confirm_addr_indev(in_dev, dst, local, scope);
/* Otherwise scan every device in the namespace until one confirms. */
1330 for_each_netdev_rcu(net, dev) {
1331 in_dev = __in_dev_get_rcu(dev);
1333 addr = confirm_addr_indev(in_dev, dst, local, scope);
1342 EXPORT_SYMBOL(inet_confirm_addr);
/* Subscribe @nb to IPv4 address add/remove events (NETDEV_UP/DOWN with a
 * struct in_ifaddr * payload).
 */
1348 int register_inetaddr_notifier(struct notifier_block *nb)
1350 return blocking_notifier_chain_register(&inetaddr_chain, nb);
1352 EXPORT_SYMBOL(register_inetaddr_notifier);
/* Remove @nb from the IPv4 address event notifier chain. */
1354 int unregister_inetaddr_notifier(struct notifier_block *nb)
1356 return blocking_notifier_chain_unregister(&inetaddr_chain, nb);
1358 EXPORT_SYMBOL(unregister_inetaddr_notifier);
1360 /* Rename ifa_labels for a device name change. Make some effort to preserve
1361 * existing alias numbering and to create unique labels if possible.
1363 static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
1365 struct in_ifaddr *ifa;
1368 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
1369 char old[IFNAMSIZ], *dot;
1371 memcpy(old, ifa->ifa_label, IFNAMSIZ);
1372 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
/* Re-append the old ":N" alias suffix, or synthesize one if missing. */
1375 dot = strchr(old, ':');
1377 sprintf(old, ":%d", named);
/* Append the suffix, truncating the new name if it would overflow. */
1380 if (strlen(dot) + strlen(dev->name) < IFNAMSIZ)
1381 strcat(ifa->ifa_label, dot);
1383 strcpy(ifa->ifa_label + (IFNAMSIZ - strlen(dot) - 1), dot);
1385 rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
/* IPv4 can only run on devices whose MTU is at least IPV4_MIN_MTU. */
1389 static bool inetdev_valid_mtu(unsigned int mtu)
1391 return mtu >= IPV4_MIN_MTU;
/* Broadcast a gratuitous ARP (sender == target == ifa_local) for every
 * address on the device, so neighbours refresh their ARP caches after an
 * address or link change.
 */
1394 static void inetdev_send_gratuitous_arp(struct net_device *dev,
1395 struct in_device *in_dev)
1398 struct in_ifaddr *ifa;
1400 for (ifa = in_dev->ifa_list; ifa;
1401 ifa = ifa->ifa_next) {
1402 arp_send(ARPOP_REQUEST, ETH_P_ARP,
1403 ifa->ifa_local, dev,
1404 ifa->ifa_local, NULL,
1405 dev->dev_addr, NULL);
1409 /* Called only under RTNL semaphore */
/* netdevice notifier: reacts to device lifecycle events — creates the
 * in_device on NETDEV_REGISTER (and on MTU becoming valid again),
 * auto-assigns 127.0.0.1/8 when loopback comes up, sends gratuitous ARP
 * on address/link change, destroys the in_device on unregister or
 * too-small MTU, and renames labels / re-registers sysctls on rename.
 * NOTE(review): several case labels, break statements, and the function
 * epilogue are missing from this excerpt.
 */
1411 static int inetdev_event(struct notifier_block *this, unsigned long event,
1414 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1415 struct in_device *in_dev = __in_dev_get_rtnl(dev);
/* No in_device yet: only REGISTER (or MTU recovery) can create one. */
1420 if (event == NETDEV_REGISTER) {
1421 in_dev = inetdev_init(dev);
1423 return notifier_from_errno(PTR_ERR(in_dev));
1424 if (dev->flags & IFF_LOOPBACK) {
1425 IN_DEV_CONF_SET(in_dev, NOXFRM, 1);
1426 IN_DEV_CONF_SET(in_dev, NOPOLICY, 1);
1428 } else if (event == NETDEV_CHANGEMTU) {
1429 /* Re-enabling IP */
1430 if (inetdev_valid_mtu(dev->mtu))
1431 in_dev = inetdev_init(dev);
1437 case NETDEV_REGISTER:
/* in_dev already present on REGISTER should be impossible. */
1438 pr_debug("%s: bug\n", __func__);
1439 RCU_INIT_POINTER(dev->ip_ptr, NULL);
1442 if (!inetdev_valid_mtu(dev->mtu))
/* Loopback coming up: auto-configure 127.0.0.1/8, host scope. */
1444 if (dev->flags & IFF_LOOPBACK) {
1445 struct in_ifaddr *ifa = inet_alloc_ifa();
1448 INIT_HLIST_NODE(&ifa->hash);
1450 ifa->ifa_address = htonl(INADDR_LOOPBACK);
1451 ifa->ifa_prefixlen = 8;
1452 ifa->ifa_mask = inet_make_mask(8);
1453 in_dev_hold(in_dev);
1454 ifa->ifa_dev = in_dev;
1455 ifa->ifa_scope = RT_SCOPE_HOST;
1456 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
1457 set_ifa_lifetime(ifa, INFINITY_LIFE_TIME,
1458 INFINITY_LIFE_TIME);
1459 ipv4_devconf_setall(in_dev);
1460 neigh_parms_data_state_setall(in_dev->arp_parms);
1461 inet_insert_ifa(ifa);
1466 case NETDEV_CHANGEADDR:
1467 if (!IN_DEV_ARP_NOTIFY(in_dev))
1470 case NETDEV_NOTIFY_PEERS:
1471 /* Send gratuitous ARP to notify of link change */
1472 inetdev_send_gratuitous_arp(dev, in_dev);
1477 case NETDEV_PRE_TYPE_CHANGE:
1478 ip_mc_unmap(in_dev);
1480 case NETDEV_POST_TYPE_CHANGE:
1481 ip_mc_remap(in_dev);
1483 case NETDEV_CHANGEMTU:
1484 if (inetdev_valid_mtu(dev->mtu))
1486 /* disable IP when MTU is not enough */
1487 case NETDEV_UNREGISTER:
1488 inetdev_destroy(in_dev);
1490 case NETDEV_CHANGENAME:
1491 /* Do not notify about label change, this event is
1492 * not interesting to applications using netlink.
1494 inetdev_changename(dev, in_dev);
1496 devinet_sysctl_unregister(in_dev);
1497 devinet_sysctl_register(in_dev);
/* Registered at init time; routes netdev events to inetdev_event(). */
1504 static struct notifier_block ip_netdev_notifier = {
1505 .notifier_call = inetdev_event,
1508 static size_t inet_nlmsg_size(void)
1510 return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
1511 + nla_total_size(4) /* IFA_ADDRESS */
1512 + nla_total_size(4) /* IFA_LOCAL */
1513 + nla_total_size(4) /* IFA_BROADCAST */
1514 + nla_total_size(IFNAMSIZ) /* IFA_LABEL */
1515 + nla_total_size(4) /* IFA_FLAGS */
1516 + nla_total_size(sizeof(struct ifa_cacheinfo)); /* IFA_CACHEINFO */
1519 static inline u32 cstamp_delta(unsigned long cstamp)
1521 return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
1524 static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
1525 unsigned long tstamp, u32 preferred, u32 valid)
1527 struct ifa_cacheinfo ci;
1529 ci.cstamp = cstamp_delta(cstamp);
1530 ci.tstamp = cstamp_delta(tstamp);
1531 ci.ifa_prefered = preferred;
1532 ci.ifa_valid = valid;
1534 return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
/*
 * Build one RTM_NEWADDR/RTM_DELADDR message for @ifa into @skb.
 * Emits the ifaddrmsg header followed by the optional address, label,
 * flags and cacheinfo attributes.  NOTE(review): the nlmsg_put() NULL
 * check, parts of the lifetime arithmetic and the return statements are
 * not visible in this excerpt.
 */
1537 static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
1538 u32 portid, u32 seq, int event, unsigned int flags)
1540 struct ifaddrmsg *ifm;
1541 struct nlmsghdr *nlh;
1542 u32 preferred, valid;
1544 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), flags);
1548 ifm = nlmsg_data(nlh);
1549 ifm->ifa_family = AF_INET;
1550 ifm->ifa_prefixlen = ifa->ifa_prefixlen;
1551 ifm->ifa_flags = ifa->ifa_flags;
1552 ifm->ifa_scope = ifa->ifa_scope;
1553 ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
/*
 * Non-permanent addresses report remaining lifetimes, computed from
 * the last-update timestamp; permanent ones report "infinite".
 */
1555 if (!(ifm->ifa_flags & IFA_F_PERMANENT)) {
1556 preferred = ifa->ifa_preferred_lft;
1557 valid = ifa->ifa_valid_lft;
1558 if (preferred != INFINITY_LIFE_TIME) {
/* Seconds elapsed since the address was last updated. */
1559 long tval = (jiffies - ifa->ifa_tstamp) / HZ;
1561 if (preferred > tval)
1565 if (valid != INFINITY_LIFE_TIME) {
1573 preferred = INFINITY_LIFE_TIME;
1574 valid = INFINITY_LIFE_TIME;
/* Optional attributes are only emitted when non-zero/non-empty. */
1576 if ((ifa->ifa_address &&
1577 nla_put_in_addr(skb, IFA_ADDRESS, ifa->ifa_address)) ||
1579 nla_put_in_addr(skb, IFA_LOCAL, ifa->ifa_local)) ||
1580 (ifa->ifa_broadcast &&
1581 nla_put_in_addr(skb, IFA_BROADCAST, ifa->ifa_broadcast)) ||
1582 (ifa->ifa_label[0] &&
1583 nla_put_string(skb, IFA_LABEL, ifa->ifa_label)) ||
1584 nla_put_u32(skb, IFA_FLAGS, ifa->ifa_flags) ||
1585 put_cacheinfo(skb, ifa->ifa_cstamp, ifa->ifa_tstamp,
1587 goto nla_put_failure;
1589 nlmsg_end(skb, nlh);
/* On attribute overflow, discard the partially built message. */
1593 nlmsg_cancel(skb, nlh);
/*
 * RTM_GETADDR dump callback: walk every device hash bucket and every
 * address on each device, emitting one message per address.  The dump
 * is resumable: cb->args[] records the bucket (s_h), device index
 * (s_idx) and per-device address index (s_ip_idx) to restart from.
 */
1597 static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
1599 struct net *net = sock_net(skb->sk);
1602 int ip_idx, s_ip_idx;
1603 struct net_device *dev;
1604 struct in_device *in_dev;
1605 struct in_ifaddr *ifa;
1606 struct hlist_head *head;
1609 s_idx = idx = cb->args[1];
1610 s_ip_idx = ip_idx = cb->args[2];
1612 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
1614 head = &net->dev_index_head[h];
/* Generation id lets userspace detect changes made mid-dump. */
1616 cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
1618 hlist_for_each_entry_rcu(dev, head, index_hlist) {
/* Restart the inner address index when past the resume point. */
1621 if (h > s_h || idx > s_idx)
1623 in_dev = __in_dev_get_rcu(dev);
1627 for (ifa = in_dev->ifa_list, ip_idx = 0; ifa;
1628 ifa = ifa->ifa_next, ip_idx++) {
/* Skip addresses already delivered in a previous batch. */
1629 if (ip_idx < s_ip_idx)
1631 if (inet_fill_ifaddr(skb, ifa,
1632 NETLINK_CB(cb->skb).portid,
/* skb full: stop here and let the next call resume. */
1634 RTM_NEWADDR, NLM_F_MULTI) < 0) {
1638 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
/* Record where to resume. */
1649 cb->args[2] = ip_idx;
/*
 * Broadcast an address event (@event, e.g. RTM_NEWADDR) for @ifa to the
 * RTNLGRP_IPV4_IFADDR multicast group.  @nlh, when non-NULL, is the
 * request this notification echoes (its sequence number is reused).
 * On failure the group is informed via rtnl_set_sk_err().
 */
1654 static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
1657 struct sk_buff *skb;
1658 u32 seq = nlh ? nlh->nlmsg_seq : 0;
1662 net = dev_net(ifa->ifa_dev->dev);
1663 skb = nlmsg_new(inet_nlmsg_size(), GFP_KERNEL);
1667 err = inet_fill_ifaddr(skb, ifa, portid, seq, event, 0);
1669 /* -EMSGSIZE implies BUG in inet_nlmsg_size() */
1670 WARN_ON(err == -EMSGSIZE);
1674 rtnl_notify(skb, net, portid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL);
1678 rtnl_set_sk_err(net, RTNLGRP_IPV4_IFADDR, err);
/*
 * Size needed for the IFLA_INET_CONF attribute in an IFLA_AF_SPEC nest.
 * NOTE(review): the !in_dev early-return appears to be elided from this
 * excerpt; as shown, in_dev is fetched but the bail-out is not visible.
 */
1681 static size_t inet_get_link_af_size(const struct net_device *dev,
1682 u32 ext_filter_mask)
1684 struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);
1689 return nla_total_size(IPV4_DEVCONF_MAX * 4); /* IFLA_INET_CONF */
/*
 * Fill IFLA_INET_CONF: a flat array of all per-device devconf values,
 * copied as 32-bit words into one reserved attribute.
 */
1692 static int inet_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
1693 u32 ext_filter_mask)
1695 struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);
1702 nla = nla_reserve(skb, IFLA_INET_CONF, IPV4_DEVCONF_MAX * 4);
1706 for (i = 0; i < IPV4_DEVCONF_MAX; i++)
1707 ((u32 *) nla_data(nla))[i] = in_dev->cnf.data[i];
/* Netlink policy for the IFLA_AF_SPEC/AF_INET nest. */
1712 static const struct nla_policy inet_af_policy[IFLA_INET_MAX+1] = {
1713 [IFLA_INET_CONF] = { .type = NLA_NESTED },
/*
 * Validate an AF_INET link attribute before inet_set_link_af() applies
 * it: the device must have IPv4 enabled, the nest must parse against
 * inet_af_policy, and each nested devconf id must be in range.
 * NOTE(review): the per-attribute length check and the return
 * statements are not visible in this excerpt.
 */
1716 static int inet_validate_link_af(const struct net_device *dev,
1717 const struct nlattr *nla)
1719 struct nlattr *a, *tb[IFLA_INET_MAX+1];
1722 if (dev && !__in_dev_get_rtnl(dev))
1723 return -EAFNOSUPPORT;
1725 err = nla_parse_nested(tb, IFLA_INET_MAX, nla, inet_af_policy);
1729 if (tb[IFLA_INET_CONF]) {
1730 nla_for_each_nested(a, tb[IFLA_INET_CONF], rem) {
1731 int cfgid = nla_type(a);
/* devconf indices are 1-based; reject anything out of range. */
1736 if (cfgid <= 0 || cfgid > IPV4_DEVCONF_MAX)
/*
 * Apply an already-validated AF_INET link attribute: copy each nested
 * value into the device's devconf.  Parsing here uses a NULL policy
 * because inet_validate_link_af() ran first.
 */
1744 static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla)
1746 struct in_device *in_dev = __in_dev_get_rtnl(dev);
1747 struct nlattr *a, *tb[IFLA_INET_MAX+1];
1751 return -EAFNOSUPPORT;
1753 if (nla_parse_nested(tb, IFLA_INET_MAX, nla, NULL) < 0)
1756 if (tb[IFLA_INET_CONF]) {
1757 nla_for_each_nested(a, tb[IFLA_INET_CONF], rem)
1758 ipv4_devconf_set(in_dev, nla_type(a), nla_get_u32(a));
/*
 * Message size for a RTM_NEWNETCONF notification: header + ifindex,
 * plus one 4-byte attribute per requested field (all of them when
 * @type is NETCONFA_ALL).  NOTE(review): the "all" flag declaration
 * and assignment are not visible in this excerpt.
 */
1764 static int inet_netconf_msgsize_devconf(int type)
1766 int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
1767 + nla_total_size(4); /* NETCONFA_IFINDEX */
1770 if (type == NETCONFA_ALL)
1773 if (all || type == NETCONFA_FORWARDING)
1774 size += nla_total_size(4);
1775 if (all || type == NETCONFA_RP_FILTER)
1776 size += nla_total_size(4);
1777 if (all || type == NETCONFA_MC_FORWARDING)
1778 size += nla_total_size(4);
1779 if (all || type == NETCONFA_PROXY_NEIGH)
1780 size += nla_total_size(4);
1781 if (all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
1782 size += nla_total_size(4);
/*
 * Build one RTM_NEWNETCONF message describing @devconf for @ifindex.
 * @type selects a single NETCONFA_* field, or NETCONFA_ALL to emit
 * every supported field.  Note NETCONFA_PROXY_NEIGH is backed by the
 * PROXY_ARP devconf entry.
 */
1787 static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
1788 struct ipv4_devconf *devconf, u32 portid,
1789 u32 seq, int event, unsigned int flags,
1792 struct nlmsghdr *nlh;
1793 struct netconfmsg *ncm;
1796 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
1801 if (type == NETCONFA_ALL)
1804 ncm = nlmsg_data(nlh);
1805 ncm->ncm_family = AF_INET;
1807 if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
1808 goto nla_put_failure;
1810 if ((all || type == NETCONFA_FORWARDING) &&
1811 nla_put_s32(skb, NETCONFA_FORWARDING,
1812 IPV4_DEVCONF(*devconf, FORWARDING)) < 0)
1813 goto nla_put_failure;
1814 if ((all || type == NETCONFA_RP_FILTER) &&
1815 nla_put_s32(skb, NETCONFA_RP_FILTER,
1816 IPV4_DEVCONF(*devconf, RP_FILTER)) < 0)
1817 goto nla_put_failure;
1818 if ((all || type == NETCONFA_MC_FORWARDING) &&
1819 nla_put_s32(skb, NETCONFA_MC_FORWARDING,
1820 IPV4_DEVCONF(*devconf, MC_FORWARDING)) < 0)
1821 goto nla_put_failure;
1822 if ((all || type == NETCONFA_PROXY_NEIGH) &&
1823 nla_put_s32(skb, NETCONFA_PROXY_NEIGH,
1824 IPV4_DEVCONF(*devconf, PROXY_ARP)) < 0)
1825 goto nla_put_failure;
1826 if ((all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
1827 nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
1828 IPV4_DEVCONF(*devconf, IGNORE_ROUTES_WITH_LINKDOWN)) < 0)
1829 goto nla_put_failure;
1831 nlmsg_end(skb, nlh);
/* Attribute space ran out: cancel the partially built message. */
1835 nlmsg_cancel(skb, nlh);
/*
 * Notify RTNLGRP_IPV4_NETCONF listeners that devconf field @type
 * changed for @ifindex.  Allocation or fill failure is reported to the
 * group via rtnl_set_sk_err().
 */
1839 void inet_netconf_notify_devconf(struct net *net, int type, int ifindex,
1840 struct ipv4_devconf *devconf)
1842 struct sk_buff *skb;
1845 skb = nlmsg_new(inet_netconf_msgsize_devconf(type), GFP_KERNEL);
1849 err = inet_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
1850 RTM_NEWNETCONF, 0, type);
1852 /* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
1853 WARN_ON(err == -EMSGSIZE);
1857 rtnl_notify(skb, net, 0, RTNLGRP_IPV4_NETCONF, NULL, GFP_KERNEL);
1861 rtnl_set_sk_err(net, RTNLGRP_IPV4_NETCONF, err);
/* Netlink policy for RTM_GETNETCONF requests. */
1864 static const struct nla_policy devconf_ipv4_policy[NETCONFA_MAX+1] = {
1865 [NETCONFA_IFINDEX] = { .len = sizeof(int) },
1866 [NETCONFA_FORWARDING] = { .len = sizeof(int) },
1867 [NETCONFA_RP_FILTER] = { .len = sizeof(int) },
1868 [NETCONFA_PROXY_NEIGH] = { .len = sizeof(int) },
1869 [NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN] = { .len = sizeof(int) },
/*
 * RTM_GETNETCONF handler: look up the devconf block selected by
 * NETCONFA_IFINDEX (a real ifindex, or the NETCONFA_IFINDEX_ALL /
 * _DEFAULT pseudo values) and unicast a full RTM_NEWNETCONF reply.
 */
1872 static int inet_netconf_get_devconf(struct sk_buff *in_skb,
1873 struct nlmsghdr *nlh)
1875 struct net *net = sock_net(in_skb->sk);
1876 struct nlattr *tb[NETCONFA_MAX+1];
1877 struct netconfmsg *ncm;
1878 struct sk_buff *skb;
1879 struct ipv4_devconf *devconf;
1880 struct in_device *in_dev;
1881 struct net_device *dev;
1885 err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX,
1886 devconf_ipv4_policy);
/* The ifindex attribute is mandatory. */
1891 if (!tb[NETCONFA_IFINDEX])
1894 ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
1896 case NETCONFA_IFINDEX_ALL:
1897 devconf = net->ipv4.devconf_all;
1899 case NETCONFA_IFINDEX_DEFAULT:
1900 devconf = net->ipv4.devconf_dflt;
/* Otherwise resolve a real device and use its per-device conf. */
1903 dev = __dev_get_by_index(net, ifindex);
1906 in_dev = __in_dev_get_rtnl(dev);
1909 devconf = &in_dev->cnf;
/* Reply always carries every field, hence NETCONFA_ALL sizing. */
1914 skb = nlmsg_new(inet_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL);
1918 err = inet_netconf_fill_devconf(skb, ifindex, devconf,
1919 NETLINK_CB(in_skb).portid,
1920 nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
1923 /* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
1924 WARN_ON(err == -EMSGSIZE);
1928 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
/*
 * RTM_GETNETCONF dump: emit one RTM_NEWNETCONF per device, then one
 * for the "all" pseudo-entry and one for the "default" pseudo-entry
 * (indexed as two extra virtual hash buckets past NETDEV_HASHENTRIES).
 * Resumable via cb->args like inet_dump_ifaddr().
 */
1933 static int inet_netconf_dump_devconf(struct sk_buff *skb,
1934 struct netlink_callback *cb)
1936 struct net *net = sock_net(skb->sk);
1939 struct net_device *dev;
1940 struct in_device *in_dev;
1941 struct hlist_head *head;
1944 s_idx = idx = cb->args[1];
1946 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
1948 head = &net->dev_index_head[h];
/* Generation id lets userspace detect mid-dump changes. */
1950 cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
1952 hlist_for_each_entry_rcu(dev, head, index_hlist) {
1955 in_dev = __in_dev_get_rcu(dev);
1959 if (inet_netconf_fill_devconf(skb, dev->ifindex,
1961 NETLINK_CB(cb->skb).portid,
/* skb full: stop and resume on the next call. */
1965 NETCONFA_ALL) < 0) {
1969 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
/* Virtual bucket: the per-net "all" devconf. */
1975 if (h == NETDEV_HASHENTRIES) {
1976 if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
1977 net->ipv4.devconf_all,
1978 NETLINK_CB(cb->skb).portid,
1980 RTM_NEWNETCONF, NLM_F_MULTI,
/* Virtual bucket: the per-net "default" devconf. */
1986 if (h == NETDEV_HASHENTRIES + 1) {
1987 if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
1988 net->ipv4.devconf_dflt,
1989 NETLINK_CB(cb->skb).portid,
1991 RTM_NEWNETCONF, NLM_F_MULTI,
2004 #ifdef CONFIG_SYSCTL
/*
 * Propagate a change of default devconf entry @i to every device that
 * has not explicitly overridden it (tracked by the per-conf state
 * bitmap).  NOTE(review): the surrounding rcu_read_lock()/unlock()
 * pair is not visible in this excerpt.
 */
2006 static void devinet_copy_dflt_conf(struct net *net, int i)
2008 struct net_device *dev;
2011 for_each_netdev_rcu(net, dev) {
2012 struct in_device *in_dev;
2014 in_dev = __in_dev_get_rcu(dev);
/* Only devices that never set entry i locally inherit the default. */
2015 if (in_dev && !test_bit(i, in_dev->cnf.state))
2016 in_dev->cnf.data[i] = net->ipv4.devconf_dflt->data[i];
2021 /* called with RTNL locked */
/*
 * Apply a change of the global "all/forwarding" setting: mirror it
 * into the default conf and into every device's conf, flip
 * accept_redirects to its complement (routers should not accept
 * redirects), and send netconf notifications for each scope.
 */
2022 static void inet_forward_change(struct net *net)
2024 struct net_device *dev;
2025 int on = IPV4_DEVCONF_ALL(net, FORWARDING);
2027 IPV4_DEVCONF_ALL(net, ACCEPT_REDIRECTS) = !on;
2028 IPV4_DEVCONF_DFLT(net, FORWARDING) = on;
2029 inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
2030 NETCONFA_IFINDEX_ALL,
2031 net->ipv4.devconf_all);
2032 inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
2033 NETCONFA_IFINDEX_DEFAULT,
2034 net->ipv4.devconf_dflt);
2036 for_each_netdev(net, dev) {
2037 struct in_device *in_dev;
/* LRO-merged frames must not be forwarded; disable it when enabling. */
2040 dev_disable_lro(dev);
2042 in_dev = __in_dev_get_rtnl(dev);
2044 IN_DEV_CONF_SET(in_dev, FORWARDING, on);
2045 inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
2046 dev->ifindex, &in_dev->cnf);
2051 static int devinet_conf_ifindex(struct net *net, struct ipv4_devconf *cnf)
2053 if (cnf == net->ipv4.devconf_dflt)
2054 return NETCONFA_IFINDEX_DEFAULT;
2055 else if (cnf == net->ipv4.devconf_all)
2056 return NETCONFA_IFINDEX_ALL;
2058 struct in_device *idev
2059 = container_of(cnf, struct in_device, cnf);
2060 return idev->dev->ifindex;
/*
 * Generic sysctl handler for per-conf integer entries.  Beyond the
 * plain proc_dointvec(), on a successful write it: marks the entry as
 * locally set, propagates default-conf changes to devices, flushes the
 * route cache where the old value could linger in cached decisions,
 * and emits netconf notifications for fields userspace tracks.
 */
2064 static int devinet_conf_proc(struct ctl_table *ctl, int write,
2065 void __user *buffer,
2066 size_t *lenp, loff_t *ppos)
2068 int old_value = *(int *)ctl->data;
2069 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2070 int new_value = *(int *)ctl->data;
2073 struct ipv4_devconf *cnf = ctl->extra1;
2074 struct net *net = ctl->extra2;
/* Recover the devconf array index from the data pointer offset. */
2075 int i = (int *)ctl->data - cnf->data;
/* Remember this entry was set explicitly (blocks default inheritance). */
2078 set_bit(i, cnf->state);
2080 if (cnf == net->ipv4.devconf_dflt)
2081 devinet_copy_dflt_conf(net, i);
/* Turning these off invalidates cached routing decisions. */
2082 if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1 ||
2083 i == IPV4_DEVCONF_ROUTE_LOCALNET - 1)
2084 if ((new_value == 0) && (old_value != 0))
2085 rt_cache_flush(net);
2087 if (i == IPV4_DEVCONF_RP_FILTER - 1 &&
2088 new_value != old_value) {
2089 ifindex = devinet_conf_ifindex(net, cnf);
2090 inet_netconf_notify_devconf(net, NETCONFA_RP_FILTER,
2093 if (i == IPV4_DEVCONF_PROXY_ARP - 1 &&
2094 new_value != old_value) {
2095 ifindex = devinet_conf_ifindex(net, cnf);
2096 inet_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH,
2099 if (i == IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN - 1 &&
2100 new_value != old_value) {
2101 ifindex = devinet_conf_ifindex(net, cnf);
2102 inet_netconf_notify_devconf(net, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
/*
 * Sysctl handler for the "forwarding" entries (global, default and
 * per-device).  A write that changes the value needs RTNL; if the lock
 * cannot be taken without sleeping in a bad context, the old value is
 * restored and the syscall restarted.  The global entry fans out via
 * inet_forward_change(); a per-device entry disables LRO and notifies
 * netconf listeners for that device.
 */
2110 static int devinet_sysctl_forward(struct ctl_table *ctl, int write,
2111 void __user *buffer,
2112 size_t *lenp, loff_t *ppos)
2114 int *valp = ctl->data;
2117 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2119 if (write && *valp != val) {
2120 struct net *net = ctl->extra2;
/* The "default" entry needs no RTNL work beyond the notification. */
2122 if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) {
2123 if (!rtnl_trylock()) {
2124 /* Restore the original values before restarting */
2127 return restart_syscall();
2129 if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) {
2130 inet_forward_change(net);
2132 struct ipv4_devconf *cnf = ctl->extra1;
2133 struct in_device *idev =
2134 container_of(cnf, struct in_device, cnf);
/* Forwarded traffic must not see LRO-merged frames. */
2136 dev_disable_lro(idev->dev);
2137 inet_netconf_notify_devconf(net,
2138 NETCONFA_FORWARDING,
2143 rt_cache_flush(net);
2145 inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
2146 NETCONFA_IFINDEX_DEFAULT,
2147 net->ipv4.devconf_dflt);
/*
 * Sysctl handler for entries whose change invalidates cached routes:
 * plain proc_dointvec() plus a route-cache flush when a write changed
 * the value.  NOTE(review): the capture of the old value ("val") is
 * not visible in this excerpt.
 */
2153 static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
2154 void __user *buffer,
2155 size_t *lenp, loff_t *ppos)
2157 int *valp = ctl->data;
2159 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2160 struct net *net = ctl->extra2;
2162 if (write && *valp != val)
2163 rt_cache_flush(net);
/*
 * Helpers building one ctl_table entry per devconf field.  The entry's
 * data pointer is offset into the template ipv4_devconf; it is later
 * rebased onto the real per-net/per-device conf by
 * __devinet_sysctl_register().
 */
2168 #define DEVINET_SYSCTL_ENTRY(attr, name, mval, proc) \
2171 .data = ipv4_devconf.data + \
2172 IPV4_DEVCONF_ ## attr - 1, \
2173 .maxlen = sizeof(int), \
2175 .proc_handler = proc, \
2176 .extra1 = &ipv4_devconf, \
/* Read-write entry with the generic devconf handler. */
2179 #define DEVINET_SYSCTL_RW_ENTRY(attr, name) \
2180 DEVINET_SYSCTL_ENTRY(attr, name, 0644, devinet_conf_proc)
/* Read-only entry (e.g. mc_forwarding is kernel-managed). */
2182 #define DEVINET_SYSCTL_RO_ENTRY(attr, name) \
2183 DEVINET_SYSCTL_ENTRY(attr, name, 0444, devinet_conf_proc)
/* Entry with a custom handler (e.g. forwarding). */
2185 #define DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, proc) \
2186 DEVINET_SYSCTL_ENTRY(attr, name, 0644, proc)
/* Entry whose change flushes the route cache. */
2188 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
2189 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
/*
 * Template sysctl table: one entry per devconf field, duplicated and
 * rebased per net/per device by __devinet_sysctl_register().
 */
2191 static struct devinet_sysctl_table {
2192 struct ctl_table_header *sysctl_header;
2193 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
2194 } devinet_sysctl = {
2196 DEVINET_SYSCTL_COMPLEX_ENTRY(FORWARDING, "forwarding",
2197 devinet_sysctl_forward),
2198 DEVINET_SYSCTL_RO_ENTRY(MC_FORWARDING, "mc_forwarding"),
2200 DEVINET_SYSCTL_RW_ENTRY(ACCEPT_REDIRECTS, "accept_redirects"),
2201 DEVINET_SYSCTL_RW_ENTRY(SECURE_REDIRECTS, "secure_redirects"),
2202 DEVINET_SYSCTL_RW_ENTRY(SHARED_MEDIA, "shared_media"),
2203 DEVINET_SYSCTL_RW_ENTRY(RP_FILTER, "rp_filter"),
2204 DEVINET_SYSCTL_RW_ENTRY(SEND_REDIRECTS, "send_redirects"),
2205 DEVINET_SYSCTL_RW_ENTRY(ACCEPT_SOURCE_ROUTE,
2206 "accept_source_route"),
2207 DEVINET_SYSCTL_RW_ENTRY(ACCEPT_LOCAL, "accept_local"),
2208 DEVINET_SYSCTL_RW_ENTRY(SRC_VMARK, "src_valid_mark"),
2209 DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP, "proxy_arp"),
2210 DEVINET_SYSCTL_RW_ENTRY(MEDIUM_ID, "medium_id"),
2211 DEVINET_SYSCTL_RW_ENTRY(BOOTP_RELAY, "bootp_relay"),
2212 DEVINET_SYSCTL_RW_ENTRY(LOG_MARTIANS, "log_martians"),
2213 DEVINET_SYSCTL_RW_ENTRY(TAG, "tag"),
2214 DEVINET_SYSCTL_RW_ENTRY(ARPFILTER, "arp_filter"),
2215 DEVINET_SYSCTL_RW_ENTRY(ARP_ANNOUNCE, "arp_announce"),
2216 DEVINET_SYSCTL_RW_ENTRY(ARP_IGNORE, "arp_ignore"),
2217 DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"),
2218 DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"),
2219 DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP_PVLAN, "proxy_arp_pvlan"),
2220 DEVINET_SYSCTL_RW_ENTRY(FORCE_IGMP_VERSION,
2221 "force_igmp_version"),
2222 DEVINET_SYSCTL_RW_ENTRY(IGMPV2_UNSOLICITED_REPORT_INTERVAL,
2223 "igmpv2_unsolicited_report_interval"),
2224 DEVINET_SYSCTL_RW_ENTRY(IGMPV3_UNSOLICITED_REPORT_INTERVAL,
2225 "igmpv3_unsolicited_report_interval"),
2226 DEVINET_SYSCTL_RW_ENTRY(IGNORE_ROUTES_WITH_LINKDOWN,
2227 "ignore_routes_with_linkdown"),
2228 DEVINET_SYSCTL_RW_ENTRY(DROP_GRATUITOUS_ARP,
2229 "drop_gratuitous_arp"),
2231 DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"),
2232 DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"),
2233 DEVINET_SYSCTL_FLUSHING_ENTRY(PROMOTE_SECONDARIES,
2234 "promote_secondaries"),
2235 DEVINET_SYSCTL_FLUSHING_ENTRY(ROUTE_LOCALNET,
2237 DEVINET_SYSCTL_FLUSHING_ENTRY(DROP_UNICAST_IN_L2_MULTICAST,
2238 "drop_unicast_in_l2_multicast"),
/*
 * Duplicate the template table, rebase every entry's data pointer from
 * the template ipv4_devconf onto @p, and register it under
 * net/ipv4/conf/<dev_name>.  On success a NETCONFA_ALL notification
 * advertises the new conf block.
 */
2242 static int __devinet_sysctl_register(struct net *net, char *dev_name,
2243 int ifindex, struct ipv4_devconf *p)
2246 struct devinet_sysctl_table *t;
2247 char path[sizeof("net/ipv4/conf/") + IFNAMSIZ];
2249 t = kmemdup(&devinet_sysctl, sizeof(*t), GFP_KERNEL);
/* -1: keep the terminating empty ctl_table entry untouched. */
2253 for (i = 0; i < ARRAY_SIZE(t->devinet_vars) - 1; i++) {
/* Rebase the template's data offset onto the real conf block @p. */
2254 t->devinet_vars[i].data += (char *)p - (char *)&ipv4_devconf;
2255 t->devinet_vars[i].extra1 = p;
2256 t->devinet_vars[i].extra2 = net;
2259 snprintf(path, sizeof(path), "net/ipv4/conf/%s", dev_name);
2261 t->sysctl_header = register_net_sysctl(net, path, t->devinet_vars);
2262 if (!t->sysctl_header)
2267 inet_netconf_notify_devconf(net, NETCONFA_ALL, ifindex, p);
/* Tear down the sysctl table registered for @cnf, if any. */
2276 static void __devinet_sysctl_unregister(struct ipv4_devconf *cnf)
2278 struct devinet_sysctl_table *t = cnf->sysctl;
2284 unregister_net_sysctl_table(t->sysctl_header);
/*
 * Register both sysctl trees for a device: the neighbour (ARP) table
 * first, then the devconf table; the former is rolled back if the
 * latter fails.  Device names unsuitable as sysctl path components
 * are rejected up front.
 */
2288 static int devinet_sysctl_register(struct in_device *idev)
2292 if (!sysctl_dev_name_is_allowed(idev->dev->name))
2295 err = neigh_sysctl_register(idev->dev, idev->arp_parms, NULL);
2298 err = __devinet_sysctl_register(dev_net(idev->dev), idev->dev->name,
2299 idev->dev->ifindex, &idev->cnf);
/* Roll back the neighbour registration on devconf failure. */
2301 neigh_sysctl_unregister(idev->arp_parms);
/* Counterpart of devinet_sysctl_register(): drop both sysctl trees. */
2305 static void devinet_sysctl_unregister(struct in_device *idev)
2307 __devinet_sysctl_unregister(&idev->cnf);
2308 neigh_sysctl_unregister(idev->arp_parms);
/*
 * Template for the legacy net/ipv4/ip_forward sysctl; duplicated and
 * rebased per-net in devinet_init_net().
 */
2311 static struct ctl_table ctl_forward_entry[] = {
2313 .procname = "ip_forward",
2314 .data = &ipv4_devconf.data[
2315 IPV4_DEVCONF_FORWARDING - 1],
2316 .maxlen = sizeof(int),
2318 .proc_handler = devinet_sysctl_forward,
2319 .extra1 = &ipv4_devconf,
2320 .extra2 = &init_net,
/*
 * Per-net init: set up the "all" and "default" devconf blocks and
 * their sysctl trees.  init_net uses the static templates directly;
 * other namespaces get kmemdup'd copies.  Errors unwind in reverse
 * order via the labels at the bottom.
 */
2326 static __net_init int devinet_init_net(struct net *net)
2329 struct ipv4_devconf *all, *dflt;
2330 #ifdef CONFIG_SYSCTL
2331 struct ctl_table *tbl = ctl_forward_entry;
2332 struct ctl_table_header *forw_hdr;
2336 all = &ipv4_devconf;
2337 dflt = &ipv4_devconf_dflt;
/* Non-initial namespaces get private copies of the templates. */
2339 if (!net_eq(net, &init_net)) {
2340 all = kmemdup(all, sizeof(ipv4_devconf), GFP_KERNEL);
2344 dflt = kmemdup(dflt, sizeof(ipv4_devconf_dflt), GFP_KERNEL);
2346 goto err_alloc_dflt;
2348 #ifdef CONFIG_SYSCTL
2349 tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
/* Point the copied ip_forward entry at this net's "all" conf. */
2353 tbl[0].data = &all->data[IPV4_DEVCONF_FORWARDING - 1];
2354 tbl[0].extra1 = all;
2355 tbl[0].extra2 = net;
2359 #ifdef CONFIG_SYSCTL
2360 err = __devinet_sysctl_register(net, "all", NETCONFA_IFINDEX_ALL, all);
2364 err = __devinet_sysctl_register(net, "default",
2365 NETCONFA_IFINDEX_DEFAULT, dflt);
2370 forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
2373 net->ipv4.forw_hdr = forw_hdr;
2376 net->ipv4.devconf_all = all;
2377 net->ipv4.devconf_dflt = dflt;
/* Error unwind: undo registrations/allocations in reverse order. */
2380 #ifdef CONFIG_SYSCTL
2382 __devinet_sysctl_unregister(dflt);
2384 __devinet_sysctl_unregister(all);
2386 if (tbl != ctl_forward_entry)
2390 if (dflt != &ipv4_devconf_dflt)
2393 if (all != &ipv4_devconf)
/*
 * Per-net teardown: unregister the ip_forward and devconf sysctl
 * trees, then free the per-net conf copies (harmless no-op pointers
 * for init_net are not reached here; kfree of the static templates
 * is avoided by per-net allocation in devinet_init_net()).
 * NOTE(review): the tail of this function is partially elided.
 */
2399 static __net_exit void devinet_exit_net(struct net *net)
2401 #ifdef CONFIG_SYSCTL
2402 struct ctl_table *tbl;
2404 tbl = net->ipv4.forw_hdr->ctl_table_arg;
2405 unregister_net_sysctl_table(net->ipv4.forw_hdr);
2406 __devinet_sysctl_unregister(net->ipv4.devconf_dflt);
2407 __devinet_sysctl_unregister(net->ipv4.devconf_all);
2410 kfree(net->ipv4.devconf_dflt);
2411 kfree(net->ipv4.devconf_all);
/* Per-network-namespace lifecycle hooks for IPv4 devconf state. */
2414 static __net_initdata struct pernet_operations devinet_ops = {
2415 .init = devinet_init_net,
2416 .exit = devinet_exit_net,
/* rtnetlink AF_INET hooks: fill/validate/apply IFLA_AF_SPEC data. */
2419 static struct rtnl_af_ops inet_af_ops __read_mostly = {
2421 .fill_link_af = inet_fill_link_af,
2422 .get_link_af_size = inet_get_link_af_size,
2423 .validate_link_af = inet_validate_link_af,
2424 .set_link_af = inet_set_link_af,
2427 void __init devinet_init(void)
2431 for (i = 0; i < IN4_ADDR_HSIZE; i++)
2432 INIT_HLIST_HEAD(&inet_addr_lst[i]);
2434 register_pernet_subsys(&devinet_ops);
2436 register_gifconf(PF_INET, inet_gifconf);
2437 register_netdevice_notifier(&ip_netdev_notifier);
2439 queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0);
2441 rtnl_af_register(&inet_af_ops);
2443 rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, NULL);
2444 rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, NULL);
2445 rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr, NULL);
2446 rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf,
2447 inet_netconf_dump_devconf, NULL);