2 * NET3 IP device support routines.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
9 * Derived from the IP parts of dev.c 1.0.19
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
15 * Alan Cox, <gw4pts@gw4pts.ampr.org>
16 * Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
19 * Alexey Kuznetsov: pa_* fields are replaced with ifaddr
21 * Cyrus Durgin: updated for kmod
22 * Matthias Andree: in devinet_ioctl, compare label and
23 * address (4.4BSD alias style support),
24 * fall back to comparing just the label
29 #include <asm/uaccess.h>
30 #include <linux/bitops.h>
31 #include <linux/capability.h>
32 #include <linux/module.h>
33 #include <linux/types.h>
34 #include <linux/kernel.h>
35 #include <linux/string.h>
37 #include <linux/socket.h>
38 #include <linux/sockios.h>
40 #include <linux/errno.h>
41 #include <linux/interrupt.h>
42 #include <linux/if_addr.h>
43 #include <linux/if_ether.h>
44 #include <linux/inet.h>
45 #include <linux/netdevice.h>
46 #include <linux/etherdevice.h>
47 #include <linux/skbuff.h>
48 #include <linux/init.h>
49 #include <linux/notifier.h>
50 #include <linux/inetdevice.h>
51 #include <linux/igmp.h>
52 #include <linux/slab.h>
53 #include <linux/hash.h>
55 #include <linux/sysctl.h>
57 #include <linux/kmod.h>
58 #include <linux/netconf.h>
62 #include <net/route.h>
63 #include <net/ip_fib.h>
64 #include <net/rtnetlink.h>
65 #include <net/net_namespace.h>
66 #include <net/addrconf.h>
68 #include "fib_lookup.h"
/* IFA flags that are meaningful only for IPv6 addresses (DAD, temp-addr
 * management, etc.).  Masked off any IPv4 address before insertion so
 * userspace cannot set them via netlink.
 */
70 #define IPV6ONLY_FLAGS \
71 (IFA_F_NODAD | IFA_F_OPTIMISTIC | IFA_F_DADFAILED | \
72 IFA_F_HOMEADDRESS | IFA_F_TENTATIVE | \
73 IFA_F_MANAGETEMPADDR | IFA_F_STABLE_PRIVACY)
/* Compiled-in per-device IPv4 configuration defaults ("all" template).
 * Entries are indexed by IPV4_DEVCONF_* minus one; anything not listed
 * defaults to zero.
 */
75 static struct ipv4_devconf ipv4_devconf = {
77 [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
78 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
79 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
80 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
81 [IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
82 [IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] = 1000 /*ms*/,
/* Defaults copied into each new in_device by inetdev_init() (the "default"
 * template).  Differs from ipv4_devconf above by also enabling
 * ACCEPT_SOURCE_ROUTE.
 */
86 static struct ipv4_devconf ipv4_devconf_dflt = {
88 [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
89 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
90 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
91 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
92 [IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE - 1] = 1,
93 [IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
94 [IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] = 1000 /*ms*/,
/* Accessor for the per-netns default devconf table. */
98 #define IPV4_DEVCONF_DFLT(net, attr) \
99 IPV4_DEVCONF((*net->ipv4.devconf_dflt), attr)
/* Netlink attribute validation policy for RTM_NEWADDR/RTM_DELADDR.
 * Addresses are raw 32-bit values; the label is bounded by IFNAMSIZ.
 */
101 static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = {
102 [IFA_LOCAL] = { .type = NLA_U32 },
103 [IFA_ADDRESS] = { .type = NLA_U32 },
104 [IFA_BROADCAST] = { .type = NLA_U32 },
105 [IFA_LABEL] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
106 [IFA_CACHEINFO] = { .len = sizeof(struct ifa_cacheinfo) },
107 [IFA_FLAGS] = { .type = NLA_U32 },
/* Global hash table of all IPv4 addresses, keyed on (netns, local addr),
 * used by __ip_dev_find() and check_lifetime().  256 buckets.
 */
110 #define IN4_ADDR_HSIZE_SHIFT 8
111 #define IN4_ADDR_HSIZE (1U << IN4_ADDR_HSIZE_SHIFT)
113 static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE];
/* Bucket index for @addr; net_hash_mix() keeps distinct netns apart. */
115 static u32 inet_addr_hash(const struct net *net, __be32 addr)
117 u32 val = (__force u32) addr ^ net_hash_mix(net);
119 return hash_32(val, IN4_ADDR_HSIZE_SHIFT);
/* Link @ifa into the global address hash (RCU-safe insertion).
 * NOTE(review): presumably called under RTNL like the rest of this file --
 * a locking assertion is likely among the lines missing from this dump.
 */
122 static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa)
124 u32 hash = inet_addr_hash(net, ifa->ifa_local);
127 hlist_add_head_rcu(&ifa->hash, &inet_addr_lst[hash]);
/* Unlink @ifa from the global address hash; init so a re-remove is safe. */
130 static void inet_hash_remove(struct in_ifaddr *ifa)
133 hlist_del_init_rcu(&ifa->hash);
137 * __ip_dev_find - find the first device with a given source address.
138 * @net: the net namespace
139 * @addr: the source address
140 * @devref: if true, take a reference on the found device
142 * If a caller uses devref=false, it should be protected by RCU, or RTNL
144 struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
146 u32 hash = inet_addr_hash(net, addr);
147 struct net_device *result = NULL;
148 struct in_ifaddr *ifa;
/* Fast path: exact match in the global address hash. */
151 hlist_for_each_entry_rcu(ifa, &inet_addr_lst[hash], hash) {
152 if (ifa->ifa_local == addr) {
153 struct net_device *dev = ifa->ifa_dev->dev;
/* Skip entries that belong to another netns. */
155 if (!net_eq(dev_net(dev), net))
/* Slow path: no hash hit -- ask the FIB local table (lines assigning
 * "result" on the fast path are missing from this dump).
 */
162 struct flowi4 fl4 = { .daddr = addr };
163 struct fib_result res = { 0 };
164 struct fib_table *local;
166 /* Fallback to FIB local table so that communication
167 * over loopback subnets work.
169 local = fib_get_table(net, RT_TABLE_LOCAL);
171 !fib_table_lookup(local, &fl4, &res, FIB_LOOKUP_NOREF) &&
172 res.type == RTN_LOCAL)
173 result = FIB_RES_DEV(res);
/* Caller asked for a device reference (dev_hold presumably follows). */
175 if (result && devref)
180 EXPORT_SYMBOL(__ip_dev_find);
182 static void rtmsg_ifa(int event, struct in_ifaddr *, struct nlmsghdr *, u32);
184 static BLOCKING_NOTIFIER_HEAD(inetaddr_chain);
185 static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
188 static int devinet_sysctl_register(struct in_device *idev);
189 static void devinet_sysctl_unregister(struct in_device *idev);
191 static int devinet_sysctl_register(struct in_device *idev)
195 static void devinet_sysctl_unregister(struct in_device *idev)
200 /* Locks all the inet devices. */
/* Allocate a zeroed in_ifaddr; may sleep (GFP_KERNEL). */
202 static struct in_ifaddr *inet_alloc_ifa(void)
204 return kzalloc(sizeof(struct in_ifaddr), GFP_KERNEL);
/* RCU callback: drop the in_device reference held by the ifa, then free
 * (the kfree is among the lines missing from this dump).
 */
207 static void inet_rcu_free_ifa(struct rcu_head *head)
209 struct in_ifaddr *ifa = container_of(head, struct in_ifaddr, rcu_head);
211 in_dev_put(ifa->ifa_dev);
/* Defer freeing until after an RCU grace period, since readers may still
 * traverse the address lists under RCU.
 */
215 static void inet_free_ifa(struct in_ifaddr *ifa)
217 call_rcu(&ifa->rcu_head, inet_rcu_free_ifa);
/* Final teardown of an in_device once its refcount reaches zero.
 * By now all addresses and multicast state must already be gone.
 */
220 void in_dev_finish_destroy(struct in_device *idev)
222 struct net_device *dev = idev->dev;
224 WARN_ON(idev->ifa_list);
225 WARN_ON(idev->mc_list);
226 kfree(rcu_dereference_protected(idev->mc_hash, 1));
227 #ifdef NET_REFCNT_DEBUG
228 pr_debug("%s: %p=%s\n", __func__, idev, dev ? dev->name : "NIL");
/* Reached only if the device is still marked alive -- a bug. */
232 pr_err("Freeing alive in_device %p\n", idev);
236 EXPORT_SYMBOL(in_dev_finish_destroy);
/* Create and attach the IPv4 state (in_device) for @dev.
 * Copies the per-netns default devconf, sets up ARP parms, sysctl entries
 * and multicast state, then publishes via dev->ip_ptr last so receivers
 * only ever see a fully initialised structure.
 * Returns the new in_device or ERR_PTR on failure.
 * NOTE(review): error-unwind labels are missing from this dump.
 */
238 static struct in_device *inetdev_init(struct net_device *dev)
240 struct in_device *in_dev;
245 in_dev = kzalloc(sizeof(*in_dev), GFP_KERNEL);
248 memcpy(&in_dev->cnf, dev_net(dev)->ipv4.devconf_dflt,
249 sizeof(in_dev->cnf));
250 in_dev->cnf.sysctl = NULL;
252 in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl);
253 if (!in_dev->arp_parms)
/* LRO is incompatible with forwarding: turn it off if forwarding is on. */
255 if (IPV4_DEVCONF(in_dev->cnf, FORWARDING))
256 dev_disable_lro(dev);
257 /* Reference in_dev->dev */
259 /* Account for reference dev->ip_ptr (below) */
262 err = devinet_sysctl_register(in_dev);
269 ip_mc_init_dev(in_dev);
270 if (dev->flags & IFF_UP)
273 /* we can receive as soon as ip_ptr is set -- do this last */
274 rcu_assign_pointer(dev->ip_ptr, in_dev);
276 return in_dev ?: ERR_PTR(err);
/* RCU callback used by inetdev_destroy(): drop the last ref after a
 * grace period (the in_dev_put call is missing from this dump).
 */
283 static void in_dev_rcu_put(struct rcu_head *head)
285 struct in_device *idev = container_of(head, struct in_device, rcu_head);
/* Tear down the IPv4 state of a device being unregistered: delete every
 * address, clear dev->ip_ptr, unregister sysctls and ARP parms, then free
 * via RCU so concurrent readers finish safely.  Called under RTNL.
 */
289 static void inetdev_destroy(struct in_device *in_dev)
291 struct in_ifaddr *ifa;
292 struct net_device *dev;
300 ip_mc_destroy_dev(in_dev);
302 while ((ifa = in_dev->ifa_list) != NULL) {
303 inet_del_ifa(in_dev, &in_dev->ifa_list, 0);
307 RCU_INIT_POINTER(dev->ip_ptr, NULL);
309 devinet_sysctl_unregister(in_dev);
310 neigh_parms_release(&arp_tbl, in_dev->arp_parms);
313 call_rcu(&in_dev->rcu_head, in_dev_rcu_put);
/* Return whether @a (and, if non-zero, @b) fall inside the subnet of any
 * primary address configured on @in_dev.
 */
316 int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b)
319 for_primary_ifa(in_dev) {
320 if (inet_ifa_match(a, ifa)) {
321 if (!b || inet_ifa_match(b, ifa)) {
326 } endfor_ifa(in_dev);
/* Remove *@ifap from @in_dev's address list and announce the deletion via
 * rtnetlink (@nlh/@portid identify the requesting socket, NULL/0 for
 * kernel-internal calls).  If a primary address is deleted its secondaries
 * are either deleted too or, with promote_secondaries enabled, one of them
 * is promoted to primary and the rest re-homed under it.
 * NOTE(review): several statements (list unlinks, "promote" selection)
 * are missing from this dump; the comments below describe the visible flow.
 */
331 static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
332 int destroy, struct nlmsghdr *nlh, u32 portid)
334 struct in_ifaddr *promote = NULL;
335 struct in_ifaddr *ifa, *ifa1 = *ifap;
336 struct in_ifaddr *last_prim = in_dev->ifa_list;
337 struct in_ifaddr *prev_prom = NULL;
338 int do_promote = IN_DEV_PROMOTE_SECONDARIES(in_dev);
345 /* 1. Deleting primary ifaddr forces deletion all secondaries
346 * unless alias promotion is set
349 if (!(ifa1->ifa_flags & IFA_F_SECONDARY)) {
350 struct in_ifaddr **ifap1 = &ifa1->ifa_next;
/* Walk the rest of the list looking for this primary's secondaries. */
352 while ((ifa = *ifap1) != NULL) {
353 if (!(ifa->ifa_flags & IFA_F_SECONDARY) &&
354 ifa1->ifa_scope <= ifa->ifa_scope)
/* Not one of ifa1's secondaries -- keep walking. */
357 if (!(ifa->ifa_flags & IFA_F_SECONDARY) ||
358 ifa1->ifa_mask != ifa->ifa_mask ||
359 !inet_ifa_match(ifa1->ifa_address, ifa)) {
360 ifap1 = &ifa->ifa_next;
/* Secondary of the deleted primary: unlink and announce. */
366 inet_hash_remove(ifa);
367 *ifap1 = ifa->ifa_next;
369 rtmsg_ifa(RTM_DELADDR, ifa, nlh, portid);
370 blocking_notifier_call_chain(&inetaddr_chain,
380 /* On promotion all secondaries from subnet are changing
381 * the primary IP, we must remove all their routes silently
382 * and later to add them back with new prefsrc. Do this
383 * while all addresses are on the device list.
385 for (ifa = promote; ifa; ifa = ifa->ifa_next) {
386 if (ifa1->ifa_mask == ifa->ifa_mask &&
387 inet_ifa_match(ifa1->ifa_address, ifa))
388 fib_del_ifaddr(ifa, ifa1);
/* 2. Unlink ifa1 itself from the list and the global hash. */
394 *ifap = ifa1->ifa_next;
395 inet_hash_remove(ifa1);
397 /* 3. Announce address deletion */
399 /* Send message first, then call notifier.
400 At first sight, FIB update triggered by notifier
401 will refer to already deleted ifaddr, that could confuse
402 netlink listeners. It is not true: look, gated sees
403 that route deleted and if it still thinks that ifaddr
404 is valid, it will try to restore deleted routes... Grr.
405 So that, this order is correct.
407 rtmsg_ifa(RTM_DELADDR, ifa1, nlh, portid);
408 blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1);
/* 4. If a secondary was chosen for promotion, splice it in after the
 * last primary, clear its SECONDARY flag and announce it as new.
 */
411 struct in_ifaddr *next_sec = promote->ifa_next;
414 prev_prom->ifa_next = promote->ifa_next;
415 promote->ifa_next = last_prim->ifa_next;
416 last_prim->ifa_next = promote;
419 promote->ifa_flags &= ~IFA_F_SECONDARY;
420 rtmsg_ifa(RTM_NEWADDR, promote, nlh, portid);
421 blocking_notifier_call_chain(&inetaddr_chain,
/* Re-add routes for the remaining secondaries with the new prefsrc. */
423 for (ifa = next_sec; ifa; ifa = ifa->ifa_next) {
424 if (ifa1->ifa_mask != ifa->ifa_mask ||
425 !inet_ifa_match(ifa1->ifa_address, ifa))
/* Kernel-internal address deletion: no originating netlink request. */
435 static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
438 __inet_del_ifa(in_dev, ifap, destroy, NULL, 0);
/* Deferred work that expires address lifetimes (see check_lifetime()). */
441 static void check_lifetime(struct work_struct *work);
443 static DECLARE_DELAYED_WORK(check_lifetime_work, check_lifetime);
/* Insert @ifa into its device's address list, deciding primary vs
 * secondary: an address in the same subnet as an existing one becomes
 * IFA_F_SECONDARY; duplicates are rejected.  Announces RTM_NEWADDR and
 * fires the inetaddr notifier chain.  Returns 0 or -errno.
 * NOTE(review): the error-return statements are missing from this dump.
 */
445 static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
448 struct in_device *in_dev = ifa->ifa_dev;
449 struct in_ifaddr *ifa1, **ifap, **last_primary;
/* An address without a local part is invalid. */
453 if (!ifa->ifa_local) {
458 ifa->ifa_flags &= ~IFA_F_SECONDARY;
459 last_primary = &in_dev->ifa_list;
461 /* Don't set IPv6 only flags to IPv4 addresses */
462 ifa->ifa_flags &= ~IPV6ONLY_FLAGS;
/* Scan the list: remember where the last primary of suitable scope
 * sits, detect duplicates, and classify same-subnet addrs secondary.
 */
464 for (ifap = &in_dev->ifa_list; (ifa1 = *ifap) != NULL;
465 ifap = &ifa1->ifa_next) {
466 if (!(ifa1->ifa_flags & IFA_F_SECONDARY) &&
467 ifa->ifa_scope <= ifa1->ifa_scope)
468 last_primary = &ifa1->ifa_next;
469 if (ifa1->ifa_mask == ifa->ifa_mask &&
470 inet_ifa_match(ifa1->ifa_address, ifa)) {
471 if (ifa1->ifa_local == ifa->ifa_local) {
475 if (ifa1->ifa_scope != ifa->ifa_scope) {
479 ifa->ifa_flags |= IFA_F_SECONDARY;
/* New primary: feed the address into the PRNG pool and insert at
 * the last_primary position (insert point adjustment lines missing).
 */
483 if (!(ifa->ifa_flags & IFA_F_SECONDARY)) {
484 prandom_seed((__force u32) ifa->ifa_local);
488 ifa->ifa_next = *ifap;
491 inet_hash_insert(dev_net(in_dev->dev), ifa);
/* Kick the lifetime checker so finite lifetimes get scheduled. */
493 cancel_delayed_work(&check_lifetime_work);
494 queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0);
496 /* Send message first, then call notifier.
497 Notifier will trigger FIB update, so that
498 listeners of netlink will know about new ifaddr */
499 rtmsg_ifa(RTM_NEWADDR, ifa, nlh, portid);
500 blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
/* Kernel-internal address insertion: no originating netlink request. */
505 static int inet_insert_ifa(struct in_ifaddr *ifa)
507 return __inet_insert_ifa(ifa, NULL, 0);
/* Bind @ifa to @dev's in_device (taking ownership) and insert it.
 * Loopback addresses are forced to host scope.  Used by the ioctl path.
 */
510 static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa)
512 struct in_device *in_dev = __in_dev_get_rtnl(dev);
520 ipv4_devconf_setall(in_dev);
521 neigh_parms_data_state_setall(in_dev->arp_parms);
/* First binding of this ifa; the in_dev_hold presumably sits in the
 * lines missing from this dump.
 */
522 if (ifa->ifa_dev != in_dev) {
523 WARN_ON(ifa->ifa_dev);
525 ifa->ifa_dev = in_dev;
527 if (ipv4_is_loopback(ifa->ifa_local))
528 ifa->ifa_scope = RT_SCOPE_HOST;
529 return inet_insert_ifa(ifa);
532 /* Caller must hold RCU or RTNL :
533 * We dont take a reference on found in_device
/* Look up the in_device for interface index @ifindex in @net; NULL if the
 * device does not exist or has no IPv4 state.
 */
535 struct in_device *inetdev_by_index(struct net *net, int ifindex)
537 struct net_device *dev;
538 struct in_device *in_dev = NULL;
541 dev = dev_get_by_index_rcu(net, ifindex);
543 in_dev = rcu_dereference_rtnl(dev->ip_ptr);
547 EXPORT_SYMBOL(inetdev_by_index);
549 /* Called only from RTNL semaphored context. No locks. */
/* Find the primary address on @in_dev matching @prefix/@mask exactly;
 * NULL if none.
 */
551 struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
556 for_primary_ifa(in_dev) {
557 if (ifa->ifa_mask == mask && inet_ifa_match(prefix, ifa))
559 } endfor_ifa(in_dev);
/* Join (@join true) or leave the multicast group given by @ifa's address
 * on its interface, using the per-netns autojoin socket @sk.  Supports the
 * IFA_F_MCAUTOJOIN feature of RTM_NEWADDR/RTM_DELADDR.
 */
563 static int ip_mc_config(struct sock *sk, bool join, const struct in_ifaddr *ifa)
565 struct ip_mreqn mreq = {
566 .imr_multiaddr.s_addr = ifa->ifa_address,
567 .imr_ifindex = ifa->ifa_dev->dev->ifindex,
575 ret = ip_mc_join_group(sk, &mreq);
577 ret = ip_mc_leave_group(sk, &mreq);
/* RTM_DELADDR handler: parse the request, find the matching address on
 * the named interface (matching local addr, label and prefix when given),
 * leave any autojoined multicast group, and delete it.
 * Returns 0 on success, -ENODEV/-EADDRNOTAVAIL/-errno otherwise.
 */
583 static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
585 struct net *net = sock_net(skb->sk);
586 struct nlattr *tb[IFA_MAX+1];
587 struct in_device *in_dev;
588 struct ifaddrmsg *ifm;
589 struct in_ifaddr *ifa, **ifap;
594 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy);
598 ifm = nlmsg_data(nlh);
599 in_dev = inetdev_by_index(net, ifm->ifa_index);
/* Walk the address list; skip entries that fail any supplied filter. */
605 for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
606 ifap = &ifa->ifa_next) {
608 ifa->ifa_local != nla_get_in_addr(tb[IFA_LOCAL]))
611 if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label))
614 if (tb[IFA_ADDRESS] &&
615 (ifm->ifa_prefixlen != ifa->ifa_prefixlen ||
616 !inet_ifa_match(nla_get_in_addr(tb[IFA_ADDRESS]), ifa)))
/* Match found: undo any multicast autojoin, then delete. */
619 if (ipv4_is_multicast(ifa->ifa_address))
620 ip_mc_config(net->ipv4.mc_autojoin_sk, false, ifa);
621 __inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).portid);
625 err = -EADDRNOTAVAIL;
630 #define INFINITY_LIFE_TIME 0xFFFFFFFF
/* Periodic worker enforcing address lifetimes.  Pass 1 scans the hash
 * under RCU to find out whether anything needs changing (and computes the
 * next wakeup); pass 2 re-scans under stronger locking to actually delete
 * expired addresses and mark preferred-expired ones IFA_F_DEPRECATED.
 * Reschedules itself with a fuzzed, rounded timeout.
 * NOTE(review): the lock/unlock and continue/break lines are missing
 * from this dump.
 */
632 static void check_lifetime(struct work_struct *work)
634 unsigned long now, next, next_sec, next_sched;
635 struct in_ifaddr *ifa;
636 struct hlist_node *n;
640 next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
642 for (i = 0; i < IN4_ADDR_HSIZE; i++) {
643 bool change_needed = false;
/* Pass 1 (read-only): decide if this bucket needs changes. */
646 hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) {
649 if (ifa->ifa_flags & IFA_F_PERMANENT)
652 /* We try to batch several events at once. */
653 age = (now - ifa->ifa_tstamp +
654 ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
656 if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
657 age >= ifa->ifa_valid_lft) {
658 change_needed = true;
659 } else if (ifa->ifa_preferred_lft ==
660 INFINITY_LIFE_TIME) {
662 } else if (age >= ifa->ifa_preferred_lft) {
663 if (time_before(ifa->ifa_tstamp +
664 ifa->ifa_valid_lft * HZ, next))
665 next = ifa->ifa_tstamp +
666 ifa->ifa_valid_lft * HZ;
668 if (!(ifa->ifa_flags & IFA_F_DEPRECATED))
669 change_needed = true;
670 } else if (time_before(ifa->ifa_tstamp +
671 ifa->ifa_preferred_lft * HZ,
673 next = ifa->ifa_tstamp +
674 ifa->ifa_preferred_lft * HZ;
/* Pass 2 (mutating): only reached when change_needed was set. */
681 hlist_for_each_entry_safe(ifa, n, &inet_addr_lst[i], hash) {
684 if (ifa->ifa_flags & IFA_F_PERMANENT)
687 /* We try to batch several events at once. */
688 age = (now - ifa->ifa_tstamp +
689 ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
691 if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
692 age >= ifa->ifa_valid_lft) {
693 struct in_ifaddr **ifap;
/* Locate ifa's link pointer on its device list so it can be deleted. */
695 for (ifap = &ifa->ifa_dev->ifa_list;
696 *ifap != NULL; ifap = &(*ifap)->ifa_next) {
698 inet_del_ifa(ifa->ifa_dev,
703 } else if (ifa->ifa_preferred_lft !=
704 INFINITY_LIFE_TIME &&
705 age >= ifa->ifa_preferred_lft &&
706 !(ifa->ifa_flags & IFA_F_DEPRECATED)) {
707 ifa->ifa_flags |= IFA_F_DEPRECATED;
708 rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
714 next_sec = round_jiffies_up(next);
717 /* If rounded timeout is accurate enough, accept it. */
718 if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
719 next_sched = next_sec;
722 /* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
723 if (time_before(next_sched, now + ADDRCONF_TIMER_FUZZ_MAX))
724 next_sched = now + ADDRCONF_TIMER_FUZZ_MAX;
726 queue_delayed_work(system_power_efficient_wq, &check_lifetime_work,
/* Apply valid/preferred lifetimes (in seconds, INFINITY_LIFE_TIME for
 * forever) to @ifa: infinite valid => IFA_F_PERMANENT; zero preferred =>
 * IFA_F_DEPRECATED.  Also stamps creation/update times.
 */
730 static void set_ifa_lifetime(struct in_ifaddr *ifa, __u32 valid_lft,
733 unsigned long timeout;
735 ifa->ifa_flags &= ~(IFA_F_PERMANENT | IFA_F_DEPRECATED);
737 timeout = addrconf_timeout_fixup(valid_lft, HZ);
738 if (addrconf_finite_timeout(timeout))
739 ifa->ifa_valid_lft = timeout;
741 ifa->ifa_flags |= IFA_F_PERMANENT;
743 timeout = addrconf_timeout_fixup(prefered_lft, HZ);
744 if (addrconf_finite_timeout(timeout)) {
746 ifa->ifa_flags |= IFA_F_DEPRECATED;
747 ifa->ifa_preferred_lft = timeout;
749 ifa->ifa_tstamp = jiffies;
/* First configuration: creation time equals update time. */
750 if (!ifa->ifa_cstamp)
751 ifa->ifa_cstamp = ifa->ifa_tstamp;
/* Build an in_ifaddr from an RTM_NEWADDR request.  Validates prefix
 * length and required attributes, allocates the ifa, fills address,
 * mask, flags, label and (via out-params) the requested lifetimes.
 * Returns the new ifa or ERR_PTR on failure.
 * NOTE(review): error-path returns are missing from this dump.
 */
754 static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
755 __u32 *pvalid_lft, __u32 *pprefered_lft)
757 struct nlattr *tb[IFA_MAX+1];
758 struct in_ifaddr *ifa;
759 struct ifaddrmsg *ifm;
760 struct net_device *dev;
761 struct in_device *in_dev;
764 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy);
768 ifm = nlmsg_data(nlh);
/* IFA_LOCAL is mandatory; prefix must fit an IPv4 address. */
770 if (ifm->ifa_prefixlen > 32 || !tb[IFA_LOCAL])
773 dev = __dev_get_by_index(net, ifm->ifa_index);
778 in_dev = __in_dev_get_rtnl(dev);
783 ifa = inet_alloc_ifa();
786 * A potential indev allocation can be left alive, it stays
787 * assigned to its device and is destroy with it.
791 ipv4_devconf_setall(in_dev);
792 neigh_parms_data_state_setall(in_dev->arp_parms);
/* Without an explicit peer address the local one is used. */
795 if (!tb[IFA_ADDRESS])
796 tb[IFA_ADDRESS] = tb[IFA_LOCAL];
798 INIT_HLIST_NODE(&ifa->hash);
799 ifa->ifa_prefixlen = ifm->ifa_prefixlen;
800 ifa->ifa_mask = inet_make_mask(ifm->ifa_prefixlen);
801 ifa->ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) :
803 ifa->ifa_scope = ifm->ifa_scope;
804 ifa->ifa_dev = in_dev;
806 ifa->ifa_local = nla_get_in_addr(tb[IFA_LOCAL]);
807 ifa->ifa_address = nla_get_in_addr(tb[IFA_ADDRESS]);
809 if (tb[IFA_BROADCAST])
810 ifa->ifa_broadcast = nla_get_in_addr(tb[IFA_BROADCAST]);
/* Label defaults to the device name when not supplied. */
813 nla_strlcpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ);
815 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
817 if (tb[IFA_CACHEINFO]) {
818 struct ifa_cacheinfo *ci;
820 ci = nla_data(tb[IFA_CACHEINFO]);
/* Reject zero valid lifetime or preferred > valid. */
821 if (!ci->ifa_valid || ci->ifa_prefered > ci->ifa_valid) {
825 *pvalid_lft = ci->ifa_valid;
826 *pprefered_lft = ci->ifa_prefered;
/* Return the existing address on @ifa's device that has the same mask,
 * subnet and local address as @ifa (i.e. the one an RTM_NEWADDR would
 * duplicate), or NULL if none.
 */
837 static struct in_ifaddr *find_matching_ifa(struct in_ifaddr *ifa)
839 struct in_device *in_dev = ifa->ifa_dev;
840 struct in_ifaddr *ifa1, **ifap;
845 for (ifap = &in_dev->ifa_list; (ifa1 = *ifap) != NULL;
846 ifap = &ifa1->ifa_next) {
847 if (ifa1->ifa_mask == ifa->ifa_mask &&
848 inet_ifa_match(ifa1->ifa_address, ifa) &&
849 ifa1->ifa_local == ifa->ifa_local)
/* RTM_NEWADDR handler: create a new address, or -- when an identical one
 * exists -- treat the request as a lifetime update (requires NLM_F_REPLACE,
 * rejects NLM_F_EXCL).  Handles IFA_F_MCAUTOJOIN by joining the multicast
 * group before insertion.
 */
855 static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
857 struct net *net = sock_net(skb->sk);
858 struct in_ifaddr *ifa;
859 struct in_ifaddr *ifa_existing;
860 __u32 valid_lft = INFINITY_LIFE_TIME;
861 __u32 prefered_lft = INFINITY_LIFE_TIME;
865 ifa = rtm_to_ifaddr(net, nlh, &valid_lft, &prefered_lft);
869 ifa_existing = find_matching_ifa(ifa);
/* New address path. */
871 /* It would be best to check for !NLM_F_CREATE here but
872 * userspace already relies on not having to provide this.
874 set_ifa_lifetime(ifa, valid_lft, prefered_lft);
875 if (ifa->ifa_flags & IFA_F_MCAUTOJOIN) {
876 int ret = ip_mc_config(net->ipv4.mc_autojoin_sk,
884 return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).portid);
/* Existing address path: only a replace may update lifetimes. */
888 if (nlh->nlmsg_flags & NLM_F_EXCL ||
889 !(nlh->nlmsg_flags & NLM_F_REPLACE))
892 set_ifa_lifetime(ifa, valid_lft, prefered_lft);
893 cancel_delayed_work(&check_lifetime_work);
894 queue_delayed_work(system_power_efficient_wq,
895 &check_lifetime_work, 0);
896 rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid);
902 * Determine a default network mask, based on the IP address.
/* Classful default prefix length for @addr: 0 for zeronet, 8/16/24 for
 * class A/B/C, -1 otherwise (e.g. multicast).
 */
905 static int inet_abc_len(__be32 addr)
907 int rc = -1; /* Something else, probably a multicast. */
909 if (ipv4_is_zeronet(addr))
912 __u32 haddr = ntohl(addr);
914 if (IN_CLASSA(haddr))
916 else if (IN_CLASSB(haddr))
918 else if (IN_CLASSC(haddr))
/* Legacy SIOC[GS]IF* ioctl entry point for IPv4 address configuration.
 * Copies the user's ifreq in, locates the device (and, for most commands,
 * a specific address matched by label -- and address too, 4.4BSD style --
 * falling back to label-only), performs the get/set, and copies the ifreq
 * back for get-style commands.
 * NOTE(review): many statements (locking, returns, some case labels) are
 * missing from this dump; the comments annotate only the visible flow.
 */
926 int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
929 struct sockaddr_in sin_orig;
930 struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr;
931 struct in_device *in_dev;
932 struct in_ifaddr **ifap = NULL;
933 struct in_ifaddr *ifa = NULL;
934 struct net_device *dev;
937 int tryaddrmatch = 0;
940 * Fetch the caller's info block into kernel space
943 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
945 ifr.ifr_name[IFNAMSIZ - 1] = 0;
947 /* save original address for comparison */
948 memcpy(&sin_orig, sin, sizeof(*sin));
/* "dev:alias" labels: note where the alias suffix starts. */
950 colon = strchr(ifr.ifr_name, ':');
954 dev_load(net, ifr.ifr_name);
957 case SIOCGIFADDR: /* Get interface address */
958 case SIOCGIFBRDADDR: /* Get the broadcast address */
959 case SIOCGIFDSTADDR: /* Get the destination address */
960 case SIOCGIFNETMASK: /* Get the netmask for the interface */
961 /* Note that these ioctls will not sleep,
962 so that we do not impose a lock.
963 One day we will be forced to put shlock here (I mean SMP)
965 tryaddrmatch = (sin_orig.sin_family == AF_INET);
966 memset(sin, 0, sizeof(*sin));
967 sin->sin_family = AF_INET;
/* Set-style commands require CAP_NET_ADMIN in the owning user ns. */
972 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
975 case SIOCSIFADDR: /* Set interface address (and family) */
976 case SIOCSIFBRDADDR: /* Set the broadcast address */
977 case SIOCSIFDSTADDR: /* Set the destination address */
978 case SIOCSIFNETMASK: /* Set the netmask for the interface */
980 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
983 if (sin->sin_family != AF_INET)
994 dev = __dev_get_by_name(net, ifr.ifr_name)
1001 in_dev = __in_dev_get_rtnl(dev);
1004 /* Matthias Andree */
1005 /* compare label and address (4.4BSD style) */
1006 /* note: we only do this for a limited set of ioctls
1007 and only if the original address family was AF_INET.
1008 This is checked above. */
1009 for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
1010 ifap = &ifa->ifa_next) {
1011 if (!strcmp(ifr.ifr_name, ifa->ifa_label) &&
1012 sin_orig.sin_addr.s_addr ==
1018 /* we didn't get a match, maybe the application is
1019 4.3BSD-style and passed in junk so we fall back to
1020 comparing just the label */
1022 for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
1023 ifap = &ifa->ifa_next)
1024 if (!strcmp(ifr.ifr_name, ifa->ifa_label))
/* All commands except SIOCSIFADDR/SIOCSIFFLAGS need an existing addr. */
1029 ret = -EADDRNOTAVAIL;
1030 if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS)
1034 case SIOCGIFADDR: /* Get interface address */
1035 sin->sin_addr.s_addr = ifa->ifa_local;
1038 case SIOCGIFBRDADDR: /* Get the broadcast address */
1039 sin->sin_addr.s_addr = ifa->ifa_broadcast;
1042 case SIOCGIFDSTADDR: /* Get the destination address */
1043 sin->sin_addr.s_addr = ifa->ifa_address;
1046 case SIOCGIFNETMASK: /* Get the netmask for the interface */
1047 sin->sin_addr.s_addr = ifa->ifa_mask;
/* SIOCSIFFLAGS on an alias: bringing the alias down deletes it. */
1052 ret = -EADDRNOTAVAIL;
1056 if (!(ifr.ifr_flags & IFF_UP))
1057 inet_del_ifa(in_dev, ifap, 1);
1060 ret = dev_change_flags(dev, ifr.ifr_flags);
1063 case SIOCSIFADDR: /* Set interface address (and family) */
1065 if (inet_abc_len(sin->sin_addr.s_addr) < 0)
/* No matching ifa: allocate a new one labelled after the request. */
1070 ifa = inet_alloc_ifa();
1073 INIT_HLIST_NODE(&ifa->hash);
1075 memcpy(ifa->ifa_label, ifr.ifr_name, IFNAMSIZ);
1077 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
/* Re-setting the same address is a no-op; otherwise replace it. */
1080 if (ifa->ifa_local == sin->sin_addr.s_addr)
1082 inet_del_ifa(in_dev, ifap, 0);
1083 ifa->ifa_broadcast = 0;
1087 ifa->ifa_address = ifa->ifa_local = sin->sin_addr.s_addr;
/* Derive a classful mask/broadcast unless point-to-point (/32). */
1089 if (!(dev->flags & IFF_POINTOPOINT)) {
1090 ifa->ifa_prefixlen = inet_abc_len(ifa->ifa_address);
1091 ifa->ifa_mask = inet_make_mask(ifa->ifa_prefixlen);
1092 if ((dev->flags & IFF_BROADCAST) &&
1093 ifa->ifa_prefixlen < 31)
1094 ifa->ifa_broadcast = ifa->ifa_address |
1097 ifa->ifa_prefixlen = 32;
1098 ifa->ifa_mask = inet_make_mask(32);
1100 set_ifa_lifetime(ifa, INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
1101 ret = inet_set_ifa(dev, ifa);
1104 case SIOCSIFBRDADDR: /* Set the broadcast address */
/* Changing the broadcast re-inserts the ifa so FIB state follows. */
1106 if (ifa->ifa_broadcast != sin->sin_addr.s_addr) {
1107 inet_del_ifa(in_dev, ifap, 0);
1108 ifa->ifa_broadcast = sin->sin_addr.s_addr;
1109 inet_insert_ifa(ifa);
1113 case SIOCSIFDSTADDR: /* Set the destination address */
1115 if (ifa->ifa_address == sin->sin_addr.s_addr)
1118 if (inet_abc_len(sin->sin_addr.s_addr) < 0)
1121 inet_del_ifa(in_dev, ifap, 0);
1122 ifa->ifa_address = sin->sin_addr.s_addr;
1123 inet_insert_ifa(ifa);
1126 case SIOCSIFNETMASK: /* Set the netmask for the interface */
1129 * The mask we set must be legal.
1132 if (bad_mask(sin->sin_addr.s_addr, 0))
1135 if (ifa->ifa_mask != sin->sin_addr.s_addr) {
1136 __be32 old_mask = ifa->ifa_mask;
1137 inet_del_ifa(in_dev, ifap, 0);
1138 ifa->ifa_mask = sin->sin_addr.s_addr;
1139 ifa->ifa_prefixlen = inet_mask_len(ifa->ifa_mask);
1141 /* See if current broadcast address matches
1142 * with current netmask, then recalculate
1143 * the broadcast address. Otherwise it's a
1144 * funny address, so don't touch it since
1145 * the user seems to know what (s)he's doing...
1147 if ((dev->flags & IFF_BROADCAST) &&
1148 (ifa->ifa_prefixlen < 31) &&
1149 (ifa->ifa_broadcast ==
1150 (ifa->ifa_local|~old_mask))) {
1151 ifa->ifa_broadcast = (ifa->ifa_local |
1152 ~sin->sin_addr.s_addr);
1154 inet_insert_ifa(ifa);
/* Get-style commands copy the (possibly filled-in) ifreq back out. */
1164 ret = copy_to_user(arg, &ifr, sizeof(struct ifreq)) ? -EFAULT : 0;
/* SIOCGIFCONF helper: write one ifreq per address on @dev into the user
 * buffer @buf (at most @len bytes), returning the byte count consumed.
 * When @buf is NULL only the required size is accumulated.
 */
1168 static int inet_gifconf(struct net_device *dev, char __user *buf, int len)
1170 struct in_device *in_dev = __in_dev_get_rtnl(dev);
1171 struct in_ifaddr *ifa;
1178 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
/* Size-probe mode: just count. */
1180 done += sizeof(ifr);
1183 if (len < (int) sizeof(ifr))
1185 memset(&ifr, 0, sizeof(struct ifreq));
1186 strcpy(ifr.ifr_name, ifa->ifa_label);
1188 (*(struct sockaddr_in *)&ifr.ifr_addr).sin_family = AF_INET;
1189 (*(struct sockaddr_in *)&ifr.ifr_addr).sin_addr.s_addr =
1192 if (copy_to_user(buf, &ifr, sizeof(struct ifreq))) {
1196 buf += sizeof(struct ifreq);
1197 len -= sizeof(struct ifreq);
1198 done += sizeof(struct ifreq);
/* Choose a source address on @dev suitable for talking to @dst within
 * @scope: prefer a primary address in @dst's subnet, otherwise fall back
 * to scanning all devices (loopback first) for any address of acceptable
 * non-link scope.  Returns 0 if nothing fits.
 */
1204 __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope)
1207 struct in_device *in_dev;
1208 struct net *net = dev_net(dev);
1211 in_dev = __in_dev_get_rcu(dev);
1215 for_primary_ifa(in_dev) {
1216 if (ifa->ifa_scope > scope)
1218 if (!dst || inet_ifa_match(dst, ifa)) {
1219 addr = ifa->ifa_local;
/* Remember any acceptable address as a fallback. */
1223 addr = ifa->ifa_local;
1224 } endfor_ifa(in_dev);
1230 /* Not loopback addresses on loopback should be preferred
1231 in this case. It is important that lo is the first interface
1234 for_each_netdev_rcu(net, dev) {
1235 in_dev = __in_dev_get_rcu(dev);
1239 for_primary_ifa(in_dev) {
1240 if (ifa->ifa_scope != RT_SCOPE_LINK &&
1241 ifa->ifa_scope <= scope) {
1242 addr = ifa->ifa_local;
1245 } endfor_ifa(in_dev);
1251 EXPORT_SYMBOL(inet_select_addr);
/* Per-device worker for inet_confirm_addr(): confirm @local (or pick an
 * address) on @in_dev subject to @dst subnet and @scope constraints.
 * Returns the confirmed/selected address, or 0 when nothing matches.
 * NOTE(review): the loop header and "same" bookkeeping are partially
 * missing from this dump.
 */
1253 static __be32 confirm_addr_indev(struct in_device *in_dev, __be32 dst,
1254 __be32 local, int scope)
1261 (local == ifa->ifa_local || !local) &&
1262 ifa->ifa_scope <= scope) {
1263 addr = ifa->ifa_local;
1268 same = (!local || inet_ifa_match(local, ifa)) &&
1269 (!dst || inet_ifa_match(dst, ifa));
1273 /* Is the selected addr into dst subnet? */
1274 if (inet_ifa_match(addr, ifa))
1276 /* No, then can we use new local src? */
1277 if (ifa->ifa_scope <= scope) {
1278 addr = ifa->ifa_local;
1281 /* search for large dst subnet for addr */
1285 } endfor_ifa(in_dev);
1287 return same ? addr : 0;
1291 * Confirm that local IP address exists using wildcards:
1292 * - net: netns to check, cannot be NULL
1293 * - in_dev: only on this interface, NULL=any interface
1294 * - dst: only in the same subnet as dst, 0=any dst
1295 * - local: address, 0=autoselect the local address
1296 * - scope: maximum allowed scope value for the local address
1298 __be32 inet_confirm_addr(struct net *net, struct in_device *in_dev,
1299 __be32 dst, __be32 local, int scope)
1302 struct net_device *dev;
/* A specific interface was given: check only it. */
1305 return confirm_addr_indev(in_dev, dst, local, scope);
/* Otherwise scan every device in the netns until one confirms. */
1308 for_each_netdev_rcu(net, dev) {
1309 in_dev = __in_dev_get_rcu(dev);
1311 addr = confirm_addr_indev(in_dev, dst, local, scope);
1320 EXPORT_SYMBOL(inet_confirm_addr);
/* Subscribe to IPv4 address add/remove events (NETDEV_UP/NETDEV_DOWN
 * with an in_ifaddr payload).
 */
1326 int register_inetaddr_notifier(struct notifier_block *nb)
1328 return blocking_notifier_chain_register(&inetaddr_chain, nb);
1330 EXPORT_SYMBOL(register_inetaddr_notifier);
/* Remove a previously registered inetaddr notifier. */
1332 int unregister_inetaddr_notifier(struct notifier_block *nb)
1334 return blocking_notifier_chain_unregister(&inetaddr_chain, nb);
1336 EXPORT_SYMBOL(unregister_inetaddr_notifier);
1338 /* Rename ifa_labels for a device name change. Make some effort to preserve
1339 * existing alias numbering and to create unique labels if possible.
1341 static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
1343 struct in_ifaddr *ifa;
1346 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
1347 char old[IFNAMSIZ], *dot;
1349 memcpy(old, ifa->ifa_label, IFNAMSIZ);
1350 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
/* Carry over the ":N" alias suffix, synthesising one if needed. */
1353 dot = strchr(old, ':');
1355 sprintf(old, ":%d", named);
/* Append the suffix; truncate the base name when it would overflow. */
1358 if (strlen(dot) + strlen(dev->name) < IFNAMSIZ)
1359 strcat(ifa->ifa_label, dot);
1361 strcpy(ifa->ifa_label + (IFNAMSIZ - strlen(dot) - 1), dot);
1363 rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
/* IPv4 requires a minimum link MTU (IPV4_MIN_MTU); smaller devices get
 * their IPv4 state torn down (see the CHANGEMTU handling below).
 */
1367 static bool inetdev_valid_mtu(unsigned int mtu)
1369 return mtu >= IPV4_MIN_MTU;
/* Broadcast a gratuitous ARP for every address on @dev so peers refresh
 * their ARP caches after a link or MAC change.
 */
1372 static void inetdev_send_gratuitous_arp(struct net_device *dev,
1373 struct in_device *in_dev)
1376 struct in_ifaddr *ifa;
1378 for (ifa = in_dev->ifa_list; ifa;
1379 ifa = ifa->ifa_next) {
1380 arp_send(ARPOP_REQUEST, ETH_P_ARP,
1381 ifa->ifa_local, dev,
1382 ifa->ifa_local, NULL,
1383 dev->dev_addr, NULL);
1387 /* Called only under RTNL semaphore */
/* Netdev notifier: create/destroy/maintain the per-device IPv4 state in
 * response to device lifecycle events (register, up, MTU change, rename,
 * type change, unregister).
 * NOTE(review): several break statements and case labels are missing from
 * this dump; fall-through relationships cannot be fully confirmed here.
 */
1389 static int inetdev_event(struct notifier_block *this, unsigned long event,
1392 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1393 struct in_device *in_dev = __in_dev_get_rtnl(dev);
/* No in_device yet: only REGISTER (and a valid-MTU CHANGEMTU) create one. */
1398 if (event == NETDEV_REGISTER) {
1399 in_dev = inetdev_init(dev);
1401 return notifier_from_errno(PTR_ERR(in_dev));
1402 if (dev->flags & IFF_LOOPBACK) {
1403 IN_DEV_CONF_SET(in_dev, NOXFRM, 1);
1404 IN_DEV_CONF_SET(in_dev, NOPOLICY, 1);
1406 } else if (event == NETDEV_CHANGEMTU) {
1407 /* Re-enabling IP */
1408 if (inetdev_valid_mtu(dev->mtu))
1409 in_dev = inetdev_init(dev);
1415 case NETDEV_REGISTER:
/* REGISTER with an existing in_dev should be impossible. */
1416 pr_debug("%s: bug\n", __func__);
1417 RCU_INIT_POINTER(dev->ip_ptr, NULL);
/* NETDEV_UP: auto-configure 127.0.0.1/8 on loopback. */
1420 if (!inetdev_valid_mtu(dev->mtu))
1422 if (dev->flags & IFF_LOOPBACK) {
1423 struct in_ifaddr *ifa = inet_alloc_ifa();
1426 INIT_HLIST_NODE(&ifa->hash);
1428 ifa->ifa_address = htonl(INADDR_LOOPBACK);
1429 ifa->ifa_prefixlen = 8;
1430 ifa->ifa_mask = inet_make_mask(8);
1431 in_dev_hold(in_dev);
1432 ifa->ifa_dev = in_dev;
1433 ifa->ifa_scope = RT_SCOPE_HOST;
1434 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
1435 set_ifa_lifetime(ifa, INFINITY_LIFE_TIME,
1436 INFINITY_LIFE_TIME);
1437 ipv4_devconf_setall(in_dev);
1438 neigh_parms_data_state_setall(in_dev->arp_parms);
1439 inet_insert_ifa(ifa);
1444 case NETDEV_CHANGEADDR:
1445 if (!IN_DEV_ARP_NOTIFY(in_dev))
1448 case NETDEV_NOTIFY_PEERS:
1449 /* Send gratuitous ARP to notify of link change */
1450 inetdev_send_gratuitous_arp(dev, in_dev);
1455 case NETDEV_PRE_TYPE_CHANGE:
1456 ip_mc_unmap(in_dev);
1458 case NETDEV_POST_TYPE_CHANGE:
1459 ip_mc_remap(in_dev);
1461 case NETDEV_CHANGEMTU:
1462 if (inetdev_valid_mtu(dev->mtu))
1464 /* disable IP when MTU is not enough */
1465 case NETDEV_UNREGISTER:
1466 inetdev_destroy(in_dev);
1468 case NETDEV_CHANGENAME:
1469 /* Do not notify about label change, this event is
1470 * not interesting to applications using netlink.
1472 inetdev_changename(dev, in_dev);
/* Re-register sysctls under the new device name. */
1474 devinet_sysctl_unregister(in_dev);
1475 devinet_sysctl_register(in_dev);
/* Notifier block hooking inetdev_event() into netdev lifecycle events. */
1482 static struct notifier_block ip_netdev_notifier = {
1483 .notifier_call = inetdev_event,
/* Worst-case payload size of an RTM_NEWADDR/RTM_DELADDR message, used to
 * size the skb in notification paths.
 */
1486 static size_t inet_nlmsg_size(void)
1488 return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
1489 + nla_total_size(4) /* IFA_ADDRESS */
1490 + nla_total_size(4) /* IFA_LOCAL */
1491 + nla_total_size(4) /* IFA_BROADCAST */
1492 + nla_total_size(IFNAMSIZ) /* IFA_LABEL */
1493 + nla_total_size(4) /* IFA_FLAGS */
1494 + nla_total_size(sizeof(struct ifa_cacheinfo)); /* IFA_CACHEINFO */
1497 static inline u32 cstamp_delta(unsigned long cstamp)
1499 return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
1502 static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
1503 unsigned long tstamp, u32 preferred, u32 valid)
1505 struct ifa_cacheinfo ci;
1507 ci.cstamp = cstamp_delta(cstamp);
1508 ci.tstamp = cstamp_delta(tstamp);
1509 ci.ifa_prefered = preferred;
1510 ci.ifa_valid = valid;
1512 return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
1515 static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
1516 u32 portid, u32 seq, int event, unsigned int flags)
1518 struct ifaddrmsg *ifm;
1519 struct nlmsghdr *nlh;
1520 u32 preferred, valid;
1522 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), flags);
1526 ifm = nlmsg_data(nlh);
1527 ifm->ifa_family = AF_INET;
1528 ifm->ifa_prefixlen = ifa->ifa_prefixlen;
1529 ifm->ifa_flags = ifa->ifa_flags;
1530 ifm->ifa_scope = ifa->ifa_scope;
1531 ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
1533 if (!(ifm->ifa_flags & IFA_F_PERMANENT)) {
1534 preferred = ifa->ifa_preferred_lft;
1535 valid = ifa->ifa_valid_lft;
1536 if (preferred != INFINITY_LIFE_TIME) {
1537 long tval = (jiffies - ifa->ifa_tstamp) / HZ;
1539 if (preferred > tval)
1543 if (valid != INFINITY_LIFE_TIME) {
1551 preferred = INFINITY_LIFE_TIME;
1552 valid = INFINITY_LIFE_TIME;
1554 if ((ifa->ifa_address &&
1555 nla_put_in_addr(skb, IFA_ADDRESS, ifa->ifa_address)) ||
1557 nla_put_in_addr(skb, IFA_LOCAL, ifa->ifa_local)) ||
1558 (ifa->ifa_broadcast &&
1559 nla_put_in_addr(skb, IFA_BROADCAST, ifa->ifa_broadcast)) ||
1560 (ifa->ifa_label[0] &&
1561 nla_put_string(skb, IFA_LABEL, ifa->ifa_label)) ||
1562 nla_put_u32(skb, IFA_FLAGS, ifa->ifa_flags) ||
1563 put_cacheinfo(skb, ifa->ifa_cstamp, ifa->ifa_tstamp,
1565 goto nla_put_failure;
1567 nlmsg_end(skb, nlh);
1571 nlmsg_cancel(skb, nlh);
1575 static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
1577 struct net *net = sock_net(skb->sk);
1580 int ip_idx, s_ip_idx;
1581 struct net_device *dev;
1582 struct in_device *in_dev;
1583 struct in_ifaddr *ifa;
1584 struct hlist_head *head;
1587 s_idx = idx = cb->args[1];
1588 s_ip_idx = ip_idx = cb->args[2];
1590 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
1592 head = &net->dev_index_head[h];
1594 cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
1596 hlist_for_each_entry_rcu(dev, head, index_hlist) {
1599 if (h > s_h || idx > s_idx)
1601 in_dev = __in_dev_get_rcu(dev);
1605 for (ifa = in_dev->ifa_list, ip_idx = 0; ifa;
1606 ifa = ifa->ifa_next, ip_idx++) {
1607 if (ip_idx < s_ip_idx)
1609 if (inet_fill_ifaddr(skb, ifa,
1610 NETLINK_CB(cb->skb).portid,
1612 RTM_NEWADDR, NLM_F_MULTI) < 0) {
1616 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
1627 cb->args[2] = ip_idx;
1632 static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
1635 struct sk_buff *skb;
1636 u32 seq = nlh ? nlh->nlmsg_seq : 0;
1640 net = dev_net(ifa->ifa_dev->dev);
1641 skb = nlmsg_new(inet_nlmsg_size(), GFP_KERNEL);
1645 err = inet_fill_ifaddr(skb, ifa, portid, seq, event, 0);
1647 /* -EMSGSIZE implies BUG in inet_nlmsg_size() */
1648 WARN_ON(err == -EMSGSIZE);
1652 rtnl_notify(skb, net, portid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL);
1656 rtnl_set_sk_err(net, RTNLGRP_IPV4_IFADDR, err);
1659 static size_t inet_get_link_af_size(const struct net_device *dev,
1660 u32 ext_filter_mask)
1662 struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);
1667 return nla_total_size(IPV4_DEVCONF_MAX * 4); /* IFLA_INET_CONF */
1670 static int inet_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
1671 u32 ext_filter_mask)
1673 struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);
1680 nla = nla_reserve(skb, IFLA_INET_CONF, IPV4_DEVCONF_MAX * 4);
1684 for (i = 0; i < IPV4_DEVCONF_MAX; i++)
1685 ((u32 *) nla_data(nla))[i] = in_dev->cnf.data[i];
1690 static const struct nla_policy inet_af_policy[IFLA_INET_MAX+1] = {
1691 [IFLA_INET_CONF] = { .type = NLA_NESTED },
1694 static int inet_validate_link_af(const struct net_device *dev,
1695 const struct nlattr *nla)
1697 struct nlattr *a, *tb[IFLA_INET_MAX+1];
1700 if (dev && !__in_dev_get_rtnl(dev))
1701 return -EAFNOSUPPORT;
1703 err = nla_parse_nested(tb, IFLA_INET_MAX, nla, inet_af_policy);
1707 if (tb[IFLA_INET_CONF]) {
1708 nla_for_each_nested(a, tb[IFLA_INET_CONF], rem) {
1709 int cfgid = nla_type(a);
1714 if (cfgid <= 0 || cfgid > IPV4_DEVCONF_MAX)
1722 static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla)
1724 struct in_device *in_dev = __in_dev_get_rtnl(dev);
1725 struct nlattr *a, *tb[IFLA_INET_MAX+1];
1729 return -EAFNOSUPPORT;
1731 if (nla_parse_nested(tb, IFLA_INET_MAX, nla, NULL) < 0)
1734 if (tb[IFLA_INET_CONF]) {
1735 nla_for_each_nested(a, tb[IFLA_INET_CONF], rem)
1736 ipv4_devconf_set(in_dev, nla_type(a), nla_get_u32(a));
1742 static int inet_netconf_msgsize_devconf(int type)
1744 int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
1745 + nla_total_size(4); /* NETCONFA_IFINDEX */
1747 /* type -1 is used for ALL */
1748 if (type == -1 || type == NETCONFA_FORWARDING)
1749 size += nla_total_size(4);
1750 if (type == -1 || type == NETCONFA_RP_FILTER)
1751 size += nla_total_size(4);
1752 if (type == -1 || type == NETCONFA_MC_FORWARDING)
1753 size += nla_total_size(4);
1754 if (type == -1 || type == NETCONFA_PROXY_NEIGH)
1755 size += nla_total_size(4);
1756 if (type == -1 || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
1757 size += nla_total_size(4);
1762 static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
1763 struct ipv4_devconf *devconf, u32 portid,
1764 u32 seq, int event, unsigned int flags,
1767 struct nlmsghdr *nlh;
1768 struct netconfmsg *ncm;
1770 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
1775 ncm = nlmsg_data(nlh);
1776 ncm->ncm_family = AF_INET;
1778 if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
1779 goto nla_put_failure;
1781 /* type -1 is used for ALL */
1782 if ((type == -1 || type == NETCONFA_FORWARDING) &&
1783 nla_put_s32(skb, NETCONFA_FORWARDING,
1784 IPV4_DEVCONF(*devconf, FORWARDING)) < 0)
1785 goto nla_put_failure;
1786 if ((type == -1 || type == NETCONFA_RP_FILTER) &&
1787 nla_put_s32(skb, NETCONFA_RP_FILTER,
1788 IPV4_DEVCONF(*devconf, RP_FILTER)) < 0)
1789 goto nla_put_failure;
1790 if ((type == -1 || type == NETCONFA_MC_FORWARDING) &&
1791 nla_put_s32(skb, NETCONFA_MC_FORWARDING,
1792 IPV4_DEVCONF(*devconf, MC_FORWARDING)) < 0)
1793 goto nla_put_failure;
1794 if ((type == -1 || type == NETCONFA_PROXY_NEIGH) &&
1795 nla_put_s32(skb, NETCONFA_PROXY_NEIGH,
1796 IPV4_DEVCONF(*devconf, PROXY_ARP)) < 0)
1797 goto nla_put_failure;
1798 if ((type == -1 || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
1799 nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
1800 IPV4_DEVCONF(*devconf, IGNORE_ROUTES_WITH_LINKDOWN)) < 0)
1801 goto nla_put_failure;
1803 nlmsg_end(skb, nlh);
1807 nlmsg_cancel(skb, nlh);
1811 void inet_netconf_notify_devconf(struct net *net, int type, int ifindex,
1812 struct ipv4_devconf *devconf)
1814 struct sk_buff *skb;
1817 skb = nlmsg_new(inet_netconf_msgsize_devconf(type), GFP_ATOMIC);
1821 err = inet_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
1822 RTM_NEWNETCONF, 0, type);
1824 /* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
1825 WARN_ON(err == -EMSGSIZE);
1829 rtnl_notify(skb, net, 0, RTNLGRP_IPV4_NETCONF, NULL, GFP_ATOMIC);
1833 rtnl_set_sk_err(net, RTNLGRP_IPV4_NETCONF, err);
1836 static const struct nla_policy devconf_ipv4_policy[NETCONFA_MAX+1] = {
1837 [NETCONFA_IFINDEX] = { .len = sizeof(int) },
1838 [NETCONFA_FORWARDING] = { .len = sizeof(int) },
1839 [NETCONFA_RP_FILTER] = { .len = sizeof(int) },
1840 [NETCONFA_PROXY_NEIGH] = { .len = sizeof(int) },
1841 [NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN] = { .len = sizeof(int) },
1844 static int inet_netconf_get_devconf(struct sk_buff *in_skb,
1845 struct nlmsghdr *nlh)
1847 struct net *net = sock_net(in_skb->sk);
1848 struct nlattr *tb[NETCONFA_MAX+1];
1849 struct netconfmsg *ncm;
1850 struct sk_buff *skb;
1851 struct ipv4_devconf *devconf;
1852 struct in_device *in_dev;
1853 struct net_device *dev;
1857 err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX,
1858 devconf_ipv4_policy);
1863 if (!tb[NETCONFA_IFINDEX])
1866 ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
1868 case NETCONFA_IFINDEX_ALL:
1869 devconf = net->ipv4.devconf_all;
1871 case NETCONFA_IFINDEX_DEFAULT:
1872 devconf = net->ipv4.devconf_dflt;
1875 dev = __dev_get_by_index(net, ifindex);
1878 in_dev = __in_dev_get_rtnl(dev);
1881 devconf = &in_dev->cnf;
1886 skb = nlmsg_new(inet_netconf_msgsize_devconf(-1), GFP_ATOMIC);
1890 err = inet_netconf_fill_devconf(skb, ifindex, devconf,
1891 NETLINK_CB(in_skb).portid,
1892 nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
1895 /* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
1896 WARN_ON(err == -EMSGSIZE);
1900 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
1905 static int inet_netconf_dump_devconf(struct sk_buff *skb,
1906 struct netlink_callback *cb)
1908 struct net *net = sock_net(skb->sk);
1911 struct net_device *dev;
1912 struct in_device *in_dev;
1913 struct hlist_head *head;
1916 s_idx = idx = cb->args[1];
1918 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
1920 head = &net->dev_index_head[h];
1922 cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
1924 hlist_for_each_entry_rcu(dev, head, index_hlist) {
1927 in_dev = __in_dev_get_rcu(dev);
1931 if (inet_netconf_fill_devconf(skb, dev->ifindex,
1933 NETLINK_CB(cb->skb).portid,
1941 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
1947 if (h == NETDEV_HASHENTRIES) {
1948 if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
1949 net->ipv4.devconf_all,
1950 NETLINK_CB(cb->skb).portid,
1952 RTM_NEWNETCONF, NLM_F_MULTI,
1958 if (h == NETDEV_HASHENTRIES + 1) {
1959 if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
1960 net->ipv4.devconf_dflt,
1961 NETLINK_CB(cb->skb).portid,
1963 RTM_NEWNETCONF, NLM_F_MULTI,
1976 #ifdef CONFIG_SYSCTL
1978 static void devinet_copy_dflt_conf(struct net *net, int i)
1980 struct net_device *dev;
1983 for_each_netdev_rcu(net, dev) {
1984 struct in_device *in_dev;
1986 in_dev = __in_dev_get_rcu(dev);
1987 if (in_dev && !test_bit(i, in_dev->cnf.state))
1988 in_dev->cnf.data[i] = net->ipv4.devconf_dflt->data[i];
1993 /* called with RTNL locked */
1994 static void inet_forward_change(struct net *net)
1996 struct net_device *dev;
1997 int on = IPV4_DEVCONF_ALL(net, FORWARDING);
1999 IPV4_DEVCONF_ALL(net, ACCEPT_REDIRECTS) = !on;
2000 IPV4_DEVCONF_DFLT(net, FORWARDING) = on;
2001 inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
2002 NETCONFA_IFINDEX_ALL,
2003 net->ipv4.devconf_all);
2004 inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
2005 NETCONFA_IFINDEX_DEFAULT,
2006 net->ipv4.devconf_dflt);
2008 for_each_netdev(net, dev) {
2009 struct in_device *in_dev;
2011 dev_disable_lro(dev);
2013 in_dev = __in_dev_get_rcu(dev);
2015 IN_DEV_CONF_SET(in_dev, FORWARDING, on);
2016 inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
2017 dev->ifindex, &in_dev->cnf);
2023 static int devinet_conf_ifindex(struct net *net, struct ipv4_devconf *cnf)
2025 if (cnf == net->ipv4.devconf_dflt)
2026 return NETCONFA_IFINDEX_DEFAULT;
2027 else if (cnf == net->ipv4.devconf_all)
2028 return NETCONFA_IFINDEX_ALL;
2030 struct in_device *idev
2031 = container_of(cnf, struct in_device, cnf);
2032 return idev->dev->ifindex;
2036 static int devinet_conf_proc(struct ctl_table *ctl, int write,
2037 void __user *buffer,
2038 size_t *lenp, loff_t *ppos)
2040 int old_value = *(int *)ctl->data;
2041 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2042 int new_value = *(int *)ctl->data;
2045 struct ipv4_devconf *cnf = ctl->extra1;
2046 struct net *net = ctl->extra2;
2047 int i = (int *)ctl->data - cnf->data;
2050 set_bit(i, cnf->state);
2052 if (cnf == net->ipv4.devconf_dflt)
2053 devinet_copy_dflt_conf(net, i);
2054 if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1 ||
2055 i == IPV4_DEVCONF_ROUTE_LOCALNET - 1)
2056 if ((new_value == 0) && (old_value != 0))
2057 rt_cache_flush(net);
2059 if (i == IPV4_DEVCONF_RP_FILTER - 1 &&
2060 new_value != old_value) {
2061 ifindex = devinet_conf_ifindex(net, cnf);
2062 inet_netconf_notify_devconf(net, NETCONFA_RP_FILTER,
2065 if (i == IPV4_DEVCONF_PROXY_ARP - 1 &&
2066 new_value != old_value) {
2067 ifindex = devinet_conf_ifindex(net, cnf);
2068 inet_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH,
2071 if (i == IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN - 1 &&
2072 new_value != old_value) {
2073 ifindex = devinet_conf_ifindex(net, cnf);
2074 inet_netconf_notify_devconf(net, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
2082 static int devinet_sysctl_forward(struct ctl_table *ctl, int write,
2083 void __user *buffer,
2084 size_t *lenp, loff_t *ppos)
2086 int *valp = ctl->data;
2089 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2091 if (write && *valp != val) {
2092 struct net *net = ctl->extra2;
2094 if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) {
2095 if (!rtnl_trylock()) {
2096 /* Restore the original values before restarting */
2099 return restart_syscall();
2101 if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) {
2102 inet_forward_change(net);
2104 struct ipv4_devconf *cnf = ctl->extra1;
2105 struct in_device *idev =
2106 container_of(cnf, struct in_device, cnf);
2108 dev_disable_lro(idev->dev);
2109 inet_netconf_notify_devconf(net,
2110 NETCONFA_FORWARDING,
2115 rt_cache_flush(net);
2117 inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
2118 NETCONFA_IFINDEX_DEFAULT,
2119 net->ipv4.devconf_dflt);
2125 static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
2126 void __user *buffer,
2127 size_t *lenp, loff_t *ppos)
2129 int *valp = ctl->data;
2131 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2132 struct net *net = ctl->extra2;
2134 if (write && *valp != val)
2135 rt_cache_flush(net);
/* Build one struct ctl_table initializer for devconf entry 'attr'.  The
 * .data/.extra1 fields point into the template ipv4_devconf and are
 * relocated per-instance in __devinet_sysctl_register().
 */
#define DEVINET_SYSCTL_ENTRY(attr, name, mval, proc) \
	{ \
		.procname	= name, \
		.data		= ipv4_devconf.data + \
				  IPV4_DEVCONF_ ## attr - 1, \
		.maxlen		= sizeof(int), \
		.mode		= mval, \
		.proc_handler	= proc, \
		.extra1		= &ipv4_devconf, \
	}

/* read-write entry with the generic devconf handler */
#define DEVINET_SYSCTL_RW_ENTRY(attr, name) \
	DEVINET_SYSCTL_ENTRY(attr, name, 0644, devinet_conf_proc)

/* read-only entry (value maintained by the kernel) */
#define DEVINET_SYSCTL_RO_ENTRY(attr, name) \
	DEVINET_SYSCTL_ENTRY(attr, name, 0444, devinet_conf_proc)

/* read-write entry with a custom handler */
#define DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, proc) \
	DEVINET_SYSCTL_ENTRY(attr, name, 0644, proc)

/* read-write entry that flushes the route cache on change */
#define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
	DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
2163 static struct devinet_sysctl_table {
2164 struct ctl_table_header *sysctl_header;
2165 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
2166 } devinet_sysctl = {
2168 DEVINET_SYSCTL_COMPLEX_ENTRY(FORWARDING, "forwarding",
2169 devinet_sysctl_forward),
2170 DEVINET_SYSCTL_RO_ENTRY(MC_FORWARDING, "mc_forwarding"),
2172 DEVINET_SYSCTL_RW_ENTRY(ACCEPT_REDIRECTS, "accept_redirects"),
2173 DEVINET_SYSCTL_RW_ENTRY(SECURE_REDIRECTS, "secure_redirects"),
2174 DEVINET_SYSCTL_RW_ENTRY(SHARED_MEDIA, "shared_media"),
2175 DEVINET_SYSCTL_RW_ENTRY(RP_FILTER, "rp_filter"),
2176 DEVINET_SYSCTL_RW_ENTRY(SEND_REDIRECTS, "send_redirects"),
2177 DEVINET_SYSCTL_RW_ENTRY(ACCEPT_SOURCE_ROUTE,
2178 "accept_source_route"),
2179 DEVINET_SYSCTL_RW_ENTRY(ACCEPT_LOCAL, "accept_local"),
2180 DEVINET_SYSCTL_RW_ENTRY(SRC_VMARK, "src_valid_mark"),
2181 DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP, "proxy_arp"),
2182 DEVINET_SYSCTL_RW_ENTRY(MEDIUM_ID, "medium_id"),
2183 DEVINET_SYSCTL_RW_ENTRY(BOOTP_RELAY, "bootp_relay"),
2184 DEVINET_SYSCTL_RW_ENTRY(LOG_MARTIANS, "log_martians"),
2185 DEVINET_SYSCTL_RW_ENTRY(TAG, "tag"),
2186 DEVINET_SYSCTL_RW_ENTRY(ARPFILTER, "arp_filter"),
2187 DEVINET_SYSCTL_RW_ENTRY(ARP_ANNOUNCE, "arp_announce"),
2188 DEVINET_SYSCTL_RW_ENTRY(ARP_IGNORE, "arp_ignore"),
2189 DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"),
2190 DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"),
2191 DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP_PVLAN, "proxy_arp_pvlan"),
2192 DEVINET_SYSCTL_RW_ENTRY(FORCE_IGMP_VERSION,
2193 "force_igmp_version"),
2194 DEVINET_SYSCTL_RW_ENTRY(IGMPV2_UNSOLICITED_REPORT_INTERVAL,
2195 "igmpv2_unsolicited_report_interval"),
2196 DEVINET_SYSCTL_RW_ENTRY(IGMPV3_UNSOLICITED_REPORT_INTERVAL,
2197 "igmpv3_unsolicited_report_interval"),
2198 DEVINET_SYSCTL_RW_ENTRY(IGNORE_ROUTES_WITH_LINKDOWN,
2199 "ignore_routes_with_linkdown"),
2201 DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"),
2202 DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"),
2203 DEVINET_SYSCTL_FLUSHING_ENTRY(PROMOTE_SECONDARIES,
2204 "promote_secondaries"),
2205 DEVINET_SYSCTL_FLUSHING_ENTRY(ROUTE_LOCALNET,
2207 DEVINET_SYSCTL_RW_ENTRY(NF_IPV4_DEFRAG_SKIP,
2208 "nf_ipv4_defrag_skip"),
2212 static int __devinet_sysctl_register(struct net *net, char *dev_name,
2213 struct ipv4_devconf *p)
2216 struct devinet_sysctl_table *t;
2217 char path[sizeof("net/ipv4/conf/") + IFNAMSIZ];
2219 t = kmemdup(&devinet_sysctl, sizeof(*t), GFP_KERNEL);
2223 for (i = 0; i < ARRAY_SIZE(t->devinet_vars) - 1; i++) {
2224 t->devinet_vars[i].data += (char *)p - (char *)&ipv4_devconf;
2225 t->devinet_vars[i].extra1 = p;
2226 t->devinet_vars[i].extra2 = net;
2229 snprintf(path, sizeof(path), "net/ipv4/conf/%s", dev_name);
2231 t->sysctl_header = register_net_sysctl(net, path, t->devinet_vars);
2232 if (!t->sysctl_header)
2244 static void __devinet_sysctl_unregister(struct ipv4_devconf *cnf)
2246 struct devinet_sysctl_table *t = cnf->sysctl;
2252 unregister_net_sysctl_table(t->sysctl_header);
2256 static int devinet_sysctl_register(struct in_device *idev)
2260 if (!sysctl_dev_name_is_allowed(idev->dev->name))
2263 err = neigh_sysctl_register(idev->dev, idev->arp_parms, NULL);
2266 err = __devinet_sysctl_register(dev_net(idev->dev), idev->dev->name,
2269 neigh_sysctl_unregister(idev->arp_parms);
2273 static void devinet_sysctl_unregister(struct in_device *idev)
2275 __devinet_sysctl_unregister(&idev->cnf);
2276 neigh_sysctl_unregister(idev->arp_parms);
2279 static struct ctl_table ctl_forward_entry[] = {
2281 .procname = "ip_forward",
2282 .data = &ipv4_devconf.data[
2283 IPV4_DEVCONF_FORWARDING - 1],
2284 .maxlen = sizeof(int),
2286 .proc_handler = devinet_sysctl_forward,
2287 .extra1 = &ipv4_devconf,
2288 .extra2 = &init_net,
2294 static __net_init int devinet_init_net(struct net *net)
2297 struct ipv4_devconf *all, *dflt;
2298 #ifdef CONFIG_SYSCTL
2299 struct ctl_table *tbl = ctl_forward_entry;
2300 struct ctl_table_header *forw_hdr;
2304 all = &ipv4_devconf;
2305 dflt = &ipv4_devconf_dflt;
2307 if (!net_eq(net, &init_net)) {
2308 all = kmemdup(all, sizeof(ipv4_devconf), GFP_KERNEL);
2312 dflt = kmemdup(dflt, sizeof(ipv4_devconf_dflt), GFP_KERNEL);
2314 goto err_alloc_dflt;
2316 #ifdef CONFIG_SYSCTL
2317 tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
2321 tbl[0].data = &all->data[IPV4_DEVCONF_FORWARDING - 1];
2322 tbl[0].extra1 = all;
2323 tbl[0].extra2 = net;
2327 #ifdef CONFIG_SYSCTL
2328 err = __devinet_sysctl_register(net, "all", all);
2332 err = __devinet_sysctl_register(net, "default", dflt);
2337 forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
2340 net->ipv4.forw_hdr = forw_hdr;
2343 net->ipv4.devconf_all = all;
2344 net->ipv4.devconf_dflt = dflt;
2347 #ifdef CONFIG_SYSCTL
2349 __devinet_sysctl_unregister(dflt);
2351 __devinet_sysctl_unregister(all);
2353 if (tbl != ctl_forward_entry)
2357 if (dflt != &ipv4_devconf_dflt)
2360 if (all != &ipv4_devconf)
2366 static __net_exit void devinet_exit_net(struct net *net)
2368 #ifdef CONFIG_SYSCTL
2369 struct ctl_table *tbl;
2371 tbl = net->ipv4.forw_hdr->ctl_table_arg;
2372 unregister_net_sysctl_table(net->ipv4.forw_hdr);
2373 __devinet_sysctl_unregister(net->ipv4.devconf_dflt);
2374 __devinet_sysctl_unregister(net->ipv4.devconf_all);
2377 kfree(net->ipv4.devconf_dflt);
2378 kfree(net->ipv4.devconf_all);
2381 static __net_initdata struct pernet_operations devinet_ops = {
2382 .init = devinet_init_net,
2383 .exit = devinet_exit_net,
2386 static struct rtnl_af_ops inet_af_ops __read_mostly = {
2388 .fill_link_af = inet_fill_link_af,
2389 .get_link_af_size = inet_get_link_af_size,
2390 .validate_link_af = inet_validate_link_af,
2391 .set_link_af = inet_set_link_af,
2394 void __init devinet_init(void)
2398 for (i = 0; i < IN4_ADDR_HSIZE; i++)
2399 INIT_HLIST_HEAD(&inet_addr_lst[i]);
2401 register_pernet_subsys(&devinet_ops);
2403 register_gifconf(PF_INET, inet_gifconf);
2404 register_netdevice_notifier(&ip_netdev_notifier);
2406 queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0);
2408 rtnl_af_register(&inet_af_ops);
2410 rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, NULL);
2411 rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, NULL);
2412 rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr, NULL);
2413 rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf,
2414 inet_netconf_dump_devconf, NULL);