/*
 * drivers/net/bond/bond_netlink.c - Netlink interface for bonding
 * Copyright (c) 2013 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2013 Scott Feldman <sfeldma@cumulusnetworks.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 #include <linux/module.h>
15 #include <linux/errno.h>
16 #include <linux/netdevice.h>
17 #include <linux/etherdevice.h>
18 #include <linux/if_link.h>
19 #include <linux/if_ether.h>
20 #include <net/netlink.h>
21 #include <net/rtnetlink.h>
24 static size_t bond_get_slave_size(const struct net_device *bond_dev,
25 const struct net_device *slave_dev)
27 return nla_total_size(sizeof(u8)) + /* IFLA_BOND_SLAVE_STATE */
28 nla_total_size(sizeof(u8)) + /* IFLA_BOND_SLAVE_MII_STATUS */
29 nla_total_size(sizeof(u32)) + /* IFLA_BOND_SLAVE_LINK_FAILURE_COUNT */
30 nla_total_size(MAX_ADDR_LEN) + /* IFLA_BOND_SLAVE_PERM_HWADDR */
31 nla_total_size(sizeof(u16)) + /* IFLA_BOND_SLAVE_QUEUE_ID */
32 nla_total_size(sizeof(u16)) + /* IFLA_BOND_SLAVE_AD_AGGREGATOR_ID */
36 static int bond_fill_slave_info(struct sk_buff *skb,
37 const struct net_device *bond_dev,
38 const struct net_device *slave_dev)
40 struct slave *slave = bond_slave_get_rtnl(slave_dev);
42 if (nla_put_u8(skb, IFLA_BOND_SLAVE_STATE, bond_slave_state(slave)))
45 if (nla_put_u8(skb, IFLA_BOND_SLAVE_MII_STATUS, slave->link))
48 if (nla_put_u32(skb, IFLA_BOND_SLAVE_LINK_FAILURE_COUNT,
49 slave->link_failure_count))
52 if (nla_put(skb, IFLA_BOND_SLAVE_PERM_HWADDR,
53 slave_dev->addr_len, slave->perm_hwaddr))
56 if (nla_put_u16(skb, IFLA_BOND_SLAVE_QUEUE_ID, slave->queue_id))
59 if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
60 const struct aggregator *agg;
62 agg = SLAVE_AD_INFO(slave)->port.aggregator;
64 if (nla_put_u16(skb, IFLA_BOND_SLAVE_AD_AGGREGATOR_ID,
65 agg->aggregator_identifier))
75 static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
76 [IFLA_BOND_MODE] = { .type = NLA_U8 },
77 [IFLA_BOND_ACTIVE_SLAVE] = { .type = NLA_U32 },
78 [IFLA_BOND_MIIMON] = { .type = NLA_U32 },
79 [IFLA_BOND_UPDELAY] = { .type = NLA_U32 },
80 [IFLA_BOND_DOWNDELAY] = { .type = NLA_U32 },
81 [IFLA_BOND_USE_CARRIER] = { .type = NLA_U8 },
82 [IFLA_BOND_ARP_INTERVAL] = { .type = NLA_U32 },
83 [IFLA_BOND_ARP_IP_TARGET] = { .type = NLA_NESTED },
84 [IFLA_BOND_ARP_VALIDATE] = { .type = NLA_U32 },
85 [IFLA_BOND_ARP_ALL_TARGETS] = { .type = NLA_U32 },
86 [IFLA_BOND_PRIMARY] = { .type = NLA_U32 },
87 [IFLA_BOND_PRIMARY_RESELECT] = { .type = NLA_U8 },
88 [IFLA_BOND_FAIL_OVER_MAC] = { .type = NLA_U8 },
89 [IFLA_BOND_XMIT_HASH_POLICY] = { .type = NLA_U8 },
90 [IFLA_BOND_RESEND_IGMP] = { .type = NLA_U32 },
91 [IFLA_BOND_NUM_PEER_NOTIF] = { .type = NLA_U8 },
92 [IFLA_BOND_ALL_SLAVES_ACTIVE] = { .type = NLA_U8 },
93 [IFLA_BOND_MIN_LINKS] = { .type = NLA_U32 },
94 [IFLA_BOND_LP_INTERVAL] = { .type = NLA_U32 },
95 [IFLA_BOND_PACKETS_PER_SLAVE] = { .type = NLA_U32 },
96 [IFLA_BOND_AD_LACP_RATE] = { .type = NLA_U8 },
97 [IFLA_BOND_AD_SELECT] = { .type = NLA_U8 },
98 [IFLA_BOND_AD_INFO] = { .type = NLA_NESTED },
101 static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
103 if (tb[IFLA_ADDRESS]) {
104 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
106 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
107 return -EADDRNOTAVAIL;
112 static int bond_changelink(struct net_device *bond_dev,
113 struct nlattr *tb[], struct nlattr *data[])
115 struct bonding *bond = netdev_priv(bond_dev);
116 struct bond_opt_value newval;
123 if (data[IFLA_BOND_MODE]) {
124 int mode = nla_get_u8(data[IFLA_BOND_MODE]);
126 bond_opt_initval(&newval, mode);
127 err = __bond_opt_set(bond, BOND_OPT_MODE, &newval);
131 if (data[IFLA_BOND_ACTIVE_SLAVE]) {
132 int ifindex = nla_get_u32(data[IFLA_BOND_ACTIVE_SLAVE]);
133 struct net_device *slave_dev;
134 char *active_slave = "";
137 slave_dev = __dev_get_by_index(dev_net(bond_dev),
141 active_slave = slave_dev->name;
143 bond_opt_initstr(&newval, active_slave);
144 err = __bond_opt_set(bond, BOND_OPT_ACTIVE_SLAVE, &newval);
148 if (data[IFLA_BOND_MIIMON]) {
149 miimon = nla_get_u32(data[IFLA_BOND_MIIMON]);
151 bond_opt_initval(&newval, miimon);
152 err = __bond_opt_set(bond, BOND_OPT_MIIMON, &newval);
156 if (data[IFLA_BOND_UPDELAY]) {
157 int updelay = nla_get_u32(data[IFLA_BOND_UPDELAY]);
159 bond_opt_initval(&newval, updelay);
160 err = __bond_opt_set(bond, BOND_OPT_UPDELAY, &newval);
164 if (data[IFLA_BOND_DOWNDELAY]) {
165 int downdelay = nla_get_u32(data[IFLA_BOND_DOWNDELAY]);
167 bond_opt_initval(&newval, downdelay);
168 err = __bond_opt_set(bond, BOND_OPT_DOWNDELAY, &newval);
172 if (data[IFLA_BOND_USE_CARRIER]) {
173 int use_carrier = nla_get_u8(data[IFLA_BOND_USE_CARRIER]);
175 bond_opt_initval(&newval, use_carrier);
176 err = __bond_opt_set(bond, BOND_OPT_USE_CARRIER, &newval);
180 if (data[IFLA_BOND_ARP_INTERVAL]) {
181 int arp_interval = nla_get_u32(data[IFLA_BOND_ARP_INTERVAL]);
183 if (arp_interval && miimon) {
184 netdev_err(bond->dev, "ARP monitoring cannot be used with MII monitoring\n");
188 bond_opt_initval(&newval, arp_interval);
189 err = __bond_opt_set(bond, BOND_OPT_ARP_INTERVAL, &newval);
193 if (data[IFLA_BOND_ARP_IP_TARGET]) {
197 bond_option_arp_ip_targets_clear(bond);
198 nla_for_each_nested(attr, data[IFLA_BOND_ARP_IP_TARGET], rem) {
199 __be32 target = nla_get_be32(attr);
201 bond_opt_initval(&newval, (__force u64)target);
202 err = __bond_opt_set(bond, BOND_OPT_ARP_TARGETS,
208 if (i == 0 && bond->params.arp_interval)
209 netdev_warn(bond->dev, "Removing last arp target with arp_interval on\n");
213 if (data[IFLA_BOND_ARP_VALIDATE]) {
214 int arp_validate = nla_get_u32(data[IFLA_BOND_ARP_VALIDATE]);
216 if (arp_validate && miimon) {
217 netdev_err(bond->dev, "ARP validating cannot be used with MII monitoring\n");
221 bond_opt_initval(&newval, arp_validate);
222 err = __bond_opt_set(bond, BOND_OPT_ARP_VALIDATE, &newval);
226 if (data[IFLA_BOND_ARP_ALL_TARGETS]) {
227 int arp_all_targets =
228 nla_get_u32(data[IFLA_BOND_ARP_ALL_TARGETS]);
230 bond_opt_initval(&newval, arp_all_targets);
231 err = __bond_opt_set(bond, BOND_OPT_ARP_ALL_TARGETS, &newval);
235 if (data[IFLA_BOND_PRIMARY]) {
236 int ifindex = nla_get_u32(data[IFLA_BOND_PRIMARY]);
237 struct net_device *dev;
240 dev = __dev_get_by_index(dev_net(bond_dev), ifindex);
244 bond_opt_initstr(&newval, primary);
245 err = __bond_opt_set(bond, BOND_OPT_PRIMARY, &newval);
249 if (data[IFLA_BOND_PRIMARY_RESELECT]) {
250 int primary_reselect =
251 nla_get_u8(data[IFLA_BOND_PRIMARY_RESELECT]);
253 bond_opt_initval(&newval, primary_reselect);
254 err = __bond_opt_set(bond, BOND_OPT_PRIMARY_RESELECT, &newval);
258 if (data[IFLA_BOND_FAIL_OVER_MAC]) {
260 nla_get_u8(data[IFLA_BOND_FAIL_OVER_MAC]);
262 bond_opt_initval(&newval, fail_over_mac);
263 err = __bond_opt_set(bond, BOND_OPT_FAIL_OVER_MAC, &newval);
267 if (data[IFLA_BOND_XMIT_HASH_POLICY]) {
268 int xmit_hash_policy =
269 nla_get_u8(data[IFLA_BOND_XMIT_HASH_POLICY]);
271 bond_opt_initval(&newval, xmit_hash_policy);
272 err = __bond_opt_set(bond, BOND_OPT_XMIT_HASH, &newval);
276 if (data[IFLA_BOND_RESEND_IGMP]) {
278 nla_get_u32(data[IFLA_BOND_RESEND_IGMP]);
280 bond_opt_initval(&newval, resend_igmp);
281 err = __bond_opt_set(bond, BOND_OPT_RESEND_IGMP, &newval);
285 if (data[IFLA_BOND_NUM_PEER_NOTIF]) {
287 nla_get_u8(data[IFLA_BOND_NUM_PEER_NOTIF]);
289 bond_opt_initval(&newval, num_peer_notif);
290 err = __bond_opt_set(bond, BOND_OPT_NUM_PEER_NOTIF, &newval);
294 if (data[IFLA_BOND_ALL_SLAVES_ACTIVE]) {
295 int all_slaves_active =
296 nla_get_u8(data[IFLA_BOND_ALL_SLAVES_ACTIVE]);
298 bond_opt_initval(&newval, all_slaves_active);
299 err = __bond_opt_set(bond, BOND_OPT_ALL_SLAVES_ACTIVE, &newval);
303 if (data[IFLA_BOND_MIN_LINKS]) {
305 nla_get_u32(data[IFLA_BOND_MIN_LINKS]);
307 bond_opt_initval(&newval, min_links);
308 err = __bond_opt_set(bond, BOND_OPT_MINLINKS, &newval);
312 if (data[IFLA_BOND_LP_INTERVAL]) {
314 nla_get_u32(data[IFLA_BOND_LP_INTERVAL]);
316 bond_opt_initval(&newval, lp_interval);
317 err = __bond_opt_set(bond, BOND_OPT_LP_INTERVAL, &newval);
321 if (data[IFLA_BOND_PACKETS_PER_SLAVE]) {
322 int packets_per_slave =
323 nla_get_u32(data[IFLA_BOND_PACKETS_PER_SLAVE]);
325 bond_opt_initval(&newval, packets_per_slave);
326 err = __bond_opt_set(bond, BOND_OPT_PACKETS_PER_SLAVE, &newval);
330 if (data[IFLA_BOND_AD_LACP_RATE]) {
332 nla_get_u8(data[IFLA_BOND_AD_LACP_RATE]);
334 bond_opt_initval(&newval, lacp_rate);
335 err = __bond_opt_set(bond, BOND_OPT_LACP_RATE, &newval);
339 if (data[IFLA_BOND_AD_SELECT]) {
341 nla_get_u8(data[IFLA_BOND_AD_SELECT]);
343 bond_opt_initval(&newval, ad_select);
344 err = __bond_opt_set(bond, BOND_OPT_AD_SELECT, &newval);
/* rtnl_link_ops->newlink: apply any supplied options, then register the
 * new bond device. Returns 0 or a negative errno.
 */
static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
			struct nlattr *tb[], struct nlattr *data[])
{
	int err;

	err = bond_changelink(bond_dev, tb, data);
	if (err < 0)
		return err;

	return register_netdevice(bond_dev);
}
363 static size_t bond_get_size(const struct net_device *bond_dev)
365 return nla_total_size(sizeof(u8)) + /* IFLA_BOND_MODE */
366 nla_total_size(sizeof(u32)) + /* IFLA_BOND_ACTIVE_SLAVE */
367 nla_total_size(sizeof(u32)) + /* IFLA_BOND_MIIMON */
368 nla_total_size(sizeof(u32)) + /* IFLA_BOND_UPDELAY */
369 nla_total_size(sizeof(u32)) + /* IFLA_BOND_DOWNDELAY */
370 nla_total_size(sizeof(u8)) + /* IFLA_BOND_USE_CARRIER */
371 nla_total_size(sizeof(u32)) + /* IFLA_BOND_ARP_INTERVAL */
372 /* IFLA_BOND_ARP_IP_TARGET */
373 nla_total_size(sizeof(struct nlattr)) +
374 nla_total_size(sizeof(u32)) * BOND_MAX_ARP_TARGETS +
375 nla_total_size(sizeof(u32)) + /* IFLA_BOND_ARP_VALIDATE */
376 nla_total_size(sizeof(u32)) + /* IFLA_BOND_ARP_ALL_TARGETS */
377 nla_total_size(sizeof(u32)) + /* IFLA_BOND_PRIMARY */
378 nla_total_size(sizeof(u8)) + /* IFLA_BOND_PRIMARY_RESELECT */
379 nla_total_size(sizeof(u8)) + /* IFLA_BOND_FAIL_OVER_MAC */
380 nla_total_size(sizeof(u8)) + /* IFLA_BOND_XMIT_HASH_POLICY */
381 nla_total_size(sizeof(u32)) + /* IFLA_BOND_RESEND_IGMP */
382 nla_total_size(sizeof(u8)) + /* IFLA_BOND_NUM_PEER_NOTIF */
383 nla_total_size(sizeof(u8)) + /* IFLA_BOND_ALL_SLAVES_ACTIVE */
384 nla_total_size(sizeof(u32)) + /* IFLA_BOND_MIN_LINKS */
385 nla_total_size(sizeof(u32)) + /* IFLA_BOND_LP_INTERVAL */
386 nla_total_size(sizeof(u32)) + /* IFLA_BOND_PACKETS_PER_SLAVE */
387 nla_total_size(sizeof(u8)) + /* IFLA_BOND_AD_LACP_RATE */
388 nla_total_size(sizeof(u8)) + /* IFLA_BOND_AD_SELECT */
389 nla_total_size(sizeof(struct nlattr)) + /* IFLA_BOND_AD_INFO */
390 nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_AGGREGATOR */
391 nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_NUM_PORTS */
392 nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_ACTOR_KEY */
393 nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_PARTNER_KEY*/
394 nla_total_size(ETH_ALEN) + /* IFLA_BOND_AD_INFO_PARTNER_MAC*/
398 static int bond_option_active_slave_get_ifindex(struct bonding *bond)
400 const struct net_device *slave;
404 slave = bond_option_active_slave_get_rcu(bond);
405 ifindex = slave ? slave->ifindex : 0;
410 static int bond_fill_info(struct sk_buff *skb,
411 const struct net_device *bond_dev)
413 struct bonding *bond = netdev_priv(bond_dev);
414 unsigned int packets_per_slave;
415 int ifindex, i, targets_added;
416 struct nlattr *targets;
418 if (nla_put_u8(skb, IFLA_BOND_MODE, BOND_MODE(bond)))
419 goto nla_put_failure;
421 ifindex = bond_option_active_slave_get_ifindex(bond);
422 if (ifindex && nla_put_u32(skb, IFLA_BOND_ACTIVE_SLAVE, ifindex))
423 goto nla_put_failure;
425 if (nla_put_u32(skb, IFLA_BOND_MIIMON, bond->params.miimon))
426 goto nla_put_failure;
428 if (nla_put_u32(skb, IFLA_BOND_UPDELAY,
429 bond->params.updelay * bond->params.miimon))
430 goto nla_put_failure;
432 if (nla_put_u32(skb, IFLA_BOND_DOWNDELAY,
433 bond->params.downdelay * bond->params.miimon))
434 goto nla_put_failure;
436 if (nla_put_u8(skb, IFLA_BOND_USE_CARRIER, bond->params.use_carrier))
437 goto nla_put_failure;
439 if (nla_put_u32(skb, IFLA_BOND_ARP_INTERVAL, bond->params.arp_interval))
440 goto nla_put_failure;
442 targets = nla_nest_start(skb, IFLA_BOND_ARP_IP_TARGET);
444 goto nla_put_failure;
447 for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) {
448 if (bond->params.arp_targets[i]) {
449 nla_put_be32(skb, i, bond->params.arp_targets[i]);
455 nla_nest_end(skb, targets);
457 nla_nest_cancel(skb, targets);
459 if (nla_put_u32(skb, IFLA_BOND_ARP_VALIDATE, bond->params.arp_validate))
460 goto nla_put_failure;
462 if (nla_put_u32(skb, IFLA_BOND_ARP_ALL_TARGETS,
463 bond->params.arp_all_targets))
464 goto nla_put_failure;
466 if (bond->primary_slave &&
467 nla_put_u32(skb, IFLA_BOND_PRIMARY,
468 bond->primary_slave->dev->ifindex))
469 goto nla_put_failure;
471 if (nla_put_u8(skb, IFLA_BOND_PRIMARY_RESELECT,
472 bond->params.primary_reselect))
473 goto nla_put_failure;
475 if (nla_put_u8(skb, IFLA_BOND_FAIL_OVER_MAC,
476 bond->params.fail_over_mac))
477 goto nla_put_failure;
479 if (nla_put_u8(skb, IFLA_BOND_XMIT_HASH_POLICY,
480 bond->params.xmit_policy))
481 goto nla_put_failure;
483 if (nla_put_u32(skb, IFLA_BOND_RESEND_IGMP,
484 bond->params.resend_igmp))
485 goto nla_put_failure;
487 if (nla_put_u8(skb, IFLA_BOND_NUM_PEER_NOTIF,
488 bond->params.num_peer_notif))
489 goto nla_put_failure;
491 if (nla_put_u8(skb, IFLA_BOND_ALL_SLAVES_ACTIVE,
492 bond->params.all_slaves_active))
493 goto nla_put_failure;
495 if (nla_put_u32(skb, IFLA_BOND_MIN_LINKS,
496 bond->params.min_links))
497 goto nla_put_failure;
499 if (nla_put_u32(skb, IFLA_BOND_LP_INTERVAL,
500 bond->params.lp_interval))
501 goto nla_put_failure;
503 packets_per_slave = bond->params.packets_per_slave;
504 if (nla_put_u32(skb, IFLA_BOND_PACKETS_PER_SLAVE,
506 goto nla_put_failure;
508 if (nla_put_u8(skb, IFLA_BOND_AD_LACP_RATE,
509 bond->params.lacp_fast))
510 goto nla_put_failure;
512 if (nla_put_u8(skb, IFLA_BOND_AD_SELECT,
513 bond->params.ad_select))
514 goto nla_put_failure;
516 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
519 if (!bond_3ad_get_active_agg_info(bond, &info)) {
522 nest = nla_nest_start(skb, IFLA_BOND_AD_INFO);
524 goto nla_put_failure;
526 if (nla_put_u16(skb, IFLA_BOND_AD_INFO_AGGREGATOR,
528 goto nla_put_failure;
529 if (nla_put_u16(skb, IFLA_BOND_AD_INFO_NUM_PORTS,
531 goto nla_put_failure;
532 if (nla_put_u16(skb, IFLA_BOND_AD_INFO_ACTOR_KEY,
534 goto nla_put_failure;
535 if (nla_put_u16(skb, IFLA_BOND_AD_INFO_PARTNER_KEY,
537 goto nla_put_failure;
538 if (nla_put(skb, IFLA_BOND_AD_INFO_PARTNER_MAC,
539 sizeof(info.partner_system),
540 &info.partner_system))
541 goto nla_put_failure;
543 nla_nest_end(skb, nest);
553 struct rtnl_link_ops bond_link_ops __read_mostly = {
555 .priv_size = sizeof(struct bonding),
557 .maxtype = IFLA_BOND_MAX,
558 .policy = bond_policy,
559 .validate = bond_validate,
560 .newlink = bond_newlink,
561 .changelink = bond_changelink,
562 .get_size = bond_get_size,
563 .fill_info = bond_fill_info,
564 .get_num_tx_queues = bond_get_num_tx_queues,
565 .get_num_rx_queues = bond_get_num_tx_queues, /* Use the same number
567 .get_slave_size = bond_get_slave_size,
568 .fill_slave_info = bond_fill_slave_info,
571 int __init bond_netlink_init(void)
573 return rtnl_link_register(&bond_link_ops);
576 void bond_netlink_fini(void)
578 rtnl_link_unregister(&bond_link_ops);
/* Auto-load this module when userspace creates a link of type "bond". */
MODULE_ALIAS_RTNL_LINK("bond");