// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch chip, part of a switch fabric
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <net/switchdev.h>

#include "dsa_priv.h"
17 static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
18 unsigned int ageing_time)
22 dsa_switch_for_each_port(dp, ds)
23 if (dp->ageing_time && dp->ageing_time < ageing_time)
24 ageing_time = dp->ageing_time;
29 static int dsa_switch_ageing_time(struct dsa_switch *ds,
30 struct dsa_notifier_ageing_time_info *info)
32 unsigned int ageing_time = info->ageing_time;
34 if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
37 if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
40 /* Program the fastest ageing time in case of multiple bridges */
41 ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);
43 if (ds->ops->set_ageing_time)
44 return ds->ops->set_ageing_time(ds, ageing_time);
49 static bool dsa_port_mtu_match(struct dsa_port *dp,
50 struct dsa_notifier_mtu_info *info)
52 if (dp->ds->index == info->sw_index && dp->index == info->port)
55 /* Do not propagate to other switches in the tree if the notifier was
56 * targeted for a single switch.
58 if (info->targeted_match)
61 if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
67 static int dsa_switch_mtu(struct dsa_switch *ds,
68 struct dsa_notifier_mtu_info *info)
73 if (!ds->ops->port_change_mtu)
76 dsa_switch_for_each_port(dp, ds) {
77 if (dsa_port_mtu_match(dp, info)) {
78 ret = ds->ops->port_change_mtu(ds, dp->index,
88 static int dsa_switch_bridge_join(struct dsa_switch *ds,
89 struct dsa_notifier_bridge_info *info)
91 struct dsa_switch_tree *dst = ds->dst;
94 if (dst->index == info->tree_index && ds->index == info->sw_index) {
95 if (!ds->ops->port_bridge_join)
98 err = ds->ops->port_bridge_join(ds, info->port, info->bridge,
99 &info->tx_fwd_offload);
104 if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
105 ds->ops->crosschip_bridge_join) {
106 err = ds->ops->crosschip_bridge_join(ds, info->tree_index,
108 info->port, info->bridge);
113 return dsa_tag_8021q_bridge_join(ds, info);
116 static int dsa_switch_bridge_leave(struct dsa_switch *ds,
117 struct dsa_notifier_bridge_info *info)
119 struct dsa_switch_tree *dst = ds->dst;
120 struct netlink_ext_ack extack = {0};
121 bool change_vlan_filtering = false;
126 if (dst->index == info->tree_index && ds->index == info->sw_index &&
127 ds->ops->port_bridge_leave)
128 ds->ops->port_bridge_leave(ds, info->port, info->bridge);
130 if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
131 ds->ops->crosschip_bridge_leave)
132 ds->ops->crosschip_bridge_leave(ds, info->tree_index,
133 info->sw_index, info->port,
136 if (ds->needs_standalone_vlan_filtering &&
137 !br_vlan_enabled(info->bridge.dev)) {
138 change_vlan_filtering = true;
139 vlan_filtering = true;
140 } else if (!ds->needs_standalone_vlan_filtering &&
141 br_vlan_enabled(info->bridge.dev)) {
142 change_vlan_filtering = true;
143 vlan_filtering = false;
146 /* If the bridge was vlan_filtering, the bridge core doesn't trigger an
147 * event for changing vlan_filtering setting upon slave ports leaving
148 * it. That is a good thing, because that lets us handle it and also
149 * handle the case where the switch's vlan_filtering setting is global
150 * (not per port). When that happens, the correct moment to trigger the
151 * vlan_filtering callback is only when the last port leaves the last
154 if (change_vlan_filtering && ds->vlan_filtering_is_global) {
155 dsa_switch_for_each_port(dp, ds) {
156 struct net_device *br = dsa_port_bridge_dev_get(dp);
158 if (br && br_vlan_enabled(br)) {
159 change_vlan_filtering = false;
165 if (change_vlan_filtering) {
166 err = dsa_port_vlan_filtering(dsa_to_port(ds, info->port),
167 vlan_filtering, &extack);
169 dev_err(ds->dev, "port %d: %s\n", info->port,
171 if (err && err != -EOPNOTSUPP)
175 return dsa_tag_8021q_bridge_leave(ds, info);
178 /* Matches for all upstream-facing ports (the CPU port and all upstream-facing
179 * DSA links) that sit between the targeted port on which the notifier was
180 * emitted and its dedicated CPU port.
182 static bool dsa_port_host_address_match(struct dsa_port *dp,
183 int info_sw_index, int info_port)
185 struct dsa_port *targeted_dp, *cpu_dp;
186 struct dsa_switch *targeted_ds;
188 targeted_ds = dsa_switch_find(dp->ds->dst->index, info_sw_index);
189 targeted_dp = dsa_to_port(targeted_ds, info_port);
190 cpu_dp = targeted_dp->cpu_dp;
192 if (dsa_switch_is_upstream_of(dp->ds, targeted_ds))
193 return dp->index == dsa_towards_port(dp->ds, cpu_dp->ds->index,
199 static struct dsa_mac_addr *dsa_mac_addr_find(struct list_head *addr_list,
200 const unsigned char *addr,
203 struct dsa_mac_addr *a;
205 list_for_each_entry(a, addr_list, list)
206 if (ether_addr_equal(a->addr, addr) && a->vid == vid)
212 static int dsa_port_do_mdb_add(struct dsa_port *dp,
213 const struct switchdev_obj_port_mdb *mdb)
215 struct dsa_switch *ds = dp->ds;
216 struct dsa_mac_addr *a;
217 int port = dp->index;
220 /* No need to bother with refcounting for user ports */
221 if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
222 return ds->ops->port_mdb_add(ds, port, mdb);
224 mutex_lock(&dp->addr_lists_lock);
226 a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid);
228 refcount_inc(&a->refcount);
232 a = kzalloc(sizeof(*a), GFP_KERNEL);
238 err = ds->ops->port_mdb_add(ds, port, mdb);
244 ether_addr_copy(a->addr, mdb->addr);
246 refcount_set(&a->refcount, 1);
247 list_add_tail(&a->list, &dp->mdbs);
250 mutex_unlock(&dp->addr_lists_lock);
255 static int dsa_port_do_mdb_del(struct dsa_port *dp,
256 const struct switchdev_obj_port_mdb *mdb)
258 struct dsa_switch *ds = dp->ds;
259 struct dsa_mac_addr *a;
260 int port = dp->index;
263 /* No need to bother with refcounting for user ports */
264 if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
265 return ds->ops->port_mdb_del(ds, port, mdb);
267 mutex_lock(&dp->addr_lists_lock);
269 a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid);
275 if (!refcount_dec_and_test(&a->refcount))
278 err = ds->ops->port_mdb_del(ds, port, mdb);
280 refcount_set(&a->refcount, 1);
288 mutex_unlock(&dp->addr_lists_lock);
293 static int dsa_port_do_fdb_add(struct dsa_port *dp, const unsigned char *addr,
296 struct dsa_switch *ds = dp->ds;
297 struct dsa_mac_addr *a;
298 int port = dp->index;
301 /* No need to bother with refcounting for user ports */
302 if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
303 return ds->ops->port_fdb_add(ds, port, addr, vid);
305 mutex_lock(&dp->addr_lists_lock);
307 a = dsa_mac_addr_find(&dp->fdbs, addr, vid);
309 refcount_inc(&a->refcount);
313 a = kzalloc(sizeof(*a), GFP_KERNEL);
319 err = ds->ops->port_fdb_add(ds, port, addr, vid);
325 ether_addr_copy(a->addr, addr);
327 refcount_set(&a->refcount, 1);
328 list_add_tail(&a->list, &dp->fdbs);
331 mutex_unlock(&dp->addr_lists_lock);
336 static int dsa_port_do_fdb_del(struct dsa_port *dp, const unsigned char *addr,
339 struct dsa_switch *ds = dp->ds;
340 struct dsa_mac_addr *a;
341 int port = dp->index;
344 /* No need to bother with refcounting for user ports */
345 if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
346 return ds->ops->port_fdb_del(ds, port, addr, vid);
348 mutex_lock(&dp->addr_lists_lock);
350 a = dsa_mac_addr_find(&dp->fdbs, addr, vid);
356 if (!refcount_dec_and_test(&a->refcount))
359 err = ds->ops->port_fdb_del(ds, port, addr, vid);
361 refcount_set(&a->refcount, 1);
369 mutex_unlock(&dp->addr_lists_lock);
374 static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
375 struct dsa_notifier_fdb_info *info)
380 if (!ds->ops->port_fdb_add)
383 dsa_switch_for_each_port(dp, ds) {
384 if (dsa_port_host_address_match(dp, info->sw_index,
386 err = dsa_port_do_fdb_add(dp, info->addr, info->vid);
395 static int dsa_switch_host_fdb_del(struct dsa_switch *ds,
396 struct dsa_notifier_fdb_info *info)
401 if (!ds->ops->port_fdb_del)
404 dsa_switch_for_each_port(dp, ds) {
405 if (dsa_port_host_address_match(dp, info->sw_index,
407 err = dsa_port_do_fdb_del(dp, info->addr, info->vid);
416 static int dsa_switch_fdb_add(struct dsa_switch *ds,
417 struct dsa_notifier_fdb_info *info)
419 int port = dsa_towards_port(ds, info->sw_index, info->port);
420 struct dsa_port *dp = dsa_to_port(ds, port);
422 if (!ds->ops->port_fdb_add)
425 return dsa_port_do_fdb_add(dp, info->addr, info->vid);
428 static int dsa_switch_fdb_del(struct dsa_switch *ds,
429 struct dsa_notifier_fdb_info *info)
431 int port = dsa_towards_port(ds, info->sw_index, info->port);
432 struct dsa_port *dp = dsa_to_port(ds, port);
434 if (!ds->ops->port_fdb_del)
437 return dsa_port_do_fdb_del(dp, info->addr, info->vid);
440 static int dsa_switch_lag_change(struct dsa_switch *ds,
441 struct dsa_notifier_lag_info *info)
443 if (ds->index == info->sw_index && ds->ops->port_lag_change)
444 return ds->ops->port_lag_change(ds, info->port);
446 if (ds->index != info->sw_index && ds->ops->crosschip_lag_change)
447 return ds->ops->crosschip_lag_change(ds, info->sw_index,
453 static int dsa_switch_lag_join(struct dsa_switch *ds,
454 struct dsa_notifier_lag_info *info)
456 if (ds->index == info->sw_index && ds->ops->port_lag_join)
457 return ds->ops->port_lag_join(ds, info->port, info->lag,
460 if (ds->index != info->sw_index && ds->ops->crosschip_lag_join)
461 return ds->ops->crosschip_lag_join(ds, info->sw_index,
462 info->port, info->lag,
468 static int dsa_switch_lag_leave(struct dsa_switch *ds,
469 struct dsa_notifier_lag_info *info)
471 if (ds->index == info->sw_index && ds->ops->port_lag_leave)
472 return ds->ops->port_lag_leave(ds, info->port, info->lag);
474 if (ds->index != info->sw_index && ds->ops->crosschip_lag_leave)
475 return ds->ops->crosschip_lag_leave(ds, info->sw_index,
476 info->port, info->lag);
481 static int dsa_switch_mdb_add(struct dsa_switch *ds,
482 struct dsa_notifier_mdb_info *info)
484 int port = dsa_towards_port(ds, info->sw_index, info->port);
485 struct dsa_port *dp = dsa_to_port(ds, port);
487 if (!ds->ops->port_mdb_add)
490 return dsa_port_do_mdb_add(dp, info->mdb);
493 static int dsa_switch_mdb_del(struct dsa_switch *ds,
494 struct dsa_notifier_mdb_info *info)
496 int port = dsa_towards_port(ds, info->sw_index, info->port);
497 struct dsa_port *dp = dsa_to_port(ds, port);
499 if (!ds->ops->port_mdb_del)
502 return dsa_port_do_mdb_del(dp, info->mdb);
505 static int dsa_switch_host_mdb_add(struct dsa_switch *ds,
506 struct dsa_notifier_mdb_info *info)
511 if (!ds->ops->port_mdb_add)
514 dsa_switch_for_each_port(dp, ds) {
515 if (dsa_port_host_address_match(dp, info->sw_index,
517 err = dsa_port_do_mdb_add(dp, info->mdb);
526 static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
527 struct dsa_notifier_mdb_info *info)
532 if (!ds->ops->port_mdb_del)
535 dsa_switch_for_each_port(dp, ds) {
536 if (dsa_port_host_address_match(dp, info->sw_index,
538 err = dsa_port_do_mdb_del(dp, info->mdb);
547 static bool dsa_port_vlan_match(struct dsa_port *dp,
548 struct dsa_notifier_vlan_info *info)
550 if (dp->ds->index == info->sw_index && dp->index == info->port)
553 if (dsa_port_is_dsa(dp))
559 static int dsa_switch_vlan_add(struct dsa_switch *ds,
560 struct dsa_notifier_vlan_info *info)
565 if (!ds->ops->port_vlan_add)
568 dsa_switch_for_each_port(dp, ds) {
569 if (dsa_port_vlan_match(dp, info)) {
570 err = ds->ops->port_vlan_add(ds, dp->index, info->vlan,
580 static int dsa_switch_vlan_del(struct dsa_switch *ds,
581 struct dsa_notifier_vlan_info *info)
583 if (!ds->ops->port_vlan_del)
586 if (ds->index == info->sw_index)
587 return ds->ops->port_vlan_del(ds, info->port, info->vlan);
589 /* Do not deprogram the DSA links as they may be used as conduit
590 * for other VLAN members in the fabric.
595 static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
596 struct dsa_notifier_tag_proto_info *info)
598 const struct dsa_device_ops *tag_ops = info->tag_ops;
599 struct dsa_port *dp, *cpu_dp;
602 if (!ds->ops->change_tag_protocol)
607 dsa_switch_for_each_cpu_port(cpu_dp, ds) {
608 err = ds->ops->change_tag_protocol(ds, cpu_dp->index,
613 dsa_port_set_tag_protocol(cpu_dp, tag_ops);
616 /* Now that changing the tag protocol can no longer fail, let's update
617 * the remaining bits which are "duplicated for faster access", and the
618 * bits that depend on the tagger, such as the MTU.
620 dsa_switch_for_each_user_port(dp, ds) {
621 struct net_device *slave = dp->slave;
623 dsa_slave_setup_tagger(slave);
625 /* rtnl_mutex is held in dsa_tree_change_tag_proto */
626 dsa_slave_change_mtu(slave, slave->mtu);
632 /* We use the same cross-chip notifiers to inform both the tagger side, as well
633 * as the switch side, of connection and disconnection events.
634 * Since ds->tagger_data is owned by the tagger, it isn't a hard error if the
635 * switch side doesn't support connecting to this tagger, and therefore, the
636 * fact that we don't disconnect the tagger side doesn't constitute a memory
637 * leak: the tagger will still operate with persistent per-switch memory, just
638 * with the switch side unconnected to it. What does constitute a hard error is
639 * when the switch side supports connecting but fails.
642 dsa_switch_connect_tag_proto(struct dsa_switch *ds,
643 struct dsa_notifier_tag_proto_info *info)
645 const struct dsa_device_ops *tag_ops = info->tag_ops;
648 /* Notify the new tagger about the connection to this switch */
649 if (tag_ops->connect) {
650 err = tag_ops->connect(ds);
655 if (!ds->ops->connect_tag_protocol)
658 /* Notify the switch about the connection to the new tagger */
659 err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
661 /* Revert the new tagger's connection to this tree */
662 if (tag_ops->disconnect)
663 tag_ops->disconnect(ds);
671 dsa_switch_disconnect_tag_proto(struct dsa_switch *ds,
672 struct dsa_notifier_tag_proto_info *info)
674 const struct dsa_device_ops *tag_ops = info->tag_ops;
676 /* Notify the tagger about the disconnection from this switch */
677 if (tag_ops->disconnect && ds->tagger_data)
678 tag_ops->disconnect(ds);
680 /* No need to notify the switch, since it shouldn't have any
681 * resources to tear down
686 static int dsa_switch_event(struct notifier_block *nb,
687 unsigned long event, void *info)
689 struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
693 case DSA_NOTIFIER_AGEING_TIME:
694 err = dsa_switch_ageing_time(ds, info);
696 case DSA_NOTIFIER_BRIDGE_JOIN:
697 err = dsa_switch_bridge_join(ds, info);
699 case DSA_NOTIFIER_BRIDGE_LEAVE:
700 err = dsa_switch_bridge_leave(ds, info);
702 case DSA_NOTIFIER_FDB_ADD:
703 err = dsa_switch_fdb_add(ds, info);
705 case DSA_NOTIFIER_FDB_DEL:
706 err = dsa_switch_fdb_del(ds, info);
708 case DSA_NOTIFIER_HOST_FDB_ADD:
709 err = dsa_switch_host_fdb_add(ds, info);
711 case DSA_NOTIFIER_HOST_FDB_DEL:
712 err = dsa_switch_host_fdb_del(ds, info);
714 case DSA_NOTIFIER_LAG_CHANGE:
715 err = dsa_switch_lag_change(ds, info);
717 case DSA_NOTIFIER_LAG_JOIN:
718 err = dsa_switch_lag_join(ds, info);
720 case DSA_NOTIFIER_LAG_LEAVE:
721 err = dsa_switch_lag_leave(ds, info);
723 case DSA_NOTIFIER_MDB_ADD:
724 err = dsa_switch_mdb_add(ds, info);
726 case DSA_NOTIFIER_MDB_DEL:
727 err = dsa_switch_mdb_del(ds, info);
729 case DSA_NOTIFIER_HOST_MDB_ADD:
730 err = dsa_switch_host_mdb_add(ds, info);
732 case DSA_NOTIFIER_HOST_MDB_DEL:
733 err = dsa_switch_host_mdb_del(ds, info);
735 case DSA_NOTIFIER_VLAN_ADD:
736 err = dsa_switch_vlan_add(ds, info);
738 case DSA_NOTIFIER_VLAN_DEL:
739 err = dsa_switch_vlan_del(ds, info);
741 case DSA_NOTIFIER_MTU:
742 err = dsa_switch_mtu(ds, info);
744 case DSA_NOTIFIER_TAG_PROTO:
745 err = dsa_switch_change_tag_proto(ds, info);
747 case DSA_NOTIFIER_TAG_PROTO_CONNECT:
748 err = dsa_switch_connect_tag_proto(ds, info);
750 case DSA_NOTIFIER_TAG_PROTO_DISCONNECT:
751 err = dsa_switch_disconnect_tag_proto(ds, info);
753 case DSA_NOTIFIER_TAG_8021Q_VLAN_ADD:
754 err = dsa_switch_tag_8021q_vlan_add(ds, info);
756 case DSA_NOTIFIER_TAG_8021Q_VLAN_DEL:
757 err = dsa_switch_tag_8021q_vlan_del(ds, info);
765 dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
768 return notifier_from_errno(err);
771 int dsa_switch_register_notifier(struct dsa_switch *ds)
773 ds->nb.notifier_call = dsa_switch_event;
775 return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
778 void dsa_switch_unregister_notifier(struct dsa_switch *ds)
782 err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
784 dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);