// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch chip, part of a switch fabric
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */
#include <linux/if_bridge.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>

#include "dsa_priv.h"
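
/* Return the fastest (lowest) ageing time requested by any port of this
 * switch, so that a single hardware setting can satisfy all bridges.
 */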
static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
						   unsigned int ageing_time)
{
	struct dsa_port *dp;

	dsa_switch_for_each_port(dp, ds)
		if (dp->ageing_time && dp->ageing_time < ageing_time)
			ageing_time = dp->ageing_time;

	return ageing_time;
}
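
/* Validate the requested ageing time against the hardware limits, then
 * program the fastest time requested across all ports of this switch.
 */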
static int dsa_switch_ageing_time(struct dsa_switch *ds,
				  struct dsa_notifier_ageing_time_info *info)
{
	unsigned int ageing_time = info->ageing_time;

	if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
		return -ERANGE;

	if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
		return -ERANGE;

	/* Program the fastest ageing time in case of multiple bridges */
	ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);

	if (ds->ops->set_ageing_time)
		return ds->ops->set_ageing_time(ds, ageing_time);

	return 0;
}
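
/* An MTU change matches on the targeted port, and also on the CPU and DSA
 * ports of every switch in the tree, unless the notifier was targeted at a
 * single switch.
 */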
static bool dsa_port_mtu_match(struct dsa_port *dp,
			       struct dsa_notifier_mtu_info *info)
{
	if (dp->ds->index == info->sw_index && dp->index == info->port)
		return true;

	/* Do not propagate to other switches in the tree if the notifier was
	 * targeted for a single switch.
	 */
	if (info->targeted_match)
		return false;

	if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
		return true;

	return false;
}
static int dsa_switch_mtu(struct dsa_switch *ds,
			  struct dsa_notifier_mtu_info *info)
{
	struct dsa_port *dp;
	int ret;

	if (!ds->ops->port_change_mtu)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_mtu_match(dp, info)) {
			ret = ds->ops->port_change_mtu(ds, dp->index,
						       info->mtu);
			if (ret)
				return ret;
		}
	}

	return 0;
}
static int dsa_switch_bridge_join(struct dsa_switch *ds,
				  struct dsa_notifier_bridge_info *info)
{
	struct dsa_switch_tree *dst = ds->dst;
	int err;

	if (dst->index == info->tree_index && ds->index == info->sw_index) {
		if (!ds->ops->port_bridge_join)
			return -EOPNOTSUPP;

		err = ds->ops->port_bridge_join(ds, info->port, info->bridge,
						&info->tx_fwd_offload);
		if (err)
			return err;
	}

	if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
	    ds->ops->crosschip_bridge_join) {
		err = ds->ops->crosschip_bridge_join(ds, info->tree_index,
						     info->sw_index,
						     info->port, info->bridge);
		if (err)
			return err;
	}

	return 0;
}
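
/* After a port leaves its bridge, bring the switch's vlan_filtering setting
 * back in line with what its standalone ports require: some switches need
 * VLAN filtering enabled while standalone, others need it disabled.
 */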
static int dsa_switch_sync_vlan_filtering(struct dsa_switch *ds,
					  struct dsa_notifier_bridge_info *info)
{
	struct netlink_ext_ack extack = {0};
	bool change_vlan_filtering = false;
	bool vlan_filtering;
	struct dsa_port *dp;
	int err;

	if (ds->needs_standalone_vlan_filtering &&
	    !br_vlan_enabled(info->bridge.dev)) {
		change_vlan_filtering = true;
		vlan_filtering = true;
	} else if (!ds->needs_standalone_vlan_filtering &&
		   br_vlan_enabled(info->bridge.dev)) {
		change_vlan_filtering = true;
		vlan_filtering = false;
	}

	/* If the bridge was vlan_filtering, the bridge core doesn't trigger an
	 * event for changing vlan_filtering setting upon slave ports leaving
	 * it. That is a good thing, because that lets us handle it and also
	 * handle the case where the switch's vlan_filtering setting is global
	 * (not per port). When that happens, the correct moment to trigger the
	 * vlan_filtering callback is only when the last port leaves the last
	 * VLAN-aware bridge.
	 */
	if (change_vlan_filtering && ds->vlan_filtering_is_global) {
		dsa_switch_for_each_port(dp, ds) {
			struct net_device *br = dsa_port_bridge_dev_get(dp);

			if (br && br_vlan_enabled(br)) {
				change_vlan_filtering = false;
				break;
			}
		}
	}

	if (change_vlan_filtering) {
		err = dsa_port_vlan_filtering(dsa_to_port(ds, info->port),
					      vlan_filtering, &extack);
		if (extack._msg)
			dev_err(ds->dev, "port %d: %s\n", info->port,
				extack._msg);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return 0;
}
static int dsa_switch_bridge_leave(struct dsa_switch *ds,
				   struct dsa_notifier_bridge_info *info)
{
	struct dsa_switch_tree *dst = ds->dst;
	int err;

	if (dst->index == info->tree_index && ds->index == info->sw_index &&
	    ds->ops->port_bridge_leave)
		ds->ops->port_bridge_leave(ds, info->port, info->bridge);

	if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
	    ds->ops->crosschip_bridge_leave)
		ds->ops->crosschip_bridge_leave(ds, info->tree_index,
						info->sw_index, info->port,
						info->bridge);

	if (ds->dst->index == info->tree_index && ds->index == info->sw_index) {
		err = dsa_switch_sync_vlan_filtering(ds, info);
		if (err)
			return err;
	}

	return 0;
}
/* Matches for all upstream-facing ports (the CPU port and all upstream-facing
 * DSA links) that sit between the targeted port on which the notifier was
 * emitted and its dedicated CPU port.
 */
static bool dsa_port_host_address_match(struct dsa_port *dp,
					int info_sw_index, int info_port)
{
	struct dsa_port *targeted_dp, *cpu_dp;
	struct dsa_switch *targeted_ds;

	targeted_ds = dsa_switch_find(dp->ds->dst->index, info_sw_index);
	targeted_dp = dsa_to_port(targeted_ds, info_port);
	cpu_dp = targeted_dp->cpu_dp;

	if (dsa_switch_is_upstream_of(dp->ds, targeted_ds))
		return dp->index == dsa_towards_port(dp->ds, cpu_dp->ds->index,
						     cpu_dp->index);

	return false;
}
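
/* Two address database references are equal if they describe the same
 * standalone port, LAG or bridge.
 */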
static bool dsa_db_equal(const struct dsa_db *a, const struct dsa_db *b)
{
	if (a->type != b->type)
		return false;

	switch (a->type) {
	case DSA_DB_PORT:
		return a->dp == b->dp;
	case DSA_DB_LAG:
		return a->lag.dev == b->lag.dev;
	case DSA_DB_BRIDGE:
		return a->bridge.num == b->bridge.num;
	default:
		WARN_ON(1);
		return false;
	}
}
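
/* Look up an address in a refcounted address list (a port's FDB or MDB list,
 * or a LAG's FDB list), matching on address, VID and database.
 */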
static struct dsa_mac_addr *dsa_mac_addr_find(struct list_head *addr_list,
					      const unsigned char *addr, u16 vid,
					      struct dsa_db db)
{
	struct dsa_mac_addr *a;

	list_for_each_entry(a, addr_list, list)
		if (ether_addr_equal(a->addr, addr) && a->vid == vid &&
		    dsa_db_equal(&a->db, &db))
			return a;

	return NULL;
}
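
/* Shared (CPU and DSA) ports can see the same MDB entry requested by multiple
 * user ports, so the entries kept on them are reference counted and only
 * programmed into hardware on the first addition.
 */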
static int dsa_port_do_mdb_add(struct dsa_port *dp,
			       const struct switchdev_obj_port_mdb *mdb,
			       struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_mdb_add(ds, port, mdb, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
	if (a) {
		refcount_inc(&a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->port_mdb_add(ds, port, mdb, db);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, mdb->addr);
	a->vid = mdb->vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->mdbs);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}
static int dsa_port_do_mdb_del(struct dsa_port *dp,
			       const struct switchdev_obj_port_mdb *mdb,
			       struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_mdb_del(ds, port, mdb, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
	if (!a) {
		err = -ENOENT;
		goto out;
	}

	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->port_mdb_del(ds, port, mdb, db);
	if (err) {
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}
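
/* Same refcounting scheme as for MDB entries: only the first addition of an
 * {address, VID, database} tuple on a shared port reaches the driver.
 */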
static int dsa_port_do_fdb_add(struct dsa_port *dp, const unsigned char *addr,
			       u16 vid, struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_fdb_add(ds, port, addr, vid, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
	if (a) {
		refcount_inc(&a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->port_fdb_add(ds, port, addr, vid, db);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, addr);
	a->vid = vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->fdbs);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}
static int dsa_port_do_fdb_del(struct dsa_port *dp, const unsigned char *addr,
			       u16 vid, struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_fdb_del(ds, port, addr, vid, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
	if (!a) {
		err = -ENOENT;
		goto out;
	}

	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->port_fdb_del(ds, port, addr, vid, db);
	if (err) {
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}
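
/* FDB entries towards a LAG are refcounted against the LAG itself, since they
 * are not tied to any single physical port.
 */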
static int dsa_switch_do_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag *lag,
				     const unsigned char *addr, u16 vid,
				     struct dsa_db db)
{
	struct dsa_mac_addr *a;
	int err = 0;

	mutex_lock(&lag->fdb_lock);

	a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
	if (a) {
		refcount_inc(&a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->lag_fdb_add(ds, *lag, addr, vid, db);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, addr);
	a->vid = vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &lag->fdbs);

out:
	mutex_unlock(&lag->fdb_lock);

	return err;
}
static int dsa_switch_do_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag *lag,
				     const unsigned char *addr, u16 vid,
				     struct dsa_db db)
{
	struct dsa_mac_addr *a;
	int err = 0;

	mutex_lock(&lag->fdb_lock);

	a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
	if (!a) {
		err = -ENOENT;
		goto out;
	}

	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->lag_fdb_del(ds, *lag, addr, vid, db);
	if (err) {
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&lag->fdb_lock);

	return err;
}
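
/* Host addresses are installed on all upstream-facing ports that sit between
 * the targeted port and its dedicated CPU port (see
 * dsa_port_host_address_match()).
 */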
static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
				   struct dsa_notifier_fdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_fdb_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->sw_index,
						info->port)) {
			err = dsa_port_do_fdb_add(dp, info->addr, info->vid,
						  info->db);
			if (err)
				break;
		}
	}

	return err;
}
static int dsa_switch_host_fdb_del(struct dsa_switch *ds,
				   struct dsa_notifier_fdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_fdb_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->sw_index,
						info->port)) {
			err = dsa_port_do_fdb_del(dp, info->addr, info->vid,
						  info->db);
			if (err)
				break;
		}
	}

	return err;
}
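
/* On switches other than the targeted one, program the FDB entry on the port
 * that faces the targeted switch (a DSA link), as computed by
 * dsa_towards_port().
 */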
static int dsa_switch_fdb_add(struct dsa_switch *ds,
			      struct dsa_notifier_fdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_fdb_add)
		return -EOPNOTSUPP;

	return dsa_port_do_fdb_add(dp, info->addr, info->vid, info->db);
}
static int dsa_switch_fdb_del(struct dsa_switch *ds,
			      struct dsa_notifier_fdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_fdb_del)
		return -EOPNOTSUPP;

	return dsa_port_do_fdb_del(dp, info->addr, info->vid, info->db);
}
static int dsa_switch_lag_fdb_add(struct dsa_switch *ds,
				  struct dsa_notifier_lag_fdb_info *info)
{
	struct dsa_port *dp;

	if (!ds->ops->lag_fdb_add)
		return -EOPNOTSUPP;

	/* Notify switch only if it has a port in this LAG */
	dsa_switch_for_each_port(dp, ds)
		if (dsa_port_offloads_lag(dp, info->lag))
			return dsa_switch_do_lag_fdb_add(ds, info->lag,
							 info->addr, info->vid,
							 info->db);

	return 0;
}
static int dsa_switch_lag_fdb_del(struct dsa_switch *ds,
				  struct dsa_notifier_lag_fdb_info *info)
{
	struct dsa_port *dp;

	if (!ds->ops->lag_fdb_del)
		return -EOPNOTSUPP;

	/* Notify switch only if it has a port in this LAG */
	dsa_switch_for_each_port(dp, ds)
		if (dsa_port_offloads_lag(dp, info->lag))
			return dsa_switch_do_lag_fdb_del(ds, info->lag,
							 info->addr, info->vid,
							 info->db);

	return 0;
}
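
/* LAG events are handled by the targeted switch through the port_lag_*() ops
 * and by all other switches in the tree through the crosschip_lag_*() ops.
 */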
static int dsa_switch_lag_change(struct dsa_switch *ds,
				 struct dsa_notifier_lag_info *info)
{
	if (ds->index == info->sw_index && ds->ops->port_lag_change)
		return ds->ops->port_lag_change(ds, info->port);

	if (ds->index != info->sw_index && ds->ops->crosschip_lag_change)
		return ds->ops->crosschip_lag_change(ds, info->sw_index,
						     info->port);

	return 0;
}
static int dsa_switch_lag_join(struct dsa_switch *ds,
			       struct dsa_notifier_lag_info *info)
{
	if (ds->index == info->sw_index && ds->ops->port_lag_join)
		return ds->ops->port_lag_join(ds, info->port, info->lag,
					      info->info);

	if (ds->index != info->sw_index && ds->ops->crosschip_lag_join)
		return ds->ops->crosschip_lag_join(ds, info->sw_index,
						   info->port, info->lag,
						   info->info);

	return -EOPNOTSUPP;
}
static int dsa_switch_lag_leave(struct dsa_switch *ds,
				struct dsa_notifier_lag_info *info)
{
	if (ds->index == info->sw_index && ds->ops->port_lag_leave)
		return ds->ops->port_lag_leave(ds, info->port, info->lag);

	if (ds->index != info->sw_index && ds->ops->crosschip_lag_leave)
		return ds->ops->crosschip_lag_leave(ds, info->sw_index,
						    info->port, info->lag);

	return -EOPNOTSUPP;
}
static int dsa_switch_mdb_add(struct dsa_switch *ds,
			      struct dsa_notifier_mdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_mdb_add)
		return -EOPNOTSUPP;

	return dsa_port_do_mdb_add(dp, info->mdb, info->db);
}
static int dsa_switch_mdb_del(struct dsa_switch *ds,
			      struct dsa_notifier_mdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_mdb_del)
		return -EOPNOTSUPP;

	return dsa_port_do_mdb_del(dp, info->mdb, info->db);
}
static int dsa_switch_host_mdb_add(struct dsa_switch *ds,
				   struct dsa_notifier_mdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_mdb_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->sw_index,
						info->port)) {
			err = dsa_port_do_mdb_add(dp, info->mdb, info->db);
			if (err)
				break;
		}
	}

	return err;
}
static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
				   struct dsa_notifier_mdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_mdb_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->sw_index,
						info->port)) {
			err = dsa_port_do_mdb_del(dp, info->mdb, info->db);
			if (err)
				break;
		}
	}

	return err;
}
/* Port VLANs match on the targeted port and on all DSA ports */
static bool dsa_port_vlan_match(struct dsa_port *dp,
				struct dsa_notifier_vlan_info *info)
{
	if (dp->ds->index == info->sw_index && dp->index == info->port)
		return true;

	if (dsa_port_is_dsa(dp))
		return true;

	return false;
}
/* Host VLANs match on the targeted port's CPU port, and on all DSA ports
 * (upstream and downstream) of that switch and its upstream switches.
 */
static bool dsa_port_host_vlan_match(struct dsa_port *dp,
				     struct dsa_notifier_vlan_info *info)
{
	struct dsa_port *targeted_dp, *cpu_dp;
	struct dsa_switch *targeted_ds;

	targeted_ds = dsa_switch_find(dp->ds->dst->index, info->sw_index);
	targeted_dp = dsa_to_port(targeted_ds, info->port);
	cpu_dp = targeted_dp->cpu_dp;

	if (dsa_switch_is_upstream_of(dp->ds, targeted_ds))
		return dsa_port_is_dsa(dp) || dp == cpu_dp;

	return false;
}
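
/* Look up a VLAN in a port's list of refcounted VLAN entries, by VID. */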
static struct dsa_vlan *dsa_vlan_find(struct list_head *vlan_list,
				      const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_vlan *v;

	list_for_each_entry(v, vlan_list, list)
		if (v->vid == vlan->vid)
			return v;

	return NULL;
}
static int dsa_port_do_vlan_add(struct dsa_port *dp,
				const struct switchdev_obj_port_vlan *vlan,
				struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	struct dsa_vlan *v;
	int err = 0;

	/* No need to bother with refcounting for user ports. */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_vlan_add(ds, port, vlan, extack);

	/* No need to propagate on shared ports the existing VLANs that were
	 * re-notified after just the flags have changed. This would cause a
	 * refcount bump which we need to avoid, since it unbalances the
	 * additions with the deletions.
	 */
	if (vlan->changed)
		return 0;

	mutex_lock(&dp->vlans_lock);

	v = dsa_vlan_find(&dp->vlans, vlan);
	if (v) {
		refcount_inc(&v->refcount);
		goto out;
	}

	v = kzalloc(sizeof(*v), GFP_KERNEL);
	if (!v) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->port_vlan_add(ds, port, vlan, extack);
	if (err) {
		kfree(v);
		goto out;
	}

	v->vid = vlan->vid;
	refcount_set(&v->refcount, 1);
	list_add_tail(&v->list, &dp->vlans);

out:
	mutex_unlock(&dp->vlans_lock);

	return err;
}
static int dsa_port_do_vlan_del(struct dsa_port *dp,
				const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	struct dsa_vlan *v;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_vlan_del(ds, port, vlan);

	mutex_lock(&dp->vlans_lock);

	v = dsa_vlan_find(&dp->vlans, vlan);
	if (!v) {
		err = -ENOENT;
		goto out;
	}

	if (!refcount_dec_and_test(&v->refcount))
		goto out;

	err = ds->ops->port_vlan_del(ds, port, vlan);
	if (err) {
		refcount_set(&v->refcount, 1);
		goto out;
	}

	list_del(&v->list);
	kfree(v);

out:
	mutex_unlock(&dp->vlans_lock);

	return err;
}
static int dsa_switch_vlan_add(struct dsa_switch *ds,
			       struct dsa_notifier_vlan_info *info)
{
	struct dsa_port *dp;
	int err;

	if (!ds->ops->port_vlan_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_vlan_match(dp, info)) {
			err = dsa_port_do_vlan_add(dp, info->vlan,
						   info->extack);
			if (err)
				return err;
		}
	}

	return 0;
}
static int dsa_switch_vlan_del(struct dsa_switch *ds,
			       struct dsa_notifier_vlan_info *info)
{
	struct dsa_port *dp;
	int err;

	if (!ds->ops->port_vlan_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_vlan_match(dp, info)) {
			err = dsa_port_do_vlan_del(dp, info->vlan);
			if (err)
				return err;
		}
	}

	return 0;
}
static int dsa_switch_host_vlan_add(struct dsa_switch *ds,
				    struct dsa_notifier_vlan_info *info)
{
	struct dsa_port *dp;
	int err;

	if (!ds->ops->port_vlan_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_vlan_match(dp, info)) {
			err = dsa_port_do_vlan_add(dp, info->vlan,
						   info->extack);
			if (err)
				return err;
		}
	}

	return 0;
}
static int dsa_switch_host_vlan_del(struct dsa_switch *ds,
				    struct dsa_notifier_vlan_info *info)
{
	struct dsa_port *dp;
	int err;

	if (!ds->ops->port_vlan_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_vlan_match(dp, info)) {
			err = dsa_port_do_vlan_del(dp, info->vlan);
			if (err)
				return err;
		}
	}

	return 0;
}
static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
				       struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;
	struct dsa_port *dp, *cpu_dp;
	int err;

	if (!ds->ops->change_tag_protocol)
		return -EOPNOTSUPP;

	ASSERT_RTNL();

	dsa_switch_for_each_cpu_port(cpu_dp, ds) {
		err = ds->ops->change_tag_protocol(ds, cpu_dp->index,
						   tag_ops->proto);
		if (err)
			return err;

		dsa_port_set_tag_protocol(cpu_dp, tag_ops);
	}

	/* Now that changing the tag protocol can no longer fail, let's update
	 * the remaining bits which are "duplicated for faster access", and the
	 * bits that depend on the tagger, such as the MTU.
	 */
	dsa_switch_for_each_user_port(dp, ds) {
		struct net_device *slave = dp->slave;

		dsa_slave_setup_tagger(slave);

		/* rtnl_mutex is held in dsa_tree_change_tag_proto */
		dsa_slave_change_mtu(slave, slave->mtu);
	}

	return 0;
}
/* We use the same cross-chip notifiers to inform both the tagger side, as well
 * as the switch side, of connection and disconnection events.
 * Since ds->tagger_data is owned by the tagger, it isn't a hard error if the
 * switch side doesn't support connecting to this tagger, and therefore, the
 * fact that we don't disconnect the tagger side doesn't constitute a memory
 * leak: the tagger will still operate with persistent per-switch memory, just
 * with the switch side unconnected to it. What does constitute a hard error is
 * when the switch side supports connecting but fails.
 */
static int
dsa_switch_connect_tag_proto(struct dsa_switch *ds,
			     struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;
	int err;

	/* Notify the new tagger about the connection to this switch */
	if (tag_ops->connect) {
		err = tag_ops->connect(ds);
		if (err)
			return err;
	}

	if (!ds->ops->connect_tag_protocol)
		return -EOPNOTSUPP;

	/* Notify the switch about the connection to the new tagger */
	err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
	if (err) {
		/* Revert the new tagger's connection to this tree */
		if (tag_ops->disconnect)
			tag_ops->disconnect(ds);
		return err;
	}

	return 0;
}
static int
dsa_switch_disconnect_tag_proto(struct dsa_switch *ds,
				struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;

	/* Notify the tagger about the disconnection from this switch */
	if (tag_ops->disconnect && ds->tagger_data)
		tag_ops->disconnect(ds);

	/* No need to notify the switch, since it shouldn't have any
	 * resources to tear down
	 */
	return 0;
}
static int
dsa_switch_master_state_change(struct dsa_switch *ds,
			       struct dsa_notifier_master_state_info *info)
{
	if (!ds->ops->master_state_change)
		return 0;

	ds->ops->master_state_change(ds, info->master, info->operational);

	return 0;
}
static int dsa_switch_event(struct notifier_block *nb,
			    unsigned long event, void *info)
{
	struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
	int err;

	switch (event) {
	case DSA_NOTIFIER_AGEING_TIME:
		err = dsa_switch_ageing_time(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_JOIN:
		err = dsa_switch_bridge_join(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_LEAVE:
		err = dsa_switch_bridge_leave(ds, info);
		break;
	case DSA_NOTIFIER_FDB_ADD:
		err = dsa_switch_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_FDB_DEL:
		err = dsa_switch_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_ADD:
		err = dsa_switch_host_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_DEL:
		err = dsa_switch_host_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_LAG_FDB_ADD:
		err = dsa_switch_lag_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_LAG_FDB_DEL:
		err = dsa_switch_lag_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_LAG_CHANGE:
		err = dsa_switch_lag_change(ds, info);
		break;
	case DSA_NOTIFIER_LAG_JOIN:
		err = dsa_switch_lag_join(ds, info);
		break;
	case DSA_NOTIFIER_LAG_LEAVE:
		err = dsa_switch_lag_leave(ds, info);
		break;
	case DSA_NOTIFIER_MDB_ADD:
		err = dsa_switch_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_MDB_DEL:
		err = dsa_switch_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_ADD:
		err = dsa_switch_host_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_DEL:
		err = dsa_switch_host_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_ADD:
		err = dsa_switch_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_DEL:
		err = dsa_switch_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_VLAN_ADD:
		err = dsa_switch_host_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_VLAN_DEL:
		err = dsa_switch_host_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_MTU:
		err = dsa_switch_mtu(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO:
		err = dsa_switch_change_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO_CONNECT:
		err = dsa_switch_connect_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO_DISCONNECT:
		err = dsa_switch_disconnect_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_ADD:
		err = dsa_switch_tag_8021q_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_DEL:
		err = dsa_switch_tag_8021q_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_MASTER_STATE_CHANGE:
		err = dsa_switch_master_state_change(ds, info);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	if (err)
		dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
			event, err);

	return notifier_from_errno(err);
}
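
/* Subscribe this switch to its tree's notifier chain, over which cross-chip
 * events are broadcast to all switches of the fabric.
 */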
int dsa_switch_register_notifier(struct dsa_switch *ds)
{
	ds->nb.notifier_call = dsa_switch_event;

	return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
}
void dsa_switch_unregister_notifier(struct dsa_switch *ds)
{
	int err;

	err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
	if (err)
		dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);
}