net/dsa/switch.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch chip, part of a switch fabric
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/if_bridge.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>

#include "dsa_priv.h"

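/* Scan all ports of @ds and return the lowest (i.e. fastest) non-zero ageing
 * time that any of them requested, or @ageing_time itself if no port asked
 * for a lower value. Used by dsa_switch_ageing_time() below so that switches
 * with a single (often global) hardware ageing timer can still satisfy
 * multiple bridges sharing the switch.
 */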
static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
                                                    unsigned int ageing_time)
{
        struct dsa_port *dp;

        dsa_switch_for_each_port(dp, ds)
                if (dp->ageing_time && dp->ageing_time < ageing_time)
                        ageing_time = dp->ageing_time;

        return ageing_time;
}

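/* Handle DSA_NOTIFIER_AGEING_TIME: reject values outside the
 * driver-advertised [ageing_time_min, ageing_time_max] range (when those
 * limits are set), then program the fastest ageing time seen across the
 * switch's ports. A driver opting in implements the ->set_ageing_time() op;
 * a minimal, purely illustrative sketch (the foo_ helpers are hypothetical):
 *
 *	static int foo_set_ageing_time(struct dsa_switch *ds,
 *				       unsigned int msecs)
 *	{
 *		// translate msecs to hardware units, then write the
 *		// switch's (often global) ageing register
 *		return foo_write_ageing_reg(ds, msecs);
 *	}
 */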
static int dsa_switch_ageing_time(struct dsa_switch *ds,
                                  struct dsa_notifier_ageing_time_info *info)
{
        unsigned int ageing_time = info->ageing_time;

        if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
                return -ERANGE;

        if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
                return -ERANGE;

        /* Program the fastest ageing time in case of multiple bridges */
        ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);

        if (ds->ops->set_ageing_time)
                return ds->ops->set_ageing_time(ds, ageing_time);

        return 0;
}

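/* Decide whether the MTU notifier applies to @dp: the exact targeted port
 * always matches; for non-targeted notifiers, shared (CPU and DSA) ports
 * match as well, so the fabric path can carry frames of the new size.
 */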
static bool dsa_port_mtu_match(struct dsa_port *dp,
                               struct dsa_notifier_mtu_info *info)
{
        if (dp->ds->index == info->sw_index && dp->index == info->port)
                return true;

        /* Do not propagate to other switches in the tree if the notifier was
         * targeted for a single switch.
         */
        if (info->targeted_match)
                return false;

        if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
                return true;

        return false;
}

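/* Handle DSA_NOTIFIER_MTU: program info->mtu, through the driver's
 * ->port_change_mtu() op, on every port of @ds that dsa_port_mtu_match()
 * selects, stopping at the first error.
 */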
static int dsa_switch_mtu(struct dsa_switch *ds,
                          struct dsa_notifier_mtu_info *info)
{
        struct dsa_port *dp;
        int ret;

        if (!ds->ops->port_change_mtu)
                return -EOPNOTSUPP;

        dsa_switch_for_each_port(dp, ds) {
                if (dsa_port_mtu_match(dp, info)) {
                        ret = ds->ops->port_change_mtu(ds, dp->index,
                                                       info->mtu);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}

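/* Handle DSA_NOTIFIER_BRIDGE_JOIN. If the event targets a port local to @ds,
 * call ->port_bridge_join(); if it targets a port of another switch (in this
 * or another tree), give the driver a chance to update its cross-chip
 * forwarding state through ->crosschip_bridge_join(). Finally let the
 * tag_8021q code react to the join as well.
 */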
static int dsa_switch_bridge_join(struct dsa_switch *ds,
                                  struct dsa_notifier_bridge_info *info)
{
        struct dsa_switch_tree *dst = ds->dst;
        int err;

        if (dst->index == info->tree_index && ds->index == info->sw_index) {
                if (!ds->ops->port_bridge_join)
                        return -EOPNOTSUPP;

                err = ds->ops->port_bridge_join(ds, info->port, info->bridge,
                                                &info->tx_fwd_offload);
                if (err)
                        return err;
        }

        if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
            ds->ops->crosschip_bridge_join) {
                err = ds->ops->crosschip_bridge_join(ds, info->tree_index,
                                                     info->sw_index,
                                                     info->port, info->bridge);
                if (err)
                        return err;
        }

        return dsa_tag_8021q_bridge_join(ds, info);
}

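/* Handle DSA_NOTIFIER_BRIDGE_LEAVE, the mirror image of the join path above.
 * In addition, adjust the leaving port's vlan_filtering setting to what
 * standalone operation requires, when that differs from the state inherited
 * from the bridge; on switches where vlan_filtering is global, the change is
 * deferred until no port remains under a VLAN-aware bridge (see the comment
 * in the body below).
 */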
static int dsa_switch_bridge_leave(struct dsa_switch *ds,
                                   struct dsa_notifier_bridge_info *info)
{
        struct dsa_switch_tree *dst = ds->dst;
        struct netlink_ext_ack extack = {0};
        bool change_vlan_filtering = false;
        bool vlan_filtering;
        struct dsa_port *dp;
        int err;

        if (dst->index == info->tree_index && ds->index == info->sw_index &&
            ds->ops->port_bridge_leave)
                ds->ops->port_bridge_leave(ds, info->port, info->bridge);

        if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
            ds->ops->crosschip_bridge_leave)
                ds->ops->crosschip_bridge_leave(ds, info->tree_index,
                                                info->sw_index, info->port,
                                                info->bridge);

        if (ds->needs_standalone_vlan_filtering &&
            !br_vlan_enabled(info->bridge.dev)) {
                change_vlan_filtering = true;
                vlan_filtering = true;
        } else if (!ds->needs_standalone_vlan_filtering &&
                   br_vlan_enabled(info->bridge.dev)) {
                change_vlan_filtering = true;
                vlan_filtering = false;
        }

        /* If the bridge was vlan_filtering, the bridge core doesn't trigger an
         * event for changing vlan_filtering setting upon slave ports leaving
         * it. That is a good thing, because that lets us handle it and also
         * handle the case where the switch's vlan_filtering setting is global
         * (not per port). When that happens, the correct moment to trigger the
         * vlan_filtering callback is only when the last port leaves the last
         * VLAN-aware bridge.
         */
        if (change_vlan_filtering && ds->vlan_filtering_is_global) {
                dsa_switch_for_each_port(dp, ds) {
                        struct net_device *br = dsa_port_bridge_dev_get(dp);

                        if (br && br_vlan_enabled(br)) {
                                change_vlan_filtering = false;
                                break;
                        }
                }
        }

        if (change_vlan_filtering) {
                err = dsa_port_vlan_filtering(dsa_to_port(ds, info->port),
                                              vlan_filtering, &extack);
                if (extack._msg)
                        dev_err(ds->dev, "port %d: %s\n", info->port,
                                extack._msg);
                if (err && err != -EOPNOTSUPP)
                        return err;
        }

        return dsa_tag_8021q_bridge_leave(ds, info);
}

/* Matches for all upstream-facing ports (the CPU port and all upstream-facing
 * DSA links) that sit between the targeted port on which the notifier was
 * emitted and its dedicated CPU port.
 */
static bool dsa_port_host_address_match(struct dsa_port *dp,
                                        int info_sw_index, int info_port)
{
        struct dsa_port *targeted_dp, *cpu_dp;
        struct dsa_switch *targeted_ds;

        targeted_ds = dsa_switch_find(dp->ds->dst->index, info_sw_index);
        targeted_dp = dsa_to_port(targeted_ds, info_port);
        cpu_dp = targeted_dp->cpu_dp;

        if (dsa_switch_is_upstream_of(dp->ds, targeted_ds))
                return dp->index == dsa_towards_port(dp->ds, cpu_dp->ds->index,
                                                     cpu_dp->index);

        return false;
}

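/* Linear search of a per-port address list (dp->fdbs or dp->mdbs) for an
 * entry matching @addr and @vid; returns NULL if none is found. The lists
 * are protected by dp->addr_lists_lock, held by the callers below.
 */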
static struct dsa_mac_addr *dsa_mac_addr_find(struct list_head *addr_list,
                                              const unsigned char *addr,
                                              u16 vid)
{
        struct dsa_mac_addr *a;

        list_for_each_entry(a, addr_list, list)
                if (ether_addr_equal(a->addr, addr) && a->vid == vid)
                        return a;

        return NULL;
}

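/* Reference-counted MDB programming for shared (CPU and DSA) ports, which can
 * be asked to offload the same multicast address on behalf of several user
 * ports: the hardware entry is installed on the first request and removed
 * only when the last reference goes away. User ports bypass the refcounting
 * and call the driver op directly. dsa_port_do_mdb_del() below is the
 * symmetric teardown path.
 */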
static int dsa_port_do_mdb_add(struct dsa_port *dp,
                               const struct switchdev_obj_port_mdb *mdb)
{
        struct dsa_switch *ds = dp->ds;
        struct dsa_mac_addr *a;
        int port = dp->index;
        int err = 0;

        /* No need to bother with refcounting for user ports */
        if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
                return ds->ops->port_mdb_add(ds, port, mdb);

        mutex_lock(&dp->addr_lists_lock);

        a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid);
        if (a) {
                refcount_inc(&a->refcount);
                goto out;
        }

        a = kzalloc(sizeof(*a), GFP_KERNEL);
        if (!a) {
                err = -ENOMEM;
                goto out;
        }

        err = ds->ops->port_mdb_add(ds, port, mdb);
        if (err) {
                kfree(a);
                goto out;
        }

        ether_addr_copy(a->addr, mdb->addr);
        a->vid = mdb->vid;
        refcount_set(&a->refcount, 1);
        list_add_tail(&a->list, &dp->mdbs);

out:
        mutex_unlock(&dp->addr_lists_lock);

        return err;
}

static int dsa_port_do_mdb_del(struct dsa_port *dp,
                               const struct switchdev_obj_port_mdb *mdb)
{
        struct dsa_switch *ds = dp->ds;
        struct dsa_mac_addr *a;
        int port = dp->index;
        int err = 0;

        /* No need to bother with refcounting for user ports */
        if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
                return ds->ops->port_mdb_del(ds, port, mdb);

        mutex_lock(&dp->addr_lists_lock);

        a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid);
        if (!a) {
                err = -ENOENT;
                goto out;
        }

        if (!refcount_dec_and_test(&a->refcount))
                goto out;

        err = ds->ops->port_mdb_del(ds, port, mdb);
        if (err) {
                refcount_set(&a->refcount, 1);
                goto out;
        }

        list_del(&a->list);
        kfree(a);

out:
        mutex_unlock(&dp->addr_lists_lock);

        return err;
}

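/* Same reference-counting scheme as the MDB helpers above, applied to unicast
 * FDB entries kept in dp->fdbs. Again, only shared (CPU/DSA) ports need the
 * refcounting; user ports program the hardware directly.
 */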
static int dsa_port_do_fdb_add(struct dsa_port *dp, const unsigned char *addr,
                               u16 vid)
{
        struct dsa_switch *ds = dp->ds;
        struct dsa_mac_addr *a;
        int port = dp->index;
        int err = 0;

        /* No need to bother with refcounting for user ports */
        if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
                return ds->ops->port_fdb_add(ds, port, addr, vid);

        mutex_lock(&dp->addr_lists_lock);

        a = dsa_mac_addr_find(&dp->fdbs, addr, vid);
        if (a) {
                refcount_inc(&a->refcount);
                goto out;
        }

        a = kzalloc(sizeof(*a), GFP_KERNEL);
        if (!a) {
                err = -ENOMEM;
                goto out;
        }

        err = ds->ops->port_fdb_add(ds, port, addr, vid);
        if (err) {
                kfree(a);
                goto out;
        }

        ether_addr_copy(a->addr, addr);
        a->vid = vid;
        refcount_set(&a->refcount, 1);
        list_add_tail(&a->list, &dp->fdbs);

out:
        mutex_unlock(&dp->addr_lists_lock);

        return err;
}

static int dsa_port_do_fdb_del(struct dsa_port *dp, const unsigned char *addr,
                               u16 vid)
{
        struct dsa_switch *ds = dp->ds;
        struct dsa_mac_addr *a;
        int port = dp->index;
        int err = 0;

        /* No need to bother with refcounting for user ports */
        if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
                return ds->ops->port_fdb_del(ds, port, addr, vid);

        mutex_lock(&dp->addr_lists_lock);

        a = dsa_mac_addr_find(&dp->fdbs, addr, vid);
        if (!a) {
                err = -ENOENT;
                goto out;
        }

        if (!refcount_dec_and_test(&a->refcount))
                goto out;

        err = ds->ops->port_fdb_del(ds, port, addr, vid);
        if (err) {
                refcount_set(&a->refcount, 1);
                goto out;
        }

        list_del(&a->list);
        kfree(a);

out:
        mutex_unlock(&dp->addr_lists_lock);

        return err;
}

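/* Host FDB entries describe MAC addresses whose traffic must reach the CPU.
 * They are installed, with refcounting, on every upstream-facing port that
 * sits between the targeted port and its dedicated CPU port, as selected by
 * dsa_port_host_address_match(). The plain FDB_ADD/FDB_DEL handlers further
 * below instead program a single port: the targeted one if it is local to
 * @ds, or the port of @ds that points towards it.
 */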
static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
                                   struct dsa_notifier_fdb_info *info)
{
        struct dsa_port *dp;
        int err = 0;

        if (!ds->ops->port_fdb_add)
                return -EOPNOTSUPP;

        dsa_switch_for_each_port(dp, ds) {
                if (dsa_port_host_address_match(dp, info->sw_index,
                                                info->port)) {
                        err = dsa_port_do_fdb_add(dp, info->addr, info->vid);
                        if (err)
                                break;
                }
        }

        return err;
}

static int dsa_switch_host_fdb_del(struct dsa_switch *ds,
                                   struct dsa_notifier_fdb_info *info)
{
        struct dsa_port *dp;
        int err = 0;

        if (!ds->ops->port_fdb_del)
                return -EOPNOTSUPP;

        dsa_switch_for_each_port(dp, ds) {
                if (dsa_port_host_address_match(dp, info->sw_index,
                                                info->port)) {
                        err = dsa_port_do_fdb_del(dp, info->addr, info->vid);
                        if (err)
                                break;
                }
        }

        return err;
}

static int dsa_switch_fdb_add(struct dsa_switch *ds,
                              struct dsa_notifier_fdb_info *info)
{
        int port = dsa_towards_port(ds, info->sw_index, info->port);
        struct dsa_port *dp = dsa_to_port(ds, port);

        if (!ds->ops->port_fdb_add)
                return -EOPNOTSUPP;

        return dsa_port_do_fdb_add(dp, info->addr, info->vid);
}

static int dsa_switch_fdb_del(struct dsa_switch *ds,
                              struct dsa_notifier_fdb_info *info)
{
        int port = dsa_towards_port(ds, info->sw_index, info->port);
        struct dsa_port *dp = dsa_to_port(ds, port);

        if (!ds->ops->port_fdb_del)
                return -EOPNOTSUPP;

        return dsa_port_do_fdb_del(dp, info->addr, info->vid);
}

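/* LAG handlers: DSA_NOTIFIER_LAG_CHANGE/JOIN/LEAVE are dispatched either to
 * the local port ops when the event targets this switch, or to the crosschip
 * variants otherwise. join/leave return -EOPNOTSUPP when the driver handles
 * neither case, which presumably lets the core fall back to a software-only
 * LAG for that port.
 */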
static int dsa_switch_lag_change(struct dsa_switch *ds,
                                 struct dsa_notifier_lag_info *info)
{
        if (ds->index == info->sw_index && ds->ops->port_lag_change)
                return ds->ops->port_lag_change(ds, info->port);

        if (ds->index != info->sw_index && ds->ops->crosschip_lag_change)
                return ds->ops->crosschip_lag_change(ds, info->sw_index,
                                                     info->port);

        return 0;
}

static int dsa_switch_lag_join(struct dsa_switch *ds,
                               struct dsa_notifier_lag_info *info)
{
        if (ds->index == info->sw_index && ds->ops->port_lag_join)
                return ds->ops->port_lag_join(ds, info->port, info->lag,
                                              info->info);

        if (ds->index != info->sw_index && ds->ops->crosschip_lag_join)
                return ds->ops->crosschip_lag_join(ds, info->sw_index,
                                                   info->port, info->lag,
                                                   info->info);

        return -EOPNOTSUPP;
}

static int dsa_switch_lag_leave(struct dsa_switch *ds,
                                struct dsa_notifier_lag_info *info)
{
        if (ds->index == info->sw_index && ds->ops->port_lag_leave)
                return ds->ops->port_lag_leave(ds, info->port, info->lag);

        if (ds->index != info->sw_index && ds->ops->crosschip_lag_leave)
                return ds->ops->crosschip_lag_leave(ds, info->sw_index,
                                                    info->port, info->lag);

        return -EOPNOTSUPP;
}

static int dsa_switch_mdb_add(struct dsa_switch *ds,
                              struct dsa_notifier_mdb_info *info)
{
        int port = dsa_towards_port(ds, info->sw_index, info->port);
        struct dsa_port *dp = dsa_to_port(ds, port);

        if (!ds->ops->port_mdb_add)
                return -EOPNOTSUPP;

        return dsa_port_do_mdb_add(dp, info->mdb);
}

static int dsa_switch_mdb_del(struct dsa_switch *ds,
                              struct dsa_notifier_mdb_info *info)
{
        int port = dsa_towards_port(ds, info->sw_index, info->port);
        struct dsa_port *dp = dsa_to_port(ds, port);

        if (!ds->ops->port_mdb_del)
                return -EOPNOTSUPP;

        return dsa_port_do_mdb_del(dp, info->mdb);
}

static int dsa_switch_host_mdb_add(struct dsa_switch *ds,
                                   struct dsa_notifier_mdb_info *info)
{
        struct dsa_port *dp;
        int err = 0;

        if (!ds->ops->port_mdb_add)
                return -EOPNOTSUPP;

        dsa_switch_for_each_port(dp, ds) {
                if (dsa_port_host_address_match(dp, info->sw_index,
                                                info->port)) {
                        err = dsa_port_do_mdb_add(dp, info->mdb);
                        if (err)
                                break;
                }
        }

        return err;
}

static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
                                   struct dsa_notifier_mdb_info *info)
{
        struct dsa_port *dp;
        int err = 0;

        if (!ds->ops->port_mdb_del)
                return -EOPNOTSUPP;

        dsa_switch_for_each_port(dp, ds) {
                if (dsa_port_host_address_match(dp, info->sw_index,
                                                info->port)) {
                        err = dsa_port_do_mdb_del(dp, info->mdb);
                        if (err)
                                break;
                }
        }

        return err;
}

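/* A VLAN notifier matches the targeted port itself and every DSA link of @ds,
 * so that the VLAN is also a member on the ports interconnecting the switches
 * of the fabric.
 */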
static bool dsa_port_vlan_match(struct dsa_port *dp,
                                struct dsa_notifier_vlan_info *info)
{
        if (dp->ds->index == info->sw_index && dp->index == info->port)
                return true;

        if (dsa_port_is_dsa(dp))
                return true;

        return false;
}

static int dsa_switch_vlan_add(struct dsa_switch *ds,
                               struct dsa_notifier_vlan_info *info)
{
        struct dsa_port *dp;
        int err;

        if (!ds->ops->port_vlan_add)
                return -EOPNOTSUPP;

        dsa_switch_for_each_port(dp, ds) {
                if (dsa_port_vlan_match(dp, info)) {
                        err = ds->ops->port_vlan_add(ds, dp->index, info->vlan,
                                                     info->extack);
                        if (err)
                                return err;
                }
        }

        return 0;
}

static int dsa_switch_vlan_del(struct dsa_switch *ds,
                               struct dsa_notifier_vlan_info *info)
{
        if (!ds->ops->port_vlan_del)
                return -EOPNOTSUPP;

        if (ds->index == info->sw_index)
                return ds->ops->port_vlan_del(ds, info->port, info->vlan);

        /* Do not deprogram the DSA links as they may be used as conduit
         * for other VLAN members in the fabric.
         */
        return 0;
}

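/* Handle DSA_NOTIFIER_TAG_PROTO: switch every CPU port of @ds to the new
 * tagging protocol, then refresh the tagger-dependent state of each user
 * port (slave tagger setup and MTU, since the new tagger may have a
 * different overhead). Runs under rtnl_lock, as asserted below.
 */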
static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
                                       struct dsa_notifier_tag_proto_info *info)
{
        const struct dsa_device_ops *tag_ops = info->tag_ops;
        struct dsa_port *dp, *cpu_dp;
        int err;

        if (!ds->ops->change_tag_protocol)
                return -EOPNOTSUPP;

        ASSERT_RTNL();

        dsa_switch_for_each_cpu_port(cpu_dp, ds) {
                err = ds->ops->change_tag_protocol(ds, cpu_dp->index,
                                                   tag_ops->proto);
                if (err)
                        return err;

                dsa_port_set_tag_protocol(cpu_dp, tag_ops);
        }

        /* Now that changing the tag protocol can no longer fail, let's update
         * the remaining bits which are "duplicated for faster access", and the
         * bits that depend on the tagger, such as the MTU.
         */
        dsa_switch_for_each_user_port(dp, ds) {
                struct net_device *slave = dp->slave;

                dsa_slave_setup_tagger(slave);

                /* rtnl_mutex is held in dsa_tree_change_tag_proto */
                dsa_slave_change_mtu(slave, slave->mtu);
        }

        return 0;
}

/* We use the same cross-chip notifiers to inform both the tagger side, as well
 * as the switch side, of connection and disconnection events.
 * Since ds->tagger_data is owned by the tagger, it isn't a hard error if the
 * switch side doesn't support connecting to this tagger, and therefore, the
 * fact that we don't disconnect the tagger side doesn't constitute a memory
 * leak: the tagger will still operate with persistent per-switch memory, just
 * with the switch side unconnected to it. What does constitute a hard error is
 * when the switch side supports connecting but fails.
 */
static int
dsa_switch_connect_tag_proto(struct dsa_switch *ds,
                             struct dsa_notifier_tag_proto_info *info)
{
        const struct dsa_device_ops *tag_ops = info->tag_ops;
        int err;

        /* Notify the new tagger about the connection to this switch */
        if (tag_ops->connect) {
                err = tag_ops->connect(ds);
                if (err)
                        return err;
        }

        if (!ds->ops->connect_tag_protocol)
                return -EOPNOTSUPP;

        /* Notify the switch about the connection to the new tagger */
        err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
        if (err) {
                /* Revert the new tagger's connection to this tree */
                if (tag_ops->disconnect)
                        tag_ops->disconnect(ds);
                return err;
        }

        return 0;
}

static int
dsa_switch_disconnect_tag_proto(struct dsa_switch *ds,
                                struct dsa_notifier_tag_proto_info *info)
{
        const struct dsa_device_ops *tag_ops = info->tag_ops;

        /* Notify the tagger about the disconnection from this switch */
        if (tag_ops->disconnect && ds->tagger_data)
                tag_ops->disconnect(ds);

        /* No need to notify the switch, since it shouldn't have any
         * resources to tear down
         */
        return 0;
}

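/* Entry point of the cross-chip notifier chain: translate each event code
 * into one of the handlers above and convert the errno result into a notifier
 * return value. An unknown event breaks the chain with -EOPNOTSUPP, and any
 * error is logged at debug level.
 */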
static int dsa_switch_event(struct notifier_block *nb,
                            unsigned long event, void *info)
{
        struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
        int err;

        switch (event) {
        case DSA_NOTIFIER_AGEING_TIME:
                err = dsa_switch_ageing_time(ds, info);
                break;
        case DSA_NOTIFIER_BRIDGE_JOIN:
                err = dsa_switch_bridge_join(ds, info);
                break;
        case DSA_NOTIFIER_BRIDGE_LEAVE:
                err = dsa_switch_bridge_leave(ds, info);
                break;
        case DSA_NOTIFIER_FDB_ADD:
                err = dsa_switch_fdb_add(ds, info);
                break;
        case DSA_NOTIFIER_FDB_DEL:
                err = dsa_switch_fdb_del(ds, info);
                break;
        case DSA_NOTIFIER_HOST_FDB_ADD:
                err = dsa_switch_host_fdb_add(ds, info);
                break;
        case DSA_NOTIFIER_HOST_FDB_DEL:
                err = dsa_switch_host_fdb_del(ds, info);
                break;
        case DSA_NOTIFIER_LAG_CHANGE:
                err = dsa_switch_lag_change(ds, info);
                break;
        case DSA_NOTIFIER_LAG_JOIN:
                err = dsa_switch_lag_join(ds, info);
                break;
        case DSA_NOTIFIER_LAG_LEAVE:
                err = dsa_switch_lag_leave(ds, info);
                break;
        case DSA_NOTIFIER_MDB_ADD:
                err = dsa_switch_mdb_add(ds, info);
                break;
        case DSA_NOTIFIER_MDB_DEL:
                err = dsa_switch_mdb_del(ds, info);
                break;
        case DSA_NOTIFIER_HOST_MDB_ADD:
                err = dsa_switch_host_mdb_add(ds, info);
                break;
        case DSA_NOTIFIER_HOST_MDB_DEL:
                err = dsa_switch_host_mdb_del(ds, info);
                break;
        case DSA_NOTIFIER_VLAN_ADD:
                err = dsa_switch_vlan_add(ds, info);
                break;
        case DSA_NOTIFIER_VLAN_DEL:
                err = dsa_switch_vlan_del(ds, info);
                break;
        case DSA_NOTIFIER_MTU:
                err = dsa_switch_mtu(ds, info);
                break;
        case DSA_NOTIFIER_TAG_PROTO:
                err = dsa_switch_change_tag_proto(ds, info);
                break;
        case DSA_NOTIFIER_TAG_PROTO_CONNECT:
                err = dsa_switch_connect_tag_proto(ds, info);
                break;
        case DSA_NOTIFIER_TAG_PROTO_DISCONNECT:
                err = dsa_switch_disconnect_tag_proto(ds, info);
                break;
        case DSA_NOTIFIER_TAG_8021Q_VLAN_ADD:
                err = dsa_switch_tag_8021q_vlan_add(ds, info);
                break;
        case DSA_NOTIFIER_TAG_8021Q_VLAN_DEL:
                err = dsa_switch_tag_8021q_vlan_del(ds, info);
                break;
        default:
                err = -EOPNOTSUPP;
                break;
        }

        if (err)
                dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
                        event, err);

        return notifier_from_errno(err);
}

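/* Each switch registers one notifier block on its tree's raw notifier chain;
 * events emitted anywhere in the tree (e.g. through dsa_tree_notify())
 * therefore reach every switch of the fabric via dsa_switch_event() above.
 */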
int dsa_switch_register_notifier(struct dsa_switch *ds)
{
        ds->nb.notifier_call = dsa_switch_event;

        return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
}

void dsa_switch_unregister_notifier(struct dsa_switch *ds)
{
        int err;

        err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
        if (err)
                dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);