OSDN Git Service

net: dsa: tag_qca: add define for handling mgmt Ethernet packet
[uclinux-h8/linux.git] / net / dsa / dsa2.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/dsa/dsa2.c - Hardware switch handling, binding version 2
4  * Copyright (c) 2008-2009 Marvell Semiconductor
5  * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
6  * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
7  */
8
9 #include <linux/device.h>
10 #include <linux/err.h>
11 #include <linux/list.h>
12 #include <linux/netdevice.h>
13 #include <linux/slab.h>
14 #include <linux/rtnetlink.h>
15 #include <linux/of.h>
16 #include <linux/of_net.h>
17 #include <net/devlink.h>
18 #include <net/sch_generic.h>
19
20 #include "dsa_priv.h"
21
22 static DEFINE_MUTEX(dsa2_mutex);
23 LIST_HEAD(dsa_tree_list);
24
25 /* Track the bridges with forwarding offload enabled */
26 static unsigned long dsa_fwd_offloading_bridges;
27
28 /**
29  * dsa_tree_notify - Execute code for all switches in a DSA switch tree.
30  * @dst: collection of struct dsa_switch devices to notify.
31  * @e: event, must be of type DSA_NOTIFIER_*
32  * @v: event-specific value.
33  *
34  * Given a struct dsa_switch_tree, this can be used to run a function once for
35  * each member DSA switch. The other alternative of traversing the tree is only
36  * through its ports list, which does not uniquely list the switches.
37  */
38 int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v)
39 {
40         struct raw_notifier_head *nh = &dst->nh;
41         int err;
42
43         err = raw_notifier_call_chain(nh, e, v);
44
45         return notifier_to_errno(err);
46 }
47
48 /**
49  * dsa_broadcast - Notify all DSA trees in the system.
50  * @e: event, must be of type DSA_NOTIFIER_*
51  * @v: event-specific value.
52  *
53  * Can be used to notify the switching fabric of events such as cross-chip
54  * bridging between disjoint trees (such as islands of tagger-compatible
55  * switches bridged by an incompatible middle switch).
56  *
57  * WARNING: this function is not reliable during probe time, because probing
58  * between trees is asynchronous and not all DSA trees might have probed.
59  */
60 int dsa_broadcast(unsigned long e, void *v)
61 {
62         struct dsa_switch_tree *dst;
63         int err = 0;
64
65         list_for_each_entry(dst, &dsa_tree_list, list) {
66                 err = dsa_tree_notify(dst, e, v);
67                 if (err)
68                         break;
69         }
70
71         return err;
72 }
73
74 /**
75  * dsa_lag_map() - Map LAG netdev to a linear LAG ID
76  * @dst: Tree in which to record the mapping.
77  * @lag: Netdev that is to be mapped to an ID.
78  *
79  * dsa_lag_id/dsa_lag_dev can then be used to translate between the
80  * two spaces. The size of the mapping space is determined by the
81  * driver by setting ds->num_lag_ids. It is perfectly legal to leave
82  * it unset if it is not needed, in which case these functions become
83  * no-ops.
84  */
85 void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag)
86 {
87         unsigned int id;
88
89         if (dsa_lag_id(dst, lag) >= 0)
90                 /* Already mapped */
91                 return;
92
93         for (id = 0; id < dst->lags_len; id++) {
94                 if (!dsa_lag_dev(dst, id)) {
95                         dst->lags[id] = lag;
96                         return;
97                 }
98         }
99
100         /* No IDs left, which is OK. Some drivers do not need it. The
101          * ones that do, e.g. mv88e6xxx, will discover that dsa_lag_id
102          * returns an error for this device when joining the LAG. The
103          * driver can then return -EOPNOTSUPP back to DSA, which will
104          * fall back to a software LAG.
105          */
106 }
107
108 /**
109  * dsa_lag_unmap() - Remove a LAG ID mapping
110  * @dst: Tree in which the mapping is recorded.
111  * @lag: Netdev that was mapped.
112  *
113  * As there may be multiple users of the mapping, it is only removed
114  * if there are no other references to it.
115  */
116 void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag)
117 {
118         struct dsa_port *dp;
119         unsigned int id;
120
121         dsa_lag_foreach_port(dp, dst, lag)
122                 /* There are remaining users of this mapping */
123                 return;
124
125         dsa_lags_foreach_id(id, dst) {
126                 if (dsa_lag_dev(dst, id) == lag) {
127                         dst->lags[id] = NULL;
128                         break;
129                 }
130         }
131 }
132
133 struct dsa_bridge *dsa_tree_bridge_find(struct dsa_switch_tree *dst,
134                                         const struct net_device *br)
135 {
136         struct dsa_port *dp;
137
138         list_for_each_entry(dp, &dst->ports, list)
139                 if (dsa_port_bridge_dev_get(dp) == br)
140                         return dp->bridge;
141
142         return NULL;
143 }
144
145 static int dsa_bridge_num_find(const struct net_device *bridge_dev)
146 {
147         struct dsa_switch_tree *dst;
148
149         list_for_each_entry(dst, &dsa_tree_list, list) {
150                 struct dsa_bridge *bridge;
151
152                 bridge = dsa_tree_bridge_find(dst, bridge_dev);
153                 if (bridge)
154                         return bridge->num;
155         }
156
157         return 0;
158 }
159
/* Return the forwarding offload bridge number to use for @bridge_dev.
 * @max is the number of bridge numbers the requesting switch supports;
 * 0 means the switch has no FDB isolation, in which case all bridges
 * share number 0.
 */
unsigned int dsa_bridge_num_get(const struct net_device *bridge_dev, int max)
{
	unsigned int bridge_num = dsa_bridge_num_find(bridge_dev);

	/* Switches without FDB isolation support don't get unique
	 * bridge numbering
	 */
	if (!max)
		return 0;

	if (!bridge_num) {
		/* First port that requests FDB isolation or TX forwarding
		 * offload for this bridge
		 */
		/* Search starts at bit 1 so that 0 keeps meaning
		 * "no number assigned".
		 */
		bridge_num = find_next_zero_bit(&dsa_fwd_offloading_bridges,
						DSA_MAX_NUM_OFFLOADING_BRIDGES,
						1);
		/* Out of hardware bridge numbers for this switch; fall back
		 * to the shared number 0.
		 */
		if (bridge_num >= max)
			return 0;

		set_bit(bridge_num, &dsa_fwd_offloading_bridges);
	}

	return bridge_num;
}
185
/* Release @bridge_num previously handed out by dsa_bridge_num_get(). */
void dsa_bridge_num_put(const struct net_device *bridge_dev,
			unsigned int bridge_num)
{
	/* Since we refcount bridges, we know that when we call this function
	 * it is no longer in use, so we can just go ahead and remove it from
	 * the bit mask.
	 */
	clear_bit(bridge_num, &dsa_fwd_offloading_bridges);
}
195
196 struct dsa_switch *dsa_switch_find(int tree_index, int sw_index)
197 {
198         struct dsa_switch_tree *dst;
199         struct dsa_port *dp;
200
201         list_for_each_entry(dst, &dsa_tree_list, list) {
202                 if (dst->index != tree_index)
203                         continue;
204
205                 list_for_each_entry(dp, &dst->ports, list) {
206                         if (dp->ds->index != sw_index)
207                                 continue;
208
209                         return dp->ds;
210                 }
211         }
212
213         return NULL;
214 }
215 EXPORT_SYMBOL_GPL(dsa_switch_find);
216
217 static struct dsa_switch_tree *dsa_tree_find(int index)
218 {
219         struct dsa_switch_tree *dst;
220
221         list_for_each_entry(dst, &dsa_tree_list, list)
222                 if (dst->index == index)
223                         return dst;
224
225         return NULL;
226 }
227
228 static struct dsa_switch_tree *dsa_tree_alloc(int index)
229 {
230         struct dsa_switch_tree *dst;
231
232         dst = kzalloc(sizeof(*dst), GFP_KERNEL);
233         if (!dst)
234                 return NULL;
235
236         dst->index = index;
237
238         INIT_LIST_HEAD(&dst->rtable);
239
240         INIT_LIST_HEAD(&dst->ports);
241
242         INIT_LIST_HEAD(&dst->list);
243         list_add_tail(&dst->list, &dsa_tree_list);
244
245         kref_init(&dst->refcount);
246
247         return dst;
248 }
249
250 static void dsa_tree_free(struct dsa_switch_tree *dst)
251 {
252         if (dst->tag_ops)
253                 dsa_tag_driver_put(dst->tag_ops);
254         list_del(&dst->list);
255         kfree(dst);
256 }
257
258 static struct dsa_switch_tree *dsa_tree_get(struct dsa_switch_tree *dst)
259 {
260         if (dst)
261                 kref_get(&dst->refcount);
262
263         return dst;
264 }
265
/* Return the tree with the given index, taking a new reference if it
 * exists, or allocating it otherwise.
 */
static struct dsa_switch_tree *dsa_tree_touch(int index)
{
	struct dsa_switch_tree *dst = dsa_tree_find(index);

	return dst ? dsa_tree_get(dst) : dsa_tree_alloc(index);
}
276
277 static void dsa_tree_release(struct kref *ref)
278 {
279         struct dsa_switch_tree *dst;
280
281         dst = container_of(ref, struct dsa_switch_tree, refcount);
282
283         dsa_tree_free(dst);
284 }
285
286 static void dsa_tree_put(struct dsa_switch_tree *dst)
287 {
288         if (dst)
289                 kref_put(&dst->refcount, dsa_tree_release);
290 }
291
292 static struct dsa_port *dsa_tree_find_port_by_node(struct dsa_switch_tree *dst,
293                                                    struct device_node *dn)
294 {
295         struct dsa_port *dp;
296
297         list_for_each_entry(dp, &dst->ports, list)
298                 if (dp->dn == dn)
299                         return dp;
300
301         return NULL;
302 }
303
304 static struct dsa_link *dsa_link_touch(struct dsa_port *dp,
305                                        struct dsa_port *link_dp)
306 {
307         struct dsa_switch *ds = dp->ds;
308         struct dsa_switch_tree *dst;
309         struct dsa_link *dl;
310
311         dst = ds->dst;
312
313         list_for_each_entry(dl, &dst->rtable, list)
314                 if (dl->dp == dp && dl->link_dp == link_dp)
315                         return dl;
316
317         dl = kzalloc(sizeof(*dl), GFP_KERNEL);
318         if (!dl)
319                 return NULL;
320
321         dl->dp = dp;
322         dl->link_dp = link_dp;
323
324         INIT_LIST_HEAD(&dl->list);
325         list_add_tail(&dl->list, &dst->rtable);
326
327         return dl;
328 }
329
/* Walk this DSA port's "link" phandles and record a dsa_link towards each
 * linked port. Returns false when a linked port has not been probed into
 * the tree yet (the routing table is incomplete) or on allocation failure.
 */
static bool dsa_port_setup_routing_table(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	struct device_node *dn = dp->dn;
	struct of_phandle_iterator it;
	struct dsa_port *link_dp;
	struct dsa_link *dl;
	int err;

	of_for_each_phandle(&it, err, dn, "link", NULL, 0) {
		link_dp = dsa_tree_find_port_by_node(dst, it.node);
		if (!link_dp) {
			/* Drop the iterator's node reference on early exit */
			of_node_put(it.node);
			return false;
		}

		dl = dsa_link_touch(dp, link_dp);
		if (!dl) {
			of_node_put(it.node);
			return false;
		}
	}

	return true;
}
356
357 static bool dsa_tree_setup_routing_table(struct dsa_switch_tree *dst)
358 {
359         bool complete = true;
360         struct dsa_port *dp;
361
362         list_for_each_entry(dp, &dst->ports, list) {
363                 if (dsa_port_is_dsa(dp)) {
364                         complete = dsa_port_setup_routing_table(dp);
365                         if (!complete)
366                                 break;
367                 }
368         }
369
370         return complete;
371 }
372
373 static struct dsa_port *dsa_tree_find_first_cpu(struct dsa_switch_tree *dst)
374 {
375         struct dsa_port *dp;
376
377         list_for_each_entry(dp, &dst->ports, list)
378                 if (dsa_port_is_cpu(dp))
379                         return dp;
380
381         return NULL;
382 }
383
384 /* Assign the default CPU port (the first one in the tree) to all ports of the
385  * fabric which don't already have one as part of their own switch.
386  */
387 static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
388 {
389         struct dsa_port *cpu_dp, *dp;
390
391         cpu_dp = dsa_tree_find_first_cpu(dst);
392         if (!cpu_dp) {
393                 pr_err("DSA: tree %d has no CPU port\n", dst->index);
394                 return -EINVAL;
395         }
396
397         list_for_each_entry(dp, &dst->ports, list) {
398                 if (dp->cpu_dp)
399                         continue;
400
401                 if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
402                         dp->cpu_dp = cpu_dp;
403         }
404
405         return 0;
406 }
407
408 /* Perform initial assignment of CPU ports to user ports and DSA links in the
409  * fabric, giving preference to CPU ports local to each switch. Default to
410  * using the first CPU port in the switch tree if the port does not have a CPU
411  * port local to this switch.
412  */
413 static int dsa_tree_setup_cpu_ports(struct dsa_switch_tree *dst)
414 {
415         struct dsa_port *cpu_dp, *dp;
416
417         list_for_each_entry(cpu_dp, &dst->ports, list) {
418                 if (!dsa_port_is_cpu(cpu_dp))
419                         continue;
420
421                 /* Prefer a local CPU port */
422                 dsa_switch_for_each_port(dp, cpu_dp->ds) {
423                         /* Prefer the first local CPU port found */
424                         if (dp->cpu_dp)
425                                 continue;
426
427                         if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
428                                 dp->cpu_dp = cpu_dp;
429                 }
430         }
431
432         return dsa_tree_setup_default_cpu(dst);
433 }
434
435 static void dsa_tree_teardown_cpu_ports(struct dsa_switch_tree *dst)
436 {
437         struct dsa_port *dp;
438
439         list_for_each_entry(dp, &dst->ports, list)
440                 if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
441                         dp->cpu_dp = NULL;
442 }
443
/* Per-port setup: initialize address lists, let the driver set the port up,
 * then perform type-specific work (register the link and enable CPU/DSA
 * ports, create the slave netdev for user ports). Idempotent via dp->setup.
 * On failure, everything done here is unwound in reverse order.
 */
static int dsa_port_setup(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	bool dsa_port_link_registered = false;
	struct dsa_switch *ds = dp->ds;
	bool dsa_port_enabled = false;
	int err = 0;

	if (dp->setup)
		return 0;

	mutex_init(&dp->addr_lists_lock);
	INIT_LIST_HEAD(&dp->fdbs);
	INIT_LIST_HEAD(&dp->mdbs);

	if (ds->ops->port_setup) {
		err = ds->ops->port_setup(ds, dp->index);
		if (err)
			return err;
	}

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		dsa_port_disable(dp);
		break;
	case DSA_PORT_TYPE_CPU:
		err = dsa_port_link_register_of(dp);
		if (err)
			break;
		dsa_port_link_registered = true;

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_DSA:
		err = dsa_port_link_register_of(dp);
		if (err)
			break;
		dsa_port_link_registered = true;

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_USER:
		of_get_mac_address(dp->dn, dp->mac);
		err = dsa_slave_create(dp);
		if (err)
			break;

		devlink_port_type_eth_set(dlp, dp->slave);
		break;
	}

	/* Unwind in reverse order of the steps above; the booleans track
	 * how far the type-specific setup got before failing.
	 */
	if (err && dsa_port_enabled)
		dsa_port_disable(dp);
	if (err && dsa_port_link_registered)
		dsa_port_link_unregister_of(dp);
	if (err) {
		if (ds->ops->port_teardown)
			ds->ops->port_teardown(ds, dp->index);
		return err;
	}

	dp->setup = true;

	return 0;
}
517
518 static int dsa_port_devlink_setup(struct dsa_port *dp)
519 {
520         struct devlink_port *dlp = &dp->devlink_port;
521         struct dsa_switch_tree *dst = dp->ds->dst;
522         struct devlink_port_attrs attrs = {};
523         struct devlink *dl = dp->ds->devlink;
524         const unsigned char *id;
525         unsigned char len;
526         int err;
527
528         id = (const unsigned char *)&dst->index;
529         len = sizeof(dst->index);
530
531         attrs.phys.port_number = dp->index;
532         memcpy(attrs.switch_id.id, id, len);
533         attrs.switch_id.id_len = len;
534         memset(dlp, 0, sizeof(*dlp));
535
536         switch (dp->type) {
537         case DSA_PORT_TYPE_UNUSED:
538                 attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED;
539                 break;
540         case DSA_PORT_TYPE_CPU:
541                 attrs.flavour = DEVLINK_PORT_FLAVOUR_CPU;
542                 break;
543         case DSA_PORT_TYPE_DSA:
544                 attrs.flavour = DEVLINK_PORT_FLAVOUR_DSA;
545                 break;
546         case DSA_PORT_TYPE_USER:
547                 attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
548                 break;
549         }
550
551         devlink_port_attrs_set(dlp, &attrs);
552         err = devlink_port_register(dl, dlp, dp->index);
553
554         if (!err)
555                 dp->devlink_port_setup = true;
556
557         return err;
558 }
559
/* Undo dsa_port_setup(): let the driver tear the port down, clear the
 * devlink port type, undo the type-specific setup and free the FDB/MDB
 * entries still tracked on the port. Idempotent via dp->setup.
 */
static void dsa_port_teardown(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a, *tmp;
	struct net_device *slave;

	if (!dp->setup)
		return;

	if (ds->ops->port_teardown)
		ds->ops->port_teardown(ds, dp->index);

	devlink_port_type_clear(dlp);

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		break;
	case DSA_PORT_TYPE_CPU:
		dsa_port_disable(dp);
		dsa_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_DSA:
		dsa_port_disable(dp);
		dsa_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_USER:
		slave = dp->slave;

		/* Detach dp->slave before destroying the netdev */
		if (slave) {
			dp->slave = NULL;
			dsa_slave_destroy(slave);
		}
		break;
	}

	/* Free any address entries left on the port's lists */
	list_for_each_entry_safe(a, tmp, &dp->fdbs, list) {
		list_del(&a->list);
		kfree(a);
	}

	list_for_each_entry_safe(a, tmp, &dp->mdbs, list) {
		list_del(&a->list);
		kfree(a);
	}

	dp->setup = false;
}
608
609 static void dsa_port_devlink_teardown(struct dsa_port *dp)
610 {
611         struct devlink_port *dlp = &dp->devlink_port;
612
613         if (dp->devlink_port_setup)
614                 devlink_port_unregister(dlp);
615         dp->devlink_port_setup = false;
616 }
617
618 /* Destroy the current devlink port, and create a new one which has the UNUSED
619  * flavour. At this point, any call to ds->ops->port_setup has been already
620  * balanced out by a call to ds->ops->port_teardown, so we know that any
621  * devlink port regions the driver had are now unregistered. We then call its
622  * ds->ops->port_setup again, in order for the driver to re-create them on the
623  * new devlink port.
624  */
static int dsa_port_reinit_as_unused(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	/* Re-register the devlink port with the UNUSED flavour */
	dsa_port_devlink_teardown(dp);
	dp->type = DSA_PORT_TYPE_UNUSED;
	err = dsa_port_devlink_setup(dp);
	if (err)
		return err;

	if (ds->ops->port_setup) {
		/* On error, leave the devlink port registered,
		 * dsa_switch_teardown will clean it up later.
		 */
		err = ds->ops->port_setup(ds, dp->index);
		if (err)
			return err;
	}

	return 0;
}
647
648 static int dsa_devlink_info_get(struct devlink *dl,
649                                 struct devlink_info_req *req,
650                                 struct netlink_ext_ack *extack)
651 {
652         struct dsa_switch *ds = dsa_devlink_to_ds(dl);
653
654         if (ds->ops->devlink_info_get)
655                 return ds->ops->devlink_info_get(ds, req, extack);
656
657         return -EOPNOTSUPP;
658 }
659
660 static int dsa_devlink_sb_pool_get(struct devlink *dl,
661                                    unsigned int sb_index, u16 pool_index,
662                                    struct devlink_sb_pool_info *pool_info)
663 {
664         struct dsa_switch *ds = dsa_devlink_to_ds(dl);
665
666         if (!ds->ops->devlink_sb_pool_get)
667                 return -EOPNOTSUPP;
668
669         return ds->ops->devlink_sb_pool_get(ds, sb_index, pool_index,
670                                             pool_info);
671 }
672
673 static int dsa_devlink_sb_pool_set(struct devlink *dl, unsigned int sb_index,
674                                    u16 pool_index, u32 size,
675                                    enum devlink_sb_threshold_type threshold_type,
676                                    struct netlink_ext_ack *extack)
677 {
678         struct dsa_switch *ds = dsa_devlink_to_ds(dl);
679
680         if (!ds->ops->devlink_sb_pool_set)
681                 return -EOPNOTSUPP;
682
683         return ds->ops->devlink_sb_pool_set(ds, sb_index, pool_index, size,
684                                             threshold_type, extack);
685 }
686
687 static int dsa_devlink_sb_port_pool_get(struct devlink_port *dlp,
688                                         unsigned int sb_index, u16 pool_index,
689                                         u32 *p_threshold)
690 {
691         struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
692         int port = dsa_devlink_port_to_port(dlp);
693
694         if (!ds->ops->devlink_sb_port_pool_get)
695                 return -EOPNOTSUPP;
696
697         return ds->ops->devlink_sb_port_pool_get(ds, port, sb_index,
698                                                  pool_index, p_threshold);
699 }
700
701 static int dsa_devlink_sb_port_pool_set(struct devlink_port *dlp,
702                                         unsigned int sb_index, u16 pool_index,
703                                         u32 threshold,
704                                         struct netlink_ext_ack *extack)
705 {
706         struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
707         int port = dsa_devlink_port_to_port(dlp);
708
709         if (!ds->ops->devlink_sb_port_pool_set)
710                 return -EOPNOTSUPP;
711
712         return ds->ops->devlink_sb_port_pool_set(ds, port, sb_index,
713                                                  pool_index, threshold, extack);
714 }
715
716 static int
717 dsa_devlink_sb_tc_pool_bind_get(struct devlink_port *dlp,
718                                 unsigned int sb_index, u16 tc_index,
719                                 enum devlink_sb_pool_type pool_type,
720                                 u16 *p_pool_index, u32 *p_threshold)
721 {
722         struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
723         int port = dsa_devlink_port_to_port(dlp);
724
725         if (!ds->ops->devlink_sb_tc_pool_bind_get)
726                 return -EOPNOTSUPP;
727
728         return ds->ops->devlink_sb_tc_pool_bind_get(ds, port, sb_index,
729                                                     tc_index, pool_type,
730                                                     p_pool_index, p_threshold);
731 }
732
733 static int
734 dsa_devlink_sb_tc_pool_bind_set(struct devlink_port *dlp,
735                                 unsigned int sb_index, u16 tc_index,
736                                 enum devlink_sb_pool_type pool_type,
737                                 u16 pool_index, u32 threshold,
738                                 struct netlink_ext_ack *extack)
739 {
740         struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
741         int port = dsa_devlink_port_to_port(dlp);
742
743         if (!ds->ops->devlink_sb_tc_pool_bind_set)
744                 return -EOPNOTSUPP;
745
746         return ds->ops->devlink_sb_tc_pool_bind_set(ds, port, sb_index,
747                                                     tc_index, pool_type,
748                                                     pool_index, threshold,
749                                                     extack);
750 }
751
752 static int dsa_devlink_sb_occ_snapshot(struct devlink *dl,
753                                        unsigned int sb_index)
754 {
755         struct dsa_switch *ds = dsa_devlink_to_ds(dl);
756
757         if (!ds->ops->devlink_sb_occ_snapshot)
758                 return -EOPNOTSUPP;
759
760         return ds->ops->devlink_sb_occ_snapshot(ds, sb_index);
761 }
762
763 static int dsa_devlink_sb_occ_max_clear(struct devlink *dl,
764                                         unsigned int sb_index)
765 {
766         struct dsa_switch *ds = dsa_devlink_to_ds(dl);
767
768         if (!ds->ops->devlink_sb_occ_max_clear)
769                 return -EOPNOTSUPP;
770
771         return ds->ops->devlink_sb_occ_max_clear(ds, sb_index);
772 }
773
774 static int dsa_devlink_sb_occ_port_pool_get(struct devlink_port *dlp,
775                                             unsigned int sb_index,
776                                             u16 pool_index, u32 *p_cur,
777                                             u32 *p_max)
778 {
779         struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
780         int port = dsa_devlink_port_to_port(dlp);
781
782         if (!ds->ops->devlink_sb_occ_port_pool_get)
783                 return -EOPNOTSUPP;
784
785         return ds->ops->devlink_sb_occ_port_pool_get(ds, port, sb_index,
786                                                      pool_index, p_cur, p_max);
787 }
788
789 static int
790 dsa_devlink_sb_occ_tc_port_bind_get(struct devlink_port *dlp,
791                                     unsigned int sb_index, u16 tc_index,
792                                     enum devlink_sb_pool_type pool_type,
793                                     u32 *p_cur, u32 *p_max)
794 {
795         struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
796         int port = dsa_devlink_port_to_port(dlp);
797
798         if (!ds->ops->devlink_sb_occ_tc_port_bind_get)
799                 return -EOPNOTSUPP;
800
801         return ds->ops->devlink_sb_occ_tc_port_bind_get(ds, port,
802                                                         sb_index, tc_index,
803                                                         pool_type, p_cur,
804                                                         p_max);
805 }
806
/* devlink ops shared by all DSA switches; each entry is a thin wrapper
 * that forwards to the driver's optional dsa_switch_ops implementation.
 */
static const struct devlink_ops dsa_devlink_ops = {
	.info_get			= dsa_devlink_info_get,
	.sb_pool_get			= dsa_devlink_sb_pool_get,
	.sb_pool_set			= dsa_devlink_sb_pool_set,
	.sb_port_pool_get		= dsa_devlink_sb_port_pool_get,
	.sb_port_pool_set		= dsa_devlink_sb_port_pool_set,
	.sb_tc_pool_bind_get		= dsa_devlink_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= dsa_devlink_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= dsa_devlink_sb_occ_snapshot,
	.sb_occ_max_clear		= dsa_devlink_sb_occ_max_clear,
	.sb_occ_port_pool_get		= dsa_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= dsa_devlink_sb_occ_tc_port_bind_get,
};
820
/* Make the tree's tagging protocol effective on this switch: if it differs
 * from the default protocol, ask the driver to switch each CPU port over,
 * then let the tagger and the driver connect to each other.
 */
static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds)
{
	const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;
	struct dsa_switch_tree *dst = ds->dst;
	struct dsa_port *cpu_dp;
	int err;

	if (tag_ops->proto == dst->default_proto)
		goto connect;

	dsa_switch_for_each_cpu_port(cpu_dp, ds) {
		/* change_tag_protocol is called under rtnl_lock */
		rtnl_lock();
		err = ds->ops->change_tag_protocol(ds, cpu_dp->index,
						   tag_ops->proto);
		rtnl_unlock();
		if (err) {
			dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n",
				tag_ops->name, ERR_PTR(err));
			return err;
		}
	}

connect:
	if (tag_ops->connect) {
		err = tag_ops->connect(ds);
		if (err)
			return err;
	}

	if (ds->ops->connect_tag_protocol) {
		err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
		if (err) {
			dev_err(ds->dev,
				"Unable to connect to tag protocol \"%s\": %pe\n",
				tag_ops->name, ERR_PTR(err));
			goto disconnect;
		}
	}

	return 0;

disconnect:
	/* Balance the tagger-side connect() done above */
	if (tag_ops->disconnect)
		tag_ops->disconnect(ds);

	return err;
}
868
/* One-time setup of a switch: devlink instance and ports, cross-chip
 * notifier, the driver's ->setup(), the tagging protocol and, for drivers
 * exposing PHYs via ->phy_read, a slave MDIO bus. Idempotent via ds->setup;
 * errors unwind in reverse order through the goto ladder.
 */
static int dsa_switch_setup(struct dsa_switch *ds)
{
	struct dsa_devlink_priv *dl_priv;
	struct dsa_port *dp;
	int err;

	if (ds->setup)
		return 0;

	/* Initialize ds->phys_mii_mask before registering the slave MDIO bus
	 * driver and before ops->setup() has run, since the switch drivers and
	 * the slave MDIO bus driver rely on these values for probing PHY
	 * devices or not
	 */
	ds->phys_mii_mask |= dsa_user_ports(ds);

	/* Add the switch to devlink before calling setup, so that setup can
	 * add dpipe tables
	 */
	ds->devlink =
		devlink_alloc(&dsa_devlink_ops, sizeof(*dl_priv), ds->dev);
	if (!ds->devlink)
		return -ENOMEM;
	dl_priv = devlink_priv(ds->devlink);
	dl_priv->ds = ds;

	/* Setup devlink port instances now, so that the switch
	 * setup() can register regions etc, against the ports
	 */
	dsa_switch_for_each_port(dp, ds) {
		err = dsa_port_devlink_setup(dp);
		if (err)
			goto unregister_devlink_ports;
	}

	err = dsa_switch_register_notifier(ds);
	if (err)
		goto unregister_devlink_ports;

	ds->configure_vlan_while_not_filtering = true;

	err = ds->ops->setup(ds);
	if (err < 0)
		goto unregister_notifier;

	err = dsa_switch_setup_tag_protocol(ds);
	if (err)
		goto teardown;

	/* Allocate and register a slave MII bus on behalf of drivers that
	 * expose PHYs through ->phy_read and did not provide their own bus
	 */
	if (!ds->slave_mii_bus && ds->ops->phy_read) {
		ds->slave_mii_bus = mdiobus_alloc();
		if (!ds->slave_mii_bus) {
			err = -ENOMEM;
			goto teardown;
		}

		dsa_slave_mii_bus_init(ds);

		err = mdiobus_register(ds->slave_mii_bus);
		if (err < 0)
			goto free_slave_mii_bus;
	}

	ds->setup = true;
	/* Only publish the devlink instance once setup fully succeeded */
	devlink_register(ds->devlink);
	return 0;

free_slave_mii_bus:
	if (ds->slave_mii_bus && ds->ops->phy_read)
		mdiobus_free(ds->slave_mii_bus);
teardown:
	if (ds->ops->teardown)
		ds->ops->teardown(ds);
unregister_notifier:
	dsa_switch_unregister_notifier(ds);
unregister_devlink_ports:
	dsa_switch_for_each_port(dp, ds)
		dsa_port_devlink_teardown(dp);
	devlink_free(ds->devlink);
	ds->devlink = NULL;
	return err;
}
951
/* Undo dsa_switch_setup() in reverse order. Idempotent: returns early
 * if the switch was never (or is no longer) set up, which lets callers
 * visit a multi-port switch more than once.
 */
static void dsa_switch_teardown(struct dsa_switch *ds)
{
	struct dsa_port *dp;

	if (!ds->setup)
		return;

	/* Hide the devlink instance from userspace first */
	if (ds->devlink)
		devlink_unregister(ds->devlink);

	if (ds->slave_mii_bus && ds->ops->phy_read) {
		mdiobus_unregister(ds->slave_mii_bus);
		mdiobus_free(ds->slave_mii_bus);
		ds->slave_mii_bus = NULL;
	}

	if (ds->ops->teardown)
		ds->ops->teardown(ds);

	dsa_switch_unregister_notifier(ds);

	if (ds->devlink) {
		dsa_switch_for_each_port(dp, ds)
			dsa_port_devlink_teardown(dp);
		devlink_free(ds->devlink);
		ds->devlink = NULL;
	}

	ds->setup = false;
}
982
983 /* First tear down the non-shared, then the shared ports. This ensures that
984  * all work items scheduled by our switchdev handlers for user ports have
985  * completed before we destroy the refcounting kept on the shared ports.
986  */
987 static void dsa_tree_teardown_ports(struct dsa_switch_tree *dst)
988 {
989         struct dsa_port *dp;
990
991         list_for_each_entry(dp, &dst->ports, list)
992                 if (dsa_port_is_user(dp) || dsa_port_is_unused(dp))
993                         dsa_port_teardown(dp);
994
995         dsa_flush_workqueue();
996
997         list_for_each_entry(dp, &dst->ports, list)
998                 if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
999                         dsa_port_teardown(dp);
1000 }
1001
/* Tear down every switch in the tree. dsa_switch_teardown() bails out
 * when ds->setup is false, so reaching a multi-port switch once per
 * port is harmless.
 */
static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		dsa_switch_teardown(dp->ds);
}
1009
/* Bring shared ports up first, then non-shared ports */
static int dsa_tree_setup_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;
	int err = 0;

	/* Shared (DSA and CPU) ports first: user ports depend on them */
	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp)) {
			err = dsa_port_setup(dp);
			if (err)
				goto teardown;
		}
	}

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_user(dp) || dsa_port_is_unused(dp)) {
			err = dsa_port_setup(dp);
			if (err) {
				/* A user port that fails to set up is
				 * downgraded to unused instead of failing
				 * the whole tree; only bail if even that
				 * fails.
				 */
				err = dsa_port_reinit_as_unused(dp);
				if (err)
					goto teardown;
			}
		}
	}

	return 0;

teardown:
	dsa_tree_teardown_ports(dst);

	return err;
}
1042
1043 static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
1044 {
1045         struct dsa_port *dp;
1046         int err = 0;
1047
1048         list_for_each_entry(dp, &dst->ports, list) {
1049                 err = dsa_switch_setup(dp->ds);
1050                 if (err) {
1051                         dsa_tree_teardown_switches(dst);
1052                         break;
1053                 }
1054         }
1055
1056         return err;
1057 }
1058
1059 static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
1060 {
1061         struct dsa_port *dp;
1062         int err;
1063
1064         rtnl_lock();
1065
1066         list_for_each_entry(dp, &dst->ports, list) {
1067                 if (dsa_port_is_cpu(dp)) {
1068                         struct net_device *master = dp->master;
1069                         bool admin_up = (master->flags & IFF_UP) &&
1070                                         !qdisc_tx_is_noop(master);
1071
1072                         err = dsa_master_setup(master, dp);
1073                         if (err)
1074                                 return err;
1075
1076                         /* Replay master state event */
1077                         dsa_tree_master_admin_state_change(dst, master, admin_up);
1078                         dsa_tree_master_oper_state_change(dst, master,
1079                                                           netif_oper_up(master));
1080                 }
1081         }
1082
1083         rtnl_unlock();
1084
1085         return 0;
1086 }
1087
1088 static void dsa_tree_teardown_master(struct dsa_switch_tree *dst)
1089 {
1090         struct dsa_port *dp;
1091
1092         rtnl_lock();
1093
1094         list_for_each_entry(dp, &dst->ports, list) {
1095                 if (dsa_port_is_cpu(dp)) {
1096                         struct net_device *master = dp->master;
1097
1098                         /* Synthesizing an "admin down" state is sufficient for
1099                          * the switches to get a notification if the master is
1100                          * currently up and running.
1101                          */
1102                         dsa_tree_master_admin_state_change(dst, master, false);
1103
1104                         dsa_master_teardown(master);
1105                 }
1106         }
1107
1108         rtnl_unlock();
1109 }
1110
1111 static int dsa_tree_setup_lags(struct dsa_switch_tree *dst)
1112 {
1113         unsigned int len = 0;
1114         struct dsa_port *dp;
1115
1116         list_for_each_entry(dp, &dst->ports, list) {
1117                 if (dp->ds->num_lag_ids > len)
1118                         len = dp->ds->num_lag_ids;
1119         }
1120
1121         if (!len)
1122                 return 0;
1123
1124         dst->lags = kcalloc(len, sizeof(*dst->lags), GFP_KERNEL);
1125         if (!dst->lags)
1126                 return -ENOMEM;
1127
1128         dst->lags_len = len;
1129         return 0;
1130 }
1131
/* Free the LAG array allocated by dsa_tree_setup_lags() (NULL-safe) */
static void dsa_tree_teardown_lags(struct dsa_switch_tree *dst)
{
	kfree(dst->lags);
}
1136
/* Bring up an entire switch tree: CPU ports, switches, masters, the
 * remaining ports, then LAGs. Returns 0 without doing anything while
 * the routing table is still incomplete, i.e. until the last member
 * switch of the tree has probed.
 */
static int dsa_tree_setup(struct dsa_switch_tree *dst)
{
	bool complete;
	int err;

	if (dst->setup) {
		pr_err("DSA: tree %d already setup! Disjoint trees?\n",
		       dst->index);
		return -EEXIST;
	}

	/* Wait until all member switches of this tree have probed */
	complete = dsa_tree_setup_routing_table(dst);
	if (!complete)
		return 0;

	err = dsa_tree_setup_cpu_ports(dst);
	if (err)
		return err;

	err = dsa_tree_setup_switches(dst);
	if (err)
		goto teardown_cpu_ports;

	err = dsa_tree_setup_master(dst);
	if (err)
		goto teardown_switches;

	err = dsa_tree_setup_ports(dst);
	if (err)
		goto teardown_master;

	err = dsa_tree_setup_lags(dst);
	if (err)
		goto teardown_ports;

	dst->setup = true;

	pr_info("DSA: tree %d setup\n", dst->index);

	return 0;

teardown_ports:
	dsa_tree_teardown_ports(dst);
teardown_master:
	dsa_tree_teardown_master(dst);
teardown_switches:
	dsa_tree_teardown_switches(dst);
teardown_cpu_ports:
	dsa_tree_teardown_cpu_ports(dst);

	return err;
}
1189
/* Undo dsa_tree_setup() in reverse order of its steps, then free the
 * cross-chip routing table links. Idempotent via dst->setup.
 */
static void dsa_tree_teardown(struct dsa_switch_tree *dst)
{
	struct dsa_link *dl, *next;

	if (!dst->setup)
		return;

	dsa_tree_teardown_lags(dst);

	dsa_tree_teardown_ports(dst);

	dsa_tree_teardown_master(dst);

	dsa_tree_teardown_switches(dst);

	dsa_tree_teardown_cpu_ports(dst);

	/* Free the routing table built while the members probed */
	list_for_each_entry_safe(dl, next, &dst->rtable, list) {
		list_del(&dl->list);
		kfree(dl);
	}

	pr_info("DSA: tree %d torn down\n", dst->index);

	dst->setup = false;
}
1216
/* Point the tree at a new tagger: connect the switches to @tag_ops,
 * then disconnect them from the previous one. -EOPNOTSUPP from the
 * connect notifier means no switch needed a tagger connection and is
 * not treated as an error. On failure, dst->tag_ops is restored.
 */
static int dsa_tree_bind_tag_proto(struct dsa_switch_tree *dst,
				   const struct dsa_device_ops *tag_ops)
{
	const struct dsa_device_ops *old_tag_ops = dst->tag_ops;
	struct dsa_notifier_tag_proto_info info;
	int err;

	dst->tag_ops = tag_ops;

	/* Notify the switches from this tree about the connection
	 * to the new tagger
	 */
	info.tag_ops = tag_ops;
	err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_CONNECT, &info);
	if (err && err != -EOPNOTSUPP)
		goto out_disconnect;

	/* Notify the old tagger about the disconnection from this tree */
	info.tag_ops = old_tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);

	return 0;

out_disconnect:
	info.tag_ops = tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);
	dst->tag_ops = old_tag_ops;

	return err;
}
1247
1248 /* Since the dsa/tagging sysfs device attribute is per master, the assumption
1249  * is that all DSA switches within a tree share the same tagger, otherwise
1250  * they would have formed disjoint trees (different "dsa,member" values).
1251  */
1252 int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
1253                               struct net_device *master,
1254                               const struct dsa_device_ops *tag_ops,
1255                               const struct dsa_device_ops *old_tag_ops)
1256 {
1257         struct dsa_notifier_tag_proto_info info;
1258         struct dsa_port *dp;
1259         int err = -EBUSY;
1260
1261         if (!rtnl_trylock())
1262                 return restart_syscall();
1263
1264         /* At the moment we don't allow changing the tag protocol under
1265          * traffic. The rtnl_mutex also happens to serialize concurrent
1266          * attempts to change the tagging protocol. If we ever lift the IFF_UP
1267          * restriction, there needs to be another mutex which serializes this.
1268          */
1269         if (master->flags & IFF_UP)
1270                 goto out_unlock;
1271
1272         list_for_each_entry(dp, &dst->ports, list) {
1273                 if (!dsa_port_is_user(dp))
1274                         continue;
1275
1276                 if (dp->slave->flags & IFF_UP)
1277                         goto out_unlock;
1278         }
1279
1280         /* Notify the tag protocol change */
1281         info.tag_ops = tag_ops;
1282         err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
1283         if (err)
1284                 return err;
1285
1286         err = dsa_tree_bind_tag_proto(dst, tag_ops);
1287         if (err)
1288                 goto out_unwind_tagger;
1289
1290         rtnl_unlock();
1291
1292         return 0;
1293
1294 out_unwind_tagger:
1295         info.tag_ops = old_tag_ops;
1296         dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
1297 out_unlock:
1298         rtnl_unlock();
1299         return err;
1300 }
1301
1302 static void dsa_tree_master_state_change(struct dsa_switch_tree *dst,
1303                                          struct net_device *master)
1304 {
1305         struct dsa_notifier_master_state_info info;
1306         struct dsa_port *cpu_dp = master->dsa_ptr;
1307
1308         info.master = master;
1309         info.operational = dsa_port_master_is_operational(cpu_dp);
1310
1311         dsa_tree_notify(dst, DSA_NOTIFIER_MASTER_STATE_CHANGE, &info);
1312 }
1313
1314 void dsa_tree_master_admin_state_change(struct dsa_switch_tree *dst,
1315                                         struct net_device *master,
1316                                         bool up)
1317 {
1318         struct dsa_port *cpu_dp = master->dsa_ptr;
1319         bool notify = false;
1320
1321         if ((dsa_port_master_is_operational(cpu_dp)) !=
1322             (up && cpu_dp->master_oper_up))
1323                 notify = true;
1324
1325         cpu_dp->master_admin_up = up;
1326
1327         if (notify)
1328                 dsa_tree_master_state_change(dst, master);
1329 }
1330
1331 void dsa_tree_master_oper_state_change(struct dsa_switch_tree *dst,
1332                                        struct net_device *master,
1333                                        bool up)
1334 {
1335         struct dsa_port *cpu_dp = master->dsa_ptr;
1336         bool notify = false;
1337
1338         if ((dsa_port_master_is_operational(cpu_dp)) !=
1339             (cpu_dp->master_admin_up && up))
1340                 notify = true;
1341
1342         cpu_dp->master_oper_up = up;
1343
1344         if (notify)
1345                 dsa_tree_master_state_change(dst, master);
1346 }
1347
1348 static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
1349 {
1350         struct dsa_switch_tree *dst = ds->dst;
1351         struct dsa_port *dp;
1352
1353         dsa_switch_for_each_port(dp, ds)
1354                 if (dp->index == index)
1355                         return dp;
1356
1357         dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1358         if (!dp)
1359                 return NULL;
1360
1361         dp->ds = ds;
1362         dp->index = index;
1363
1364         INIT_LIST_HEAD(&dp->list);
1365         list_add_tail(&dp->list, &dst->ports);
1366
1367         return dp;
1368 }
1369
1370 static int dsa_port_parse_user(struct dsa_port *dp, const char *name)
1371 {
1372         if (!name)
1373                 name = "eth%d";
1374
1375         dp->type = DSA_PORT_TYPE_USER;
1376         dp->name = name;
1377
1378         return 0;
1379 }
1380
/* Mark @dp as a DSA port (a "link" to another switch in the tree) */
static int dsa_port_parse_dsa(struct dsa_port *dp)
{
	dp->type = DSA_PORT_TYPE_DSA;

	return 0;
}
1387
/* Ask the switch driver which tagging protocol it prefers for the
 * candidate CPU port @dp attached to @master. When @master is itself a
 * DSA user port (stacked switches in disjoint trees), the upper
 * switch's protocol is passed down so the driver can pick something
 * compatible with it.
 */
static enum dsa_tag_protocol dsa_get_tag_protocol(struct dsa_port *dp,
						  struct net_device *master)
{
	enum dsa_tag_protocol tag_protocol = DSA_TAG_PROTO_NONE;
	struct dsa_switch *mds, *ds = dp->ds;
	unsigned int mdp_upstream;
	struct dsa_port *mdp;

	/* It is possible to stack DSA switches onto one another when that
	 * happens the switch driver may want to know if its tagging protocol
	 * is going to work in such a configuration.
	 */
	if (dsa_slave_dev_check(master)) {
		mdp = dsa_slave_to_port(master);
		mds = mdp->ds;
		mdp_upstream = dsa_upstream_port(mds, mdp->index);
		tag_protocol = mds->ops->get_tag_protocol(mds, mdp_upstream,
							  DSA_TAG_PROTO_NONE);
	}

	/* If the master device is not itself a DSA slave in a disjoint DSA
	 * tree, then return immediately.
	 */
	return ds->ops->get_tag_protocol(ds, dp->index, tag_protocol);
}
1413
/* Turn @dp into the CPU port connected to @master and settle on a
 * tagging protocol for the whole tree: the driver's preference by
 * default, optionally overridden by @user_protocol (from the
 * "dsa-tag-protocol" DT property). All switches of a tree must agree
 * on both the default and the resolved tagger.
 */
static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master,
			      const char *user_protocol)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	const struct dsa_device_ops *tag_ops;
	enum dsa_tag_protocol default_proto;

	/* Find out which protocol the switch would prefer. */
	default_proto = dsa_get_tag_protocol(dp, master);
	if (dst->default_proto) {
		if (dst->default_proto != default_proto) {
			dev_err(ds->dev,
				"A DSA switch tree can have only one tagging protocol\n");
			return -EINVAL;
		}
	} else {
		dst->default_proto = default_proto;
	}

	/* See if the user wants to override that preference. */
	if (user_protocol) {
		if (!ds->ops->change_tag_protocol) {
			dev_err(ds->dev, "Tag protocol cannot be modified\n");
			return -EINVAL;
		}

		tag_ops = dsa_find_tagger_by_name(user_protocol);
	} else {
		tag_ops = dsa_tag_driver_get(default_proto);
	}

	if (IS_ERR(tag_ops)) {
		/* -ENOPROTOOPT: tagger module not loaded yet, retry later */
		if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
			return -EPROBE_DEFER;

		dev_warn(ds->dev, "No tagger for this switch\n");
		return PTR_ERR(tag_ops);
	}

	if (dst->tag_ops) {
		if (dst->tag_ops != tag_ops) {
			dev_err(ds->dev,
				"A DSA switch tree can have only one tagging protocol\n");

			dsa_tag_driver_put(tag_ops);
			return -EINVAL;
		}

		/* In the case of multiple CPU ports per switch, the tagging
		 * protocol is still reference-counted only per switch tree.
		 */
		dsa_tag_driver_put(tag_ops);
	} else {
		dst->tag_ops = tag_ops;
	}

	dp->master = master;
	dp->type = DSA_PORT_TYPE_CPU;
	dsa_port_set_tag_protocol(dp, dst->tag_ops);
	dp->dst = dst;

	/* At this point, the tree may be configured to use a different
	 * tagger than the one chosen by the switch driver during
	 * .setup, in the case when a user selects a custom protocol
	 * through the DT.
	 *
	 * This is resolved by syncing the driver with the tree in
	 * dsa_switch_setup_tag_protocol once .setup has run and the
	 * driver is ready to accept calls to .change_tag_protocol. If
	 * the driver does not support the custom protocol at that
	 * point, the tree is wholly rejected, thereby ensuring that the
	 * tree and driver are always in agreement on the protocol to
	 * use.
	 */
	return 0;
}
1491
1492 static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn)
1493 {
1494         struct device_node *ethernet = of_parse_phandle(dn, "ethernet", 0);
1495         const char *name = of_get_property(dn, "label", NULL);
1496         bool link = of_property_read_bool(dn, "link");
1497
1498         dp->dn = dn;
1499
1500         if (ethernet) {
1501                 struct net_device *master;
1502                 const char *user_protocol;
1503
1504                 master = of_find_net_device_by_node(ethernet);
1505                 if (!master)
1506                         return -EPROBE_DEFER;
1507
1508                 user_protocol = of_get_property(dn, "dsa-tag-protocol", NULL);
1509                 return dsa_port_parse_cpu(dp, master, user_protocol);
1510         }
1511
1512         if (link)
1513                 return dsa_port_parse_dsa(dp);
1514
1515         return dsa_port_parse_user(dp, name);
1516 }
1517
/* Walk the "ports" (or "ethernet-ports") OF subnode of a switch node
 * and parse each available child port. All OF node references taken
 * here are dropped before returning.
 */
static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
				     struct device_node *dn)
{
	struct device_node *ports, *port;
	struct dsa_port *dp;
	int err = 0;
	u32 reg;

	ports = of_get_child_by_name(dn, "ports");
	if (!ports) {
		/* The second possibility is "ethernet-ports" */
		ports = of_get_child_by_name(dn, "ethernet-ports");
		if (!ports) {
			dev_err(ds->dev, "no ports child node found\n");
			return -EINVAL;
		}
	}

	for_each_available_child_of_node(ports, port) {
		err = of_property_read_u32(port, "reg", &reg);
		if (err) {
			/* Drop the reference held by the iterator on an
			 * early exit from the loop
			 */
			of_node_put(port);
			goto out_put_node;
		}

		if (reg >= ds->num_ports) {
			dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%u)\n",
				port, reg, ds->num_ports);
			of_node_put(port);
			err = -EINVAL;
			goto out_put_node;
		}

		dp = dsa_to_port(ds, reg);

		err = dsa_port_parse_of(dp, port);
		if (err) {
			of_node_put(port);
			goto out_put_node;
		}
	}

out_put_node:
	of_node_put(ports);
	return err;
}
1564
/* Read the optional "dsa,member" property (<tree index, switch index>)
 * and attach the switch to its tree, creating the tree if needed. When
 * the property is absent, both indices default to 0.
 */
static int dsa_switch_parse_member_of(struct dsa_switch *ds,
				      struct device_node *dn)
{
	u32 m[2] = { 0, 0 };
	int sz;

	/* Don't error out if this optional property isn't found */
	sz = of_property_read_variable_u32_array(dn, "dsa,member", m, 2, 2);
	if (sz < 0 && sz != -EINVAL)
		return sz;

	ds->index = m[1];

	ds->dst = dsa_tree_touch(m[0]);
	if (!ds->dst)
		return -ENOMEM;

	/* Switch indices must be unique within a tree */
	if (dsa_switch_find(ds->dst->index, ds->index)) {
		dev_err(ds->dev,
			"A DSA switch with index %d already exists in tree %d\n",
			ds->index, ds->dst->index);
		return -EEXIST;
	}

	if (ds->dst->last_switch < ds->index)
		ds->dst->last_switch = ds->index;

	return 0;
}
1594
1595 static int dsa_switch_touch_ports(struct dsa_switch *ds)
1596 {
1597         struct dsa_port *dp;
1598         int port;
1599
1600         for (port = 0; port < ds->num_ports; port++) {
1601                 dp = dsa_port_touch(ds, port);
1602                 if (!dp)
1603                         return -ENOMEM;
1604         }
1605
1606         return 0;
1607 }
1608
/* Parse a switch from device tree: tree membership first, then the
 * port structures, then each port's OF description.
 */
static int dsa_switch_parse_of(struct dsa_switch *ds, struct device_node *dn)
{
	int err;

	err = dsa_switch_parse_member_of(ds, dn);
	if (!err)
		err = dsa_switch_touch_ports(ds);
	if (!err)
		err = dsa_switch_parse_ports_of(ds, dn);

	return err;
}
1623
/* Classify a port from platform data: the names "cpu" and "dsa" are
 * reserved, anything else is the label of a user port.
 */
static int dsa_port_parse(struct dsa_port *dp, const char *name,
			  struct device *dev)
{
	if (!strcmp(name, "cpu")) {
		struct net_device *master;

		master = dsa_dev_to_net_device(dev);
		if (!master)
			return -EPROBE_DEFER;

		/* NOTE(review): the reference obtained above is dropped
		 * before master is used below — presumably the platform
		 * keeps the netdev alive for the lifetime of the switch;
		 * confirm.
		 */
		dev_put(master);

		return dsa_port_parse_cpu(dp, master, NULL);
	}

	if (!strcmp(name, "dsa"))
		return dsa_port_parse_dsa(dp);

	return dsa_port_parse_user(dp, name);
}
1644
1645 static int dsa_switch_parse_ports(struct dsa_switch *ds,
1646                                   struct dsa_chip_data *cd)
1647 {
1648         bool valid_name_found = false;
1649         struct dsa_port *dp;
1650         struct device *dev;
1651         const char *name;
1652         unsigned int i;
1653         int err;
1654
1655         for (i = 0; i < DSA_MAX_PORTS; i++) {
1656                 name = cd->port_names[i];
1657                 dev = cd->netdev[i];
1658                 dp = dsa_to_port(ds, i);
1659
1660                 if (!name)
1661                         continue;
1662
1663                 err = dsa_port_parse(dp, name, dev);
1664                 if (err)
1665                         return err;
1666
1667                 valid_name_found = true;
1668         }
1669
1670         if (!valid_name_found && i == DSA_MAX_PORTS)
1671                 return -EINVAL;
1672
1673         return 0;
1674 }
1675
1676 static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
1677 {
1678         int err;
1679
1680         ds->cd = cd;
1681
1682         /* We don't support interconnected switches nor multiple trees via
1683          * platform data, so this is the unique switch of the tree.
1684          */
1685         ds->index = 0;
1686         ds->dst = dsa_tree_touch(0);
1687         if (!ds->dst)
1688                 return -ENOMEM;
1689
1690         err = dsa_switch_touch_ports(ds);
1691         if (err)
1692                 return err;
1693
1694         return dsa_switch_parse_ports(ds, cd);
1695 }
1696
1697 static void dsa_switch_release_ports(struct dsa_switch *ds)
1698 {
1699         struct dsa_port *dp, *next;
1700
1701         dsa_switch_for_each_port_safe(dp, next, ds) {
1702                 list_del(&dp->list);
1703                 kfree(dp);
1704         }
1705 }
1706
/* Parse a switch from OF or platform data, attach it to its tree and
 * attempt to set the tree up (which only completes once the last
 * member switch of the tree has probed).
 */
static int dsa_switch_probe(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst;
	struct dsa_chip_data *pdata;
	struct device_node *np;
	int err;

	if (!ds->dev)
		return -ENODEV;

	pdata = ds->dev->platform_data;
	np = ds->dev->of_node;

	if (!ds->num_ports)
		return -EINVAL;

	/* An OF node takes precedence over platform data */
	if (np) {
		err = dsa_switch_parse_of(ds, np);
		if (err)
			dsa_switch_release_ports(ds);
	} else if (pdata) {
		err = dsa_switch_parse(ds, pdata);
		if (err)
			dsa_switch_release_ports(ds);
	} else {
		err = -ENODEV;
	}

	if (err)
		return err;

	/* Hold the tree across dsa_tree_setup(); the caller drops the
	 * reference it took here
	 */
	dst = ds->dst;
	dsa_tree_get(dst);
	err = dsa_tree_setup(dst);
	if (err) {
		dsa_switch_release_ports(ds);
		dsa_tree_put(dst);
	}

	return err;
}
1748
/**
 * dsa_register_switch - register a switch with the DSA core
 * @ds: switch to register; ds->dev and ds->num_ports must be populated
 *
 * Serialized against other registrations/removals by dsa2_mutex.
 * Drops the tree reference taken during probing once setup (or its
 * failure handling) is done. Returns 0 or a negative errno.
 */
int dsa_register_switch(struct dsa_switch *ds)
{
	int err;

	mutex_lock(&dsa2_mutex);
	err = dsa_switch_probe(ds);
	dsa_tree_put(ds->dst);
	mutex_unlock(&dsa2_mutex);

	return err;
}
EXPORT_SYMBOL_GPL(dsa_register_switch);
1761
/* Tear down the tree this switch belongs to, release this switch's
 * ports and drop the tree reference.
 */
static void dsa_switch_remove(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst = ds->dst;

	dsa_tree_teardown(dst);
	dsa_switch_release_ports(ds);
	dsa_tree_put(dst);
}
1770
/**
 * dsa_unregister_switch - unregister a switch from the DSA core
 * @ds: switch to remove
 *
 * Serialized against dsa_register_switch() by dsa2_mutex.
 */
void dsa_unregister_switch(struct dsa_switch *ds)
{
	mutex_lock(&dsa2_mutex);
	dsa_switch_remove(ds);
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_unregister_switch);
1778
/* If the DSA master chooses to unregister its net_device on .shutdown, DSA is
 * blocking that operation from completion, due to the dev_hold taken inside
 * netdev_upper_dev_link. Unlink the DSA slave interfaces from being uppers of
 * the DSA master, so that the system can reboot successfully.
 */
void dsa_switch_shutdown(struct dsa_switch *ds)
{
	struct net_device *master, *slave_dev;
	LIST_HEAD(unregister_list);
	struct dsa_port *dp;

	mutex_lock(&dsa2_mutex);
	rtnl_lock();

	dsa_switch_for_each_user_port(dp, ds) {
		master = dp->cpu_dp->master;
		slave_dev = dp->slave;

		netdev_upper_dev_unlink(master, slave_dev);
		/* Just unlinking ourselves as uppers of the master is not
		 * sufficient. When the master net device unregisters, that will
		 * also call dev_close, which we will catch as NETDEV_GOING_DOWN
		 * and trigger a dev_close on our own devices (dsa_slave_close).
		 * In turn, that will call dev_mc_unsync on the master's net
		 * device. If the master is also a DSA switch port, this will
		 * trigger dsa_slave_set_rx_mode which will call dev_mc_sync on
		 * its own master. Lockdep will complain about the fact that
		 * all cascaded masters have the same dsa_master_addr_list_lock_key,
		 * which it normally would not do if the cascaded masters would
		 * be in a proper upper/lower relationship, which we've just
		 * destroyed.
		 * To suppress the lockdep warnings, let's actually unregister
		 * the DSA slave interfaces too, to avoid the nonsensical
		 * multicast address list synchronization on shutdown.
		 */
		unregister_netdevice_queue(slave_dev, &unregister_list);
	}
	/* Commit all queued unregistrations in one batch */
	unregister_netdevice_many(&unregister_list);

	rtnl_unlock();
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_switch_shutdown);