net: bridge: vlan: add rtnetlink group and notify support
net/bridge/br_vlan.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <linux/kernel.h>
3 #include <linux/netdevice.h>
4 #include <linux/rtnetlink.h>
5 #include <linux/slab.h>
6 #include <net/switchdev.h>
7
8 #include "br_private.h"
9 #include "br_private_tunnel.h"
10
11 static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid);
12
13 static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
14                               const void *ptr)
15 {
16         const struct net_bridge_vlan *vle = ptr;
17         u16 vid = *(u16 *)arg->key;
18
19         return vle->vid != vid;
20 }
21
22 static const struct rhashtable_params br_vlan_rht_params = {
23         .head_offset = offsetof(struct net_bridge_vlan, vnode),
24         .key_offset = offsetof(struct net_bridge_vlan, vid),
25         .key_len = sizeof(u16),
26         .nelem_hint = 3,
27         .max_size = VLAN_N_VID,
28         .obj_cmpfn = br_vlan_cmp,
29         .automatic_shrinking = true,
30 };
31
32 static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
33 {
34         return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
35 }
36
37 static bool __vlan_add_pvid(struct net_bridge_vlan_group *vg, u16 vid)
38 {
39         if (vg->pvid == vid)
40                 return false;
41
42         smp_wmb();
43         vg->pvid = vid;
44
45         return true;
46 }
47
48 static bool __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
49 {
50         if (vg->pvid != vid)
51                 return false;
52
53         smp_wmb();
54         vg->pvid = 0;
55
56         return true;
57 }
58
59 /* return true if anything changed, false otherwise */
60 static bool __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
61 {
62         struct net_bridge_vlan_group *vg;
63         u16 old_flags = v->flags;
64         bool ret;
65
66         if (br_vlan_is_master(v))
67                 vg = br_vlan_group(v->br);
68         else
69                 vg = nbp_vlan_group(v->port);
70
71         if (flags & BRIDGE_VLAN_INFO_PVID)
72                 ret = __vlan_add_pvid(vg, v->vid);
73         else
74                 ret = __vlan_delete_pvid(vg, v->vid);
75
76         if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
77                 v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
78         else
79                 v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
80
81         return ret || !!(old_flags ^ v->flags);
82 }
83
84 static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
85                           struct net_bridge_vlan *v, u16 flags,
86                           struct netlink_ext_ack *extack)
87 {
88         int err;
89
90         /* Try switchdev op first. In case it is not supported, fall back to
91          * 8021q add.
92          */
93         err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
94         if (err == -EOPNOTSUPP)
95                 return vlan_vid_add(dev, br->vlan_proto, v->vid);
96         v->priv_flags |= BR_VLFLAG_ADDED_BY_SWITCHDEV;
97         return err;
98 }
99
100 static void __vlan_add_list(struct net_bridge_vlan *v)
101 {
102         struct net_bridge_vlan_group *vg;
103         struct list_head *headp, *hpos;
104         struct net_bridge_vlan *vent;
105
106         if (br_vlan_is_master(v))
107                 vg = br_vlan_group(v->br);
108         else
109                 vg = nbp_vlan_group(v->port);
110
111         headp = &vg->vlan_list;
112         list_for_each_prev(hpos, headp) {
113                 vent = list_entry(hpos, struct net_bridge_vlan, vlist);
114                 if (v->vid < vent->vid)
115                         continue;
116                 else
117                         break;
118         }
119         list_add_rcu(&v->vlist, hpos);
120 }
121
122 static void __vlan_del_list(struct net_bridge_vlan *v)
123 {
124         list_del_rcu(&v->vlist);
125 }
126
127 static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
128                           const struct net_bridge_vlan *v)
129 {
130         int err;
131
132         /* Try switchdev op first. In case it is not supported, fall back to
133          * 8021q del.
134          */
135         err = br_switchdev_port_vlan_del(dev, v->vid);
136         if (!(v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV))
137                 vlan_vid_del(dev, br->vlan_proto, v->vid);
138         return err == -EOPNOTSUPP ? 0 : err;
139 }
140
141 /* Returns the master vlan; if it doesn't exist it is created. In all cases
142  * a reference is taken to the master vlan before returning.
143  */
144 static struct net_bridge_vlan *
145 br_vlan_get_master(struct net_bridge *br, u16 vid,
146                    struct netlink_ext_ack *extack)
147 {
148         struct net_bridge_vlan_group *vg;
149         struct net_bridge_vlan *masterv;
150
151         vg = br_vlan_group(br);
152         masterv = br_vlan_find(vg, vid);
153         if (!masterv) {
154                 bool changed;
155
156                 /* missing global ctx, create it now */
157                 if (br_vlan_add(br, vid, 0, &changed, extack))
158                         return NULL;
159                 masterv = br_vlan_find(vg, vid);
160                 if (WARN_ON(!masterv))
161                         return NULL;
162                 refcount_set(&masterv->refcnt, 1);
163                 return masterv;
164         }
165         refcount_inc(&masterv->refcnt);
166
167         return masterv;
168 }
169
170 static void br_master_vlan_rcu_free(struct rcu_head *rcu)
171 {
172         struct net_bridge_vlan *v;
173
174         v = container_of(rcu, struct net_bridge_vlan, rcu);
175         WARN_ON(!br_vlan_is_master(v));
176         free_percpu(v->stats);
177         v->stats = NULL;
178         kfree(v);
179 }
180
181 static void br_vlan_put_master(struct net_bridge_vlan *masterv)
182 {
183         struct net_bridge_vlan_group *vg;
184
185         if (!br_vlan_is_master(masterv))
186                 return;
187
188         vg = br_vlan_group(masterv->br);
189         if (refcount_dec_and_test(&masterv->refcnt)) {
190                 rhashtable_remove_fast(&vg->vlan_hash,
191                                        &masterv->vnode, br_vlan_rht_params);
192                 __vlan_del_list(masterv);
193                 call_rcu(&masterv->rcu, br_master_vlan_rcu_free);
194         }
195 }
196
197 static void nbp_vlan_rcu_free(struct rcu_head *rcu)
198 {
199         struct net_bridge_vlan *v;
200
201         v = container_of(rcu, struct net_bridge_vlan, rcu);
202         WARN_ON(br_vlan_is_master(v));
203         /* if we had per-port stats configured then free them here */
204         if (v->priv_flags & BR_VLFLAG_PER_PORT_STATS)
205                 free_percpu(v->stats);
206         v->stats = NULL;
207         kfree(v);
208 }
209
210 /* This is the shared VLAN add function which works for both ports and bridge
211  * devices. There are four possible calls to this function in terms of the
212  * vlan entry type:
213  * 1. vlan is being added on a port (no master flags, global entry exists)
214  * 2. vlan is being added on a bridge (both master and brentry flags)
215  * 3. vlan is being added on a port, but a global entry didn't exist which
216  *    is being created right now (master flag set, brentry flag unset), the
217  *    global entry is used for global per-vlan features, but not for filtering
218  * 4. same as 3 but with both master and brentry flags set so the entry
219  *    will be used for filtering in both the port and the bridge
220  */
221 static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
222                       struct netlink_ext_ack *extack)
223 {
224         struct net_bridge_vlan *masterv = NULL;
225         struct net_bridge_port *p = NULL;
226         struct net_bridge_vlan_group *vg;
227         struct net_device *dev;
228         struct net_bridge *br;
229         int err;
230
231         if (br_vlan_is_master(v)) {
232                 br = v->br;
233                 dev = br->dev;
234                 vg = br_vlan_group(br);
235         } else {
236                 p = v->port;
237                 br = p->br;
238                 dev = p->dev;
239                 vg = nbp_vlan_group(p);
240         }
241
242         if (p) {
243                 /* Add VLAN to the device filter if it is supported.
244                  * This ensures tagged traffic enters the bridge when
245                  * promiscuous mode is disabled by br_manage_promisc().
246                  */
247                 err = __vlan_vid_add(dev, br, v, flags, extack);
248                 if (err)
249                         goto out;
250
251                 /* need to work on the master vlan too */
252                 if (flags & BRIDGE_VLAN_INFO_MASTER) {
253                         bool changed;
254
255                         err = br_vlan_add(br, v->vid,
256                                           flags | BRIDGE_VLAN_INFO_BRENTRY,
257                                           &changed, extack);
258                         if (err)
259                                 goto out_filt;
260                 }
261
262                 masterv = br_vlan_get_master(br, v->vid, extack);
263                 if (!masterv)
264                         goto out_filt;
265                 v->brvlan = masterv;
266                 if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) {
267                         v->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
268                         if (!v->stats) {
269                                 err = -ENOMEM;
270                                 goto out_filt;
271                         }
272                         v->priv_flags |= BR_VLFLAG_PER_PORT_STATS;
273                 } else {
274                         v->stats = masterv->stats;
275                 }
276         } else {
277                 err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
278                 if (err && err != -EOPNOTSUPP)
279                         goto out;
280         }
281
282         /* Add the dev mac and count the vlan only if it's usable */
283         if (br_vlan_should_use(v)) {
284                 err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
285                 if (err) {
286                         br_err(br, "failed to insert local address into bridge forwarding table\n");
287                         goto out_filt;
288                 }
289                 vg->num_vlans++;
290         }
291
292         err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
293                                             br_vlan_rht_params);
294         if (err)
295                 goto out_fdb_insert;
296
297         __vlan_add_list(v);
298         __vlan_add_flags(v, flags);
299
300         if (p)
301                 nbp_vlan_set_vlan_dev_state(p, v->vid);
302 out:
303         return err;
304
305 out_fdb_insert:
306         if (br_vlan_should_use(v)) {
307                 br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
308                 vg->num_vlans--;
309         }
310
311 out_filt:
312         if (p) {
313                 __vlan_vid_del(dev, br, v);
314                 if (masterv) {
315                         if (v->stats && masterv->stats != v->stats)
316                                 free_percpu(v->stats);
317                         v->stats = NULL;
318
319                         br_vlan_put_master(masterv);
320                         v->brvlan = NULL;
321                 }
322         } else {
323                 br_switchdev_port_vlan_del(dev, v->vid);
324         }
325
326         goto out;
327 }
328
329 static int __vlan_del(struct net_bridge_vlan *v)
330 {
331         struct net_bridge_vlan *masterv = v;
332         struct net_bridge_vlan_group *vg;
333         struct net_bridge_port *p = NULL;
334         int err = 0;
335
336         if (br_vlan_is_master(v)) {
337                 vg = br_vlan_group(v->br);
338         } else {
339                 p = v->port;
340                 vg = nbp_vlan_group(v->port);
341                 masterv = v->brvlan;
342         }
343
344         __vlan_delete_pvid(vg, v->vid);
345         if (p) {
346                 err = __vlan_vid_del(p->dev, p->br, v);
347                 if (err)
348                         goto out;
349         } else {
350                 err = br_switchdev_port_vlan_del(v->br->dev, v->vid);
351                 if (err && err != -EOPNOTSUPP)
352                         goto out;
353                 err = 0;
354         }
355
356         if (br_vlan_should_use(v)) {
357                 v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
358                 vg->num_vlans--;
359         }
360
361         if (masterv != v) {
362                 vlan_tunnel_info_del(vg, v);
363                 rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
364                                        br_vlan_rht_params);
365                 __vlan_del_list(v);
366                 nbp_vlan_set_vlan_dev_state(p, v->vid);
367                 call_rcu(&v->rcu, nbp_vlan_rcu_free);
368         }
369
370         br_vlan_put_master(masterv);
371 out:
372         return err;
373 }
374
375 static void __vlan_group_free(struct net_bridge_vlan_group *vg)
376 {
377         WARN_ON(!list_empty(&vg->vlan_list));
378         rhashtable_destroy(&vg->vlan_hash);
379         vlan_tunnel_deinit(vg);
380         kfree(vg);
381 }
382
383 static void __vlan_flush(struct net_bridge_vlan_group *vg)
384 {
385         struct net_bridge_vlan *vlan, *tmp;
386
387         __vlan_delete_pvid(vg, vg->pvid);
388         list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist)
389                 __vlan_del(vlan);
390 }
391
392 struct sk_buff *br_handle_vlan(struct net_bridge *br,
393                                const struct net_bridge_port *p,
394                                struct net_bridge_vlan_group *vg,
395                                struct sk_buff *skb)
396 {
397         struct br_vlan_stats *stats;
398         struct net_bridge_vlan *v;
399         u16 vid;
400
401         /* If this packet was not filtered at input, let it pass */
402         if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
403                 goto out;
404
405         /* At this point, we know that the frame was filtered and contains
406          * a valid vlan id.  If the vlan id has untagged flag set,
407          * send untagged; otherwise, send tagged.
408          */
409         br_vlan_get_tag(skb, &vid);
410         v = br_vlan_find(vg, vid);
411         /* Vlan entry must be configured at this point.  The
412          * only exception is when the bridge is set in promisc mode and the
413          * packet is destined for the bridge device.  In this case
414          * pass the packet as is.
415          */
416         if (!v || !br_vlan_should_use(v)) {
417                 if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
418                         goto out;
419                 } else {
420                         kfree_skb(skb);
421                         return NULL;
422                 }
423         }
424         if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
425                 stats = this_cpu_ptr(v->stats);
426                 u64_stats_update_begin(&stats->syncp);
427                 stats->tx_bytes += skb->len;
428                 stats->tx_packets++;
429                 u64_stats_update_end(&stats->syncp);
430         }
431
432         if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
433                 __vlan_hwaccel_clear_tag(skb);
434
435         if (p && (p->flags & BR_VLAN_TUNNEL) &&
436             br_handle_egress_vlan_tunnel(skb, v)) {
437                 kfree_skb(skb);
438                 return NULL;
439         }
440 out:
441         return skb;
442 }
443
444 /* Called under RCU */
445 static bool __allowed_ingress(const struct net_bridge *br,
446                               struct net_bridge_vlan_group *vg,
447                               struct sk_buff *skb, u16 *vid)
448 {
449         struct br_vlan_stats *stats;
450         struct net_bridge_vlan *v;
451         bool tagged;
452
453         BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
454         /* If vlan tx offload is disabled on the bridge device and the frame was
455          * sent from a vlan device on the bridge device, it does not have an
456          * HW accelerated vlan tag.
457          */
458         if (unlikely(!skb_vlan_tag_present(skb) &&
459                      skb->protocol == br->vlan_proto)) {
460                 skb = skb_vlan_untag(skb);
461                 if (unlikely(!skb))
462                         return false;
463         }
464
465         if (!br_vlan_get_tag(skb, vid)) {
466                 /* Tagged frame */
467                 if (skb->vlan_proto != br->vlan_proto) {
468                         /* Protocol-mismatch, empty out vlan_tci for new tag */
469                         skb_push(skb, ETH_HLEN);
470                         skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
471                                                         skb_vlan_tag_get(skb));
472                         if (unlikely(!skb))
473                                 return false;
474
475                         skb_pull(skb, ETH_HLEN);
476                         skb_reset_mac_len(skb);
477                         *vid = 0;
478                         tagged = false;
479                 } else {
480                         tagged = true;
481                 }
482         } else {
483                 /* Untagged frame */
484                 tagged = false;
485         }
486
487         if (!*vid) {
488                 u16 pvid = br_get_pvid(vg);
489
490                 /* Frame had a tag with VID 0 or did not have a tag.
491                  * See if pvid is set on this port.  That tells us which
492                  * vlan untagged or priority-tagged traffic belongs to.
493                  */
494                 if (!pvid)
495                         goto drop;
496
497                 /* PVID is set on this port.  Any untagged or priority-tagged
498                  * ingress frame is considered to belong to this vlan.
499                  */
500                 *vid = pvid;
501                 if (likely(!tagged))
502                         /* Untagged Frame. */
503                         __vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
504                 else
505                         /* Priority-tagged Frame.
506                          * At this point, we know that skb->vlan_tci VID
507                          * field was 0.
508                          * We update only VID field and preserve PCP field.
509                          */
510                         skb->vlan_tci |= pvid;
511
512                 /* if stats are disabled we can avoid the lookup */
513                 if (!br_opt_get(br, BROPT_VLAN_STATS_ENABLED))
514                         return true;
515         }
516         v = br_vlan_find(vg, *vid);
517         if (!v || !br_vlan_should_use(v))
518                 goto drop;
519
520         if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
521                 stats = this_cpu_ptr(v->stats);
522                 u64_stats_update_begin(&stats->syncp);
523                 stats->rx_bytes += skb->len;
524                 stats->rx_packets++;
525                 u64_stats_update_end(&stats->syncp);
526         }
527
528         return true;
529
530 drop:
531         kfree_skb(skb);
532         return false;
533 }
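
A side note on the priority-tagged branch above: the 16-bit TCI packs PCP (3 bits), DEI (1 bit) and the 12-bit VID, so OR-ing the PVID into a TCI whose VID field is zero fills in the VID while leaving the priority bits untouched. A minimal standalone sketch (plain userspace C, hypothetical values) of that invariant:

#include <assert.h>
#include <stdint.h>

/* Mirrors the "Priority-tagged Frame" case in __allowed_ingress(): the frame
 * arrived with VID 0, so skb->vlan_tci |= pvid only fills the VID field and
 * preserves PCP/DEI.
 */
#define EXAMPLE_VID_MASK 0x0fff		/* low 12 bits of the TCI */

int main(void)
{
	uint16_t tci = 5 << 13;		/* priority-tagged: PCP 5, VID 0 */
	uint16_t pvid = 100;		/* as returned by br_get_pvid() */

	assert((tci & EXAMPLE_VID_MASK) == 0);
	tci |= pvid;			/* mirrors: skb->vlan_tci |= pvid */
	assert((tci & EXAMPLE_VID_MASK) == 100);
	assert((tci >> 13) == 5);	/* priority preserved */
	return 0;
}
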
534
535 bool br_allowed_ingress(const struct net_bridge *br,
536                         struct net_bridge_vlan_group *vg, struct sk_buff *skb,
537                         u16 *vid)
538 {
539         /* If VLAN filtering is disabled on the bridge, all packets are
540          * permitted.
541          */
542         if (!br_opt_get(br, BROPT_VLAN_ENABLED)) {
543                 BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
544                 return true;
545         }
546
547         return __allowed_ingress(br, vg, skb, vid);
548 }
549
550 /* Called under RCU. */
551 bool br_allowed_egress(struct net_bridge_vlan_group *vg,
552                        const struct sk_buff *skb)
553 {
554         const struct net_bridge_vlan *v;
555         u16 vid;
556
557         /* If this packet was not filtered at input, let it pass */
558         if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
559                 return true;
560
561         br_vlan_get_tag(skb, &vid);
562         v = br_vlan_find(vg, vid);
563         if (v && br_vlan_should_use(v))
564                 return true;
565
566         return false;
567 }
568
569 /* Called under RCU */
570 bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
571 {
572         struct net_bridge_vlan_group *vg;
573         struct net_bridge *br = p->br;
574
575         /* If filtering was disabled at input, let it pass. */
576         if (!br_opt_get(br, BROPT_VLAN_ENABLED))
577                 return true;
578
579         vg = nbp_vlan_group_rcu(p);
580         if (!vg || !vg->num_vlans)
581                 return false;
582
583         if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
584                 *vid = 0;
585
586         if (!*vid) {
587                 *vid = br_get_pvid(vg);
588                 if (!*vid)
589                         return false;
590
591                 return true;
592         }
593
594         if (br_vlan_find(vg, *vid))
595                 return true;
596
597         return false;
598 }
599
600 static int br_vlan_add_existing(struct net_bridge *br,
601                                 struct net_bridge_vlan_group *vg,
602                                 struct net_bridge_vlan *vlan,
603                                 u16 flags, bool *changed,
604                                 struct netlink_ext_ack *extack)
605 {
606         int err;
607
608         err = br_switchdev_port_vlan_add(br->dev, vlan->vid, flags, extack);
609         if (err && err != -EOPNOTSUPP)
610                 return err;
611
612         if (!br_vlan_is_brentry(vlan)) {
613                 /* Trying to change flags of non-existent bridge vlan */
614                 if (!(flags & BRIDGE_VLAN_INFO_BRENTRY)) {
615                         err = -EINVAL;
616                         goto err_flags;
617                 }
618                 /* It was only kept for port vlans, now make it real */
619                 err = br_fdb_insert(br, NULL, br->dev->dev_addr,
620                                     vlan->vid);
621                 if (err) {
622                         br_err(br, "failed to insert local address into bridge forwarding table\n");
623                         goto err_fdb_insert;
624                 }
625
626                 refcount_inc(&vlan->refcnt);
627                 vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
628                 vg->num_vlans++;
629                 *changed = true;
630         }
631
632         if (__vlan_add_flags(vlan, flags))
633                 *changed = true;
634
635         return 0;
636
637 err_fdb_insert:
638 err_flags:
639         br_switchdev_port_vlan_del(br->dev, vlan->vid);
640         return err;
641 }
642
643 /* Must be protected by RTNL.
644  * Must be called with vid in range from 1 to 4094 inclusive.
645  * changed must be true only if the vlan was created or updated
646  */
647 int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags, bool *changed,
648                 struct netlink_ext_ack *extack)
649 {
650         struct net_bridge_vlan_group *vg;
651         struct net_bridge_vlan *vlan;
652         int ret;
653
654         ASSERT_RTNL();
655
656         *changed = false;
657         vg = br_vlan_group(br);
658         vlan = br_vlan_find(vg, vid);
659         if (vlan)
660                 return br_vlan_add_existing(br, vg, vlan, flags, changed,
661                                             extack);
662
663         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
664         if (!vlan)
665                 return -ENOMEM;
666
667         vlan->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
668         if (!vlan->stats) {
669                 kfree(vlan);
670                 return -ENOMEM;
671         }
672         vlan->vid = vid;
673         vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
674         vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
675         vlan->br = br;
676         if (flags & BRIDGE_VLAN_INFO_BRENTRY)
677                 refcount_set(&vlan->refcnt, 1);
678         ret = __vlan_add(vlan, flags, extack);
679         if (ret) {
680                 free_percpu(vlan->stats);
681                 kfree(vlan);
682         } else {
683                 *changed = true;
684         }
685
686         return ret;
687 }
688
689 /* Must be protected by RTNL.
690  * Must be called with vid in range from 1 to 4094 inclusive.
691  */
692 int br_vlan_delete(struct net_bridge *br, u16 vid)
693 {
694         struct net_bridge_vlan_group *vg;
695         struct net_bridge_vlan *v;
696
697         ASSERT_RTNL();
698
699         vg = br_vlan_group(br);
700         v = br_vlan_find(vg, vid);
701         if (!v || !br_vlan_is_brentry(v))
702                 return -ENOENT;
703
704         br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
705         br_fdb_delete_by_port(br, NULL, vid, 0);
706
707         vlan_tunnel_info_del(vg, v);
708
709         return __vlan_del(v);
710 }
711
712 void br_vlan_flush(struct net_bridge *br)
713 {
714         struct net_bridge_vlan_group *vg;
715
716         ASSERT_RTNL();
717
718         vg = br_vlan_group(br);
719         __vlan_flush(vg);
720         RCU_INIT_POINTER(br->vlgrp, NULL);
721         synchronize_rcu();
722         __vlan_group_free(vg);
723 }
724
725 struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
726 {
727         if (!vg)
728                 return NULL;
729
730         return br_vlan_lookup(&vg->vlan_hash, vid);
731 }
732
733 /* Must be protected by RTNL. */
734 static void recalculate_group_addr(struct net_bridge *br)
735 {
736         if (br_opt_get(br, BROPT_GROUP_ADDR_SET))
737                 return;
738
739         spin_lock_bh(&br->lock);
740         if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
741             br->vlan_proto == htons(ETH_P_8021Q)) {
742                 /* Bridge Group Address */
743                 br->group_addr[5] = 0x00;
744         } else { /* vlan_enabled && ETH_P_8021AD */
745                 /* Provider Bridge Group Address */
746                 br->group_addr[5] = 0x08;
747         }
748         spin_unlock_bh(&br->lock);
749 }
750
751 /* Must be protected by RTNL. */
752 void br_recalculate_fwd_mask(struct net_bridge *br)
753 {
754         if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
755             br->vlan_proto == htons(ETH_P_8021Q))
756                 br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
757         else /* vlan_enabled && ETH_P_8021AD */
758                 br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
759                                               ~(1u << br->group_addr[5]);
760 }
761
762 int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
763 {
764         struct switchdev_attr attr = {
765                 .orig_dev = br->dev,
766                 .id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
767                 .flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
768                 .u.vlan_filtering = val,
769         };
770         int err;
771
772         if (br_opt_get(br, BROPT_VLAN_ENABLED) == !!val)
773                 return 0;
774
775         err = switchdev_port_attr_set(br->dev, &attr);
776         if (err && err != -EOPNOTSUPP)
777                 return err;
778
779         br_opt_toggle(br, BROPT_VLAN_ENABLED, !!val);
780         br_manage_promisc(br);
781         recalculate_group_addr(br);
782         br_recalculate_fwd_mask(br);
783
784         return 0;
785 }
786
787 int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
788 {
789         return __br_vlan_filter_toggle(br, val);
790 }
791
792 bool br_vlan_enabled(const struct net_device *dev)
793 {
794         struct net_bridge *br = netdev_priv(dev);
795
796         return br_opt_get(br, BROPT_VLAN_ENABLED);
797 }
798 EXPORT_SYMBOL_GPL(br_vlan_enabled);
799
800 int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto)
801 {
802         struct net_bridge *br = netdev_priv(dev);
803
804         *p_proto = ntohs(br->vlan_proto);
805
806         return 0;
807 }
808 EXPORT_SYMBOL_GPL(br_vlan_get_proto);
809
810 int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
811 {
812         int err = 0;
813         struct net_bridge_port *p;
814         struct net_bridge_vlan *vlan;
815         struct net_bridge_vlan_group *vg;
816         __be16 oldproto;
817
818         if (br->vlan_proto == proto)
819                 return 0;
820
821         /* Add VLANs for the new proto to the device filter. */
822         list_for_each_entry(p, &br->port_list, list) {
823                 vg = nbp_vlan_group(p);
824                 list_for_each_entry(vlan, &vg->vlan_list, vlist) {
825                         err = vlan_vid_add(p->dev, proto, vlan->vid);
826                         if (err)
827                                 goto err_filt;
828                 }
829         }
830
831         oldproto = br->vlan_proto;
832         br->vlan_proto = proto;
833
834         recalculate_group_addr(br);
835         br_recalculate_fwd_mask(br);
836
837         /* Delete VLANs for the old proto from the device filter. */
838         list_for_each_entry(p, &br->port_list, list) {
839                 vg = nbp_vlan_group(p);
840                 list_for_each_entry(vlan, &vg->vlan_list, vlist)
841                         vlan_vid_del(p->dev, oldproto, vlan->vid);
842         }
843
844         return 0;
845
846 err_filt:
847         list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
848                 vlan_vid_del(p->dev, proto, vlan->vid);
849
850         list_for_each_entry_continue_reverse(p, &br->port_list, list) {
851                 vg = nbp_vlan_group(p);
852                 list_for_each_entry(vlan, &vg->vlan_list, vlist)
853                         vlan_vid_del(p->dev, proto, vlan->vid);
854         }
855
856         return err;
857 }
858
859 int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
860 {
861         if (val != ETH_P_8021Q && val != ETH_P_8021AD)
862                 return -EPROTONOSUPPORT;
863
864         return __br_vlan_set_proto(br, htons(val));
865 }
866
867 int br_vlan_set_stats(struct net_bridge *br, unsigned long val)
868 {
869         switch (val) {
870         case 0:
871         case 1:
872                 br_opt_toggle(br, BROPT_VLAN_STATS_ENABLED, !!val);
873                 break;
874         default:
875                 return -EINVAL;
876         }
877
878         return 0;
879 }
880
881 int br_vlan_set_stats_per_port(struct net_bridge *br, unsigned long val)
882 {
883         struct net_bridge_port *p;
884
885         /* only allow changing the option if there are no port vlans configured */
886         list_for_each_entry(p, &br->port_list, list) {
887                 struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
888
889                 if (vg->num_vlans)
890                         return -EBUSY;
891         }
892
893         switch (val) {
894         case 0:
895         case 1:
896                 br_opt_toggle(br, BROPT_VLAN_STATS_PER_PORT, !!val);
897                 break;
898         default:
899                 return -EINVAL;
900         }
901
902         return 0;
903 }
904
905 static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
906 {
907         struct net_bridge_vlan *v;
908
909         if (vid != vg->pvid)
910                 return false;
911
912         v = br_vlan_lookup(&vg->vlan_hash, vid);
913         if (v && br_vlan_should_use(v) &&
914             (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
915                 return true;
916
917         return false;
918 }
919
920 static void br_vlan_disable_default_pvid(struct net_bridge *br)
921 {
922         struct net_bridge_port *p;
923         u16 pvid = br->default_pvid;
924
925         /* Disable default_pvid on all ports where it is still
926          * configured.
927          */
928         if (vlan_default_pvid(br_vlan_group(br), pvid))
929                 br_vlan_delete(br, pvid);
930
931         list_for_each_entry(p, &br->port_list, list) {
932                 if (vlan_default_pvid(nbp_vlan_group(p), pvid))
933                         nbp_vlan_delete(p, pvid);
934         }
935
936         br->default_pvid = 0;
937 }
938
939 int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid,
940                                struct netlink_ext_ack *extack)
941 {
942         const struct net_bridge_vlan *pvent;
943         struct net_bridge_vlan_group *vg;
944         struct net_bridge_port *p;
945         unsigned long *changed;
946         bool vlchange;
947         u16 old_pvid;
948         int err = 0;
949
950         if (!pvid) {
951                 br_vlan_disable_default_pvid(br);
952                 return 0;
953         }
954
955         changed = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL);
956         if (!changed)
957                 return -ENOMEM;
958
959         old_pvid = br->default_pvid;
960
961         /* Update default_pvid config only if we do not conflict with
962          * user configuration.
963          */
964         vg = br_vlan_group(br);
965         pvent = br_vlan_find(vg, pvid);
966         if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
967             (!pvent || !br_vlan_should_use(pvent))) {
968                 err = br_vlan_add(br, pvid,
969                                   BRIDGE_VLAN_INFO_PVID |
970                                   BRIDGE_VLAN_INFO_UNTAGGED |
971                                   BRIDGE_VLAN_INFO_BRENTRY,
972                                   &vlchange, extack);
973                 if (err)
974                         goto out;
975                 br_vlan_delete(br, old_pvid);
976                 set_bit(0, changed);
977         }
978
979         list_for_each_entry(p, &br->port_list, list) {
980                 /* Update default_pvid config only if we do not conflict with
981                  * user configuration.
982                  */
983                 vg = nbp_vlan_group(p);
984                 if ((old_pvid &&
985                      !vlan_default_pvid(vg, old_pvid)) ||
986                     br_vlan_find(vg, pvid))
987                         continue;
988
989                 err = nbp_vlan_add(p, pvid,
990                                    BRIDGE_VLAN_INFO_PVID |
991                                    BRIDGE_VLAN_INFO_UNTAGGED,
992                                    &vlchange, extack);
993                 if (err)
994                         goto err_port;
995                 nbp_vlan_delete(p, old_pvid);
996                 set_bit(p->port_no, changed);
997         }
998
999         br->default_pvid = pvid;
1000
1001 out:
1002         bitmap_free(changed);
1003         return err;
1004
1005 err_port:
1006         list_for_each_entry_continue_reverse(p, &br->port_list, list) {
1007                 if (!test_bit(p->port_no, changed))
1008                         continue;
1009
1010                 if (old_pvid)
1011                         nbp_vlan_add(p, old_pvid,
1012                                      BRIDGE_VLAN_INFO_PVID |
1013                                      BRIDGE_VLAN_INFO_UNTAGGED,
1014                                      &vlchange, NULL);
1015                 nbp_vlan_delete(p, pvid);
1016         }
1017
1018         if (test_bit(0, changed)) {
1019                 if (old_pvid)
1020                         br_vlan_add(br, old_pvid,
1021                                     BRIDGE_VLAN_INFO_PVID |
1022                                     BRIDGE_VLAN_INFO_UNTAGGED |
1023                                     BRIDGE_VLAN_INFO_BRENTRY,
1024                                     &vlchange, NULL);
1025                 br_vlan_delete(br, pvid);
1026         }
1027         goto out;
1028 }
1029
1030 int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
1031 {
1032         u16 pvid = val;
1033         int err = 0;
1034
1035         if (val >= VLAN_VID_MASK)
1036                 return -EINVAL;
1037
1038         if (pvid == br->default_pvid)
1039                 goto out;
1040
1041         /* Only allow default pvid change when filtering is disabled */
1042         if (br_opt_get(br, BROPT_VLAN_ENABLED)) {
1043                 pr_info_once("Please disable vlan filtering to change default_pvid\n");
1044                 err = -EPERM;
1045                 goto out;
1046         }
1047         err = __br_vlan_set_default_pvid(br, pvid, NULL);
1048 out:
1049         return err;
1050 }
1051
1052 int br_vlan_init(struct net_bridge *br)
1053 {
1054         struct net_bridge_vlan_group *vg;
1055         int ret = -ENOMEM;
1056
1057         vg = kzalloc(sizeof(*vg), GFP_KERNEL);
1058         if (!vg)
1059                 goto out;
1060         ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
1061         if (ret)
1062                 goto err_rhtbl;
1063         ret = vlan_tunnel_init(vg);
1064         if (ret)
1065                 goto err_tunnel_init;
1066         INIT_LIST_HEAD(&vg->vlan_list);
1067         br->vlan_proto = htons(ETH_P_8021Q);
1068         br->default_pvid = 1;
1069         rcu_assign_pointer(br->vlgrp, vg);
1070
1071 out:
1072         return ret;
1073
1074 err_tunnel_init:
1075         rhashtable_destroy(&vg->vlan_hash);
1076 err_rhtbl:
1077         kfree(vg);
1078
1079         goto out;
1080 }
1081
1082 int nbp_vlan_init(struct net_bridge_port *p, struct netlink_ext_ack *extack)
1083 {
1084         struct switchdev_attr attr = {
1085                 .orig_dev = p->br->dev,
1086                 .id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
1087                 .flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
1088                 .u.vlan_filtering = br_opt_get(p->br, BROPT_VLAN_ENABLED),
1089         };
1090         struct net_bridge_vlan_group *vg;
1091         int ret = -ENOMEM;
1092
1093         vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
1094         if (!vg)
1095                 goto out;
1096
1097         ret = switchdev_port_attr_set(p->dev, &attr);
1098         if (ret && ret != -EOPNOTSUPP)
1099                 goto err_vlan_enabled;
1100
1101         ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
1102         if (ret)
1103                 goto err_rhtbl;
1104         ret = vlan_tunnel_init(vg);
1105         if (ret)
1106                 goto err_tunnel_init;
1107         INIT_LIST_HEAD(&vg->vlan_list);
1108         rcu_assign_pointer(p->vlgrp, vg);
1109         if (p->br->default_pvid) {
1110                 bool changed;
1111
1112                 ret = nbp_vlan_add(p, p->br->default_pvid,
1113                                    BRIDGE_VLAN_INFO_PVID |
1114                                    BRIDGE_VLAN_INFO_UNTAGGED,
1115                                    &changed, extack);
1116                 if (ret)
1117                         goto err_vlan_add;
1118         }
1119 out:
1120         return ret;
1121
1122 err_vlan_add:
1123         RCU_INIT_POINTER(p->vlgrp, NULL);
1124         synchronize_rcu();
1125         vlan_tunnel_deinit(vg);
1126 err_tunnel_init:
1127         rhashtable_destroy(&vg->vlan_hash);
1128 err_rhtbl:
1129 err_vlan_enabled:
1130         kfree(vg);
1131
1132         goto out;
1133 }
1134
1135 /* Must be protected by RTNL.
1136  * Must be called with vid in range from 1 to 4094 inclusive.
1137  * changed must be true only if the vlan was created or updated
1138  */
1139 int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags,
1140                  bool *changed, struct netlink_ext_ack *extack)
1141 {
1142         struct net_bridge_vlan *vlan;
1143         int ret;
1144
1145         ASSERT_RTNL();
1146
1147         *changed = false;
1148         vlan = br_vlan_find(nbp_vlan_group(port), vid);
1149         if (vlan) {
1150                 /* Pass the flags to the hardware bridge */
1151                 ret = br_switchdev_port_vlan_add(port->dev, vid, flags, extack);
1152                 if (ret && ret != -EOPNOTSUPP)
1153                         return ret;
1154                 *changed = __vlan_add_flags(vlan, flags);
1155
1156                 return 0;
1157         }
1158
1159         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
1160         if (!vlan)
1161                 return -ENOMEM;
1162
1163         vlan->vid = vid;
1164         vlan->port = port;
1165         ret = __vlan_add(vlan, flags, extack);
1166         if (ret)
1167                 kfree(vlan);
1168         else
1169                 *changed = true;
1170
1171         return ret;
1172 }
1173
1174 /* Must be protected by RTNL.
1175  * Must be called with vid in range from 1 to 4094 inclusive.
1176  */
1177 int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
1178 {
1179         struct net_bridge_vlan *v;
1180
1181         ASSERT_RTNL();
1182
1183         v = br_vlan_find(nbp_vlan_group(port), vid);
1184         if (!v)
1185                 return -ENOENT;
1186         br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
1187         br_fdb_delete_by_port(port->br, port, vid, 0);
1188
1189         return __vlan_del(v);
1190 }
1191
1192 void nbp_vlan_flush(struct net_bridge_port *port)
1193 {
1194         struct net_bridge_vlan_group *vg;
1195
1196         ASSERT_RTNL();
1197
1198         vg = nbp_vlan_group(port);
1199         __vlan_flush(vg);
1200         RCU_INIT_POINTER(port->vlgrp, NULL);
1201         synchronize_rcu();
1202         __vlan_group_free(vg);
1203 }
1204
1205 void br_vlan_get_stats(const struct net_bridge_vlan *v,
1206                        struct br_vlan_stats *stats)
1207 {
1208         int i;
1209
1210         memset(stats, 0, sizeof(*stats));
1211         for_each_possible_cpu(i) {
1212                 u64 rxpackets, rxbytes, txpackets, txbytes;
1213                 struct br_vlan_stats *cpu_stats;
1214                 unsigned int start;
1215
1216                 cpu_stats = per_cpu_ptr(v->stats, i);
1217                 do {
1218                         start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
1219                         rxpackets = cpu_stats->rx_packets;
1220                         rxbytes = cpu_stats->rx_bytes;
1221                         txbytes = cpu_stats->tx_bytes;
1222                         txpackets = cpu_stats->tx_packets;
1223                 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
1224
1225                 stats->rx_packets += rxpackets;
1226                 stats->rx_bytes += rxbytes;
1227                 stats->tx_bytes += txbytes;
1228                 stats->tx_packets += txpackets;
1229         }
1230 }
1231
1232 static int __br_vlan_get_pvid(const struct net_device *dev,
1233                               struct net_bridge_port *p, u16 *p_pvid)
1234 {
1235         struct net_bridge_vlan_group *vg;
1236
1237         if (p)
1238                 vg = nbp_vlan_group(p);
1239         else if (netif_is_bridge_master(dev))
1240                 vg = br_vlan_group(netdev_priv(dev));
1241         else
1242                 return -EINVAL;
1243
1244         *p_pvid = br_get_pvid(vg);
1245         return 0;
1246 }
1247
1248 int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
1249 {
1250         ASSERT_RTNL();
1251
1252         return __br_vlan_get_pvid(dev, br_port_get_check_rtnl(dev), p_pvid);
1253 }
1254 EXPORT_SYMBOL_GPL(br_vlan_get_pvid);
1255
1256 int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid)
1257 {
1258         return __br_vlan_get_pvid(dev, br_port_get_check_rcu(dev), p_pvid);
1259 }
1260 EXPORT_SYMBOL_GPL(br_vlan_get_pvid_rcu);
1261
1262 int br_vlan_get_info(const struct net_device *dev, u16 vid,
1263                      struct bridge_vlan_info *p_vinfo)
1264 {
1265         struct net_bridge_vlan_group *vg;
1266         struct net_bridge_vlan *v;
1267         struct net_bridge_port *p;
1268
1269         ASSERT_RTNL();
1270         p = br_port_get_check_rtnl(dev);
1271         if (p)
1272                 vg = nbp_vlan_group(p);
1273         else if (netif_is_bridge_master(dev))
1274                 vg = br_vlan_group(netdev_priv(dev));
1275         else
1276                 return -EINVAL;
1277
1278         v = br_vlan_find(vg, vid);
1279         if (!v)
1280                 return -ENOENT;
1281
1282         p_vinfo->vid = vid;
1283         p_vinfo->flags = v->flags;
1284         if (vid == br_get_pvid(vg))
1285                 p_vinfo->flags |= BRIDGE_VLAN_INFO_PVID;
1286         return 0;
1287 }
1288 EXPORT_SYMBOL_GPL(br_vlan_get_info);
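
The exported helpers above (br_vlan_enabled(), br_vlan_get_proto(), br_vlan_get_pvid()/br_vlan_get_pvid_rcu() and br_vlan_get_info()) are how code outside the bridge, typically switchdev drivers, queries bridge VLAN state. A hedged caller sketch using only the signatures defined in this file; the function name is hypothetical and rtnl is assumed to be held, as br_vlan_get_pvid() and br_vlan_get_info() require:

#include <linux/if_bridge.h>
#include <linux/netdevice.h>

/* Sketch only: query VLAN state of a bridge (or bridge port) device. */
static int example_query_bridge_vlan(struct net_device *br_dev, u16 vid)
{
	struct bridge_vlan_info vinfo;
	u16 pvid, proto;
	int err;

	if (!br_vlan_enabled(br_dev))
		return 0;				/* VLAN filtering is off */

	err = br_vlan_get_proto(br_dev, &proto);	/* host order, e.g. ETH_P_8021Q */
	if (err)
		return err;

	err = br_vlan_get_pvid(br_dev, &pvid);		/* 0 if no PVID is set */
	if (err)
		return err;

	err = br_vlan_get_info(br_dev, vid, &vinfo);	/* -ENOENT if vid not configured */
	if (err)
		return err;

	return !!(vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED);
}
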
1289
1290 static int br_vlan_is_bind_vlan_dev(const struct net_device *dev)
1291 {
1292         return is_vlan_dev(dev) &&
1293                 !!(vlan_dev_priv(dev)->flags & VLAN_FLAG_BRIDGE_BINDING);
1294 }
1295
1296 static int br_vlan_is_bind_vlan_dev_fn(struct net_device *dev,
1297                                        __always_unused void *data)
1298 {
1299         return br_vlan_is_bind_vlan_dev(dev);
1300 }
1301
1302 static bool br_vlan_has_upper_bind_vlan_dev(struct net_device *dev)
1303 {
1304         int found;
1305
1306         rcu_read_lock();
1307         found = netdev_walk_all_upper_dev_rcu(dev, br_vlan_is_bind_vlan_dev_fn,
1308                                               NULL);
1309         rcu_read_unlock();
1310
1311         return !!found;
1312 }
1313
1314 struct br_vlan_bind_walk_data {
1315         u16 vid;
1316         struct net_device *result;
1317 };
1318
1319 static int br_vlan_match_bind_vlan_dev_fn(struct net_device *dev,
1320                                           void *data_in)
1321 {
1322         struct br_vlan_bind_walk_data *data = data_in;
1323         int found = 0;
1324
1325         if (br_vlan_is_bind_vlan_dev(dev) &&
1326             vlan_dev_priv(dev)->vlan_id == data->vid) {
1327                 data->result = dev;
1328                 found = 1;
1329         }
1330
1331         return found;
1332 }
1333
1334 static struct net_device *
1335 br_vlan_get_upper_bind_vlan_dev(struct net_device *dev, u16 vid)
1336 {
1337         struct br_vlan_bind_walk_data data = {
1338                 .vid = vid,
1339         };
1340
1341         rcu_read_lock();
1342         netdev_walk_all_upper_dev_rcu(dev, br_vlan_match_bind_vlan_dev_fn,
1343                                       &data);
1344         rcu_read_unlock();
1345
1346         return data.result;
1347 }
1348
1349 static bool br_vlan_is_dev_up(const struct net_device *dev)
1350 {
1351         return !!(dev->flags & IFF_UP) && netif_oper_up(dev);
1352 }
1353
1354 static void br_vlan_set_vlan_dev_state(const struct net_bridge *br,
1355                                        struct net_device *vlan_dev)
1356 {
1357         u16 vid = vlan_dev_priv(vlan_dev)->vlan_id;
1358         struct net_bridge_vlan_group *vg;
1359         struct net_bridge_port *p;
1360         bool has_carrier = false;
1361
1362         if (!netif_carrier_ok(br->dev)) {
1363                 netif_carrier_off(vlan_dev);
1364                 return;
1365         }
1366
1367         list_for_each_entry(p, &br->port_list, list) {
1368                 vg = nbp_vlan_group(p);
1369                 if (br_vlan_find(vg, vid) && br_vlan_is_dev_up(p->dev)) {
1370                         has_carrier = true;
1371                         break;
1372                 }
1373         }
1374
1375         if (has_carrier)
1376                 netif_carrier_on(vlan_dev);
1377         else
1378                 netif_carrier_off(vlan_dev);
1379 }
1380
1381 static void br_vlan_set_all_vlan_dev_state(struct net_bridge_port *p)
1382 {
1383         struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
1384         struct net_bridge_vlan *vlan;
1385         struct net_device *vlan_dev;
1386
1387         list_for_each_entry(vlan, &vg->vlan_list, vlist) {
1388                 vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev,
1389                                                            vlan->vid);
1390                 if (vlan_dev) {
1391                         if (br_vlan_is_dev_up(p->dev)) {
1392                                 if (netif_carrier_ok(p->br->dev))
1393                                         netif_carrier_on(vlan_dev);
1394                         } else {
1395                                 br_vlan_set_vlan_dev_state(p->br, vlan_dev);
1396                         }
1397                 }
1398         }
1399 }
1400
1401 static void br_vlan_upper_change(struct net_device *dev,
1402                                  struct net_device *upper_dev,
1403                                  bool linking)
1404 {
1405         struct net_bridge *br = netdev_priv(dev);
1406
1407         if (!br_vlan_is_bind_vlan_dev(upper_dev))
1408                 return;
1409
1410         if (linking) {
1411                 br_vlan_set_vlan_dev_state(br, upper_dev);
1412                 br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING, true);
1413         } else {
1414                 br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING,
1415                               br_vlan_has_upper_bind_vlan_dev(dev));
1416         }
1417 }
1418
1419 struct br_vlan_link_state_walk_data {
1420         struct net_bridge *br;
1421 };
1422
1423 static int br_vlan_link_state_change_fn(struct net_device *vlan_dev,
1424                                         void *data_in)
1425 {
1426         struct br_vlan_link_state_walk_data *data = data_in;
1427
1428         if (br_vlan_is_bind_vlan_dev(vlan_dev))
1429                 br_vlan_set_vlan_dev_state(data->br, vlan_dev);
1430
1431         return 0;
1432 }
1433
1434 static void br_vlan_link_state_change(struct net_device *dev,
1435                                       struct net_bridge *br)
1436 {
1437         struct br_vlan_link_state_walk_data data = {
1438                 .br = br
1439         };
1440
1441         rcu_read_lock();
1442         netdev_walk_all_upper_dev_rcu(dev, br_vlan_link_state_change_fn,
1443                                       &data);
1444         rcu_read_unlock();
1445 }
1446
1447 /* Must be protected by RTNL. */
1448 static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid)
1449 {
1450         struct net_device *vlan_dev;
1451
1452         if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
1453                 return;
1454
1455         vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev, vid);
1456         if (vlan_dev)
1457                 br_vlan_set_vlan_dev_state(p->br, vlan_dev);
1458 }
1459
1460 /* Must be protected by RTNL. */
1461 int br_vlan_bridge_event(struct net_device *dev, unsigned long event, void *ptr)
1462 {
1463         struct netdev_notifier_changeupper_info *info;
1464         struct net_bridge *br = netdev_priv(dev);
1465         bool changed;
1466         int ret = 0;
1467
1468         switch (event) {
1469         case NETDEV_REGISTER:
1470                 ret = br_vlan_add(br, br->default_pvid,
1471                                   BRIDGE_VLAN_INFO_PVID |
1472                                   BRIDGE_VLAN_INFO_UNTAGGED |
1473                                   BRIDGE_VLAN_INFO_BRENTRY, &changed, NULL);
1474                 break;
1475         case NETDEV_UNREGISTER:
1476                 br_vlan_delete(br, br->default_pvid);
1477                 break;
1478         case NETDEV_CHANGEUPPER:
1479                 info = ptr;
1480                 br_vlan_upper_change(dev, info->upper_dev, info->linking);
1481                 break;
1482
1483         case NETDEV_CHANGE:
1484         case NETDEV_UP:
1485                 if (!br_opt_get(br, BROPT_VLAN_BRIDGE_BINDING))
1486                         break;
1487                 br_vlan_link_state_change(dev, br);
1488                 break;
1489         }
1490
1491         return ret;
1492 }
1493
1494 /* Must be protected by RTNL. */
1495 void br_vlan_port_event(struct net_bridge_port *p, unsigned long event)
1496 {
1497         if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
1498                 return;
1499
1500         switch (event) {
1501         case NETDEV_CHANGE:
1502         case NETDEV_DOWN:
1503         case NETDEV_UP:
1504                 br_vlan_set_all_vlan_dev_state(p);
1505                 break;
1506         }
1507 }
1508
1509 static bool br_vlan_fill_vids(struct sk_buff *skb, u16 vid, u16 vid_range,
1510                               u16 flags)
1511 {
1512         struct bridge_vlan_info info;
1513         struct nlattr *nest;
1514
1515         nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY);
1516         if (!nest)
1517                 return false;
1518
1519         memset(&info, 0, sizeof(info));
1520         info.vid = vid;
1521         if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
1522                 info.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
1523         if (flags & BRIDGE_VLAN_INFO_PVID)
1524                 info.flags |= BRIDGE_VLAN_INFO_PVID;
1525
1526         if (nla_put(skb, BRIDGE_VLANDB_ENTRY_INFO, sizeof(info), &info))
1527                 goto out_err;
1528
1529         if (vid_range && vid < vid_range &&
1530             !(flags & BRIDGE_VLAN_INFO_PVID) &&
1531             nla_put_u16(skb, BRIDGE_VLANDB_ENTRY_RANGE, vid_range))
1532                 goto out_err;
1533
1534         nla_nest_end(skb, nest);
1535
1536         return true;
1537
1538 out_err:
1539         nla_nest_cancel(skb, nest);
1540         return false;
1541 }
1542
1543 static size_t rtnl_vlan_nlmsg_size(void)
1544 {
1545         return NLMSG_ALIGN(sizeof(struct br_vlan_msg))
1546                 + nla_total_size(0) /* BRIDGE_VLANDB_ENTRY */
1547                 + nla_total_size(sizeof(u16)) /* BRIDGE_VLANDB_ENTRY_RANGE */
1548                 + nla_total_size(sizeof(struct bridge_vlan_info)); /* BRIDGE_VLANDB_ENTRY_INFO */
1549 }
1550
1551 void br_vlan_notify(const struct net_bridge *br,
1552                     const struct net_bridge_port *p,
1553                     u16 vid, u16 vid_range,
1554                     int cmd)
1555 {
1556         struct net_bridge_vlan_group *vg;
1557         struct net_bridge_vlan *v;
1558         struct br_vlan_msg *bvm;
1559         struct nlmsghdr *nlh;
1560         struct sk_buff *skb;
1561         int err = -ENOBUFS;
1562         struct net *net;
1563         u16 flags = 0;
1564         int ifindex;
1565
1566         /* right now notifications are done only with rtnl held */
1567         ASSERT_RTNL();
1568
1569         if (p) {
1570                 ifindex = p->dev->ifindex;
1571                 vg = nbp_vlan_group(p);
1572                 net = dev_net(p->dev);
1573         } else {
1574                 ifindex = br->dev->ifindex;
1575                 vg = br_vlan_group(br);
1576                 net = dev_net(br->dev);
1577         }
1578
1579         skb = nlmsg_new(rtnl_vlan_nlmsg_size(), GFP_KERNEL);
1580         if (!skb)
1581                 goto out_err;
1582
1583         err = -EMSGSIZE;
1584         nlh = nlmsg_put(skb, 0, 0, cmd, sizeof(*bvm), 0);
1585         if (!nlh)
1586                 goto out_err;
1587         bvm = nlmsg_data(nlh);
1588         memset(bvm, 0, sizeof(*bvm));
1589         bvm->family = AF_BRIDGE;
1590         bvm->ifindex = ifindex;
1591
1592         switch (cmd) {
1593         case RTM_NEWVLAN:
1594                 /* need to find the vlan due to flags/options */
1595                 v = br_vlan_find(vg, vid);
1596                 if (!v || !br_vlan_should_use(v))
1597                         goto out_kfree;
1598
1599                 flags = v->flags;
1600                 if (br_get_pvid(vg) == v->vid)
1601                         flags |= BRIDGE_VLAN_INFO_PVID;
1602                 break;
1603         case RTM_DELVLAN:
1604                 break;
1605         default:
1606                 goto out_kfree;
1607         }
1608
1609         if (!br_vlan_fill_vids(skb, vid, vid_range, flags))
1610                 goto out_err;
1611
1612         nlmsg_end(skb, nlh);
1613         rtnl_notify(skb, net, 0, RTNLGRP_BRVLAN, NULL, GFP_KERNEL);
1614         return;
1615
1616 out_err:
1617         rtnl_set_sk_err(net, RTNLGRP_BRVLAN, err);
1618 out_kfree:
1619         kfree_skb(skb);
1620 }
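
br_vlan_notify() above is what actually delivers the RTM_NEWVLAN/RTM_DELVLAN messages to the RTNLGRP_BRVLAN multicast group added by this change. A minimal userspace listener sketch, assuming the uapi definitions introduced alongside this code (RTNLGRP_BRVLAN, RTM_NEWVLAN/RTM_DELVLAN, struct br_vlan_msg) are present in the installed kernel headers; attribute parsing of the BRIDGE_VLANDB_ENTRY nests is left out for brevity:

#include <stdio.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_bridge.h>

#ifndef SOL_NETLINK
#define SOL_NETLINK 270			/* from linux/socket.h */
#endif

int main(void)
{
	unsigned int group = RTNLGRP_BRVLAN;	/* group used by rtnl_notify() above */
	struct sockaddr_nl addr = { .nl_family = AF_NETLINK };
	char buf[8192];
	int fd;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;
	if (setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
		       &group, sizeof(group)) < 0)
		return 1;

	for (;;) {
		ssize_t len = recv(fd, buf, sizeof(buf), 0);
		struct nlmsghdr *nlh;

		if (len <= 0)
			break;
		for (nlh = (struct nlmsghdr *)buf; NLMSG_OK(nlh, len);
		     nlh = NLMSG_NEXT(nlh, len)) {
			struct br_vlan_msg *bvm = NLMSG_DATA(nlh);

			if (nlh->nlmsg_type == RTM_NEWVLAN)
				printf("RTM_NEWVLAN on ifindex %u\n", bvm->ifindex);
			else if (nlh->nlmsg_type == RTM_DELVLAN)
				printf("RTM_DELVLAN on ifindex %u\n", bvm->ifindex);
		}
	}
	return 0;
}
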
1621
1622 /* check if v_curr can enter a range ending in range_end */
1623 static bool br_vlan_can_enter_range(const struct net_bridge_vlan *v_curr,
1624                                     const struct net_bridge_vlan *range_end)
1625 {
1626         return v_curr->vid - range_end->vid == 1 &&
1627                range_end->flags == v_curr->flags;
1628 }
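
br_vlan_can_enter_range() is the rule that lets the dump code below compress consecutive VLANs with identical flags into one entry plus a BRIDGE_VLANDB_ENTRY_RANGE attribute. A tiny standalone illustration with made-up values:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct ent { uint16_t vid; uint16_t flags; };

/* Same condition as br_vlan_can_enter_range(): consecutive vid, equal flags. */
static bool can_enter_range(const struct ent *cur, const struct ent *end)
{
	return cur->vid - end->vid == 1 && cur->flags == end->flags;
}

int main(void)
{
	struct ent v10 = { 10, 0 }, v11 = { 11, 0 }, v12 = { 12, 0 };
	struct ent v13 = { 13, 4 };		/* different flags break the range */

	assert(can_enter_range(&v11, &v10));	/* range grows to 10..11 */
	assert(can_enter_range(&v12, &v11));	/* range grows to 10..12 */
	assert(!can_enter_range(&v13, &v12));	/* 13 starts a new range */
	return 0;
}
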
1629
1630 static int br_vlan_dump_dev(const struct net_device *dev,
1631                             struct sk_buff *skb,
1632                             struct netlink_callback *cb)
1633 {
1634         struct net_bridge_vlan *v, *range_start = NULL, *range_end = NULL;
1635         struct net_bridge_vlan_group *vg;
1636         int idx = 0, s_idx = cb->args[1];
1637         struct nlmsghdr *nlh = NULL;
1638         struct net_bridge_port *p;
1639         struct br_vlan_msg *bvm;
1640         struct net_bridge *br;
1641         int err = 0;
1642         u16 pvid;
1643
1644         if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev))
1645                 return -EINVAL;
1646
1647         if (netif_is_bridge_master(dev)) {
1648                 br = netdev_priv(dev);
1649                 vg = br_vlan_group_rcu(br);
1650                 p = NULL;
1651         } else {
1652                 p = br_port_get_rcu(dev);
1653                 if (WARN_ON(!p))
1654                         return -EINVAL;
1655                 vg = nbp_vlan_group_rcu(p);
1656                 br = p->br;
1657         }
1658
1659         if (!vg)
1660                 return 0;
1661
1662         nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
1663                         RTM_NEWVLAN, sizeof(*bvm), NLM_F_MULTI);
1664         if (!nlh)
1665                 return -EMSGSIZE;
1666         bvm = nlmsg_data(nlh);
1667         memset(bvm, 0, sizeof(*bvm));
1668         bvm->family = PF_BRIDGE;
1669         bvm->ifindex = dev->ifindex;
1670         pvid = br_get_pvid(vg);
1671
1672         /* idx must stay at the range's beginning until the range is filled in */
1673         list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
1674                 if (!br_vlan_should_use(v))
1675                         continue;
1676                 if (idx < s_idx) {
1677                         idx++;
1678                         continue;
1679                 }
1680
1681                 if (!range_start) {
1682                         range_start = v;
1683                         range_end = v;
1684                         continue;
1685                 }
1686
1687                 if (v->vid == pvid || !br_vlan_can_enter_range(v, range_end)) {
1688                         u16 flags = br_vlan_flags(range_start, pvid);
1689
1690                         if (!br_vlan_fill_vids(skb, range_start->vid,
1691                                                range_end->vid, flags)) {
1692                                 err = -EMSGSIZE;
1693                                 break;
1694                         }
1695                         /* advance idx past the vlans just filled in */
1696                         idx += range_end->vid - range_start->vid + 1;
1697
1698                         range_start = v;
1699                 }
1700                 range_end = v;
1701         }
1702
1703         /* err will be 0 and range_start will be set in 3 cases here:
1704          * - first vlan (range_start == range_end)
1705          * - last vlan (range_start == range_end, not in range)
1706          * - last vlan range (range_start != range_end, in range)
1707          */
1708         if (!err && range_start &&
1709             !br_vlan_fill_vids(skb, range_start->vid, range_end->vid,
1710                                br_vlan_flags(range_start, pvid)))
1711                 err = -EMSGSIZE;
1712
1713         cb->args[1] = err ? idx : 0;
1714
1715         nlmsg_end(skb, nlh);
1716
1717         return err;
1718 }
1719
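/* RTM_GETVLAN dump handler.  A non-zero ifindex restricts the dump to
 * that device, otherwise every netdev in the namespace is walked; the
 * device position is kept in cb->args[0] and the in-device vlan
 * position in cb->args[1] (see br_vlan_dump_dev()).
 */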
1720 static int br_vlan_rtm_dump(struct sk_buff *skb, struct netlink_callback *cb)
1721 {
1722         int idx = 0, err = 0, s_idx = cb->args[0];
1723         struct net *net = sock_net(skb->sk);
1724         struct br_vlan_msg *bvm;
1725         struct net_device *dev;
1726
1727         err = nlmsg_parse(cb->nlh, sizeof(*bvm), NULL, 0, NULL, cb->extack);
1728         if (err < 0)
1729                 return err;
1730
1731         bvm = nlmsg_data(cb->nlh);
1732
1733         rcu_read_lock();
1734         if (bvm->ifindex) {
1735                 dev = dev_get_by_index_rcu(net, bvm->ifindex);
1736                 if (!dev) {
1737                         err = -ENODEV;
1738                         goto out_err;
1739                 }
1740                 err = br_vlan_dump_dev(dev, skb, cb);
1741                 if (err && err != -EMSGSIZE)
1742                         goto out_err;
1743         } else {
1744                 for_each_netdev_rcu(net, dev) {
1745                         if (idx < s_idx)
1746                                 goto skip;
1747
1748                         err = br_vlan_dump_dev(dev, skb, cb);
1749                         if (err == -EMSGSIZE)
1750                                 break;
1751 skip:
1752                         idx++;
1753                 }
1754         }
1755         cb->args[0] = idx;
1756         rcu_read_unlock();
1757
1758         return skb->len;
1759
1760 out_err:
1761         rcu_read_unlock();
1762
1763         return err;
1764 }
1765
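/* Policy for the attributes nested inside BRIDGE_VLANDB_ENTRY.  The
 * messages handled below are laid out roughly as:
 *
 *   struct br_vlan_msg              (family, ifindex)
 *     BRIDGE_VLANDB_ENTRY           nested, may be repeated
 *       BRIDGE_VLANDB_ENTRY_INFO    struct bridge_vlan_info (vid, flags)
 *       BRIDGE_VLANDB_ENTRY_RANGE   u16, optional end of a vid range
 */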
1766 static const struct nla_policy br_vlan_db_policy[BRIDGE_VLANDB_ENTRY_MAX + 1] = {
1767         [BRIDGE_VLANDB_ENTRY_INFO]      = { .type = NLA_EXACT_LEN,
1768                                             .len = sizeof(struct bridge_vlan_info) },
1769         [BRIDGE_VLANDB_ENTRY_RANGE]     = { .type = NLA_U16 },
1770 };
1771
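/* Process one BRIDGE_VLANDB_ENTRY attribute: parse the mandatory entry
 * info, optionally widen it into a vid range via
 * BRIDGE_VLANDB_ENTRY_RANGE, map RTM_NEWVLAN/RTM_DELVLAN onto the
 * existing RTM_SETLINK/RTM_DELLINK paths and let br_process_vlan_info()
 * do the work, notifying if anything changed.
 */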
1772 static int br_vlan_rtm_process_one(struct net_device *dev,
1773                                    const struct nlattr *attr,
1774                                    int cmd, struct netlink_ext_ack *extack)
1775 {
1776         struct bridge_vlan_info *vinfo, vrange_end, *vinfo_last = NULL;
1777         struct nlattr *tb[BRIDGE_VLANDB_ENTRY_MAX + 1];
1778         struct net_bridge_vlan_group *vg;
1779         struct net_bridge_port *p = NULL;
1780         int err = 0, cmdmap = 0;
1781         struct net_bridge *br;
1782         bool changed = false;
1783
1784         if (netif_is_bridge_master(dev)) {
1785                 br = netdev_priv(dev);
1786                 vg = br_vlan_group(br);
1787         } else {
1788                 p = br_port_get_rtnl(dev);
1789                 if (WARN_ON(!p))
1790                         return -ENODEV;
1791                 br = p->br;
1792                 vg = nbp_vlan_group(p);
1793         }
1794
1795         if (WARN_ON(!vg))
1796                 return -ENODEV;
1797
1798         err = nla_parse_nested(tb, BRIDGE_VLANDB_ENTRY_MAX, attr,
1799                                br_vlan_db_policy, extack);
1800         if (err)
1801                 return err;
1802
1803         if (!tb[BRIDGE_VLANDB_ENTRY_INFO]) {
1804                 NL_SET_ERR_MSG_MOD(extack, "Missing vlan entry info");
1805                 return -EINVAL;
1806         }
1807         memset(&vrange_end, 0, sizeof(vrange_end));
1808
1809         vinfo = nla_data(tb[BRIDGE_VLANDB_ENTRY_INFO]);
1810         if (vinfo->flags & (BRIDGE_VLAN_INFO_RANGE_BEGIN |
1811                             BRIDGE_VLAN_INFO_RANGE_END)) {
1812                 NL_SET_ERR_MSG_MOD(extack, "Old-style vlan ranges are not allowed when using RTM vlan calls");
1813                 return -EINVAL;
1814         }
1815         if (!br_vlan_valid_id(vinfo->vid, extack))
1816                 return -EINVAL;
1817
1818         if (tb[BRIDGE_VLANDB_ENTRY_RANGE]) {
1819                 vrange_end.vid = nla_get_u16(tb[BRIDGE_VLANDB_ENTRY_RANGE]);
1820                 /* keep the user's flags as-is; RANGE_BEGIN is only set below */
1821                 vrange_end.flags = BRIDGE_VLAN_INFO_RANGE_END | vinfo->flags;
1822                 vinfo->flags |= BRIDGE_VLAN_INFO_RANGE_BEGIN;
1823
1824                 /* vinfo_last is the range start, vinfo the range end */
1825                 vinfo_last = vinfo;
1826                 vinfo = &vrange_end;
1827
1828                 if (!br_vlan_valid_id(vinfo->vid, extack) ||
1829                     !br_vlan_valid_range(vinfo, vinfo_last, extack))
1830                         return -EINVAL;
1831         }
1832
1833         switch (cmd) {
1834         case RTM_NEWVLAN:
1835                 cmdmap = RTM_SETLINK;
1836                 break;
1837         case RTM_DELVLAN:
1838                 cmdmap = RTM_DELLINK;
1839                 break;
1840         }
1841
1842         err = br_process_vlan_info(br, p, cmdmap, vinfo, &vinfo_last, &changed,
1843                                    extack);
1844         if (changed)
1845                 br_ifinfo_notify(cmdmap, br, p);
1846
1847         return err;
1848 }
1849
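/* Top-level RTM_NEWVLAN/RTM_DELVLAN handler: resolve the target bridge
 * or bridge port from the ifindex in the header, then process every
 * BRIDGE_VLANDB_ENTRY attribute in the message.  A message without any
 * vlan entries is rejected.
 */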
1850 static int br_vlan_rtm_process(struct sk_buff *skb, struct nlmsghdr *nlh,
1851                                struct netlink_ext_ack *extack)
1852 {
1853         struct net *net = sock_net(skb->sk);
1854         struct br_vlan_msg *bvm;
1855         struct net_device *dev;
1856         struct nlattr *attr;
1857         int err, vlans = 0;
1858         int rem;
1859
1860         /* validate the header and the trailing attribute stream */
1861         err = nlmsg_parse(nlh, sizeof(*bvm), NULL, BRIDGE_VLANDB_MAX, NULL,
1862                           extack);
1863         if (err < 0)
1864                 return err;
1865
1866         bvm = nlmsg_data(nlh);
1867         dev = __dev_get_by_index(net, bvm->ifindex);
1868         if (!dev)
1869                 return -ENODEV;
1870
1871         if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev)) {
1872                 NL_SET_ERR_MSG_MOD(extack, "The device is not a valid bridge or bridge port");
1873                 return -EINVAL;
1874         }
1875
1876         nlmsg_for_each_attr(attr, nlh, sizeof(*bvm), rem) {
1877                 if (nla_type(attr) != BRIDGE_VLANDB_ENTRY)
1878                         continue;
1879
1880                 vlans++;
1881                 err = br_vlan_rtm_process_one(dev, attr, nlh->nlmsg_type,
1882                                               extack);
1883                 if (err)
1884                         break;
1885         }
1886         if (!vlans) {
1887                 NL_SET_ERR_MSG_MOD(extack, "No vlans found to process");
1888                 err = -EINVAL;
1889         }
1890
1891         return err;
1892 }
1893
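/* Register the PF_BRIDGE vlan handlers: RTM_GETVLAN is dump-only while
 * RTM_NEWVLAN and RTM_DELVLAN share a doit handler that dispatches on
 * nlmsg_type.  Listeners subscribed to RTNLGRP_BRVLAN (e.g. a "bridge
 * monitor"-style tool) receive the notifications from br_vlan_notify().
 */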
1894 void br_vlan_rtnl_init(void)
1895 {
1896         rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETVLAN, NULL,
1897                              br_vlan_rtm_dump, 0);
1898         rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWVLAN,
1899                              br_vlan_rtm_process, NULL, 0);
1900         rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELVLAN,
1901                              br_vlan_rtm_process, NULL, 0);
1902 }
1903
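/* Tear down the handlers registered in br_vlan_rtnl_init(). */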
1904 void br_vlan_rtnl_uninit(void)
1905 {
1906         rtnl_unregister(PF_BRIDGE, RTM_GETVLAN);
1907         rtnl_unregister(PF_BRIDGE, RTM_NEWVLAN);
1908         rtnl_unregister(PF_BRIDGE, RTM_DELVLAN);
1909 }