net/openvswitch/datapath.c  (tomoyo/tomoyo-test1.git, commit 8cab3435d8dad6700dc1d6d9b6f2b330e3ff34ba)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2007-2014 Nicira, Inc.
4  */
5
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
8 #include <linux/init.h>
9 #include <linux/module.h>
10 #include <linux/if_arp.h>
11 #include <linux/if_vlan.h>
12 #include <linux/in.h>
13 #include <linux/ip.h>
14 #include <linux/jhash.h>
15 #include <linux/delay.h>
16 #include <linux/time.h>
17 #include <linux/etherdevice.h>
18 #include <linux/genetlink.h>
19 #include <linux/kernel.h>
20 #include <linux/kthread.h>
21 #include <linux/mutex.h>
22 #include <linux/percpu.h>
23 #include <linux/rcupdate.h>
24 #include <linux/tcp.h>
25 #include <linux/udp.h>
26 #include <linux/ethtool.h>
27 #include <linux/wait.h>
28 #include <asm/div64.h>
29 #include <linux/highmem.h>
30 #include <linux/netfilter_bridge.h>
31 #include <linux/netfilter_ipv4.h>
32 #include <linux/inetdevice.h>
33 #include <linux/list.h>
34 #include <linux/openvswitch.h>
35 #include <linux/rculist.h>
36 #include <linux/dmi.h>
37 #include <net/genetlink.h>
38 #include <net/net_namespace.h>
39 #include <net/netns/generic.h>
40
41 #include "datapath.h"
42 #include "flow.h"
43 #include "flow_table.h"
44 #include "flow_netlink.h"
45 #include "meter.h"
46 #include "vport-internal_dev.h"
47 #include "vport-netdev.h"
48
49 unsigned int ovs_net_id __read_mostly;
50
51 static struct genl_family dp_packet_genl_family;
52 static struct genl_family dp_flow_genl_family;
53 static struct genl_family dp_datapath_genl_family;
54
55 static const struct nla_policy flow_policy[];
56
57 static const struct genl_multicast_group ovs_dp_flow_multicast_group = {
58         .name = OVS_FLOW_MCGROUP,
59 };
60
61 static const struct genl_multicast_group ovs_dp_datapath_multicast_group = {
62         .name = OVS_DATAPATH_MCGROUP,
63 };
64
65 static const struct genl_multicast_group ovs_dp_vport_multicast_group = {
66         .name = OVS_VPORT_MCGROUP,
67 };
68
69 /* Check whether we need to build a reply message.
70  * OVS userspace sets the NLM_F_ECHO flag if it needs the reply. */
71 static bool ovs_must_notify(struct genl_family *family, struct genl_info *info,
72                             unsigned int group)
73 {
74         return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
75                genl_has_listeners(family, genl_info_net(info), group);
76 }
77
78 static void ovs_notify(struct genl_family *family,
79                        struct sk_buff *skb, struct genl_info *info)
80 {
81         genl_notify(family, skb, info, 0, GFP_KERNEL);
82 }
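/*
 * A note on the two helpers above: ovs_must_notify() lets command handlers
 * skip building a reply when userspace did not request an echo (NLM_F_ECHO)
 * and nobody listens on the multicast group -- see ovs_flow_cmd_alloc_info()
 * below -- while ovs_notify() hands a finished reply to genl_notify() for
 * delivery to the family's listeners (and back to the sender when an echo
 * was requested).
 */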
83
84 /**
85  * DOC: Locking:
86  *
87  * Writes to device state (add/remove datapath or port, set operations on
88  * vports, etc.) and writes to other state (flow table modifications,
89  * setting miscellaneous datapath parameters, etc.) are protected by
90  * ovs_lock.
91  *
92  * Reads are protected by RCU.
93  *
94  * There are a few special cases (mostly stats) that have their own
95  * synchronization but they nest under all of above and don't interact with
96  * each other.
97  *
98  * The RTNL lock nests inside ovs_mutex.
99  */
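/*
 * Illustrative sketch of the convention above (an example only, not part of
 * the datapath logic).  A writer brackets datapath mutations with the global
 * mutex:
 *
 *	ovs_lock();
 *	vport = new_vport(&parms);
 *	ovs_unlock();
 *
 * while a reader only needs the RCU read-side lock:
 *
 *	rcu_read_lock();
 *	vport = ovs_vport_rcu(dp, port_no);
 *	rcu_read_unlock();
 *
 * The ovsl_dereference()/rcu_dereference_ovsl() accessors used throughout
 * this file (defined in datapath.h) encode the same rules for lockdep.
 */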
100
101 static DEFINE_MUTEX(ovs_mutex);
102
103 void ovs_lock(void)
104 {
105         mutex_lock(&ovs_mutex);
106 }
107
108 void ovs_unlock(void)
109 {
110         mutex_unlock(&ovs_mutex);
111 }
112
113 #ifdef CONFIG_LOCKDEP
114 int lockdep_ovsl_is_held(void)
115 {
116         if (debug_locks)
117                 return lockdep_is_held(&ovs_mutex);
118         else
119                 return 1;
120 }
121 #endif
122
123 static struct vport *new_vport(const struct vport_parms *);
124 static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
125                              const struct sw_flow_key *,
126                              const struct dp_upcall_info *,
127                              uint32_t cutlen);
128 static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
129                                   const struct sw_flow_key *,
130                                   const struct dp_upcall_info *,
131                                   uint32_t cutlen);
132
133 /* Must be called with rcu_read_lock or ovs_mutex. */
134 const char *ovs_dp_name(const struct datapath *dp)
135 {
136         struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
137         return ovs_vport_name(vport);
138 }
139
140 static int get_dpifindex(const struct datapath *dp)
141 {
142         struct vport *local;
143         int ifindex;
144
145         rcu_read_lock();
146
147         local = ovs_vport_rcu(dp, OVSP_LOCAL);
148         if (local)
149                 ifindex = local->dev->ifindex;
150         else
151                 ifindex = 0;
152
153         rcu_read_unlock();
154
155         return ifindex;
156 }
157
158 static void destroy_dp_rcu(struct rcu_head *rcu)
159 {
160         struct datapath *dp = container_of(rcu, struct datapath, rcu);
161
162         ovs_flow_tbl_destroy(&dp->table);
163         free_percpu(dp->stats_percpu);
164         kfree(dp->ports);
165         ovs_meters_exit(dp);
166         kfree(dp);
167 }
168
169 static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
170                                             u16 port_no)
171 {
172         return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
173 }
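/*
 * DP_VPORT_HASH_BUCKETS is a power of two (1024 in datapath.h at the time of
 * writing), so the mask above simply keeps the low-order bits of the port
 * number: for example, port_no 1025 hashes to bucket 1025 & 1023 = 1.
 */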
174
175 /* Called with ovs_mutex or RCU read lock. */
176 struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
177 {
178         struct vport *vport;
179         struct hlist_head *head;
180
181         head = vport_hash_bucket(dp, port_no);
182         hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
183                 if (vport->port_no == port_no)
184                         return vport;
185         }
186         return NULL;
187 }
188
189 /* Called with ovs_mutex. */
190 static struct vport *new_vport(const struct vport_parms *parms)
191 {
192         struct vport *vport;
193
194         vport = ovs_vport_add(parms);
195         if (!IS_ERR(vport)) {
196                 struct datapath *dp = parms->dp;
197                 struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);
198
199                 hlist_add_head_rcu(&vport->dp_hash_node, head);
200         }
201         return vport;
202 }
203
204 void ovs_dp_detach_port(struct vport *p)
205 {
206         ASSERT_OVSL();
207
208         /* First drop references to device. */
209         hlist_del_rcu(&p->dp_hash_node);
210
211         /* Then destroy it. */
212         ovs_vport_del(p);
213 }
214
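/*
 * ovs_dp_process_packet - datapath receive fast path.
 *
 * Looks up the flow for @key in the datapath's flow table.  On a miss the
 * packet is queued to userspace as an OVS_PACKET_CMD_MISS upcall; on a hit
 * the flow's actions are executed on the skb.  Per-CPU hit/missed/mask-hit
 * counters are updated in both cases.
 */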
215 /* Must be called with rcu_read_lock. */
216 void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
217 {
218         const struct vport *p = OVS_CB(skb)->input_vport;
219         struct datapath *dp = p->dp;
220         struct sw_flow *flow;
221         struct sw_flow_actions *sf_acts;
222         struct dp_stats_percpu *stats;
223         u64 *stats_counter;
224         u32 n_mask_hit;
225         int error;
226
227         stats = this_cpu_ptr(dp->stats_percpu);
228
229         /* Look up flow. */
230         flow = ovs_flow_tbl_lookup_stats(&dp->table, key, skb_get_hash(skb),
231                                          &n_mask_hit);
232         if (unlikely(!flow)) {
233                 struct dp_upcall_info upcall;
234
235                 memset(&upcall, 0, sizeof(upcall));
236                 upcall.cmd = OVS_PACKET_CMD_MISS;
237                 upcall.portid = ovs_vport_find_upcall_portid(p, skb);
238                 upcall.mru = OVS_CB(skb)->mru;
239                 error = ovs_dp_upcall(dp, skb, key, &upcall, 0);
240                 if (unlikely(error))
241                         kfree_skb(skb);
242                 else
243                         consume_skb(skb);
244                 stats_counter = &stats->n_missed;
245                 goto out;
246         }
247
248         ovs_flow_stats_update(flow, key->tp.flags, skb);
249         sf_acts = rcu_dereference(flow->sf_acts);
250         error = ovs_execute_actions(dp, skb, sf_acts, key);
251         if (unlikely(error))
252                 net_dbg_ratelimited("ovs: action execution error on datapath %s: %d\n",
253                                                         ovs_dp_name(dp), error);
254
255         stats_counter = &stats->n_hit;
256
257 out:
258         /* Update datapath statistics. */
259         u64_stats_update_begin(&stats->syncp);
260         (*stats_counter)++;
261         stats->n_mask_hit += n_mask_hit;
262         u64_stats_update_end(&stats->syncp);
263 }
264
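/*
 * ovs_dp_upcall - send a packet to userspace over the Netlink socket bound
 * to @upcall_info->portid.  GSO packets are software-segmented first so that
 * each segment becomes its own upcall; on any failure the per-CPU n_lost
 * counter is incremented.
 */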
265 int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
266                   const struct sw_flow_key *key,
267                   const struct dp_upcall_info *upcall_info,
268                   uint32_t cutlen)
269 {
270         struct dp_stats_percpu *stats;
271         int err;
272
273         if (upcall_info->portid == 0) {
274                 err = -ENOTCONN;
275                 goto err;
276         }
277
278         if (!skb_is_gso(skb))
279                 err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
280         else
281                 err = queue_gso_packets(dp, skb, key, upcall_info, cutlen);
282         if (err)
283                 goto err;
284
285         return 0;
286
287 err:
288         stats = this_cpu_ptr(dp->stats_percpu);
289
290         u64_stats_update_begin(&stats->syncp);
291         stats->n_lost++;
292         u64_stats_update_end(&stats->syncp);
293
294         return err;
295 }
296
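/*
 * queue_gso_packets - segment a GSO skb and queue every resulting segment as
 * a separate upcall.  When the skb was UDP-fragmented (SKB_GSO_UDP), the
 * segments after the first are re-marked as OVS_FRAG_TYPE_LATER so userspace
 * sees the correct flow key for the later fragments.
 */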
297 static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
298                              const struct sw_flow_key *key,
299                              const struct dp_upcall_info *upcall_info,
300                                  uint32_t cutlen)
301 {
302         unsigned int gso_type = skb_shinfo(skb)->gso_type;
303         struct sw_flow_key later_key;
304         struct sk_buff *segs, *nskb;
305         int err;
306
307         BUILD_BUG_ON(sizeof(*OVS_CB(skb)) > SKB_SGO_CB_OFFSET);
308         segs = __skb_gso_segment(skb, NETIF_F_SG, false);
309         if (IS_ERR(segs))
310                 return PTR_ERR(segs);
311         if (segs == NULL)
312                 return -EINVAL;
313
314         if (gso_type & SKB_GSO_UDP) {
315                 /* The initial flow key extracted by ovs_flow_key_extract()
316                  * in this case is for a first fragment, so we need to
317                  * properly mark later fragments.
318                  */
319                 later_key = *key;
320                 later_key.ip.frag = OVS_FRAG_TYPE_LATER;
321         }
322
323         /* Queue all of the segments. */
324         skb = segs;
325         do {
326                 if (gso_type & SKB_GSO_UDP && skb != segs)
327                         key = &later_key;
328
329                 err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
330                 if (err)
331                         break;
332
333         } while ((skb = skb->next));
334
335         /* Free all of the segments. */
336         skb = segs;
337         do {
338                 nskb = skb->next;
339                 if (err)
340                         kfree_skb(skb);
341                 else
342                         consume_skb(skb);
343         } while ((skb = nskb));
344         return err;
345 }
346
347 static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
348                               unsigned int hdrlen, int actions_attrlen)
349 {
350         size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
351                 + nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
352                 + nla_total_size(ovs_key_attr_size()) /* OVS_PACKET_ATTR_KEY */
353                 + nla_total_size(sizeof(unsigned int)) /* OVS_PACKET_ATTR_LEN */
354                 + nla_total_size(sizeof(u64)); /* OVS_PACKET_ATTR_HASH */
355
356         /* OVS_PACKET_ATTR_USERDATA */
357         if (upcall_info->userdata)
358                 size += NLA_ALIGN(upcall_info->userdata->nla_len);
359
360         /* OVS_PACKET_ATTR_EGRESS_TUN_KEY */
361         if (upcall_info->egress_tun_info)
362                 size += nla_total_size(ovs_tun_key_attr_size());
363
364         /* OVS_PACKET_ATTR_ACTIONS */
365         if (upcall_info->actions_len)
366                 size += nla_total_size(actions_attrlen);
367
368         /* OVS_PACKET_ATTR_MRU */
369         if (upcall_info->mru)
370                 size += nla_total_size(sizeof(upcall_info->mru));
371
372         return size;
373 }
374
375 static void pad_packet(struct datapath *dp, struct sk_buff *skb)
376 {
377         if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
378                 size_t plen = NLA_ALIGN(skb->len) - skb->len;
379
380                 if (plen > 0)
381                         skb_put_zero(skb, plen);
382         }
383 }
384
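/*
 * queue_userspace_packet - build the OVS_PACKET genetlink message (flow key,
 * optional userdata, egress tunnel info, actions, MRU and hash, plus the
 * packet itself appended via skb_zerocopy()) and unicast it to the upcall
 * socket given by @upcall_info->portid.
 */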
385 static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
386                                   const struct sw_flow_key *key,
387                                   const struct dp_upcall_info *upcall_info,
388                                   uint32_t cutlen)
389 {
390         struct ovs_header *upcall;
391         struct sk_buff *nskb = NULL;
392         struct sk_buff *user_skb = NULL; /* to be queued to userspace */
393         struct nlattr *nla;
394         size_t len;
395         unsigned int hlen;
396         int err, dp_ifindex;
397         u64 hash;
398
399         dp_ifindex = get_dpifindex(dp);
400         if (!dp_ifindex)
401                 return -ENODEV;
402
403         if (skb_vlan_tag_present(skb)) {
404                 nskb = skb_clone(skb, GFP_ATOMIC);
405                 if (!nskb)
406                         return -ENOMEM;
407
408                 nskb = __vlan_hwaccel_push_inside(nskb);
409                 if (!nskb)
410                         return -ENOMEM;
411
412                 skb = nskb;
413         }
414
415         if (nla_attr_size(skb->len) > USHRT_MAX) {
416                 err = -EFBIG;
417                 goto out;
418         }
419
420         /* Complete checksum if needed */
421         if (skb->ip_summed == CHECKSUM_PARTIAL &&
422             (err = skb_csum_hwoffload_help(skb, 0)))
423                 goto out;
424
425         /* Older versions of OVS user space enforce alignment of the last
426          * Netlink attribute to NLA_ALIGNTO which would require extensive
427          * padding logic. Only perform zerocopy if padding is not required.
428          */
429         if (dp->user_features & OVS_DP_F_UNALIGNED)
430                 hlen = skb_zerocopy_headlen(skb);
431         else
432                 hlen = skb->len;
433
434         len = upcall_msg_size(upcall_info, hlen - cutlen,
435                               OVS_CB(skb)->acts_origlen);
436         user_skb = genlmsg_new(len, GFP_ATOMIC);
437         if (!user_skb) {
438                 err = -ENOMEM;
439                 goto out;
440         }
441
442         upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
443                              0, upcall_info->cmd);
444         if (!upcall) {
445                 err = -EINVAL;
446                 goto out;
447         }
448         upcall->dp_ifindex = dp_ifindex;
449
450         err = ovs_nla_put_key(key, key, OVS_PACKET_ATTR_KEY, false, user_skb);
451         if (err)
452                 goto out;
453
454         if (upcall_info->userdata)
455                 __nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
456                           nla_len(upcall_info->userdata),
457                           nla_data(upcall_info->userdata));
458
459         if (upcall_info->egress_tun_info) {
460                 nla = nla_nest_start_noflag(user_skb,
461                                             OVS_PACKET_ATTR_EGRESS_TUN_KEY);
462                 if (!nla) {
463                         err = -EMSGSIZE;
464                         goto out;
465                 }
466                 err = ovs_nla_put_tunnel_info(user_skb,
467                                               upcall_info->egress_tun_info);
468                 if (err)
469                         goto out;
470
471                 nla_nest_end(user_skb, nla);
472         }
473
474         if (upcall_info->actions_len) {
475                 nla = nla_nest_start_noflag(user_skb, OVS_PACKET_ATTR_ACTIONS);
476                 if (!nla) {
477                         err = -EMSGSIZE;
478                         goto out;
479                 }
480                 err = ovs_nla_put_actions(upcall_info->actions,
481                                           upcall_info->actions_len,
482                                           user_skb);
483                 if (!err)
484                         nla_nest_end(user_skb, nla);
485                 else
486                         nla_nest_cancel(user_skb, nla);
487         }
488
489         /* Add OVS_PACKET_ATTR_MRU */
490         if (upcall_info->mru &&
491             nla_put_u16(user_skb, OVS_PACKET_ATTR_MRU, upcall_info->mru)) {
492                 err = -ENOBUFS;
493                 goto out;
494         }
495
496         /* Add OVS_PACKET_ATTR_LEN when packet is truncated */
497         if (cutlen > 0 &&
498             nla_put_u32(user_skb, OVS_PACKET_ATTR_LEN, skb->len)) {
499                 err = -ENOBUFS;
500                 goto out;
501         }
502
503         /* Add OVS_PACKET_ATTR_HASH */
504         hash = skb_get_hash_raw(skb);
505         if (skb->sw_hash)
506                 hash |= OVS_PACKET_HASH_SW_BIT;
507
508         if (skb->l4_hash)
509                 hash |= OVS_PACKET_HASH_L4_BIT;
510
511         if (nla_put(user_skb, OVS_PACKET_ATTR_HASH, sizeof(u64), &hash)) {
512                 err = -ENOBUFS;
513                 goto out;
514         }
515
516         /* Only reserve room for the attribute header; the packet data is
517          * added in skb_zerocopy(). */
518         if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
519                 err = -ENOBUFS;
520                 goto out;
521         }
522         nla->nla_len = nla_attr_size(skb->len - cutlen);
523
524         err = skb_zerocopy(user_skb, skb, skb->len - cutlen, hlen);
525         if (err)
526                 goto out;
527
528         /* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
529         pad_packet(dp, user_skb);
530
531         ((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;
532
533         err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
534         user_skb = NULL;
535 out:
536         if (err)
537                 skb_tx_error(skb);
538         kfree_skb(user_skb);
539         kfree_skb(nskb);
540         return err;
541 }
542
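/*
 * ovs_packet_cmd_execute - handler for OVS_PACKET_CMD_EXECUTE.  Rebuilds a
 * packet from OVS_PACKET_ATTR_PACKET, constructs a flow from the supplied
 * key and actions, and then executes those actions as if the packet had been
 * received on the requested input vport.
 */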
543 static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
544 {
545         struct ovs_header *ovs_header = info->userhdr;
546         struct net *net = sock_net(skb->sk);
547         struct nlattr **a = info->attrs;
548         struct sw_flow_actions *acts;
549         struct sk_buff *packet;
550         struct sw_flow *flow;
551         struct sw_flow_actions *sf_acts;
552         struct datapath *dp;
553         struct vport *input_vport;
554         u16 mru = 0;
555         u64 hash;
556         int len;
557         int err;
558         bool log = !a[OVS_PACKET_ATTR_PROBE];
559
560         err = -EINVAL;
561         if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
562             !a[OVS_PACKET_ATTR_ACTIONS])
563                 goto err;
564
565         len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
566         packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
567         err = -ENOMEM;
568         if (!packet)
569                 goto err;
570         skb_reserve(packet, NET_IP_ALIGN);
571
572         nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);
573
574         /* Set packet's mru */
575         if (a[OVS_PACKET_ATTR_MRU]) {
576                 mru = nla_get_u16(a[OVS_PACKET_ATTR_MRU]);
577                 packet->ignore_df = 1;
578         }
579         OVS_CB(packet)->mru = mru;
580
581         if (a[OVS_PACKET_ATTR_HASH]) {
582                 hash = nla_get_u64(a[OVS_PACKET_ATTR_HASH]);
583
584                 __skb_set_hash(packet, hash & 0xFFFFFFFFULL,
585                                !!(hash & OVS_PACKET_HASH_SW_BIT),
586                                !!(hash & OVS_PACKET_HASH_L4_BIT));
587         }
588
589         /* Build an sw_flow for sending this packet. */
590         flow = ovs_flow_alloc();
591         err = PTR_ERR(flow);
592         if (IS_ERR(flow))
593                 goto err_kfree_skb;
594
595         err = ovs_flow_key_extract_userspace(net, a[OVS_PACKET_ATTR_KEY],
596                                              packet, &flow->key, log);
597         if (err)
598                 goto err_flow_free;
599
600         err = ovs_nla_copy_actions(net, a[OVS_PACKET_ATTR_ACTIONS],
601                                    &flow->key, &acts, log);
602         if (err)
603                 goto err_flow_free;
604
605         rcu_assign_pointer(flow->sf_acts, acts);
606         packet->priority = flow->key.phy.priority;
607         packet->mark = flow->key.phy.skb_mark;
608
609         rcu_read_lock();
610         dp = get_dp_rcu(net, ovs_header->dp_ifindex);
611         err = -ENODEV;
612         if (!dp)
613                 goto err_unlock;
614
615         input_vport = ovs_vport_rcu(dp, flow->key.phy.in_port);
616         if (!input_vport)
617                 input_vport = ovs_vport_rcu(dp, OVSP_LOCAL);
618
619         if (!input_vport)
620                 goto err_unlock;
621
622         packet->dev = input_vport->dev;
623         OVS_CB(packet)->input_vport = input_vport;
624         sf_acts = rcu_dereference(flow->sf_acts);
625
626         local_bh_disable();
627         err = ovs_execute_actions(dp, packet, sf_acts, &flow->key);
628         local_bh_enable();
629         rcu_read_unlock();
630
631         ovs_flow_free(flow, false);
632         return err;
633
634 err_unlock:
635         rcu_read_unlock();
636 err_flow_free:
637         ovs_flow_free(flow, false);
638 err_kfree_skb:
639         kfree_skb(packet);
640 err:
641         return err;
642 }
643
644 static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
645         [OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
646         [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
647         [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
648         [OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG },
649         [OVS_PACKET_ATTR_MRU] = { .type = NLA_U16 },
650 };
651
652 static const struct genl_ops dp_packet_genl_ops[] = {
653         { .cmd = OVS_PACKET_CMD_EXECUTE,
654           .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
655           .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
656           .doit = ovs_packet_cmd_execute
657         }
658 };
659
660 static struct genl_family dp_packet_genl_family __ro_after_init = {
661         .hdrsize = sizeof(struct ovs_header),
662         .name = OVS_PACKET_FAMILY,
663         .version = OVS_PACKET_VERSION,
664         .maxattr = OVS_PACKET_ATTR_MAX,
665         .policy = packet_policy,
666         .netnsok = true,
667         .parallel_ops = true,
668         .ops = dp_packet_genl_ops,
669         .n_ops = ARRAY_SIZE(dp_packet_genl_ops),
670         .module = THIS_MODULE,
671 };
672
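/*
 * get_dp_stats - aggregate the per-CPU datapath counters.  Each CPU's
 * snapshot is taken inside a u64_stats fetch/retry loop so that the 64-bit
 * counters are read consistently even on 32-bit machines.
 */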
673 static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
674                          struct ovs_dp_megaflow_stats *mega_stats)
675 {
676         int i;
677
678         memset(mega_stats, 0, sizeof(*mega_stats));
679
680         stats->n_flows = ovs_flow_tbl_count(&dp->table);
681         mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table);
682
683         stats->n_hit = stats->n_missed = stats->n_lost = 0;
684
685         for_each_possible_cpu(i) {
686                 const struct dp_stats_percpu *percpu_stats;
687                 struct dp_stats_percpu local_stats;
688                 unsigned int start;
689
690                 percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
691
692                 do {
693                         start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
694                         local_stats = *percpu_stats;
695                 } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));
696
697                 stats->n_hit += local_stats.n_hit;
698                 stats->n_missed += local_stats.n_missed;
699                 stats->n_lost += local_stats.n_lost;
700                 mega_stats->n_mask_hit += local_stats.n_mask_hit;
701         }
702 }
703
704 static bool should_fill_key(const struct sw_flow_id *sfid, uint32_t ufid_flags)
705 {
706         return ovs_identifier_is_ufid(sfid) &&
707                !(ufid_flags & OVS_UFID_F_OMIT_KEY);
708 }
709
710 static bool should_fill_mask(uint32_t ufid_flags)
711 {
712         return !(ufid_flags & OVS_UFID_F_OMIT_MASK);
713 }
714
715 static bool should_fill_actions(uint32_t ufid_flags)
716 {
717         return !(ufid_flags & OVS_UFID_F_OMIT_ACTIONS);
718 }
719
720 static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts,
721                                     const struct sw_flow_id *sfid,
722                                     uint32_t ufid_flags)
723 {
724         size_t len = NLMSG_ALIGN(sizeof(struct ovs_header));
725
726         /* OVS_FLOW_ATTR_UFID, or unmasked flow key as fallback
727          * see ovs_nla_put_identifier()
728          */
729         if (sfid && ovs_identifier_is_ufid(sfid))
730                 len += nla_total_size(sfid->ufid_len);
731         else
732                 len += nla_total_size(ovs_key_attr_size());
733
734         /* OVS_FLOW_ATTR_KEY */
735         if (!sfid || should_fill_key(sfid, ufid_flags))
736                 len += nla_total_size(ovs_key_attr_size());
737
738         /* OVS_FLOW_ATTR_MASK */
739         if (should_fill_mask(ufid_flags))
740                 len += nla_total_size(ovs_key_attr_size());
741
742         /* OVS_FLOW_ATTR_ACTIONS */
743         if (should_fill_actions(ufid_flags))
744                 len += nla_total_size(acts->orig_len);
745
746         return len
747                 + nla_total_size_64bit(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
748                 + nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
749                 + nla_total_size_64bit(8); /* OVS_FLOW_ATTR_USED */
750 }
751
752 /* Called with ovs_mutex or RCU read lock. */
753 static int ovs_flow_cmd_fill_stats(const struct sw_flow *flow,
754                                    struct sk_buff *skb)
755 {
756         struct ovs_flow_stats stats;
757         __be16 tcp_flags;
758         unsigned long used;
759
760         ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);
761
762         if (used &&
763             nla_put_u64_64bit(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used),
764                               OVS_FLOW_ATTR_PAD))
765                 return -EMSGSIZE;
766
767         if (stats.n_packets &&
768             nla_put_64bit(skb, OVS_FLOW_ATTR_STATS,
769                           sizeof(struct ovs_flow_stats), &stats,
770                           OVS_FLOW_ATTR_PAD))
771                 return -EMSGSIZE;
772
773         if ((u8)ntohs(tcp_flags) &&
774              nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags)))
775                 return -EMSGSIZE;
776
777         return 0;
778 }
779
780 /* Called with ovs_mutex or RCU read lock. */
781 static int ovs_flow_cmd_fill_actions(const struct sw_flow *flow,
782                                      struct sk_buff *skb, int skb_orig_len)
783 {
784         struct nlattr *start;
785         int err;
786
787         /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
788          * this is the first flow to be dumped into 'skb'.  This is unusual for
789          * Netlink but individual action lists can be longer than
790          * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
791          * The userspace caller can always fetch the actions separately if it
792          * really wants them.  (Most userspace callers in fact don't care.)
793          *
794          * This can only fail for dump operations because the skb is always
795          * properly sized for single flows.
796          */
797         start = nla_nest_start_noflag(skb, OVS_FLOW_ATTR_ACTIONS);
798         if (start) {
799                 const struct sw_flow_actions *sf_acts;
800
801                 sf_acts = rcu_dereference_ovsl(flow->sf_acts);
802                 err = ovs_nla_put_actions(sf_acts->actions,
803                                           sf_acts->actions_len, skb);
804
805                 if (!err)
806                         nla_nest_end(skb, start);
807                 else {
808                         if (skb_orig_len)
809                                 return err;
810
811                         nla_nest_cancel(skb, start);
812                 }
813         } else if (skb_orig_len) {
814                 return -EMSGSIZE;
815         }
816
817         return 0;
818 }
819
820 /* Called with ovs_mutex or RCU read lock. */
821 static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
822                                   struct sk_buff *skb, u32 portid,
823                                   u32 seq, u32 flags, u8 cmd, u32 ufid_flags)
824 {
825         const int skb_orig_len = skb->len;
826         struct ovs_header *ovs_header;
827         int err;
828
829         ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family,
830                                  flags, cmd);
831         if (!ovs_header)
832                 return -EMSGSIZE;
833
834         ovs_header->dp_ifindex = dp_ifindex;
835
836         err = ovs_nla_put_identifier(flow, skb);
837         if (err)
838                 goto error;
839
840         if (should_fill_key(&flow->id, ufid_flags)) {
841                 err = ovs_nla_put_masked_key(flow, skb);
842                 if (err)
843                         goto error;
844         }
845
846         if (should_fill_mask(ufid_flags)) {
847                 err = ovs_nla_put_mask(flow, skb);
848                 if (err)
849                         goto error;
850         }
851
852         err = ovs_flow_cmd_fill_stats(flow, skb);
853         if (err)
854                 goto error;
855
856         if (should_fill_actions(ufid_flags)) {
857                 err = ovs_flow_cmd_fill_actions(flow, skb, skb_orig_len);
858                 if (err)
859                         goto error;
860         }
861
862         genlmsg_end(skb, ovs_header);
863         return 0;
864
865 error:
866         genlmsg_cancel(skb, ovs_header);
867         return err;
868 }
869
870 /* May not be called with RCU read lock. */
871 static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts,
872                                                const struct sw_flow_id *sfid,
873                                                struct genl_info *info,
874                                                bool always,
875                                                uint32_t ufid_flags)
876 {
877         struct sk_buff *skb;
878         size_t len;
879
880         if (!always && !ovs_must_notify(&dp_flow_genl_family, info, 0))
881                 return NULL;
882
883         len = ovs_flow_cmd_msg_size(acts, sfid, ufid_flags);
884         skb = genlmsg_new(len, GFP_KERNEL);
885         if (!skb)
886                 return ERR_PTR(-ENOMEM);
887
888         return skb;
889 }
890
891 /* Called with ovs_mutex. */
892 static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
893                                                int dp_ifindex,
894                                                struct genl_info *info, u8 cmd,
895                                                bool always, u32 ufid_flags)
896 {
897         struct sk_buff *skb;
898         int retval;
899
900         skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts),
901                                       &flow->id, info, always, ufid_flags);
902         if (IS_ERR_OR_NULL(skb))
903                 return skb;
904
905         retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
906                                         info->snd_portid, info->snd_seq, 0,
907                                         cmd, ufid_flags);
908         if (WARN_ON_ONCE(retval < 0)) {
909                 kfree_skb(skb);
910                 skb = ERR_PTR(retval);
911         }
912         return skb;
913 }
914
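/*
 * ovs_flow_cmd_new - handler for OVS_FLOW_CMD_NEW.  The flow, its actions
 * and (when needed) the reply skb are all allocated before ovs_lock() is
 * taken; the flow is then either inserted into the table or, if an identical
 * flow already exists and neither NLM_F_CREATE nor NLM_F_EXCL was set, its
 * actions are replaced in place.
 */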
915 static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
916 {
917         struct net *net = sock_net(skb->sk);
918         struct nlattr **a = info->attrs;
919         struct ovs_header *ovs_header = info->userhdr;
920         struct sw_flow *flow = NULL, *new_flow;
921         struct sw_flow_mask mask;
922         struct sk_buff *reply;
923         struct datapath *dp;
924         struct sw_flow_actions *acts;
925         struct sw_flow_match match;
926         u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
927         int error;
928         bool log = !a[OVS_FLOW_ATTR_PROBE];
929
930         /* Must have key and actions. */
931         error = -EINVAL;
932         if (!a[OVS_FLOW_ATTR_KEY]) {
933                 OVS_NLERR(log, "Flow key attr not present in new flow.");
934                 goto error;
935         }
936         if (!a[OVS_FLOW_ATTR_ACTIONS]) {
937                 OVS_NLERR(log, "Flow actions attr not present in new flow.");
938                 goto error;
939         }
940
941         /* Most of the time we need to allocate a new flow, so do it before
942          * taking the lock.
943          */
944         new_flow = ovs_flow_alloc();
945         if (IS_ERR(new_flow)) {
946                 error = PTR_ERR(new_flow);
947                 goto error;
948         }
949
950         /* Extract key. */
951         ovs_match_init(&match, &new_flow->key, false, &mask);
952         error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
953                                   a[OVS_FLOW_ATTR_MASK], log);
954         if (error)
955                 goto err_kfree_flow;
956
957         /* Extract flow identifier. */
958         error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID],
959                                        &new_flow->key, log);
960         if (error)
961                 goto err_kfree_flow;
962
963         /* unmasked key is needed to match when ufid is not used. */
964         if (ovs_identifier_is_key(&new_flow->id))
965                 match.key = new_flow->id.unmasked_key;
966
967         ovs_flow_mask_key(&new_flow->key, &new_flow->key, true, &mask);
968
969         /* Validate actions. */
970         error = ovs_nla_copy_actions(net, a[OVS_FLOW_ATTR_ACTIONS],
971                                      &new_flow->key, &acts, log);
972         if (error) {
973                 OVS_NLERR(log, "Flow actions may not be safe on all matching packets.");
974                 goto err_kfree_flow;
975         }
976
977         reply = ovs_flow_cmd_alloc_info(acts, &new_flow->id, info, false,
978                                         ufid_flags);
979         if (IS_ERR(reply)) {
980                 error = PTR_ERR(reply);
981                 goto err_kfree_acts;
982         }
983
984         ovs_lock();
985         dp = get_dp(net, ovs_header->dp_ifindex);
986         if (unlikely(!dp)) {
987                 error = -ENODEV;
988                 goto err_unlock_ovs;
989         }
990
991         /* Check if this is a duplicate flow */
992         if (ovs_identifier_is_ufid(&new_flow->id))
993                 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &new_flow->id);
994         if (!flow)
995                 flow = ovs_flow_tbl_lookup(&dp->table, &new_flow->key);
996         if (likely(!flow)) {
997                 rcu_assign_pointer(new_flow->sf_acts, acts);
998
999                 /* Put flow in bucket. */
1000                 error = ovs_flow_tbl_insert(&dp->table, new_flow, &mask);
1001                 if (unlikely(error)) {
1002                         acts = NULL;
1003                         goto err_unlock_ovs;
1004                 }
1005
1006                 if (unlikely(reply)) {
1007                         error = ovs_flow_cmd_fill_info(new_flow,
1008                                                        ovs_header->dp_ifindex,
1009                                                        reply, info->snd_portid,
1010                                                        info->snd_seq, 0,
1011                                                        OVS_FLOW_CMD_NEW,
1012                                                        ufid_flags);
1013                         BUG_ON(error < 0);
1014                 }
1015                 ovs_unlock();
1016         } else {
1017                 struct sw_flow_actions *old_acts;
1018
1019                 /* Bail out if we're not allowed to modify an existing flow.
1020                  * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
1021                  * because Generic Netlink treats the latter as a dump
1022                  * request.  We also accept NLM_F_EXCL in case that bug ever
1023                  * gets fixed.
1024                  */
1025                 if (unlikely(info->nlhdr->nlmsg_flags & (NLM_F_CREATE
1026                                                          | NLM_F_EXCL))) {
1027                         error = -EEXIST;
1028                         goto err_unlock_ovs;
1029                 }
1030                 /* The flow identifier has to be the same for flow updates.
1031                  * Look for any overlapping flow.
1032                  */
1033                 if (unlikely(!ovs_flow_cmp(flow, &match))) {
1034                         if (ovs_identifier_is_key(&flow->id))
1035                                 flow = ovs_flow_tbl_lookup_exact(&dp->table,
1036                                                                  &match);
1037                         else /* UFID matches but key is different */
1038                                 flow = NULL;
1039                         if (!flow) {
1040                                 error = -ENOENT;
1041                                 goto err_unlock_ovs;
1042                         }
1043                 }
1044                 /* Update actions. */
1045                 old_acts = ovsl_dereference(flow->sf_acts);
1046                 rcu_assign_pointer(flow->sf_acts, acts);
1047
1048                 if (unlikely(reply)) {
1049                         error = ovs_flow_cmd_fill_info(flow,
1050                                                        ovs_header->dp_ifindex,
1051                                                        reply, info->snd_portid,
1052                                                        info->snd_seq, 0,
1053                                                        OVS_FLOW_CMD_NEW,
1054                                                        ufid_flags);
1055                         BUG_ON(error < 0);
1056                 }
1057                 ovs_unlock();
1058
1059                 ovs_nla_free_flow_actions_rcu(old_acts);
1060                 ovs_flow_free(new_flow, false);
1061         }
1062
1063         if (reply)
1064                 ovs_notify(&dp_flow_genl_family, reply, info);
1065         return 0;
1066
1067 err_unlock_ovs:
1068         ovs_unlock();
1069         kfree_skb(reply);
1070 err_kfree_acts:
1071         ovs_nla_free_flow_actions(acts);
1072 err_kfree_flow:
1073         ovs_flow_free(new_flow, false);
1074 error:
1075         return error;
1076 }
1077
1078 /* Factor out action copy to avoid "Wframe-larger-than=1024" warning. */
1079 static noinline_for_stack struct sw_flow_actions *get_flow_actions(struct net *net,
1080                                                 const struct nlattr *a,
1081                                                 const struct sw_flow_key *key,
1082                                                 const struct sw_flow_mask *mask,
1083                                                 bool log)
1084 {
1085         struct sw_flow_actions *acts;
1086         struct sw_flow_key masked_key;
1087         int error;
1088
1089         ovs_flow_mask_key(&masked_key, key, true, mask);
1090         error = ovs_nla_copy_actions(net, a, &masked_key, &acts, log);
1091         if (error) {
1092                 OVS_NLERR(log,
1093                           "Actions may not be safe on all matching packets");
1094                 return ERR_PTR(error);
1095         }
1096
1097         return acts;
1098 }
1099
1100 /* Factor out match-init and action-copy to avoid a
1101  * "-Wframe-larger-than=1024" warning. Because the mask is only
1102  * used to get the actions, we use a separate function to save
1103  * some stack space.
1104  *
1105  * If neither the key nor the actions attribute is present, we
1106  * return 0 directly; in that case the caller will not use the
1107  * match either. If the actions attribute is present, we try to
1108  * copy the actions and save them to *acts. Before returning
1109  * from the function, we reset the match->mask pointer, because
1110  * we must not return a match object with a dangling reference
1111  * to the on-stack mask.
1112  */
1113 static noinline_for_stack int
1114 ovs_nla_init_match_and_action(struct net *net,
1115                               struct sw_flow_match *match,
1116                               struct sw_flow_key *key,
1117                               struct nlattr **a,
1118                               struct sw_flow_actions **acts,
1119                               bool log)
1120 {
1121         struct sw_flow_mask mask;
1122         int error = 0;
1123
1124         if (a[OVS_FLOW_ATTR_KEY]) {
1125                 ovs_match_init(match, key, true, &mask);
1126                 error = ovs_nla_get_match(net, match, a[OVS_FLOW_ATTR_KEY],
1127                                           a[OVS_FLOW_ATTR_MASK], log);
1128                 if (error)
1129                         goto error;
1130         }
1131
1132         if (a[OVS_FLOW_ATTR_ACTIONS]) {
1133                 if (!a[OVS_FLOW_ATTR_KEY]) {
1134                         OVS_NLERR(log,
1135                                   "Flow key attribute not present in set flow.");
1136                         error = -EINVAL;
1137                         goto error;
1138                 }
1139
1140                 *acts = get_flow_actions(net, a[OVS_FLOW_ATTR_ACTIONS], key,
1141                                          &mask, log);
1142                 if (IS_ERR(*acts)) {
1143                         error = PTR_ERR(*acts);
1144                         goto error;
1145                 }
1146         }
1147
1148         /* On success, error is 0. */
1149 error:
1150         match->mask = NULL;
1151         return error;
1152 }
1153
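/*
 * ovs_flow_cmd_set - handler for OVS_FLOW_CMD_SET.  Looks the flow up by
 * UFID or by exact key, swaps in the new actions when they were supplied,
 * and clears the flow's statistics when OVS_FLOW_ATTR_CLEAR is present.
 */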
1154 static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
1155 {
1156         struct net *net = sock_net(skb->sk);
1157         struct nlattr **a = info->attrs;
1158         struct ovs_header *ovs_header = info->userhdr;
1159         struct sw_flow_key key;
1160         struct sw_flow *flow;
1161         struct sk_buff *reply = NULL;
1162         struct datapath *dp;
1163         struct sw_flow_actions *old_acts = NULL, *acts = NULL;
1164         struct sw_flow_match match;
1165         struct sw_flow_id sfid;
1166         u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
1167         int error = 0;
1168         bool log = !a[OVS_FLOW_ATTR_PROBE];
1169         bool ufid_present;
1170
1171         ufid_present = ovs_nla_get_ufid(&sfid, a[OVS_FLOW_ATTR_UFID], log);
1172         if (!a[OVS_FLOW_ATTR_KEY] && !ufid_present) {
1173                 OVS_NLERR(log,
1174                           "Flow set message rejected, Key attribute missing.");
1175                 return -EINVAL;
1176         }
1177
1178         error = ovs_nla_init_match_and_action(net, &match, &key, a,
1179                                               &acts, log);
1180         if (error)
1181                 goto error;
1182
1183         if (acts) {
1184                 /* Can allocate the reply before locking if we have acts. */
1185                 reply = ovs_flow_cmd_alloc_info(acts, &sfid, info, false,
1186                                                 ufid_flags);
1187                 if (IS_ERR(reply)) {
1188                         error = PTR_ERR(reply);
1189                         goto err_kfree_acts;
1190                 }
1191         }
1192
1193         ovs_lock();
1194         dp = get_dp(net, ovs_header->dp_ifindex);
1195         if (unlikely(!dp)) {
1196                 error = -ENODEV;
1197                 goto err_unlock_ovs;
1198         }
1199         /* Check that the flow exists. */
1200         if (ufid_present)
1201                 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &sfid);
1202         else
1203                 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
1204         if (unlikely(!flow)) {
1205                 error = -ENOENT;
1206                 goto err_unlock_ovs;
1207         }
1208
1209         /* Update actions, if present. */
1210         if (likely(acts)) {
1211                 old_acts = ovsl_dereference(flow->sf_acts);
1212                 rcu_assign_pointer(flow->sf_acts, acts);
1213
1214                 if (unlikely(reply)) {
1215                         error = ovs_flow_cmd_fill_info(flow,
1216                                                        ovs_header->dp_ifindex,
1217                                                        reply, info->snd_portid,
1218                                                        info->snd_seq, 0,
1219                                                        OVS_FLOW_CMD_SET,
1220                                                        ufid_flags);
1221                         BUG_ON(error < 0);
1222                 }
1223         } else {
1224                 /* Could not alloc without acts before locking. */
1225                 reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex,
1226                                                 info, OVS_FLOW_CMD_SET, false,
1227                                                 ufid_flags);
1228
1229                 if (IS_ERR(reply)) {
1230                         error = PTR_ERR(reply);
1231                         goto err_unlock_ovs;
1232                 }
1233         }
1234
1235         /* Clear stats. */
1236         if (a[OVS_FLOW_ATTR_CLEAR])
1237                 ovs_flow_stats_clear(flow);
1238         ovs_unlock();
1239
1240         if (reply)
1241                 ovs_notify(&dp_flow_genl_family, reply, info);
1242         if (old_acts)
1243                 ovs_nla_free_flow_actions_rcu(old_acts);
1244
1245         return 0;
1246
1247 err_unlock_ovs:
1248         ovs_unlock();
1249         kfree_skb(reply);
1250 err_kfree_acts:
1251         ovs_nla_free_flow_actions(acts);
1252 error:
1253         return error;
1254 }
1255
1256 static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
1257 {
1258         struct nlattr **a = info->attrs;
1259         struct ovs_header *ovs_header = info->userhdr;
1260         struct net *net = sock_net(skb->sk);
1261         struct sw_flow_key key;
1262         struct sk_buff *reply;
1263         struct sw_flow *flow;
1264         struct datapath *dp;
1265         struct sw_flow_match match;
1266         struct sw_flow_id ufid;
1267         u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
1268         int err = 0;
1269         bool log = !a[OVS_FLOW_ATTR_PROBE];
1270         bool ufid_present;
1271
1272         ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
1273         if (a[OVS_FLOW_ATTR_KEY]) {
1274                 ovs_match_init(&match, &key, true, NULL);
1275                 err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY], NULL,
1276                                         log);
1277         } else if (!ufid_present) {
1278                 OVS_NLERR(log,
1279                           "Flow get message rejected, Key attribute missing.");
1280                 err = -EINVAL;
1281         }
1282         if (err)
1283                 return err;
1284
1285         ovs_lock();
1286         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1287         if (!dp) {
1288                 err = -ENODEV;
1289                 goto unlock;
1290         }
1291
1292         if (ufid_present)
1293                 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
1294         else
1295                 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
1296         if (!flow) {
1297                 err = -ENOENT;
1298                 goto unlock;
1299         }
1300
1301         reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info,
1302                                         OVS_FLOW_CMD_GET, true, ufid_flags);
1303         if (IS_ERR(reply)) {
1304                 err = PTR_ERR(reply);
1305                 goto unlock;
1306         }
1307
1308         ovs_unlock();
1309         return genlmsg_reply(reply, info);
1310 unlock:
1311         ovs_unlock();
1312         return err;
1313 }
1314
1315 static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
1316 {
1317         struct nlattr **a = info->attrs;
1318         struct ovs_header *ovs_header = info->userhdr;
1319         struct net *net = sock_net(skb->sk);
1320         struct sw_flow_key key;
1321         struct sk_buff *reply;
1322         struct sw_flow *flow = NULL;
1323         struct datapath *dp;
1324         struct sw_flow_match match;
1325         struct sw_flow_id ufid;
1326         u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
1327         int err;
1328         bool log = !a[OVS_FLOW_ATTR_PROBE];
1329         bool ufid_present;
1330
1331         ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
1332         if (a[OVS_FLOW_ATTR_KEY]) {
1333                 ovs_match_init(&match, &key, true, NULL);
1334                 err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
1335                                         NULL, log);
1336                 if (unlikely(err))
1337                         return err;
1338         }
1339
1340         ovs_lock();
1341         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1342         if (unlikely(!dp)) {
1343                 err = -ENODEV;
1344                 goto unlock;
1345         }
1346
1347         if (unlikely(!a[OVS_FLOW_ATTR_KEY] && !ufid_present)) {
1348                 err = ovs_flow_tbl_flush(&dp->table);
1349                 goto unlock;
1350         }
1351
1352         if (ufid_present)
1353                 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
1354         else
1355                 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
1356         if (unlikely(!flow)) {
1357                 err = -ENOENT;
1358                 goto unlock;
1359         }
1360
1361         ovs_flow_tbl_remove(&dp->table, flow);
1362         ovs_unlock();
1363
1364         reply = ovs_flow_cmd_alloc_info((const struct sw_flow_actions __force *) flow->sf_acts,
1365                                         &flow->id, info, false, ufid_flags);
1366         if (likely(reply)) {
1367                 if (!IS_ERR(reply)) {
1368                         rcu_read_lock();        /* To keep RCU checker happy. */
1369                         err = ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex,
1370                                                      reply, info->snd_portid,
1371                                                      info->snd_seq, 0,
1372                                                      OVS_FLOW_CMD_DEL,
1373                                                      ufid_flags);
1374                         rcu_read_unlock();
1375                         BUG_ON(err < 0);
1376
1377                         ovs_notify(&dp_flow_genl_family, reply, info);
1378                 } else {
1379                         netlink_set_err(sock_net(skb->sk)->genl_sock, 0, 0, PTR_ERR(reply));
1380                 }
1381         }
1382
1383         ovs_flow_free(flow, true);
1384         return 0;
1385 unlock:
1386         ovs_unlock();
1387         return err;
1388 }
1389
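/*
 * ovs_flow_cmd_dump - dump callback for OVS_FLOW_CMD_GET.  The dump cursor
 * lives in cb->args[]: args[0] is the current hash-table bucket and args[1]
 * the offset within that bucket, so a multi-message dump resumes where the
 * previous skb filled up.
 */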
1390 static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1391 {
1392         struct nlattr *a[__OVS_FLOW_ATTR_MAX];
1393         struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
1394         struct table_instance *ti;
1395         struct datapath *dp;
1396         u32 ufid_flags;
1397         int err;
1398
1399         err = genlmsg_parse_deprecated(cb->nlh, &dp_flow_genl_family, a,
1400                                        OVS_FLOW_ATTR_MAX, flow_policy, NULL);
1401         if (err)
1402                 return err;
1403         ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
1404
1405         rcu_read_lock();
1406         dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
1407         if (!dp) {
1408                 rcu_read_unlock();
1409                 return -ENODEV;
1410         }
1411
1412         ti = rcu_dereference(dp->table.ti);
1413         for (;;) {
1414                 struct sw_flow *flow;
1415                 u32 bucket, obj;
1416
1417                 bucket = cb->args[0];
1418                 obj = cb->args[1];
1419                 flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj);
1420                 if (!flow)
1421                         break;
1422
1423                 if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb,
1424                                            NETLINK_CB(cb->skb).portid,
1425                                            cb->nlh->nlmsg_seq, NLM_F_MULTI,
1426                                            OVS_FLOW_CMD_GET, ufid_flags) < 0)
1427                         break;
1428
1429                 cb->args[0] = bucket;
1430                 cb->args[1] = obj;
1431         }
1432         rcu_read_unlock();
1433         return skb->len;
1434 }
1435
1436 static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
1437         [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
1438         [OVS_FLOW_ATTR_MASK] = { .type = NLA_NESTED },
1439         [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
1440         [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
1441         [OVS_FLOW_ATTR_PROBE] = { .type = NLA_FLAG },
1442         [OVS_FLOW_ATTR_UFID] = { .type = NLA_UNSPEC, .len = 1 },
1443         [OVS_FLOW_ATTR_UFID_FLAGS] = { .type = NLA_U32 },
1444 };
1445
1446 static const struct genl_ops dp_flow_genl_ops[] = {
1447         { .cmd = OVS_FLOW_CMD_NEW,
1448           .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1449           .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1450           .doit = ovs_flow_cmd_new
1451         },
1452         { .cmd = OVS_FLOW_CMD_DEL,
1453           .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1454           .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1455           .doit = ovs_flow_cmd_del
1456         },
1457         { .cmd = OVS_FLOW_CMD_GET,
1458           .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1459           .flags = 0,               /* OK for unprivileged users. */
1460           .doit = ovs_flow_cmd_get,
1461           .dumpit = ovs_flow_cmd_dump
1462         },
1463         { .cmd = OVS_FLOW_CMD_SET,
1464           .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1465           .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1466           .doit = ovs_flow_cmd_set,
1467         },
1468 };
1469
1470 static struct genl_family dp_flow_genl_family __ro_after_init = {
1471         .hdrsize = sizeof(struct ovs_header),
1472         .name = OVS_FLOW_FAMILY,
1473         .version = OVS_FLOW_VERSION,
1474         .maxattr = OVS_FLOW_ATTR_MAX,
1475         .policy = flow_policy,
1476         .netnsok = true,
1477         .parallel_ops = true,
1478         .ops = dp_flow_genl_ops,
1479         .n_ops = ARRAY_SIZE(dp_flow_genl_ops),
1480         .mcgrps = &ovs_dp_flow_multicast_group,
1481         .n_mcgrps = 1,
1482         .module = THIS_MODULE,
1483 };
1484
1485 static size_t ovs_dp_cmd_msg_size(void)
1486 {
1487         size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header));
1488
1489         msgsize += nla_total_size(IFNAMSIZ);
1490         msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_stats));
1491         msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_megaflow_stats));
1492         msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */
1493
1494         return msgsize;
1495 }
1496
1497 /* Called with ovs_mutex. */
1498 static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
1499                                 u32 portid, u32 seq, u32 flags, u8 cmd)
1500 {
1501         struct ovs_header *ovs_header;
1502         struct ovs_dp_stats dp_stats;
1503         struct ovs_dp_megaflow_stats dp_megaflow_stats;
1504         int err;
1505
1506         ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
1507                                    flags, cmd);
1508         if (!ovs_header)
1509                 goto error;
1510
1511         ovs_header->dp_ifindex = get_dpifindex(dp);
1512
1513         err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
1514         if (err)
1515                 goto nla_put_failure;
1516
1517         get_dp_stats(dp, &dp_stats, &dp_megaflow_stats);
1518         if (nla_put_64bit(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats),
1519                           &dp_stats, OVS_DP_ATTR_PAD))
1520                 goto nla_put_failure;
1521
1522         if (nla_put_64bit(skb, OVS_DP_ATTR_MEGAFLOW_STATS,
1523                           sizeof(struct ovs_dp_megaflow_stats),
1524                           &dp_megaflow_stats, OVS_DP_ATTR_PAD))
1525                 goto nla_put_failure;
1526
1527         if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
1528                 goto nla_put_failure;
1529
1530         genlmsg_end(skb, ovs_header);
1531         return 0;
1532
1533 nla_put_failure:
1534         genlmsg_cancel(skb, ovs_header);
1535 error:
1536         return -EMSGSIZE;
1537 }
1538
1539 static struct sk_buff *ovs_dp_cmd_alloc_info(void)
1540 {
1541         return genlmsg_new(ovs_dp_cmd_msg_size(), GFP_KERNEL);
1542 }
1543
1544 /* Called with rcu_read_lock or ovs_mutex. */
1545 static struct datapath *lookup_datapath(struct net *net,
1546                                         const struct ovs_header *ovs_header,
1547                                         struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1548 {
1549         struct datapath *dp;
1550
1551         if (!a[OVS_DP_ATTR_NAME])
1552                 dp = get_dp(net, ovs_header->dp_ifindex);
1553         else {
1554                 struct vport *vport;
1555
1556                 vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
1557                 dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
1558         }
1559         return dp ? dp : ERR_PTR(-ENODEV);
1560 }
1561
1562 static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *info)
1563 {
1564         struct datapath *dp;
1565
1566         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1567         if (IS_ERR(dp))
1568                 return;
1569
1570         WARN(dp->user_features, "Dropping previously announced user features\n");
1571         dp->user_features = 0;
1572 }
1573
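/* Static key toggled from ovs_dp_change() below, according to whether the
 * datapath being configured requested OVS_DP_F_TC_RECIRC_SHARING.
 */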
1574 DEFINE_STATIC_KEY_FALSE(tc_recirc_sharing_support);
1575
1576 static int ovs_dp_change(struct datapath *dp, struct nlattr *a[])
1577 {
1578         u32 user_features = 0;
1579
1580         if (a[OVS_DP_ATTR_USER_FEATURES]) {
1581                 user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]);
1582
1583                 if (user_features & ~(OVS_DP_F_VPORT_PIDS |
1584                                       OVS_DP_F_UNALIGNED |
1585                                       OVS_DP_F_TC_RECIRC_SHARING))
1586                         return -EOPNOTSUPP;
1587
1588 #if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
1589                 if (user_features & OVS_DP_F_TC_RECIRC_SHARING)
1590                         return -EOPNOTSUPP;
1591 #endif
1592         }
1593
1594         dp->user_features = user_features;
1595
1596         if (dp->user_features & OVS_DP_F_TC_RECIRC_SHARING)
1597                 static_branch_enable(&tc_recirc_sharing_support);
1598         else
1599                 static_branch_disable(&tc_recirc_sharing_support);
1600
1601         return 0;
1602 }
1603
1604 static int ovs_dp_stats_init(struct datapath *dp)
1605 {
1606         dp->stats_percpu = netdev_alloc_pcpu_stats(struct dp_stats_percpu);
1607         if (!dp->stats_percpu)
1608                 return -ENOMEM;
1609
1610         return 0;
1611 }
1612
1613 static int ovs_dp_vport_init(struct datapath *dp)
1614 {
1615         int i;
1616
1617         dp->ports = kmalloc_array(DP_VPORT_HASH_BUCKETS,
1618                                   sizeof(struct hlist_head),
1619                                   GFP_KERNEL);
1620         if (!dp->ports)
1621                 return -ENOMEM;
1622
1623         for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
1624                 INIT_HLIST_HEAD(&dp->ports[i]);
1625
1626         return 0;
1627 }
1628
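/* OVS_DP_CMD_NEW: allocate a datapath together with its flow table,
 * per-CPU stats, vport hash table and meters, create the internal
 * (OVSP_LOCAL) vport and add the new datapath to the per-netns list.
 * The reply skb is allocated before any state is touched, so the error
 * paths only need to unwind local allocations.
 */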
1629 static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
1630 {
1631         struct nlattr **a = info->attrs;
1632         struct vport_parms parms;
1633         struct sk_buff *reply;
1634         struct datapath *dp;
1635         struct vport *vport;
1636         struct ovs_net *ovs_net;
1637         int err;
1638
1639         err = -EINVAL;
1640         if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
1641                 goto err;
1642
1643         reply = ovs_dp_cmd_alloc_info();
1644         if (!reply)
1645                 return -ENOMEM;
1646
1647         err = -ENOMEM;
1648         dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1649         if (dp == NULL)
1650                 goto err_destroy_reply;
1651
1652         ovs_dp_set_net(dp, sock_net(skb->sk));
1653
1654         /* Allocate table. */
1655         err = ovs_flow_tbl_init(&dp->table);
1656         if (err)
1657                 goto err_destroy_dp;
1658
1659         err = ovs_dp_stats_init(dp);
1660         if (err)
1661                 goto err_destroy_table;
1662
1663         err = ovs_dp_vport_init(dp);
1664         if (err)
1665                 goto err_destroy_stats;
1666
1667         err = ovs_meters_init(dp);
1668         if (err)
1669                 goto err_destroy_ports;
1670
1671         /* Set up our datapath device. */
1672         parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
1673         parms.type = OVS_VPORT_TYPE_INTERNAL;
1674         parms.options = NULL;
1675         parms.dp = dp;
1676         parms.port_no = OVSP_LOCAL;
1677         parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID];
1678
1679         err = ovs_dp_change(dp, a);
1680         if (err)
1681                 goto err_destroy_meters;
1682
1683         /* So far only local changes have been made, now need the lock. */
1684         ovs_lock();
1685
1686         vport = new_vport(&parms);
1687         if (IS_ERR(vport)) {
1688                 err = PTR_ERR(vport);
1689                 if (err == -EBUSY)
1690                         err = -EEXIST;
1691
1692                 if (err == -EEXIST) {
1693                         /* An outdated user-space instance that does not understand
1694                          * user_features has attempted to create a new datapath and
1695                          * will likely reuse the existing one. Drop all user features.
1696                          */
1697                         if (info->genlhdr->version < OVS_DP_VER_FEATURES)
1698                                 ovs_dp_reset_user_features(skb, info);
1699                 }
1700
1701                 ovs_unlock();
1702                 goto err_destroy_meters;
1703         }
1704
1705         err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1706                                    info->snd_seq, 0, OVS_DP_CMD_NEW);
1707         BUG_ON(err < 0);
1708
1709         ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
1710         list_add_tail_rcu(&dp->list_node, &ovs_net->dps);
1711
1712         ovs_unlock();
1713
1714         ovs_notify(&dp_datapath_genl_family, reply, info);
1715         return 0;
1716
1717 err_destroy_meters:
1718         ovs_meters_exit(dp);
1719 err_destroy_ports:
1720         kfree(dp->ports);
1721 err_destroy_stats:
1722         free_percpu(dp->stats_percpu);
1723 err_destroy_table:
1724         ovs_flow_tbl_destroy(&dp->table);
1725 err_destroy_dp:
1726         kfree(dp);
1727 err_destroy_reply:
1728         kfree_skb(reply);
1729 err:
1730         return err;
1731 }
1732
1733 /* Called with ovs_mutex. */
1734 static void __dp_destroy(struct datapath *dp)
1735 {
1736         int i;
1737
1738         for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
1739                 struct vport *vport;
1740                 struct hlist_node *n;
1741
1742                 hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
1743                         if (vport->port_no != OVSP_LOCAL)
1744                                 ovs_dp_detach_port(vport);
1745         }
1746
1747         list_del_rcu(&dp->list_node);
1748
1749         /* OVSP_LOCAL is the datapath's internal port. Detach it last so that
1750          * every port has been destroyed before the datapath itself is freed.
1751          */
1752         ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
1753
1754         /* Destroy the flow table after an RCU grace period. */
1755         call_rcu(&dp->rcu, destroy_dp_rcu);
1756 }
1757
1758 static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1759 {
1760         struct sk_buff *reply;
1761         struct datapath *dp;
1762         int err;
1763
1764         reply = ovs_dp_cmd_alloc_info();
1765         if (!reply)
1766                 return -ENOMEM;
1767
1768         ovs_lock();
1769         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1770         err = PTR_ERR(dp);
1771         if (IS_ERR(dp))
1772                 goto err_unlock_free;
1773
1774         err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1775                                    info->snd_seq, 0, OVS_DP_CMD_DEL);
1776         BUG_ON(err < 0);
1777
1778         __dp_destroy(dp);
1779         ovs_unlock();
1780
1781         ovs_notify(&dp_datapath_genl_family, reply, info);
1782
1783         return 0;
1784
1785 err_unlock_free:
1786         ovs_unlock();
1787         kfree_skb(reply);
1788         return err;
1789 }
1790
1791 static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
1792 {
1793         struct sk_buff *reply;
1794         struct datapath *dp;
1795         int err;
1796
1797         reply = ovs_dp_cmd_alloc_info();
1798         if (!reply)
1799                 return -ENOMEM;
1800
1801         ovs_lock();
1802         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1803         err = PTR_ERR(dp);
1804         if (IS_ERR(dp))
1805                 goto err_unlock_free;
1806
1807         err = ovs_dp_change(dp, info->attrs);
1808         if (err)
1809                 goto err_unlock_free;
1810
1811         err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1812                                    info->snd_seq, 0, OVS_DP_CMD_SET);
1813         BUG_ON(err < 0);
1814
1815         ovs_unlock();
1816         ovs_notify(&dp_datapath_genl_family, reply, info);
1817
1818         return 0;
1819
1820 err_unlock_free:
1821         ovs_unlock();
1822         kfree_skb(reply);
1823         return err;
1824 }
1825
1826 static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1827 {
1828         struct sk_buff *reply;
1829         struct datapath *dp;
1830         int err;
1831
1832         reply = ovs_dp_cmd_alloc_info();
1833         if (!reply)
1834                 return -ENOMEM;
1835
1836         ovs_lock();
1837         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1838         if (IS_ERR(dp)) {
1839                 err = PTR_ERR(dp);
1840                 goto err_unlock_free;
1841         }
1842         err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1843                                    info->snd_seq, 0, OVS_DP_CMD_GET);
1844         BUG_ON(err < 0);
1845         ovs_unlock();
1846
1847         return genlmsg_reply(reply, info);
1848
1849 err_unlock_free:
1850         ovs_unlock();
1851         kfree_skb(reply);
1852         return err;
1853 }
1854
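/* Dump every datapath in the caller's network namespace.  cb->args[0]
 * holds the number of entries already emitted so a multi-part dump can
 * resume where the previous call stopped.
 */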
1855 static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1856 {
1857         struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
1858         struct datapath *dp;
1859         int skip = cb->args[0];
1860         int i = 0;
1861
1862         ovs_lock();
1863         list_for_each_entry(dp, &ovs_net->dps, list_node) {
1864                 if (i >= skip &&
1865                     ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
1866                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
1867                                          OVS_DP_CMD_GET) < 0)
1868                         break;
1869                 i++;
1870         }
1871         ovs_unlock();
1872
1873         cb->args[0] = i;
1874
1875         return skb->len;
1876 }
1877
1878 static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
1879         [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1880         [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1881         [OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
1882 };
1883
1884 static const struct genl_ops dp_datapath_genl_ops[] = {
1885         { .cmd = OVS_DP_CMD_NEW,
1886           .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1887           .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1888           .doit = ovs_dp_cmd_new
1889         },
1890         { .cmd = OVS_DP_CMD_DEL,
1891           .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1892           .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1893           .doit = ovs_dp_cmd_del
1894         },
1895         { .cmd = OVS_DP_CMD_GET,
1896           .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1897           .flags = 0,               /* OK for unprivileged users. */
1898           .doit = ovs_dp_cmd_get,
1899           .dumpit = ovs_dp_cmd_dump
1900         },
1901         { .cmd = OVS_DP_CMD_SET,
1902           .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1903           .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1904           .doit = ovs_dp_cmd_set,
1905         },
1906 };
1907
1908 static struct genl_family dp_datapath_genl_family __ro_after_init = {
1909         .hdrsize = sizeof(struct ovs_header),
1910         .name = OVS_DATAPATH_FAMILY,
1911         .version = OVS_DATAPATH_VERSION,
1912         .maxattr = OVS_DP_ATTR_MAX,
1913         .policy = datapath_policy,
1914         .netnsok = true,
1915         .parallel_ops = true,
1916         .ops = dp_datapath_genl_ops,
1917         .n_ops = ARRAY_SIZE(dp_datapath_genl_ops),
1918         .mcgrps = &ovs_dp_datapath_multicast_group,
1919         .n_mcgrps = 1,
1920         .module = THIS_MODULE,
1921 };
1922
1923 /* Called with ovs_mutex or RCU read lock. */
1924 static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
1925                                    struct net *net, u32 portid, u32 seq,
1926                                    u32 flags, u8 cmd, gfp_t gfp)
1927 {
1928         struct ovs_header *ovs_header;
1929         struct ovs_vport_stats vport_stats;
1930         int err;
1931
1932         ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
1933                                  flags, cmd);
1934         if (!ovs_header)
1935                 return -EMSGSIZE;
1936
1937         ovs_header->dp_ifindex = get_dpifindex(vport->dp);
1938
1939         if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
1940             nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
1941             nla_put_string(skb, OVS_VPORT_ATTR_NAME,
1942                            ovs_vport_name(vport)) ||
1943             nla_put_u32(skb, OVS_VPORT_ATTR_IFINDEX, vport->dev->ifindex))
1944                 goto nla_put_failure;
1945
1946         if (!net_eq(net, dev_net(vport->dev))) {
1947                 int id = peernet2id_alloc(net, dev_net(vport->dev), gfp);
1948
1949                 if (nla_put_s32(skb, OVS_VPORT_ATTR_NETNSID, id))
1950                         goto nla_put_failure;
1951         }
1952
1953         ovs_vport_get_stats(vport, &vport_stats);
1954         if (nla_put_64bit(skb, OVS_VPORT_ATTR_STATS,
1955                           sizeof(struct ovs_vport_stats), &vport_stats,
1956                           OVS_VPORT_ATTR_PAD))
1957                 goto nla_put_failure;
1958
1959         if (ovs_vport_get_upcall_portids(vport, skb))
1960                 goto nla_put_failure;
1961
1962         err = ovs_vport_get_options(vport, skb);
1963         if (err == -EMSGSIZE)
1964                 goto error;
1965
1966         genlmsg_end(skb, ovs_header);
1967         return 0;
1968
1969 nla_put_failure:
1970         err = -EMSGSIZE;
1971 error:
1972         genlmsg_cancel(skb, ovs_header);
1973         return err;
1974 }
1975
1976 static struct sk_buff *ovs_vport_cmd_alloc_info(void)
1977 {
1978         return nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1979 }
1980
1981 /* Called with ovs_mutex, only via ovs_dp_notify_wq(). */
1982 struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, struct net *net,
1983                                          u32 portid, u32 seq, u8 cmd)
1984 {
1985         struct sk_buff *skb;
1986         int retval;
1987
1988         skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1989         if (!skb)
1990                 return ERR_PTR(-ENOMEM);
1991
1992         retval = ovs_vport_cmd_fill_info(vport, skb, net, portid, seq, 0, cmd,
1993                                          GFP_KERNEL);
1994         BUG_ON(retval < 0);
1995
1996         return skb;
1997 }
1998
1999 /* Called with ovs_mutex or RCU read lock. */
2000 static struct vport *lookup_vport(struct net *net,
2001                                   const struct ovs_header *ovs_header,
2002                                   struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
2003 {
2004         struct datapath *dp;
2005         struct vport *vport;
2006
2007         if (a[OVS_VPORT_ATTR_IFINDEX])
2008                 return ERR_PTR(-EOPNOTSUPP);
2009         if (a[OVS_VPORT_ATTR_NAME]) {
2010                 vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
2011                 if (!vport)
2012                         return ERR_PTR(-ENODEV);
2013                 if (ovs_header->dp_ifindex &&
2014                     ovs_header->dp_ifindex != get_dpifindex(vport->dp))
2015                         return ERR_PTR(-ENODEV);
2016                 return vport;
2017         } else if (a[OVS_VPORT_ATTR_PORT_NO]) {
2018                 u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
2019
2020                 if (port_no >= DP_MAX_PORTS)
2021                         return ERR_PTR(-EFBIG);
2022
2023                 dp = get_dp(net, ovs_header->dp_ifindex);
2024                 if (!dp)
2025                         return ERR_PTR(-ENODEV);
2026
2027                 vport = ovs_vport_ovsl_rcu(dp, port_no);
2028                 if (!vport)
2029                         return ERR_PTR(-ENODEV);
2030                 return vport;
2031         } else
2032                 return ERR_PTR(-EINVAL);
2033
2034 }
2035
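/* Return the largest forwarding headroom required by any device attached
 * to @dp as a vport.
 */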
2036 static unsigned int ovs_get_max_headroom(struct datapath *dp)
2037 {
2038         unsigned int dev_headroom, max_headroom = 0;
2039         struct net_device *dev;
2040         struct vport *vport;
2041         int i;
2042
2043         for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
2044                 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
2045                         dev = vport->dev;
2046                         dev_headroom = netdev_get_fwd_headroom(dev);
2047                         if (dev_headroom > max_headroom)
2048                                 max_headroom = dev_headroom;
2049                 }
2050         }
2051
2052         return max_headroom;
2053 }
2054
2055 /* Called with ovs_mutex. */
2056 static void ovs_update_headroom(struct datapath *dp, unsigned int new_headroom)
2057 {
2058         struct vport *vport;
2059         int i;
2060
2061         dp->max_headroom = new_headroom;
2062         for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
2063                 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node)
2064                         netdev_set_rx_headroom(vport->dev, new_headroom);
2065 }
2066
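/* OVS_VPORT_CMD_NEW: attach a new vport to an existing datapath.  If the
 * request does not name a port number, the lowest free number from 1 is
 * used.  A -EAGAIN from new_vport() restarts the lookup under ovs_lock.
 */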
2067 static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
2068 {
2069         struct nlattr **a = info->attrs;
2070         struct ovs_header *ovs_header = info->userhdr;
2071         struct vport_parms parms;
2072         struct sk_buff *reply;
2073         struct vport *vport;
2074         struct datapath *dp;
2075         unsigned int new_headroom;
2076         u32 port_no;
2077         int err;
2078
2079         if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
2080             !a[OVS_VPORT_ATTR_UPCALL_PID])
2081                 return -EINVAL;
2082         if (a[OVS_VPORT_ATTR_IFINDEX])
2083                 return -EOPNOTSUPP;
2084
2085         port_no = a[OVS_VPORT_ATTR_PORT_NO]
2086                 ? nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]) : 0;
2087         if (port_no >= DP_MAX_PORTS)
2088                 return -EFBIG;
2089
2090         reply = ovs_vport_cmd_alloc_info();
2091         if (!reply)
2092                 return -ENOMEM;
2093
2094         ovs_lock();
2095 restart:
2096         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
2097         err = -ENODEV;
2098         if (!dp)
2099                 goto exit_unlock_free;
2100
2101         if (port_no) {
2102                 vport = ovs_vport_ovsl(dp, port_no);
2103                 err = -EBUSY;
2104                 if (vport)
2105                         goto exit_unlock_free;
2106         } else {
2107                 for (port_no = 1; ; port_no++) {
2108                         if (port_no >= DP_MAX_PORTS) {
2109                                 err = -EFBIG;
2110                                 goto exit_unlock_free;
2111                         }
2112                         vport = ovs_vport_ovsl(dp, port_no);
2113                         if (!vport)
2114                                 break;
2115                 }
2116         }
2117
2118         parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
2119         parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
2120         parms.options = a[OVS_VPORT_ATTR_OPTIONS];
2121         parms.dp = dp;
2122         parms.port_no = port_no;
2123         parms.upcall_portids = a[OVS_VPORT_ATTR_UPCALL_PID];
2124
2125         vport = new_vport(&parms);
2126         err = PTR_ERR(vport);
2127         if (IS_ERR(vport)) {
2128                 if (err == -EAGAIN)
2129                         goto restart;
2130                 goto exit_unlock_free;
2131         }
2132
2133         err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
2134                                       info->snd_portid, info->snd_seq, 0,
2135                                       OVS_VPORT_CMD_NEW, GFP_KERNEL);
2136
2137         new_headroom = netdev_get_fwd_headroom(vport->dev);
2138
2139         if (new_headroom > dp->max_headroom)
2140                 ovs_update_headroom(dp, new_headroom);
2141         else
2142                 netdev_set_rx_headroom(vport->dev, dp->max_headroom);
2143
2144         BUG_ON(err < 0);
2145         ovs_unlock();
2146
2147         ovs_notify(&dp_vport_genl_family, reply, info);
2148         return 0;
2149
2150 exit_unlock_free:
2151         ovs_unlock();
2152         kfree_skb(reply);
2153         return err;
2154 }
2155
2156 static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
2157 {
2158         struct nlattr **a = info->attrs;
2159         struct sk_buff *reply;
2160         struct vport *vport;
2161         int err;
2162
2163         reply = ovs_vport_cmd_alloc_info();
2164         if (!reply)
2165                 return -ENOMEM;
2166
2167         ovs_lock();
2168         vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
2169         err = PTR_ERR(vport);
2170         if (IS_ERR(vport))
2171                 goto exit_unlock_free;
2172
2173         if (a[OVS_VPORT_ATTR_TYPE] &&
2174             nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) {
2175                 err = -EINVAL;
2176                 goto exit_unlock_free;
2177         }
2178
2179         if (a[OVS_VPORT_ATTR_OPTIONS]) {
2180                 err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
2181                 if (err)
2182                         goto exit_unlock_free;
2183         }
2184
2185
2186         if (a[OVS_VPORT_ATTR_UPCALL_PID]) {
2187                 struct nlattr *ids = a[OVS_VPORT_ATTR_UPCALL_PID];
2188
2189                 err = ovs_vport_set_upcall_portids(vport, ids);
2190                 if (err)
2191                         goto exit_unlock_free;
2192         }
2193
2194         err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
2195                                       info->snd_portid, info->snd_seq, 0,
2196                                       OVS_VPORT_CMD_SET, GFP_KERNEL);
2197         BUG_ON(err < 0);
2198
2199         ovs_unlock();
2200         ovs_notify(&dp_vport_genl_family, reply, info);
2201         return 0;
2202
2203 exit_unlock_free:
2204         ovs_unlock();
2205         kfree_skb(reply);
2206         return err;
2207 }
2208
2209 static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
2210 {
2211         bool update_headroom = false;
2212         struct nlattr **a = info->attrs;
2213         struct sk_buff *reply;
2214         struct datapath *dp;
2215         struct vport *vport;
2216         unsigned int new_headroom;
2217         int err;
2218
2219         reply = ovs_vport_cmd_alloc_info();
2220         if (!reply)
2221                 return -ENOMEM;
2222
2223         ovs_lock();
2224         vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
2225         err = PTR_ERR(vport);
2226         if (IS_ERR(vport))
2227                 goto exit_unlock_free;
2228
2229         if (vport->port_no == OVSP_LOCAL) {
2230                 err = -EINVAL;
2231                 goto exit_unlock_free;
2232         }
2233
2234         err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
2235                                       info->snd_portid, info->snd_seq, 0,
2236                                       OVS_VPORT_CMD_DEL, GFP_KERNEL);
2237         BUG_ON(err < 0);
2238
2239         /* Deleting the vport may require updating the datapath's max headroom. */
2240         dp = vport->dp;
2241         if (netdev_get_fwd_headroom(vport->dev) == dp->max_headroom)
2242                 update_headroom = true;
2243
2244         netdev_reset_rx_headroom(vport->dev);
2245         ovs_dp_detach_port(vport);
2246
2247         if (update_headroom) {
2248                 new_headroom = ovs_get_max_headroom(dp);
2249
2250                 if (new_headroom < dp->max_headroom)
2251                         ovs_update_headroom(dp, new_headroom);
2252         }
2253         ovs_unlock();
2254
2255         ovs_notify(&dp_vport_genl_family, reply, info);
2256         return 0;
2257
2258 exit_unlock_free:
2259         ovs_unlock();
2260         kfree_skb(reply);
2261         return err;
2262 }
2263
2264 static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
2265 {
2266         struct nlattr **a = info->attrs;
2267         struct ovs_header *ovs_header = info->userhdr;
2268         struct sk_buff *reply;
2269         struct vport *vport;
2270         int err;
2271
2272         reply = ovs_vport_cmd_alloc_info();
2273         if (!reply)
2274                 return -ENOMEM;
2275
2276         rcu_read_lock();
2277         vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
2278         err = PTR_ERR(vport);
2279         if (IS_ERR(vport))
2280                 goto exit_unlock_free;
2281         err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
2282                                       info->snd_portid, info->snd_seq, 0,
2283                                       OVS_VPORT_CMD_GET, GFP_ATOMIC);
2284         BUG_ON(err < 0);
2285         rcu_read_unlock();
2286
2287         return genlmsg_reply(reply, info);
2288
2289 exit_unlock_free:
2290         rcu_read_unlock();
2291         kfree_skb(reply);
2292         return err;
2293 }
2294
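/* Dump all vports of one datapath.  cb->args[] records the hash bucket
 * and the index within that bucket of the next vport to emit, so the
 * dump can resume across calls.
 */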
2295 static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
2296 {
2297         struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
2298         struct datapath *dp;
2299         int bucket = cb->args[0], skip = cb->args[1];
2300         int i, j = 0;
2301
2302         rcu_read_lock();
2303         dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
2304         if (!dp) {
2305                 rcu_read_unlock();
2306                 return -ENODEV;
2307         }
2308         for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
2309                 struct vport *vport;
2310
2311                 j = 0;
2312                 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
2313                         if (j >= skip &&
2314                             ovs_vport_cmd_fill_info(vport, skb,
2315                                                     sock_net(skb->sk),
2316                                                     NETLINK_CB(cb->skb).portid,
2317                                                     cb->nlh->nlmsg_seq,
2318                                                     NLM_F_MULTI,
2319                                                     OVS_VPORT_CMD_GET,
2320                                                     GFP_ATOMIC) < 0)
2321                                 goto out;
2322
2323                         j++;
2324                 }
2325                 skip = 0;
2326         }
2327 out:
2328         rcu_read_unlock();
2329
2330         cb->args[0] = i;
2331         cb->args[1] = j;
2332
2333         return skb->len;
2334 }
2335
2336 static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
2337         [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
2338         [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
2339         [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
2340         [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
2341         [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_UNSPEC },
2342         [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
2343         [OVS_VPORT_ATTR_IFINDEX] = { .type = NLA_U32 },
2344         [OVS_VPORT_ATTR_NETNSID] = { .type = NLA_S32 },
2345 };
2346
2347 static const struct genl_ops dp_vport_genl_ops[] = {
2348         { .cmd = OVS_VPORT_CMD_NEW,
2349           .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2350           .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2351           .doit = ovs_vport_cmd_new
2352         },
2353         { .cmd = OVS_VPORT_CMD_DEL,
2354           .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2355           .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2356           .doit = ovs_vport_cmd_del
2357         },
2358         { .cmd = OVS_VPORT_CMD_GET,
2359           .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2360           .flags = 0,               /* OK for unprivileged users. */
2361           .doit = ovs_vport_cmd_get,
2362           .dumpit = ovs_vport_cmd_dump
2363         },
2364         { .cmd = OVS_VPORT_CMD_SET,
2365           .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2366           .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2367           .doit = ovs_vport_cmd_set,
2368         },
2369 };
2370
2371 struct genl_family dp_vport_genl_family __ro_after_init = {
2372         .hdrsize = sizeof(struct ovs_header),
2373         .name = OVS_VPORT_FAMILY,
2374         .version = OVS_VPORT_VERSION,
2375         .maxattr = OVS_VPORT_ATTR_MAX,
2376         .policy = vport_policy,
2377         .netnsok = true,
2378         .parallel_ops = true,
2379         .ops = dp_vport_genl_ops,
2380         .n_ops = ARRAY_SIZE(dp_vport_genl_ops),
2381         .mcgrps = &ovs_dp_vport_multicast_group,
2382         .n_mcgrps = 1,
2383         .module = THIS_MODULE,
2384 };
2385
2386 static struct genl_family * const dp_genl_families[] = {
2387         &dp_datapath_genl_family,
2388         &dp_vport_genl_family,
2389         &dp_flow_genl_family,
2390         &dp_packet_genl_family,
2391         &dp_meter_genl_family,
2392 #if     IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
2393         &dp_ct_limit_genl_family,
2394 #endif
2395 };
2396
2397 static void dp_unregister_genl(int n_families)
2398 {
2399         int i;
2400
2401         for (i = 0; i < n_families; i++)
2402                 genl_unregister_family(dp_genl_families[i]);
2403 }
2404
2405 static int __init dp_register_genl(void)
2406 {
2407         int err;
2408         int i;
2409
2410         for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
2411
2412                 err = genl_register_family(dp_genl_families[i]);
2413                 if (err)
2414                         goto error;
2415         }
2416
2417         return 0;
2418
2419 error:
2420         dp_unregister_genl(i);
2421         return err;
2422 }
2423
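/* Per-netns setup: initialise the datapath list and the dp_notify_work
 * item, then let ovs_ct_init() set up conntrack state for this netns.
 */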
2424 static int __net_init ovs_init_net(struct net *net)
2425 {
2426         struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2427
2428         INIT_LIST_HEAD(&ovs_net->dps);
2429         INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
2430         return ovs_ct_init(net);
2431 }
2432
2433 static void __net_exit list_vports_from_net(struct net *net, struct net *dnet,
2434                                             struct list_head *head)
2435 {
2436         struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2437         struct datapath *dp;
2438
2439         list_for_each_entry(dp, &ovs_net->dps, list_node) {
2440                 int i;
2441
2442                 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
2443                         struct vport *vport;
2444
2445                         hlist_for_each_entry(vport, &dp->ports[i], dp_hash_node) {
2446                                 if (vport->ops->type != OVS_VPORT_TYPE_INTERNAL)
2447                                         continue;
2448
2449                                 if (dev_net(vport->dev) == dnet)
2450                                         list_add(&vport->detach_list, head);
2451                         }
2452                 }
2453         }
2454 }
2455
2456 static void __net_exit ovs_exit_net(struct net *dnet)
2457 {
2458         struct datapath *dp, *dp_next;
2459         struct ovs_net *ovs_net = net_generic(dnet, ovs_net_id);
2460         struct vport *vport, *vport_next;
2461         struct net *net;
2462         LIST_HEAD(head);
2463
2464         ovs_ct_exit(dnet);
2465         ovs_lock();
2466         list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
2467                 __dp_destroy(dp);
2468
2469         down_read(&net_rwsem);
2470         for_each_net(net)
2471                 list_vports_from_net(net, dnet, &head);
2472         up_read(&net_rwsem);
2473
2474         /* Detach all vports from the given namespace. */
2475         list_for_each_entry_safe(vport, vport_next, &head, detach_list) {
2476                 list_del(&vport->detach_list);
2477                 ovs_dp_detach_port(vport);
2478         }
2479
2480         ovs_unlock();
2481
2482         cancel_work_sync(&ovs_net->dp_notify_work);
2483 }
2484
2485 static struct pernet_operations ovs_net_ops = {
2486         .init = ovs_init_net,
2487         .exit = ovs_exit_net,
2488         .id   = &ovs_net_id,
2489         .size = sizeof(struct ovs_net),
2490 };
2491
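/* Module init: register the subsystems in dependency order (action FIFOs,
 * internal-dev rtnl link, flow and vport layers, pernet ops, the netdev
 * notifier, netdev vport support and finally the genetlink families),
 * unwinding in reverse order on failure.
 */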
2492 static int __init dp_init(void)
2493 {
2494         int err;
2495
2496         BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
2497
2498         pr_info("Open vSwitch switching datapath\n");
2499
2500         err = action_fifos_init();
2501         if (err)
2502                 goto error;
2503
2504         err = ovs_internal_dev_rtnl_link_register();
2505         if (err)
2506                 goto error_action_fifos_exit;
2507
2508         err = ovs_flow_init();
2509         if (err)
2510                 goto error_unreg_rtnl_link;
2511
2512         err = ovs_vport_init();
2513         if (err)
2514                 goto error_flow_exit;
2515
2516         err = register_pernet_device(&ovs_net_ops);
2517         if (err)
2518                 goto error_vport_exit;
2519
2520         err = register_netdevice_notifier(&ovs_dp_device_notifier);
2521         if (err)
2522                 goto error_netns_exit;
2523
2524         err = ovs_netdev_init();
2525         if (err)
2526                 goto error_unreg_notifier;
2527
2528         err = dp_register_genl();
2529         if (err < 0)
2530                 goto error_unreg_netdev;
2531
2532         return 0;
2533
2534 error_unreg_netdev:
2535         ovs_netdev_exit();
2536 error_unreg_notifier:
2537         unregister_netdevice_notifier(&ovs_dp_device_notifier);
2538 error_netns_exit:
2539         unregister_pernet_device(&ovs_net_ops);
2540 error_vport_exit:
2541         ovs_vport_exit();
2542 error_flow_exit:
2543         ovs_flow_exit();
2544 error_unreg_rtnl_link:
2545         ovs_internal_dev_rtnl_link_unregister();
2546 error_action_fifos_exit:
2547         action_fifos_exit();
2548 error:
2549         return err;
2550 }
2551
2552 static void dp_cleanup(void)
2553 {
2554         dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
2555         ovs_netdev_exit();
2556         unregister_netdevice_notifier(&ovs_dp_device_notifier);
2557         unregister_pernet_device(&ovs_net_ops);
2558         rcu_barrier();
2559         ovs_vport_exit();
2560         ovs_flow_exit();
2561         ovs_internal_dev_rtnl_link_unregister();
2562         action_fifos_exit();
2563 }
2564
2565 module_init(dp_init);
2566 module_exit(dp_cleanup);
2567
2568 MODULE_DESCRIPTION("Open vSwitch switching datapath");
2569 MODULE_LICENSE("GPL");
2570 MODULE_ALIAS_GENL_FAMILY(OVS_DATAPATH_FAMILY);
2571 MODULE_ALIAS_GENL_FAMILY(OVS_VPORT_FAMILY);
2572 MODULE_ALIAS_GENL_FAMILY(OVS_FLOW_FAMILY);
2573 MODULE_ALIAS_GENL_FAMILY(OVS_PACKET_FAMILY);
2574 MODULE_ALIAS_GENL_FAMILY(OVS_METER_FAMILY);
2575 MODULE_ALIAS_GENL_FAMILY(OVS_CT_LIMIT_FAMILY);