sagit-ice-cold/kernel_xiaomi_msm8998.git @ 52bb4213390fb0a0338cfc01b932353d9add5438
net/openvswitch/datapath.c
1 /*
2  * Copyright (c) 2007-2014 Nicira, Inc.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of version 2 of the GNU General Public
6  * License as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public License
14  * along with this program; if not, write to the Free Software
15  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16  * 02110-1301, USA
17  */
18
19 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20
21 #include <linux/init.h>
22 #include <linux/module.h>
23 #include <linux/if_arp.h>
24 #include <linux/if_vlan.h>
25 #include <linux/in.h>
26 #include <linux/ip.h>
27 #include <linux/jhash.h>
28 #include <linux/delay.h>
29 #include <linux/time.h>
30 #include <linux/etherdevice.h>
31 #include <linux/genetlink.h>
32 #include <linux/kernel.h>
33 #include <linux/kthread.h>
34 #include <linux/mutex.h>
35 #include <linux/percpu.h>
36 #include <linux/rcupdate.h>
37 #include <linux/tcp.h>
38 #include <linux/udp.h>
39 #include <linux/ethtool.h>
40 #include <linux/wait.h>
41 #include <asm/div64.h>
42 #include <linux/highmem.h>
43 #include <linux/netfilter_bridge.h>
44 #include <linux/netfilter_ipv4.h>
45 #include <linux/inetdevice.h>
46 #include <linux/list.h>
47 #include <linux/openvswitch.h>
48 #include <linux/rculist.h>
49 #include <linux/dmi.h>
50 #include <net/genetlink.h>
51 #include <net/net_namespace.h>
52 #include <net/netns/generic.h>
53
54 #include "datapath.h"
55 #include "flow.h"
56 #include "flow_table.h"
57 #include "flow_netlink.h"
58 #include "vport-internal_dev.h"
59 #include "vport-netdev.h"
60
61 int ovs_net_id __read_mostly;
62 EXPORT_SYMBOL_GPL(ovs_net_id);
63
64 static struct genl_family dp_packet_genl_family;
65 static struct genl_family dp_flow_genl_family;
66 static struct genl_family dp_datapath_genl_family;
67
68 static const struct nla_policy flow_policy[];
69
70 static const struct genl_multicast_group ovs_dp_flow_multicast_group = {
71         .name = OVS_FLOW_MCGROUP,
72 };
73
74 static const struct genl_multicast_group ovs_dp_datapath_multicast_group = {
75         .name = OVS_DATAPATH_MCGROUP,
76 };
77
78 static const struct genl_multicast_group ovs_dp_vport_multicast_group = {
79         .name = OVS_VPORT_MCGROUP,
80 };
81
82 /* Check whether we need to build a reply message.
83  * OVS userspace sets the NLM_F_ECHO flag if it needs the reply. */
84 static bool ovs_must_notify(struct genl_family *family, struct genl_info *info,
85                             unsigned int group)
86 {
87         return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
88                genl_has_listeners(family, genl_info_net(info), group);
89 }
90
91 static void ovs_notify(struct genl_family *family,
92                        struct sk_buff *skb, struct genl_info *info)
93 {
94         genl_notify(family, skb, info, 0, GFP_KERNEL);
95 }
96
97 /**
98  * DOC: Locking:
99  *
100  * All writes, e.g. writes to device state (add/remove datapath, port, set
101  * operations on vports, etc.) and writes to other state (flow table
102  * modifications, setting miscellaneous datapath parameters, etc.), are
103  * protected by ovs_lock.
104  *
105  * Reads are protected by RCU.
106  *
107  * There are a few special cases (mostly stats) that have their own
108  * synchronization, but they nest under all of the above and don't interact
109  * with each other.
110  *
111  * The RTNL lock nests inside ovs_mutex.
112  */
113
114 static DEFINE_MUTEX(ovs_mutex);
115
116 void ovs_lock(void)
117 {
118         mutex_lock(&ovs_mutex);
119 }
120
121 void ovs_unlock(void)
122 {
123         mutex_unlock(&ovs_mutex);
124 }
125
126 #ifdef CONFIG_LOCKDEP
127 int lockdep_ovsl_is_held(void)
128 {
129         if (debug_locks)
130                 return lockdep_is_held(&ovs_mutex);
131         else
132                 return 1;
133 }
134 EXPORT_SYMBOL_GPL(lockdep_ovsl_is_held);
135 #endif
136
137 static struct vport *new_vport(const struct vport_parms *);
138 static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
139                              const struct sw_flow_key *,
140                              const struct dp_upcall_info *);
141 static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
142                                   const struct sw_flow_key *,
143                                   const struct dp_upcall_info *);
144
145 /* Must be called with rcu_read_lock. */
146 static struct datapath *get_dp_rcu(struct net *net, int dp_ifindex)
147 {
148         struct net_device *dev = dev_get_by_index_rcu(net, dp_ifindex);
149
150         if (dev) {
151                 struct vport *vport = ovs_internal_dev_get_vport(dev);
152                 if (vport)
153                         return vport->dp;
154         }
155
156         return NULL;
157 }
158
159 /* The caller must hold either ovs_mutex or rcu_read_lock to keep the
160  * returned dp pointer valid.
161  */
162 static inline struct datapath *get_dp(struct net *net, int dp_ifindex)
163 {
164         struct datapath *dp;
165
166         WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_ovsl_is_held());
167         rcu_read_lock();
168         dp = get_dp_rcu(net, dp_ifindex);
169         rcu_read_unlock();
170
171         return dp;
172 }
173
174 /* Must be called with rcu_read_lock or ovs_mutex. */
175 const char *ovs_dp_name(const struct datapath *dp)
176 {
177         struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
178         return ovs_vport_name(vport);
179 }
180
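/* Return the ifindex of the datapath's local (OVSP_LOCAL) port, or 0 if the
 * local port is absent.  Takes the RCU read lock internally.
 */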
181 static int get_dpifindex(const struct datapath *dp)
182 {
183         struct vport *local;
184         int ifindex;
185
186         rcu_read_lock();
187
188         local = ovs_vport_rcu(dp, OVSP_LOCAL);
189         if (local)
190                 ifindex = local->dev->ifindex;
191         else
192                 ifindex = 0;
193
194         rcu_read_unlock();
195
196         return ifindex;
197 }
198
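/* RCU callback that tears down a datapath after the grace period: destroys
 * the flow table and frees the per-CPU stats, the port table and the
 * datapath itself.
 */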
199 static void destroy_dp_rcu(struct rcu_head *rcu)
200 {
201         struct datapath *dp = container_of(rcu, struct datapath, rcu);
202
203         ovs_flow_tbl_destroy(&dp->table);
204         free_percpu(dp->stats_percpu);
205         kfree(dp->ports);
206         kfree(dp);
207 }
208
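/* Map a port number to its bucket in the datapath's vport hash table. */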
209 static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
210                                             u16 port_no)
211 {
212         return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
213 }
214
215 /* Called with ovs_mutex or RCU read lock. */
216 struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
217 {
218         struct vport *vport;
219         struct hlist_head *head;
220
221         head = vport_hash_bucket(dp, port_no);
222         hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
223                 if (vport->port_no == port_no)
224                         return vport;
225         }
226         return NULL;
227 }
228
229 /* Called with ovs_mutex. */
230 static struct vport *new_vport(const struct vport_parms *parms)
231 {
232         struct vport *vport;
233
234         vport = ovs_vport_add(parms);
235         if (!IS_ERR(vport)) {
236                 struct datapath *dp = parms->dp;
237                 struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);
238
239                 hlist_add_head_rcu(&vport->dp_hash_node, head);
240         }
241         return vport;
242 }
243
244 void ovs_dp_detach_port(struct vport *p)
245 {
246         ASSERT_OVSL();
247
248         /* First drop references to device. */
249         hlist_del_rcu(&p->dp_hash_node);
250
251         /* Then destroy it. */
252         ovs_vport_del(p);
253 }
254
255 /* Must be called with rcu_read_lock. */
256 void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
257 {
258         const struct vport *p = OVS_CB(skb)->input_vport;
259         struct datapath *dp = p->dp;
260         struct sw_flow *flow;
261         struct sw_flow_actions *sf_acts;
262         struct dp_stats_percpu *stats;
263         u64 *stats_counter;
264         u32 n_mask_hit;
265
266         stats = this_cpu_ptr(dp->stats_percpu);
267
268         /* Look up flow. */
269         flow = ovs_flow_tbl_lookup_stats(&dp->table, key, &n_mask_hit);
270         if (unlikely(!flow)) {
271                 struct dp_upcall_info upcall;
272                 int error;
273
274                 memset(&upcall, 0, sizeof(upcall));
275                 upcall.cmd = OVS_PACKET_CMD_MISS;
276                 upcall.portid = ovs_vport_find_upcall_portid(p, skb);
277                 upcall.mru = OVS_CB(skb)->mru;
278                 error = ovs_dp_upcall(dp, skb, key, &upcall);
279                 if (unlikely(error))
280                         kfree_skb(skb);
281                 else
282                         consume_skb(skb);
283                 stats_counter = &stats->n_missed;
284                 goto out;
285         }
286
287         ovs_flow_stats_update(flow, key->tp.flags, skb);
288         sf_acts = rcu_dereference(flow->sf_acts);
289         ovs_execute_actions(dp, skb, sf_acts, key);
290
291         stats_counter = &stats->n_hit;
292
293 out:
294         /* Update datapath statistics. */
295         u64_stats_update_begin(&stats->syncp);
296         (*stats_counter)++;
297         stats->n_mask_hit += n_mask_hit;
298         u64_stats_update_end(&stats->syncp);
299 }
300
301 int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
302                   const struct sw_flow_key *key,
303                   const struct dp_upcall_info *upcall_info)
304 {
305         struct dp_stats_percpu *stats;
306         int err;
307
308         if (upcall_info->portid == 0) {
309                 err = -ENOTCONN;
310                 goto err;
311         }
312
313         if (!skb_is_gso(skb))
314                 err = queue_userspace_packet(dp, skb, key, upcall_info);
315         else
316                 err = queue_gso_packets(dp, skb, key, upcall_info);
317         if (err)
318                 goto err;
319
320         return 0;
321
322 err:
323         stats = this_cpu_ptr(dp->stats_percpu);
324
325         u64_stats_update_begin(&stats->syncp);
326         stats->n_lost++;
327         u64_stats_update_end(&stats->syncp);
328
329         return err;
330 }
331
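/* Segment a GSO skb in software and queue each resulting segment to
 * userspace individually.  For UDP fragmentation, segments after the first
 * are re-keyed as later fragments before the upcall.
 */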
332 static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
333                              const struct sw_flow_key *key,
334                              const struct dp_upcall_info *upcall_info)
335 {
336         unsigned short gso_type = skb_shinfo(skb)->gso_type;
337         struct sw_flow_key later_key;
338         struct sk_buff *segs, *nskb;
339         int err;
340
341         BUILD_BUG_ON(sizeof(*OVS_CB(skb)) > SKB_SGO_CB_OFFSET);
342         segs = __skb_gso_segment(skb, NETIF_F_SG, false);
343         if (IS_ERR(segs))
344                 return PTR_ERR(segs);
345         if (segs == NULL)
346                 return -EINVAL;
347
348         if (gso_type & SKB_GSO_UDP) {
349                 /* The initial flow key extracted by ovs_flow_key_extract()
350                  * in this case is for a first fragment, so we need to
351                  * properly mark later fragments.
352                  */
353                 later_key = *key;
354                 later_key.ip.frag = OVS_FRAG_TYPE_LATER;
355         }
356
357         /* Queue all of the segments. */
358         skb = segs;
359         do {
360                 if (gso_type & SKB_GSO_UDP && skb != segs)
361                         key = &later_key;
362
363                 err = queue_userspace_packet(dp, skb, key, upcall_info);
364                 if (err)
365                         break;
366
367         } while ((skb = skb->next));
368
369         /* Free all of the segments. */
370         skb = segs;
371         do {
372                 nskb = skb->next;
373                 if (err)
374                         kfree_skb(skb);
375                 else
376                         consume_skb(skb);
377         } while ((skb = nskb));
378         return err;
379 }
380
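/* Upper bound on the Netlink message size needed for an upcall, given the
 * number of packet bytes ('hdrlen') that will be copied linearly.
 */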
381 static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
382                               unsigned int hdrlen)
383 {
384         size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
385                 + nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
386                 + nla_total_size(ovs_key_attr_size()); /* OVS_PACKET_ATTR_KEY */
387
388         /* OVS_PACKET_ATTR_USERDATA */
389         if (upcall_info->userdata)
390                 size += NLA_ALIGN(upcall_info->userdata->nla_len);
391
392         /* OVS_PACKET_ATTR_EGRESS_TUN_KEY */
393         if (upcall_info->egress_tun_info)
394                 size += nla_total_size(ovs_tun_key_attr_size());
395
396         /* OVS_PACKET_ATTR_ACTIONS */
397         if (upcall_info->actions_len)
398                 size += nla_total_size(upcall_info->actions_len);
399
400         /* OVS_PACKET_ATTR_MRU */
401         if (upcall_info->mru)
402                 size += nla_total_size(sizeof(upcall_info->mru));
403
404         return size;
405 }
406
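/* Zero-pad the message to the next NLA_ALIGNTO boundary, unless userspace
 * has declared (OVS_DP_F_UNALIGNED) that it accepts unaligned attributes.
 */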
407 static void pad_packet(struct datapath *dp, struct sk_buff *skb)
408 {
409         if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
410                 size_t plen = NLA_ALIGN(skb->len) - skb->len;
411
412                 if (plen > 0)
413                         memset(skb_put(skb, plen), 0, plen);
414         }
415 }
416
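/* Build the Netlink upcall message for a single (non-GSO) packet and
 * unicast it to the portid selected for the input vport.
 */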
417 static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
418                                   const struct sw_flow_key *key,
419                                   const struct dp_upcall_info *upcall_info)
420 {
421         struct ovs_header *upcall;
422         struct sk_buff *nskb = NULL;
423         struct sk_buff *user_skb = NULL; /* to be queued to userspace */
424         struct nlattr *nla;
425         struct genl_info info = {
426                 .dst_sk = ovs_dp_get_net(dp)->genl_sock,
427                 .snd_portid = upcall_info->portid,
428         };
429         size_t len;
430         unsigned int hlen;
431         int err, dp_ifindex;
432
433         dp_ifindex = get_dpifindex(dp);
434         if (!dp_ifindex)
435                 return -ENODEV;
436
437         if (skb_vlan_tag_present(skb)) {
438                 nskb = skb_clone(skb, GFP_ATOMIC);
439                 if (!nskb)
440                         return -ENOMEM;
441
442                 nskb = __vlan_hwaccel_push_inside(nskb);
443                 if (!nskb)
444                         return -ENOMEM;
445
446                 skb = nskb;
447         }
448
449         if (nla_attr_size(skb->len) > USHRT_MAX) {
450                 err = -EFBIG;
451                 goto out;
452         }
453
454         /* Complete checksum if needed */
455         if (skb->ip_summed == CHECKSUM_PARTIAL &&
456             (err = skb_checksum_help(skb)))
457                 goto out;
458
459         /* Older versions of OVS user space enforce alignment of the last
460          * Netlink attribute to NLA_ALIGNTO which would require extensive
461          * padding logic. Only perform zerocopy if padding is not required.
462          */
463         if (dp->user_features & OVS_DP_F_UNALIGNED)
464                 hlen = skb_zerocopy_headlen(skb);
465         else
466                 hlen = skb->len;
467
468         len = upcall_msg_size(upcall_info, hlen);
469         user_skb = genlmsg_new_unicast(len, &info, GFP_ATOMIC);
470         if (!user_skb) {
471                 err = -ENOMEM;
472                 goto out;
473         }
474
475         upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
476                              0, upcall_info->cmd);
477         upcall->dp_ifindex = dp_ifindex;
478
479         err = ovs_nla_put_key(key, key, OVS_PACKET_ATTR_KEY, false, user_skb);
480         BUG_ON(err);
481
482         if (upcall_info->userdata)
483                 __nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
484                           nla_len(upcall_info->userdata),
485                           nla_data(upcall_info->userdata));
486
487         if (upcall_info->egress_tun_info) {
488                 nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_EGRESS_TUN_KEY);
489                 err = ovs_nla_put_tunnel_info(user_skb,
490                                               upcall_info->egress_tun_info);
491                 BUG_ON(err);
492                 nla_nest_end(user_skb, nla);
493         }
494
495         if (upcall_info->actions_len) {
496                 nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_ACTIONS);
497                 err = ovs_nla_put_actions(upcall_info->actions,
498                                           upcall_info->actions_len,
499                                           user_skb);
500                 if (!err)
501                         nla_nest_end(user_skb, nla);
502                 else
503                         nla_nest_cancel(user_skb, nla);
504         }
505
506         /* Add OVS_PACKET_ATTR_MRU */
507         if (upcall_info->mru) {
508                 if (nla_put_u16(user_skb, OVS_PACKET_ATTR_MRU,
509                                 upcall_info->mru)) {
510                         err = -ENOBUFS;
511                         goto out;
512                 }
513                 pad_packet(dp, user_skb);
514         }
515
516         /* Only reserve room for the attribute header; packet data is added
517          * in skb_zerocopy(). */
518         if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
519                 err = -ENOBUFS;
520                 goto out;
521         }
522         nla->nla_len = nla_attr_size(skb->len);
523
524         err = skb_zerocopy(user_skb, skb, skb->len, hlen);
525         if (err)
526                 goto out;
527
528         /* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
529         pad_packet(dp, user_skb);
530
531         ((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;
532
533         err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
534         user_skb = NULL;
535 out:
536         if (err)
537                 skb_tx_error(skb);
538         kfree_skb(user_skb);
539         kfree_skb(nskb);
540         return err;
541 }
542
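/* OVS_PACKET_CMD_EXECUTE handler: reconstruct a packet from the
 * userspace-supplied attributes, build a temporary flow for it and run the
 * supplied actions as if the packet had arrived on the given input vport.
 */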
543 static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
544 {
545         struct ovs_header *ovs_header = info->userhdr;
546         struct net *net = sock_net(skb->sk);
547         struct nlattr **a = info->attrs;
548         struct sw_flow_actions *acts;
549         struct sk_buff *packet;
550         struct sw_flow *flow;
551         struct sw_flow_actions *sf_acts;
552         struct datapath *dp;
553         struct ethhdr *eth;
554         struct vport *input_vport;
555         u16 mru = 0;
556         int len;
557         int err;
558         bool log = !a[OVS_PACKET_ATTR_PROBE];
559
560         err = -EINVAL;
561         if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
562             !a[OVS_PACKET_ATTR_ACTIONS])
563                 goto err;
564
565         len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
566         packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
567         err = -ENOMEM;
568         if (!packet)
569                 goto err;
570         skb_reserve(packet, NET_IP_ALIGN);
571
572         nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);
573
574         skb_reset_mac_header(packet);
575         eth = eth_hdr(packet);
576
577         /* Normally, setting the skb 'protocol' field would be handled by a
578          * call to eth_type_trans(), but it assumes there's a sending
579          * device, which we may not have. */
580         if (eth_proto_is_802_3(eth->h_proto))
581                 packet->protocol = eth->h_proto;
582         else
583                 packet->protocol = htons(ETH_P_802_2);
584
585         /* Set packet's mru */
586         if (a[OVS_PACKET_ATTR_MRU]) {
587                 mru = nla_get_u16(a[OVS_PACKET_ATTR_MRU]);
588                 packet->ignore_df = 1;
589         }
590         OVS_CB(packet)->mru = mru;
591
592         /* Build an sw_flow for sending this packet. */
593         flow = ovs_flow_alloc();
594         err = PTR_ERR(flow);
595         if (IS_ERR(flow))
596                 goto err_kfree_skb;
597
598         err = ovs_flow_key_extract_userspace(net, a[OVS_PACKET_ATTR_KEY],
599                                              packet, &flow->key, log);
600         if (err)
601                 goto err_flow_free;
602
603         err = ovs_nla_copy_actions(net, a[OVS_PACKET_ATTR_ACTIONS],
604                                    &flow->key, &acts, log);
605         if (err)
606                 goto err_flow_free;
607
608         rcu_assign_pointer(flow->sf_acts, acts);
609         packet->priority = flow->key.phy.priority;
610         packet->mark = flow->key.phy.skb_mark;
611
612         rcu_read_lock();
613         dp = get_dp_rcu(net, ovs_header->dp_ifindex);
614         err = -ENODEV;
615         if (!dp)
616                 goto err_unlock;
617
618         input_vport = ovs_vport_rcu(dp, flow->key.phy.in_port);
619         if (!input_vport)
620                 input_vport = ovs_vport_rcu(dp, OVSP_LOCAL);
621
622         if (!input_vport)
623                 goto err_unlock;
624
625         packet->dev = input_vport->dev;
626         OVS_CB(packet)->input_vport = input_vport;
627         sf_acts = rcu_dereference(flow->sf_acts);
628
629         local_bh_disable();
630         err = ovs_execute_actions(dp, packet, sf_acts, &flow->key);
631         local_bh_enable();
632         rcu_read_unlock();
633
634         ovs_flow_free(flow, false);
635         return err;
636
637 err_unlock:
638         rcu_read_unlock();
639 err_flow_free:
640         ovs_flow_free(flow, false);
641 err_kfree_skb:
642         kfree_skb(packet);
643 err:
644         return err;
645 }
646
647 static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
648         [OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
649         [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
650         [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
651         [OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG },
652         [OVS_PACKET_ATTR_MRU] = { .type = NLA_U16 },
653 };
654
655 static const struct genl_ops dp_packet_genl_ops[] = {
656         { .cmd = OVS_PACKET_CMD_EXECUTE,
657           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
658           .policy = packet_policy,
659           .doit = ovs_packet_cmd_execute
660         }
661 };
662
663 static struct genl_family dp_packet_genl_family = {
664         .id = GENL_ID_GENERATE,
665         .hdrsize = sizeof(struct ovs_header),
666         .name = OVS_PACKET_FAMILY,
667         .version = OVS_PACKET_VERSION,
668         .maxattr = OVS_PACKET_ATTR_MAX,
669         .netnsok = true,
670         .parallel_ops = true,
671         .ops = dp_packet_genl_ops,
672         .n_ops = ARRAY_SIZE(dp_packet_genl_ops),
673 };
674
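/* Fill 'stats' and 'mega_stats' with the flow-table counts and the sum of
 * the per-CPU hit/miss/loss counters.
 */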
675 static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
676                          struct ovs_dp_megaflow_stats *mega_stats)
677 {
678         int i;
679
680         memset(mega_stats, 0, sizeof(*mega_stats));
681
682         stats->n_flows = ovs_flow_tbl_count(&dp->table);
683         mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table);
684
685         stats->n_hit = stats->n_missed = stats->n_lost = 0;
686
687         for_each_possible_cpu(i) {
688                 const struct dp_stats_percpu *percpu_stats;
689                 struct dp_stats_percpu local_stats;
690                 unsigned int start;
691
692                 percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
693
694                 do {
695                         start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
696                         local_stats = *percpu_stats;
697                 } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));
698
699                 stats->n_hit += local_stats.n_hit;
700                 stats->n_missed += local_stats.n_missed;
701                 stats->n_lost += local_stats.n_lost;
702                 mega_stats->n_mask_hit += local_stats.n_mask_hit;
703         }
704 }
705
706 static bool should_fill_key(const struct sw_flow_id *sfid, uint32_t ufid_flags)
707 {
708         return ovs_identifier_is_ufid(sfid) &&
709                !(ufid_flags & OVS_UFID_F_OMIT_KEY);
710 }
711
712 static bool should_fill_mask(uint32_t ufid_flags)
713 {
714         return !(ufid_flags & OVS_UFID_F_OMIT_MASK);
715 }
716
717 static bool should_fill_actions(uint32_t ufid_flags)
718 {
719         return !(ufid_flags & OVS_UFID_F_OMIT_ACTIONS);
720 }
721
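/* Estimate the Netlink message size needed to describe one flow, honouring
 * the OVS_UFID_F_OMIT_* flags.
 */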
722 static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts,
723                                     const struct sw_flow_id *sfid,
724                                     uint32_t ufid_flags)
725 {
726         size_t len = NLMSG_ALIGN(sizeof(struct ovs_header));
727
728         /* OVS_FLOW_ATTR_UFID, or unmasked flow key as fallback
729          * see ovs_nla_put_identifier()
730          */
731         if (sfid && ovs_identifier_is_ufid(sfid))
732                 len += nla_total_size(sfid->ufid_len);
733         else
734                 len += nla_total_size(ovs_key_attr_size());
735
736         /* OVS_FLOW_ATTR_KEY */
737         if (!sfid || should_fill_key(sfid, ufid_flags))
738                 len += nla_total_size(ovs_key_attr_size());
739
740         /* OVS_FLOW_ATTR_MASK */
741         if (should_fill_mask(ufid_flags))
742                 len += nla_total_size(ovs_key_attr_size());
743
744         /* OVS_FLOW_ATTR_ACTIONS */
745         if (should_fill_actions(ufid_flags))
746                 len += nla_total_size(acts->orig_len);
747
748         return len
749                 + nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
750                 + nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
751                 + nla_total_size(8); /* OVS_FLOW_ATTR_USED */
752 }
753
754 /* Called with ovs_mutex or RCU read lock. */
755 static int ovs_flow_cmd_fill_stats(const struct sw_flow *flow,
756                                    struct sk_buff *skb)
757 {
758         struct ovs_flow_stats stats;
759         __be16 tcp_flags;
760         unsigned long used;
761
762         ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);
763
764         if (used &&
765             nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
766                 return -EMSGSIZE;
767
768         if (stats.n_packets &&
769             nla_put(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats))
770                 return -EMSGSIZE;
771
772         if ((u8)ntohs(tcp_flags) &&
773              nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags)))
774                 return -EMSGSIZE;
775
776         return 0;
777 }
778
779 /* Called with ovs_mutex or RCU read lock. */
780 static int ovs_flow_cmd_fill_actions(const struct sw_flow *flow,
781                                      struct sk_buff *skb, int skb_orig_len)
782 {
783         struct nlattr *start;
784         int err;
785
786         /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
787          * this is the first flow to be dumped into 'skb'.  This is unusual for
788          * Netlink but individual action lists can be longer than
789          * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
790          * The userspace caller can always fetch the actions separately if it
791          * really wants them.  (Most userspace callers in fact don't care.)
792          *
793          * This can only fail for dump operations because the skb is always
794          * properly sized for single flows.
795          */
796         start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS);
797         if (start) {
798                 const struct sw_flow_actions *sf_acts;
799
800                 sf_acts = rcu_dereference_ovsl(flow->sf_acts);
801                 err = ovs_nla_put_actions(sf_acts->actions,
802                                           sf_acts->actions_len, skb);
803
804                 if (!err)
805                         nla_nest_end(skb, start);
806                 else {
807                         if (skb_orig_len)
808                                 return err;
809
810                         nla_nest_cancel(skb, start);
811                 }
812         } else if (skb_orig_len) {
813                 return -EMSGSIZE;
814         }
815
816         return 0;
817 }
818
819 /* Called with ovs_mutex or RCU read lock. */
820 static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
821                                   struct sk_buff *skb, u32 portid,
822                                   u32 seq, u32 flags, u8 cmd, u32 ufid_flags)
823 {
824         const int skb_orig_len = skb->len;
825         struct ovs_header *ovs_header;
826         int err;
827
828         ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family,
829                                  flags, cmd);
830         if (!ovs_header)
831                 return -EMSGSIZE;
832
833         ovs_header->dp_ifindex = dp_ifindex;
834
835         err = ovs_nla_put_identifier(flow, skb);
836         if (err)
837                 goto error;
838
839         if (should_fill_key(&flow->id, ufid_flags)) {
840                 err = ovs_nla_put_masked_key(flow, skb);
841                 if (err)
842                         goto error;
843         }
844
845         if (should_fill_mask(ufid_flags)) {
846                 err = ovs_nla_put_mask(flow, skb);
847                 if (err)
848                         goto error;
849         }
850
851         err = ovs_flow_cmd_fill_stats(flow, skb);
852         if (err)
853                 goto error;
854
855         if (should_fill_actions(ufid_flags)) {
856                 err = ovs_flow_cmd_fill_actions(flow, skb, skb_orig_len);
857                 if (err)
858                         goto error;
859         }
860
861         genlmsg_end(skb, ovs_header);
862         return 0;
863
864 error:
865         genlmsg_cancel(skb, ovs_header);
866         return err;
867 }
868
869 /* May not be called with RCU read lock. */
870 static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts,
871                                                const struct sw_flow_id *sfid,
872                                                struct genl_info *info,
873                                                bool always,
874                                                uint32_t ufid_flags)
875 {
876         struct sk_buff *skb;
877         size_t len;
878
879         if (!always && !ovs_must_notify(&dp_flow_genl_family, info, 0))
880                 return NULL;
881
882         len = ovs_flow_cmd_msg_size(acts, sfid, ufid_flags);
883         skb = genlmsg_new_unicast(len, info, GFP_KERNEL);
884         if (!skb)
885                 return ERR_PTR(-ENOMEM);
886
887         return skb;
888 }
889
890 /* Called with ovs_mutex. */
891 static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
892                                                int dp_ifindex,
893                                                struct genl_info *info, u8 cmd,
894                                                bool always, u32 ufid_flags)
895 {
896         struct sk_buff *skb;
897         int retval;
898
899         skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts),
900                                       &flow->id, info, always, ufid_flags);
901         if (IS_ERR_OR_NULL(skb))
902                 return skb;
903
904         retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
905                                         info->snd_portid, info->snd_seq, 0,
906                                         cmd, ufid_flags);
907         if (WARN_ON_ONCE(retval < 0)) {
908                 kfree_skb(skb);
909                 skb = ERR_PTR(retval);
910         }
911         return skb;
912 }
913
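/* OVS_FLOW_CMD_NEW handler: install a new flow, or, if an equivalent flow
 * already exists and the request allows it, replace that flow's actions.
 */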
914 static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
915 {
916         struct net *net = sock_net(skb->sk);
917         struct nlattr **a = info->attrs;
918         struct ovs_header *ovs_header = info->userhdr;
919         struct sw_flow *flow = NULL, *new_flow;
920         struct sw_flow_mask mask;
921         struct sk_buff *reply;
922         struct datapath *dp;
923         struct sw_flow_key key;
924         struct sw_flow_actions *acts;
925         struct sw_flow_match match;
926         u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
927         int error;
928         bool log = !a[OVS_FLOW_ATTR_PROBE];
929
930         /* Must have key and actions. */
931         error = -EINVAL;
932         if (!a[OVS_FLOW_ATTR_KEY]) {
933                 OVS_NLERR(log, "Flow key attr not present in new flow.");
934                 goto error;
935         }
936         if (!a[OVS_FLOW_ATTR_ACTIONS]) {
937                 OVS_NLERR(log, "Flow actions attr not present in new flow.");
938                 goto error;
939         }
940
941         /* Most of the time we need to allocate a new flow, so do it before
942          * taking the lock.
943          */
944         new_flow = ovs_flow_alloc();
945         if (IS_ERR(new_flow)) {
946                 error = PTR_ERR(new_flow);
947                 goto error;
948         }
949
950         /* Extract key. */
951         ovs_match_init(&match, &key, &mask);
952         error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
953                                   a[OVS_FLOW_ATTR_MASK], log);
954         if (error)
955                 goto err_kfree_flow;
956
957         ovs_flow_mask_key(&new_flow->key, &key, true, &mask);
958
959         /* Extract flow identifier. */
960         error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID],
961                                        &key, log);
962         if (error)
963                 goto err_kfree_flow;
964
965         /* Validate actions. */
966         error = ovs_nla_copy_actions(net, a[OVS_FLOW_ATTR_ACTIONS],
967                                      &new_flow->key, &acts, log);
968         if (error) {
969                 OVS_NLERR(log, "Flow actions may not be safe on all matching packets.");
970                 goto err_kfree_flow;
971         }
972
973         reply = ovs_flow_cmd_alloc_info(acts, &new_flow->id, info, false,
974                                         ufid_flags);
975         if (IS_ERR(reply)) {
976                 error = PTR_ERR(reply);
977                 goto err_kfree_acts;
978         }
979
980         ovs_lock();
981         dp = get_dp(net, ovs_header->dp_ifindex);
982         if (unlikely(!dp)) {
983                 error = -ENODEV;
984                 goto err_unlock_ovs;
985         }
986
987         /* Check if this is a duplicate flow */
988         if (ovs_identifier_is_ufid(&new_flow->id))
989                 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &new_flow->id);
990         if (!flow)
991                 flow = ovs_flow_tbl_lookup(&dp->table, &key);
992         if (likely(!flow)) {
993                 rcu_assign_pointer(new_flow->sf_acts, acts);
994
995                 /* Put flow in bucket. */
996                 error = ovs_flow_tbl_insert(&dp->table, new_flow, &mask);
997                 if (unlikely(error)) {
998                         acts = NULL;
999                         goto err_unlock_ovs;
1000                 }
1001
1002                 if (unlikely(reply)) {
1003                         error = ovs_flow_cmd_fill_info(new_flow,
1004                                                        ovs_header->dp_ifindex,
1005                                                        reply, info->snd_portid,
1006                                                        info->snd_seq, 0,
1007                                                        OVS_FLOW_CMD_NEW,
1008                                                        ufid_flags);
1009                         BUG_ON(error < 0);
1010                 }
1011                 ovs_unlock();
1012         } else {
1013                 struct sw_flow_actions *old_acts;
1014
1015                 /* Bail out if we're not allowed to modify an existing flow.
1016                  * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
1017                  * because Generic Netlink treats the latter as a dump
1018                  * request.  We also accept NLM_F_EXCL in case that bug ever
1019                  * gets fixed.
1020                  */
1021                 if (unlikely(info->nlhdr->nlmsg_flags & (NLM_F_CREATE
1022                                                          | NLM_F_EXCL))) {
1023                         error = -EEXIST;
1024                         goto err_unlock_ovs;
1025                 }
1026                 /* The flow identifier has to be the same for flow updates.
1027                  * Look for any overlapping flow.
1028                  */
1029                 if (unlikely(!ovs_flow_cmp(flow, &match))) {
1030                         if (ovs_identifier_is_key(&flow->id))
1031                                 flow = ovs_flow_tbl_lookup_exact(&dp->table,
1032                                                                  &match);
1033                         else /* UFID matches but key is different */
1034                                 flow = NULL;
1035                         if (!flow) {
1036                                 error = -ENOENT;
1037                                 goto err_unlock_ovs;
1038                         }
1039                 }
1040                 /* Update actions. */
1041                 old_acts = ovsl_dereference(flow->sf_acts);
1042                 rcu_assign_pointer(flow->sf_acts, acts);
1043
1044                 if (unlikely(reply)) {
1045                         error = ovs_flow_cmd_fill_info(flow,
1046                                                        ovs_header->dp_ifindex,
1047                                                        reply, info->snd_portid,
1048                                                        info->snd_seq, 0,
1049                                                        OVS_FLOW_CMD_NEW,
1050                                                        ufid_flags);
1051                         BUG_ON(error < 0);
1052                 }
1053                 ovs_unlock();
1054
1055                 ovs_nla_free_flow_actions_rcu(old_acts);
1056                 ovs_flow_free(new_flow, false);
1057         }
1058
1059         if (reply)
1060                 ovs_notify(&dp_flow_genl_family, reply, info);
1061         return 0;
1062
1063 err_unlock_ovs:
1064         ovs_unlock();
1065         kfree_skb(reply);
1066 err_kfree_acts:
1067         ovs_nla_free_flow_actions(acts);
1068 err_kfree_flow:
1069         ovs_flow_free(new_flow, false);
1070 error:
1071         return error;
1072 }
1073
1074 /* Factor out action copy to avoid "Wframe-larger-than=1024" warning. */
1075 static struct sw_flow_actions *get_flow_actions(struct net *net,
1076                                                 const struct nlattr *a,
1077                                                 const struct sw_flow_key *key,
1078                                                 const struct sw_flow_mask *mask,
1079                                                 bool log)
1080 {
1081         struct sw_flow_actions *acts;
1082         struct sw_flow_key masked_key;
1083         int error;
1084
1085         ovs_flow_mask_key(&masked_key, key, true, mask);
1086         error = ovs_nla_copy_actions(net, a, &masked_key, &acts, log);
1087         if (error) {
1088                 OVS_NLERR(log,
1089                           "Actions may not be safe on all matching packets");
1090                 return ERR_PTR(error);
1091         }
1092
1093         return acts;
1094 }
1095
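/* OVS_FLOW_CMD_SET handler: update the actions and/or clear the statistics
 * of an existing flow, identified by UFID or by exact key match.
 */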
1096 static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
1097 {
1098         struct net *net = sock_net(skb->sk);
1099         struct nlattr **a = info->attrs;
1100         struct ovs_header *ovs_header = info->userhdr;
1101         struct sw_flow_key key;
1102         struct sw_flow *flow;
1103         struct sw_flow_mask mask;
1104         struct sk_buff *reply = NULL;
1105         struct datapath *dp;
1106         struct sw_flow_actions *old_acts = NULL, *acts = NULL;
1107         struct sw_flow_match match;
1108         struct sw_flow_id sfid;
1109         u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
1110         int error;
1111         bool log = !a[OVS_FLOW_ATTR_PROBE];
1112         bool ufid_present;
1113
1114         /* Extract key. */
1115         error = -EINVAL;
1116         if (!a[OVS_FLOW_ATTR_KEY]) {
1117                 OVS_NLERR(log, "Flow key attribute not present in set flow.");
1118                 goto error;
1119         }
1120
1121         ufid_present = ovs_nla_get_ufid(&sfid, a[OVS_FLOW_ATTR_UFID], log);
1122         ovs_match_init(&match, &key, &mask);
1123         error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
1124                                   a[OVS_FLOW_ATTR_MASK], log);
1125         if (error)
1126                 goto error;
1127
1128         /* Validate actions. */
1129         if (a[OVS_FLOW_ATTR_ACTIONS]) {
1130                 acts = get_flow_actions(net, a[OVS_FLOW_ATTR_ACTIONS], &key,
1131                                         &mask, log);
1132                 if (IS_ERR(acts)) {
1133                         error = PTR_ERR(acts);
1134                         goto error;
1135                 }
1136
1137                 /* We can allocate the reply before locking if we have acts. */
1138                 reply = ovs_flow_cmd_alloc_info(acts, &sfid, info, false,
1139                                                 ufid_flags);
1140                 if (IS_ERR(reply)) {
1141                         error = PTR_ERR(reply);
1142                         goto err_kfree_acts;
1143                 }
1144         }
1145
1146         ovs_lock();
1147         dp = get_dp(net, ovs_header->dp_ifindex);
1148         if (unlikely(!dp)) {
1149                 error = -ENODEV;
1150                 goto err_unlock_ovs;
1151         }
1152         /* Check that the flow exists. */
1153         if (ufid_present)
1154                 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &sfid);
1155         else
1156                 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
1157         if (unlikely(!flow)) {
1158                 error = -ENOENT;
1159                 goto err_unlock_ovs;
1160         }
1161
1162         /* Update actions, if present. */
1163         if (likely(acts)) {
1164                 old_acts = ovsl_dereference(flow->sf_acts);
1165                 rcu_assign_pointer(flow->sf_acts, acts);
1166
1167                 if (unlikely(reply)) {
1168                         error = ovs_flow_cmd_fill_info(flow,
1169                                                        ovs_header->dp_ifindex,
1170                                                        reply, info->snd_portid,
1171                                                        info->snd_seq, 0,
1172                                                        OVS_FLOW_CMD_NEW,
1173                                                        ufid_flags);
1174                         BUG_ON(error < 0);
1175                 }
1176         } else {
1177                 /* Without acts we could not allocate the reply before locking. */
1178                 reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex,
1179                                                 info, OVS_FLOW_CMD_NEW, false,
1180                                                 ufid_flags);
1181
1182                 if (IS_ERR(reply)) {
1183                         error = PTR_ERR(reply);
1184                         goto err_unlock_ovs;
1185                 }
1186         }
1187
1188         /* Clear stats. */
1189         if (a[OVS_FLOW_ATTR_CLEAR])
1190                 ovs_flow_stats_clear(flow);
1191         ovs_unlock();
1192
1193         if (reply)
1194                 ovs_notify(&dp_flow_genl_family, reply, info);
1195         if (old_acts)
1196                 ovs_nla_free_flow_actions_rcu(old_acts);
1197
1198         return 0;
1199
1200 err_unlock_ovs:
1201         ovs_unlock();
1202         kfree_skb(reply);
1203 err_kfree_acts:
1204         ovs_nla_free_flow_actions(acts);
1205 error:
1206         return error;
1207 }
1208
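/* OVS_FLOW_CMD_GET handler: look up a single flow by UFID or exact key and
 * reply with its full description.
 */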
1209 static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
1210 {
1211         struct nlattr **a = info->attrs;
1212         struct ovs_header *ovs_header = info->userhdr;
1213         struct net *net = sock_net(skb->sk);
1214         struct sw_flow_key key;
1215         struct sk_buff *reply;
1216         struct sw_flow *flow;
1217         struct datapath *dp;
1218         struct sw_flow_match match;
1219         struct sw_flow_id ufid;
1220         u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
1221         int err = 0;
1222         bool log = !a[OVS_FLOW_ATTR_PROBE];
1223         bool ufid_present;
1224
1225         ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
1226         if (a[OVS_FLOW_ATTR_KEY]) {
1227                 ovs_match_init(&match, &key, NULL);
1228                 err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY], NULL,
1229                                         log);
1230         } else if (!ufid_present) {
1231                 OVS_NLERR(log,
1232                           "Flow get message rejected, Key attribute missing.");
1233                 err = -EINVAL;
1234         }
1235         if (err)
1236                 return err;
1237
1238         ovs_lock();
1239         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1240         if (!dp) {
1241                 err = -ENODEV;
1242                 goto unlock;
1243         }
1244
1245         if (ufid_present)
1246                 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
1247         else
1248                 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
1249         if (!flow) {
1250                 err = -ENOENT;
1251                 goto unlock;
1252         }
1253
1254         reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info,
1255                                         OVS_FLOW_CMD_NEW, true, ufid_flags);
1256         if (IS_ERR(reply)) {
1257                 err = PTR_ERR(reply);
1258                 goto unlock;
1259         }
1260
1261         ovs_unlock();
1262         return genlmsg_reply(reply, info);
1263 unlock:
1264         ovs_unlock();
1265         return err;
1266 }
1267
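/* OVS_FLOW_CMD_DEL handler: delete one flow (by UFID or exact key), or flush
 * the entire flow table when neither identifier is supplied.
 */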
1268 static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
1269 {
1270         struct nlattr **a = info->attrs;
1271         struct ovs_header *ovs_header = info->userhdr;
1272         struct net *net = sock_net(skb->sk);
1273         struct sw_flow_key key;
1274         struct sk_buff *reply;
1275         struct sw_flow *flow = NULL;
1276         struct datapath *dp;
1277         struct sw_flow_match match;
1278         struct sw_flow_id ufid;
1279         u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
1280         int err;
1281         bool log = !a[OVS_FLOW_ATTR_PROBE];
1282         bool ufid_present;
1283
1284         ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
1285         if (a[OVS_FLOW_ATTR_KEY]) {
1286                 ovs_match_init(&match, &key, NULL);
1287                 err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
1288                                         NULL, log);
1289                 if (unlikely(err))
1290                         return err;
1291         }
1292
1293         ovs_lock();
1294         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1295         if (unlikely(!dp)) {
1296                 err = -ENODEV;
1297                 goto unlock;
1298         }
1299
1300         if (unlikely(!a[OVS_FLOW_ATTR_KEY] && !ufid_present)) {
1301                 err = ovs_flow_tbl_flush(&dp->table);
1302                 goto unlock;
1303         }
1304
1305         if (ufid_present)
1306                 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
1307         else
1308                 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
1309         if (unlikely(!flow)) {
1310                 err = -ENOENT;
1311                 goto unlock;
1312         }
1313
1314         ovs_flow_tbl_remove(&dp->table, flow);
1315         ovs_unlock();
1316
1317         reply = ovs_flow_cmd_alloc_info((const struct sw_flow_actions __force *) flow->sf_acts,
1318                                         &flow->id, info, false, ufid_flags);
1319         if (likely(reply)) {
1320                 if (likely(!IS_ERR(reply))) {
1321                         rcu_read_lock();        /* To keep the RCU checker happy. */
1322                         err = ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex,
1323                                                      reply, info->snd_portid,
1324                                                      info->snd_seq, 0,
1325                                                      OVS_FLOW_CMD_DEL,
1326                                                      ufid_flags);
1327                         rcu_read_unlock();
1328                         BUG_ON(err < 0);
1329
1330                         ovs_notify(&dp_flow_genl_family, reply, info);
1331                 } else {
1332                         netlink_set_err(sock_net(skb->sk)->genl_sock, 0, 0, PTR_ERR(reply));
1333                 }
1334         }
1335
1336         ovs_flow_free(flow, true);
1337         return 0;
1338 unlock:
1339         ovs_unlock();
1340         return err;
1341 }
1342
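/* Netlink dump callback: resume from the flow-table position saved in
 * cb->args[] and emit one OVS_FLOW_CMD_NEW record per flow until the skb
 * fills up.
 */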
1343 static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1344 {
1345         struct nlattr *a[__OVS_FLOW_ATTR_MAX];
1346         struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
1347         struct table_instance *ti;
1348         struct datapath *dp;
1349         u32 ufid_flags;
1350         int err;
1351
1352         err = genlmsg_parse(cb->nlh, &dp_flow_genl_family, a,
1353                             OVS_FLOW_ATTR_MAX, flow_policy);
1354         if (err)
1355                 return err;
1356         ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
1357
1358         rcu_read_lock();
1359         dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
1360         if (!dp) {
1361                 rcu_read_unlock();
1362                 return -ENODEV;
1363         }
1364
1365         ti = rcu_dereference(dp->table.ti);
1366         for (;;) {
1367                 struct sw_flow *flow;
1368                 u32 bucket, obj;
1369
1370                 bucket = cb->args[0];
1371                 obj = cb->args[1];
1372                 flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj);
1373                 if (!flow)
1374                         break;
1375
1376                 if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb,
1377                                            NETLINK_CB(cb->skb).portid,
1378                                            cb->nlh->nlmsg_seq, NLM_F_MULTI,
1379                                            OVS_FLOW_CMD_NEW, ufid_flags) < 0)
1380                         break;
1381
1382                 cb->args[0] = bucket;
1383                 cb->args[1] = obj;
1384         }
1385         rcu_read_unlock();
1386         return skb->len;
1387 }
1388
1389 static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
1390         [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
1391         [OVS_FLOW_ATTR_MASK] = { .type = NLA_NESTED },
1392         [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
1393         [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
1394         [OVS_FLOW_ATTR_PROBE] = { .type = NLA_FLAG },
1395         [OVS_FLOW_ATTR_UFID] = { .type = NLA_UNSPEC, .len = 1 },
1396         [OVS_FLOW_ATTR_UFID_FLAGS] = { .type = NLA_U32 },
1397 };
1398
1399 static const struct genl_ops dp_flow_genl_ops[] = {
1400         { .cmd = OVS_FLOW_CMD_NEW,
1401           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1402           .policy = flow_policy,
1403           .doit = ovs_flow_cmd_new
1404         },
1405         { .cmd = OVS_FLOW_CMD_DEL,
1406           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1407           .policy = flow_policy,
1408           .doit = ovs_flow_cmd_del
1409         },
1410         { .cmd = OVS_FLOW_CMD_GET,
1411           .flags = 0,               /* OK for unprivileged users. */
1412           .policy = flow_policy,
1413           .doit = ovs_flow_cmd_get,
1414           .dumpit = ovs_flow_cmd_dump
1415         },
1416         { .cmd = OVS_FLOW_CMD_SET,
1417           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1418           .policy = flow_policy,
1419           .doit = ovs_flow_cmd_set,
1420         },
1421 };
1422
1423 static struct genl_family dp_flow_genl_family = {
1424         .id = GENL_ID_GENERATE,
1425         .hdrsize = sizeof(struct ovs_header),
1426         .name = OVS_FLOW_FAMILY,
1427         .version = OVS_FLOW_VERSION,
1428         .maxattr = OVS_FLOW_ATTR_MAX,
1429         .netnsok = true,
1430         .parallel_ops = true,
1431         .ops = dp_flow_genl_ops,
1432         .n_ops = ARRAY_SIZE(dp_flow_genl_ops),
1433         .mcgrps = &ovs_dp_flow_multicast_group,
1434         .n_mcgrps = 1,
1435 };
1436
1437 static size_t ovs_dp_cmd_msg_size(void)
1438 {
1439         size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header));
1440
1441         msgsize += nla_total_size(IFNAMSIZ);
1442         msgsize += nla_total_size(sizeof(struct ovs_dp_stats));
1443         msgsize += nla_total_size(sizeof(struct ovs_dp_megaflow_stats));
1444         msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */
1445
1446         return msgsize;
1447 }
1448
1449 /* Called with ovs_mutex. */
1450 static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
1451                                 u32 portid, u32 seq, u32 flags, u8 cmd)
1452 {
1453         struct ovs_header *ovs_header;
1454         struct ovs_dp_stats dp_stats;
1455         struct ovs_dp_megaflow_stats dp_megaflow_stats;
1456         int err;
1457
1458         ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
1459                                    flags, cmd);
1460         if (!ovs_header)
1461                 goto error;
1462
1463         ovs_header->dp_ifindex = get_dpifindex(dp);
1464
1465         err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
1466         if (err)
1467                 goto nla_put_failure;
1468
1469         get_dp_stats(dp, &dp_stats, &dp_megaflow_stats);
1470         if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats),
1471                         &dp_stats))
1472                 goto nla_put_failure;
1473
1474         if (nla_put(skb, OVS_DP_ATTR_MEGAFLOW_STATS,
1475                         sizeof(struct ovs_dp_megaflow_stats),
1476                         &dp_megaflow_stats))
1477                 goto nla_put_failure;
1478
1479         if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
1480                 goto nla_put_failure;
1481
1482         genlmsg_end(skb, ovs_header);
1483         return 0;
1484
1485 nla_put_failure:
1486         genlmsg_cancel(skb, ovs_header);
1487 error:
1488         return -EMSGSIZE;
1489 }
1490
1491 static struct sk_buff *ovs_dp_cmd_alloc_info(struct genl_info *info)
1492 {
1493         return genlmsg_new_unicast(ovs_dp_cmd_msg_size(), info, GFP_KERNEL);
1494 }
1495
1496 /* Called with rcu_read_lock or ovs_mutex. */
1497 static struct datapath *lookup_datapath(struct net *net,
1498                                         const struct ovs_header *ovs_header,
1499                                         struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1500 {
1501         struct datapath *dp;
1502
1503         if (!a[OVS_DP_ATTR_NAME])
1504                 dp = get_dp(net, ovs_header->dp_ifindex);
1505         else {
1506                 struct vport *vport;
1507
1508                 vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
1509                 dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
1510         }
1511         return dp ? dp : ERR_PTR(-ENODEV);
1512 }
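
/*
 * In other words, a datapath can be addressed either through
 * ovs_header->dp_ifindex (the ifindex of its local internal port) or, whenever
 * OVS_DP_ATTR_NAME is supplied, by name: the name lookup finds the OVSP_LOCAL
 * vport, which shares the datapath's name, and follows it back to the owning
 * datapath.
 */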
1513
1514 static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *info)
1515 {
1516         struct datapath *dp;
1517
1518         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1519         if (IS_ERR(dp))
1520                 return;
1521
1522         WARN(dp->user_features, "Dropping previously announced user features\n");
1523         dp->user_features = 0;
1524 }
1525
1526 static void ovs_dp_change(struct datapath *dp, struct nlattr *a[])
1527 {
1528         if (a[OVS_DP_ATTR_USER_FEATURES])
1529                 dp->user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]);
1530 }
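
/*
 * Illustrative only: user_features is a bitmask of OVS_DP_F_* flags from the
 * uapi <linux/openvswitch.h>.  A userspace sketch that opts in to unaligned
 * Netlink attributes and per-vport upcall PIDs would add something like the
 * following to its request (the request construction is hypothetical, the
 * flag names come from the uapi header):
 *
 *	nla_put_u32(request, OVS_DP_ATTR_USER_FEATURES,
 *		    OVS_DP_F_UNALIGNED | OVS_DP_F_VPORT_PIDS);
 */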
1531
1532 static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
1533 {
1534         struct nlattr **a = info->attrs;
1535         struct vport_parms parms;
1536         struct sk_buff *reply;
1537         struct datapath *dp;
1538         struct vport *vport;
1539         struct ovs_net *ovs_net;
1540         int err, i;
1541
1542         err = -EINVAL;
1543         if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
1544                 goto err;
1545
1546         reply = ovs_dp_cmd_alloc_info(info);
1547         if (!reply)
1548                 return -ENOMEM;
1549
1550         err = -ENOMEM;
1551         dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1552         if (dp == NULL)
1553                 goto err_free_reply;
1554
1555         ovs_dp_set_net(dp, sock_net(skb->sk));
1556
1557         /* Allocate table. */
1558         err = ovs_flow_tbl_init(&dp->table);
1559         if (err)
1560                 goto err_free_dp;
1561
1562         dp->stats_percpu = netdev_alloc_pcpu_stats(struct dp_stats_percpu);
1563         if (!dp->stats_percpu) {
1564                 err = -ENOMEM;
1565                 goto err_destroy_table;
1566         }
1567
1568         dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
1569                             GFP_KERNEL);
1570         if (!dp->ports) {
1571                 err = -ENOMEM;
1572                 goto err_destroy_percpu;
1573         }
1574
1575         for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
1576                 INIT_HLIST_HEAD(&dp->ports[i]);
1577
1578         /* Set up our datapath device. */
1579         parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
1580         parms.type = OVS_VPORT_TYPE_INTERNAL;
1581         parms.options = NULL;
1582         parms.dp = dp;
1583         parms.port_no = OVSP_LOCAL;
1584         parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID];
1585
1586         ovs_dp_change(dp, a);
1587
1588         /* So far only local changes have been made; now we need the lock. */
1589         ovs_lock();
1590
1591         vport = new_vport(&parms);
1592         if (IS_ERR(vport)) {
1593                 err = PTR_ERR(vport);
1594                 if (err == -EBUSY)
1595                         err = -EEXIST;
1596
1597                 if (err == -EEXIST) {
1598                         /* An outdated user space instance that does not understand
1599                          * the concept of user_features has attempted to create a new
1600                          * datapath and will likely reuse the existing one. Drop all user features.
1601                          */
1602                         if (info->genlhdr->version < OVS_DP_VER_FEATURES)
1603                                 ovs_dp_reset_user_features(skb, info);
1604                 }
1605
1606                 goto err_destroy_ports_array;
1607         }
1608
1609         err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1610                                    info->snd_seq, 0, OVS_DP_CMD_NEW);
1611         BUG_ON(err < 0);
1612
1613         ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
1614         list_add_tail_rcu(&dp->list_node, &ovs_net->dps);
1615
1616         ovs_unlock();
1617
1618         ovs_notify(&dp_datapath_genl_family, reply, info);
1619         return 0;
1620
1621 err_destroy_ports_array:
1622         ovs_unlock();
1623         kfree(dp->ports);
1624 err_destroy_percpu:
1625         free_percpu(dp->stats_percpu);
1626 err_destroy_table:
1627         ovs_flow_tbl_destroy(&dp->table);
1628 err_free_dp:
1629         kfree(dp);
1630 err_free_reply:
1631         kfree_skb(reply);
1632 err:
1633         return err;
1634 }
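
/*
 * For reference, a minimal OVS_DP_CMD_NEW request handled above looks roughly
 * like this (a sketch of the attribute layout, not an exact wire dump;
 * "ovs-system" is merely the name OVS userspace conventionally uses):
 *
 *	genlmsghdr  { cmd = OVS_DP_CMD_NEW, version = OVS_DATAPATH_VERSION }
 *	ovs_header  { dp_ifindex = 0 }
 *	OVS_DP_ATTR_NAME          "ovs-system"  (also names the local port)
 *	OVS_DP_ATTR_UPCALL_PID    portid of the userspace Netlink socket
 *	OVS_DP_ATTR_USER_FEATURES optional OVS_DP_F_* bitmask
 */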
1635
1636 /* Called with ovs_mutex. */
1637 static void __dp_destroy(struct datapath *dp)
1638 {
1639         int i;
1640
1641         for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
1642                 struct vport *vport;
1643                 struct hlist_node *n;
1644
1645                 hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
1646                         if (vport->port_no != OVSP_LOCAL)
1647                                 ovs_dp_detach_port(vport);
1648         }
1649
1650         list_del_rcu(&dp->list_node);
1651
1652         /* OVSP_LOCAL is the datapath's internal port. All other ports in the
1653          * datapath must be destroyed before the datapath itself is freed.
1654          */
1655         ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
1656
1657         /* Defer the remaining destruction until after an RCU grace period. */
1658         call_rcu(&dp->rcu, destroy_dp_rcu);
1659 }
1660
1661 static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1662 {
1663         struct sk_buff *reply;
1664         struct datapath *dp;
1665         int err;
1666
1667         reply = ovs_dp_cmd_alloc_info(info);
1668         if (!reply)
1669                 return -ENOMEM;
1670
1671         ovs_lock();
1672         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1673         err = PTR_ERR(dp);
1674         if (IS_ERR(dp))
1675                 goto err_unlock_free;
1676
1677         err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1678                                    info->snd_seq, 0, OVS_DP_CMD_DEL);
1679         BUG_ON(err < 0);
1680
1681         __dp_destroy(dp);
1682         ovs_unlock();
1683
1684         ovs_notify(&dp_datapath_genl_family, reply, info);
1685
1686         return 0;
1687
1688 err_unlock_free:
1689         ovs_unlock();
1690         kfree_skb(reply);
1691         return err;
1692 }
1693
1694 static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
1695 {
1696         struct sk_buff *reply;
1697         struct datapath *dp;
1698         int err;
1699
1700         reply = ovs_dp_cmd_alloc_info(info);
1701         if (!reply)
1702                 return -ENOMEM;
1703
1704         ovs_lock();
1705         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1706         err = PTR_ERR(dp);
1707         if (IS_ERR(dp))
1708                 goto err_unlock_free;
1709
1710         ovs_dp_change(dp, info->attrs);
1711
1712         err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1713                                    info->snd_seq, 0, OVS_DP_CMD_NEW);
1714         BUG_ON(err < 0);
1715
1716         ovs_unlock();
1717         ovs_notify(&dp_datapath_genl_family, reply, info);
1718
1719         return 0;
1720
1721 err_unlock_free:
1722         ovs_unlock();
1723         kfree_skb(reply);
1724         return err;
1725 }
1726
1727 static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1728 {
1729         struct sk_buff *reply;
1730         struct datapath *dp;
1731         int err;
1732
1733         reply = ovs_dp_cmd_alloc_info(info);
1734         if (!reply)
1735                 return -ENOMEM;
1736
1737         ovs_lock();
1738         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1739         if (IS_ERR(dp)) {
1740                 err = PTR_ERR(dp);
1741                 goto err_unlock_free;
1742         }
1743         err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1744                                    info->snd_seq, 0, OVS_DP_CMD_NEW);
1745         BUG_ON(err < 0);
1746         ovs_unlock();
1747
1748         return genlmsg_reply(reply, info);
1749
1750 err_unlock_free:
1751         ovs_unlock();
1752         kfree_skb(reply);
1753         return err;
1754 }
1755
1756 static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1757 {
1758         struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
1759         struct datapath *dp;
1760         int skip = cb->args[0];
1761         int i = 0;
1762
1763         ovs_lock();
1764         list_for_each_entry(dp, &ovs_net->dps, list_node) {
1765                 if (i >= skip &&
1766                     ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
1767                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
1768                                          OVS_DP_CMD_NEW) < 0)
1769                         break;
1770                 i++;
1771         }
1772         ovs_unlock();
1773
1774         cb->args[0] = i;
1775
1776         return skb->len;
1777 }
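
/*
 * Dump resumption, by example: entries are written until
 * ovs_dp_cmd_fill_info() no longer fits in the skb.  If three of five
 * datapaths fit, the loop breaks with i == 3, cb->args[0] records 3, and the
 * next dump call skips the first three list entries before emitting the
 * remainder.
 */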
1778
1779 static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
1780         [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1781         [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1782         [OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
1783 };
1784
1785 static const struct genl_ops dp_datapath_genl_ops[] = {
1786         { .cmd = OVS_DP_CMD_NEW,
1787           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1788           .policy = datapath_policy,
1789           .doit = ovs_dp_cmd_new
1790         },
1791         { .cmd = OVS_DP_CMD_DEL,
1792           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1793           .policy = datapath_policy,
1794           .doit = ovs_dp_cmd_del
1795         },
1796         { .cmd = OVS_DP_CMD_GET,
1797           .flags = 0,               /* OK for unprivileged users. */
1798           .policy = datapath_policy,
1799           .doit = ovs_dp_cmd_get,
1800           .dumpit = ovs_dp_cmd_dump
1801         },
1802         { .cmd = OVS_DP_CMD_SET,
1803           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1804           .policy = datapath_policy,
1805           .doit = ovs_dp_cmd_set,
1806         },
1807 };
1808
1809 static struct genl_family dp_datapath_genl_family = {
1810         .id = GENL_ID_GENERATE,
1811         .hdrsize = sizeof(struct ovs_header),
1812         .name = OVS_DATAPATH_FAMILY,
1813         .version = OVS_DATAPATH_VERSION,
1814         .maxattr = OVS_DP_ATTR_MAX,
1815         .netnsok = true,
1816         .parallel_ops = true,
1817         .ops = dp_datapath_genl_ops,
1818         .n_ops = ARRAY_SIZE(dp_datapath_genl_ops),
1819         .mcgrps = &ovs_dp_datapath_multicast_group,
1820         .n_mcgrps = 1,
1821 };
1822
1823 /* Called with ovs_mutex or RCU read lock. */
1824 static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
1825                                    u32 portid, u32 seq, u32 flags, u8 cmd)
1826 {
1827         struct ovs_header *ovs_header;
1828         struct ovs_vport_stats vport_stats;
1829         int err;
1830
1831         ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
1832                                  flags, cmd);
1833         if (!ovs_header)
1834                 return -EMSGSIZE;
1835
1836         ovs_header->dp_ifindex = get_dpifindex(vport->dp);
1837
1838         if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
1839             nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
1840             nla_put_string(skb, OVS_VPORT_ATTR_NAME,
1841                            ovs_vport_name(vport)))
1842                 goto nla_put_failure;
1843
1844         ovs_vport_get_stats(vport, &vport_stats);
1845         if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
1846                     &vport_stats))
1847                 goto nla_put_failure;
1848
1849         if (ovs_vport_get_upcall_portids(vport, skb))
1850                 goto nla_put_failure;
1851
1852         err = ovs_vport_get_options(vport, skb);
1853         if (err == -EMSGSIZE)
1854                 goto error;
1855
1856         genlmsg_end(skb, ovs_header);
1857         return 0;
1858
1859 nla_put_failure:
1860         err = -EMSGSIZE;
1861 error:
1862         genlmsg_cancel(skb, ovs_header);
1863         return err;
1864 }
1865
1866 static struct sk_buff *ovs_vport_cmd_alloc_info(void)
1867 {
1868         return nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1869 }
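
/*
 * Unlike the datapath replies, vport replies carry variable-length data (the
 * upcall portid array and per-type vport options), so a fixed
 * NLMSG_DEFAULT_SIZE skb is allocated above rather than a precisely computed
 * size.
 */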
1870
1871 /* Called with ovs_mutex, only via ovs_dp_notify_wq(). */
1872 struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
1873                                          u32 seq, u8 cmd)
1874 {
1875         struct sk_buff *skb;
1876         int retval;
1877
1878         skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
1879         if (!skb)
1880                 return ERR_PTR(-ENOMEM);
1881
1882         retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd);
1883         BUG_ON(retval < 0);
1884
1885         return skb;
1886 }
1887
1888 /* Called with ovs_mutex or RCU read lock. */
1889 static struct vport *lookup_vport(struct net *net,
1890                                   const struct ovs_header *ovs_header,
1891                                   struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
1892 {
1893         struct datapath *dp;
1894         struct vport *vport;
1895
1896         if (a[OVS_VPORT_ATTR_NAME]) {
1897                 vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
1898                 if (!vport)
1899                         return ERR_PTR(-ENODEV);
1900                 if (ovs_header->dp_ifindex &&
1901                     ovs_header->dp_ifindex != get_dpifindex(vport->dp))
1902                         return ERR_PTR(-ENODEV);
1903                 return vport;
1904         } else if (a[OVS_VPORT_ATTR_PORT_NO]) {
1905                 u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
1906
1907                 if (port_no >= DP_MAX_PORTS)
1908                         return ERR_PTR(-EFBIG);
1909
1910                 dp = get_dp(net, ovs_header->dp_ifindex);
1911                 if (!dp)
1912                         return ERR_PTR(-ENODEV);
1913
1914                 vport = ovs_vport_ovsl_rcu(dp, port_no);
1915                 if (!vport)
1916                         return ERR_PTR(-ENODEV);
1917                 return vport;
1918         } else
1919                 return ERR_PTR(-EINVAL);
1920 }
1921
1922 static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
1923 {
1924         struct nlattr **a = info->attrs;
1925         struct ovs_header *ovs_header = info->userhdr;
1926         struct vport_parms parms;
1927         struct sk_buff *reply;
1928         struct vport *vport;
1929         struct datapath *dp;
1930         u32 port_no;
1931         int err;
1932
1933         if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
1934             !a[OVS_VPORT_ATTR_UPCALL_PID])
1935                 return -EINVAL;
1936
1937         port_no = a[OVS_VPORT_ATTR_PORT_NO]
1938                 ? nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]) : 0;
1939         if (port_no >= DP_MAX_PORTS)
1940                 return -EFBIG;
1941
1942         reply = ovs_vport_cmd_alloc_info();
1943         if (!reply)
1944                 return -ENOMEM;
1945
1946         ovs_lock();
1947 restart:
1948         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1949         err = -ENODEV;
1950         if (!dp)
1951                 goto exit_unlock_free;
1952
1953         if (port_no) {
1954                 vport = ovs_vport_ovsl(dp, port_no);
1955                 err = -EBUSY;
1956                 if (vport)
1957                         goto exit_unlock_free;
1958         } else {
1959                 for (port_no = 1; ; port_no++) {
1960                         if (port_no >= DP_MAX_PORTS) {
1961                                 err = -EFBIG;
1962                                 goto exit_unlock_free;
1963                         }
1964                         vport = ovs_vport_ovsl(dp, port_no);
1965                         if (!vport)
1966                                 break;
1967                 }
1968         }
1969
1970         parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
1971         parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
1972         parms.options = a[OVS_VPORT_ATTR_OPTIONS];
1973         parms.dp = dp;
1974         parms.port_no = port_no;
1975         parms.upcall_portids = a[OVS_VPORT_ATTR_UPCALL_PID];
1976
1977         vport = new_vport(&parms);
1978         err = PTR_ERR(vport);
1979         if (IS_ERR(vport)) {
1980                 if (err == -EAGAIN)
1981                         goto restart;
1982                 goto exit_unlock_free;
1983         }
1984
1985         err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
1986                                       info->snd_seq, 0, OVS_VPORT_CMD_NEW);
1987         BUG_ON(err < 0);
1988         ovs_unlock();
1989
1990         ovs_notify(&dp_vport_genl_family, reply, info);
1991         return 0;
1992
1993 exit_unlock_free:
1994         ovs_unlock();
1995         kfree_skb(reply);
1996         return err;
1997 }
1998
1999 static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
2000 {
2001         struct nlattr **a = info->attrs;
2002         struct sk_buff *reply;
2003         struct vport *vport;
2004         int err;
2005
2006         reply = ovs_vport_cmd_alloc_info();
2007         if (!reply)
2008                 return -ENOMEM;
2009
2010         ovs_lock();
2011         vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
2012         err = PTR_ERR(vport);
2013         if (IS_ERR(vport))
2014                 goto exit_unlock_free;
2015
2016         if (a[OVS_VPORT_ATTR_TYPE] &&
2017             nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) {
2018                 err = -EINVAL;
2019                 goto exit_unlock_free;
2020         }
2021
2022         if (a[OVS_VPORT_ATTR_OPTIONS]) {
2023                 err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
2024                 if (err)
2025                         goto exit_unlock_free;
2026         }
2027
2028
2029         if (a[OVS_VPORT_ATTR_UPCALL_PID]) {
2030                 struct nlattr *ids = a[OVS_VPORT_ATTR_UPCALL_PID];
2031
2032                 err = ovs_vport_set_upcall_portids(vport, ids);
2033                 if (err)
2034                         goto exit_unlock_free;
2035         }
2036
2037         err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
2038                                       info->snd_seq, 0, OVS_VPORT_CMD_NEW);
2039         BUG_ON(err < 0);
2040
2041         ovs_unlock();
2042         ovs_notify(&dp_vport_genl_family, reply, info);
2043         return 0;
2044
2045 exit_unlock_free:
2046         ovs_unlock();
2047         kfree_skb(reply);
2048         return err;
2049 }
2050
2051 static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
2052 {
2053         struct nlattr **a = info->attrs;
2054         struct sk_buff *reply;
2055         struct vport *vport;
2056         int err;
2057
2058         reply = ovs_vport_cmd_alloc_info();
2059         if (!reply)
2060                 return -ENOMEM;
2061
2062         ovs_lock();
2063         vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
2064         err = PTR_ERR(vport);
2065         if (IS_ERR(vport))
2066                 goto exit_unlock_free;
2067
2068         if (vport->port_no == OVSP_LOCAL) {
2069                 err = -EINVAL;
2070                 goto exit_unlock_free;
2071         }
2072
2073         err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
2074                                       info->snd_seq, 0, OVS_VPORT_CMD_DEL);
2075         BUG_ON(err < 0);
2076         ovs_dp_detach_port(vport);
2077         ovs_unlock();
2078
2079         ovs_notify(&dp_vport_genl_family, reply, info);
2080         return 0;
2081
2082 exit_unlock_free:
2083         ovs_unlock();
2084         kfree_skb(reply);
2085         return err;
2086 }
2087
2088 static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
2089 {
2090         struct nlattr **a = info->attrs;
2091         struct ovs_header *ovs_header = info->userhdr;
2092         struct sk_buff *reply;
2093         struct vport *vport;
2094         int err;
2095
2096         reply = ovs_vport_cmd_alloc_info();
2097         if (!reply)
2098                 return -ENOMEM;
2099
2100         rcu_read_lock();
2101         vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
2102         err = PTR_ERR(vport);
2103         if (IS_ERR(vport))
2104                 goto exit_unlock_free;
2105         err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
2106                                       info->snd_seq, 0, OVS_VPORT_CMD_NEW);
2107         BUG_ON(err < 0);
2108         rcu_read_unlock();
2109
2110         return genlmsg_reply(reply, info);
2111
2112 exit_unlock_free:
2113         rcu_read_unlock();
2114         kfree_skb(reply);
2115         return err;
2116 }
2117
2118 static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
2119 {
2120         struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
2121         struct datapath *dp;
2122         int bucket = cb->args[0], skip = cb->args[1];
2123         int i, j = 0;
2124
2125         rcu_read_lock();
2126         dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
2127         if (!dp) {
2128                 rcu_read_unlock();
2129                 return -ENODEV;
2130         }
2131         for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
2132                 struct vport *vport;
2133
2134                 j = 0;
2135                 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
2136                         if (j >= skip &&
2137                             ovs_vport_cmd_fill_info(vport, skb,
2138                                                     NETLINK_CB(cb->skb).portid,
2139                                                     cb->nlh->nlmsg_seq,
2140                                                     NLM_F_MULTI,
2141                                                     OVS_VPORT_CMD_NEW) < 0)
2142                                 goto out;
2143
2144                         j++;
2145                 }
2146                 skip = 0;
2147         }
2148 out:
2149         rcu_read_unlock();
2150
2151         cb->args[0] = i;
2152         cb->args[1] = j;
2153
2154         return skb->len;
2155 }
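
/*
 * Two-level resumption, by example: cb->args[0] remembers the hash bucket the
 * previous dump call stopped in and cb->args[1] how many vports of that
 * bucket were already emitted.  If the skb fills up after two vports of
 * bucket 5, the next call restarts at bucket 5 and skips two entries before
 * continuing.  As with other RCU-protected dumps, entries may be missed or
 * seen twice if the port table changes between calls.
 */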
2156
2157 static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
2158         [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
2159         [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
2160         [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
2161         [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
2162         [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_UNSPEC },
2163         [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
2164 };
2165
2166 static const struct genl_ops dp_vport_genl_ops[] = {
2167         { .cmd = OVS_VPORT_CMD_NEW,
2168           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2169           .policy = vport_policy,
2170           .doit = ovs_vport_cmd_new
2171         },
2172         { .cmd = OVS_VPORT_CMD_DEL,
2173           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2174           .policy = vport_policy,
2175           .doit = ovs_vport_cmd_del
2176         },
2177         { .cmd = OVS_VPORT_CMD_GET,
2178           .flags = 0,               /* OK for unprivileged users. */
2179           .policy = vport_policy,
2180           .doit = ovs_vport_cmd_get,
2181           .dumpit = ovs_vport_cmd_dump
2182         },
2183         { .cmd = OVS_VPORT_CMD_SET,
2184           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2185           .policy = vport_policy,
2186           .doit = ovs_vport_cmd_set,
2187         },
2188 };
2189
2190 struct genl_family dp_vport_genl_family = {
2191         .id = GENL_ID_GENERATE,
2192         .hdrsize = sizeof(struct ovs_header),
2193         .name = OVS_VPORT_FAMILY,
2194         .version = OVS_VPORT_VERSION,
2195         .maxattr = OVS_VPORT_ATTR_MAX,
2196         .netnsok = true,
2197         .parallel_ops = true,
2198         .ops = dp_vport_genl_ops,
2199         .n_ops = ARRAY_SIZE(dp_vport_genl_ops),
2200         .mcgrps = &ovs_dp_vport_multicast_group,
2201         .n_mcgrps = 1,
2202 };
2203
2204 static struct genl_family * const dp_genl_families[] = {
2205         &dp_datapath_genl_family,
2206         &dp_vport_genl_family,
2207         &dp_flow_genl_family,
2208         &dp_packet_genl_family,
2209 };
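
/*
 * All four families are registered with GENL_ID_GENERATE, so their numeric
 * ids are assigned at registration time.  Userspace resolves them by name
 * through the generic netlink controller; with libnl, for instance, this
 * would look roughly like (illustrative, not part of this file):
 *
 *	int family = genl_ctrl_resolve(sock, OVS_DATAPATH_FAMILY);
 */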
2210
2211 static void dp_unregister_genl(int n_families)
2212 {
2213         int i;
2214
2215         for (i = 0; i < n_families; i++)
2216                 genl_unregister_family(dp_genl_families[i]);
2217 }
2218
2219 static int dp_register_genl(void)
2220 {
2221         int err;
2222         int i;
2223
2224         for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
2225
2226                 err = genl_register_family(dp_genl_families[i]);
2227                 if (err)
2228                         goto error;
2229         }
2230
2231         return 0;
2232
2233 error:
2234         dp_unregister_genl(i);
2235         return err;
2236 }
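
/*
 * Partial-failure unwind, by example: if registering dp_genl_families[2]
 * fails, the loop exits with i == 2 and dp_unregister_genl(2) tears down only
 * the two families that were already registered.
 */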
2237
2238 static int __net_init ovs_init_net(struct net *net)
2239 {
2240         struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2241
2242         INIT_LIST_HEAD(&ovs_net->dps);
2243         INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
2244         ovs_ct_init(net);
2245         return 0;
2246 }
2247
2248 static void __net_exit list_vports_from_net(struct net *net, struct net *dnet,
2249                                             struct list_head *head)
2250 {
2251         struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2252         struct datapath *dp;
2253
2254         list_for_each_entry(dp, &ovs_net->dps, list_node) {
2255                 int i;
2256
2257                 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
2258                         struct vport *vport;
2259
2260                         hlist_for_each_entry(vport, &dp->ports[i], dp_hash_node) {
2261                                 if (vport->ops->type != OVS_VPORT_TYPE_INTERNAL)
2262                                         continue;
2263
2264                                 if (dev_net(vport->dev) == dnet)
2265                                         list_add(&vport->detach_list, head);
2266                         }
2267                 }
2268         }
2269 }
2270
2271 static void __net_exit ovs_exit_net(struct net *dnet)
2272 {
2273         struct datapath *dp, *dp_next;
2274         struct ovs_net *ovs_net = net_generic(dnet, ovs_net_id);
2275         struct vport *vport, *vport_next;
2276         struct net *net;
2277         LIST_HEAD(head);
2278
2279         ovs_ct_exit(dnet);
2280         ovs_lock();
2281         list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
2282                 __dp_destroy(dp);
2283
2284         rtnl_lock();
2285         for_each_net(net)
2286                 list_vports_from_net(net, dnet, &head);
2287         rtnl_unlock();
2288
2289         /* Detach all vports whose device lives in the given namespace. */
2290         list_for_each_entry_safe(vport, vport_next, &head, detach_list) {
2291                 list_del(&vport->detach_list);
2292                 ovs_dp_detach_port(vport);
2293         }
2294
2295         ovs_unlock();
2296
2297         cancel_work_sync(&ovs_net->dp_notify_work);
2298 }
2299
2300 static struct pernet_operations ovs_net_ops = {
2301         .init = ovs_init_net,
2302         .exit = ovs_exit_net,
2303         .id   = &ovs_net_id,
2304         .size = sizeof(struct ovs_net),
2305 };
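
/*
 * With .id and .size set, the pernet core allocates one struct ovs_net per
 * network namespace and net_generic() retrieves it.  A sketch of iterating
 * that namespace's datapaths (do_something() is hypothetical):
 *
 *	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
 *	struct datapath *dp;
 *
 *	list_for_each_entry(dp, &ovs_net->dps, list_node)
 *		do_something(dp);
 */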
2306
2307 static int __init dp_init(void)
2308 {
2309         int err;
2310
2311         BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
2312
2313         pr_info("Open vSwitch switching datapath\n");
2314
2315         err = action_fifos_init();
2316         if (err)
2317                 goto error;
2318
2319         err = ovs_internal_dev_rtnl_link_register();
2320         if (err)
2321                 goto error_action_fifos_exit;
2322
2323         err = ovs_flow_init();
2324         if (err)
2325                 goto error_unreg_rtnl_link;
2326
2327         err = ovs_vport_init();
2328         if (err)
2329                 goto error_flow_exit;
2330
2331         err = register_pernet_device(&ovs_net_ops);
2332         if (err)
2333                 goto error_vport_exit;
2334
2335         err = register_netdevice_notifier(&ovs_dp_device_notifier);
2336         if (err)
2337                 goto error_netns_exit;
2338
2339         err = ovs_netdev_init();
2340         if (err)
2341                 goto error_unreg_notifier;
2342
2343         err = dp_register_genl();
2344         if (err < 0)
2345                 goto error_unreg_netdev;
2346
2347         return 0;
2348
2349 error_unreg_netdev:
2350         ovs_netdev_exit();
2351 error_unreg_notifier:
2352         unregister_netdevice_notifier(&ovs_dp_device_notifier);
2353 error_netns_exit:
2354         unregister_pernet_device(&ovs_net_ops);
2355 error_vport_exit:
2356         ovs_vport_exit();
2357 error_flow_exit:
2358         ovs_flow_exit();
2359 error_unreg_rtnl_link:
2360         ovs_internal_dev_rtnl_link_unregister();
2361 error_action_fifos_exit:
2362         action_fifos_exit();
2363 error:
2364         return err;
2365 }
2366
2367 static void dp_cleanup(void)
2368 {
2369         dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
2370         ovs_netdev_exit();
2371         unregister_netdevice_notifier(&ovs_dp_device_notifier);
2372         unregister_pernet_device(&ovs_net_ops);
2373         rcu_barrier();
2374         ovs_vport_exit();
2375         ovs_flow_exit();
2376         ovs_internal_dev_rtnl_link_unregister();
2377         action_fifos_exit();
2378 }
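
/*
 * dp_cleanup() mirrors dp_init() in reverse.  The rcu_barrier() above waits
 * for any callbacks queued with call_rcu() (such as the deferred datapath
 * destruction scheduled by __dp_destroy()) to complete before the flow and
 * vport layers, and eventually the module text, go away.
 */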
2379
2380 module_init(dp_init);
2381 module_exit(dp_cleanup);
2382
2383 MODULE_DESCRIPTION("Open vSwitch switching datapath");
2384 MODULE_LICENSE("GPL");