drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2018 Mellanox Technologies. */

#include <net/vxlan.h>
#include <net/gre.h>
#include "lib/vxlan.h"
#include "en/tc_tun.h"

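/* Resolve the route device and the eswitch egress device for an encap
 * flow. Traffic that egresses through a LAG device, or through a device
 * on a different HW eswitch, must be steered via the uplink representor.
 */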
static int get_route_and_out_devs(struct mlx5e_priv *priv,
                                  struct net_device *dev,
                                  struct net_device **route_dev,
                                  struct net_device **out_dev)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct net_device *uplink_dev, *uplink_upper;
        bool dst_is_lag_dev;

        uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
        uplink_upper = netdev_master_upper_dev_get(uplink_dev);
        dst_is_lag_dev = (uplink_upper &&
                          netif_is_lag_master(uplink_upper) &&
                          dev == uplink_upper &&
                          mlx5_lag_is_sriov(priv->mdev));

        /* if the egress device isn't on the same HW e-switch or
         * it's a LAG device, use the uplink
         */
        if (!switchdev_port_same_parent_id(priv->netdev, dev) ||
            dst_is_lag_dev) {
                *route_dev = uplink_dev;
                *out_dev = *route_dev;
        } else {
                *route_dev = dev;
                if (is_vlan_dev(*route_dev))
                        *out_dev = uplink_dev;
                else if (mlx5e_eswitch_rep(dev))
                        *out_dev = *route_dev;
                else
                        return -EOPNOTSUPP;
        }

        return 0;
}

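/* Perform an IPv4 FIB lookup for the tunnel destination and resolve the
 * output netdevs, the TTL (if not given by the user) and the next-hop
 * neighbour. On success the caller holds a reference on *out_n and must
 * release it with neigh_release().
 */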
static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
                                   struct net_device *mirred_dev,
                                   struct net_device **out_dev,
                                   struct net_device **route_dev,
                                   struct flowi4 *fl4,
                                   struct neighbour **out_n,
                                   u8 *out_ttl)
{
        struct rtable *rt;
        struct neighbour *n = NULL;
        int ret;

#if IS_ENABLED(CONFIG_INET)
        rt = ip_route_output_key(dev_net(mirred_dev), fl4);
        ret = PTR_ERR_OR_ZERO(rt);
        if (ret)
                return ret;
#else
        return -EOPNOTSUPP;
#endif

        ret = get_route_and_out_devs(priv, rt->dst.dev, route_dev, out_dev);
        if (ret < 0) {
                ip_rt_put(rt);
                return ret;
        }

        if (!(*out_ttl))
                *out_ttl = ip4_dst_hoplimit(&rt->dst);
        n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
        ip_rt_put(rt);
        if (!n)
                return -ENOMEM;

        *out_n = n;
        return 0;
}

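/* Return the rtnl link kind of a netdev ("vxlan", "gretap", ...), or an
 * empty string for devices with no rtnl_link_ops.
 */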
static const char *mlx5e_netdev_kind(struct net_device *dev)
{
        if (dev->rtnl_link_ops)
                return dev->rtnl_link_ops->kind;
        else
                return "";
}

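/* IPv6 counterpart of mlx5e_route_lookup_ipv4(): resolve the dst entry,
 * output netdevs, hop limit and next-hop neighbour for the tunnel flow.
 */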
static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
                                   struct net_device *mirred_dev,
                                   struct net_device **out_dev,
                                   struct net_device **route_dev,
                                   struct flowi6 *fl6,
                                   struct neighbour **out_n,
                                   u8 *out_ttl)
{
        struct neighbour *n = NULL;
        struct dst_entry *dst;

#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
        int ret;

        ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
                                         fl6);
        if (ret < 0)
                return ret;

        if (!(*out_ttl))
                *out_ttl = ip6_dst_hoplimit(dst);

        ret = get_route_and_out_devs(priv, dst->dev, route_dev, out_dev);
        if (ret < 0) {
                dst_release(dst);
                return ret;
        }
#else
        return -EOPNOTSUPP;
#endif

        n = dst_neigh_lookup(dst, &fl6->daddr);
        dst_release(dst);
        if (!n)
                return -ENOMEM;

        *out_n = n;
        return 0;
}

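/* Build the UDP and VXLAN portion of the encap header: only the UDP
 * destination port and the VNI are set here; the remaining fields are
 * left zeroed.
 */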
static int mlx5e_gen_vxlan_header(char buf[], struct ip_tunnel_key *tun_key)
{
        __be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
        struct udphdr *udp = (struct udphdr *)(buf);
        struct vxlanhdr *vxh = (struct vxlanhdr *)
                               ((char *)udp + sizeof(struct udphdr));

        udp->dest = tun_key->tp_dst;
        vxh->vx_flags = VXLAN_HF_VNI;
        vxh->vx_vni = vxlan_vni_field(tun_id);

        return 0;
}

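/* Build the GRE portion of the encap header. Checksum and sequence
 * offloads are rejected since the HW cannot generate them; an optional
 * GRE key is written at the end of the base header.
 */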
static int mlx5e_gen_gre_header(char buf[], struct ip_tunnel_key *tun_key)
{
        __be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
        int hdr_len;
        struct gre_base_hdr *greh = (struct gre_base_hdr *)(buf);

        /* the HW does not calculate GRE csum or sequences */
        if (tun_key->tun_flags & (TUNNEL_CSUM | TUNNEL_SEQ))
                return -EOPNOTSUPP;

        greh->protocol = htons(ETH_P_TEB);

        /* GRE key */
        hdr_len = gre_calc_hlen(tun_key->tun_flags);
        greh->flags = gre_tnl_flags_to_gre_flags(tun_key->tun_flags);
        if (tun_key->tun_flags & TUNNEL_KEY) {
                __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);

                *ptr = tun_id;
        }

        return 0;
}

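/* Dispatch to the per-tunnel-type header generator and report the IP
 * protocol (UDP for VXLAN, GRE for gretap) to be set in the IP header.
 */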
static int mlx5e_gen_ip_tunnel_header(char buf[], __u8 *ip_proto,
                                      struct mlx5e_encap_entry *e)
{
        int err = 0;
        struct ip_tunnel_key *key = &e->tun_info.key;

        if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
                *ip_proto = IPPROTO_UDP;
                err = mlx5e_gen_vxlan_header(buf, key);
        } else if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) {
                *ip_proto = IPPROTO_GRE;
                err = mlx5e_gen_gre_header(buf, key);
        } else {
                pr_warn("mlx5: Cannot generate tunnel header for tunnel type (%d)\n",
                        e->tunnel_type);
                err = -EOPNOTSUPP;
        }

        return err;
}

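/* Write the outer Ethernet (and, for VLAN route devices, 802.1Q) header
 * and return a pointer to where the IP header should start.
 */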
static char *gen_eth_tnl_hdr(char *buf, struct net_device *dev,
                             struct mlx5e_encap_entry *e,
                             u16 proto)
{
        struct ethhdr *eth = (struct ethhdr *)buf;
        char *ip;

        ether_addr_copy(eth->h_dest, e->h_dest);
        ether_addr_copy(eth->h_source, dev->dev_addr);
        if (is_vlan_dev(dev)) {
                struct vlan_hdr *vlan = (struct vlan_hdr *)
                                        ((char *)eth + ETH_HLEN);
                ip = (char *)vlan + VLAN_HLEN;
                eth->h_proto = vlan_dev_vlan_proto(dev);
                vlan->h_vlan_TCI = htons(vlan_dev_vlan_id(dev));
                vlan->h_vlan_encapsulated_proto = htons(proto);
        } else {
                eth->h_proto = htons(proto);
                ip = (char *)eth + ETH_HLEN;
        }

        return ip;
}

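/* Build the complete IPv4 encap header for an offloaded tunnel and
 * register it with the FW as a packet reformat action. Returns -EAGAIN
 * when the next-hop neighbour is not yet valid; the encap entry stays
 * attached and is completed once a neighbour update arrives.
 */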
int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
                                    struct net_device *mirred_dev,
                                    struct mlx5e_encap_entry *e)
{
        int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
        struct ip_tunnel_key *tun_key = &e->tun_info.key;
        struct net_device *out_dev, *route_dev;
        struct neighbour *n = NULL;
        struct flowi4 fl4 = {};
        int ipv4_encap_size;
        char *encap_header;
        u8 nud_state, ttl;
        struct iphdr *ip;
        int err;

        /* add the IP fields */
        fl4.flowi4_tos = tun_key->tos;
        fl4.daddr = tun_key->u.ipv4.dst;
        fl4.saddr = tun_key->u.ipv4.src;
        ttl = tun_key->ttl;

        err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev, &route_dev,
                                      &fl4, &n, &ttl);
        if (err)
                return err;

        ipv4_encap_size =
                (is_vlan_dev(route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) +
                sizeof(struct iphdr) +
                e->tunnel_hlen;

        if (max_encap_size < ipv4_encap_size) {
                mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
                               ipv4_encap_size, max_encap_size);
                err = -EOPNOTSUPP;
                goto out;
        }

        encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
        if (!encap_header) {
                err = -ENOMEM;
                goto out;
        }

        /* used by mlx5e_detach_encap to lookup a neigh hash table
         * entry when a user deletes a rule
         */
        e->m_neigh.dev = n->dev;
        e->m_neigh.family = n->ops->family;
        memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
        e->out_dev = out_dev;
        e->route_dev = route_dev;

        /* It's important to add the neigh to the hash table before checking
         * the neigh validity state. That way, if we get a notification when
         * the neigh changes its validity state, we will find the relevant
         * neigh in the hash.
         */
        err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
        if (err)
                goto free_encap;

        read_lock_bh(&n->lock);
        nud_state = n->nud_state;
        ether_addr_copy(e->h_dest, n->ha);
        read_unlock_bh(&n->lock);

        /* add ethernet header */
        ip = (struct iphdr *)gen_eth_tnl_hdr(encap_header, route_dev, e,
                                             ETH_P_IP);

        /* add ip header */
        ip->tos = tun_key->tos;
        ip->version = 0x4;
        ip->ihl = 0x5;
        ip->ttl = ttl;
        ip->daddr = fl4.daddr;
        ip->saddr = fl4.saddr;

        /* add tunneling protocol header */
        err = mlx5e_gen_ip_tunnel_header((char *)ip + sizeof(struct iphdr),
                                         &ip->protocol, e);
        if (err)
                goto destroy_neigh_entry;

        e->encap_size = ipv4_encap_size;
        e->encap_header = encap_header;

        if (!(nud_state & NUD_VALID)) {
                neigh_event_send(n, NULL);
                err = -EAGAIN;
                goto out;
        }

        err = mlx5_packet_reformat_alloc(priv->mdev,
                                         e->reformat_type,
                                         ipv4_encap_size, encap_header,
                                         MLX5_FLOW_NAMESPACE_FDB,
                                         &e->encap_id);
        if (err)
                goto destroy_neigh_entry;

        e->flags |= MLX5_ENCAP_ENTRY_VALID;
        mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
        neigh_release(n);
        return err;

destroy_neigh_entry:
        mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
free_encap:
        kfree(encap_header);
out:
        if (n)
                neigh_release(n);
        return err;
}

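/* IPv6 counterpart of mlx5e_tc_tun_create_header_ipv4(): build the IPv6
 * encap header and allocate the packet reformat context for it.
 */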
int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
                                    struct net_device *mirred_dev,
                                    struct mlx5e_encap_entry *e)
{
        int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
        struct ip_tunnel_key *tun_key = &e->tun_info.key;
        struct net_device *out_dev, *route_dev;
        struct neighbour *n = NULL;
        struct flowi6 fl6 = {};
        struct ipv6hdr *ip6h;
        int ipv6_encap_size;
        char *encap_header;
        u8 nud_state, ttl;
        int err;

        ttl = tun_key->ttl;

        fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
        fl6.daddr = tun_key->u.ipv6.dst;
        fl6.saddr = tun_key->u.ipv6.src;

        err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev, &route_dev,
                                      &fl6, &n, &ttl);
        if (err)
                return err;

        ipv6_encap_size =
                (is_vlan_dev(route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) +
                sizeof(struct ipv6hdr) +
                e->tunnel_hlen;

        if (max_encap_size < ipv6_encap_size) {
                mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
                               ipv6_encap_size, max_encap_size);
                err = -EOPNOTSUPP;
                goto out;
        }

        encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
        if (!encap_header) {
                err = -ENOMEM;
                goto out;
        }

        /* used by mlx5e_detach_encap to lookup a neigh hash table
         * entry when a user deletes a rule
         */
        e->m_neigh.dev = n->dev;
        e->m_neigh.family = n->ops->family;
        memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
        e->out_dev = out_dev;
        e->route_dev = route_dev;

        /* It's important to add the neigh to the hash table before checking
         * the neigh validity state. That way, if we get a notification when
         * the neigh changes its validity state, we will find the relevant
         * neigh in the hash.
         */
        err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
        if (err)
                goto free_encap;

        read_lock_bh(&n->lock);
        nud_state = n->nud_state;
        ether_addr_copy(e->h_dest, n->ha);
        read_unlock_bh(&n->lock);

        /* add ethernet header */
        ip6h = (struct ipv6hdr *)gen_eth_tnl_hdr(encap_header, route_dev, e,
                                                 ETH_P_IPV6);

        /* add ip header */
        ip6_flow_hdr(ip6h, tun_key->tos, 0);
        /* the HW fills in the ipv6 payload len */
        ip6h->hop_limit   = ttl;
        ip6h->daddr       = fl6.daddr;
        ip6h->saddr       = fl6.saddr;

        /* add tunneling protocol header */
        err = mlx5e_gen_ip_tunnel_header((char *)ip6h + sizeof(struct ipv6hdr),
                                         &ip6h->nexthdr, e);
        if (err)
                goto destroy_neigh_entry;

        e->encap_size = ipv6_encap_size;
        e->encap_header = encap_header;

        if (!(nud_state & NUD_VALID)) {
                neigh_event_send(n, NULL);
                err = -EAGAIN;
                goto out;
        }

        err = mlx5_packet_reformat_alloc(priv->mdev,
                                         e->reformat_type,
                                         ipv6_encap_size, encap_header,
                                         MLX5_FLOW_NAMESPACE_FDB,
                                         &e->encap_id);
        if (err)
                goto destroy_neigh_entry;

        e->flags |= MLX5_ENCAP_ENTRY_VALID;
        mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
        neigh_release(n);
        return err;

destroy_neigh_entry:
        mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
free_encap:
        kfree(encap_header);
out:
        if (n)
                neigh_release(n);
        return err;
}

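/* Map a tunnel netdev to the driver's tunnel type enum. */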
int mlx5e_tc_tun_get_type(struct net_device *tunnel_dev)
{
        if (netif_is_vxlan(tunnel_dev))
                return MLX5E_TC_TUNNEL_TYPE_VXLAN;
        else if (netif_is_gretap(tunnel_dev) ||
                 netif_is_ip6gretap(tunnel_dev))
                return MLX5E_TC_TUNNEL_TYPE_GRETAP;
        else
                return MLX5E_TC_TUNNEL_TYPE_UNKNOWN;
}

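/* A tunnel netdev can be offloaded only if the eswitch reports the
 * matching encap/decap capability for its tunnel type.
 */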
bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
                                    struct net_device *netdev)
{
        int tunnel_type = mlx5e_tc_tun_get_type(netdev);

        if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN &&
            MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
                return true;
        else if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP &&
                 MLX5_CAP_ESW(priv->mdev, nvgre_encap_decap))
                return true;
        else
                return false;
}

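/* Fill the reformat type and tunnel header length of an encap entry
 * according to the tunnel netdev type. For VXLAN, the UDP destination
 * port must already be registered with the HW.
 */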
int mlx5e_tc_tun_init_encap_attr(struct net_device *tunnel_dev,
                                 struct mlx5e_priv *priv,
                                 struct mlx5e_encap_entry *e,
                                 struct netlink_ext_ack *extack)
{
        e->tunnel_type = mlx5e_tc_tun_get_type(tunnel_dev);

        if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
                int dst_port = be16_to_cpu(e->tun_info.key.tp_dst);

                if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, dst_port)) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "vxlan udp dport was not registered with the HW");
                        netdev_warn(priv->netdev,
                                    "%d isn't an offloaded vxlan udp dport\n",
                                    dst_port);
                        return -EOPNOTSUPP;
                }
                e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN;
                e->tunnel_hlen = VXLAN_HLEN;
        } else if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) {
                e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_NVGRE;
                e->tunnel_hlen = gre_calc_hlen(e->tun_info.key.tun_flags);
        } else {
                e->reformat_type = -1;
                e->tunnel_hlen = -1;
                return -EOPNOTSUPP;
        }
        return 0;
}

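/* Set up decap matching for VXLAN: require an exact enc_dst_port match
 * on a registered VXLAN port, then match the UDP ports and, when given,
 * the VNI in the misc parameters.
 */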
static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
                                    struct mlx5_flow_spec *spec,
                                    struct tc_cls_flower_offload *f,
                                    void *headers_c,
                                    void *headers_v)
{
        struct netlink_ext_ack *extack = f->common.extack;
        struct flow_dissector_key_ports *key =
                skb_flow_dissector_target(f->dissector,
                                          FLOW_DISSECTOR_KEY_ENC_PORTS,
                                          f->key);
        struct flow_dissector_key_ports *mask =
                skb_flow_dissector_target(f->dissector,
                                          FLOW_DISSECTOR_KEY_ENC_PORTS,
                                          f->mask);
        void *misc_c = MLX5_ADDR_OF(fte_match_param,
                                    spec->match_criteria,
                                    misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param,
                                    spec->match_value,
                                    misc_parameters);

        /* Full udp dst port must be given */
        if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS) ||
            memchr_inv(&mask->dst, 0xff, sizeof(mask->dst))) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "VXLAN decap filter must include enc_dst_port condition");
                netdev_warn(priv->netdev,
                            "VXLAN decap filter must include enc_dst_port condition\n");
                return -EOPNOTSUPP;
        }

        /* udp dst port must be known as a VXLAN port */
        if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->dst))) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Matched UDP port is not registered as a VXLAN port");
                netdev_warn(priv->netdev,
                            "UDP port %d is not registered as a VXLAN port\n",
                            be16_to_cpu(key->dst));
                return -EOPNOTSUPP;
        }

        /* dst UDP port is valid here */
        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

        MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport, ntohs(mask->dst));
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, ntohs(key->dst));

        MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport, ntohs(mask->src));
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport, ntohs(key->src));

        /* match on VNI */
        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
                struct flow_dissector_key_keyid *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_KEYID,
                                                  f->key);
                struct flow_dissector_key_keyid *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_KEYID,
                                                  f->mask);
                MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
                         be32_to_cpu(mask->keyid));
                MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
                         be32_to_cpu(key->keyid));
        }
        return 0;
}

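/* Set up decap matching for gretap: match on the GRE protocol (TEB) and,
 * when given, the GRE key in the misc parameters.
 */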
static int mlx5e_tc_tun_parse_gretap(struct mlx5e_priv *priv,
                                     struct mlx5_flow_spec *spec,
                                     struct tc_cls_flower_offload *f,
                                     void *outer_headers_c,
                                     void *outer_headers_v)
{
        void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                    misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                    misc_parameters);

        if (!MLX5_CAP_ESW(priv->mdev, nvgre_encap_decap)) {
                NL_SET_ERR_MSG_MOD(f->common.extack,
                                   "GRE HW offloading is not supported");
                netdev_warn(priv->netdev, "GRE HW offloading is not supported\n");
                return -EOPNOTSUPP;
        }

        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol);
        MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
                 ip_protocol, IPPROTO_GRE);

        /* gre protocol */
        MLX5_SET_TO_ONES(fte_match_set_misc, misc_c, gre_protocol);
        MLX5_SET(fte_match_set_misc, misc_v, gre_protocol, ETH_P_TEB);

        /* gre key */
        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
                struct flow_dissector_key_keyid *mask = NULL;
                struct flow_dissector_key_keyid *key = NULL;

                mask = skb_flow_dissector_target(f->dissector,
                                                 FLOW_DISSECTOR_KEY_ENC_KEYID,
                                                 f->mask);
                MLX5_SET(fte_match_set_misc, misc_c,
                         gre_key.key, be32_to_cpu(mask->keyid));

                key = skb_flow_dissector_target(f->dissector,
                                                FLOW_DISSECTOR_KEY_ENC_KEYID,
                                                f->key);
                MLX5_SET(fte_match_set_misc, misc_v,
                         gre_key.key, be32_to_cpu(key->keyid));
        }

        return 0;
}

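/* Entry point for parsing the tunnel matches of a TC flower filter:
 * pick the parser by tunnel type and report the required match level.
 */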
int mlx5e_tc_tun_parse(struct net_device *filter_dev,
                       struct mlx5e_priv *priv,
                       struct mlx5_flow_spec *spec,
                       struct tc_cls_flower_offload *f,
                       void *headers_c,
                       void *headers_v, u8 *match_level)
{
        int tunnel_type;
        int err = 0;

        tunnel_type = mlx5e_tc_tun_get_type(filter_dev);
        if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
                *match_level = MLX5_MATCH_L4;
                err = mlx5e_tc_tun_parse_vxlan(priv, spec, f,
                                               headers_c, headers_v);
        } else if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) {
                *match_level = MLX5_MATCH_L3;
                err = mlx5e_tc_tun_parse_gretap(priv, spec, f,
                                                headers_c, headers_v);
        } else {
                netdev_warn(priv->netdev,
                            "decapsulation offload is not supported for %s net device (%d)\n",
                            mlx5e_netdev_kind(filter_dev), tunnel_type);
                return -EOPNOTSUPP;
        }
        return err;
}