/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <net/flow_dissector.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include <linux/completion.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
#include "eswitch.h"
#include "fs_core.h"
#include "en/port.h"
#include "en/tc_tun.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "diag/en_tc_tracepoint.h"
struct mlx5_nic_flow_attr {
	u32 action;
	u32 flow_tag;
	struct mlx5_modify_hdr *modify_hdr;
	u32 hairpin_tirn;
	u8 match_level;
	struct mlx5_flow_table	*hairpin_ft;
	struct mlx5_fc		*counter;
};
#define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1)

enum {
	MLX5E_TC_FLOW_FLAG_INGRESS	= MLX5E_TC_FLAG_INGRESS_BIT,
	MLX5E_TC_FLOW_FLAG_EGRESS	= MLX5E_TC_FLAG_EGRESS_BIT,
	MLX5E_TC_FLOW_FLAG_ESWITCH	= MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_FT		= MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_NIC		= MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_OFFLOADED	= MLX5E_TC_FLOW_BASE,
	MLX5E_TC_FLOW_FLAG_HAIRPIN	= MLX5E_TC_FLOW_BASE + 1,
	MLX5E_TC_FLOW_FLAG_HAIRPIN_RSS	= MLX5E_TC_FLOW_BASE + 2,
	MLX5E_TC_FLOW_FLAG_SLOW		= MLX5E_TC_FLOW_BASE + 3,
	MLX5E_TC_FLOW_FLAG_DUP		= MLX5E_TC_FLOW_BASE + 4,
	MLX5E_TC_FLOW_FLAG_NOT_READY	= MLX5E_TC_FLOW_BASE + 5,
	MLX5E_TC_FLOW_FLAG_DELETED	= MLX5E_TC_FLOW_BASE + 6,
};
#define MLX5E_TC_MAX_SPLITS 1
/* Helper struct for accessing a struct containing list_head array.
 * Containing struct
 *       |- Helper array
 *           [0] Helper item 0
 *               |- list_head item 0
 *               |- index (0)
 *           [1] Helper item 1
 *               |- list_head item 1
 *               |- index (1)
 * To access the containing struct from one of the list_head items:
 * 1. Get the helper item from the list_head item using
 *    container_of(list_head item, helper struct type, list_head field)
 * 2. Get the containing struct from the helper item and its index in the array:
 *    containing struct =
 *    container_of(helper item, containing struct type, helper field[index])
 */
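/* Illustrative sketch (not driver logic): given "item", a hypothetical
 * pointer to encap_flow_item::list taken from an encap entry's flow list,
 * the owning flow is recovered in the two steps described above:
 *
 *	struct encap_flow_item *efi;
 *	struct mlx5e_tc_flow *flow;
 *
 *	efi = container_of(item, struct encap_flow_item, list);
 *	flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
 *
 * This is the same pattern mlx5e_take_all_encap_flows() uses below.
 */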
struct encap_flow_item {
	struct mlx5e_encap_entry *e; /* attached encap instance */
	struct list_head list;
	int index;
};
struct mlx5e_tc_flow {
	struct rhash_head	node;
	struct mlx5e_priv	*priv;
	u64			cookie;
	unsigned long		flags;
	struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];
	/* Flow can be associated with multiple encap IDs.
	 * The number of encaps is bounded by the number of supported
	 * destinations.
	 */
	struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5e_tc_flow    *peer_flow;
	struct mlx5e_mod_hdr_entry *mh; /* attached mod header instance */
	struct list_head	mod_hdr; /* flows sharing the same mod hdr ID */
	struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
	struct list_head	hairpin; /* flows sharing the same hairpin */
	struct list_head	peer;    /* flows with peer flow */
	struct list_head	unready; /* flows not ready to be offloaded (e.g due to missing route) */
	int			tmp_efi_index;
	struct list_head	tmp_list; /* temporary flow list used by neigh update */
	refcount_t		refcnt;
	struct rcu_head		rcu_head;
	struct completion	init_done;
	union {
		struct mlx5_esw_flow_attr esw_attr[0];
		struct mlx5_nic_flow_attr nic_attr[0];
	};
};
struct mlx5e_tc_flow_parse_attr {
	const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
	struct net_device *filter_dev;
	struct mlx5_flow_spec spec;
	int num_mod_hdr_actions;
	int max_mod_hdr_actions;
	void *mod_hdr_actions;
	int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
};
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16)
struct mlx5e_hairpin {
	struct mlx5_hairpin *pair;

	struct mlx5_core_dev *func_mdev;
	struct mlx5e_priv *func_priv;
	u32 tdn;
	u32 tirn;

	int num_channels;
	struct mlx5e_rqt indir_rqt;
	u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_ttc_table ttc;
};
struct mlx5e_hairpin_entry {
	/* a node of a hash table which keeps all the hairpin entries */
	struct hlist_node hairpin_hlist;

	/* protects flows list */
	spinlock_t flows_lock;
	/* flows sharing the same hairpin */
	struct list_head flows;
	/* hpe's that were not fully initialized when dead peer update event
	 * function traversed them.
	 */
	struct list_head dead_peer_wait_list;

	u16 peer_vhca_id;
	u8 prio;
	struct mlx5e_hairpin *hp;
	refcount_t refcnt;
	struct completion res_ready;
};
struct mod_hdr_key {
	int num_actions;
	void *actions;
};

struct mlx5e_mod_hdr_entry {
	/* a node of a hash table which keeps all the mod_hdr entries */
	struct hlist_node mod_hdr_hlist;

	/* protects flows list */
	spinlock_t flows_lock;
	/* flows sharing the same mod_hdr entry */
	struct list_head flows;

	struct mod_hdr_key key;

	struct mlx5_modify_hdr *modify_hdr;

	refcount_t refcnt;
	struct completion res_ready;
	int compl_result;
};
#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow);
static struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)
{
	if (!flow || !refcount_inc_not_zero(&flow->refcnt))
		return ERR_PTR(-EINVAL);
	return flow;
}
static void mlx5e_flow_put(struct mlx5e_priv *priv,
			   struct mlx5e_tc_flow *flow)
{
	if (refcount_dec_and_test(&flow->refcnt)) {
		mlx5e_tc_del_flow(priv, flow);
		kfree_rcu(flow, rcu_head);
	}
}
static void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	/* Complete all memory stores before setting bit. */
	smp_mb__before_atomic();
	set_bit(flag, &flow->flags);
}

#define flow_flag_set(flow, flag) __flow_flag_set(flow, MLX5E_TC_FLOW_FLAG_##flag)
static bool __flow_flag_test_and_set(struct mlx5e_tc_flow *flow,
				     unsigned long flag)
{
	/* test_and_set_bit() provides all necessary barriers */
	return test_and_set_bit(flag, &flow->flags);
}

#define flow_flag_test_and_set(flow, flag)			\
	__flow_flag_test_and_set(flow,				\
				 MLX5E_TC_FLOW_FLAG_##flag)
static void __flow_flag_clear(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	/* Complete all memory stores before clearing bit. */
	smp_mb__before_atomic();
	clear_bit(flag, &flow->flags);
}

#define flow_flag_clear(flow, flag) __flow_flag_clear(flow,		\
						      MLX5E_TC_FLOW_FLAG_##flag)
static bool __flow_flag_test(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	bool ret = test_bit(flag, &flow->flags);

	/* Read fields of flow structure only after checking flags. */
	smp_mb__after_atomic();
	return ret;
}

#define flow_flag_test(flow, flag) __flow_flag_test(flow,		\
						    MLX5E_TC_FLOW_FLAG_##flag)
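
/* Usage sketch: the flow_flag_* wrappers paste the short flag name onto
 * the MLX5E_TC_FLOW_FLAG_ prefix, so
 *
 *	flow_flag_set(flow, OFFLOADED);
 *	if (flow_flag_test(flow, OFFLOADED))
 *		...;
 *
 * expands to set_bit()/test_bit() on MLX5E_TC_FLOW_FLAG_OFFLOADED in
 * flow->flags, with the barriers documented above ordering the flag
 * against the flow fields it guards.
 */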
static bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, ESWITCH);
}

static bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, FT);
}

static bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, OFFLOADED);
}
static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key)
{
	return jhash(key->actions,
		     key->num_actions * MLX5_MH_ACT_SZ, 0);
}

static inline int cmp_mod_hdr_info(struct mod_hdr_key *a,
				   struct mod_hdr_key *b)
{
	if (a->num_actions != b->num_actions)
		return 1;

	return memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ);
}
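
/* In other words, two mod_hdr keys are considered equal only when
 *
 *	a->num_actions == b->num_actions &&
 *	!memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ)
 *
 * so a cached modify header is reused only for a byte-identical action
 * list; jhash() above merely narrows the candidates to one hash bucket.
 */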
static struct mod_hdr_tbl *
get_mod_hdr_table(struct mlx5e_priv *priv, int namespace)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	return namespace == MLX5_FLOW_NAMESPACE_FDB ? &esw->offloads.mod_hdr :
		&priv->fs.tc.mod_hdr;
}
static struct mlx5e_mod_hdr_entry *
mlx5e_mod_hdr_get(struct mod_hdr_tbl *tbl, struct mod_hdr_key *key, u32 hash_key)
{
	struct mlx5e_mod_hdr_entry *mh, *found = NULL;

	hash_for_each_possible(tbl->hlist, mh, mod_hdr_hlist, hash_key) {
		if (!cmp_mod_hdr_info(&mh->key, key)) {
			refcount_inc(&mh->refcnt);
			found = mh;
			break;
		}
	}

	return found;
}
static void mlx5e_mod_hdr_put(struct mlx5e_priv *priv,
			      struct mlx5e_mod_hdr_entry *mh,
			      int namespace)
{
	struct mod_hdr_tbl *tbl = get_mod_hdr_table(priv, namespace);

	if (!refcount_dec_and_mutex_lock(&mh->refcnt, &tbl->lock))
		return;
	hash_del(&mh->mod_hdr_hlist);
	mutex_unlock(&tbl->lock);

	WARN_ON(!list_empty(&mh->flows));
	if (mh->compl_result > 0)
		mlx5_modify_header_dealloc(priv->mdev, mh->modify_hdr);

	kfree(mh);
}
static int get_flow_name_space(struct mlx5e_tc_flow *flow)
{
	return mlx5e_is_eswitch_flow(flow) ?
		MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
}
static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
				struct mlx5e_tc_flow *flow,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	int num_actions, actions_size, namespace, err;
	struct mlx5e_mod_hdr_entry *mh;
	struct mod_hdr_tbl *tbl;
	struct mod_hdr_key key;
	u32 hash_key;

	num_actions  = parse_attr->num_mod_hdr_actions;
	actions_size = MLX5_MH_ACT_SZ * num_actions;

	key.actions = parse_attr->mod_hdr_actions;
	key.num_actions = num_actions;

	hash_key = hash_mod_hdr_info(&key);

	namespace = get_flow_name_space(flow);
	tbl = get_mod_hdr_table(priv, namespace);

	mutex_lock(&tbl->lock);
	mh = mlx5e_mod_hdr_get(tbl, &key, hash_key);
	if (mh) {
		mutex_unlock(&tbl->lock);
		wait_for_completion(&mh->res_ready);

		if (mh->compl_result < 0) {
			err = -EREMOTEIO;
			goto attach_header_err;
		}
		goto attach_flow;
	}

	mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL);
	if (!mh) {
		mutex_unlock(&tbl->lock);
		return -ENOMEM;
	}

	mh->key.actions = (void *)mh + sizeof(*mh);
	memcpy(mh->key.actions, key.actions, actions_size);
	mh->key.num_actions = num_actions;
	spin_lock_init(&mh->flows_lock);
	INIT_LIST_HEAD(&mh->flows);
	refcount_set(&mh->refcnt, 1);
	init_completion(&mh->res_ready);

	hash_add(tbl->hlist, &mh->mod_hdr_hlist, hash_key);
	mutex_unlock(&tbl->lock);

	mh->modify_hdr = mlx5_modify_header_alloc(priv->mdev, namespace,
						  mh->key.num_actions,
						  mh->key.actions);
	if (IS_ERR(mh->modify_hdr)) {
		err = PTR_ERR(mh->modify_hdr);
		mh->compl_result = err;
		goto alloc_header_err;
	}
	mh->compl_result = 1;
	complete_all(&mh->res_ready);

attach_flow:
	flow->mh = mh;
	spin_lock(&mh->flows_lock);
	list_add(&flow->mod_hdr, &mh->flows);
	spin_unlock(&mh->flows_lock);
	if (mlx5e_is_eswitch_flow(flow))
		flow->esw_attr->modify_hdr = mh->modify_hdr;
	else
		flow->nic_attr->modify_hdr = mh->modify_hdr;

	return 0;

alloc_header_err:
	complete_all(&mh->res_ready);
attach_header_err:
	mlx5e_mod_hdr_put(priv, mh, namespace);
	return err;
}
static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
				 struct mlx5e_tc_flow *flow)
{
	/* flow wasn't fully initialized */
	if (!flow->mh)
		return;

	spin_lock(&flow->mh->flows_lock);
	list_del(&flow->mod_hdr);
	spin_unlock(&flow->mh->flows_lock);

	mlx5e_mod_hdr_put(priv, flow->mh, get_flow_name_space(flow));
	flow->mh = NULL;
}
static
struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
{
	struct net_device *netdev;
	struct mlx5e_priv *priv;

	netdev = __dev_get_by_index(net, ifindex);
	priv = netdev_priv(netdev);
	return priv->mdev;
}
static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
	u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
	void *tirc;
	int err;

	err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
	if (err)
		goto alloc_tdn_err;

	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
	MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]);
	MLX5_SET(tirc, tirc, transport_domain, hp->tdn);

	err = mlx5_core_create_tir(hp->func_mdev, in, MLX5_ST_SZ_BYTES(create_tir_in), &hp->tirn);
	if (err)
		goto create_tir_err;

	return 0;

create_tir_err:
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
alloc_tdn_err:
	return err;
}
static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
{
	mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}
static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
{
	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn;
	struct mlx5e_priv *priv = hp->func_priv;
	int i, ix, sz = MLX5E_INDIR_RQT_SIZE;

	mlx5e_build_default_indir_rqt(indirection_rqt, sz,
				      hp->num_channels);

	for (i = 0; i < sz; i++) {
		ix = i;
		if (priv->rss_params.hfunc == ETH_RSS_HASH_XOR)
			ix = mlx5e_bits_invert(i, ilog2(sz));
		ix = indirection_rqt[ix];
		rqn = hp->pair->rqn[ix];
		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
	}
}
static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
{
	int inlen, err, sz = MLX5E_INDIR_RQT_SIZE;
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	u32 *in;

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);

	err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
	if (!err)
		hp->indir_rqt.enabled = true;

	kvfree(in);
	return err;
}
static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	u32 in[MLX5_ST_SZ_DW(create_tir_in)];
	int tt, i, err;
	void *tirc;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		struct mlx5e_tirc_config ttconfig = mlx5e_tirc_get_default_config(tt);

		memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in));
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

		MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
		MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
		MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn);
		mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, &ttconfig, tirc, false);

		err = mlx5_core_create_tir(hp->func_mdev, in,
					   MLX5_ST_SZ_BYTES(create_tir_in), &hp->indir_tirn[tt]);
		if (err) {
			mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
			goto err_destroy_tirs;
		}
	}
	return 0;

err_destroy_tirs:
	for (i = 0; i < tt; i++)
		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[i]);
	return err;
}
static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
{
	int tt;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[tt]);
}
static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
					 struct ttc_params *ttc_params)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
	int tt;

	memset(ttc_params, 0, sizeof(*ttc_params));

	ttc_params->any_tt_tirn = hp->tirn;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];

	ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE;
	ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_TC_PRIO;
}
static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct ttc_params ttc_params;
	int err;

	err = mlx5e_hairpin_create_indirect_rqt(hp);
	if (err)
		return err;

	err = mlx5e_hairpin_create_indirect_tirs(hp);
	if (err)
		goto err_create_indirect_tirs;

	mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
	err = mlx5e_create_ttc_table(priv, &ttc_params, &hp->ttc);
	if (err)
		goto err_create_ttc_table;

	netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
		   hp->num_channels, hp->ttc.ft.t->id);

	return 0;

err_create_ttc_table:
	mlx5e_hairpin_destroy_indirect_tirs(hp);
err_create_indirect_tirs:
	mlx5e_destroy_rqt(priv, &hp->indir_rqt);

	return err;
}
static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;

	mlx5e_destroy_ttc_table(priv, &hp->ttc);
	mlx5e_hairpin_destroy_indirect_tirs(hp);
	mlx5e_destroy_rqt(priv, &hp->indir_rqt);
}
static struct mlx5e_hairpin *
mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
		     int peer_ifindex)
{
	struct mlx5_core_dev *func_mdev, *peer_mdev;
	struct mlx5e_hairpin *hp;
	struct mlx5_hairpin *pair;
	int err;

	hp = kzalloc(sizeof(*hp), GFP_KERNEL);
	if (!hp)
		return ERR_PTR(-ENOMEM);

	func_mdev = priv->mdev;
	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);

	pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
	if (IS_ERR(pair)) {
		err = PTR_ERR(pair);
		goto create_pair_err;
	}
	hp->pair = pair;
	hp->func_mdev = func_mdev;
	hp->func_priv = priv;
	hp->num_channels = params->num_channels;

	err = mlx5e_hairpin_create_transport(hp);
	if (err)
		goto create_transport_err;

	if (hp->num_channels > 1) {
		err = mlx5e_hairpin_rss_init(hp);
		if (err)
			goto rss_init_err;
	}

	return hp;

rss_init_err:
	mlx5e_hairpin_destroy_transport(hp);
create_transport_err:
	mlx5_core_hairpin_destroy(hp->pair);
create_pair_err:
	kfree(hp);
	return ERR_PTR(err);
}
static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
{
	if (hp->num_channels > 1)
		mlx5e_hairpin_rss_cleanup(hp);
	mlx5e_hairpin_destroy_transport(hp);
	mlx5_core_hairpin_destroy(hp->pair);
	kfree(hp);
}
static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
{
	return (peer_vhca_id << 16 | prio);
}
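
/* Example: peer_vhca_id = 2 and prio = 3 hash to (2 << 16) | 3 = 0x20003,
 * so entries are bucketed by the (peer device, PCP priority) pair.
 */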
static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
						     u16 peer_vhca_id, u8 prio)
{
	struct mlx5e_hairpin_entry *hpe;
	u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);

	hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
			       hairpin_hlist, hash_key) {
		if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
			refcount_inc(&hpe->refcnt);
			return hpe;
		}
	}

	return NULL;
}
static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
			      struct mlx5e_hairpin_entry *hpe)
{
	/* no more hairpin flows for us, release the hairpin pair */
	if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs.tc.hairpin_tbl_lock))
		return;
	hash_del(&hpe->hairpin_hlist);
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	if (!IS_ERR_OR_NULL(hpe->hp)) {
		netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
			   dev_name(hpe->hp->pair->peer_mdev->device));

		mlx5e_hairpin_destroy(hpe->hp);
	}

	WARN_ON(!list_empty(&hpe->flows));
	kfree(hpe);
}
#define UNKNOWN_MATCH_PRIO 8

static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
				  struct mlx5_flow_spec *spec, u8 *match_prio,
				  struct netlink_ext_ack *extack)
{
	void *headers_c, *headers_v;
	u8 prio_val, prio_mask = 0;
	bool vlan_present;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "only PCP trust state supported for hairpin");
		return -EOPNOTSUPP;
	}
#endif
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);

	vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
	if (vlan_present) {
		prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
		prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
	}

	if (!vlan_present || !prio_mask) {
		prio_val = UNKNOWN_MATCH_PRIO;
	} else if (prio_mask != 0x7) {
		NL_SET_ERR_MSG_MOD(extack,
				   "masked priority match not supported for hairpin");
		return -EOPNOTSUPP;
	}

	*match_prio = prio_val;
	return 0;
}
static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5e_tc_flow_parse_attr *parse_attr,
				  struct netlink_ext_ack *extack)
{
	int peer_ifindex = parse_attr->mirred_ifindex[0];
	struct mlx5_hairpin_params params;
	struct mlx5_core_dev *peer_mdev;
	struct mlx5e_hairpin_entry *hpe;
	struct mlx5e_hairpin *hp;
	u64 link_speed64;
	u32 link_speed;
	u8 match_prio;
	u16 peer_id;
	int err;

	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
		NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
		return -EOPNOTSUPP;
	}

	peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
	err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
				     extack);
	if (err)
		return err;

	mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
	hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
	if (hpe) {
		mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
		wait_for_completion(&hpe->res_ready);

		if (IS_ERR(hpe->hp)) {
			err = -EREMOTEIO;
			goto out_err;
		}
		goto attach_flow;
	}

	hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
	if (!hpe) {
		mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
		return -ENOMEM;
	}

	spin_lock_init(&hpe->flows_lock);
	INIT_LIST_HEAD(&hpe->flows);
	INIT_LIST_HEAD(&hpe->dead_peer_wait_list);
	hpe->peer_vhca_id = peer_id;
	hpe->prio = match_prio;
	refcount_set(&hpe->refcnt, 1);
	init_completion(&hpe->res_ready);

	hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
		 hash_hairpin_info(peer_id, match_prio));
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	params.log_data_size = 15;
	params.log_data_size = min_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
	params.log_data_size = max_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));

	params.log_num_packets = params.log_data_size -
				 MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
	params.log_num_packets = min_t(u8, params.log_num_packets,
				       MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));
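
	/* Worked example (assuming a minimum log stride size of 6): the
	 * default log_data_size of 15 (32KB) is first clamped into the
	 * device's [log_min, log_max] hairpin data size range, and
	 * log_num_packets then becomes 15 - 6 = 9 (512 packets), unless
	 * the device advertises a smaller log_max_hairpin_num_packets.
	 */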
	params.q_counter = priv->q_counter;
	/* set hairpin pair per each 50Gbs share of the link */
	mlx5e_port_max_linkspeed(priv->mdev, &link_speed);
	link_speed = max_t(u32, link_speed, 50000);
	link_speed64 = link_speed;
	do_div(link_speed64, 50000);
	params.num_channels = link_speed64;
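
	/* Example: a 100Gb/s port reports link_speed = 100000, so
	 * 100000 / 50000 = 2 hairpin channels; links at or below 50Gb/s
	 * get a single channel and skip the RSS setup path entirely.
	 */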
	hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
	hpe->hp = hp;
	complete_all(&hpe->res_ready);
	if (IS_ERR(hp)) {
		err = PTR_ERR(hp);
		goto out_err;
	}

	netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
		   hp->tirn, hp->pair->rqn[0],
		   dev_name(hp->pair->peer_mdev->device),
		   hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);

attach_flow:
	if (hpe->hp->num_channels > 1) {
		flow_flag_set(flow, HAIRPIN_RSS);
		flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
	} else {
		flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
	}

	flow->hpe = hpe;
	spin_lock(&hpe->flows_lock);
	list_add(&flow->hairpin, &hpe->flows);
	spin_unlock(&hpe->flows_lock);

	return 0;

out_err:
	mlx5e_hairpin_put(priv, hpe);
	return err;
}
static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
				   struct mlx5e_tc_flow *flow)
{
	/* flow wasn't fully initialized */
	if (!flow->hpe)
		return;

	spin_lock(&flow->hpe->flows_lock);
	list_del(&flow->hairpin);
	spin_unlock(&flow->hpe->flows_lock);

	mlx5e_hairpin_put(priv, flow->hpe);
	flow->hpe = NULL;
}
static int
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_flow_context *flow_context = &parse_attr->spec.flow_context;
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.flags    = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_fc *counter = NULL;
	int err, dest_ix = 0;

	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = attr->flow_tag;

	if (flow_flag_test(flow, HAIRPIN)) {
		err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
		if (err)
			return err;

		if (flow_flag_test(flow, HAIRPIN_RSS)) {
			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[dest_ix].ft = attr->hairpin_ft;
		} else {
			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
			dest[dest_ix].tir_num = attr->hairpin_tirn;
		}
		dest_ix++;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = priv->fs.vlan.ft.t;
		dest_ix++;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return PTR_ERR(counter);

		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dest_ix].counter_id = mlx5_fc_id(counter);
		dest_ix++;
		attr->counter = counter;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		flow_act.modify_hdr = attr->modify_hdr;
		kfree(parse_attr->mod_hdr_actions);
		if (err)
			return err;
	}

	mutex_lock(&priv->fs.tc.t_lock);
	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		int tc_grp_size, tc_tbl_size;
		u32 max_flow_counter;

		max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
				    MLX5_CAP_GEN(dev, max_flow_counter_15_0);

		tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);

		tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
				    BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));

		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    tc_tbl_size,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    MLX5E_TC_FT_LEVEL, 0);
		if (IS_ERR(priv->fs.tc.t)) {
			mutex_unlock(&priv->fs.tc.t_lock);
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to create tc offload table\n");
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			return PTR_ERR(priv->fs.tc.t);
		}
	}

	if (attr->match_level != MLX5_MATCH_NONE)
		parse_attr->spec.match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	flow->rule[0] = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
					    &flow_act, dest, dest_ix);
	mutex_unlock(&priv->fs.tc.t_lock);

	return PTR_ERR_OR_ZERO(flow->rule[0]);
}
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_fc *counter = NULL;

	counter = attr->counter;
	if (!IS_ERR_OR_NULL(flow->rule[0]))
		mlx5_del_flow_rules(flow->rule[0]);
	mlx5_fc_destroy(priv->mdev, counter);

	mutex_lock(&priv->fs.tc.t_lock);
	if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) && priv->fs.tc.t) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
	mutex_unlock(&priv->fs.tc.t_lock);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);

	if (flow_flag_test(flow, HAIRPIN))
		mlx5e_hairpin_flow_del(priv, flow);
}
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow, int out_index);

static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow,
			      struct net_device *mirred_dev,
			      int out_index,
			      struct netlink_ext_ack *extack,
			      struct net_device **encap_dev,
			      bool *encap_valid);
static struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
			   struct mlx5e_tc_flow *flow,
			   struct mlx5_flow_spec *spec,
			   struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_handle *rule;

	rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
	if (IS_ERR(rule))
		return rule;

	if (attr->split_count) {
		flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
		if (IS_ERR(flow->rule[1])) {
			mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
			return flow->rule[1];
		}
	}

	return rule;
}
static void
mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
			     struct mlx5e_tc_flow *flow,
			     struct mlx5_esw_flow_attr *attr)
{
	flow_flag_clear(flow, OFFLOADED);

	if (attr->split_count)
		mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);

	mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
}
static struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_spec *spec,
			      struct mlx5_esw_flow_attr *slow_attr)
{
	struct mlx5_flow_handle *rule;

	memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->split_count = 0;
	slow_attr->dest_chain = FDB_TC_SLOW_PATH_CHAIN;

	rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
	if (!IS_ERR(rule))
		flow_flag_set(flow, SLOW);

	return rule;
}
static void
mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5_esw_flow_attr *slow_attr)
{
	memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->split_count = 0;
	slow_attr->dest_chain = FDB_TC_SLOW_PATH_CHAIN;
	mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
	flow_flag_clear(flow, SLOW);
}
/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_add(struct mlx5e_tc_flow *flow,
			     struct list_head *unready_flows)
{
	flow_flag_set(flow, NOT_READY);
	list_add_tail(&flow->unready, unready_flows);
}

/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_del(struct mlx5e_tc_flow *flow)
{
	list_del(&flow->unready);
	flow_flag_clear(flow, NOT_READY);
}

static void add_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	unready_flow_add(flow, &uplink_priv->unready_flows);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}

static void remove_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	unready_flow_del(flow);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}
static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	u32 max_chain = mlx5_eswitch_get_chain_range(esw);
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
	u16 max_prio = mlx5_eswitch_get_prio_range(esw);
	struct net_device *out_dev, *encap_dev = NULL;
	struct mlx5_fc *counter = NULL;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_priv *out_priv;
	bool encap_valid = true;
	int err = 0;
	int out_index;

	if (!mlx5_eswitch_prios_supported(esw) && attr->prio != 1) {
		NL_SET_ERR_MSG(extack, "E-switch priorities unsupported, upgrade FW");
		return -EOPNOTSUPP;
	}

	/* We check chain range only for tc flows.
	 * For ft flows, we checked attr->chain was originally 0 and set it to
	 * FDB_FT_CHAIN which is outside tc range.
	 * See mlx5e_rep_setup_ft_cb().
	 */
	if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
		NL_SET_ERR_MSG(extack, "Requested chain is out of supported range");
		return -EOPNOTSUPP;
	}

	if (attr->prio > max_prio) {
		NL_SET_ERR_MSG(extack, "Requested priority is out of supported range");
		return -EOPNOTSUPP;
	}

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
		int mirred_ifindex;

		if (!(attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
			continue;

		mirred_ifindex = parse_attr->mirred_ifindex[out_index];
		out_dev = __dev_get_by_index(dev_net(priv->netdev),
					     mirred_ifindex);
		err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
					 extack, &encap_dev, &encap_valid);
		if (err)
			return err;

		out_priv = netdev_priv(encap_dev);
		rpriv = out_priv->ppriv;
		attr->dests[out_index].rep = rpriv->rep;
		attr->dests[out_index].mdev = out_priv->mdev;
	}

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err)
		return err;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		kfree(parse_attr->mod_hdr_actions);
		if (err)
			return err;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(attr->counter_dev, true);
		if (IS_ERR(counter))
			return PTR_ERR(counter);

		attr->counter = counter;
	}

	/* we get here if one of the following takes place:
	 * (1) there's no error
	 * (2) there's an encap action and we don't have valid neigh
	 */
	if (!encap_valid) {
		/* continue with goto slow path rule instead */
		struct mlx5_esw_flow_attr slow_attr;

		flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec, &slow_attr);
	} else {
		flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
	}

	if (IS_ERR(flow->rule[0]))
		return PTR_ERR(flow->rule[0]);
	else
		flow_flag_set(flow, OFFLOADED);

	return 0;
}
static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_spec *spec = &flow->esw_attr->parse_attr->spec;
	void *headers_v = MLX5_ADDR_OF(fte_match_param,
				       spec->match_value,
				       misc_parameters_3);
	u32 geneve_tlv_opt_0_data = MLX5_GET(fte_match_set_misc3,
					     headers_v,
					     geneve_tlv_option_0_data);

	return !!geneve_tlv_opt_0_data;
}
static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5_esw_flow_attr slow_attr;
	int out_index;

	if (flow_flag_test(flow, NOT_READY)) {
		remove_unready_flow(flow);
		kvfree(attr->parse_attr);
		return;
	}

	if (mlx5e_is_offloaded_flow(flow)) {
		if (flow_flag_test(flow, SLOW))
			mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
		else
			mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
	}

	if (mlx5_flow_has_geneve_opt(flow))
		mlx5_geneve_tlv_option_del(priv->mdev->geneve);

	mlx5_eswitch_del_vlan_action(esw, attr);

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
		if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {
			mlx5e_detach_encap(priv, flow, out_index);
			kfree(attr->parse_attr->tun_info[out_index]);
		}
	kvfree(attr->parse_attr);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
		mlx5_fc_destroy(attr->counter_dev, attr->counter);
}
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e,
			      struct list_head *flow_list)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr slow_attr, *esw_attr;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5e_tc_flow *flow;
	int err;

	e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
						     e->reformat_type,
						     e->encap_size, e->encap_header,
						     MLX5_FLOW_NAMESPACE_FDB);
	if (IS_ERR(e->pkt_reformat)) {
		mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %lu\n",
			       PTR_ERR(e->pkt_reformat));
		return;
	}
	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(priv);

	list_for_each_entry(flow, flow_list, tmp_list) {
		bool all_flow_encaps_valid = true;
		int i;

		if (!mlx5e_is_offloaded_flow(flow))
			continue;
		esw_attr = flow->esw_attr;
		spec = &esw_attr->parse_attr->spec;

		esw_attr->dests[flow->tmp_efi_index].pkt_reformat = e->pkt_reformat;
		esw_attr->dests[flow->tmp_efi_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
		/* Flow can be associated with multiple encap entries.
		 * Before offloading the flow verify that all of them have
		 * a valid neighbour.
		 */
		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
			if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP))
				continue;
			if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID)) {
				all_flow_encaps_valid = false;
				break;
			}
		}
		/* Do not offload flows with unresolved neighbors */
		if (!all_flow_encaps_valid)
			continue;
		/* update from slow path rule to encap rule */
		rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, esw_attr);
		if (IS_ERR(rule)) {
			err = PTR_ERR(rule);
			mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
				       err);
			continue;
		}

		mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
		flow->rule[0] = rule;
		/* was unset when slow path rule removed */
		flow_flag_set(flow, OFFLOADED);
	}
}
void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e,
			      struct list_head *flow_list)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr slow_attr;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5e_tc_flow *flow;
	int err;

	list_for_each_entry(flow, flow_list, tmp_list) {
		if (!mlx5e_is_offloaded_flow(flow))
			continue;
		spec = &flow->esw_attr->parse_attr->spec;

		/* update from encap rule to slow path rule */
		rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec, &slow_attr);
		/* mark the flow's encap dest as non-valid */
		flow->esw_attr->dests[flow->tmp_efi_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;

		if (IS_ERR(rule)) {
			err = PTR_ERR(rule);
			mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n",
				       err);
			continue;
		}

		mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->esw_attr);
		flow->rule[0] = rule;
		/* was unset when fast path rule removed */
		flow_flag_set(flow, OFFLOADED);
	}

	/* we know that the encap is valid */
	e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
	mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
}
static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
	if (mlx5e_is_eswitch_flow(flow))
		return flow->esw_attr->counter;
	else
		return flow->nic_attr->counter;
}
/* Takes reference to all flows attached to encap and adds the flows to
 * flow_list using 'tmp_list' list_head in mlx5e_tc_flow.
 */
void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list)
{
	struct encap_flow_item *efi;
	struct mlx5e_tc_flow *flow;

	list_for_each_entry(efi, &e->flows, list) {
		flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
		if (IS_ERR(mlx5e_flow_get(flow)))
			continue;
		wait_for_completion(&flow->init_done);

		flow->tmp_efi_index = efi->index;
		list_add(&flow->tmp_list, flow_list);
	}
}
/* Iterate over tmp_list of flows attached to flow_list head. */
void mlx5e_put_encap_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
{
	struct mlx5e_tc_flow *flow, *tmp;

	list_for_each_entry_safe(flow, tmp, flow_list, tmp_list)
		mlx5e_flow_put(priv, flow);
}
static struct mlx5e_encap_entry *
mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
			   struct mlx5e_encap_entry *e)
{
	struct mlx5e_encap_entry *next = NULL;

retry:
	rcu_read_lock();

	/* find encap with non-zero reference counter value */
	for (next = e ?
		     list_next_or_null_rcu(&nhe->encap_list,
					   &e->encap_list,
					   struct mlx5e_encap_entry,
					   encap_list) :
		     list_first_or_null_rcu(&nhe->encap_list,
					    struct mlx5e_encap_entry,
					    encap_list);
	     next;
	     next = list_next_or_null_rcu(&nhe->encap_list,
					  &next->encap_list,
					  struct mlx5e_encap_entry,
					  encap_list))
		if (mlx5e_encap_take(next))
			break;

	rcu_read_unlock();

	/* release starting encap */
	if (e)
		mlx5e_encap_put(netdev_priv(e->out_dev), e);
	if (!next)
		return next;

	/* wait for encap to be fully initialized */
	wait_for_completion(&next->res_ready);
	/* continue searching if encap entry is not in valid state after completion */
	if (!(next->flags & MLX5_ENCAP_ENTRY_VALID)) {
		e = next;
		goto retry;
	}

	return next;
}
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
	struct mlx5e_encap_entry *e = NULL;
	struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;
	struct neigh_table *tbl;
	bool neigh_used = false;
	struct neighbour *n;
	u64 lastuse;

	if (m_neigh->family == AF_INET)
		tbl = &arp_tbl;
#if IS_ENABLED(CONFIG_IPV6)
	else if (m_neigh->family == AF_INET6)
		tbl = ipv6_stub->nd_tbl;
#endif
	else
		return;

	/* mlx5e_get_next_valid_encap() releases previous encap before returning
	 * the next one.
	 */
	while ((e = mlx5e_get_next_valid_encap(nhe, e)) != NULL) {
		struct mlx5e_priv *priv = netdev_priv(e->out_dev);
		struct encap_flow_item *efi, *tmp;
		struct mlx5_eswitch *esw;
		LIST_HEAD(flow_list);

		esw = priv->mdev->priv.eswitch;
		mutex_lock(&esw->offloads.encap_tbl_lock);
		list_for_each_entry_safe(efi, tmp, &e->flows, list) {
			flow = container_of(efi, struct mlx5e_tc_flow,
					    encaps[efi->index]);
			if (IS_ERR(mlx5e_flow_get(flow)))
				continue;
			list_add(&flow->tmp_list, &flow_list);

			if (mlx5e_is_offloaded_flow(flow)) {
				counter = mlx5e_tc_get_counter(flow);
				lastuse = mlx5_fc_query_lastuse(counter);
				if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
					neigh_used = true;
					break;
				}
			}
		}
		mutex_unlock(&esw->offloads.encap_tbl_lock);

		mlx5e_put_encap_flow_list(priv, &flow_list);
		if (neigh_used) {
			/* release current encap before breaking the loop */
			mlx5e_encap_put(priv, e);
			break;
		}
	}

	trace_mlx5e_tc_update_neigh_used_value(nhe, neigh_used);

	if (neigh_used) {
		nhe->reported_lastuse = jiffies;

		/* find the relevant neigh according to the cached device and
		 * dst ip pair
		 */
		n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
		if (!n)
			return;

		neigh_event_send(n, NULL);
		neigh_release(n);
	}
}
static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
{
	WARN_ON(!list_empty(&e->flows));

	if (e->compl_result > 0) {
		mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);

		if (e->flags & MLX5_ENCAP_ENTRY_VALID)
			mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
	}

	kfree(e->tun_info);
	kfree(e->encap_header);
	kfree(e);
}
void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (!refcount_dec_and_mutex_lock(&e->refcnt, &esw->offloads.encap_tbl_lock))
		return;
	hash_del_rcu(&e->encap_hlist);
	mutex_unlock(&esw->offloads.encap_tbl_lock);

	mlx5e_encap_dealloc(priv, e);
}
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow, int out_index)
{
	struct mlx5e_encap_entry *e = flow->encaps[out_index].e;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	/* flow wasn't fully initialized */
	if (!e)
		return;

	mutex_lock(&esw->offloads.encap_tbl_lock);
	list_del(&flow->encaps[out_index].list);
	flow->encaps[out_index].e = NULL;
	if (!refcount_dec_and_test(&e->refcnt)) {
		mutex_unlock(&esw->offloads.encap_tbl_lock);
		return;
	}
	hash_del_rcu(&e->encap_hlist);
	mutex_unlock(&esw->offloads.encap_tbl_lock);

	mlx5e_encap_dealloc(priv, e);
}
static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;

	if (!flow_flag_test(flow, ESWITCH) ||
	    !flow_flag_test(flow, DUP))
		return;

	mutex_lock(&esw->offloads.peer_mutex);
	list_del(&flow->peer);
	mutex_unlock(&esw->offloads.peer_mutex);

	flow_flag_clear(flow, DUP);

	if (refcount_dec_and_test(&flow->peer_flow->refcnt)) {
		mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
		kfree(flow->peer_flow);
	}

	flow->peer_flow = NULL;
}
static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_core_dev *dev = flow->priv->mdev;
	struct mlx5_devcom *devcom = dev->priv.devcom;
	struct mlx5_eswitch *peer_esw;

	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		return;

	__mlx5e_tc_del_fdb_peer_flow(flow);
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (mlx5e_is_eswitch_flow(flow)) {
		mlx5e_tc_del_fdb_peer_flow(flow);
		mlx5e_tc_del_fdb_flow(priv, flow);
	} else {
		mlx5e_tc_del_nic_flow(priv, flow);
	}
}
static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct flow_cls_offload *f,
			     struct net_device *filter_dev, u8 *match_level)
{
	struct netlink_ext_ack *extack = f->common.extack;
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	int err;

	err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
				 headers_c, headers_v, match_level);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "failed to parse tunnel attributes");
		return err;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_match_control match;
		u16 addr_type;

		flow_rule_match_enc_control(rule, &match);
		addr_type = match.key->addr_type;

		/* For tunnel addr_type, the same key IDs are used as for non-tunnel */
		if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
			struct flow_match_ipv4_addrs match;

			flow_rule_match_enc_ipv4_addrs(rule, &match);
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 src_ipv4_src_ipv6.ipv4_layout.ipv4,
				 ntohl(match.mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 src_ipv4_src_ipv6.ipv4_layout.ipv4,
				 ntohl(match.key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
				 ntohl(match.mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
				 ntohl(match.key->dst));

			MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
					 ethertype);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
				 ETH_P_IP);
		} else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
			struct flow_match_ipv6_addrs match;

			flow_rule_match_enc_ipv6_addrs(rule, &match);
			memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					    src_ipv4_src_ipv6.ipv6_layout.ipv6),
			       &match.mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout,
								   ipv6));
			memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					    src_ipv4_src_ipv6.ipv6_layout.ipv6),
			       &match.key->src, MLX5_FLD_SZ_BYTES(ipv6_layout,
								  ipv6));

			memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
			       &match.mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout,
								   ipv6));
			memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
			       &match.key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout,
								  ipv6));

			MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
					 ethertype);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
				 ETH_P_IPV6);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
		struct flow_match_ip match;

		flow_rule_match_enc_ip(rule, &match);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
			 match.mask->tos & 0x3);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
			 match.key->tos & 0x3);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
			 match.mask->tos >> 2);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
			 match.key->tos >> 2);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
			 match.mask->ttl);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
			 match.key->ttl);

		if (match.mask->ttl &&
		    !MLX5_CAP_ESW_FLOWTABLE_FDB
			(priv->mdev,
			 ft_field_support.outer_ipv4_ttl)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on TTL is not supported");
			return -EOPNOTSUPP;
		}
	}

	/* Enforce DMAC when offloading incoming tunneled flows.
	 * Flow counters require a match on the DMAC.
	 */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				     dmac_47_16), priv->netdev->dev_addr);

	/* let software handle IP fragments */
	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

	return 0;
}
static void *get_match_headers_criteria(u32 flags,
					struct mlx5_flow_spec *spec)
{
	return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
		MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			     inner_headers) :
		MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			     outer_headers);
}

static void *get_match_headers_value(u32 flags,
				     struct mlx5_flow_spec *spec)
{
	return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
		MLX5_ADDR_OF(fte_match_param, spec->match_value,
			     inner_headers) :
		MLX5_ADDR_OF(fte_match_param, spec->match_value,
			     outer_headers);
}
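
/* Usage sketch: for a flow whose actions include
 * MLX5_FLOW_CONTEXT_ACTION_DECAP, both helpers return the inner_headers
 * block of the match parameter, e.g.
 *
 *	headers_v = get_match_headers_value(attr->action, spec);
 *
 * otherwise they return the outer_headers block; callers below use this
 * to pick the right header layer to match or rewrite after a tunnel decap.
 */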
static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5_flow_spec *spec,
			      struct flow_cls_offload *f,
			      struct net_device *filter_dev,
			      u8 *inner_match_level, u8 *outer_match_level)
{
	struct netlink_ext_ack *extack = f->common.extack;
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = rule->match.dissector;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	u8 *match_level;

	match_level = outer_match_level;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_CVLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_OPTS))) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (mlx5e_get_tc_tun(filter_dev)) {
		if (parse_tunnel_attr(priv, spec, f, filter_dev,
				      outer_match_level))
			return -EOPNOTSUPP;

		/* At this point, header pointers should point to the inner
		 * headers, outer header were already set by parse_tunnel_attr
		 */
		match_level = inner_match_level;
		headers_c = get_match_headers_criteria(MLX5_FLOW_CONTEXT_ACTION_DECAP,
						       spec);
		headers_v = get_match_headers_value(MLX5_FLOW_CONTEXT_ACTION_DECAP,
						    spec);
	}
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(match.mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(match.key->n_proto));

		if (match.mask->n_proto)
			*match_level = MLX5_MATCH_L2;
	}
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
	    is_vlan_dev(filter_dev)) {
		struct flow_dissector_key_vlan filter_dev_mask;
		struct flow_dissector_key_vlan filter_dev_key;
		struct flow_match_vlan match;

		if (is_vlan_dev(filter_dev)) {
			match.key = &filter_dev_key;
			match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
			match.key->vlan_tpid = vlan_dev_vlan_proto(filter_dev);
			match.key->vlan_priority = 0;
			match.mask = &filter_dev_mask;
			memset(match.mask, 0xff, sizeof(*match.mask));
			match.mask->vlan_priority = 0;
		} else {
			flow_rule_match_vlan(rule, &match);
		}
		if (match.mask->vlan_id ||
		    match.mask->vlan_priority ||
		    match.mask->vlan_tpid) {
			if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
					 svlan_tag, 1);
				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
					 svlan_tag, 1);
			} else {
				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
					 cvlan_tag, 1);
				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
					 cvlan_tag, 1);
			}

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid,
				 match.mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid,
				 match.key->vlan_id);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio,
				 match.mask->vlan_priority);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio,
				 match.key->vlan_priority);

			*match_level = MLX5_MATCH_L2;
		}
	} else if (*match_level != MLX5_MATCH_NONE) {
		/* cvlan_tag enabled in match criteria and
		 * disabled in match value means both S & C tags
		 * don't exist (untagged of both)
		 */
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
		*match_level = MLX5_MATCH_L2;
	}
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_cvlan(rule, &match);
		if (match.mask->vlan_id ||
		    match.mask->vlan_priority ||
		    match.mask->vlan_tpid) {
			if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
				MLX5_SET(fte_match_set_misc, misc_c,
					 outer_second_svlan_tag, 1);
				MLX5_SET(fte_match_set_misc, misc_v,
					 outer_second_svlan_tag, 1);
			} else {
				MLX5_SET(fte_match_set_misc, misc_c,
					 outer_second_cvlan_tag, 1);
				MLX5_SET(fte_match_set_misc, misc_v,
					 outer_second_cvlan_tag, 1);
			}

			MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
				 match.mask->vlan_id);
			MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
				 match.key->vlan_id);
			MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
				 match.mask->vlan_priority);
			MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
				 match.key->vlan_priority);

			*match_level = MLX5_MATCH_L2;
		}
	}
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				match.mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				match.key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				match.mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				match.key->src);

		if (!is_zero_ether_addr(match.mask->src) ||
		    !is_zero_ether_addr(match.mask->dst))
			*match_level = MLX5_MATCH_L2;
	}
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;

		/* the HW doesn't support frag first/later */
		if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
			return -EOPNOTSUPP;

		if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
				 match.key->flags & FLOW_DIS_IS_FRAGMENT);

			/* the HW doesn't need L3 inline to match on frag=no */
			if (!(match.key->flags & FLOW_DIS_IS_FRAGMENT))
				*match_level = MLX5_MATCH_L2;
	/* ***  L2 attributes parsing up to here *** */
			else
				*match_level = MLX5_MATCH_L3;
		}
	}
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		ip_proto = match.key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 match.mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 match.key->ip_proto);

		if (match.mask->ip_proto)
			*match_level = MLX5_MATCH_L3;
	}
	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &match.mask->src, sizeof(match.mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &match.key->src, sizeof(match.key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &match.mask->dst, sizeof(match.mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &match.key->dst, sizeof(match.key->dst));

		if (match.mask->src || match.mask->dst)
			*match_level = MLX5_MATCH_L3;
	}
	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &match.mask->src, sizeof(match.mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &match.key->src, sizeof(match.key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &match.mask->dst, sizeof(match.mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &match.key->dst, sizeof(match.key->dst));

		if (ipv6_addr_type(&match.mask->src) != IPV6_ADDR_ANY ||
		    ipv6_addr_type(&match.mask->dst) != IPV6_ADDR_ANY)
			*match_level = MLX5_MATCH_L3;
	}
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
			 match.mask->tos & 0x3);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
			 match.key->tos & 0x3);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
			 match.mask->tos >> 2);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
			 match.key->tos >> 2);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
			 match.mask->ttl);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
			 match.key->ttl);

		if (match.mask->ttl &&
		    !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
						ft_field_support.outer_ipv4_ttl)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on TTL is not supported");
			return -EOPNOTSUPP;
		}

		if (match.mask->tos || match.mask->ttl)
			*match_level = MLX5_MATCH_L3;
	}

	/* ***  L3 attributes parsing up to here *** */
2107 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
2108 struct flow_match_ports match;
2110 flow_rule_match_ports(rule, &match);
2111 switch (ip_proto) {
2112 case IPPROTO_TCP:
2113 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2114 tcp_sport, ntohs(match.mask->src));
2115 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2116 tcp_sport, ntohs(match.key->src));
2118 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2119 tcp_dport, ntohs(match.mask->dst));
2120 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2121 tcp_dport, ntohs(match.key->dst));
2122 break;
2124 case IPPROTO_UDP:
2125 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2126 udp_sport, ntohs(match.mask->src));
2127 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2128 udp_sport, ntohs(match.key->src));
2130 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2131 udp_dport, ntohs(match.mask->dst));
2132 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2133 udp_dport, ntohs(match.key->dst));
2134 break;
2135 default:
2136 NL_SET_ERR_MSG_MOD(extack,
2137 "Only UDP and TCP transports are supported for L4 matching");
2138 netdev_err(priv->netdev,
2139 "Only UDP and TCP transports are supported\n");
2140 return -EINVAL;
2141 }
2143 if (match.mask->src || match.mask->dst)
2144 *match_level = MLX5_MATCH_L4;
2145 }
2147 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
2148 struct flow_match_tcp match;
2150 flow_rule_match_tcp(rule, &match);
2151 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
2152 ntohs(match.mask->flags));
2153 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
2154 ntohs(match.key->flags));
2156 if (match.mask->flags)
2157 *match_level = MLX5_MATCH_L4;
2158 }
2160 return 0;
2161 }
2163 static int parse_cls_flower(struct mlx5e_priv *priv,
2164 struct mlx5e_tc_flow *flow,
2165 struct mlx5_flow_spec *spec,
2166 struct flow_cls_offload *f,
2167 struct net_device *filter_dev)
2168 {
2169 u8 inner_match_level, outer_match_level, non_tunnel_match_level;
2170 struct netlink_ext_ack *extack = f->common.extack;
2171 struct mlx5_core_dev *dev = priv->mdev;
2172 struct mlx5_eswitch *esw = dev->priv.eswitch;
2173 struct mlx5e_rep_priv *rpriv = priv->ppriv;
2174 struct mlx5_eswitch_rep *rep;
2175 bool is_eswitch_flow;
2176 int err;
2178 inner_match_level = MLX5_MATCH_NONE;
2179 outer_match_level = MLX5_MATCH_NONE;
2181 err = __parse_cls_flower(priv, spec, f, filter_dev, &inner_match_level,
2182 &outer_match_level);
2183 non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
2184 outer_match_level : inner_match_level;
2186 is_eswitch_flow = mlx5e_is_eswitch_flow(flow);
2187 if (!err && is_eswitch_flow) {
2188 rep = rpriv->rep;
2189 if (rep->vport != MLX5_VPORT_UPLINK &&
2190 (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
2191 esw->offloads.inline_mode < non_tunnel_match_level)) {
2192 NL_SET_ERR_MSG_MOD(extack,
2193 "Flow is not offloaded due to min inline setting");
2194 netdev_warn(priv->netdev,
2195 "Flow is not offloaded due to min inline setting, required %d actual %d\n",
2196 non_tunnel_match_level, esw->offloads.inline_mode);
2197 return -EOPNOTSUPP;
2198 }
2199 }
2201 if (is_eswitch_flow) {
2202 flow->esw_attr->inner_match_level = inner_match_level;
2203 flow->esw_attr->outer_match_level = outer_match_level;
2204 } else {
2205 flow->nic_attr->match_level = non_tunnel_match_level;
2206 }
2208 return err;
2209 }
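/* Worked example (editor's illustration, not in the original source),
 * assuming the usual enum ordering MLX5_INLINE_MODE_NONE < L2 < IP < TCP_UDP
 * and MLX5_MATCH_NONE < L2 < L3 < L4: a filter on a VF rep matching TCP
 * ports yields non_tunnel_match_level = MLX5_MATCH_L4 (3). With
 * esw->offloads.inline_mode = MLX5_INLINE_MODE_L2 (1), 1 < 3 holds, so the
 * flow is rejected with "Flow is not offloaded due to min inline setting".
 * Raising the eswitch inline mode (e.g. via devlink) would allow it.
 */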
2211 struct pedit_headers {
2212 struct ethhdr eth;
2213 struct vlan_hdr vlan;
2214 struct iphdr ip4;
2215 struct ipv6hdr ip6;
2216 struct tcphdr tcp;
2217 struct udphdr udp;
2218 };
2220 struct pedit_headers_action {
2221 struct pedit_headers vals;
2222 struct pedit_headers masks;
2223 u32 pedits;
2224 };
2226 static int pedit_header_offsets[] = {
2227 [FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
2228 [FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
2229 [FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
2230 [FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
2231 [FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
2232 };
2234 #define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
2236 static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
2237 struct pedit_headers_action *hdrs)
2239 u32 *curr_pmask, *curr_pval;
2241 curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset);
2242 curr_pval = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset);
2244 if (*curr_pmask & mask) /* disallow acting twice on the same location */
2245 return -EOPNOTSUPP;
2247 *curr_pmask |= mask;
2248 *curr_pval |= (val & mask);
2250 hdrs->pedits++;
2252 return 0;
2253 }
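/* Usage sketch (editor's illustration, values made up): folding a single
 * 4-byte pedit key that writes IPv4 TTL = 0x40 into the accumulated
 * masks/vals. offsetof(struct iphdr, ttl) == 8, and the TTL is the first
 * byte of the 32-bit word starting at that offset:
 *
 *	err = set_pedit_val(FLOW_ACT_MANGLE_HDR_TYPE_IP4,
 *			    htonl(0xff000000), htonl(0x40000000),
 *			    offsetof(struct iphdr, ttl), &hdrs[0]);
 *
 * A second key touching the same byte would hit the "*curr_pmask & mask"
 * check above and fail with -EOPNOTSUPP.
 */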
2256 struct mlx5_fields {
2257 u8 field;
2258 u8 field_bsize;
2259 u32 field_mask;
2260 u32 offset;
2261 u32 match_offset;
2262 };
2264 #define OFFLOAD(fw_field, field_bsize, field_mask, field, off, match_field) \
2265 {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, field_bsize, field_mask, \
2266 offsetof(struct pedit_headers, field) + (off), \
2267 MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)}
2269 /* Returns true if the masked values are equal and there are no rewritten
2270 * bits that are not also covered by the match mask.
2271 */
2272 #define SAME_VAL_MASK(type, valp, maskp, matchvalp, matchmaskp) ({ \
2273 type matchmaskx = *(type *)(matchmaskp); \
2274 type matchvalx = *(type *)(matchvalp); \
2275 type maskx = *(type *)(maskp); \
2276 type valx = *(type *)(valp); \
2278 (valx & maskx) == (matchvalx & matchmaskx) && !(maskx & (maskx ^ \
2279 matchmaskx)); \
2280 })
2282 static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp,
2283 void *matchmaskp, u8 bsize)
2284 {
2285 bool same = false;
2287 switch (bsize) {
2288 case 8:
2289 same = SAME_VAL_MASK(u8, valp, maskp, matchvalp, matchmaskp);
2290 break;
2291 case 16:
2292 same = SAME_VAL_MASK(u16, valp, maskp, matchvalp, matchmaskp);
2293 break;
2294 case 32:
2295 same = SAME_VAL_MASK(u32, valp, maskp, matchvalp, matchmaskp);
2296 break;
2297 }
2299 return same;
2300 }
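/* Worked example for SAME_VAL_MASK (editor's illustration): with u8 operands
 * val = 0x0a, mask = 0xff, matchval = 0x0a, matchmask = 0xff:
 * (0x0a & 0xff) == (0x0a & 0xff) and maskx & (maskx ^ matchmaskx) == 0,
 * so the macro yields true and the redundant rewrite is skipped. With
 * matchmask = 0x0f instead, the second term becomes 0xff & 0xf0 != 0: some
 * rewritten bits are not covered by the match, so the rewrite must be kept.
 */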
2302 static struct mlx5_fields fields[] = {
2303 OFFLOAD(DMAC_47_16, 32, U32_MAX, eth.h_dest[0], 0, dmac_47_16),
2304 OFFLOAD(DMAC_15_0, 16, U16_MAX, eth.h_dest[4], 0, dmac_15_0),
2305 OFFLOAD(SMAC_47_16, 32, U32_MAX, eth.h_source[0], 0, smac_47_16),
2306 OFFLOAD(SMAC_15_0, 16, U16_MAX, eth.h_source[4], 0, smac_15_0),
2307 OFFLOAD(ETHERTYPE, 16, U16_MAX, eth.h_proto, 0, ethertype),
2308 OFFLOAD(FIRST_VID, 16, U16_MAX, vlan.h_vlan_TCI, 0, first_vid),
2310 OFFLOAD(IP_DSCP, 8, 0xfc, ip4.tos, 0, ip_dscp),
2311 OFFLOAD(IP_TTL, 8, U8_MAX, ip4.ttl, 0, ttl_hoplimit),
2312 OFFLOAD(SIPV4, 32, U32_MAX, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4),
2313 OFFLOAD(DIPV4, 32, U32_MAX, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2315 OFFLOAD(SIPV6_127_96, 32, U32_MAX, ip6.saddr.s6_addr32[0], 0,
2316 src_ipv4_src_ipv6.ipv6_layout.ipv6[0]),
2317 OFFLOAD(SIPV6_95_64, 32, U32_MAX, ip6.saddr.s6_addr32[1], 0,
2318 src_ipv4_src_ipv6.ipv6_layout.ipv6[4]),
2319 OFFLOAD(SIPV6_63_32, 32, U32_MAX, ip6.saddr.s6_addr32[2], 0,
2320 src_ipv4_src_ipv6.ipv6_layout.ipv6[8]),
2321 OFFLOAD(SIPV6_31_0, 32, U32_MAX, ip6.saddr.s6_addr32[3], 0,
2322 src_ipv4_src_ipv6.ipv6_layout.ipv6[12]),
2323 OFFLOAD(DIPV6_127_96, 32, U32_MAX, ip6.daddr.s6_addr32[0], 0,
2324 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[0]),
2325 OFFLOAD(DIPV6_95_64, 32, U32_MAX, ip6.daddr.s6_addr32[1], 0,
2326 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[4]),
2327 OFFLOAD(DIPV6_63_32, 32, U32_MAX, ip6.daddr.s6_addr32[2], 0,
2328 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[8]),
2329 OFFLOAD(DIPV6_31_0, 32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
2330 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
2331 OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
2333 OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0, tcp_sport),
2334 OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest, 0, tcp_dport),
2335 /* in the linux tcphdr (not iphdr), tcp_flags is 8 bits long */
2336 OFFLOAD(TCP_FLAGS, 8, U8_MAX, tcp.ack_seq, 5, tcp_flags),
2338 OFFLOAD(UDP_SPORT, 16, U16_MAX, udp.source, 0, udp_sport),
2339 OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport),
2340 };
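/* Note on the TCP_FLAGS entry above (editor's illustration): in struct tcphdr,
 * ack_seq starts at byte offset 8, so "tcp.ack_seq" + 5 addresses byte 13 of
 * the TCP header - the byte carrying the eight FIN..CWR flag bits. The 8-bit
 * field_bsize and U8_MAX mask pluck exactly that byte out of the 4-byte
 * aligned pedit word.
 */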
2342 /* On input, parse_attr->max_mod_hdr_actions says how many HW actions can at
2343 * most be parsed from the SW pedit actions. On success,
2344 * parse_attr->num_mod_hdr_actions says how many HW actions were actually parsed.
2345 */
2346 static int offload_pedit_fields(struct pedit_headers_action *hdrs,
2347 struct mlx5e_tc_flow_parse_attr *parse_attr,
2348 u32 *action_flags,
2349 struct netlink_ext_ack *extack)
2350 {
2351 struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
2352 int i, action_size, nactions, max_actions, first, last, next_z;
2353 void *headers_c, *headers_v, *action, *vals_p;
2354 u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
2355 struct mlx5_fields *f;
2356 unsigned long mask;
2357 __be32 mask_be32;
2358 __be16 mask_be16;
2359 u8 cmd;
2361 headers_c = get_match_headers_criteria(*action_flags, &parse_attr->spec);
2362 headers_v = get_match_headers_value(*action_flags, &parse_attr->spec);
2364 set_masks = &hdrs[0].masks;
2365 add_masks = &hdrs[1].masks;
2366 set_vals = &hdrs[0].vals;
2367 add_vals = &hdrs[1].vals;
2369 action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
2370 action = parse_attr->mod_hdr_actions +
2371 parse_attr->num_mod_hdr_actions * action_size;
2373 max_actions = parse_attr->max_mod_hdr_actions;
2374 nactions = parse_attr->num_mod_hdr_actions;
2376 for (i = 0; i < ARRAY_SIZE(fields); i++) {
2377 bool skip;
2379 f = &fields[i];
2380 /* avoid seeing bits set from previous iterations */
2381 s_mask = 0;
2382 a_mask = 0;
2384 s_masks_p = (void *)set_masks + f->offset;
2385 a_masks_p = (void *)add_masks + f->offset;
2387 s_mask = *s_masks_p & f->field_mask;
2388 a_mask = *a_masks_p & f->field_mask;
2390 if (!s_mask && !a_mask) /* nothing to offload here */
2391 continue;
2393 if (s_mask && a_mask) {
2394 NL_SET_ERR_MSG_MOD(extack,
2395 "can't set and add to the same HW field");
2396 printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
2397 return -EOPNOTSUPP;
2398 }
2400 if (nactions == max_actions) {
2401 NL_SET_ERR_MSG_MOD(extack,
2402 "too many pedit actions, can't offload");
2403 printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
2404 return -EOPNOTSUPP;
2405 }
2407 skip = false;
2408 if (s_mask) {
2409 void *match_mask = headers_c + f->match_offset;
2410 void *match_val = headers_v + f->match_offset;
2412 cmd = MLX5_ACTION_TYPE_SET;
2413 mask = s_mask;
2414 vals_p = (void *)set_vals + f->offset;
2415 /* don't rewrite if we have a match on the same value */
2416 if (cmp_val_mask(vals_p, s_masks_p, match_val,
2417 match_mask, f->field_bsize))
2418 skip = true;
2419 /* clear to denote we consumed this field */
2420 *s_masks_p &= ~f->field_mask;
2421 } else {
2422 cmd = MLX5_ACTION_TYPE_ADD;
2423 mask = a_mask;
2424 vals_p = (void *)add_vals + f->offset;
2425 /* add 0 is no change */
2426 if ((*(u32 *)vals_p & f->field_mask) == 0)
2427 skip = true;
2428 /* clear to denote we consumed this field */
2429 *a_masks_p &= ~f->field_mask;
2430 }
2431 if (skip)
2432 continue;
2434 if (f->field_bsize == 32) {
2435 mask_be32 = *(__be32 *)&mask;
2436 mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
2437 } else if (f->field_bsize == 16) {
2438 mask_be16 = *(__be16 *)&mask;
2439 mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
2440 }
2442 first = find_first_bit(&mask, f->field_bsize);
2443 next_z = find_next_zero_bit(&mask, f->field_bsize, first);
2444 last = find_last_bit(&mask, f->field_bsize);
2445 if (first < next_z && next_z < last) {
2446 NL_SET_ERR_MSG_MOD(extack,
2447 "rewrite of few sub-fields isn't supported");
2448 printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
2449 mask);
2450 return -EOPNOTSUPP;
2451 }
2453 MLX5_SET(set_action_in, action, action_type, cmd);
2454 MLX5_SET(set_action_in, action, field, f->field);
2456 if (cmd == MLX5_ACTION_TYPE_SET) {
2457 int start;
2459 /* if field is bit sized it can start not from first bit */
2460 start = find_first_bit((unsigned long *)&f->field_mask,
2461 f->field_bsize);
2463 MLX5_SET(set_action_in, action, offset, first - start);
2464 /* length is num of bits to be written, zero means length of 32 */
2465 MLX5_SET(set_action_in, action, length, (last - first + 1));
2466 }
2468 if (f->field_bsize == 32)
2469 MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
2470 else if (f->field_bsize == 16)
2471 MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
2472 else if (f->field_bsize == 8)
2473 MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);
2475 action += action_size;
2476 nactions++;
2477 }
2479 parse_attr->num_mod_hdr_actions = nactions;
2481 return 0;
2482 }
2483 static int mlx5e_flow_namespace_max_modify_action(struct mlx5_core_dev *mdev,
2484 int namespace)
2485 {
2486 if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
2487 return MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, max_modify_header_actions);
2488 else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
2489 return MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_modify_header_actions);
2490 }
2492 static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
2493 struct pedit_headers_action *hdrs,
2494 int namespace,
2495 struct mlx5e_tc_flow_parse_attr *parse_attr)
2497 int nkeys, action_size, max_actions;
2499 nkeys = hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits +
2500 hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits;
2501 action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
2503 max_actions = mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace);
2504 /* a 32-bit pedit SW key can expand to as many as 16 HW actions */
2505 max_actions = min(max_actions, nkeys * 16);
2507 parse_attr->mod_hdr_actions = kcalloc(max_actions, action_size, GFP_KERNEL);
2508 if (!parse_attr->mod_hdr_actions)
2509 return -ENOMEM;
2511 parse_attr->max_mod_hdr_actions = max_actions;
2513 return 0;
2514 }
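/* Sizing example (editor's illustration): one SET key plus one ADD key gives
 * nkeys = 2, so the buffer is capped at min(fw_max, 2 * 16) entries of
 * action_size bytes each. With a firmware limit of 128 modify-header actions
 * that is 32 entries; parse_attr->max_mod_hdr_actions then bounds the number
 * of HW actions that offload_pedit_fields() above may emit.
 */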
2515 static const struct pedit_headers zero_masks = {};
2517 static int parse_tc_pedit_action(struct mlx5e_priv *priv,
2518 const struct flow_action_entry *act, int namespace,
2519 struct mlx5e_tc_flow_parse_attr *parse_attr,
2520 struct pedit_headers_action *hdrs,
2521 struct netlink_ext_ack *extack)
2523 u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
2524 int err = -EOPNOTSUPP; /* can't be all optimistic */
2525 u32 mask, val, offset;
2526 u8 htype;
2528 htype = act->mangle.htype;
2531 if (htype == FLOW_ACT_MANGLE_UNSPEC) {
2532 NL_SET_ERR_MSG_MOD(extack, "legacy pedit isn't offloaded");
2533 goto out_err;
2534 }
2536 if (!mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace)) {
2537 NL_SET_ERR_MSG_MOD(extack,
2538 "The pedit offload action is not supported");
2539 goto out_err;
2540 }
2542 mask = act->mangle.mask;
2543 val = act->mangle.val;
2544 offset = act->mangle.offset;
2546 err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd]);
2547 if (err)
2548 goto out_err;
2550 hdrs[cmd].pedits++;
2552 return 0;
2554 out_err:
2555 return err;
2556 }
2557 static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
2558 struct mlx5e_tc_flow_parse_attr *parse_attr,
2559 struct pedit_headers_action *hdrs,
2560 u32 *action_flags,
2561 struct netlink_ext_ack *extack)
2562 {
2563 struct pedit_headers *cmd_masks;
2564 int err;
2565 u8 cmd;
2567 if (!parse_attr->mod_hdr_actions) {
2568 err = alloc_mod_hdr_actions(priv, hdrs, namespace, parse_attr);
2569 if (err)
2570 goto out_err;
2571 }
2573 err = offload_pedit_fields(hdrs, parse_attr, action_flags, extack);
2574 if (err < 0)
2575 goto out_dealloc_parsed_actions;
2577 for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
2578 cmd_masks = &hdrs[cmd].masks;
2579 if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
2580 NL_SET_ERR_MSG_MOD(extack,
2581 "attempt to offload an unsupported field");
2582 netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
2583 print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
2584 16, 1, cmd_masks, sizeof(zero_masks), true);
2585 err = -EOPNOTSUPP;
2586 goto out_dealloc_parsed_actions;
2587 }
2588 }
2590 return 0;
2592 out_dealloc_parsed_actions:
2593 kfree(parse_attr->mod_hdr_actions);
2594 out_err:
2595 return err;
2596 }
2598 static bool csum_offload_supported(struct mlx5e_priv *priv,
2599 u32 action,
2600 u32 update_flags,
2601 struct netlink_ext_ack *extack)
2602 {
2603 u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
2604 TCA_CSUM_UPDATE_FLAG_UDP;
2606 /* The HW recalcs checksums only if re-writing headers */
2607 if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
2608 NL_SET_ERR_MSG_MOD(extack,
2609 "TC csum action is only offloaded with pedit");
2610 netdev_warn(priv->netdev,
2611 "TC csum action is only offloaded with pedit\n");
2612 return false;
2613 }
2615 if (update_flags & ~prot_flags) {
2616 NL_SET_ERR_MSG_MOD(extack,
2617 "can't offload TC csum action for some header/s");
2618 netdev_warn(priv->netdev,
2619 "can't offload TC csum action for some header/s - flags %#x\n",
2620 update_flags);
2621 return false;
2622 }
2624 return true;
2625 }
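/* Usage sketch (editor's illustration, hypothetical device names): the csum
 * action only offloads when it follows a header rewrite, e.g.:
 *
 *	tc filter add dev enp1s0f0_0 ingress protocol ip flower ip_proto tcp \
 *		action pedit ex munge ip ttl set 63 pipe \
 *		action csum iph pipe \
 *		action mirred egress redirect dev enp1s0f0_1
 *
 * The pedit sets MLX5_FLOW_CONTEXT_ACTION_MOD_HDR, and "csum iph" maps to
 * TCA_CSUM_UPDATE_FLAG_IPV4HDR, which is within prot_flags above.
 */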
2627 struct ip_ttl_word {
2628 __u8 ttl;
2629 __u8 protocol;
2630 __sum16 check;
2631 };
2633 struct ipv6_hoplimit_word {
2634 __be16 payload_len;
2635 __u8 nexthdr;
2636 __u8 hop_limit;
2637 };
2639 static bool is_action_keys_supported(const struct flow_action_entry *act)
2640 {
2641 u32 mask, offset;
2642 u8 htype;
2644 htype = act->mangle.htype;
2645 offset = act->mangle.offset;
2646 mask = ~act->mangle.mask;
2647 /* For the IPv4 and IPv6 headers, check the whole 4-byte word that holds
2648 * ttl/hop_limit, to determine whether any field other than ttl/hop_limit
2649 * is being modified.
2650 */
2651 if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
2652 struct ip_ttl_word *ttl_word =
2653 (struct ip_ttl_word *)&mask;
2655 if (offset != offsetof(struct iphdr, ttl) ||
2656 ttl_word->protocol ||
2660 } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
2661 struct ipv6_hoplimit_word *hoplimit_word =
2662 (struct ipv6_hoplimit_word *)&mask;
2664 if (offset != offsetof(struct ipv6hdr, payload_len) ||
2665 hoplimit_word->payload_len ||
2666 hoplimit_word->nexthdr) {
2667 return true;
2668 }
2669 }
2670 return false;
2671 }
2673 static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
2674 struct flow_action *flow_action,
2675 u32 actions,
2676 struct netlink_ext_ack *extack)
2677 {
2678 const struct flow_action_entry *act;
2679 bool modify_ip_header;
2680 void *headers_v;
2681 u16 ethertype;
2682 u8 ip_proto;
2683 int i;
2685 headers_v = get_match_headers_value(actions, spec);
2686 ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
2688 /* for non-IP we only re-write MACs, so we're okay */
2689 if (ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
2690 return true;
2692 modify_ip_header = false;
2693 flow_action_for_each(i, act, flow_action) {
2694 if (act->id != FLOW_ACTION_MANGLE &&
2695 act->id != FLOW_ACTION_ADD)
2696 continue;
2698 if (is_action_keys_supported(act)) {
2699 modify_ip_header = true;
2700 break;
2701 }
2702 }
2704 ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
2705 if (modify_ip_header && ip_proto != IPPROTO_TCP &&
2706 ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
2707 NL_SET_ERR_MSG_MOD(extack,
2708 "can't offload re-write of non TCP/UDP");
2709 pr_info("can't offload re-write of ip proto %d\n", ip_proto);
2710 return false;
2711 }
2713 return true;
2714 }
2717 static bool actions_match_supported(struct mlx5e_priv *priv,
2718 struct flow_action *flow_action,
2719 struct mlx5e_tc_flow_parse_attr *parse_attr,
2720 struct mlx5e_tc_flow *flow,
2721 struct netlink_ext_ack *extack)
2722 {
2723 u32 actions;
2725 if (mlx5e_is_eswitch_flow(flow))
2726 actions = flow->esw_attr->action;
2727 else
2728 actions = flow->nic_attr->action;
2730 if (flow_flag_test(flow, EGRESS) &&
2731 !((actions & MLX5_FLOW_CONTEXT_ACTION_DECAP) ||
2732 (actions & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
2733 (actions & MLX5_FLOW_CONTEXT_ACTION_DROP)))
2734 return false;
2736 if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
2737 return modify_header_match_supported(&parse_attr->spec,
2738 flow_action, actions,
2739 extack);
2741 return true;
2742 }
2744 static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
2745 {
2746 struct mlx5_core_dev *fmdev, *pmdev;
2747 u64 fsystem_guid, psystem_guid;
2749 fmdev = priv->mdev;
2750 pmdev = peer_priv->mdev;
2752 fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
2753 psystem_guid = mlx5_query_nic_system_image_guid(pmdev);
2755 return (fsystem_guid == psystem_guid);
2756 }
2758 static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace,
2759 const struct flow_action_entry *act,
2760 struct mlx5e_tc_flow_parse_attr *parse_attr,
2761 struct pedit_headers_action *hdrs,
2762 u32 *action, struct netlink_ext_ack *extack)
2764 u16 mask16 = VLAN_VID_MASK;
2765 u16 val16 = act->vlan.vid & VLAN_VID_MASK;
2766 const struct flow_action_entry pedit_act = {
2767 .id = FLOW_ACTION_MANGLE,
2768 .mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_ETH,
2769 .mangle.offset = offsetof(struct vlan_ethhdr, h_vlan_TCI),
2770 .mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16),
2771 .mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16),
2772 };
2773 u8 match_prio_mask, match_prio_val;
2774 void *headers_c, *headers_v;
2775 int err;
2777 headers_c = get_match_headers_criteria(*action, &parse_attr->spec);
2778 headers_v = get_match_headers_value(*action, &parse_attr->spec);
2780 if (!(MLX5_GET(fte_match_set_lyr_2_4, headers_c, cvlan_tag) &&
2781 MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag))) {
2782 NL_SET_ERR_MSG_MOD(extack,
2783 "VLAN rewrite action must have VLAN protocol match");
2784 return -EOPNOTSUPP;
2785 }
2787 match_prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
2788 match_prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
2789 if (act->vlan.prio != (match_prio_val & match_prio_mask)) {
2790 NL_SET_ERR_MSG_MOD(extack,
2791 "Changing VLAN prio is not supported");
2792 return -EOPNOTSUPP;
2793 }
2795 err = parse_tc_pedit_action(priv, &pedit_act, namespace, parse_attr,
2796 hdrs, extack);
2797 *action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
2799 return err;
2800 }
2803 add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv,
2804 struct mlx5e_tc_flow_parse_attr *parse_attr,
2805 struct pedit_headers_action *hdrs,
2806 u32 *action, struct netlink_ext_ack *extack)
2807 {
2808 const struct flow_action_entry prio_tag_act = {
2809 .vlan.vid = 0,
2810 .vlan.prio =
2811 MLX5_GET(fte_match_set_lyr_2_4,
2812 get_match_headers_value(*action,
2813 &parse_attr->spec),
2814 first_prio) &
2815 MLX5_GET(fte_match_set_lyr_2_4,
2816 get_match_headers_criteria(*action,
2817 &parse_attr->spec),
2818 first_prio),
2819 };
2821 return add_vlan_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB,
2822 &prio_tag_act, parse_attr, hdrs, action,
2823 extack);
2824 }
2826 static int parse_tc_nic_actions(struct mlx5e_priv *priv,
2827 struct flow_action *flow_action,
2828 struct mlx5e_tc_flow_parse_attr *parse_attr,
2829 struct mlx5e_tc_flow *flow,
2830 struct netlink_ext_ack *extack)
2831 {
2832 struct mlx5_nic_flow_attr *attr = flow->nic_attr;
2833 struct pedit_headers_action hdrs[2] = {};
2834 const struct flow_action_entry *act;
2835 u32 action = 0;
2836 int err, i;
2838 if (!flow_action_has_entries(flow_action))
2839 return -EINVAL;
2841 attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
2843 flow_action_for_each(i, act, flow_action) {
2844 switch (act->id) {
2845 case FLOW_ACTION_DROP:
2846 action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
2847 if (MLX5_CAP_FLOWTABLE(priv->mdev,
2848 flow_table_properties_nic_receive.flow_counter))
2849 action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
2850 break;
2851 case FLOW_ACTION_MANGLE:
2852 case FLOW_ACTION_ADD:
2853 err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_KERNEL,
2854 parse_attr, hdrs, extack);
2855 if (err)
2856 return err;
2858 action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
2859 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2860 break;
2861 case FLOW_ACTION_VLAN_MANGLE:
2862 err = add_vlan_rewrite_action(priv,
2863 MLX5_FLOW_NAMESPACE_KERNEL,
2864 act, parse_attr, hdrs,
2865 &action, extack);
2866 if (err)
2867 return err;
2869 break;
2870 case FLOW_ACTION_CSUM:
2871 if (csum_offload_supported(priv, action,
2872 act->csum_flags,
2873 extack))
2874 break;
2876 return -EOPNOTSUPP;
2877 case FLOW_ACTION_REDIRECT: {
2878 struct net_device *peer_dev = act->dev;
2880 if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
2881 same_hw_devs(priv, netdev_priv(peer_dev))) {
2882 parse_attr->mirred_ifindex[0] = peer_dev->ifindex;
2883 flow_flag_set(flow, HAIRPIN);
2884 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
2885 MLX5_FLOW_CONTEXT_ACTION_COUNT;
2886 } else {
2887 NL_SET_ERR_MSG_MOD(extack,
2888 "device is not on same HW, can't offload");
2889 netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
2890 peer_dev->name);
2891 return -EINVAL;
2892 }
2893 }
2894 break;
2895 case FLOW_ACTION_MARK: {
2896 u32 mark = act->mark;
2898 if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
2899 NL_SET_ERR_MSG_MOD(extack,
2900 "Bad flow mark - only 16 bits are supported");
2901 return -EINVAL;
2902 }
2904 attr->flow_tag = mark;
2905 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2906 break;
2907 }
2908 default:
2909 NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
2910 return -EOPNOTSUPP;
2911 }
2912 }
2914 if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
2915 hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
2916 err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
2917 parse_attr, hdrs, &action, extack);
2918 if (err)
2919 return err;
2920 /* in case all pedit actions are skipped, remove the MOD_HDR
2921 * flag.
2922 */
2923 if (parse_attr->num_mod_hdr_actions == 0) {
2924 action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
2925 kfree(parse_attr->mod_hdr_actions);
2926 }
2927 }
2929 attr->action = action;
2930 if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
2931 return -EOPNOTSUPP;
2933 return 0;
2934 }
2936 struct encap_key {
2937 const struct ip_tunnel_key *ip_tun_key;
2938 struct mlx5e_tc_tunnel *tc_tunnel;
2939 };
2941 static inline int cmp_encap_info(struct encap_key *a,
2942 struct encap_key *b)
2943 {
2944 return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
2945 a->tc_tunnel->tunnel_type != b->tc_tunnel->tunnel_type;
2946 }
2948 static inline int hash_encap_info(struct encap_key *key)
2949 {
2950 return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
2951 key->tc_tunnel->tunnel_type);
2952 }
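/* Sketch of the dedup scheme (editor's illustration): two flows that
 * encapsulate into the same tunnel, e.g. both built as
 *
 *	struct encap_key key = { .ip_tun_key = &tun_info->key,
 *				 .tc_tunnel  = mlx5e_get_tc_tun(mirred_dev) };
 *
 * hash to the same hash_encap_info() bucket and compare equal under
 * cmp_encap_info(), so mlx5e_attach_encap() below reuses one refcounted
 * mlx5e_encap_entry instead of programming a second encap header.
 */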
2955 static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
2956 struct net_device *peer_netdev)
2957 {
2958 struct mlx5e_priv *peer_priv;
2960 peer_priv = netdev_priv(peer_netdev);
2962 return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
2963 mlx5e_eswitch_rep(priv->netdev) &&
2964 mlx5e_eswitch_rep(peer_netdev) &&
2965 same_hw_devs(priv, peer_priv));
2966 }
2970 bool mlx5e_encap_take(struct mlx5e_encap_entry *e)
2971 {
2972 return refcount_inc_not_zero(&e->refcnt);
2973 }
2975 static struct mlx5e_encap_entry *
2976 mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key,
2977 uintptr_t hash_key)
2978 {
2979 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
2980 struct mlx5e_encap_entry *e;
2981 struct encap_key e_key;
2983 hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
2984 encap_hlist, hash_key) {
2985 e_key.ip_tun_key = &e->tun_info->key;
2986 e_key.tc_tunnel = e->tunnel;
2987 if (!cmp_encap_info(&e_key, key) &&
2988 mlx5e_encap_take(e))
2989 return e;
2990 }
2992 return NULL;
2993 }
2995 static struct ip_tunnel_info *dup_tun_info(const struct ip_tunnel_info *tun_info)
2996 {
2997 size_t tun_size = sizeof(*tun_info) + tun_info->options_len;
2999 return kmemdup(tun_info, tun_size, GFP_KERNEL);
3000 }
3002 static bool is_duplicated_encap_entry(struct mlx5e_priv *priv,
3003 struct mlx5e_tc_flow *flow,
3004 int out_index,
3005 struct mlx5e_encap_entry *e,
3006 struct netlink_ext_ack *extack)
3007 {
3008 int i;
3010 for (i = 0; i < out_index; i++) {
3011 if (flow->encaps[i].e != e)
3013 NL_SET_ERR_MSG_MOD(extack, "can't duplicate encap action");
3014 netdev_err(priv->netdev, "can't duplicate encap action\n");
3015 return true;
3016 }
3017 }
3019 return false;
3020 }
3021 static int mlx5e_attach_encap(struct mlx5e_priv *priv,
3022 struct mlx5e_tc_flow *flow,
3023 struct net_device *mirred_dev,
3024 int out_index,
3025 struct netlink_ext_ack *extack,
3026 struct net_device **encap_dev,
3027 bool *encap_valid)
3028 {
3029 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3030 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
3031 struct mlx5e_tc_flow_parse_attr *parse_attr;
3032 const struct ip_tunnel_info *tun_info;
3033 struct encap_key key;
3034 struct mlx5e_encap_entry *e;
3035 unsigned short family;
3036 uintptr_t hash_key;
3037 int err = 0;
3039 parse_attr = attr->parse_attr;
3040 tun_info = parse_attr->tun_info[out_index];
3041 family = ip_tunnel_info_af(tun_info);
3042 key.ip_tun_key = &tun_info->key;
3043 key.tc_tunnel = mlx5e_get_tc_tun(mirred_dev);
3044 if (!key.tc_tunnel) {
3045 NL_SET_ERR_MSG_MOD(extack, "Unsupported tunnel");
3046 return -EOPNOTSUPP;
3047 }
3049 hash_key = hash_encap_info(&key);
3051 mutex_lock(&esw->offloads.encap_tbl_lock);
3052 e = mlx5e_encap_get(priv, &key, hash_key);
3054 /* must verify if encap is valid or not */
3055 if (e) {
3056 /* Check that entry was not already attached to this flow */
3057 if (is_duplicated_encap_entry(priv, flow, out_index, e, extack)) {
3058 err = -EOPNOTSUPP;
3059 goto out_err;
3060 }
3062 mutex_unlock(&esw->offloads.encap_tbl_lock);
3063 wait_for_completion(&e->res_ready);
3065 /* Protect against concurrent neigh update. */
3066 mutex_lock(&esw->offloads.encap_tbl_lock);
3067 if (e->compl_result < 0) {
3068 err = -EREMOTEIO;
3069 goto out_err;
3070 }
3071 goto attach_flow;
3072 }
3074 e = kzalloc(sizeof(*e), GFP_KERNEL);
3075 if (!e) {
3076 err = -ENOMEM;
3077 goto out_err;
3078 }
3080 refcount_set(&e->refcnt, 1);
3081 init_completion(&e->res_ready);
3083 tun_info = dup_tun_info(tun_info);
3084 if (!tun_info) {
3085 err = -ENOMEM;
3086 goto out_err_init;
3087 }
3088 e->tun_info = tun_info;
3089 err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
3090 if (err)
3091 goto out_err_init;
3093 INIT_LIST_HEAD(&e->flows);
3094 hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
3095 mutex_unlock(&esw->offloads.encap_tbl_lock);
3097 if (family == AF_INET)
3098 err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e);
3099 else if (family == AF_INET6)
3100 err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e);
3102 /* Protect against concurrent neigh update. */
3103 mutex_lock(&esw->offloads.encap_tbl_lock);
3104 complete_all(&e->res_ready);
3105 if (err) {
3106 e->compl_result = err;
3107 goto out_err;
3108 }
3109 e->compl_result = 1;
3111 attach_flow:
3112 flow->encaps[out_index].e = e;
3113 list_add(&flow->encaps[out_index].list, &e->flows);
3114 flow->encaps[out_index].index = out_index;
3115 *encap_dev = e->out_dev;
3116 if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
3117 attr->dests[out_index].pkt_reformat = e->pkt_reformat;
3118 attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
3119 *encap_valid = true;
3120 } else {
3121 *encap_valid = false;
3122 }
3123 mutex_unlock(&esw->offloads.encap_tbl_lock);
3124 return err;
3126 out_err:
3128 mutex_unlock(&esw->offloads.encap_tbl_lock);
3129 if (e)
3130 mlx5e_encap_put(priv, e);
3131 return err;
3133 out_err_init:
3134 mutex_unlock(&esw->offloads.encap_tbl_lock);
3135 kfree(tun_info);
3136 kfree(e);
3137 return err;
3138 }
3140 static int parse_tc_vlan_action(struct mlx5e_priv *priv,
3141 const struct flow_action_entry *act,
3142 struct mlx5_esw_flow_attr *attr,
3143 u32 *action)
3144 {
3145 u8 vlan_idx = attr->total_vlan;
3147 if (vlan_idx >= MLX5_FS_VLAN_DEPTH)
3148 return -EOPNOTSUPP;
3150 switch (act->id) {
3151 case FLOW_ACTION_VLAN_POP:
3152 if (vlan_idx) {
3153 if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
3154 MLX5_FS_VLAN_DEPTH))
3155 return -EOPNOTSUPP;
3157 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
3158 } else {
3159 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
3160 }
3161 break;
3162 case FLOW_ACTION_VLAN_PUSH:
3163 attr->vlan_vid[vlan_idx] = act->vlan.vid;
3164 attr->vlan_prio[vlan_idx] = act->vlan.prio;
3165 attr->vlan_proto[vlan_idx] = act->vlan.proto;
3166 if (!attr->vlan_proto[vlan_idx])
3167 attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);
3169 if (vlan_idx) {
3170 if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
3171 MLX5_FS_VLAN_DEPTH))
3172 return -EOPNOTSUPP;
3174 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
3175 } else {
3176 if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
3177 (act->vlan.proto != htons(ETH_P_8021Q) ||
3178 act->vlan.prio))
3179 return -EOPNOTSUPP;
3181 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
3182 }
3183 break;
3184 default:
3185 return -EINVAL;
3186 }
3188 attr->total_vlan = vlan_idx + 1;
3190 return 0;
3191 }
3193 static int add_vlan_push_action(struct mlx5e_priv *priv,
3194 struct mlx5_esw_flow_attr *attr,
3195 struct net_device **out_dev,
3196 u32 *action)
3197 {
3198 struct net_device *vlan_dev = *out_dev;
3199 struct flow_action_entry vlan_act = {
3200 .id = FLOW_ACTION_VLAN_PUSH,
3201 .vlan.vid = vlan_dev_vlan_id(vlan_dev),
3202 .vlan.proto = vlan_dev_vlan_proto(vlan_dev),
3203 .vlan.prio = 0,
3204 };
3205 int err;
3207 err = parse_tc_vlan_action(priv, &vlan_act, attr, action);
3208 if (err)
3209 return err;
3211 *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev),
3212 dev_get_iflink(vlan_dev));
3213 if (is_vlan_dev(*out_dev))
3214 err = add_vlan_push_action(priv, attr, out_dev, action);
3216 return err;
3217 }
3219 static int add_vlan_pop_action(struct mlx5e_priv *priv,
3220 struct mlx5_esw_flow_attr *attr,
3221 u32 *action)
3222 {
3223 int nest_level = attr->parse_attr->filter_dev->lower_level;
3224 struct flow_action_entry vlan_act = {
3225 .id = FLOW_ACTION_VLAN_POP,
3226 };
3227 int err = 0;
3229 while (nest_level--) {
3230 err = parse_tc_vlan_action(priv, &vlan_act, attr, action);
3231 if (err)
3232 return err;
3233 }
3235 return err;
3236 }
3238 bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
3239 struct net_device *out_dev)
3240 {
3241 if (is_merged_eswitch_dev(priv, out_dev))
3242 return true;
3244 return mlx5e_eswitch_rep(out_dev) &&
3245 same_hw_devs(priv, netdev_priv(out_dev));
3246 }
3248 static bool is_duplicated_output_device(struct net_device *dev,
3249 struct net_device *out_dev,
3250 int *ifindexes, int if_count,
3251 struct netlink_ext_ack *extack)
3252 {
3253 int i;
3255 for (i = 0; i < if_count; i++) {
3256 if (ifindexes[i] == out_dev->ifindex) {
3257 NL_SET_ERR_MSG_MOD(extack,
3258 "can't duplicate output to same device");
3259 netdev_err(dev, "can't duplicate output to same device: %s\n",
3260 out_dev->name);
3261 return true;
3262 }
3263 }
3265 return false;
3266 }
3268 static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
3269 struct flow_action *flow_action,
3270 struct mlx5e_tc_flow *flow,
3271 struct netlink_ext_ack *extack)
3273 struct pedit_headers_action hdrs[2] = {};
3274 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3275 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
3276 struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
3277 struct mlx5e_rep_priv *rpriv = priv->ppriv;
3278 const struct ip_tunnel_info *info = NULL;
3279 int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
3280 bool ft_flow = mlx5e_is_ft_flow(flow);
3281 const struct flow_action_entry *act;
3282 int err, i, if_count = 0;
3283 bool encap = false;
3284 u32 action = 0;
3286 if (!flow_action_has_entries(flow_action))
3287 return -EINVAL;
3289 flow_action_for_each(i, act, flow_action) {
3290 switch (act->id) {
3291 case FLOW_ACTION_DROP:
3292 action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
3293 MLX5_FLOW_CONTEXT_ACTION_COUNT;
3294 break;
3295 case FLOW_ACTION_MANGLE:
3296 case FLOW_ACTION_ADD:
3297 err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_FDB,
3298 parse_attr, hdrs, extack);
3299 if (err)
3300 return err;
3302 action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3303 attr->split_count = attr->out_count;
3304 break;
3305 case FLOW_ACTION_CSUM:
3306 if (csum_offload_supported(priv, action,
3307 act->csum_flags, extack))
3308 break;
3310 return -EOPNOTSUPP;
3311 case FLOW_ACTION_REDIRECT:
3312 case FLOW_ACTION_MIRRED: {
3313 struct mlx5e_priv *out_priv;
3314 struct net_device *out_dev;
3316 out_dev = act->dev;
3318 /* out_dev is NULL when filters with
3319 * non-existing mirred device are replayed to
3320 * the driver.
3321 */
3322 if (!out_dev)
3323 return -EINVAL;
3325 if (ft_flow && out_dev == priv->netdev) {
3326 /* Ignore forward to self rules generated
3327 * by adding both mlx5 devs to the flow table
3328 * block on a normal nft offload setup.
3329 */
3330 return -EOPNOTSUPP;
3331 }
3333 if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
3334 NL_SET_ERR_MSG_MOD(extack,
3335 "can't support more output ports, can't offload forwarding");
3336 pr_err("can't support more than %d output ports, can't offload forwarding\n",
3337 attr->out_count);
3338 return -EOPNOTSUPP;
3339 }
3341 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
3342 MLX5_FLOW_CONTEXT_ACTION_COUNT;
3343 if (encap) {
3344 parse_attr->mirred_ifindex[attr->out_count] =
3345 out_dev->ifindex;
3346 parse_attr->tun_info[attr->out_count] = dup_tun_info(info);
3347 if (!parse_attr->tun_info[attr->out_count])
3348 return -ENOMEM;
3350 attr->dests[attr->out_count].flags |=
3351 MLX5_ESW_DEST_ENCAP;
3352 attr->out_count++;
3353 /* attr->dests[].rep is resolved when we
3354 * handle encap
3355 */
3356 } else if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
3357 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3358 struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
3359 struct net_device *uplink_upper;
3361 if (is_duplicated_output_device(priv->netdev,
3362 out_dev,
3363 ifindexes,
3364 if_count,
3365 extack))
3366 return -EOPNOTSUPP;
3368 ifindexes[if_count] = out_dev->ifindex;
3369 if_count++;
3371 rcu_read_lock();
3372 uplink_upper =
3373 netdev_master_upper_dev_get_rcu(uplink_dev);
3374 if (uplink_upper &&
3375 netif_is_lag_master(uplink_upper) &&
3376 uplink_upper == out_dev)
3377 out_dev = uplink_dev;
3378 rcu_read_unlock();
3380 if (is_vlan_dev(out_dev)) {
3381 err = add_vlan_push_action(priv, attr,
3382 &out_dev,
3383 &action);
3384 if (err)
3385 return err;
3386 }
3388 if (is_vlan_dev(parse_attr->filter_dev)) {
3389 err = add_vlan_pop_action(priv, attr,
3390 &action);
3391 if (err)
3392 return err;
3393 }
3395 if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) {
3396 NL_SET_ERR_MSG_MOD(extack,
3397 "devices are not on same switch HW, can't offload forwarding");
3398 pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
3399 priv->netdev->name, out_dev->name);
3400 return -EOPNOTSUPP;
3401 }
3403 out_priv = netdev_priv(out_dev);
3404 rpriv = out_priv->ppriv;
3405 attr->dests[attr->out_count].rep = rpriv->rep;
3406 attr->dests[attr->out_count].mdev = out_priv->mdev;
3407 attr->out_count++;
3408 } else if (parse_attr->filter_dev != priv->netdev) {
3409 /* All mlx5 devices are called to configure
3410 * high level device filters. Therefore, the
3411 * *attempt* to install a filter on invalid
3412 * eswitch should not trigger an explicit error
3413 */
3414 return -EINVAL;
3415 } else {
3416 NL_SET_ERR_MSG_MOD(extack,
3417 "devices are not on same switch HW, can't offload forwarding");
3418 pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
3419 priv->netdev->name, out_dev->name);
3420 return -EINVAL;
3421 }
3422 }
3423 break;
3424 case FLOW_ACTION_TUNNEL_ENCAP:
3425 info = act->tunnel;
3426 if (info)
3427 encap = true;
3428 else
3429 return -EOPNOTSUPP;
3431 break;
3432 case FLOW_ACTION_VLAN_PUSH:
3433 case FLOW_ACTION_VLAN_POP:
3434 if (act->id == FLOW_ACTION_VLAN_PUSH &&
3435 (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) {
3436 /* Replace vlan pop+push with vlan modify */
3437 action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
3438 err = add_vlan_rewrite_action(priv,
3439 MLX5_FLOW_NAMESPACE_FDB,
3440 act, parse_attr, hdrs,
3441 &action, extack);
3442 } else {
3443 err = parse_tc_vlan_action(priv, act, attr, &action);
3444 }
3445 if (err)
3446 return err;
3448 attr->split_count = attr->out_count;
3449 break;
3450 case FLOW_ACTION_VLAN_MANGLE:
3451 err = add_vlan_rewrite_action(priv,
3452 MLX5_FLOW_NAMESPACE_FDB,
3453 act, parse_attr, hdrs,
3454 &action, extack);
3455 if (err)
3456 return err;
3458 attr->split_count = attr->out_count;
3459 break;
3460 case FLOW_ACTION_TUNNEL_DECAP:
3461 action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
3462 break;
3463 case FLOW_ACTION_GOTO: {
3464 u32 dest_chain = act->chain_index;
3465 u32 max_chain = mlx5_eswitch_get_chain_range(esw);
3467 if (ft_flow) {
3468 NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
3469 return -EOPNOTSUPP;
3470 }
3471 if (dest_chain <= attr->chain) {
3472 NL_SET_ERR_MSG(extack, "Goto earlier chain isn't supported");
3473 return -EOPNOTSUPP;
3474 }
3475 if (dest_chain > max_chain) {
3476 NL_SET_ERR_MSG(extack, "Requested destination chain is out of supported range");
3477 return -EOPNOTSUPP;
3478 }
3479 action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
3480 attr->dest_chain = dest_chain;
3481 break;
3482 }
3484 NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
3485 return -EOPNOTSUPP;
3486 }
3487 }
3489 if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
3490 action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
3491 /* For prio tag mode, replace the vlan pop with a rewrite of the
3492 * vlan prio tag.
3493 */
3494 action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
3495 err = add_vlan_prio_tag_rewrite_action(priv, parse_attr, hdrs,
3496 &action, extack);
3497 if (err)
3498 return err;
3499 }
3501 if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
3502 hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
3503 err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
3504 parse_attr, hdrs, &action, extack);
3505 if (err)
3506 return err;
3507 /* in case all pedit actions are skipped, remove the MOD_HDR
3508 * flag. we might have set split_count either by pedit or
3509 * pop/push. if there is no pop/push either, reset it too.
3510 */
3511 if (parse_attr->num_mod_hdr_actions == 0) {
3512 action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3513 kfree(parse_attr->mod_hdr_actions);
3514 if (!((action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
3515 (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
3516 attr->split_count = 0;
3517 }
3518 }
3520 attr->action = action;
3521 if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
3522 return -EOPNOTSUPP;
3524 if (attr->dest_chain) {
3525 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
3526 NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
3527 return -EOPNOTSUPP;
3528 }
3529 attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3530 }
3532 if (!(attr->action &
3533 (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
3534 NL_SET_ERR_MSG(extack, "Rule must have at least one forward/drop action");
3535 return -EOPNOTSUPP;
3536 }
3538 if (attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
3539 NL_SET_ERR_MSG_MOD(extack,
3540 "current firmware doesn't support split rule for port mirroring");
3541 netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
3542 return -EOPNOTSUPP;
3543 }
3545 return 0;
3546 }
3548 static void get_flags(int flags, unsigned long *flow_flags)
3549 {
3550 unsigned long __flow_flags = 0;
3552 if (flags & MLX5_TC_FLAG(INGRESS))
3553 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_INGRESS);
3554 if (flags & MLX5_TC_FLAG(EGRESS))
3555 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_EGRESS);
3557 if (flags & MLX5_TC_FLAG(ESW_OFFLOAD))
3558 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
3559 if (flags & MLX5_TC_FLAG(NIC_OFFLOAD))
3560 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
3561 if (flags & MLX5_TC_FLAG(FT_OFFLOAD))
3562 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_FT);
3564 *flow_flags = __flow_flags;
3565 }
3567 static const struct rhashtable_params tc_ht_params = {
3568 .head_offset = offsetof(struct mlx5e_tc_flow, node),
3569 .key_offset = offsetof(struct mlx5e_tc_flow, cookie),
3570 .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
3571 .automatic_shrinking = true,
3572 };
3574 static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
3575 unsigned long flags)
3576 {
3577 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3578 struct mlx5e_rep_priv *uplink_rpriv;
3580 if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) {
3581 uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
3582 return &uplink_rpriv->uplink_priv.tc_ht;
3583 } else /* NIC offload */
3584 return &priv->fs.tc.ht;
3585 }
3587 static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
3588 {
3589 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
3590 bool is_rep_ingress = attr->in_rep->vport != MLX5_VPORT_UPLINK &&
3591 flow_flag_test(flow, INGRESS);
3592 bool act_is_encap = !!(attr->action &
3593 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
3594 bool esw_paired = mlx5_devcom_is_paired(attr->in_mdev->priv.devcom,
3595 MLX5_DEVCOM_ESW_OFFLOADS);
3597 if (!esw_paired)
3598 return false;
3600 if ((mlx5_lag_is_sriov(attr->in_mdev) ||
3601 mlx5_lag_is_multipath(attr->in_mdev)) &&
3602 (is_rep_ingress || act_is_encap))
3603 return true;
3605 return false;
3606 }
3608 static int
3609 mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
3610 struct flow_cls_offload *f, unsigned long flow_flags,
3611 struct mlx5e_tc_flow_parse_attr **__parse_attr,
3612 struct mlx5e_tc_flow **__flow)
3613 {
3614 struct mlx5e_tc_flow_parse_attr *parse_attr;
3615 struct mlx5e_tc_flow *flow;
3616 int out_index, err;
3618 flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
3619 parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
3620 if (!parse_attr || !flow) {
3621 err = -ENOMEM;
3622 goto err_free;
3623 }
3625 flow->cookie = f->cookie;
3626 flow->flags = flow_flags;
3627 flow->priv = priv;
3628 for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
3629 INIT_LIST_HEAD(&flow->encaps[out_index].list);
3630 INIT_LIST_HEAD(&flow->mod_hdr);
3631 INIT_LIST_HEAD(&flow->hairpin);
3632 refcount_set(&flow->refcnt, 1);
3633 init_completion(&flow->init_done);
3636 *__parse_attr = parse_attr;
3637 *__flow = flow;
3639 return 0;
3641 err_free:
3642 kfree(flow);
3643 kvfree(parse_attr);
3644 return err;
3645 }
3646 static void
3647 mlx5e_flow_esw_attr_init(struct mlx5_esw_flow_attr *esw_attr,
3648 struct mlx5e_priv *priv,
3649 struct mlx5e_tc_flow_parse_attr *parse_attr,
3650 struct flow_cls_offload *f,
3651 struct mlx5_eswitch_rep *in_rep,
3652 struct mlx5_core_dev *in_mdev)
3653 {
3654 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3656 esw_attr->parse_attr = parse_attr;
3657 esw_attr->chain = f->common.chain_index;
3658 esw_attr->prio = f->common.prio;
3660 esw_attr->in_rep = in_rep;
3661 esw_attr->in_mdev = in_mdev;
3663 if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) ==
3664 MLX5_COUNTER_SOURCE_ESWITCH)
3665 esw_attr->counter_dev = in_mdev;
3666 else
3667 esw_attr->counter_dev = priv->mdev;
3668 }
3670 static struct mlx5e_tc_flow *
3671 __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
3672 struct flow_cls_offload *f,
3673 unsigned long flow_flags,
3674 struct net_device *filter_dev,
3675 struct mlx5_eswitch_rep *in_rep,
3676 struct mlx5_core_dev *in_mdev)
3677 {
3678 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
3679 struct netlink_ext_ack *extack = f->common.extack;
3680 struct mlx5e_tc_flow_parse_attr *parse_attr;
3681 struct mlx5e_tc_flow *flow;
3682 int attr_size, err;
3684 flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
3685 attr_size = sizeof(struct mlx5_esw_flow_attr);
3686 err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
3687 &parse_attr, &flow);
3688 if (err)
3689 goto out;
3691 parse_attr->filter_dev = filter_dev;
3692 mlx5e_flow_esw_attr_init(flow->esw_attr,
3693 priv, parse_attr,
3694 f, in_rep, in_mdev);
3696 err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
3697 f, filter_dev);
3698 if (err)
3699 goto err_free;
3701 err = parse_tc_fdb_actions(priv, &rule->action, flow, extack);
3702 if (err)
3703 goto err_free;
3705 err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
3706 complete_all(&flow->init_done);
3707 if (err) {
3708 if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev)))
3709 goto err_free;
3711 add_unready_flow(flow);
3712 }
3714 return flow;
3716 err_free:
3717 mlx5e_flow_put(priv, flow);
3718 out:
3719 return ERR_PTR(err);
3720 }
3722 static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
3723 struct mlx5e_tc_flow *flow,
3724 unsigned long flow_flags)
3725 {
3726 struct mlx5e_priv *priv = flow->priv, *peer_priv;
3727 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
3728 struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
3729 struct mlx5e_tc_flow_parse_attr *parse_attr;
3730 struct mlx5e_rep_priv *peer_urpriv;
3731 struct mlx5e_tc_flow *peer_flow;
3732 struct mlx5_core_dev *in_mdev;
3733 int err = 0;
3735 peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
3736 if (!peer_esw)
3737 return -ENODEV;
3739 peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH);
3740 peer_priv = netdev_priv(peer_urpriv->netdev);
3742 /* in_mdev holds the mdev that the packet originated from.
3743 * Packets redirected to the uplink therefore use the same mdev as the
3744 * original flow, while packets redirected from the uplink use the
3745 * peer mdev.
3746 */
3747 if (flow->esw_attr->in_rep->vport == MLX5_VPORT_UPLINK)
3748 in_mdev = peer_priv->mdev;
3750 in_mdev = priv->mdev;
3752 parse_attr = flow->esw_attr->parse_attr;
3753 peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow_flags,
3754 parse_attr->filter_dev,
3755 flow->esw_attr->in_rep, in_mdev);
3756 if (IS_ERR(peer_flow)) {
3757 err = PTR_ERR(peer_flow);
3758 goto out;
3759 }
3761 flow->peer_flow = peer_flow;
3762 flow_flag_set(flow, DUP);
3763 mutex_lock(&esw->offloads.peer_mutex);
3764 list_add_tail(&flow->peer, &esw->offloads.peer_flows);
3765 mutex_unlock(&esw->offloads.peer_mutex);
3767 out:
3768 mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
3769 return err;
3770 }
3772 static int
3773 mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
3774 struct flow_cls_offload *f,
3775 unsigned long flow_flags,
3776 struct net_device *filter_dev,
3777 struct mlx5e_tc_flow **__flow)
3778 {
3779 struct mlx5e_rep_priv *rpriv = priv->ppriv;
3780 struct mlx5_eswitch_rep *in_rep = rpriv->rep;
3781 struct mlx5_core_dev *in_mdev = priv->mdev;
3782 struct mlx5e_tc_flow *flow;
3783 int err;
3785 flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
3786 in_mdev);
3787 if (IS_ERR(flow))
3788 return PTR_ERR(flow);
3790 if (is_peer_flow_needed(flow)) {
3791 err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags);
3792 if (err) {
3793 mlx5e_tc_del_fdb_flow(priv, flow);
3794 return err;
3795 }
3796 }
3798 *__flow = flow;
3800 return 0;
3801 }
3806 static int
3807 mlx5e_add_nic_flow(struct mlx5e_priv *priv,
3808 struct flow_cls_offload *f,
3809 unsigned long flow_flags,
3810 struct net_device *filter_dev,
3811 struct mlx5e_tc_flow **__flow)
3812 {
3813 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
3814 struct netlink_ext_ack *extack = f->common.extack;
3815 struct mlx5e_tc_flow_parse_attr *parse_attr;
3816 struct mlx5e_tc_flow *flow;
3817 int attr_size, err;
3819 /* multi-chain not supported for NIC rules */
3820 if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
3821 return -EOPNOTSUPP;
3823 flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
3824 attr_size = sizeof(struct mlx5_nic_flow_attr);
3825 err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
3826 &parse_attr, &flow);
3827 if (err)
3828 goto out;
3830 parse_attr->filter_dev = filter_dev;
3831 err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
3832 f, filter_dev);
3833 if (err)
3834 goto err_free;
3836 err = parse_tc_nic_actions(priv, &rule->action, parse_attr, flow, extack);
3837 if (err)
3838 goto err_free;
3840 err = mlx5e_tc_add_nic_flow(priv, parse_attr, flow, extack);
3841 if (err)
3842 goto err_free;
3844 flow_flag_set(flow, OFFLOADED);
3845 *__flow = flow;
3847 return 0;
3849 err_free:
3851 mlx5e_flow_put(priv, flow);
3852 out:
3853 return err;
3854 }
3857 static int
3858 mlx5e_tc_add_flow(struct mlx5e_priv *priv,
3859 struct flow_cls_offload *f,
3860 unsigned long flags,
3861 struct net_device *filter_dev,
3862 struct mlx5e_tc_flow **flow)
3863 {
3864 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3865 unsigned long flow_flags;
3866 int err;
3868 get_flags(flags, &flow_flags);
3870 if (!tc_can_offload_extack(priv->netdev, f->common.extack))
3871 return -EOPNOTSUPP;
3873 if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
3874 err = mlx5e_add_fdb_flow(priv, f, flow_flags,
3875 filter_dev, flow);
3876 else
3877 err = mlx5e_add_nic_flow(priv, f, flow_flags,
3878 filter_dev, flow);
3880 return err;
3881 }
3883 int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
3884 struct flow_cls_offload *f, unsigned long flags)
3885 {
3886 struct netlink_ext_ack *extack = f->common.extack;
3887 struct rhashtable *tc_ht = get_tc_ht(priv, flags);
3888 struct mlx5e_tc_flow *flow;
3889 int err = 0;
3891 rcu_read_lock();
3892 flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
3893 rcu_read_unlock();
3894 if (flow) {
3895 NL_SET_ERR_MSG_MOD(extack,
3896 "flow cookie already exists, ignoring");
3897 netdev_warn_once(priv->netdev,
3898 "flow cookie %lx already exists, ignoring\n",
3899 f->cookie);
3900 err = -EEXIST;
3901 goto out;
3902 }
3904 trace_mlx5e_configure_flower(f);
3905 err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
3906 if (err)
3907 goto out;
3909 err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params);
3910 if (err)
3911 goto err_free;
3913 return 0;
3915 err_free:
3916 mlx5e_flow_put(priv, flow);
3917 out:
3918 return err;
3919 }
3921 static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
3922 {
3923 bool dir_ingress = !!(flags & MLX5_TC_FLAG(INGRESS));
3924 bool dir_egress = !!(flags & MLX5_TC_FLAG(EGRESS));
3926 return flow_flag_test(flow, INGRESS) == dir_ingress &&
3927 flow_flag_test(flow, EGRESS) == dir_egress;
3928 }
3930 int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
3931 struct flow_cls_offload *f, unsigned long flags)
3932 {
3933 struct rhashtable *tc_ht = get_tc_ht(priv, flags);
3934 struct mlx5e_tc_flow *flow;
3935 int err;
3937 rcu_read_lock();
3938 flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
3939 if (!flow || !same_flow_direction(flow, flags)) {
3940 err = -EINVAL;
3941 goto errout;
3942 }
3944 /* Only delete the flow if it doesn't have MLX5E_TC_FLOW_DELETED flag
3945 * set.
3946 */
3947 if (flow_flag_test_and_set(flow, DELETED)) {
3948 err = -EINVAL;
3949 goto errout;
3950 }
3951 rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
3952 rcu_read_unlock();
3954 trace_mlx5e_delete_flower(f);
3955 mlx5e_flow_put(priv, flow);
3957 return 0;
3959 errout:
3960 rcu_read_unlock();
3961 return err;
3962 }
3964 int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
3965 struct flow_cls_offload *f, unsigned long flags)
3966 {
3967 struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
3968 struct rhashtable *tc_ht = get_tc_ht(priv, flags);
3969 struct mlx5_eswitch *peer_esw;
3970 struct mlx5e_tc_flow *flow;
3971 struct mlx5_fc *counter;
3972 u64 lastuse = 0;
3973 u64 packets = 0;
3974 u64 bytes = 0;
3975 int err = 0;
3977 rcu_read_lock();
3978 flow = mlx5e_flow_get(rhashtable_lookup(tc_ht, &f->cookie,
3979 tc_ht_params));
3980 rcu_read_unlock();
3981 if (IS_ERR(flow))
3982 return PTR_ERR(flow);
3984 if (!same_flow_direction(flow, flags)) {
3985 err = -EINVAL;
3986 goto errout;
3987 }
3989 if (mlx5e_is_offloaded_flow(flow)) {
3990 counter = mlx5e_tc_get_counter(flow);
3991 if (!counter)
3992 goto errout;
3994 mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
3995 }
3997 /* Under multipath it's possible for one rule to be currently
3998 * un-offloaded while the other rule is offloaded.
3999 */
4000 peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
4001 if (!peer_esw)
4002 goto out;
4004 if (flow_flag_test(flow, DUP) &&
4005 flow_flag_test(flow->peer_flow, OFFLOADED)) {
4006 u64 bytes2;
4007 u64 packets2;
4008 u64 lastuse2;
4010 counter = mlx5e_tc_get_counter(flow->peer_flow);
4011 if (!counter)
4012 goto no_peer_counter;
4013 mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);
4015 bytes += bytes2;
4016 packets += packets2;
4017 lastuse = max_t(u64, lastuse, lastuse2);
4018 }
4020 no_peer_counter:
4021 mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
4022 out:
4023 flow_stats_update(&f->stats, bytes, packets, lastuse);
4024 trace_mlx5e_stats_flower(f);
4025 errout:
4026 mlx5e_flow_put(priv, flow);
4027 return err;
4028 }
4030 static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
4031 struct netlink_ext_ack *extack)
4032 {
4033 struct mlx5e_rep_priv *rpriv = priv->ppriv;
4034 struct mlx5_eswitch *esw;
4035 u16 vport_num;
4036 u32 rate_mbps;
4037 int err;
4039 esw = priv->mdev->priv.eswitch;
4040 /* rate is given in bytes/sec.
4041 * First convert to bits/sec and then round to the nearest mbit/secs.
4042 * mbit means million bits.
4043 * Moreover, if rate is non zero we choose to configure to a minimum of
4044 * 1 mbit/sec.
4045 */
4046 rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0;
4047 vport_num = rpriv->rep->vport;
4049 err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
4050 if (err)
4051 NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");
4053 return err;
4054 }
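/* Worked conversion (editor's illustration): rate = 1,000,000 bytes/sec gives
 * (8,000,000 + 500,000) / 1,000,000 = 8 Mbit/sec after rounding. A tiny but
 * non-zero rate such as 1 byte/sec would round to 0, so the max_t() clamps it
 * to the 1 Mbit/sec minimum instead of dropping the limit entirely.
 */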
4056 static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
4057 struct flow_action *flow_action,
4058 struct netlink_ext_ack *extack)
4059 {
4060 struct mlx5e_rep_priv *rpriv = priv->ppriv;
4061 const struct flow_action_entry *act;
4062 int err;
4063 int i;
4065 if (!flow_action_has_entries(flow_action)) {
4066 NL_SET_ERR_MSG_MOD(extack, "matchall called with no action");
4067 return -EINVAL;
4068 }
4070 if (!flow_offload_has_one_action(flow_action)) {
4071 NL_SET_ERR_MSG_MOD(extack, "matchall policing supports only a single action");
4072 return -EOPNOTSUPP;
4073 }
4075 flow_action_for_each(i, act, flow_action) {
4076 switch (act->id) {
4077 case FLOW_ACTION_POLICE:
4078 err = apply_police_params(priv, act->police.rate_bytes_ps, extack);
4079 if (err)
4080 return err;
4082 rpriv->prev_vf_vport_stats = priv->stats.vf_vport;
4083 break;
4084 default:
4085 NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall");
4086 return -EOPNOTSUPP;
4087 }
4088 }
4090 return 0;
4091 }
4093 int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
4094 struct tc_cls_matchall_offload *ma)
4095 {
4096 struct netlink_ext_ack *extack = ma->common.extack;
4098 if (ma->common.prio != 1) {
4099 NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
4100 return -EOPNOTSUPP;
4101 }
4103 return scan_tc_matchall_fdb_actions(priv, &ma->rule->action, extack);
4104 }
4106 int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
4107 struct tc_cls_matchall_offload *ma)
4108 {
4109 struct netlink_ext_ack *extack = ma->common.extack;
4111 return apply_police_params(priv, 0, extack);
4112 }
4114 void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
4115 struct tc_cls_matchall_offload *ma)
4116 {
4117 struct mlx5e_rep_priv *rpriv = priv->ppriv;
4118 struct rtnl_link_stats64 cur_stats;
4119 u64 dbytes;
4120 u64 dpkts;
4122 cur_stats = priv->stats.vf_vport;
4123 dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
4124 dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
4125 rpriv->prev_vf_vport_stats = cur_stats;
4126 flow_stats_update(&ma->stats, dpkts, dbytes, jiffies);
4127 }
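/* Accounting note (editor's illustration): matchall stats are deltas against
 * the snapshot taken when the police action was installed or last read. If
 * the VF vport has seen 1,500 more packets and 900,000 more bytes than
 * rpriv->prev_vf_vport_stats recorded, exactly those deltas are reported and
 * the snapshot is advanced, so each call reports only new traffic.
 */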
4129 static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
4130 struct mlx5e_priv *peer_priv)
4131 {
4132 struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
4133 struct mlx5e_hairpin_entry *hpe, *tmp;
4134 LIST_HEAD(init_wait_list);
4135 u16 peer_vhca_id;
4136 int bkt;
4138 if (!same_hw_devs(priv, peer_priv))
4139 return;
4141 peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
4143 mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
4144 hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist)
4145 if (refcount_inc_not_zero(&hpe->refcnt))
4146 list_add(&hpe->dead_peer_wait_list, &init_wait_list);
4147 mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
4149 list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
4150 wait_for_completion(&hpe->res_ready);
4151 if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
4152 hpe->hp->pair->peer_gone = true;
4154 mlx5e_hairpin_put(priv, hpe);
4155 }
4156 }
4158 static int mlx5e_tc_netdev_event(struct notifier_block *this,
4159 unsigned long event, void *ptr)
4160 {
4161 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
4162 struct mlx5e_flow_steering *fs;
4163 struct mlx5e_priv *peer_priv;
4164 struct mlx5e_tc_table *tc;
4165 struct mlx5e_priv *priv;
4167 if (ndev->netdev_ops != &mlx5e_netdev_ops ||
4168 event != NETDEV_UNREGISTER ||
4169 ndev->reg_state == NETREG_REGISTERED)
4170 return NOTIFY_DONE;
4172 tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
4173 fs = container_of(tc, struct mlx5e_flow_steering, tc);
4174 priv = container_of(fs, struct mlx5e_priv, fs);
4175 peer_priv = netdev_priv(ndev);
4176 if (priv == peer_priv ||
4177 !(priv->netdev->features & NETIF_F_HW_TC))
4178 return NOTIFY_DONE;
4180 mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);
4182 return NOTIFY_DONE;
4183 }
4185 int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
4186 {
4187 struct mlx5e_tc_table *tc = &priv->fs.tc;
4188 int err;
4190 mutex_init(&tc->t_lock);
4191 mutex_init(&tc->mod_hdr.lock);
4192 hash_init(tc->mod_hdr.hlist);
4193 mutex_init(&tc->hairpin_tbl_lock);
4194 hash_init(tc->hairpin_tbl);
4196 err = rhashtable_init(&tc->ht, &tc_ht_params);
4197 if (err)
4198 return err;
4200 tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
4201 if (register_netdevice_notifier(&tc->netdevice_nb)) {
4202 tc->netdevice_nb.notifier_call = NULL;
4203 mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
4204 }
4206 return err;
4207 }
4209 static void _mlx5e_tc_del_flow(void *ptr, void *arg)
4210 {
4211 struct mlx5e_tc_flow *flow = ptr;
4212 struct mlx5e_priv *priv = flow->priv;
4214 mlx5e_tc_del_flow(priv, flow);
4215 kfree(flow);
4216 }
4218 void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
4219 {
4220 struct mlx5e_tc_table *tc = &priv->fs.tc;
4222 if (tc->netdevice_nb.notifier_call)
4223 unregister_netdevice_notifier(&tc->netdevice_nb);
4225 mutex_destroy(&tc->mod_hdr.lock);
4226 mutex_destroy(&tc->hairpin_tbl_lock);
4228 rhashtable_destroy(&tc->ht);
4230 if (!IS_ERR_OR_NULL(tc->t)) {
4231 mlx5_destroy_flow_table(tc->t);
4232 tc->t = NULL;
4233 }
4234 mutex_destroy(&tc->t_lock);
4235 }
4237 int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
4238 {
4239 return rhashtable_init(tc_ht, &tc_ht_params);
4240 }
4242 void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
4243 {
4244 rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
4245 }
4247 int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
4248 {
4249 struct rhashtable *tc_ht = get_tc_ht(priv, flags);
4251 return atomic_read(&tc_ht->nelems);
4252 }
4254 void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
4255 {
4256 struct mlx5e_tc_flow *flow, *tmp;
4258 list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer)
4259 __mlx5e_tc_del_fdb_peer_flow(flow);
4260 }
4262 void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
4263 {
4264 struct mlx5_rep_uplink_priv *rpriv =
4265 container_of(work, struct mlx5_rep_uplink_priv,
4266 reoffload_flows_work);
4267 struct mlx5e_tc_flow *flow, *tmp;
4269 mutex_lock(&rpriv->unready_flows_lock);
4270 list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) {
4271 if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL))
4272 unready_flow_del(flow);
4273 }
4274 mutex_unlock(&rpriv->unready_flows_lock);
4275 }